diff --git a/.gitignore b/.gitignore
index 3cb646d5e..207d350bb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,8 +14,6 @@ tmp-int-test-core
 
 *.report
 
-**/sha256
-
 **/gomock_reflect_*
 
 coverage.*
diff --git a/apis/mediatype/constants.go b/apis/mediatype/constants.go
index fe76380e0..f10b2a39a 100644
--- a/apis/mediatype/constants.go
+++ b/apis/mediatype/constants.go
@@ -27,6 +27,10 @@ const (
 	// Optionally the compression can be added as "+gzip"
 	BlueprintArtifactsLayerMediaTypeV1 = "application/vnd.gardener.landscaper.blueprint.layer.v1.tar"
 
+	// JSONSchemaType is the name of the JSON schema type in a component descriptor.
+	// Although it is not a media or MIME type, this currently seems like the best place to put it.
+	JSONSchemaType = "landscaper.gardener.cloud/jsonschema"
+
 	// JSONSchemaArtifactsMediaTypeV0 is the reserved media type for a jsonschema that is stored as layer in an oci artifact.
 	// This is the legacy deprecated artifact media type use JSONSchemaArtifactsMediaTypeV1 instead.
 	// DEPRECATED
diff --git a/go.mod b/go.mod
index 6cfb75f8b..916e23630 100644
--- a/go.mod
+++ b/go.mod
@@ -17,11 +17,14 @@ require (
 	github.com/google/uuid v1.3.0
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/imdario/mergo v0.3.15
+	github.com/mandelsoft/filepath v0.0.0-20230412200429-36b1eb66bd27
+	github.com/mandelsoft/logging v0.0.0-20230905123808-7042ee3aae45
 	github.com/mandelsoft/spiff v1.7.0-beta-5
-	github.com/mandelsoft/vfs v0.0.0-20230506183150-975954b82357
+	github.com/mandelsoft/vfs v0.0.0-20230713123140-269aa4fb1338
 	github.com/onsi/ginkgo v1.16.5
 	github.com/onsi/ginkgo/v2 v2.9.5
 	github.com/onsi/gomega v1.27.7
+	github.com/open-component-model/ocm v0.3.0-rc.1.0.20230921153454-4fdca213d05a
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0-rc3
 	github.com/pkg/errors v0.9.1
@@ -36,7 +39,7 @@
 	golang.org/x/sync v0.3.0
 	golang.org/x/sys v0.9.0
 	gopkg.in/yaml.v3 v3.0.1
-	helm.sh/helm/v3 v3.11.1
+	helm.sh/helm/v3 v3.12.2
 	k8s.io/api v0.27.3
 	k8s.io/apiextensions-apiserver v0.27.2
 	k8s.io/apimachinery v0.27.3
@@ -49,111 +52,247 @@ require (
 	cloud.google.com/go/compute v1.19.3 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
+	filippo.io/edwards25519 v1.0.0 // indirect
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
+	github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect
+	github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
+	github.com/Azure/go-autorest/autorest v0.11.29 // indirect
+	github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect
+	github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
+	github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
+	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
+	github.com/Azure/go-autorest/logger v0.2.1 // indirect
+	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 	github.com/BurntSushi/toml v1.2.1 // indirect
+	github.com/DataDog/gostackparse v0.6.0 // indirect
 	github.com/MakeNowJust/heredoc v1.0.0 // indirect
 	github.com/Masterminds/goutils v1.1.1 // indirect
 	github.com/Masterminds/semver/v3 v3.2.1 // indirect
-	github.com/Masterminds/squirrel v1.5.3 // indirect
+	github.com/Masterminds/squirrel v1.5.4 // indirect
+	github.com/Microsoft/go-winio v0.6.1 // indirect
+	github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 // indirect
+	github.com/ThalesIgnite/crypto11 v1.2.5 // indirect
+	github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect
+	github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect
+	github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect
+	github.com/alibabacloud-go/darabonba-openapi v0.1.18 // indirect
+	github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 // indirect
+	github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect
+	github.com/alibabacloud-go/openapi-util v0.0.11 // indirect
+	github.com/alibabacloud-go/tea v1.1.18 // indirect
+	github.com/alibabacloud-go/tea-utils v1.4.4 // indirect
+	github.com/alibabacloud-go/tea-xml v1.1.2 // indirect
+	github.com/aliyun/credentials-go v1.2.3 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.18.1 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.18.27 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.13.26 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.33 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ecr v1.18.7 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.12.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17 // indirect
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 // indirect
+	github.com/aws/smithy-go v1.13.5 // indirect
+	github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0 // indirect
+	github.com/blang/semver v3.5.1+incompatible // indirect
+	github.com/buildkite/agent/v3 v3.49.0 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/chai2010/gettext-go v1.0.2 // indirect
+	github.com/chrismellard/docker-credential-acr-env v0.0.0-20220119192733-fe33c00cee21 // indirect
+	github.com/clbanning/mxj/v2 v2.5.6 // indirect
+	github.com/cloudflare/circl v1.3.3 // indirect
+	github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
+	github.com/containers/image/v5 v5.21.1 // indirect
+	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
+	github.com/containers/ocicrypt v1.1.6 // indirect
+	github.com/containers/storage v1.42.0 // indirect
+	github.com/coreos/go-oidc/v3 v3.6.0 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/digitorus/pkcs7 v0.0.0-20221212123742-001c36b64ec3 // indirect
+	github.com/digitorus/timestamp v0.0.0-20221019182153-ef3b63b79b31 // indirect
+	github.com/dimchansky/utfbom v1.1.1 // indirect
 	github.com/docker/distribution v2.8.2+incompatible // indirect
 	github.com/docker/docker v23.0.5+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.7.0 // indirect
+	github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
-	github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
+	github.com/drone/envsubst v1.0.3 // indirect
 	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
-	github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
+	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
 	github.com/fatih/color v1.13.0 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/fvbommel/sortorder v1.0.2 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
+	github.com/go-chi/chi v4.1.2+incompatible // indirect
 	github.com/go-errors/errors v1.4.2 // indirect
 	github.com/go-gorp/gorp/v3 v3.1.0 // indirect
+	github.com/go-jose/go-jose/v3 v3.0.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-logr/zapr v1.2.4 // indirect
+	github.com/go-openapi/analysis v0.21.4 // indirect
+	github.com/go-openapi/errors v0.20.3 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/loads v0.21.2 // indirect
+	github.com/go-openapi/runtime v0.26.0 // indirect
+	github.com/go-openapi/spec v0.20.9 // indirect
+	github.com/go-openapi/strfmt v0.21.7 // indirect
 	github.com/go-openapi/swag v0.22.4 // indirect
-	github.com/go-sql-driver/mysql v1.7.1 // indirect
+	github.com/go-openapi/validate v0.22.1 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.14.0 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+	github.com/go-test/deep v1.1.0 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/btree v1.1.2 // indirect
+	github.com/google/certificate-transparency-go v1.1.6 // indirect
 	github.com/google/gnostic v0.6.9 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/go-containerregistry v0.15.2 // indirect
+	github.com/google/go-github/v45 v45.2.0 // indirect
+	github.com/google/go-github/v50 v50.2.0 // indirect
+	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c // indirect
+	github.com/google/s2a-go v0.1.4 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/gosuri/uitable v0.0.4 // indirect
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
-	github.com/huandu/xstrings v1.3.3 // indirect
+	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/huandu/xstrings v1.4.0 // indirect
+	github.com/in-toto/in-toto-golang v0.9.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b // indirect
+	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/jmoiron/sqlx v1.3.5 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/compress v1.16.5 // indirect
+	github.com/klauspost/pgzip v1.2.5 // indirect
 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
 	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
+	github.com/leodido/go-urn v1.2.4 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
 	github.com/lib/pq v1.10.9 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/mandelsoft/filepath v0.0.0-20230412200429-36b1eb66bd27 // indirect
+	github.com/marstr/guid v1.1.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.16 // indirect
-	github.com/mattn/go-runewidth v0.0.13 // indirect
-	github.com/mattn/go-sqlite3 v1.14.16 // indirect
+	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/mattn/go-runewidth v0.0.14 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mitchellh/copystructure v1.2.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/moby/locker v1.0.1 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
+	github.com/moby/sys/sequential v0.5.0 // indirect
 	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/distribution-spec v1.0.1 // indirect
+	github.com/opentracing/opentracing-go v1.2.0 // indirect
+	github.com/pborman/uuid v1.2.1 // indirect
+	github.com/pelletier/go-toml v1.9.5 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.8 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/prometheus/client_model v0.4.0 // indirect
 	github.com/prometheus/common v0.42.0 // indirect
 	github.com/prometheus/procfs v0.9.0 // indirect
-	github.com/rivo/uniseg v0.2.0 // indirect
-	github.com/rubenv/sql-migrate v1.2.0 // indirect
+	github.com/rivo/uniseg v0.4.2 // indirect
+	github.com/rubenv/sql-migrate v1.3.1 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/sergi/go-diff v1.3.1 // indirect
-	github.com/shopspring/decimal v1.2.0 // indirect
+	github.com/sassoftware/relic v7.2.1+incompatible // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.6.0 // indirect
+	github.com/segmentio/ksuid v1.0.4 // indirect
+	github.com/shibumi/go-pathspec v1.3.0 // indirect
+	github.com/shopspring/decimal v1.3.1 // indirect
+	github.com/sigstore/cosign/v2 v2.1.1 // indirect
+	github.com/sigstore/fulcio v1.3.1 // indirect
+	github.com/sigstore/rekor v1.2.2-0.20230530122220-67cc9e58bd23 // indirect
+	github.com/sigstore/sigstore v1.7.1 // indirect
+	github.com/sigstore/timestamp-authority v1.1.1 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
 	github.com/spf13/cast v1.5.1 // indirect
-	github.com/stretchr/testify v1.8.4 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/viper v1.16.0 // indirect
+	github.com/spiffe/go-spiffe/v2 v2.1.6 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
+	github.com/thales-e-security/pool v0.0.2 // indirect
+	github.com/theupdateframework/go-tuf v0.5.2 // indirect
+	github.com/theupdateframework/notary v0.7.0 // indirect
+	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+	github.com/tjfoc/gmsm v1.3.2 // indirect
+	github.com/tonglil/buflogr v1.0.1 // indirect
+	github.com/transparency-dev/merkle v0.0.2 // indirect
+	github.com/ulikunitz/xz v0.5.10 // indirect
+	github.com/valyala/bytebufferpool v1.0.0 // indirect
+	github.com/valyala/fasttemplate v1.2.2 // indirect
+	github.com/vbatts/tar-split v0.11.3 // indirect
+	github.com/xanzy/go-gitlab v0.86.0 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xlab/treeprint v1.1.0 // indirect
+	github.com/zeebo/errs v1.3.0 // indirect
+	go.mongodb.org/mongo-driver v1.11.3 // indirect
+	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/otel v1.16.0 // indirect
 	go.opentelemetry.io/otel/metric v1.16.0 // indirect
 	go.opentelemetry.io/otel/trace v1.16.0 // indirect
 	go.starlark.net v0.0.0-20221028183056-acb66ad56dd2 // indirect
+	go.step.sm/crypto v0.32.1 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.24.0 // indirect
+	golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
 	golang.org/x/mod v0.11.0 // indirect
 	golang.org/x/net v0.11.0 // indirect
 	golang.org/x/term v0.9.0 // indirect
@@ -161,25 +300,29 @@ require (
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/tools v0.9.3 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
+	google.golang.org/api v0.128.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
 	google.golang.org/grpc v1.56.0 // indirect
 	google.golang.org/protobuf v1.30.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	k8s.io/apiserver v0.27.2 // indirect
-	k8s.io/cli-runtime v0.26.1 // indirect
+	k8s.io/cli-runtime v0.27.2 // indirect
 	k8s.io/component-base v0.27.2 // indirect
 	k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
 	k8s.io/klog v1.0.0 // indirect
 	k8s.io/klog/v2 v2.100.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
-	k8s.io/kubectl v0.26.0 // indirect
+	k8s.io/kubectl v0.27.2 // indirect
 	oras.land/oras-go v1.2.3 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/kustomize/api v0.12.1 // indirect
-	sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
+	sigs.k8s.io/kustomize/api v0.13.2 // indirect
+	sigs.k8s.io/kustomize/kyaml v0.14.1 // indirect
+	sigs.k8s.io/release-utils v0.7.4 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 )
@@ -187,5 +330,4 @@ replace (
 	github.com/gardener/landscaper/apis => ./apis
 	github.com/gardener/landscaper/controller-utils => ./controller-utils
 	github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.1
-	k8s.io/kubectl => k8s.io/kubectl v0.26.1
 )
diff --git a/go.sum b/go.sum
index ff311202b..49f8e27e9 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,11 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
 cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
 cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
 cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
 cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -15,9 +18,11 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -31,6 +36,8 @@ cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2Aawl
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/iam v1.1.0 h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94=
+cloud.google.com/go/kms v1.12.1 h1:xZmZuwy2cwzsocmKDOPu4BL7umg8QXagQx6fKVmf45U=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -40,116 +47,565 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek=
+filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
+github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230329111138-12e09aba5ebd h1:1tbEqR4NyQLgiod7vLXSswHteGetAVZrMGCqrJxLKRs=
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw=
+github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v46.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 h1:SEy2xmstIphdPwNBUi7uhvjyjhVKISfwjfOJmuy7kg4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v0.12.0 h1:4Kynh6Hn2ekyIsBgNQJb3dn1+/MyvzfUJebti2emB/A=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.6/go.mod h1:V6p3pKZx1KKkJubbxnDWrzNhEIfOy/pTGasLqzHIPHs=
+github.com/Azure/go-autorest/autorest v0.11.8/go.mod h1:V6p3pKZx1KKkJubbxnDWrzNhEIfOy/pTGasLqzHIPHs=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
+github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
+github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.4/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc=
+github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.2/go.mod h1:q98IH4qgc3eWM4/WOeR5+YPmBuy8Lq0jNRDwSM0CuFk=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.1/go.mod h1:JfDgiIO1/RPu6z42AdQTyjOoCM2MFhLqSBDvMEkDgcg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
 github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
+github.com/DataDog/gostackparse v0.6.0 h1:egCGQviIabPwsyoWpGvIBGrEnNWez35aEO7OJ1vBI4o=
+github.com/DataDog/gostackparse v0.6.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
 github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
 github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
 github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
 github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
 github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/Masterminds/sprig/v3 v3.2.0/go.mod h1:tWhwTbUTndesPNeF0C900vKoq283u6zp4APT9vaF3SI=
+github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
 github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
 github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
-github.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc=
-github.com/Masterminds/squirrel v1.5.3/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
+github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
+github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 h1:ZK3C5DtzV2nVAQTx5S5jQvMeDqWtD1By5mOoyY/xJek=
+github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E=
+github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
+github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
+github.com/a8m/expect v1.0.0/go.mod h1:4IwSCMumY49ScypDnjNbYEjgVeqy1/U2cEs3Lat96eA=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 h1:+XfOU14S4bGuwyvCijJwhhBIjYN+YXS18jrCY2EzJaY=
 github.com/ahmetb/gen-crd-api-reference-docs v0.3.0/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc=
+github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo=
+github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc=
+github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts=
+github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0=
+github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c=
+github.com/alibabacloud-go/cr-20181201 v1.0.10/go.mod h1:VN9orB/w5G20FjytoSpZROqu9ZqxwycASmGqYUJSoDc=
+github.com/alibabacloud-go/darabonba-openapi v0.1.12/go.mod h1:sTAjsFJmVsmcVeklL9d9uDBlFsgl43wZ6jhI6BHqHqU=
+github.com/alibabacloud-go/darabonba-openapi v0.1.14/go.mod h1:w4CosR7O/kapCtEEMBm3JsQqWBU/CnZ2o0pHorsTWDI=
+github.com/alibabacloud-go/darabonba-openapi v0.1.18 h1:3eUVmAr7WCJp7fgIvmCd9ZUyuwtJYbtUqJIed5eXCmk=
+github.com/alibabacloud-go/darabonba-openapi v0.1.18/go.mod h1:PB4HffMhJVmAgNKNq3wYbTUlFvPgxJpTzd1F5pTuUsc=
+github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA=
+github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 h1:NqugFkGxx1TXSh/pBcU00Y6bljgDPaFdh5MUSeJ7e50=
+github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY=
+github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE=
+github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8=
+github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE=
+github.com/alibabacloud-go/openapi-util v0.0.9/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws=
+github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws=
+github.com/alibabacloud-go/openapi-util v0.0.11 h1:iYnqOPR5hyEEnNZmebGyRMkkEJRWUEjDiiaOHZ5aNhA=
+github.com/alibabacloud-go/openapi-util v0.0.11/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws=
+github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg=
+github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
+github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
+github.com/alibabacloud-go/tea v1.1.11/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4=
+github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A=
+github.com/alibabacloud-go/tea v1.1.18 h1:+6GJ06eu5Cr/Mkj09vWrf6QAfrPepctY2OxcWNclRC0=
+github.com/alibabacloud-go/tea v1.1.18/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A=
+github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE=
+github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE=
+github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw=
+github.com/alibabacloud-go/tea-utils v1.4.4 h1:lxCDvNCdTo9FaXKKq45+4vGETQUKNOW/qKTcX9Sk53o=
+github.com/alibabacloud-go/tea-utils v1.4.4/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw=
+github.com/alibabacloud-go/tea-xml v1.1.2 h1:oLxa7JUXm2EDFzMg+7oRsYc+kutgCVwm+bZlhhmvW5M=
+github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8=
+github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw=
+github.com/aliyun/credentials-go v1.2.3 h1:Vmodnr52Rz1mcbwn0kzMhLRKb6soizewuKXdfZiNemU=
+github.com/aliyun/credentials-go v1.2.3/go.mod h1:/KowD1cfGSLrLsH28Jr8W+xwoId0ywIy5lNzDz6O1vw=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.44.288 h1:Ln7fIao/nl0ACtelgR1I4AiEw/GLNkKcXfCaHupUW5Q=
+github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250=
+github.com/aws/aws-sdk-go-v2 v1.14.0/go.mod h1:ZA3Y8V0LrlWj63MQAnRHgKf/5QB//LSZCPNWlWrNGLU=
+github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
+github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2 v1.18.1 h1:+tefE750oAb7ZQGzla6bLkOwfcQCEtC5y2RqoqCeqKo=
+github.com/aws/aws-sdk-go-v2 v1.18.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 h1:tcFliCWne+zOuUfKNRn8JdFBuWPDuISDH08wD2ULkhk=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8/go.mod h1:JTnlBSot91steJeti4ryyu/tLd4Sk84O5W22L7O2EQU=
+github.com/aws/aws-sdk-go-v2/config v1.5.0/go.mod h1:RWlPOAW3E3tbtNAqTwvSW54Of/yP3oiZXMI0xfUdjyA=
+github.com/aws/aws-sdk-go-v2/config v1.17.7/go.mod h1:dN2gja/QXxFF15hQreyrqYhLBaQo1d9ZKe/v/uplQoI=
+github.com/aws/aws-sdk-go-v2/config v1.18.27 h1:Az9uLwmssTE6OGTpsFqOnaGpLnKDqNYOJzWuC6UAYzA=
+github.com/aws/aws-sdk-go-v2/config v1.18.27/go.mod h1:0My+YgmkGxeqjXZb5BYme5pc4drjTnM+x1GJ3zv42Nw=
+github.com/aws/aws-sdk-go-v2/credentials v1.3.1/go.mod h1:r0n73xwsIVagq8RsxmZbGSRQFj9As3je72C2WzUIToc=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.20/go.mod h1:UKY5HyIux08bbNA7Blv4PcXQ8cTkGh7ghHMFklaviR4=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.26 h1:qmU+yhKmOCyujmuPY7tf5MxR/RKyZrOPO3V4DobiTUk=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.26/go.mod h1:GoXt2YC8jHUBbA4jr+W3JiemnIbkXOfxSXcisUsZ3os=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.3.0/go.mod h1:2LAuqPx1I6jNfaGDucWfA2zqQCYCOMCDHiCOciALyNw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 h1:LxK/bitrAr4lnh9LnIS6i7zWbCOdMsfzKFBI6LUCS0I=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4/go.mod h1:E1hLXN/BL2e6YizK1zFlYd8vsfi2GTjbjBazinMmeaM=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.33 h1:fAoVmNGhir6BR+RU0/EI+6+D7abM+MCwWf8v4ip5jNI=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.33/go.mod h1:84XgODVR8uRhmOnUkKGUZKqIMxmjmLOR8Uyp7G/TPwc=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.5/go.mod h1:2hXc8ooJqF2nAznsbJQIn+7h851/bu8GVC80OVTTqf8=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 h1:A5UqQEmPaCFpedKouS4v+dHCTUo2sKqhoKO9U5kxyWo=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34/go.mod h1:wZpTEecJe0Btj3IYnDx/VlUzor9wm3fJHyvLpQF0VwY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.3.0/go.mod h1:miRSv9l093jX/t/j+mBCaLqFHo9xKYzJ7DGm1BsGoJM=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 h1:srIVS45eQuewqz6fKKu6ZGXaq6FuFg5NzgQBAM6g8Y4=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28/go.mod h1:7VRpKQQedkfIEXb4k52I7swUnZP0wohVajJMRn3vsUw=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.1.1/go.mod h1:Zy8smImhTdOETZqfyn01iNOe0CNggVbPjCajyaz6Gvg=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 h1:LWA+3kDM8ly001vJ1X1waCuLJdtTl48gwkPKWy9sosI=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35/go.mod h1:0Eg1YjxE0Bhn56lx+SHJwCzhW+2JGtizsrx+lCqrfm0=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14 h1:ZSIPAkAsCCjYrhqfw2+lNzWDzxzHXEckFkTePL5RSWQ=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14/go.mod h1:AyGgqiKv9ECM6IZeNQtdT8NnMvUb3/2wokeq2Fgryto=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.4.1/go.mod h1:FglZcyeiBqcbvyinl+n14aT/EWC7S1MIH+Gan2iizt0=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.18.7 h1:oQ1Esut3iaL2Dydt2RBd9gbuUevToXpdTI+Uh1xXryI=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.18.7/go.mod h1:RHhgOMnMIkgB4TmxQat9obSnZ6fF1fuA27+itZKUi1o=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.4.1/go.mod h1:eD5Eo4drVP2FLTw0G+SMIPWNWvQRGGTtIZR2XeAagoA=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.12.0 h1:LsqBpyRofMG6eDs6YGud6FhdGyIyXelAasPOZ6wWLro=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.12.0/go.mod h1:IArQ3IBR00FkuraKwudKZZU32OxJfdTdwV+W5iZh3Y4=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9 h1:Lh1AShsuIJTwMkoxVCAYPJgNG5H+eN6SmoUn8nOZ5wE=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9/go.mod h1:a9j48l6yL5XINLHLcOKInjdvknN+vWqPBxqeIDw7ktw=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18 h1:BBYoNQt2kUZUUK4bIPsKrCcjVPUMNsgQpNAwhznK/zo=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18/go.mod h1:NS55eQ4YixUJPTC+INxi2/jCqe1y2Uw3rnh9wEOVJxY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.1/go.mod h1:zceowr5Z1Nh2WVP8bf/3ikB41IZW59E4yIYbg+pC6mw=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 h1:bkRyG4a929RCnpVSTvLM2j/T4ls015ZhhYApbmYs15s=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28/go.mod h1:jj7znCIg05jXlaGBlFMGP8+7UN3VtCkRBG2spnmRQkU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17 h1:HfVVR1vItaG6le+Bpw6P4midjBDMKnjMyZnw9MXYUcE=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17/go.mod h1:YqMdV+gEKCQ59NrB7rzrJdALeBIsYiVi8Inj3+KcqHI=
+github.com/aws/aws-sdk-go-v2/service/kms v1.22.2 h1:jwmtdM1/l1DRNy5jQrrYpsQm8zwetkgeqhAqefDr1yI=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11 h1:3/gm/JTX9bX8CpzTgIlrtYpB3EVBDxyg/GY/QdcIEZw=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11/go.mod h1:fmgDANqTUCxciViKl9hb/zD5LFbvPINFRgWhDbR+vZo=
+github.com/aws/aws-sdk-go-v2/service/sso v1.3.1/go.mod h1:J3A3RGUvuCZjvSuZEcOpHDnzZP/sKbhDWV2T1EOzFIM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80=
+github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 h1:nneMBM2p79PGWBQovYO/6Xnc2ryRMw3InnDJq1FHkSY=
+github.com/aws/aws-sdk-go-v2/service/sso v1.12.12/go.mod h1:HuCOxYsF21eKrerARYO6HapNeh9GBNq7fius2AcwodY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 h1:2qTR7IFk7/0IN/adSFhYu9Xthr0zVFTgBrmPldILn80=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12/go.mod h1:E4VrHCPzmVB/KFXtqBGKb3c8zpbNBgKe3fisDNLAW5w=
+github.com/aws/aws-sdk-go-v2/service/sts v1.6.0/go.mod h1:q7o0j7d7HrJk/vr9uUt3BVRASvcU7gYZB9PUgPiByXg=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM=
+github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 h1:XFJ2Z6sNUUcAz9poj+245DMkrHE4h2j5I9/xD50RHfE=
+github.com/aws/aws-sdk-go-v2/service/sts v1.19.2/go.mod h1:dp0yLPsLBOi++WTxzCjA/oZqi6NPIhoR+uF7GeMU9eg=
+github.com/aws/smithy-go v1.6.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.11.0/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
+github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
+github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795 h1:IWeCJzU+IYaO2rVEBlGPTBfe90cmGXFTLdhUFlzDGsY=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795/go.mod h1:8vJsEZ4iRqG+Vx6pKhWK6U00qcj0KC37IsfszMkY6UE=
 github.com/b4b4r07/go-pipe v0.0.0-20191010045404-84b446f57366/go.mod h1:1ymsiQNa3qebVEEVtuIdhtAXRfjO4qFCFq1bBUOT2HE=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0 h1:s7+5BfS4WFJoVF9pnB8kBk03S7pZXRdKamnV0FOl5Sc=
 github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/buildkite/agent/v3 v3.49.0 h1:FSmRQz8YFhaCXg4MfE7JucPcY7mQ/HWM55ir1j3E9qM=
+github.com/buildkite/agent/v3 v3.49.0/go.mod h1:iasSyh3KPjOPCnyvnZB1trkkX7jrdL8PnLBgjdVJxgU=
+github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
 github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chrismellard/docker-credential-acr-env v0.0.0-20220119192733-fe33c00cee21 h1:XlpL9EHrPOBJMLDDOf35/G4t5rGAFNNAZQ3cDcWavtc=
+github.com/chrismellard/docker-credential-acr-env v0.0.0-20220119192733-fe33c00cee21/go.mod h1:Zlre/PVxuSI9y6/UV4NwGixQ48RHQDSPiUkofr6rbMU=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/clbanning/mxj/v2 v2.5.6 h1:Jm4VaCI/+Ug5Q57IzEoZbwx4iQFA6wkXv72juUSeK+g=
+github.com/clbanning/mxj/v2 v2.5.6/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
+github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
+github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
+github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
 github.com/cloudfoundry-incubator/candiedyaml v0.0.0-20170901234223-a41693b7b7af h1:6Cpkahw28+gcBdnXQL7LcMTX488+6jl6hfoTMRT6Hm4=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
+github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ=
+github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
 github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
 github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg=
 github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0= +github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M= +github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= +github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod 
h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= +github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q= +github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw= +github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU= +github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= +github.com/containers/ocicrypt v1.1.6 h1:uoG52u2e91RE4UqmBICZY8dNshgfvkdl3BW6jnxiFaI= +github.com/containers/ocicrypt v1.1.6/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc= +github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs= +github.com/containers/storage v1.42.0 h1:zm2AQD4NDeTB3JQ8X+Wo5+VRqNB+b4ocEd7Qj6ylPJA= +github.com/containers/storage 
v1.42.0/go.mod h1:JiUJwOgOo1dr2DdOUc1MRe2GCAXABYoYmOdPF8yvH78= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc/v3 v3.6.0 h1:AKVxfYw1Gmkn/w96z0DbT/B/xFnzTd3MkZvWLjF4n/o= +github.com/coreos/go-oidc/v3 v3.6.0/go.mod h1:ZpHUsHBucTUj6WOkrP4E20UPynbLZzhTQ1XKCXkxyPc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 h1:vU+EP9ZuFUCYE0NYLwTSob+3LNEJATzNfP/DC7SWGWI= +github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod 
h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitorus/pkcs7 v0.0.0-20221019075359-21b8b40e6bb4/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/pkcs7 v0.0.0-20221212123742-001c36b64ec3 h1:rjCXeRWazGsbcBlExMcAW8H1LGdgJ9r619y7+aeKgds= +github.com/digitorus/pkcs7 v0.0.0-20221212123742-001c36b64ec3/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/timestamp v0.0.0-20221019182153-ef3b63b79b31 h1:3go0tpsBpbs9L/oysk3jDwRprlLRRkpSU7YxKlTfU+o= +github.com/digitorus/timestamp v0.0.0-20221019182153-ef3b63b79b31/go.mod h1:6V2ND8Yf8TOJ4h+9pmUlx8kXvNLBB2QplToVVZQ3rF0= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v23.0.5+incompatible h1:ufWmAOuD3Vmr7JP2G5K3cyuNC4YZWiAsuDEvFVVDafE= github.com/docker/cli v23.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker 
v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v23.0.5+incompatible h1:DaxtlTJjFSnLOXVNUBU1+6kXGz2lpDoEAH6QoxaSg8k= github.com/docker/docker v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0= +github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g= +github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -162,25 +618,45 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= +github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg= +github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/fluxcd/pkg/ssa v0.24.1 h1:0dn5FqyYdGa+VuDp5EJrkLbPq5xhhSAAkMgGUeMpOM0= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest 
v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo= +github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/gardener/component-cli v0.44.0 h1:19w703QNyV0VRhoqbRv6NgibK/t6OZxoLqkBWO/JrFM= github.com/gardener/component-cli v0.44.0/go.mod h1:TMYFZYiopyvbgVzaMALgUcJvup3LP/qOl/NQpS7uvjM= github.com/gardener/component-spec/bindings-go v0.0.52/go.mod h1:kQFMTWowNAp9tOp6aImQa/NoLzfvX29jN5Qgud9rpQU= @@ -188,18 +664,28 @@ github.com/gardener/component-spec/bindings-go v0.0.95 h1:0h9ZRfWo0d84PkC/reVRXA github.com/gardener/component-spec/bindings-go v0.0.95/go.mod h1:qr7kADDXbXB0huul+ih/B43YkwyiMFYQepp/tqJ331c= github.com/gardener/image-vector v0.10.0 h1:Ysg3hxfiGUG/doajiZ0nQuUaJYwfO5BZCOcijL3tRuo= github.com/gardener/image-vector v0.10.0/go.mod h1:32SHGcbmmueeK9VkawsFcEbsoENXQPIuuYiFBUP+vMQ= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/gertd/go-pluralize v0.2.1 h1:M3uASbVjMnTsPb0PNqg+E/24Vwigyo/tvyMTtAlLgiA= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY= +github.com/go-gorp/gorp/v3 v3.0.5/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.0 
h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -207,57 +693,138 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= +github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= 
+github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= +github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= +github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod 
h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= +github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-rod/rod v0.113.3 h1:oLiKZW721CCMwA5g7977cWfcAKQ+FuosP47Zf1QiDrA= +github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= -github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod 
h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -293,11 +860,18 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/certificate-transparency-go v1.1.6 h1:SW5K3sr7ptST/pIvNkSVWMiJqemRmkjJPPT0jzXdOOY= +github.com/google/certificate-transparency-go v1.1.6/go.mod h1:0OJjOsOk+wj6aYQgP7FU0ioQ0AJUmnWPFMqTjQeazPQ= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -311,10 +885,21 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE= github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q= +github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI= +github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28= +github.com/google/go-github/v50 v50.2.0 h1:j2FyongEHlO9nxXLc+LP3wuBSVU9mVxfpdYUexMpIfk= +github.com/google/go-github/v50 v50.2.0/go.mod h1:VBY8FB6yPIjrtKhozXv4FQupxKLS6H4m6xFZlT43q8Q= +github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= 
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -331,43 +916,83 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c h1:lvddKcYTQ545ADhBujtIJmqQrZBDsGo7XIMbAQe/sNY= github.com/google/pprof v0.0.0-20221103000818-d260c55eee4c/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.4 h1:uGy6JWR/uMIILU8wbf+OkstIrNiMjGpEIyhx8f6W7s4= +github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers 
v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= 
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= +github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -375,30 +1000,65 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= +github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc= +github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= +github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= +github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b h1:ZGiXF8sz7PDk6RgkP+A/SFfUD0ZR/AgG6SpRNEDKZy8= +github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b/go.mod h1:hQmNrgofl+IY/8L+n20H6E6PWBBTokdsv+q49j0QhsU= +github.com/jellydator/ttlcache/v3 v3.0.1 h1:cHgCSMS7TdQcoprXnWUptJZzyFsqs18Lt8VVhRuZYVU= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8 h1:CZkYfurY6KGhVtlalI4QwQ6T0Cu6iuY3e0x5RLu96WE= +github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= +github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d h1:jRQLvyVGL+iVtDElaEIDdKwpPqUIZJfzkNLV34htpEc= +github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= 
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/joncalhoun/pipe v0.0.0-20170510025636-72505674a733/go.mod h1:2MNFZhLx2HMHTN4xKH6FhpoQWqmD8Ato8QOE2hp5hY4=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -409,82 +1069,139 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
+github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
+github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8=
+github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA=
+github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mandelsoft/filepath v0.0.0-20200909114706-3df73d378d55/go.mod h1:n4xEiUD2HNHnn2w5ZKF0qgjDecHVCWAl5DxZ7+pcFU8=
github.com/mandelsoft/filepath v0.0.0-20230412200429-36b1eb66bd27 h1:VivN6K8H4H0G4bUmeQH68fQIROL5c8S2iyvVe4teufc=
github.com/mandelsoft/filepath v0.0.0-20230412200429-36b1eb66bd27/go.mod h1:LxhqC7khDoRENwooP6f/vWvia9ivj6TqLYrR39zqkN0=
+github.com/mandelsoft/logging v0.0.0-20230905123808-7042ee3aae45 h1:BGJBqw9q1Brn3XAYcj52hhonjV7aHUftVJ3SuJpXQ/M=
+github.com/mandelsoft/logging v0.0.0-20230905123808-7042ee3aae45/go.mod h1:J/kRqdfAOQmMPfJeAcV2pfX1QLV9/NlAQdAJzpnaU+g=
github.com/mandelsoft/spiff v1.7.0-beta-5 h1:3kC10nTviDQhL8diSxp7i4IC2iSiDg6KPbH1CAq7Lfw=
github.com/mandelsoft/spiff v1.7.0-beta-5/go.mod h1:TwEeOPuRZxlzQBCLEyVTlHmBSruSGGNdiQ2fovVJ8ao=
github.com/mandelsoft/vfs v0.0.0-20210530103237-5249dc39ce91/go.mod h1:74aV7kulg9C434HiI3zNALN79QHc9IZMN+SI4UdLn14=
-github.com/mandelsoft/vfs v0.0.0-20230506183150-975954b82357 h1:BJKXJKeVAn9Z1Ke6N1p3G20SjMwOuJNBeSxT9two2FA=
-github.com/mandelsoft/vfs v0.0.0-20230506183150-975954b82357/go.mod h1:Z6caUgivS+P837kxRUaYPuzLKxNpOly4WlInNhrcWyc=
+github.com/mandelsoft/vfs v0.0.0-20230713123140-269aa4fb1338 h1:06XAA8Z3qNQLdq8eUtxGRDgJ3dSzJuqt82y3E3siO6s=
+github.com/mandelsoft/vfs v0.0.0-20230713123140-269aa4fb1338/go.mod h1:Z6caUgivS+P837kxRUaYPuzLKxNpOly4WlInNhrcWyc=
+github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI=
github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY=
github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
-github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
+github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/cli v1.1.4/go.mod h1:vTLESy5mRhKOs9KDp0/RATawxP1UqBmdrpVRMnpcvKQ=
+github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@@ -496,9 +1213,14 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
@@ -506,7 +1228,19 @@ github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.1/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -518,124 +1252,314 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI=
+github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w=
+github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
+github.com/open-component-model/ocm v0.3.0-rc.1.0.20230921153454-4fdca213d05a h1:2SGDGE+R5dTqfIEmRGJXlEdQgEGbuGDLjuvz4FosROw=
+github.com/open-component-model/ocm v0.3.0-rc.1.0.20230921153454-4fdca213d05a/go.mod h1:Fg24n/qWyXZyfcQkn1MIs9TzyjC0yP4Km3lI4cxMqis=
github.com/opencontainers/distribution-spec v1.0.1 h1:hT6tF6uKZAQh+HH3BrJqn7/xHhMoDHzahIg4KxD2DqM=
github.com/opencontainers/distribution-spec v1.0.1/go.mod h1:copR2flp+jTEvQIFMb6MIx45OkrxzqyjszPDT3hx/5Q=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84/go.mod h1:Qnt1q4cjDNQI9bT832ziho5Iw2BhK8o1KwLOwW56VP4=
github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
+github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
+github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/poy/onpar v0.0.0-20200406201722-06f95a1c68e8/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
+github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
+github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rubenv/sql-migrate v1.2.0 h1:fOXMPLMd41sK7Tg75SXDec15k3zg5WNV6SjuDRiNfcU=
-github.com/rubenv/sql-migrate v1.2.0/go.mod h1:Z5uVnq7vrIrPmHbVFfR4YLHRZquxeHpckCnRq0P/K9Y=
+github.com/rubenv/sql-migrate v1.3.1 h1:Vx+n4Du8X8VTYuXbhNxdEUoh6wiJERA0GlWocR5FrbA=
+github.com/rubenv/sql-migrate v1.3.1/go.mod h1:YzG/Vh82CwyhTFXy+Mf5ahAiiEOpAlHurg+23VEzcsk=
+github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=
+github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/secure-systems-lab/go-securesystemslib v0.6.0 h1:T65atpAVCJQK14UA57LMdZGpHi4QYSH/9FZyNGqMYIA=
+github.com/secure-systems-lab/go-securesystemslib v0.6.0/go.mod h1:8Mtpo9JKks/qhPG4HGZ2LGMvrPbzuxwfz/f/zLfEWkk=
+github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
+github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
-github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
-github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
+github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
+github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
+github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sigstore/cosign/v2 v2.1.1 h1:HOI6pWaEie0wLituDWWaqC5U9MaXablKNf6QroVhj6k=
+github.com/sigstore/cosign/v2 v2.1.1/go.mod h1:S9KGmdQ/Dd29TdgUwGCNeXR7scJWZwREh4A9Za2PRPY=
+github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y=
+github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
+github.com/sigstore/rekor v1.2.2-0.20230530122220-67cc9e58bd23 h1:eZY7mQFcc0VvNr0fiAK3/n7kh73+T06KzBEIUYzFSDQ=
+github.com/sigstore/rekor v1.2.2-0.20230530122220-67cc9e58bd23/go.mod h1:h1tOLhldpfILtziWpUDgGBu0vulWk9Kh72t6XzBGJok=
+github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks=
+github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.7.1 h1:rDHrG/63b3nBq3G9plg7iYnWN6lBhOfq/XultlCZgII=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.7.1 h1:X3ezwolP+b1jP3R6XPOWhUU0TZKONiv6EIRuySlZGrY=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.7.1 h1:mj1KhdzzP1me994bt1UXhq5KZGSR1SoqxTqcT+hfPMk=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.7.1 h1:fhOToGY5fC5TY101an8i/oDYpoLzUJ1nUFwhnHA1+XY=
+github.com/sigstore/timestamp-authority v1.1.1 h1:EldrdeBED0edNzDMvYZDf5CyWgtSchtR9DKYyksNR8M=
+github.com/sigstore/timestamp-authority v1.1.1/go.mod h1:cEDLEHl/L3ppqKDaiZ3Cg4ikcaYleuq90I/BFNePzF0=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
+github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
+github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
+github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
+github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
+github.com/spiffe/go-spiffe/v2 v2.1.6 h1:4SdizuQieFyL9eNU+SPiCArH4kynzaKOOj0VvM8R7Xo=
+github.com/spiffe/go-spiffe/v2 v2.1.6/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -643,34 +1567,131 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg=
+github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU=
+github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA=
+github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA=
+github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
+github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/tjfoc/gmsm v1.3.2 h1:7JVkAn5bvUJ7HtU08iW6UiD+UTmJTIToHCfeFzkcCxM=
+github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tonglil/buflogr v1.0.1 h1:WXFZLKxLfqcVSmckwiMCF8jJwjIgmStJmg63YKRF1p0=
+github.com/tonglil/buflogr v1.0.1/go.mod h1:yYWwvSpn/3uAaqjf6mJg/XMiAciaR0QcRJH2gJGDxNE=
+github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=
+github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
+github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
+github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
+github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
+github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
+github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
+github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
+github.com/vbauerster/mpb/v7 v7.4.1/go.mod h1:Ygg2mV9Vj9sQBWqsK2m2pidcf9H3s6bNKtqd3/M4gBo=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI6M3KdHc=
+github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xanzy/go-gitlab v0.86.0 h1:jR8V9cK9jXRQDb46KOB20NCF3ksY09luaG0IfXE6p7w=
+github.com/xanzy/go-gitlab v0.86.0/go.mod h1:5ryv+MnpZStBH8I/77HuQBsMbBGANtVpLWC15qOjWAw=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=
github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
+github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
+github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s=
+github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
+github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs= +github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= +go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= +go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -679,40 +1700,92 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod 
h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.starlark.net v0.0.0-20221028183056-acb66ad56dd2 h1:5/KzhcSqd4UgY51l17r7C5g/JiE6DRw1Vq7VJfQHuMc= go.starlark.net v0.0.0-20221028183056-acb66ad56dd2/go.mod h1:kIVgS18CjmEC3PqMd5kaJSGEifyV/CeB9x506ZJ1Vbk= +go.step.sm/crypto v0.32.1 h1:kAiL21zTqAgYu1geOYxH+ApUCUX+oclB25TccnNEYTU= +go.step.sm/crypto v0.32.1/go.mod h1:JwarCq+Sn6N8IbRSKfSJfjUNKfO8c4N1mcNxYXuxXzc= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod 
h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto 
v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -725,6 +1798,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -752,26 +1827,32 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -789,18 +1870,33 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -815,12 +1911,15 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -829,6 +1928,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -837,73 +1937,136 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200806060901-a37d78b92225/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201005172224-997123666555/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0 
h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -916,14 +2079,24 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -933,17 +2106,24 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -962,37 +2142,45 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.3.0 
h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1015,6 +2203,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg= +google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1023,11 +2213,13 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1036,6 +2228,7 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto 
v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1044,33 +2237,48 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod 
h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1086,8 +2294,13 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.56.0 h1:+y7Bs8rtMd07LeXmL3NxcTLn7mUkbKZqEpPhMNkwJEE= google.golang.org/grpc v1.56.0/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1105,37 +2318,65 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf 
v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= +gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII= +gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/pipe.v2 v2.0.0-20140414041502-3c2ca4d52544/go.mod h1:UhTeH/yXCK/KY7TX24mqPkaQ7gZeqmWd/8SSS8B3aHw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM= +gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -helm.sh/helm/v3 v3.11.1 h1:cmL9fFohOoNQf+wnp2Wa0OhNFH0KFnSzEkVxi3fcc3I= -helm.sh/helm/v3 v3.11.1/go.mod h1:z/Bu/BylToGno/6dtNGuSmjRqxKq5gaH+FU0BPO+AQ8= +helm.sh/helm/v3 v3.12.2 h1:kFyDBr/mgJUlyGzVTCieG4wW0zmo7fcNRWK0+FKkxqU= +helm.sh/helm/v3 v3.12.2/go.mod h1:v1PMayudIfZAvec3Wp4wAErensvK/rv5fu/xCiE6t3I= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1143,26 +2384,56 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y= k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg= k8s.io/apiextensions-apiserver v0.27.2 h1:iwhyoeS4xj9Y7v8YExhUwbVuBhMr3Q4bd/laClBV6Bo= k8s.io/apiextensions-apiserver v0.27.2/go.mod h1:Oz9UdvGguL3ULgRdY9QMUzL2RZImotgxvGjdWRq6ZXQ= k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.19.4/go.mod 
h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= k8s.io/apiserver v0.27.2 h1:p+tjwrcQEZDrEorCZV2/qE8osGTINPuS5ZNqWAvKm5E= k8s.io/apiserver v0.27.2/go.mod h1:EsOf39d75rMivgvvwjJ3OW/u9n1/BmUMK5otEOJrb1Y= -k8s.io/cli-runtime v0.26.1 h1:f9+bRQ1V3elQsx37KmZy5fRAh56mVLbE9A7EMdlqVdI= -k8s.io/cli-runtime v0.26.1/go.mod h1:+e5Ym/ARySKscUhZ8K3hZ+ZBo/wYPIcg+7b5sFYi6Gg= +k8s.io/cli-runtime v0.27.2 h1:9HI8gfReNujKXt16tGOAnb8b4NZ5E+e0mQQHKhFGwYw= +k8s.io/cli-runtime v0.27.2/go.mod h1:9UecpyPDTkhiYY4d9htzRqN+rKomJgyb4wi0OfrmCjw= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8= k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48= k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= k8s.io/component-base v0.27.2 h1:neju+7s/r5O4x4/txeUONNTS9r1HsPbyoPBAtHsDCpo= k8s.io/component-base v0.27.2/go.mod h1:5UPk7EjfgrfgRIuDBFtsEFAe4DAvP3U+M8RTzoSJkpo= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod 
h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1172,15 +2443,25 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= -k8s.io/kubectl v0.26.1 h1:K8A0Jjlwg8GqrxOXxAbjY5xtmXYeYjLU96cHp2WMQ7s= -k8s.io/kubectl v0.26.1/go.mod h1:miYFVzldVbdIiXMrHZYmL/EDWwJKM+F0sSsdxsATFPo= +k8s.io/kubectl v0.27.2 h1:sSBM2j94MHBFRWfHIWtEXWCicViQzZsb177rNsKBhZg= +k8s.io/kubectl v0.27.2/go.mod h1:GCOODtxPcrjh+EC611MqREkU8RjYBh10ldQCQ6zpFKw= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.3 h1:v8PJl+gEAntI1pJ/LCrDgsuk+1PKVavVEPsYIHFE5uY= @@ -1188,17 +2469,26 @@ oras.land/oras-go v1.2.3/go.mod h1:M/uaPdYklze0Vf3AakfarnpoEckvw0ESbRdN8Z1vdJg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/cli-utils v0.34.0 
h1:zCUitt54f0/MYj/ajVFnG6XSXMhpZ72O/3RewIchW8w= sigs.k8s.io/controller-runtime v0.15.1 h1:9UvgKD4ZJGcj24vefUFgZFP3xej/3igL9BsOUTb/+4c= sigs.k8s.io/controller-runtime v0.15.1/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= -sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= -sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= -sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= +sigs.k8s.io/kustomize/api v0.13.2 h1:kejWfLeJhUsTGioDoFNJET5LQe/ajzXhJGYoU+pJsiA= +sigs.k8s.io/kustomize/api v0.13.2/go.mod h1:DUp325VVMFVcQSq+ZxyDisA8wtldwHxLZbr1g94UHsw= +sigs.k8s.io/kustomize/kyaml v0.14.1 h1:c8iibius7l24G2wVAGZn/Va2wNys03GXLjYVIcFVxKA= +sigs.k8s.io/kustomize/kyaml v0.14.1/go.mod h1:AN1/IpawKilWD7V+YvQwRGUvuUOOWpjsHu6uHwonSF4= +sigs.k8s.io/release-utils v0.7.4 h1:17LmJrydpUloTCtaoWj95uKlcrUp4h2A9Sa+ZL+lV9w= +sigs.k8s.io/release-utils v0.7.4/go.mod h1:JEt2QPHItd5Pg2UKLAU8PEaSlF4bUjCZimpxFDgymVU= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/pkg/components/cache/blueprint/cache.go b/pkg/components/cache/blueprint/cache.go new file mode 100644 index 000000000..e4253b748 --- /dev/null +++ b/pkg/components/cache/blueprint/cache.go @@ -0,0 +1,312 @@ +// SPDX-FileCopyrightText: 2021 SAP SE or an SAP affiliate company and Gardener contributors. +// +// SPDX-License-Identifier: Apache-2.0 + +package blueprint + +import ( + "context" + "errors" + "fmt" + + "os" + "path/filepath" + "sync" + "time" + + "github.com/gardener/landscaper/pkg/components/cache" + "github.com/gardener/landscaper/pkg/components/model" + "github.com/gardener/landscaper/pkg/landscaper/blueprints" + + "github.com/mandelsoft/vfs/pkg/memoryfs" + "github.com/mandelsoft/vfs/pkg/projectionfs" + "github.com/mandelsoft/vfs/pkg/readonlyfs" + "github.com/mandelsoft/vfs/pkg/vfs" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/gardener/landscaper/apis/config" + "github.com/gardener/landscaper/apis/config/v1alpha1" + lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" + "github.com/gardener/landscaper/controller-utils/pkg/logging" + "github.com/gardener/landscaper/pkg/api" + "github.com/gardener/landscaper/pkg/utils" +) + +var storeSingleton *Store + +// GetBlueprintStore returns the currently active store. 
+func GetBlueprintStore() *Store {
+	return storeSingleton
+}
+
+func init() {
+	// init default blueprint store
+	store, err := DefaultStore(memoryfs.New())
+	if err != nil {
+		panic(err)
+	}
+	SetStore(store)
+}
+
+// SetStore sets the currently active store and closes the previously active one.
+func SetStore(store *Store) {
+	if storeSingleton != nil {
+		_ = storeSingleton.Close()
+	}
+	storeSingleton = store
+}
+
+var NotFoundError = errors.New("NOTFOUND")
+var StoreClosedError = errors.New("STORE_CLOSED")
+
+// Store describes a blueprint cache using a base filesystem.
+// The blueprints are stored decompressed and untarred in the root of the filesystem using a hash value.
+// root
+// - <hash>
+//   - blueprint.yaml
+//   - ... some other data
+//
+// The hash is calculated using the component descriptor and the name of the blueprint.
+type Store struct {
+	log         logging.Logger
+	disabled    bool
+	mux         sync.RWMutex
+	indexMethod config.IndexMethod
+	index       cache.Index
+	fs          vfs.FileSystem
+
+	size        int64
+	currentSize int64
+	// usage describes the actual usage of the filesystem.
+	// It is calculated from the current size and the max size.
+	usage float64
+
+	gcConfig      config.GarbageCollectionConfiguration
+	resetStopChan chan struct{}
+	closed        bool
+}
+
+// NewStore creates a new blueprint cache using a base filesystem.
+//
+// The caller should always close the cache for a graceful termination.
+//
+// The store should be initialized once as this is a global singleton.
+func NewStore(log logging.Logger, baseFs vfs.FileSystem, config config.BlueprintStore) (*Store, error) {
+	if len(config.Path) == 0 {
+		var err error
+		config.Path, err = vfs.TempDir(baseFs, baseFs.FSTempDir(), "bsStore")
+		if err != nil {
+			return nil, fmt.Errorf("unable to setup temporary directory for blueprint store: %w", err)
+		}
+	}
+
+	fs, err := projectionfs.New(baseFs, config.Path)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create store filesystem: %w", err)
+	}
+
+	store := &Store{
+		log:         log,
+		disabled:    config.DisableCache,
+		indexMethod: config.IndexMethod,
+		index:       cache.NewIndex(),
+		fs:          fs,
+		gcConfig:    config.GarbageCollectionConfiguration,
+	}
+
+	if config.Size != "0" {
+		quantity, err := resource.ParseQuantity(config.Size)
+		if err != nil {
+			return nil, fmt.Errorf("unable to parse size %q: %w", config.Size, err)
+		}
+		sizeInBytes, ok := quantity.AsInt64()
+		if !ok {
+			return nil, fmt.Errorf("unable to parse size %q as int", config.Size)
+		}
+		store.size = sizeInBytes
+
+		store.StartResetInterval()
+	}
+	return store, nil
+}
+
+// DefaultStore creates a default blueprint store.
+func DefaultStore(fs vfs.FileSystem) (*Store, error) {
+	defaultStoreConfig := config.BlueprintStore{}
+	cs := v1alpha1.BlueprintStore{}
+	v1alpha1.SetDefaults_BlueprintStore(&cs)
+	if err := v1alpha1.Convert_v1alpha1_BlueprintStore_To_config_BlueprintStore(&cs, &defaultStoreConfig, nil); err != nil {
+		return nil, err
+	}
+	return NewStore(logging.Discard(), fs, defaultStoreConfig)
+}
+
+func (s *Store) Close() error {
+	s.closed = true
+	// resetStopChan is only initialized when a size limit is configured;
+	// guard against closing a nil channel.
+	if s.resetStopChan != nil {
+		close(s.resetStopChan)
+	}
+	return nil
+}
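Reviewer note (not part of the diff): as a usage sketch, the store would typically be wired up once at startup and then accessed via the singleton. The config values below are purely illustrative, not defaults from this PR; the defaulting/conversion calls mirror what `DefaultStore` does above.

```go
package main

import (
	"github.com/mandelsoft/vfs/pkg/osfs"

	"github.com/gardener/landscaper/apis/config"
	"github.com/gardener/landscaper/apis/config/v1alpha1"
	"github.com/gardener/landscaper/controller-utils/pkg/logging"
	blueprint "github.com/gardener/landscaper/pkg/components/cache/blueprint"
)

func main() {
	// Build a defaulted configuration the same way DefaultStore does,
	// then override individual values.
	storeConfig := config.BlueprintStore{}
	cs := v1alpha1.BlueprintStore{}
	v1alpha1.SetDefaults_BlueprintStore(&cs)
	if err := v1alpha1.Convert_v1alpha1_BlueprintStore_To_config_BlueprintStore(&cs, &storeConfig, nil); err != nil {
		panic(err)
	}
	storeConfig.Path = "/var/cache/blueprints" // assumed path, illustrative only
	storeConfig.Size = "512Mi"                 // parsed as a Kubernetes resource quantity

	store, err := blueprint.NewStore(logging.Discard(), osfs.New(), storeConfig)
	if err != nil {
		panic(err)
	}
	blueprint.SetStore(store) // replaces (and closes) the default in-memory store
}
```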
+// Put stores a blueprint on the given filesystem.
+func (s *Store) Put(ctx context.Context, blueprintID string, content *model.TypedResourceContent) (bool, error) {
+	if s.closed {
+		return false, StoreClosedError
+	}
+
+	if blueprintID == "" {
+		return false, nil
+	}
+
+	blueprint, ok := content.Resource.(*blueprints.Blueprint)
+	if !ok {
+		return false, errors.New("content is not a blueprint")
+	}
+
+	bpPath := filepath.Join("/", blueprintID)
+
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	if _, err := s.fs.Stat(bpPath); err == nil {
+		if err := s.fs.RemoveAll(bpPath); err != nil {
+			s.log.Error(err, "unable to cleanup directory")
+			return false, errors.New("unable to cleanup store")
+		}
+	}
+
+	if err := s.fs.Mkdir(bpPath, os.ModePerm); err != nil && !os.IsExist(err) {
+		return false, fmt.Errorf("unable to create blueprint directory: %w", err)
+	}
+	err := vfs.CopyDir(blueprint.Fs, "/", s.fs, blueprintID)
+	if err != nil {
+		return false, fmt.Errorf("unable to copy blueprint into cache: %w", err)
+	}
+
+	size, err := utils.GetSizeOfDirectory(s.fs, bpPath)
+	if err != nil {
+		return true, fmt.Errorf("unable to get size of blueprint directory: %w", err)
+	}
+	s.index.Add(blueprintID, size, time.Now())
+	s.updateUsage(size)
+	cache.StoredItems.Inc()
+	defer func() {
+		go s.RunGarbageCollection()
+	}()
+
+	return true, nil
+}
+
+// CurrentSize returns the currently used storage.
+func (s *Store) CurrentSize() int64 {
+	return s.currentSize
+}
+
+func (s *Store) updateUsage(size int64) {
+	s.currentSize = s.currentSize + size
+	s.usage = float64(s.currentSize) / float64(s.size)
+	blueprints.DiskUsage.Set(float64(s.currentSize))
+}
+
+// Get reads the blueprint from the filesystem.
+func (s *Store) Get(_ context.Context, blueprintID string) (*blueprints.Blueprint, error) {
+	if s.closed {
+		return nil, StoreClosedError
+	}
+	if s.disabled {
+		return nil, NotFoundError
+	}
+	if blueprintID == "" {
+		return nil, nil
+	}
+	bpPath := filepath.Join("/", blueprintID)
+	s.mux.RLock()
+	defer s.mux.RUnlock()
+	if _, err := s.fs.Stat(bpPath); err != nil {
+		if os.IsNotExist(err) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	s.index.Hit(blueprintID)
+	return BuildBlueprintFromPath(s.fs, bpPath)
+}
+
+// BuildBlueprintFromPath creates a read-only blueprint from an extracted blueprint.
+func BuildBlueprintFromPath(fs vfs.FileSystem, bpPath string) (*blueprints.Blueprint, error) {
+	blueprintBytes, err := vfs.ReadFile(fs, filepath.Join(bpPath, lsv1alpha1.BlueprintFileName))
+	if err != nil {
+		return nil, fmt.Errorf("unable to read blueprint definition: %w", err)
+	}
+	blueprint := &lsv1alpha1.Blueprint{}
+	if _, _, err := api.Decoder.Decode(blueprintBytes, nil, blueprint); err != nil {
+		return nil, fmt.Errorf("unable to decode blueprint definition: %w", err)
+	}
+	bpFs, err := projectionfs.New(readonlyfs.New(fs), bpPath)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create blueprint filesystem: %w", err)
+	}
+	return blueprints.New(blueprint, readonlyfs.New(bpFs)), nil
+}
+
+////////////////////////
+// Garbage Collection //
+////////////////////////
+
+// StartResetInterval starts the reset counter for the cache hits.
+func (s *Store) StartResetInterval() {
+	interval := time.NewTicker(s.gcConfig.ResetInterval.Duration)
+	s.resetStopChan = make(chan struct{})
+	go func() {
+		for {
+			select {
+			case <-interval.C:
+				s.index.Reset()
+			case <-s.resetStopChan:
+				interval.Stop()
+				return
+			}
+		}
+	}()
+}
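Reviewer note (not part of the diff): a minimal sketch of `BuildBlueprintFromPath` above, loading a blueprint from an already-extracted directory. The path is an assumption for illustration.

```go
package main

import (
	"fmt"

	"github.com/mandelsoft/vfs/pkg/osfs"

	blueprint "github.com/gardener/landscaper/pkg/components/cache/blueprint"
)

func main() {
	// "/tmp/extracted-bp" is an assumed directory containing a blueprint.yaml.
	bp, err := blueprint.BuildBlueprintFromPath(osfs.New(), "/tmp/extracted-bp")
	if err != nil {
		panic(err)
	}
	// The returned blueprint wraps a read-only filesystem rooted at the given path.
	fmt.Println(bp.Info.Annotations)
}
```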
+// RunGarbageCollection runs the garbage collection of the filesystem.
+// The gc only deletes items once the usage exceeds a certain threshold.
+// If that threshold is reached the files are deleted with the following priority:
+// - least hits
+// - oldest
+// - random
+func (s *Store) RunGarbageCollection() {
+	// do not run gc if the size is infinite
+	if s.size == 0 {
+		return
+	}
+	// while the garbage collection is running read operations are blocked
+	// todo: improve to only block read operations on really deleted objects
+	s.mux.Lock()
+	defer s.mux.Unlock()
+
+	// first check if we reached the threshold to start garbage collection
+	if s.usage < s.gcConfig.GCHighThreshold {
+		s.log.Debug(fmt.Sprintf("skipping gc, usage %v below threshold %v", s.usage, s.gcConfig.GCHighThreshold))
+		return
+	}
+
+	// while the index is read and copied no write should happen
+	index := s.index.DeepCopy()
+
+	// sort all files according to their deletion priority
+	items := index.PriorityList()
+	for s.usage > s.gcConfig.GCLowThreshold {
+		if len(items) == 0 {
+			return
+		}
+		item := items[0]
+		if err := s.fs.RemoveAll(filepath.Join("/", item.Name)); err != nil {
+			s.log.Error(err, "unable to delete blueprint directory", "file", item.Name)
+		}
+		s.log.Debug("garbage collected", "item", item.Name)
+		s.updateUsage(-item.Size)
+		cache.StoredItems.Dec()
+		// remove currently garbage collected item
+		items = items[1:]
+	}
+}
diff --git a/pkg/components/cache/blueprint/cache_test.go b/pkg/components/cache/blueprint/cache_test.go
new file mode 100644
index 000000000..55affcec8
--- /dev/null
+++ b/pkg/components/cache/blueprint/cache_test.go
@@ -0,0 +1,259 @@
+// SPDX-FileCopyrightText: 2021 SAP SE or an SAP affiliate company and Gardener contributors.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package blueprint_test
+
+import (
+	"context"
+	"os"
+
+	"github.com/mandelsoft/vfs/pkg/osfs"
+
+	. "github.com/gardener/landscaper/pkg/components/cache/blueprint"
+	"github.com/gardener/landscaper/pkg/components/model"
+
+	"github.com/mandelsoft/vfs/pkg/memoryfs"
+	"github.com/mandelsoft/vfs/pkg/vfs"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"github.com/gardener/landscaper/apis/config"
+	"github.com/gardener/landscaper/apis/config/v1alpha1"
+	"github.com/gardener/landscaper/apis/mediatype"
+	"github.com/gardener/landscaper/controller-utils/pkg/logging"
+	"github.com/gardener/landscaper/pkg/utils/simplelogger"
+)
+
+const (
+	BLUEPRINT_ID             = "testid"
+	TESTDATA_PATH            = "testdata"
+	BLUEPRINT_SUBPATH        = "blueprint"
+	BLUEPRINT_EDITED_SUBPATH = "blueprint_edited"
+)
+
+// TODO
+var _ = XDescribe("Put", func() {
+
+	var defaultStoreConfig config.BlueprintStore
+
+	BeforeEach(func() {
+		cs := v1alpha1.BlueprintStore{}
+		v1alpha1.SetDefaults_BlueprintStore(&cs)
+		Expect(v1alpha1.Convert_v1alpha1_BlueprintStore_To_config_BlueprintStore(&cs, &defaultStoreConfig, nil)).To(Succeed())
+	})
+
+	It("should be nil if blueprint is not stored", func() {
+		ctx := context.Background()
+		memFs := memoryfs.New()
+		store, err := NewStore(logging.Discard(), memFs, defaultStoreConfig)
+		Expect(err).ToNot(HaveOccurred())
+
+		bp, err := store.Get(ctx, BLUEPRINT_ID)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(bp).To(BeNil())
+	})
+
+	It("should store and retrieve the stored blueprint", func() {
+		ctx := context.Background()
+		memFs := memoryfs.New()
+		store, err := NewStore(logging.Discard(), memFs, defaultStoreConfig)
+		Expect(err).ToNot(HaveOccurred())
+
+		fs := memoryfs.New()
+		err = vfs.CopyDir(osfs.New(), TESTDATA_PATH, fs, "/")
+		Expect(err).ToNot(HaveOccurred())
+
+		bp, err := BuildBlueprintFromPath(fs, BLUEPRINT_SUBPATH)
+		Expect(err).ToNot(HaveOccurred())
+
+		ok, err := store.Put(ctx, BLUEPRINT_ID, &model.TypedResourceContent{
+			Type:     mediatype.BlueprintType,
+			Resource: bp,
+		})
+		Expect(err).ToNot(HaveOccurred())
+		Expect(ok).To(BeTrue())
+
+		bpFromCache, err := store.Get(ctx, BLUEPRINT_ID)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(bpFromCache.Info.Annotations).To(HaveKeyWithValue("test", "original"))
+	})
+
+	It("should update the blueprint if same id", func() {
+		ctx := context.Background()
+		memFs := memoryfs.New()
+		store, err := NewStore(logging.Discard(), memFs, defaultStoreConfig)
+		Expect(err).ToNot(HaveOccurred())
+
+		fs := memoryfs.New()
+		err = vfs.CopyDir(osfs.New(), TESTDATA_PATH, fs, "/")
+		Expect(err).ToNot(HaveOccurred())
+
+		bp, err := BuildBlueprintFromPath(fs, BLUEPRINT_SUBPATH)
+		Expect(err).ToNot(HaveOccurred())
+
+		ok, err := store.Put(ctx, BLUEPRINT_ID, &model.TypedResourceContent{
+			Type:     mediatype.BlueprintType,
+			Resource: bp,
+		})
+		Expect(err).ToNot(HaveOccurred())
+		Expect(ok).To(BeTrue())
+
+		bpFromCache, err := store.Get(ctx, BLUEPRINT_ID)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(bpFromCache.Info.Annotations).To(HaveKeyWithValue("test", "original"))
+
+		bp, err = BuildBlueprintFromPath(fs, BLUEPRINT_EDITED_SUBPATH)
+		Expect(err).ToNot(HaveOccurred())
+
+		ok, err = store.Put(ctx, BLUEPRINT_ID, &model.TypedResourceContent{
+			Type:     mediatype.BlueprintType,
+			Resource: bp,
+		})
+		Expect(err).ToNot(HaveOccurred())
+		Expect(ok).To(BeTrue())
+
+		bpFromCache, err = store.Get(ctx, BLUEPRINT_ID)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(bpFromCache.Info.Annotations).To(HaveKeyWithValue("test", "different"))
+	})
+
+	Context("cache", func() {
+		It("should not try to get the blueprint from the store if the cache is disabled", func() {
+			ctx := context.Background()
+			memFs := memoryfs.New()
+			defaultStoreConfig.DisableCache = true
+			store, err := NewStore(logging.Discard(), memFs, defaultStoreConfig)
+			Expect(err).ToNot(HaveOccurred())
+
+			fs := memoryfs.New()
+			err = vfs.CopyDir(osfs.New(), TESTDATA_PATH, fs, "/")
+			Expect(err).ToNot(HaveOccurred())
+
+			bp, err := BuildBlueprintFromPath(fs, BLUEPRINT_SUBPATH)
+			Expect(err).ToNot(HaveOccurred())
+
+			ok, err := store.Put(ctx, BLUEPRINT_ID, &model.TypedResourceContent{
+				Type:     mediatype.BlueprintType,
+				Resource: bp,
+			})
+			Expect(err).ToNot(HaveOccurred())
+			Expect(ok).To(BeTrue())
+
+			bpFromCache, err := store.Get(ctx, BLUEPRINT_ID)
+			Expect(err).To(Equal(NotFoundError))
+			Expect(bpFromCache).To(BeNil())
+		})
+
+	})
+
+	Context("GarbageCollection", func() {
+		It("should delete entry with the least hits", func() {
+			// it will NOT always delete the one with the least hits; the deletion priority is a weighted
+			// function considering hits and age (thus, the cache will not become blocked by old entries with
+			// several hits)
+
+			ctx := context.Background()
+			defaultStoreConfig.Size = "1Ki"
+			store, err := NewStore(logging.Wrap(simplelogger.NewIOLogger(GinkgoWriter)), memoryfs.New(), defaultStoreConfig)
+			Expect(err).ToNot(HaveOccurred())
+
+			fs := memoryfs.New()
+			err = vfs.CopyDir(osfs.New(), TESTDATA_PATH, fs, "/")
+			Expect(err).ToNot(HaveOccurred())
+
+			// add a dummy data file to the blueprint to fill the cache
+			Expect(vfs.WriteFile(fs, BLUEPRINT_SUBPATH+"/test/data", make([]byte, 200), os.ModePerm)).ToNot(HaveOccurred())
+			bp, err := BuildBlueprintFromPath(fs, BLUEPRINT_SUBPATH)
+			Expect(err).ToNot(HaveOccurred())
+
+			ok, err := store.Put(ctx, "a", &model.TypedResourceContent{
+				Type:     mediatype.BlueprintType,
+				Resource: bp,
+			})
+			Expect(err).ToNot(HaveOccurred())
+			Expect(ok).To(BeTrue())
+
+			// increase hits on index a
+			bpA, err := store.Get(ctx, "a")
+			Expect(err).To(BeNil())
+			Expect(bpA.Info.Annotations).To(HaveKeyWithValue("test", "original"))
+
+			// create a dummy data object to fill the cache
+			Expect(vfs.WriteFile(fs, BLUEPRINT_EDITED_SUBPATH+"/test/data", make([]byte, 200), os.ModePerm)).ToNot(HaveOccurred())
+			bp, err = BuildBlueprintFromPath(fs, BLUEPRINT_EDITED_SUBPATH)
+			Expect(err).ToNot(HaveOccurred())
+
+			ok, err = store.Put(ctx, "b", &model.TypedResourceContent{
+				Type:     mediatype.BlueprintType,
+				Resource: bp,
+			})
+			Expect(err).ToNot(HaveOccurred())
+			Expect(ok).To(BeTrue())
+
+			// the garbage collection should happen during each store.
+			// But as the gc process is async, let's explicitly run it again to not have to wait here.
+			store.RunGarbageCollection()
+
+			// the second one is gc'ed because it has fewer hits
+			bp, err = store.Get(ctx, "b")
+			Expect(err).ToNot(HaveOccurred())
+			Expect(bp).To(BeNil())
+
+			bp, err = store.Get(ctx, "a")
+			Expect(err).ToNot(HaveOccurred())
+			Expect(bp).ToNot(BeNil())
+			Expect(bp.Info.Annotations).To(HaveKeyWithValue("test", "original"))
+		})
+
+		It("should delete the oldest entry (if all have equal hits)", func() {
+			ctx := context.Background()
+			defaultStoreConfig.Size = "1Ki"
+			store, err := NewStore(logging.Wrap(simplelogger.NewIOLogger(GinkgoWriter)), memoryfs.New(), defaultStoreConfig)
+			Expect(err).ToNot(HaveOccurred())
+
+			fs := memoryfs.New()
+			err = vfs.CopyDir(osfs.New(), TESTDATA_PATH, fs, "/")
+			Expect(err).ToNot(HaveOccurred())
+
+			// add a dummy data file to the blueprint to fill the cache
+			Expect(vfs.WriteFile(fs, BLUEPRINT_SUBPATH+"/test/data", make([]byte, 200), os.ModePerm)).ToNot(HaveOccurred())
+			bp, err := BuildBlueprintFromPath(fs, BLUEPRINT_SUBPATH)
+			Expect(err).ToNot(HaveOccurred())
+
+			ok, err := store.Put(ctx, "a", &model.TypedResourceContent{
+				Type:     mediatype.BlueprintType,
+				Resource: bp,
+			})
+			Expect(err).ToNot(HaveOccurred())
+			Expect(ok).To(BeTrue())
+
+			// create a dummy data object to fill the cache
+			Expect(vfs.WriteFile(fs, BLUEPRINT_EDITED_SUBPATH+"/test/data", make([]byte, 200), os.ModePerm)).ToNot(HaveOccurred())
+			bp, err = BuildBlueprintFromPath(fs, BLUEPRINT_EDITED_SUBPATH)
+			Expect(err).ToNot(HaveOccurred())
+
+			ok, err = store.Put(ctx, "b", &model.TypedResourceContent{
+				Type:     mediatype.BlueprintType,
+				Resource: bp,
+			})
+			Expect(err).ToNot(HaveOccurred())
+			Expect(ok).To(BeTrue())
+
+			// the garbage collection should happen during each store.
+			// But as the gc process is async, let's explicitly run it again to not have to wait here.
+			store.RunGarbageCollection()
+
+			// the first one is gc'ed because it is the oldest entry and the hits of blueprint 1 and blueprint 2 are equal
+			bp, err = store.Get(ctx, "b")
+			Expect(err).ToNot(HaveOccurred())
+			Expect(bp).ToNot(BeNil())
+			Expect(bp.Info.Annotations).To(HaveKeyWithValue("test", "different"))
+
+			bp, err = store.Get(ctx, "a")
+			Expect(bp).To(BeNil())
+			Expect(err).To(BeNil())
+		})
+	})
+
+})
diff --git a/pkg/components/cache/blueprint/suite_test.go b/pkg/components/cache/blueprint/suite_test.go
new file mode 100644
index 000000000..c8c1551e7
--- /dev/null
+++ b/pkg/components/cache/blueprint/suite_test.go
@@ -0,0 +1,17 @@
+// SPDX-FileCopyrightText: 2021 SAP SE or an SAP affiliate company and Gardener contributors.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package blueprint_test
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+func TestConfig(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Blueprints Test Suite")
+}
diff --git a/pkg/components/cache/blueprint/testdata/blueprint/blueprint.yaml b/pkg/components/cache/blueprint/testdata/blueprint/blueprint.yaml
new file mode 100644
index 000000000..fcd389780
--- /dev/null
+++ b/pkg/components/cache/blueprint/testdata/blueprint/blueprint.yaml
@@ -0,0 +1,15 @@
+apiVersion: landscaper.gardener.cloud/v1alpha1
+kind: Blueprint
+jsonSchema: "https://json-schema.org/draft/2019-09/schema"
+
+annotations:
+  test: original
+
+imports:
+  - name: cluster # name of the import parameter
+    targetType: landscaper.gardener.cloud/kubernetes-cluster # type of the import parameter
+
+deployExecutions:
+  - name: default
+    type: GoTemplate
+    template:
\ No newline at end of file
diff --git a/pkg/components/cache/blueprint/testdata/blueprint/test/README.md b/pkg/components/cache/blueprint/testdata/blueprint/test/README.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/pkg/components/cache/blueprint/testdata/blueprint_edited/blueprint.yaml b/pkg/components/cache/blueprint/testdata/blueprint_edited/blueprint.yaml
new file mode 100644
index 000000000..b57037660
--- /dev/null
+++ b/pkg/components/cache/blueprint/testdata/blueprint_edited/blueprint.yaml
@@ -0,0 +1,15 @@
+apiVersion: landscaper.gardener.cloud/v1alpha1
+kind: Blueprint
+jsonSchema: "https://json-schema.org/draft/2019-09/schema"
+
+annotations:
+  test: different
+
+imports:
+  - name: cluster # name of the import parameter
+    targetType: landscaper.gardener.cloud/kubernetes-cluster # type of the import parameter
+
+deployExecutions:
+  - name: default
+    type: GoTemplate
+    template:
\ No newline at end of file
diff --git a/pkg/components/cache/blueprint/testdata/blueprint_edited/test/README.md b/pkg/components/cache/blueprint/testdata/blueprint_edited/test/README.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/pkg/components/cache/cache.go
b/pkg/components/cache/cache.go
new file mode 100644
index 000000000..15b146210
--- /dev/null
+++ b/pkg/components/cache/cache.go
@@ -0,0 +1,262 @@
+// SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package cache
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"sync"
+
+	"github.com/go-logr/logr"
+	"github.com/mandelsoft/vfs/pkg/memoryfs"
+	"github.com/mandelsoft/vfs/pkg/osfs"
+	"github.com/mandelsoft/vfs/pkg/projectionfs"
+	"github.com/mandelsoft/vfs/pkg/vfs"
+	ocispecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type layeredCache struct {
+	log logr.Logger
+	mux sync.RWMutex
+
+	baseFs    *FileSystem
+	overlayFs *FileSystem
+}
+
+// NewCache creates a new cache with the given options.
+// By default it uses a temporary directory on the OS filesystem as its base.
+func NewCache(log logr.Logger, options ...Option) (*layeredCache, error) {
+	opts := &Options{}
+	opts = opts.ApplyOptions(options)
+	opts.ApplyDefaults()
+
+	if err := initBasePath(opts); err != nil {
+		return nil, err
+	}
+
+	base, err := projectionfs.New(osfs.New(), opts.BasePath)
+	if err != nil {
+		return nil, err
+	}
+	baseCFs, err := NewCacheFilesystem(log.WithName("baseCacheFS"), base, opts.BaseGCConfig)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create base layer: %w", err)
+	}
+	var overlayCFs *FileSystem
+	if opts.InMemoryOverlay {
+		overlayCFs, err = NewCacheFilesystem(log.WithName("inMemoryCacheFS"), memoryfs.New(), opts.InMemoryGCConfig)
+		if err != nil {
+			return nil, fmt.Errorf("unable to create overlay layer: %w", err)
+		}
+	}
+
+	// initialize metrics
+	baseCFs.WithMetrics(
+		CachedItems.WithLabelValues(opts.UID),
+		CacheDiskUsage.WithLabelValues(opts.UID),
+		CacheHitsDisk.WithLabelValues(opts.UID))
+	if opts.InMemoryOverlay {
+		overlayCFs.WithMetrics(nil,
+			CacheMemoryUsage.WithLabelValues(opts.UID),
+			CacheHitsMemory.WithLabelValues(opts.UID))
+	}
+
+	return &layeredCache{
+		log:       log,
+		mux:       sync.RWMutex{},
+		baseFs:    baseCFs,
+		overlayFs: overlayCFs,
+	}, nil
+}
+
+func initBasePath(opts *Options) error {
+	if len(opts.BasePath) == 0 {
+		path, err := os.MkdirTemp(os.TempDir(), "ocicache")
+		if err != nil {
+			return err
+		}
+		opts.BasePath = path
+	}
+	info, err := os.Stat(opts.BasePath)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+		if err := os.MkdirAll(opts.BasePath, os.ModePerm); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if !info.IsDir() {
+		return errors.New("path has to be a directory")
+	}
+	return nil
+}
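Reviewer note (not part of the diff): a hedged construction sketch for the layered cache. The option types (`WithBasePath`, `WithBaseSize`, `WithInMemoryOverlay`, `WithInMemoryOverlaySize`) are defined in `pkg/components/cache/types.go` later in this diff; paths and sizes below are illustrative assumptions.

```go
package main

import (
	"github.com/go-logr/logr"

	"github.com/gardener/landscaper/pkg/components/cache"
)

func main() {
	// Build a disk-backed cache with an in-memory overlay for hot blobs.
	c, err := cache.NewCache(logr.Discard(),
		cache.WithBasePath("/var/cache/oci"), // assumed path
		cache.WithBaseSize("1Gi"),            // max size of the disk layer
		cache.WithInMemoryOverlay(true),      // enable the memory overlay
		cache.WithInMemoryOverlaySize("100Mi"),
	)
	if err != nil {
		panic(err)
	}
	defer c.Close() // stops the hit-reset tickers of both layers
}
```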
+// Close implements the io.Closer interface; it cleans up all resources used by the cache.
+func (lc *layeredCache) Close() error {
+	if err := lc.baseFs.Close(); err != nil {
+		return err
+	}
+	if lc.overlayFs != nil {
+		if err := lc.overlayFs.Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (lc *layeredCache) Get(desc ocispecv1.Descriptor) (io.ReadCloser, error) {
+	_, file, err := lc.get(Path(desc), desc)
+	if err != nil {
+		return nil, err
+	}
+	return file, nil
+}
+
+func (lc *layeredCache) Add(desc ocispecv1.Descriptor, reader io.ReadCloser) error {
+	path := Path(desc)
+	lc.mux.Lock()
+	defer lc.mux.Unlock()
+	defer reader.Close()
+
+	file, err := lc.baseFs.Create(path, desc.Size)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, reader)
+	return err
+}
+
+func (lc *layeredCache) Info() (Info, error) {
+	return Info{
+		Size:        lc.baseFs.Size,
+		CurrentSize: lc.baseFs.CurrentSize(),
+		ItemsCount:  int64(lc.baseFs.index.Len()),
+	}, nil
+}
+
+func (lc *layeredCache) Prune() error {
+	return lc.baseFs.DeleteAll()
+}
+
+func (lc *layeredCache) get(dgst string, desc ocispecv1.Descriptor) (os.FileInfo, vfs.File, error) {
+	lc.mux.RLock()
+	defer lc.mux.RUnlock()
+
+	// first search in the overlayFs layer
+	if info, file, err := lc.getFromOverlay(dgst, desc); err == nil {
+		return info, file, nil
+	}
+
+	info, err := lc.baseFs.Stat(dgst)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, nil, ErrNotFound
+		}
+		return nil, nil, err
+	}
+	verified, err := verifyBlob(lc.baseFs.FileSystem, info, dgst, desc)
+	if err != nil {
+		return nil, nil, fmt.Errorf("unable to verify blob: %w", err)
+	}
+	if !verified {
+		// remove invalid blob from cache
+		if err := lc.baseFs.Remove(dgst); err != nil {
+			lc.log.V(7).Info("unable to remove invalid blob", "digest", dgst, "err", err.Error())
+		}
+		return info, nil, ErrNotFound
+	}
+	file, err := lc.baseFs.OpenFile(dgst, os.O_RDONLY, os.ModePerm)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// copy file to in memory cache
+	if lc.overlayFs != nil {
+		overlayFile, err := lc.overlayFs.Create(dgst, info.Size())
+		if err != nil {
+			// do not return an error here as we only failed to populate the faster overlay cache
+			lc.log.V(5).Info(err.Error())
+			return info, file, nil
+		}
+		defer overlayFile.Close()
+		if _, err := io.Copy(overlayFile, file); err != nil {
+			// do not return an error here as we only failed to populate the faster overlay cache
+			lc.log.V(5).Info(err.Error())
+
+			// The file handle is at the end as the data was copied by io.Copy.
+			// Therefore the file handle is reset so that the caller can also read the data.
+			if _, err := file.Seek(0, io.SeekStart); err != nil {
+				return nil, nil, fmt.Errorf("unable to reset the file handle: %w", err)
+			}
+			return info, file, nil
+		}
+
+		// The file handle is at the end as the data was copied by io.Copy.
+		// Therefore the file handle is reset so that the caller can also read the data.
+		if _, err := file.Seek(0, io.SeekStart); err != nil {
+			return nil, nil, fmt.Errorf("unable to reset the file handle: %w", err)
+		}
+	}
+	return info, file, nil
+}
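Reviewer note (not part of the diff): a minimal round-trip through the `Cache` contract (`Add`, then `Get`, with `ErrNotFound` on a miss), shown here against the simple map-backed `NewInMemoryCache` that also appears later in this diff; the blob content is illustrative.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
	ocispecv1 "github.com/opencontainers/image-spec/specs-go/v1"

	"github.com/gardener/landscaper/pkg/components/cache"
)

func main() {
	blob := []byte("example layer content")
	desc := ocispecv1.Descriptor{
		Digest: digest.FromBytes(blob), // content address; also used as the on-disk path
		Size:   int64(len(blob)),
	}

	c := cache.NewInMemoryCache()
	if err := c.Add(desc, io.NopCloser(bytes.NewReader(blob))); err != nil {
		panic(err)
	}

	rc, err := c.Get(desc) // returns cache.ErrNotFound on a miss
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	data, _ := io.ReadAll(rc)
	fmt.Println(string(data))
}
```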
+func (lc *layeredCache) getFromOverlay(dgst string, desc ocispecv1.Descriptor) (os.FileInfo, vfs.File, error) {
+	if lc.overlayFs == nil {
+		return nil, nil, ErrNotFound
+	}
+	info, err := lc.overlayFs.Stat(dgst)
+	if err != nil {
+		lc.log.V(7).Info("not found in overlay cache", "digest", dgst, "err", err.Error())
+		return nil, nil, ErrNotFound
+	}
+
+	verified, err := verifyBlob(lc.overlayFs.FileSystem, info, dgst, desc)
+	if err != nil {
+		return nil, nil, fmt.Errorf("unable to verify blob: %w", err)
+	}
+	if !verified {
+		// remove invalid blob from cache
+		if err := lc.overlayFs.Remove(dgst); err != nil {
+			lc.log.V(7).Info("unable to remove invalid blob", "digest", dgst, "err", err.Error())
+		}
+		return info, nil, ErrNotFound
+	}
+	file, err := lc.overlayFs.OpenFile(dgst, os.O_RDONLY, os.ModePerm)
+	if err != nil {
+		return nil, nil, err
+	}
+	return info, file, err
+}
+
+// verifyBlob validates the digest of a blob.
+func verifyBlob(fs vfs.FileSystem, info os.FileInfo, dgst string, desc ocispecv1.Descriptor) (bool, error) {
+	if info.Size() != desc.Size {
+		// start with a cheap check on the blob size
+		return false, nil
+	}
+
+	file, err := fs.OpenFile(dgst, os.O_RDONLY, os.ModePerm)
+	if err != nil {
+		return false, err
+	}
+	defer file.Close()
+
+	verifier := desc.Digest.Verifier()
+	if _, err := io.Copy(verifier, file); err != nil {
+		return false, err
+	}
+	return verifier.Verified(), nil
+}
+
+func Path(desc ocispecv1.Descriptor) string {
+	return desc.Digest.Encoded()
+}
diff --git a/pkg/components/cache/filesystem.go b/pkg/components/cache/filesystem.go
new file mode 100644
index 000000000..c6bf78f08
--- /dev/null
+++ b/pkg/components/cache/filesystem.go
@@ -0,0 +1,489 @@
+// SPDX-FileCopyrightText: 2021 SAP SE or an SAP affiliate company and Gardener contributors.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package cache
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/mandelsoft/vfs/pkg/vfs"
+	"github.com/prometheus/client_golang/prometheus"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+// GCHighThreshold defines the default fraction of disk usage (e.g. 0.85 = 85%) which triggers files garbage collection.
+const GCHighThreshold float64 = 0.85
+
+// GCLowThreshold defines the default fraction of disk usage to which files garbage collection attempts to free.
+const GCLowThreshold float64 = 0.80
+
+// ResetInterval defines the default interval at which the hit reset should run.
+const ResetInterval time.Duration = 1 * time.Hour
+
+// PreservedHitsProportion defines the default proportion of hits that should be preserved on a reset.
+const PreservedHitsProportion = 0.5
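Reviewer note (not part of the diff): a small worked example of what the default thresholds above mean in absolute numbers for an assumed 1Gi cache.

```go
package main

import "fmt"

func main() {
	const sizeBytes = int64(1 << 30) // assumed cache size: 1Gi
	high, low := 0.85, 0.80          // GCHighThreshold / GCLowThreshold defaults from above

	// gc starts once usage exceeds 85% and deletes entries until usage drops below 80%.
	fmt.Printf("gc triggers above %d bytes\n", int64(high*float64(sizeBytes)))
	fmt.Printf("gc frees down to %d bytes\n", int64(low*float64(sizeBytes)))
}
```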
+// GarbageCollectionConfiguration contains all options for the cache garbage collection.
+type GarbageCollectionConfiguration struct {
+	// Size is the size of the filesystem.
+	// If the value is 0 there is no limit and no garbage collection will happen.
+	// See the kubernetes quantity docs for detailed description of the format
+	// https://github.com/kubernetes/apimachinery/blob/master/pkg/api/resource/quantity.go
+	Size string
+	// GCHighThreshold defines the fraction of disk usage which triggers files garbage collection.
+	GCHighThreshold float64
+	// GCLowThreshold defines the fraction of disk usage to which files garbage collection attempts to free.
+	GCLowThreshold float64
+	// ResetInterval defines the interval at which the hit reset should run.
+	ResetInterval time.Duration
+	// PreservedHitsProportion defines the proportion of hits that should be preserved on a reset.
+	PreservedHitsProportion float64
+}
+
+// FileSystem is an internal representation of a filesystem with an optional max size
+// and garbage collection.
+type FileSystem struct {
+	log logr.Logger
+	mux sync.Mutex
+
+	// Configuration
+	vfs.FileSystem
+	// Size is the size of the filesystem in bytes.
+	// If the value is 0 there is no limit and no garbage collection will happen.
+	Size int64
+	// GCHighThreshold defines the fraction of disk usage which triggers files garbage collection.
+	GCHighThreshold float64
+	// GCLowThreshold defines the fraction of disk usage to which files garbage collection attempts to free.
+	GCLowThreshold float64
+	// ResetInterval defines the interval at which the hit reset should run.
+	ResetInterval time.Duration
+	// PreservedHitsProportion defines the proportion of hits that should be preserved on a reset.
+	PreservedHitsProportion float64
+
+	index Index
+	// currentSize is the current size of the filesystem.
+	currentSize   int64
+	resetStopChan chan struct{}
+
+	// optional metrics
+	itemsCountMetric prometheus.Gauge
+	diskUsageMetric  prometheus.Gauge
+	hitsCountMetric  prometheus.Counter
+}
+
+// ApplyOptions parses and applies the options to the filesystem.
+// It also applies defaults.
+func (o GarbageCollectionConfiguration) ApplyOptions(fs *FileSystem) error {
+	if len(o.Size) == 0 {
+		// no garbage collection configured; ignore all other values
+		return nil
+	}
+	quantity, err := resource.ParseQuantity(o.Size)
+	if err != nil {
+		return fmt.Errorf("unable to parse size %q: %w", o.Size, err)
+	}
+	sizeInBytes, ok := quantity.AsInt64()
+	if ok {
+		fs.Size = sizeInBytes
+	}
+
+	if o.GCHighThreshold == 0 {
+		o.GCHighThreshold = GCHighThreshold
+	}
+	fs.GCHighThreshold = o.GCHighThreshold
+
+	if o.GCLowThreshold == 0 {
+		o.GCLowThreshold = GCLowThreshold
+	}
+	fs.GCLowThreshold = o.GCLowThreshold
+
+	if o.ResetInterval == 0 {
+		o.ResetInterval = ResetInterval
+	}
+	fs.ResetInterval = o.ResetInterval
+
+	if o.PreservedHitsProportion == 0 {
+		o.PreservedHitsProportion = PreservedHitsProportion
+	}
+	fs.PreservedHitsProportion = o.PreservedHitsProportion
+
+	return nil
+}
+
+// Merge merges two gc configurations; only the values defined in the receiver overwrite the target's.
+func (o GarbageCollectionConfiguration) Merge(cfg *GarbageCollectionConfiguration) {
+	if len(o.Size) != 0 {
+		cfg.Size = o.Size
+	}
+	if o.GCHighThreshold != 0 {
+		cfg.GCHighThreshold = o.GCHighThreshold
+	}
+	if o.GCLowThreshold != 0 {
+		cfg.GCLowThreshold = o.GCLowThreshold
+	}
+	if o.ResetInterval != 0 {
+		cfg.ResetInterval = o.ResetInterval
+	}
+	if o.PreservedHitsProportion != 0 {
+		cfg.PreservedHitsProportion = o.PreservedHitsProportion
+	}
+}
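Reviewer note (not part of the diff): a hedged sketch of the `Merge` semantics above; only the non-zero fields of the receiver overwrite the target, so partial overrides leave other values intact. Numbers are illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/gardener/landscaper/pkg/components/cache"
)

func main() {
	target := &cache.GarbageCollectionConfiguration{
		Size:            "1Gi",
		GCHighThreshold: 0.85,
	}
	// Only GCHighThreshold and ResetInterval are set, so only those overwrite.
	override := cache.GarbageCollectionConfiguration{
		GCHighThreshold: 0.90,
		ResetInterval:   30 * time.Minute,
	}
	override.Merge(target)
	fmt.Println(target.Size, target.GCHighThreshold, target.ResetInterval) // 1Gi 0.9 30m0s
}
```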
+// NewCacheFilesystem creates a new FileSystem cache.
+// It acts as a replacement for a vfs filesystem that restricts the size of the filesystem.
+// The garbage collection is triggered when a file is created.
+// When the filesystem is not used anymore fs.Close() should be called to gracefully free resources.
+func NewCacheFilesystem(log logr.Logger, fs vfs.FileSystem, gcOpts GarbageCollectionConfiguration) (*FileSystem, error) {
+	cFs := &FileSystem{
+		log:        log,
+		FileSystem: fs,
+		index:      NewIndex(),
+	}
+	if err := gcOpts.ApplyOptions(cFs); err != nil {
+		return nil, err
+	}
+
+	// load all cached files from the filesystem
+	files, err := vfs.ReadDir(fs, "/")
+	if err != nil {
+		return nil, fmt.Errorf("unable to read current cached files: %w", err)
+	}
+	for _, file := range files {
+		cFs.currentSize = cFs.currentSize + file.Size()
+		cFs.index.Add(file.Name(), file.Size(), file.ModTime())
+	}
+
+	if cFs.Size != 0 {
+		// start hit reset counter
+		cFs.StartResetInterval()
+	}
+
+	return cFs, nil
+}
+
+// WithMetrics adds prometheus metrics to the filesystem
+// that are set by the filesystem.
+func (fs *FileSystem) WithMetrics(itemsCount, usage prometheus.Gauge, hits prometheus.Counter) {
+	fs.diskUsageMetric = usage
+	fs.hitsCountMetric = hits
+	fs.itemsCountMetric = itemsCount
+
+	if fs.diskUsageMetric != nil {
+		fs.diskUsageMetric.Set(float64(fs.CurrentSize()))
+	}
+	if fs.itemsCountMetric != nil {
+		fs.itemsCountMetric.Set(float64(fs.index.Len()))
+	}
+}
+
+// Close implements the io.Closer interface.
+// It should be called when the cache is not used anymore.
+func (fs *FileSystem) Close() error {
+	if fs.resetStopChan == nil {
+		return nil
+	}
+	fs.resetStopChan <- struct{}{}
+	return nil
+}
+
+var _ io.Closer = &FileSystem{}
+
+// StartResetInterval starts the reset counter for the cache hits.
+func (fs *FileSystem) StartResetInterval() {
+	// use the configured interval (defaulted in ApplyOptions) rather than the package default
+	interval := time.NewTicker(fs.ResetInterval)
+	fs.resetStopChan = make(chan struct{})
+	go func() {
+		for {
+			select {
+			case <-interval.C:
+				fs.index.Reset()
+			case <-fs.resetStopChan:
+				interval.Stop()
+				return
+			}
+		}
+	}()
+}
+
+func (fs *FileSystem) Create(path string, size int64) (vfs.File, error) {
+	fs.mux.Lock()
+	defer fs.mux.Unlock()
+	file, err := fs.FileSystem.Create(path)
+	if err != nil {
+		return nil, err
+	}
+	fs.setCurrentSize(fs.currentSize + size)
+	fs.index.Add(path, size, time.Now())
+	if fs.itemsCountMetric != nil {
+		fs.itemsCountMetric.Inc()
+	}
+	go fs.RunGarbageCollection()
+	return file, err
+}
+
+func (fs *FileSystem) OpenFile(name string, flags int, perm os.FileMode) (vfs.File, error) {
+	fs.index.Hit(name)
+	if fs.hitsCountMetric != nil {
+		fs.hitsCountMetric.Inc()
+	}
+	return fs.FileSystem.OpenFile(name, flags, perm)
+}
+
+func (fs *FileSystem) Remove(name string) error {
+	if err := fs.FileSystem.Remove(name); err != nil {
+		return err
+	}
+	entry := fs.index.Get(name)
+	fs.setCurrentSize(fs.currentSize - entry.Size)
+	fs.index.Remove(name)
+	if fs.itemsCountMetric != nil {
+		fs.itemsCountMetric.Dec()
+	}
+	return nil
+}
+
+// DeleteAll removes all files in the filesystem.
+func (fs *FileSystem) DeleteAll() error {
+	fs.mux.Lock()
+	defer fs.mux.Unlock()
+	files, err := vfs.ReadDir(fs.FileSystem, "/")
+	if err != nil {
+		return fmt.Errorf("unable to read current cached files: %w", err)
+	}
+	for _, file := range files {
+		if err := fs.Remove(file.Name()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CurrentSize returns the current size of the filesystem.
+func (fs *FileSystem) CurrentSize() int64 {
+	return fs.currentSize
+}
+
+// setCurrentSize sets the current size of the filesystem.
+func (fs *FileSystem) setCurrentSize(size int64) {
+	fs.currentSize = size
+	if fs.diskUsageMetric != nil {
+		fs.diskUsageMetric.Set(float64(size))
+	}
+}
+
+// usage returns the current disk usage of the filesystem as a fraction of its max size.
+func (fs *FileSystem) usage() float64 {
+	return float64(fs.currentSize) / float64(fs.Size)
+}
+
+// RunGarbageCollection runs the garbage collection of the filesystem.
+// The gc only deletes items once the usage exceeds a certain threshold.
+// If that threshold is reached the files are deleted with the following priority:
+// - least hits
+// - oldest
+// - random
+func (fs *FileSystem) RunGarbageCollection() {
+	// do not run gc if the size is infinite
+	if fs.Size == 0 {
+		return
+	}
+	// first check if we reached the threshold to start garbage collection;
+	// use the configured thresholds (defaulted in ApplyOptions), not the package defaults
+	if usage := fs.usage(); usage < fs.GCHighThreshold {
+		fs.log.V(10).Info(fmt.Sprintf("skipping gc, usage %v below threshold %v", usage, fs.GCHighThreshold))
+		return
+	}
+
+	// while the index is read and copied no write should happen
+	fs.mux.Lock()
+	index := fs.index.DeepCopy()
+	fs.mux.Unlock()
+
+	// while the garbage collection is running read operations are blocked
+	// todo: improve to only block read operations on really deleted objects
+	fs.mux.Lock()
+	defer fs.mux.Unlock()
+
+	// sort all files according to their deletion priority
+	items := index.PriorityList()
+	for fs.usage() > fs.GCLowThreshold {
+		if len(items) == 0 {
+			return
+		}
+		item := items[0]
+		if err := fs.Remove(item.Name); err != nil {
+			fs.log.Error(err, "unable to delete file", "file", item.Name)
+		}
+		// remove currently garbage collected item
+		items = items[1:]
+	}
+}
+
+type Index struct {
+	mut     sync.RWMutex
+	entries map[string]IndexEntry
+}
+
+// NewIndex creates a new index structure.
+func NewIndex() Index {
+	return Index{
+		mut:     sync.RWMutex{},
+		entries: map[string]IndexEntry{},
+	}
+}
+
+type IndexEntry struct {
+	// Name is the name of the file.
+	Name string
+	// Size is the size of the file in bytes.
+	Size int64
+	// Hits is the number of hits since the last gc.
+	Hits int64
+	// CreatedAt is the time when the file has been created.
+	CreatedAt time.Time
+	// HitsSinceLastReset is the number of hits since the last reset interval.
+	HitsSinceLastReset int64
+}
+
+// Add adds an entry to the index.
+func (i *Index) Add(name string, size int64, createdAt time.Time) {
+	i.mut.Lock()
+	defer i.mut.Unlock()
+	i.entries[name] = IndexEntry{
+		Name:               name,
+		Size:               size,
+		Hits:               0,
+		CreatedAt:          createdAt,
+		HitsSinceLastReset: 0,
+	}
+}
+
+// Len returns the number of items that are currently in the index.
+func (i *Index) Len() int {
+	i.mut.RLock()
+	defer i.mut.RUnlock()
+	return len(i.entries)
+}
+
+// Get returns the index entry with the given name.
+func (i *Index) Get(name string) IndexEntry {
+	i.mut.RLock()
+	defer i.mut.RUnlock()
+	return i.entries[name]
+}
+
+// Remove removes the entry from the index.
+func (i *Index) Remove(name string) {
+	i.mut.Lock()
+	defer i.mut.Unlock()
+	delete(i.entries, name)
+}
+
+// Hit increases the hit count for the file.
+func (i *Index) Hit(name string) {
+	i.mut.Lock()
+	defer i.mut.Unlock()
+	entry, ok := i.entries[name]
+	if !ok {
+		return
+	}
+	entry.Hits++
+	entry.HitsSinceLastReset++
+	i.entries[name] = entry
+}
+
+// Reset resets the hit counter for all entries.
+// The reset preserves PreservedHitsProportion (50% by default) of the hits from before the current interval.
+func (i *Index) Reset() {
+	i.mut.Lock()
+	defer i.mut.Unlock()
+	for name, entry := range i.entries {
+		oldHits := entry.Hits - entry.HitsSinceLastReset
+		preservedHits := int64(float64(oldHits) * PreservedHitsProportion)
+		entry.Hits = preservedHits + entry.HitsSinceLastReset
+		entry.HitsSinceLastReset = 0
+		i.entries[name] = entry
+	}
+}
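Reviewer note (not part of the diff): a worked example of the hit reset above, with illustrative numbers and the default `PreservedHitsProportion` of 0.5.

```go
package main

import "fmt"

func main() {
	const preserved = 0.5 // PreservedHitsProportion default
	hits, sinceReset := int64(10), int64(4)

	// Hits from before the current interval are halved; recent hits are kept.
	oldHits := hits - sinceReset
	newHits := int64(float64(oldHits)*preserved) + sinceReset
	fmt.Println(newHits) // 7
}
```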
+// DeepCopy creates a deep copy of the current index.
+func (i *Index) DeepCopy() *Index {
+	i.mut.RLock()
+	defer i.mut.RUnlock()
+	index := &Index{
+		entries: make(map[string]IndexEntry, len(i.entries)),
+	}
+	for key, value := range i.entries {
+		index.entries[key] = value
+	}
+	return index
+}
+
+// PriorityList returns a list of all entries
+// sorted by their gc priority.
+// The entry with the lowest priority is the first item.
+func (i *Index) PriorityList() []IndexEntry {
+	p := priorityList{
+		entries: make([]IndexEntry, 0),
+	}
+	for _, i := range i.entries {
+		entry := i
+		if entry.Hits > p.maxHits {
+			p.maxHits = entry.Hits
+		}
+		if p.minHits == 0 || entry.Hits < p.minHits {
+			p.minHits = entry.Hits
+		}
+		if p.newest.Before(entry.CreatedAt) {
+			p.newest = entry.CreatedAt
+		}
+		if p.oldest.IsZero() || entry.CreatedAt.Before(p.oldest) {
+			p.oldest = entry.CreatedAt
+		}
+		p.entries = append(p.entries, entry)
+	}
+	sort.Sort(p)
+	return p.entries
+}
+
+// priorityList is a helper type that implements sort.Interface.
+// The entries are sorted by their priority.
+// The priority is calculated using the entries' hits and creation date.
+type priorityList struct {
+	minHits, maxHits int64
+	oldest, newest   time.Time
+	entries          []IndexEntry
+}
+
+var _ sort.Interface = priorityList{}
+
+func (i priorityList) Len() int { return len(i.entries) }
+
+func (i priorityList) Less(a, b int) bool {
+	eA, eB := i.entries[a], i.entries[b]
+
+	// calculate the entries' hits value
+	// based on the min and max values
+	priorityA := CalculatePriority(eA, i.minHits, i.maxHits, i.oldest, i.newest)
+	priorityB := CalculatePriority(eB, i.minHits, i.maxHits, i.oldest, i.newest)
+	return priorityA < priorityB
+}
+
+// CalculatePriority calculates the gc priority of an index entry.
+// A lower priority means that the entry is more likely to be deleted.
+func CalculatePriority(entry IndexEntry, minHits, maxHits int64, oldest, newest time.Time) float64 {
+	hitsVal := float64(entry.Hits-minHits) / float64(maxHits-minHits)
+	if math.IsNaN(hitsVal) {
+		hitsVal = 0
+	}
+	dateVal := float64(entry.CreatedAt.UnixNano()-oldest.UnixNano()) / float64(newest.UnixNano()-oldest.UnixNano())
+	if math.IsNaN(dateVal) {
+		dateVal = 0
+	}
+
+	return (hitsVal * 0.6) + (dateVal * 0.4)
+}
+
+func (i priorityList) Swap(a, b int) { i.entries[a], i.entries[b] = i.entries[b], i.entries[a] }
diff --git a/pkg/components/cache/inmemory.go b/pkg/components/cache/inmemory.go
new file mode 100644
index 000000000..14538d8d3
--- /dev/null
+++ b/pkg/components/cache/inmemory.go
@@ -0,0 +1,49 @@
+// SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package cache
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	ocispecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type inmemoryCache struct {
+	store map[string][]byte
+}
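Reviewer note (not part of the diff): a worked example of `CalculatePriority` above with illustrative entries. Hits and age are normalized to [0, 1] and combined with weights 0.6/0.4, so the coldest, oldest entry scores 0 (deleted first) and the hottest, newest scores 1.

```go
package main

import (
	"fmt"
	"time"

	"github.com/gardener/landscaper/pkg/components/cache"
)

func main() {
	now := time.Now()
	oldest, newest := now.Add(-2*time.Hour), now

	cold := cache.IndexEntry{Name: "cold", Hits: 0, CreatedAt: oldest}
	hot := cache.IndexEntry{Name: "hot", Hits: 5, CreatedAt: newest}

	// minHits=0, maxHits=5 as derived by PriorityList over these two entries.
	fmt.Println(cache.CalculatePriority(cold, 0, 5, oldest, newest)) // 0
	fmt.Println(cache.CalculatePriority(hot, 0, 5, oldest, newest))  // 1
}
```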
+// NewInMemoryCache creates a new in-memory cache.
+func NewInMemoryCache() *inmemoryCache {
+	return &inmemoryCache{
+		store: make(map[string][]byte),
+	}
+}
+
+func (c *inmemoryCache) Close() error {
+	return nil
+}
+
+func (c *inmemoryCache) Get(desc ocispecv1.Descriptor) (io.ReadCloser, error) {
+	data, ok := c.store[desc.Digest.String()]
+	if !ok {
+		return nil, ErrNotFound
+	}
+	return io.NopCloser(bytes.NewBuffer(data)), nil
+}
+
+func (c *inmemoryCache) Add(desc ocispecv1.Descriptor, reader io.ReadCloser) error {
+	if _, ok := c.store[desc.Digest.String()]; ok {
+		// already cached
+		return nil
+	}
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, reader); err != nil {
+		return fmt.Errorf("unable to read data: %w", err)
+	}
+	c.store[desc.Digest.String()] = buf.Bytes()
+	return nil
+}
diff --git a/pkg/components/cache/metrics.go b/pkg/components/cache/metrics.go
new file mode 100644
index 000000000..ffa8a3f42
--- /dev/null
+++ b/pkg/components/cache/metrics.go
@@ -0,0 +1,107 @@
+// SPDX-FileCopyrightText: 2021 SAP SE or an SAP affiliate company and Gardener contributors.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package cache
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+
+	lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1"
+)
+
+const (
+	storeSubsystemName = "blueprintCacheStore"
+
+	ociClientNamespaceName = "ociclient"
+	cacheSubsystemName     = "blueprintCache"
+)
+
+var (
+	// DiskUsage discloses disk used by the blueprint store
+	DiskUsage = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: lsv1alpha1.LandscaperMetricsNamespaceName,
+			Subsystem: storeSubsystemName,
+			Name:      "disk_usage_bytes",
+			Help:      "Bytes on disk currently used by blueprint store instance.",
+		},
+	)
+
+	// StoredItems discloses the number of items stored by the blueprint store.
+	StoredItems = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Namespace: lsv1alpha1.LandscaperMetricsNamespaceName,
+			Subsystem: storeSubsystemName,
+			Name:      "items_total",
+			Help:      "Total number of items currently stored by the blueprint store.",
+		},
+	)
+
+	// CacheMemoryUsage discloses memory used by caches
+	CacheMemoryUsage = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: ociClientNamespaceName,
+			Subsystem: cacheSubsystemName,
+			Name:      "memory_usage_bytes",
+			Help:      "Bytes in memory currently used by cache instance.",
+		},
+		[]string{"id"},
+	)
+
+	// CacheDiskUsage discloses disk used by caches
+	CacheDiskUsage = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: ociClientNamespaceName,
+			Subsystem: cacheSubsystemName,
+			Name:      "disk_usage_bytes",
+			Help:      "Bytes on disk currently used by cache instance.",
+		},
+		[]string{"id"},
+	)
+
+	// CachedItems discloses the number of items stored by caches
+	CachedItems = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: ociClientNamespaceName,
+			Subsystem: cacheSubsystemName,
+			Name:      "items_total",
+			Help:      "Total number of items currently cached by instance.",
+		},
+		[]string{"id"},
+	)
+
+	// CacheHitsDisk discloses the number of hits for items cached on disk
+	CacheHitsDisk = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: ociClientNamespaceName,
+			Subsystem: cacheSubsystemName,
+			Name:      "disk_hits_total",
+			Help:      "Total number of hits for items cached on disk by an instance.",
+		},
+		[]string{"id"},
+	)
+
+	// CacheHitsMemory discloses the number of hits for items cached in memory
+	CacheHitsMemory = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: ociClientNamespaceName,
+			Subsystem: cacheSubsystemName,
+			Name:      "memory_hits_total",
+			Help:      "Total number of hits for items cached in memory by an instance.",
+		},
+		[]string{"id"},
+	)
+)
+
+// RegisterStoreMetrics registers the blueprint store metrics with the given prometheus registerer
+func RegisterStoreMetrics(reg prometheus.Registerer) {
+	reg.MustRegister(DiskUsage)
+	reg.MustRegister(StoredItems)
+
+	reg.MustRegister(CacheHitsDisk)
+	reg.MustRegister(CacheHitsMemory)
+	reg.MustRegister(CachedItems)
+	reg.MustRegister(CacheDiskUsage)
+	reg.MustRegister(CacheMemoryUsage)
+}
diff --git a/pkg/components/cache/types.go b/pkg/components/cache/types.go
new file mode 100644
index 000000000..04952386e
--- /dev/null
+++ b/pkg/components/cache/types.go
@@ -0,0 +1,175 @@
+// SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package cache
+
+import (
+	"errors"
+	"io"
+
+	ocispecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+
+	"github.com/google/uuid"
+)
+
+var (
+	// ErrNotFound is an error that indicates that the file is not cached
+	ErrNotFound = errors.New("not cached")
+)
+
+// CacheDirEnvName is the name of the environment variable that configures the cache directory.
+const CacheDirEnvName = "OCI_CACHE_DIR"
+
+// Cache is the interface for an oci cache
+type Cache interface {
+	io.Closer
+	Get(desc ocispecv1.Descriptor) (io.ReadCloser, error)
+	Add(desc ocispecv1.Descriptor, reader io.ReadCloser) error
+}
+
+// Info contains additional information about the cache
+type Info struct {
+	// Size is the max size of the filesystem in bytes.
+	// If the value is 0 there is no limit and no garbage collection will happen.
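+	// A Size of 209715200, for example, corresponds to a 200Mi limit.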
+	// +optional
+	Size int64 `json:"size"`
+	// CurrentSize is the current size of the cache
+	CurrentSize int64 `json:"currentSize"`
+	// ItemsCount is the number of items that are currently managed by the cache.
+	ItemsCount int64 `json:"items"`
+}
+
+// InfoInterface describes an interface that can be optionally exposed by a cache to give additional information.
+type InfoInterface interface {
+	Info() (Info, error)
+}
+
+// PruneInterface describes an interface that can be optionally exposed by a cache to manually prune the cache.
+type PruneInterface interface {
+	Prune() error
+}
+
+// InjectCache is an interface to inject a cache.
+type InjectCache interface {
+	InjectCache(c Cache) error
+}
+
+// InjectCacheInto injects a cache if the given object implements the InjectCache interface.
+func InjectCacheInto(obj interface{}, cache Cache) error {
+	if cache == nil {
+		return nil
+	}
+	if injector, ok := obj.(InjectCache); ok {
+		return injector.InjectCache(cache)
+	}
+	return nil
+}
+
+// Options contains all options to configure the oci cache.
+type Options struct {
+	// InMemoryOverlay specifies if an in-memory overlay filesystem cache should be used
+	InMemoryOverlay bool
+
+	// InMemoryGCConfig defines the garbage collection configuration for the in memory cache.
+	InMemoryGCConfig GarbageCollectionConfiguration
+
+	// BasePath specifies the base path for the cache on the os filesystem.
+	// Will be defaulted to a temporary directory if not specified.
+	BasePath string
+
+	// BaseGCConfig defines the garbage collection configuration for the base cache.
+	BaseGCConfig GarbageCollectionConfiguration
+
+	// UID is the identity of a cache; if not specified, a UID will be generated
+	UID string
+}
+
+// Option is the interface to specify different cache options
+type Option interface {
+	ApplyOption(options *Options)
+}
+
+// ApplyOptions applies the given list of options on these options,
+// and then returns itself (for convenient chaining).
+func (o *Options) ApplyOptions(opts []Option) *Options {
+	for _, opt := range opts {
+		opt.ApplyOption(o)
+	}
+	return o
+}
+
+// ApplyDefaults sets defaults for the options.
+func (o *Options) ApplyDefaults() {
+	if o.InMemoryOverlay && len(o.InMemoryGCConfig.Size) == 0 {
+		o.InMemoryGCConfig.Size = "200Mi"
+	}
+
+	if len(o.UID) == 0 {
+		o.UID = uuid.New().String()
+	}
+}
+
+// WithInMemoryOverlay is the option to specify the usage of an in-memory overlayFs
+type WithInMemoryOverlay bool
+
+func (w WithInMemoryOverlay) ApplyOption(options *Options) {
+	options.InMemoryOverlay = bool(w)
+}
+
+// WithBasePath is the option to specify a base path
+type WithBasePath string
+
+func (p WithBasePath) ApplyOption(options *Options) {
+	options.BasePath = string(p)
+}
+
+// WithInMemoryOverlaySize sets the max size of the overlay file system.
+// See the kubernetes quantity docs for detailed description of the format
+// https://github.com/kubernetes/apimachinery/blob/master/pkg/api/resource/quantity.go
+type WithInMemoryOverlaySize string
+
+func (p WithInMemoryOverlaySize) ApplyOption(options *Options) {
+	options.InMemoryGCConfig.Size = string(p)
+}
+
+// WithBaseSize sets the max size of the base file system.
+// See the kubernetes quantity docs for detailed description of the format
+// https://github.com/kubernetes/apimachinery/blob/master/pkg/api/resource/quantity.go
+type WithBaseSize string
+
+func (p WithBaseSize) ApplyOption(options *Options) {
+	options.BaseGCConfig.Size = string(p)
+}
+
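+// Illustrative composition of the options above (a sketch; the concrete
+// values are examples only):
+//
+//	opts := (&Options{}).ApplyOptions([]Option{
+//		WithInMemoryOverlay(true),
+//		WithBasePath("/tmp/oci-cache"),
+//		WithBaseSize("1Gi"),
+//	})
+//	opts.ApplyDefaults() // defaults the in-memory size to 200Mi and generates a UID
+
+// WithGCConfig overwrites the garbage collection settings for all caches.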
+type WithGCConfig GarbageCollectionConfiguration
+
+func (p WithGCConfig) ApplyOption(options *Options) {
+	cfg := GarbageCollectionConfiguration(p)
+	cfg.Merge(&options.BaseGCConfig)
+	cfg.Merge(&options.InMemoryGCConfig)
+}
+
+// WithBaseGCConfig overwrites the base garbage collection settings.
+type WithBaseGCConfig GarbageCollectionConfiguration
+
+func (p WithBaseGCConfig) ApplyOption(options *Options) {
+	cfg := GarbageCollectionConfiguration(p)
+	cfg.Merge(&options.BaseGCConfig)
+}
+
+// WithInMemoryGCConfig overwrites the in memory garbage collection settings.
+type WithInMemoryGCConfig GarbageCollectionConfiguration
+
+func (p WithInMemoryGCConfig) ApplyOption(options *Options) {
+	cfg := GarbageCollectionConfiguration(p)
+	cfg.Merge(&options.InMemoryGCConfig)
+}
+
+// WithUID is the option to give a cache an identity
+type WithUID string
+
+func (p WithUID) ApplyOption(options *Options) {
+	options.UID = string(p)
+}
diff --git a/pkg/components/cnudie/registries/resourcetypehandlers.go b/pkg/components/cnudie/registries/resourcetypehandlers.go
new file mode 100644
index 000000000..c610266a7
--- /dev/null
+++ b/pkg/components/cnudie/registries/resourcetypehandlers.go
@@ -0,0 +1,47 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package registries
+
+import (
+	"context"
+	"sync"
+
+	"github.com/gardener/landscaper/pkg/components/model"
+)
+
+var Registry = New()
+
+type ResourceHandler interface {
+	GetResourceContent(ctx context.Context, resource model.Resource, blobResolver model.BlobResolver) (*model.TypedResourceContent, error)
+}
+
+type ResourceHandlerRegistry struct {
+	lock     sync.Mutex
+	handlers map[string]ResourceHandler
+}
+
+func New() *ResourceHandlerRegistry {
+	return &ResourceHandlerRegistry{
+		handlers: map[string]ResourceHandler{},
+	}
+}
+
+func (r *ResourceHandlerRegistry) Register(typ string, handler ResourceHandler) {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+
+	r.handlers[typ] = handler
+}
+
+func (r *ResourceHandlerRegistry) Get(typ string) ResourceHandler {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+
+	res, ok := r.handlers[typ]
+	if !ok {
+		return nil
+	}
+	return res
+}
diff --git a/pkg/components/cnudie/resource.go b/pkg/components/cnudie/resource.go
index 8a49c95f8..988e08807 100644
--- a/pkg/components/cnudie/resource.go
+++ b/pkg/components/cnudie/resource.go
@@ -6,8 +6,11 @@ package cnudie
 
 import (
 	"context"
+	"fmt"
 	"io"
 
+	"github.com/gardener/landscaper/pkg/components/ocmlib/registries"
+
 	"github.com/gardener/component-spec/bindings-go/ctf"
 
 	"github.com/gardener/landscaper/pkg/components/model"
@@ -16,14 +19,16 @@ import (
 
 func NewResource(res *types.Resource, blobResolver ctf.BlobResolver) model.Resource {
 	return &Resource{
-		resource:     res,
-		blobResolver: blobResolver,
+		resource:        res,
+		blobResolver:    blobResolver,
+		handlerRegistry: registries.Registry,
 	}
 }
 
 type Resource struct {
-	resource     *types.Resource
-	blobResolver ctf.BlobResolver
+	resource        *types.Resource
+	blobResolver    ctf.BlobResolver
+	handlerRegistry *registries.ResourceHandlerRegistry
 }
 
 var _ model.Resource = &Resource{}
@@ -55,3 +60,21 @@ func (r Resource) GetBlob(ctx context.Context, writer io.Writer) (*types.BlobInfo
 
 func (r Resource) GetBlobInfo(ctx context.Context) (*types.BlobInfo, error) {
 	return r.blobResolver.Info(ctx, *r.resource)
 }
+
+func (r *Resource) GetTypedContent(ctx context.Context) (*model.TypedResourceContent, error) {
+	handler := r.handlerRegistry.Get(r.GetType())
+	if 
handler != nil {
+		// TODO
+		// return handler.GetResourceContent(ctx, r, r.blobResolver)
+		return nil, model.NotImplemented()
+	}
+	return nil, fmt.Errorf("no handler found for resource type %s", r.GetType())
+}
+
+func (r *Resource) GetCachingIdentity(ctx context.Context) string {
+	blobInfo, _ := r.blobResolver.Info(ctx, *r.resource)
+	if blobInfo == nil {
+		return ""
+	}
+	return blobInfo.Digest
+}
diff --git a/pkg/components/common/util.go b/pkg/components/common/util.go
new file mode 100644
index 000000000..f1f5e05c1
--- /dev/null
+++ b/pkg/components/common/util.go
@@ -0,0 +1,105 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package common
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"regexp"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	helmv1alpha1 "github.com/gardener/landscaper/apis/deployer/helm/v1alpha1"
+	"github.com/gardener/landscaper/pkg/components/model/types"
+)
+
+var scheme = regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9]*://.*$")
+
+// NormalizeUrl trims surrounding whitespace and a trailing slash from the
+// given url and prepends "https://" if no scheme is present.
+func NormalizeUrl(url string) string {
+	result := strings.TrimSpace(url)
+	result = strings.TrimSuffix(result, "/")
+	if !scheme.MatchString(result) {
+		return "https://" + result
+	}
+	return result
+}
+
+// GetAuthHeader returns the auth header from the given auth data, either
+// directly or by reading it from the referenced secret. Exactly one of the
+// two sources must be set.
+func GetAuthHeader(ctx context.Context, authData *helmv1alpha1.Auth, lsClient client.Client, namespace string) (string, error) {
+	if len(authData.AuthHeader) > 0 && authData.SecretRef != nil {
+		return "", fmt.Errorf("failed to get auth header: auth header and secret ref are both set")
+	}
+
+	if len(authData.AuthHeader) > 0 {
+		return authData.AuthHeader, nil
+	}
+
+	if authData.SecretRef != nil {
+		secretKey := client.ObjectKey{Name: authData.SecretRef.Name, Namespace: namespace}
+		secret := &corev1.Secret{}
+		if err := lsClient.Get(ctx, secretKey, secret); err != nil {
+			return "", err
+		}
+
+		authHeaderKey := authData.SecretRef.Key
+		if len(authData.SecretRef.Key) == 0 {
+			authHeaderKey = types.AuthHeaderSecretDefaultKey
+		}
+
+		authHeader, ok := secret.Data[authHeaderKey]
+		if !ok {
+			return "", fmt.Errorf("failed to get auth header: key %s not found in secret", authHeaderKey)
+		}
+
+		return string(authHeader), nil
+	}
+
+	return "", fmt.Errorf("failed to get auth header: neither auth header nor secret ref is set")
+}
+
+// Copied from the standard library net/http package
+
+// ParseBasicAuth parses an HTTP Basic Authentication string.
+func ParseBasicAuth(auth string) (username, password string, ok bool) {
+	const prefix = "Basic "
+	// Case insensitive prefix match. See Issue 22736.
+	if len(auth) < len(prefix) || !EqualFold(auth[:len(prefix)], prefix) {
+		return "", "", false
+	}
+	c, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
+	if err != nil {
+		return "", "", false
+	}
+	cs := string(c)
+	username, password, ok = strings.Cut(cs, ":")
+	if !ok {
+		return "", "", false
+	}
+	return username, password, true
+}
+
+// EqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
+func EqualFold(s, t string) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i := 0; i < len(s); i++ {
+		if lower(s[i]) != lower(t[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// lower returns the ASCII lowercase version of b.
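+// For example, lower('A') yields 'a', while bytes outside 'A'..'Z'
+// (including non-ASCII bytes) are returned unchanged.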
+func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} diff --git a/pkg/components/model/resource.go b/pkg/components/model/resource.go index def9bdbf4..ca25e90d1 100644 --- a/pkg/components/model/resource.go +++ b/pkg/components/model/resource.go @@ -12,6 +12,7 @@ import ( ) type Resource interface { + TypedResourceProvider // GetName returns the name by which the resource can be identified among all resources of a component version. GetName() string @@ -34,4 +35,15 @@ type Resource interface { // GetBlobInfo returns information like mediatype and digest of the resource. GetBlobInfo(ctx context.Context) (*types.BlobInfo, error) + + GetCachingIdentity(ctx context.Context) string +} + +type TypedResourceProvider interface { + GetTypedContent(ctx context.Context) (*TypedResourceContent, error) +} + +type TypedResourceContent struct { + Type string + Resource interface{} } diff --git a/pkg/components/model/types/constants.go b/pkg/components/model/types/constants.go index 033b67f7e..dec42bfaa 100644 --- a/pkg/components/model/types/constants.go +++ b/pkg/components/model/types/constants.go @@ -2,6 +2,8 @@ package types // Constants for resource types const ( + AuthHeaderSecretDefaultKey = "authHeader" + // OldHelmResourceType describes the old resource type of helm chart resources defined in a component descriptor. OldHelmResourceType = "helm" diff --git a/pkg/components/model/util.go b/pkg/components/model/util.go new file mode 100644 index 000000000..7a2d9ff85 --- /dev/null +++ b/pkg/components/model/util.go @@ -0,0 +1,7 @@ +package model + +import "fmt" + +func NotImplemented() error { + return fmt.Errorf("not implemented") +} diff --git a/pkg/components/ocmlib/componentversion.go b/pkg/components/ocmlib/componentversion.go new file mode 100644 index 000000000..e320a8d66 --- /dev/null +++ b/pkg/components/ocmlib/componentversion.go @@ -0,0 +1,83 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package ocmlib + +import ( + "context" + "fmt" + + cdv2 "github.com/gardener/component-spec/bindings-go/apis/v2" + "github.com/open-component-model/ocm/pkg/contexts/ocm" + + lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" + "github.com/gardener/landscaper/pkg/components/model" + "github.com/gardener/landscaper/pkg/components/model/componentoverwrites" + "github.com/gardener/landscaper/pkg/components/model/types" +) + +type ComponentVersion struct { + registryAccess *RegistryAccess + componentVersionAccess ocm.ComponentVersionAccess + componentDescriptorV2 cdv2.ComponentDescriptor +} + +func (c *ComponentVersion) GetName() string { + return c.componentVersionAccess.GetName() +} + +func (c *ComponentVersion) GetVersion() string { + return c.componentVersionAccess.GetVersion() +} + +func (c *ComponentVersion) GetComponentDescriptor() *types.ComponentDescriptor { + return &c.componentDescriptorV2 +} + +func (c *ComponentVersion) GetRepositoryContext() *types.UnstructuredTypedObject { + return c.componentDescriptorV2.GetEffectiveRepositoryContext() +} + +func (c *ComponentVersion) GetComponentReferences() []types.ComponentReference { + return c.componentDescriptorV2.ComponentReferences +} + +func (c *ComponentVersion) GetComponentReference(name string) *types.ComponentReference { + refs := c.GetComponentReferences() + + for i := range refs { + ref := &refs[i] + if ref.GetName() == name { + return ref + } + } + + return nil +} + +func (c *ComponentVersion) 
GetReferencedComponentVersion(ctx context.Context, ref *types.ComponentReference, repositoryContext *types.UnstructuredTypedObject, overwriter componentoverwrites.Overwriter) (model.ComponentVersion, error) { + cdRef := &lsv1alpha1.ComponentDescriptorReference{ + RepositoryContext: repositoryContext, + ComponentName: ref.ComponentName, + Version: ref.Version, + } + + return model.GetComponentVersionWithOverwriter(ctx, c.registryAccess, cdRef, overwriter) + +} + +func (c *ComponentVersion) GetResource(name string, identity map[string]string) (model.Resource, error) { + resources, err := c.componentVersionAccess.GetResourcesByName(name, cdv2.Identity(identity)) + if err != nil { + return nil, err + } + if len(resources) < 1 { + return nil, fmt.Errorf("no resource with name %s and extra identities %v found", name, identity) + } + if len(resources) > 1 { + return nil, fmt.Errorf("there is more than one resource with name %s and extra identities %v", name, identity) + } + + return NewResource(resources[0]), nil +} diff --git a/pkg/components/ocmlib/factory.go b/pkg/components/ocmlib/factory.go new file mode 100644 index 000000000..3adc5de64 --- /dev/null +++ b/pkg/components/ocmlib/factory.go @@ -0,0 +1,276 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package ocmlib + +import ( + "context" + "fmt" + + mandellog "github.com/mandelsoft/logging" + credconfig "github.com/open-component-model/ocm/pkg/contexts/credentials/config" + + "github.com/gardener/landscaper/controller-utils/pkg/logging" + + "github.com/gardener/component-cli/ociclient/cache" + "github.com/gardener/component-spec/bindings-go/ctf" + "github.com/mandelsoft/vfs/pkg/memoryfs" + "github.com/mandelsoft/vfs/pkg/osfs" + "github.com/mandelsoft/vfs/pkg/projectionfs" + "github.com/mandelsoft/vfs/pkg/readonlyfs" + "github.com/mandelsoft/vfs/pkg/vfs" + ocmcommon "github.com/open-component-model/ocm/pkg/common" + "github.com/open-component-model/ocm/pkg/contexts/credentials" + "github.com/open-component-model/ocm/pkg/contexts/credentials/repositories/dockerconfig" + "github.com/open-component-model/ocm/pkg/contexts/datacontext" + "github.com/open-component-model/ocm/pkg/contexts/datacontext/attrs/vfsattr" + "github.com/open-component-model/ocm/pkg/contexts/oci" + "github.com/open-component-model/ocm/pkg/contexts/ocm" + "github.com/open-component-model/ocm/pkg/errors" + "github.com/open-component-model/ocm/pkg/helm/identity" + "github.com/open-component-model/ocm/pkg/runtime" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/gardener/landscaper/apis/config" + lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" + helmv1alpha1 "github.com/gardener/landscaper/apis/deployer/helm/v1alpha1" + lserrors "github.com/gardener/landscaper/apis/errors" + "github.com/gardener/landscaper/pkg/components/common" + "github.com/gardener/landscaper/pkg/components/model" + "github.com/gardener/landscaper/pkg/components/model/types" + "github.com/gardener/landscaper/pkg/components/ocmlib/inlinecompdesc" + "github.com/gardener/landscaper/pkg/components/ocmlib/repository" +) + +type Factory struct{} + +// TODO +//var _ model.Factory = &Factory{} + +func (*Factory) SetApplicationLogger(logger logging.Logger) { + mandellog.DefaultContext().SetBaseLogger(logger.Logr()) +} + +func (*Factory) NewRegistryAccess(ctx context.Context, + fs vfs.FileSystem, + secrets []corev1.Secret, + sharedCache 
cache.Cache,
+	localRegistryConfig *config.LocalRegistryConfiguration,
+	ociRegistryConfig *config.OCIConfiguration,
+	inlineCd *types.ComponentDescriptor,
+	additionalBlobResolvers ...ctf.TypedBlobResolver) (model.RegistryAccess, error) {
+
+	logger, _ := logging.FromContextOrNew(ctx, nil)
+
+	if fs == nil {
+		fs = osfs.New()
+	}
+
+	registryAccess := &RegistryAccess{}
+	registryAccess.octx = ocm.New(datacontext.MODE_EXTENDED)
+	registryAccess.session = ocm.NewSession(datacontext.NewSession())
+
+	registryAccess.octx.LoggingContext().SetBaseLogger(logger.Logr())
+
+	ociConfigFiles := make([]string, 0)
+	if ociRegistryConfig != nil {
+		ociConfigFiles = ociRegistryConfig.ConfigFiles
+	}
+
+	var localfs vfs.FileSystem
+	if localRegistryConfig != nil {
+		var err error
+		localfs, err = projectionfs.New(fs, localRegistryConfig.RootPath)
+		if err != nil {
+			return nil, err
+		}
+		vfsattr.Set(registryAccess.octx, localfs)
+	} else {
+		// safeguard so that the file system cannot be accessed
+		vfsattr.Set(registryAccess.octx, readonlyfs.New(memoryfs.New()))
+	}
+
+	if inlineCd != nil {
+		cd, err := runtime.DefaultYAMLEncoding.Marshal(inlineCd)
+		if err != nil {
+			return nil, err
+		}
+		inline, err := inlinecompdesc.New(cd)
+		if err != nil {
+			return nil, err
+		}
+		descriptors, err := inline.GetFlatList()
+		if err != nil {
+			return nil, err
+		}
+
+		provider := repository.NewMemoryCompDescProvider(descriptors)
+		registryAccess.inlineRepository, err = repository.NewRepository(registryAccess.octx, provider, localfs)
+		if err != nil {
+			return nil, err
+		}
+		_ = registryAccess.session.AddCloser(registryAccess.inlineRepository)
+
+		registryAccess.resolver = registryAccess.inlineRepository
+		if len(inlineCd.RepositoryContexts) > 0 {
+			repoCtx := inlineCd.GetEffectiveRepositoryContext()
+			registryAccess.inlineSpec, err = registryAccess.octx.RepositorySpecForConfig(repoCtx.Raw, nil)
+			if err != nil {
+				return nil, err
+			}
+			registryAccess.resolver, err = registryAccess.session.LookupRepository(registryAccess.octx, registryAccess.inlineSpec)
+			if err != nil {
+				return nil, err
+			}
+			registryAccess.resolver = ocm.NewCompoundResolver(registryAccess.inlineRepository, registryAccess.resolver)
+		}
+	}
+
+	for _, path := range ociConfigFiles {
+		dockerConfigBytes, err := vfs.ReadFile(fs, path)
+		if err != nil {
+			return nil, err
+		}
+		spec := dockerconfig.NewRepositorySpecForConfig(dockerConfigBytes, true)
+		_, err = registryAccess.octx.CredentialsContext().RepositoryForSpec(spec)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// set credentials from pull secrets
+	for _, secret := range secrets {
+		dockerConfigBytes, ok := secret.Data[corev1.DockerConfigJsonKey]
+		if ok {
+			spec := dockerconfig.NewRepositorySpecForConfig(dockerConfigBytes, true)
+			_, err := registryAccess.octx.CredentialsContext().RepositoryForSpec(spec)
+			if err != nil {
+				return nil, errors.Wrapf(err, "cannot create credentials from secret")
+			}
+		}
+		ocmConfigBytes, ok := secret.Data[".ocmcredentialconfig"]
+		if ok {
+			cfg, err := registryAccess.octx.ConfigContext().GetConfigForData(ocmConfigBytes, runtime.DefaultYAMLEncoding)
+			if err != nil {
+				return nil, err
+			}
+			if cfg.GetKind() == credconfig.ConfigType {
+				err := registryAccess.octx.ConfigContext().ApplyConfig(cfg, fmt.Sprintf("landscaper secret: %s/%s", secret.Namespace, secret.Name))
+				if err != nil {
+					return nil, err
+				}
+			}
+		}
+	}
+
+	return registryAccess, nil
+}
+
+func (f *Factory) NewHelmRepoResource(ctx context.Context, helmChartRepo 
*helmv1alpha1.HelmChartRepo, lsClient client.Client, contextObj *lsv1alpha1.Context) (model.TypedResourceProvider, error) { + provider := &HelmChartProvider{ + ocictx: oci.New(datacontext.MODE_EXTENDED), + ref: helmChartRepo.HelmChartName, + version: helmChartRepo.HelmChartVersion, + repourl: common.NormalizeUrl(helmChartRepo.HelmChartRepoUrl), + } + + if contextObj != nil && contextObj.Configurations != nil { + if rawAuths, ok := contextObj.Configurations[helmv1alpha1.HelmChartRepoCredentialsKey]; ok { + repoCredentials := helmv1alpha1.HelmChartRepoCredentials{} + err := yaml.Unmarshal(rawAuths.RawMessage, &repoCredentials) + if err != nil { + return nil, lserrors.NewWrappedError(err, "NewHelmChartRepoClient", "ParsingAuths", err.Error(), lsv1alpha1.ErrorConfigurationProblem) + } + + for _, a := range repoCredentials.Auths { + id := identity.GetConsumerId(provider.repourl, "") + provider.ocictx.CredentialsContext().SetCredentialsForConsumer(id, &CredentialSource{ + lsClient: lsClient, + auth: a, + namespace: contextObj.Namespace, + }) + } + } + } + + return provider, nil +} + +type CredentialSource struct { + lsClient client.Client + auth helmv1alpha1.Auth + namespace string +} + +func (c *CredentialSource) Credentials(ctx credentials.Context, _ ...credentials.CredentialsSource) (credentials.Credentials, error) { + authheader, err := common.GetAuthHeader(context.Background(), &c.auth, c.lsClient, c.namespace) + if err != nil { + return nil, err + } + username, password, ok := common.ParseBasicAuth(authheader) + if !ok { + return nil, errors.New("only basic auth supported") + } + + props := ocmcommon.Properties{ + identity.ATTR_USERNAME: username, + identity.ATTR_PASSWORD: password, + } + if c.auth.CustomCAData != "" { + props[identity.ATTR_CERTIFICATE_AUTHORITY] = c.auth.CustomCAData + } + return credentials.NewCredentials(props), nil +} + +func (f *Factory) NewHelmOCIResource(ctx context.Context, ociImageRef string, registryPullSecrets []corev1.Secret, ociConfig *config.OCIConfiguration, sharedCache cache.Cache) (model.TypedResourceProvider, error) { + refspec, err := oci.ParseRef(ociImageRef) + if err != nil { + return nil, err + } + + provider := &HelmChartProvider{ + ocictx: oci.DefaultContext(), + ref: refspec.Repository, + version: refspec.Version(), + repourl: fmt.Sprintf("oci://%s", refspec.Host), + } + + ociConfigFiles := make([]string, 0) + if ociConfig != nil { + ociConfigFiles = ociConfig.ConfigFiles + } + + // set available default credentials from dockerconfig files + var credspec *dockerconfig.RepositorySpec + for _, path := range ociConfigFiles { + credspec = dockerconfig.NewRepositorySpec(path, true) + _, err := provider.ocictx.CredentialsContext().RepositoryForSpec(credspec) + if err != nil { + return nil, errors.Wrapf(err, "cannot access %v", path) + } + } + + // set credentials from pull secrets + for _, secret := range registryPullSecrets { + if secret.Type != corev1.SecretTypeDockerConfigJson { + continue + } + dockerConfigBytes, ok := secret.Data[corev1.DockerConfigJsonKey] + if !ok { + continue + } + credspec := dockerconfig.NewRepositorySpecForConfig(dockerConfigBytes) + _, err := provider.ocictx.CredentialsContext().RepositoryForSpec(credspec) + if err != nil { + return nil, errors.Wrapf(err, "cannot create credentials from secret") + } + } + + return provider, nil +} diff --git a/pkg/components/ocmlib/inlinecompdesc/util.go b/pkg/components/ocmlib/inlinecompdesc/util.go new file mode 100644 index 000000000..dd20d13ec --- /dev/null +++ 
b/pkg/components/ocmlib/inlinecompdesc/util.go
@@ -0,0 +1,90 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package inlinecompdesc
+
+import (
+	"github.com/open-component-model/ocm/pkg/contexts/ocm/compdesc"
+
+	lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1"
+)
+
+type InlineCompDesc struct {
+	ComponentDescriptor            *compdesc.ComponentDescriptor
+	ReferencedComponentDescriptors []*InlineCompDesc
+	list                           []*compdesc.ComponentDescriptor
+}
+
+func New(raw []byte) (*InlineCompDesc, error) {
+	cd, err := compdesc.Decode(raw)
+	if err != nil {
+		return nil, err
+	}
+	return &InlineCompDesc{
+		ComponentDescriptor:            cd,
+		ReferencedComponentDescriptors: []*InlineCompDesc{},
+		list:                           []*compdesc.ComponentDescriptor{cd},
+	}, nil
+}
+
+func NewFromInline(cd *compdesc.ComponentDescriptor) *InlineCompDesc {
+	return &InlineCompDesc{
+		ComponentDescriptor:            cd,
+		ReferencedComponentDescriptors: []*InlineCompDesc{},
+		list:                           []*compdesc.ComponentDescriptor{cd},
+	}
+}
+
+// Expand takes an Inline Component Descriptor as input. Inline Component Descriptors may have Component References
+// to other Inline Component Descriptors. One may wish to also define those referenced Component Descriptors inline.
+// Therefore, it is possible to define the referenced Component Descriptors within the Label of the referencing
+// Component Descriptor.
+// This function evaluates the label to expand all Component Descriptors contained in the Inline Component Descriptor.
+func (c *InlineCompDesc) Expand() error {
+	return c.expand(c)
+}
+
+// expand is an auxiliary method to transparently pass the root Inline Component Descriptor. This allows building a
+// flat list of all involved Component Descriptors while traversing the references.
+func (c *InlineCompDesc) expand(root *InlineCompDesc) error {
+	refs, err := c.ComponentDescriptor.GetComponentReferences()
+	if err != nil {
+		return err
+	}
+	for _, ref := range refs {
+		label, exists := ref.GetLabels().Get(lsv1alpha1.InlineComponentDescriptorLabel)
+		if exists {
+			cd, err := compdesc.Decode(label)
+			if err != nil {
+				return err
+			}
+			root.list = append(root.list, cd)
+
+			inlineCd := NewFromInline(cd)
+			c.ReferencedComponentDescriptors = append(c.ReferencedComponentDescriptors, inlineCd)
+			err = inlineCd.expand(root)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// GetFlatList returns a flat list of all the Component Descriptors aggregated by the top level Component Descriptors
+// (thus, the Component References are recursively resolved), including itself.
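+// Illustrative call sequence (a sketch; rawCD holds component descriptor YAML bytes):
+//
+//	inline, err := New(rawCD)
+//	flat, err := inline.GetFlatList() // triggers Expand when the inline label is present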
+func (c *InlineCompDesc) GetFlatList() ([]*compdesc.ComponentDescriptor, error) { + // Check if Expand has been called yet, otherwise call it + if len(c.ReferencedComponentDescriptors) == 0 { + _, exists := c.ComponentDescriptor.GetLabels().Get(lsv1alpha1.InlineComponentDescriptorLabel) + if exists { + err := c.Expand() + if err != nil { + return nil, err + } + } + } + + return c.list, nil +} diff --git a/pkg/components/ocmlib/ocm_test.go b/pkg/components/ocmlib/ocm_test.go new file mode 100644 index 000000000..f902d8a51 --- /dev/null +++ b/pkg/components/ocmlib/ocm_test.go @@ -0,0 +1,373 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package ocmlib_test + +import ( + "encoding/base64" + "fmt" + "os" + "path/filepath" + + "github.com/mandelsoft/vfs/pkg/memoryfs" + "github.com/mandelsoft/vfs/pkg/osfs" + "github.com/mandelsoft/vfs/pkg/vfs" + + "github.com/gardener/landscaper/pkg/components/model/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/open-component-model/ocm/pkg/runtime" + + . "github.com/open-component-model/ocm/pkg/testutils" + + "github.com/gardener/landscaper/apis/core/v1alpha1" +) + +const ( + LOCALCNUDIEREPOPATH = "../testdata/localcnudierepo" + LOCALOCMREPOPATH = "../testdata/localocmrepo" + COMPDESC_V2_FILENAME = "component-descriptor-v2.yaml" + COMPDESC_V3_FILENAME = "component-descriptor-v3.yaml" + + USERNAME = "testuser1" + PASSWORD = "testpassword1" + HOSTNAME1 = "ghcr.io" + HOSTNAME2 = "https://index.docker.io/v1/" +) + +// Prepare Test Data +var ( + componentReference = ` +{ + "repositoryContext": { + "type": "local", + "filePath": "./" + }, + "componentName": "example.com/landscaper-component", + "version": "1.0.0" +} +` + + auth = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", USERNAME, PASSWORD))) + dockerconfigdata = []byte(fmt.Sprintf(` +{ + "auths": { + "%s": {"auth": "%s"}, + "%s": {"auth": "%s"} + } +} +`, HOSTNAME1, auth, HOSTNAME2, auth)) + + // TODO + // ocmconfigdata = []byte(fmt.Sprintf(` + //{ + // "type": "credentials.config.ocm.software", + // "consumers": [ + // { + // "identity": { + // "type": "OCIRegistry", + // "hostname": "%s" + // }, + // "credentials": [ + // { + // "type": "Credentials", + // "properties": { + // "username": "%s", + // "password": "%s" + // } + // } + // ] + // } + // ] + //} + //`, HOSTNAME1, USERNAME, PASSWORD)) +) + +// TODO +var _ = XDescribe("ocm-lib facade implementation", func() { + // TODO + //ctx := context.Background() + //factory := ocmlib.Factory{} + + It("get component version from component descriptor reference (from local repository)", func() { + // as this test uses the local repository implementation, it tests that the ocmlib-facade's GetComponentVersion + // method can deal with the legacy ComponentDescriptorReference type rather than testing ocmlib functionality + cdref := &v1alpha1.ComponentDescriptorReference{} + MustBeSuccessful(runtime.DefaultYAMLEncoding.Unmarshal([]byte(componentReference), &cdref)) + + // TODO + //r := Must(factory.NewRegistryAccess(ctx, nil, nil, nil, &config.LocalRegistryConfiguration{RootPath: LOCALCNUDIEREPOPATH}, nil, nil, nil)) + // + //cv := Must(r.GetComponentVersion(ctx, cdref)) + //Expect(cv).NotTo(BeNil()) + }) + + It("get component descriptor with v2 as input", func() { + // check that the component descriptor is not altered by the ocmlib-facade + compdesc := &types.ComponentDescriptor{} + compdescData := 
Must(vfs.ReadFile(osfs.New(), filepath.Join(LOCALCNUDIEREPOPATH, COMPDESC_V2_FILENAME)))
+		Expect(runtime.DefaultYAMLEncoding.Unmarshal(compdescData, compdesc)).To(Succeed())
+
+		cdref := &v1alpha1.ComponentDescriptorReference{}
+		MustBeSuccessful(runtime.DefaultYAMLEncoding.Unmarshal([]byte(componentReference), &cdref))
+
+		// TODO
+		//r := Must(factory.NewRegistryAccess(ctx, nil, nil, nil, &config.LocalRegistryConfiguration{RootPath: LOCALCNUDIEREPOPATH}, nil, nil, nil))
+		//cv := Must(r.GetComponentVersion(ctx, cdref))
+		//
+		//cd, _ := cv.GetComponentDescriptor()
+		//Expect(reflect.DeepEqual(cd, compdesc)).To(BeTrue())
+	})
+
+	It("get component descriptor with v3 as input", func() {
+		// component-descriptor-v2 and component-descriptor-v3 describe identical components in different ocm schema
+		// versions, and the ocmlib-facade should decode even the v3 version correctly into the landscaper's internal
+		// v2 representation
+		compdesc := &types.ComponentDescriptor{}
+		compdescData := Must(vfs.ReadFile(osfs.New(), filepath.Join(LOCALCNUDIEREPOPATH, COMPDESC_V2_FILENAME)))
+		Expect(runtime.DefaultYAMLEncoding.Unmarshal(compdescData, compdesc)).To(Succeed())
+
+		cdref := &v1alpha1.ComponentDescriptorReference{}
+		MustBeSuccessful(runtime.DefaultYAMLEncoding.Unmarshal([]byte(componentReference), &cdref))
+
+		// TODO
+		//r := Must(factory.NewRegistryAccess(ctx, nil, nil, nil, &config.LocalRegistryConfiguration{RootPath: LOCALOCMREPOPATH}, nil, nil, nil))
+		//cv := Must(r.GetComponentVersion(ctx, cdref))
+		//
+		//cd, _ := cv.GetComponentDescriptor()
+		//Expect(reflect.DeepEqual(cd, compdesc)).To(BeTrue())
+	})
+
+	It("dockerconfig credentials from filesystem", func() {
+		// Prepare memory test filesystem with dockerconfig credentials
+		fs := memoryfs.New()
+		Expect(fs.MkdirAll("testdata", 0o777)).To(Succeed())
+		dockerconfigs := map[string][]byte{"dockerconfig.json": dockerconfigdata}
+		for name, config := range dockerconfigs {
+			f := Must(fs.OpenFile(filepath.Join("testdata", name), os.O_CREATE|os.O_RDWR, 0o777))
+			_ = Must(f.Write(config))
+			Expect(f.Close()).To(Succeed())
+		}
+		// Create a Registry Access and check whether credentials are properly set and can be found
+		// TODO
+		//r := Must(factory.NewRegistryAccess(ctx, fs, nil, nil, nil, &config.OCIConfiguration{
+		//	ConfigFiles: []string{"testdata/dockerconfig.json"},
+		//}, nil)).(*ocmlib.RegistryAccess)
+		//creds := Must(identity.GetCredentials(r.OCMContext(), "ghcr.io", "/test/repo"))
+		//props := creds.Properties()
+		//Expect(props["username"]).To(Equal(USERNAME))
+		//Expect(props["password"]).To(Equal(PASSWORD))
+	})
+
+	It("dockerconfig credentials from secrets", func() {
+		// Prepare secret with dockerconfig credentials
+		// TODO
+		//secrets := []corev1.Secret{{
+		//	Data: map[string][]byte{corev1.DockerConfigJsonKey: dockerconfigdata},
+		//}}
+		// Create a Registry Access and check whether credentials are properly set and can be found
+		//r := Must(factory.NewRegistryAccess(ctx, nil, secrets, nil, nil, nil, nil)).(*ocmlib.RegistryAccess)
+		//creds := Must(identity.GetCredentials(r.OCMContext(), "ghcr.io", "/test/repo"))
+		//props := creds.Properties()
+		//Expect(props["username"]).To(Equal(USERNAME))
+		//Expect(props["password"]).To(Equal(PASSWORD))
+	})
+
+	It("ocm credentials from secrets", func() {
+		// Prepare secret with ocmconfig credentials
+		// TODO
+		//secrets := []corev1.Secret{{
+		//	Data: map[string][]byte{".ocmcredentialconfig": ocmconfigdata},
+		//}}
+		//r := Must(factory.NewRegistryAccess(ctx, nil, secrets, 
nil, nil, nil, nil)).(*ocmlib.RegistryAccess) + //creds := Must(identity.GetCredentials(r.OCMContext(), HOSTNAME1, "/test/repo")) + //props := creds.Properties() + //Expect(props["username"]).To(Equal(USERNAME)) + //Expect(props["password"]).To(Equal(PASSWORD)) + }) + + It("blueprint from inline component descriptor with single inline component and blob file system", func() { + inlineComponentReference := ` +repositoryContext: + type: inline + compDescDirPath: / + blobDirPath: /blobs + fileSystem: + component-descriptor1.yaml: | + meta: + schemaVersion: v2 + + component: + name: example.com/landscaper-component + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + sources: [] + componentReferences: [] + + resources: + - name: blueprint + type: blueprint + version: 1.0.0 + relation: local + access: + type: localFilesystemBlob + mediaType: application/vnd.gardener.landscaper.blueprint.v1+tar+gzip + filename: blueprint + blobs: + blueprint: + blueprint.yaml: | + apiVersion: landscaper.gardener.cloud/v1alpha1 + kind: Blueprint + + annotations: + local/name: root-a + local/version: 1.0.0 + + imports: + - name: imp-a + type: data + schema: + type: string + + exports: + - name: exp-a + type: data + schema: + type: string + + deployExecutions: + - type: GoTemplate + template: | + deployItems: + - name: main + type: landscaper.gardener.cloud/mock + config: + apiVersion: mock.deployer.landscaper.gardener.cloud/v1alpha1 + kind: ProviderConfiguration + providerStatus: + apiVersion: mock.deployer.landscaper.gardener.cloud/v1alpha1 + kind: ProviderStatus + imp: {{ index .imports "imp-a" }} + export: + exp-a: exp-mock + + exportExecutions: + - type: GoTemplate + template: | + exports: + exp-a: {{ index .values.deployitems.main "exp-a" }} + +componentName: example.com/landscaper-component +version: 1.0.0 +` + cdref := &v1alpha1.ComponentDescriptorReference{} + MustBeSuccessful(runtime.DefaultYAMLEncoding.Unmarshal([]byte(inlineComponentReference), &cdref)) + // TODO + //r := Must(factory.NewRegistryAccess(ctx, nil, nil, nil, &config.LocalRegistryConfiguration{RootPath: LOCALCNUDIEREPOPATH}, nil, nil, nil)) + //cv := Must(r.GetComponentVersion(ctx, cdref)) + //Expect(cv).NotTo(BeNil()) + //res := Must(cv.GetResource("blueprint", nil)) + //content := Must(res.GetTypedContent(ctx)) + //bp, ok := content.Resource.(*blueprints.Blueprint) + //Expect(ok).To(BeTrue()) + //_ = bp + }) + + It("blueprint from inline component descriptor with separate inline component and blob file system", func() { + inlineComponentReference := ` +repositoryContext: + type: inline + fileSystem: + component-descriptor1.yaml: | + meta: + schemaVersion: v2 + + component: + name: example.com/landscaper-component + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + sources: [] + componentReferences: [] + + resources: + - name: blueprint + type: blueprint + version: 1.0.0 + relation: local + access: + type: localFilesystemBlob + mediaType: application/vnd.gardener.landscaper.blueprint.v1+tar+gzip + filename: blueprint + blobFs: + blueprint: + blueprint.yaml: | + apiVersion: landscaper.gardener.cloud/v1alpha1 + kind: Blueprint + + annotations: + local/name: root-a + local/version: 1.0.0 + + imports: + - name: imp-a + type: data + schema: + type: string + + exports: + - name: exp-a + type: data + schema: + type: string + + deployExecutions: + - type: GoTemplate + template: | + deployItems: + - name: main + type: 
landscaper.gardener.cloud/mock + config: + apiVersion: mock.deployer.landscaper.gardener.cloud/v1alpha1 + kind: ProviderConfiguration + providerStatus: + apiVersion: mock.deployer.landscaper.gardener.cloud/v1alpha1 + kind: ProviderStatus + imp: {{ index .imports "imp-a" }} + export: + exp-a: exp-mock + + exportExecutions: + - type: GoTemplate + template: | + exports: + exp-a: {{ index .values.deployitems.main "exp-a" }} + +componentName: example.com/landscaper-component +version: 1.0.0 +` + cdref := &v1alpha1.ComponentDescriptorReference{} + MustBeSuccessful(runtime.DefaultYAMLEncoding.Unmarshal([]byte(inlineComponentReference), &cdref)) + // TODO + //r := Must(factory.NewRegistryAccess(ctx, nil, nil, nil, &config.LocalRegistryConfiguration{RootPath: LOCALCNUDIEREPOPATH}, nil, nil, nil)) + //cv := Must(r.GetComponentVersion(ctx, cdref)) + //Expect(cv).NotTo(BeNil()) + //res := Must(cv.GetResource("blueprint", nil)) + //content := Must(res.GetTypedContent(ctx)) + //bp, ok := content.Resource.(*blueprints.Blueprint) + //Expect(ok).To(BeTrue()) + //_ = bp + }) +}) diff --git a/pkg/components/ocmlib/registries/resourcetypehandlers.go b/pkg/components/ocmlib/registries/resourcetypehandlers.go new file mode 100644 index 000000000..8b158bc4f --- /dev/null +++ b/pkg/components/ocmlib/registries/resourcetypehandlers.go @@ -0,0 +1,49 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package registries + +import ( + "context" + "sync" + + "github.com/open-component-model/ocm/pkg/contexts/ocm" + + "github.com/gardener/landscaper/pkg/components/model" +) + +var Registry = New() + +type ResourceHandler interface { + GetResourceContent(ctx context.Context, r model.Resource, access ocm.ResourceAccess) (*model.TypedResourceContent, error) +} + +type ResourceHandlerRegistry struct { + lock sync.Mutex + handlers map[string]ResourceHandler +} + +func New() *ResourceHandlerRegistry { + return &ResourceHandlerRegistry{ + handlers: map[string]ResourceHandler{}, + } +} + +func (r *ResourceHandlerRegistry) Register(typ string, handler ResourceHandler) { + r.lock.Lock() + defer r.lock.Unlock() + + r.handlers[typ] = handler +} + +func (r *ResourceHandlerRegistry) Get(typ string) ResourceHandler { + r.lock.Lock() + defer r.lock.Unlock() + + res, ok := r.handlers[typ] + if !ok { + return nil + } + return res +} diff --git a/pkg/components/ocmlib/registryaccess.go b/pkg/components/ocmlib/registryaccess.go new file mode 100644 index 000000000..0db33d964 --- /dev/null +++ b/pkg/components/ocmlib/registryaccess.go @@ -0,0 +1,97 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package ocmlib + +import ( + "context" + "reflect" + + v2 "github.com/gardener/component-spec/bindings-go/apis/v2" + "github.com/open-component-model/ocm/pkg/contexts/ocm/compdesc" + + "github.com/gardener/landscaper/pkg/components/model/types" + + "github.com/open-component-model/ocm/pkg/contexts/ocm" + "github.com/open-component-model/ocm/pkg/runtime" + + lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1" + "github.com/gardener/landscaper/pkg/components/model" + _ "github.com/gardener/landscaper/pkg/components/ocmlib/repository/inline" + _ "github.com/gardener/landscaper/pkg/components/ocmlib/repository/local" +) + +type RegistryAccess struct { + octx ocm.Context + session ocm.Session + inlineSpec ocm.RepositorySpec + inlineRepository ocm.Repository + 
resolver ocm.ComponentVersionResolver +} + +var _ model.RegistryAccess = (*RegistryAccess)(nil) + +func (r *RegistryAccess) NewComponentVersion(cv ocm.ComponentVersionAccess) (model.ComponentVersion, error) { + // Get ocm-lib Component Descriptor + cd := cv.GetDescriptor() + data, err := compdesc.Encode(cd, compdesc.SchemaVersion(v2.SchemaVersion)) + if err != nil { + return nil, err + } + + // Create Landscaper Component Descriptor from the ocm-lib Component Descriptor + lscd := types.ComponentDescriptor{} + err = runtime.DefaultYAMLEncoding.Unmarshal(data, &lscd) + if err != nil { + return nil, err + } + + //return &ComponentVersion{ + // registryAccess: r, + // componentVersionAccess: cv, + // componentDescriptorV2: lscd, + //}, nil + return nil, model.NotImplemented() +} + +func (r *RegistryAccess) GetComponentVersion(ctx context.Context, cdRef *lsv1alpha1.ComponentDescriptorReference) (_ model.ComponentVersion, rerr error) { + spec, err := r.octx.RepositorySpecForConfig(cdRef.RepositoryContext.Raw, runtime.DefaultYAMLEncoding) + if err != nil { + return nil, err + } + + var cv ocm.ComponentVersionAccess + // check if repository context from inline component descriptor should be used + if r.inlineRepository != nil && reflect.DeepEqual(spec, r.inlineSpec) { + // in this case, resolver knows an inline repository as well as the repository specified by the repository + // context of the inline component descriptor + cv, err = r.session.LookupComponentVersion(r.resolver, cdRef.ComponentName, cdRef.Version) + } else { + // if there is no inline repository or the repository context is different from the one specified in the inline + // component descriptor, we need to look up the repository specified by the component descriptor reference + var repo ocm.Repository + repo, err = r.session.LookupRepository(r.octx, spec) + if err != nil { + return nil, err + } + + cv, err = r.session.LookupComponentVersion(repo, cdRef.ComponentName, cdRef.Version) + } + if err != nil { + return nil, err + } + return r.NewComponentVersion(cv) +} + +func (r *RegistryAccess) OCMContext() ocm.Context { + return r.octx +} + +func (r *RegistryAccess) Close() error { + err := r.session.Close() + if err != nil { + return err + } + return nil +} diff --git a/pkg/components/ocmlib/repository/inline/inlinespec.go b/pkg/components/ocmlib/repository/inline/inlinespec.go new file mode 100644 index 000000000..5dc5f3f6a --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/inlinespec.go @@ -0,0 +1,119 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package inline + +import ( + "encoding/json" + + "github.com/mandelsoft/vfs/pkg/vfs" + "github.com/mandelsoft/vfs/pkg/yamlfs" + "github.com/open-component-model/ocm/pkg/contexts/ocm/cpi" + "github.com/open-component-model/ocm/pkg/errors" + . 
"github.com/open-component-model/ocm/pkg/exception" + "github.com/open-component-model/ocm/pkg/runtime" + + "github.com/gardener/landscaper/pkg/components/ocmlib/repository" +) + +const ( + Type = repository.InlineType + TypeV1 = Type + runtime.VersionSeparator + "v1" +) + +var versions = cpi.NewRepositoryTypeVersionScheme(Type) + +func init() { + Must(versions.Register(cpi.NewRepositoryTypeByConverter[*repository.RepositorySpec, *RepositorySpecV1](Type, &converterV1{}, nil))) + Must(versions.Register(cpi.NewRepositoryTypeByConverter[*repository.RepositorySpec, *RepositorySpecV1](TypeV1, &converterV1{}, nil))) + cpi.RegisterRepositoryTypeVersions(versions) +} + +type RepositorySpecV1 struct { + runtime.ObjectVersionedType `json:",inline"` + FileSystem json.RawMessage `json:"fileSystem,omitempty"` + CompDescDirPath string `json:"compDescDirPath,omitempty"` + BlobFs json.RawMessage `json:"blobFs,omitempty"` + BlobFsMode string `json:"blobFsMode"` + BlobDirPath string `json:"blobDirPath,omitempty"` +} + +func NewRepositorySpecV1(fileSystem vfs.FileSystem, compDescDirPath string, blobFs vfs.FileSystem, blobDirPath string) (*repository.RepositorySpec, error) { + spec := &repository.RepositorySpec{ + InternalVersionedTypedObject: runtime.NewInternalVersionedTypedObject[cpi.RepositorySpec](versions, Type), + FileSystem: fileSystem, + CompDescDirPath: compDescDirPath, + BlobFs: blobFs, + BlobDirPath: blobDirPath, + } + return spec, nil +} + +type converterV1 struct{} + +func (_ converterV1) ConvertFrom(in *repository.RepositorySpec) (*RepositorySpecV1, error) { + var err error + var compdescYaml json.RawMessage + var blobYaml json.RawMessage + + if fs, ok := in.FileSystem.(yamlfs.YamlFileSystem); ok { + compdescYaml, err = fs.Data() + if err != nil { + return nil, err + } + } else { + return nil, errors.New("cannot serialize non-yaml filesystem") + } + + if in.BlobFs != nil && in.BlobFs != in.FileSystem { + if in.BlobFsMode != repository.CONTEXT { + if fs, ok := in.BlobFs.(yamlfs.YamlFileSystem); ok { + blobYaml, err = fs.Data() + if err != nil { + return nil, err + } + } else { + return nil, errors.New("cannot serialize non-yaml filesystem") + } + } + } + + return &RepositorySpecV1{ + ObjectVersionedType: runtime.NewVersionedObjectType(in.Type), + FileSystem: compdescYaml, + CompDescDirPath: in.CompDescDirPath, + BlobFs: blobYaml, + BlobFsMode: in.BlobFsMode, + BlobDirPath: in.BlobDirPath, + }, nil +} + +func (_ converterV1) ConvertTo(in *RepositorySpecV1) (*repository.RepositorySpec, error) { + var err error + var compfs vfs.FileSystem + var blobfs vfs.FileSystem + + if in.FileSystem != nil { + compfs, err = yamlfs.New(in.FileSystem) + if err != nil { + return nil, err + } + } + + if in.BlobFs != nil { + blobfs, err = yamlfs.New(in.BlobFs) + if err != nil { + return nil, err + } + } + + return &repository.RepositorySpec{ + InternalVersionedTypedObject: runtime.NewInternalVersionedTypedObject[cpi.RepositorySpec](versions, in.Type), + FileSystem: compfs, + CompDescDirPath: in.CompDescDirPath, + BlobFs: blobfs, + BlobFsMode: in.BlobFsMode, + BlobDirPath: in.BlobDirPath, + }, nil +} diff --git a/pkg/components/ocmlib/repository/inline/repository_test.go b/pkg/components/ocmlib/repository/inline/repository_test.go new file mode 100644 index 000000000..3575855d3 --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/repository_test.go @@ -0,0 +1,146 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + 
+package inline_test + +import ( + "github.com/mandelsoft/filepath/pkg/filepath" + "github.com/mandelsoft/vfs/pkg/osfs" + "github.com/mandelsoft/vfs/pkg/vfs" + . "github.com/onsi/ginkgo/v2" + "github.com/open-component-model/ocm/pkg/contexts/datacontext/attrs/vfsattr" + tenv "github.com/open-component-model/ocm/pkg/env" + "github.com/open-component-model/ocm/pkg/env/builder" + "github.com/open-component-model/ocm/pkg/runtime" + . "github.com/open-component-model/ocm/pkg/testutils" +) + +const ( + // Component Names + COMPONENT_NAME = "example.com/root" + COMPONENT_VERSION = "1.0.0" + RESOURCE_NAME = "test" + + DISTINCT_REPOSITORY = "testdata/distinct" + DIRECTORY_REPOSITORY = "testdata/directory" +) + +// TODO +var _ = XDescribe("ocm-lib based landscaper local repository", func() { + var env *builder.Builder + + BeforeEach(func() { + env = builder.NewBuilder(tenv.NewEnvironment(tenv.TestData())) + }) + + AfterEach(func() { + _ = env.Cleanup() + }) + + It("repository spec with inline component descriptors and local resources", func() { + specdata := Must(vfs.ReadFile(osfs.New(), filepath.Join("testdata", "spec-with-local-resource.yaml"))) + vfsattr.Set(env.OCMContext(), env) + Must(env.OCMContext().RepositorySpecForConfig(specdata, runtime.DefaultYAMLEncoding)) + // TODO + //repo := Must(spec.Repository(env.OCMContext(), nil)) + //defer Close(repo) + //cv := Must(repo.LookupComponentVersion(COMPONENT_NAME, COMPONENT_VERSION)) + //defer Close(cv) + //ref := Must(cv.GetReferenceByIndex(0)) + //refcv := Must(repo.LookupComponentVersion(ref.ComponentName, ref.Version)) + //defer Close(refcv) + //res := Must(cv.GetResourcesByName(RESOURCE_NAME)) + //acc := Must(res[0].AccessMethod()) + //defer Close(acc) + //data := Must(acc.Get()) + //Expect(string(data)).To(Equal("test")) + }) + + It("repository spec with inline component descriptors and inline resources", func() { + specdata := Must(vfs.ReadFile(osfs.New(), filepath.Join("testdata", "spec-with-inline-resource.yaml"))) + + Must(env.OCMContext().RepositorySpecForConfig(specdata, runtime.DefaultYAMLEncoding)) + // TODO + //repo := Must(spec.Repository(env.OCMContext(), nil)) + //defer Close(repo) + //cv := Must(repo.LookupComponentVersion(COMPONENT_NAME, COMPONENT_VERSION)) + //defer Close(cv) + //ref := Must(cv.GetReferenceByIndex(0)) + //refcv := Must(repo.LookupComponentVersion(ref.ComponentName, ref.Version)) + //defer Close(refcv) + //res := Must(cv.GetResourcesByName(RESOURCE_NAME)) + //acc := Must(res[0].AccessMethod()) + //defer Close(acc) + //data := Must(acc.Get()) + //Expect(string(data)).To(Equal("test")) + }) + + It("support for legacy inline component descriptors and resources", func() { + // TODO + + //inline1 := Must(vfs.ReadFile(osfs.New(), filepath.Join("testdata", "legacy", "component-descriptor1.yaml"))) + //inline2 := Must(vfs.ReadFile(osfs.New(), filepath.Join("testdata", "legacy", "component-descriptor2.yaml"))) + //resource1 := Must(vfs.ReadFile(osfs.New(), filepath.Join("testdata", "legacy", "resource.yaml"))) + // + //var list []*compdesc.ComponentDescriptor + //list = append(list, Must(compdesc.Decode(inline1))) + //list = append(list, Must(compdesc.Decode(inline2))) + // + //memfs := memoryfs.New() + //r1 := Must(memfs.Create("blob1")) + //Must(r1.Write(resource1)) + //MustBeSuccessful(r1.Close()) + // + //repo := Must(repository.NewRepository(env.OCMContext(), repository.NewMemoryCompDescProvider(list), memfs)) + //defer Close(repo) + //cv := Must(repo.LookupComponentVersion(COMPONENT_NAME, COMPONENT_VERSION)) + 
//defer Close(cv) + //ref := Must(cv.GetReferenceByIndex(0)) + //refcv := Must(repo.LookupComponentVersion(ref.ComponentName, ref.Version)) + //defer Close(refcv) + //res := Must(cv.GetResourcesByName(RESOURCE_NAME)) + //acc := Must(res[0].AccessMethod()) + //defer Close(acc) + //data := Must(acc.Get()) + //Expect(string(data)).To(Equal("test")) + }) + + It("repository with component descriptors and resources stored in distinct directories", func() { + // TODO + //spec := Must(inline.NewRepositorySpecV1(env, filepath.Join(DISTINCT_REPOSITORY, "compdescs"), nil, filepath.Join(DISTINCT_REPOSITORY, "blobs"))) + //repo := Must(spec.Repository(env.OCMContext(), nil)) + //defer Close(repo) + //cv := Must(repo.LookupComponentVersion(COMPONENT_NAME, COMPONENT_VERSION)) + //defer Close(cv) + //res := Must(cv.GetResourcesByName(RESOURCE_NAME)) + //acc := Must(res[0].AccessMethod()) + //defer Close(acc) + //bufferA := Must(acc.Get()) + // + //bufferB := Must(vfs.ReadFile(env, filepath.Join(DISTINCT_REPOSITORY, "blobs", "blob1"))) + //Expect(bufferA).To(Equal(bufferB)) + }) + + It("repository with a directory resource", func() { + // TODO + // Must(inline.NewRepositorySpecV1(env, DIRECTORY_REPOSITORY, nil, DIRECTORY_REPOSITORY)) + //repo := Must(spec.Repository(env.OCMContext(), nil)) + //defer Close(repo) + //cv := Must(repo.LookupComponentVersion(COMPONENT_NAME, COMPONENT_VERSION)) + //defer Close(cv) + //res := Must(cv.GetResourcesByName(RESOURCE_NAME)) + //acc := Must(res[0].AccessMethod()) + //defer Close(acc) + //data := Must(acc.Reader()) + //defer Close(data) + // + //mfs := memoryfs.New() + //_, _, err := tarutils.ExtractTarToFsWithInfo(mfs, data) + //Expect(err).ToNot(HaveOccurred()) + //bufferA := Must(vfs.ReadFile(mfs, "testblob")) + //bufferB := Must(vfs.ReadFile(env, filepath.Join(DIRECTORY_REPOSITORY, "blob1", "testblob"))) + //Expect(bufferA).To(Equal(bufferB)) + }) + +}) diff --git a/pkg/components/ocmlib/repository/inline/suite_test.go b/pkg/components/ocmlib/repository/inline/suite_test.go new file mode 100644 index 000000000..45f4892e6 --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/suite_test.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package inline_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestConfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "LocalBlob Test Suite") +} diff --git a/pkg/components/ocmlib/repository/inline/testdata/directory/blob1/testblob b/pkg/components/ocmlib/repository/inline/testdata/directory/blob1/testblob new file mode 100644 index 000000000..30d74d258 --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/testdata/directory/blob1/testblob @@ -0,0 +1 @@ +test \ No newline at end of file diff --git a/pkg/components/ocmlib/repository/inline/testdata/directory/component-descriptor.yaml b/pkg/components/ocmlib/repository/inline/testdata/directory/component-descriptor.yaml new file mode 100644 index 000000000..5367c4ce4 --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/testdata/directory/component-descriptor.yaml @@ -0,0 +1,29 @@ +# SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors +# +# SPDX-License-Identifier: Apache-2.0 + +meta: + schemaVersion: v2 + +component: + name: example.com/root + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + sources: [] + componentReferences: [] + + resources: + - name: test + type: PlainText + version: 1.0.0 + relation: local + access: + type: localFilesystemBlob + mediaType: text/plain + filename: blob1 \ No newline at end of file diff --git a/pkg/components/ocmlib/repository/inline/testdata/distinct/blobs/blob1 b/pkg/components/ocmlib/repository/inline/testdata/distinct/blobs/blob1 new file mode 100644 index 000000000..30d74d258 --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/testdata/distinct/blobs/blob1 @@ -0,0 +1 @@ +test \ No newline at end of file diff --git a/pkg/components/ocmlib/repository/inline/testdata/distinct/compdescs/component-descriptor.yaml b/pkg/components/ocmlib/repository/inline/testdata/distinct/compdescs/component-descriptor.yaml new file mode 100644 index 000000000..5367c4ce4 --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/testdata/distinct/compdescs/component-descriptor.yaml @@ -0,0 +1,29 @@ +# SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors +# +# SPDX-License-Identifier: Apache-2.0 + +meta: + schemaVersion: v2 + +component: + name: example.com/root + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + sources: [] + componentReferences: [] + + resources: + - name: test + type: PlainText + version: 1.0.0 + relation: local + access: + type: localFilesystemBlob + mediaType: text/plain + filename: blob1 \ No newline at end of file diff --git a/pkg/components/ocmlib/repository/inline/testdata/legacy/component-descriptor1.yaml b/pkg/components/ocmlib/repository/inline/testdata/legacy/component-descriptor1.yaml new file mode 100644 index 000000000..7651df329 --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/testdata/legacy/component-descriptor1.yaml @@ -0,0 +1,28 @@ +meta: + schemaVersion: v2 + +component: + name: example.com/root + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + sources: [] + componentReferences: + - name: rootref + version: 1.0.0 + componentName: example.com/rootref + + resources: + - name: test + type: PlainText + version: 1.0.0 + relation: local + access: + type: localFilesystemBlob + mediaType: text/plain + filename: blob1 \ No newline at end of file diff --git a/pkg/components/ocmlib/repository/inline/testdata/legacy/component-descriptor2.yaml 
b/pkg/components/ocmlib/repository/inline/testdata/legacy/component-descriptor2.yaml new file mode 100644 index 000000000..0c944af1d --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/testdata/legacy/component-descriptor2.yaml @@ -0,0 +1,25 @@ +meta: + schemaVersion: v2 + +component: + name: example.com/rootref + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + sources: [] + componentReferences: [] + + resources: + - name: test + type: PlainText + version: 1.0.0 + relation: local + access: + type: localFilesystemBlob + mediaType: text/plain + filename: blob1 \ No newline at end of file diff --git a/pkg/components/ocmlib/repository/inline/testdata/legacy/resource.yaml b/pkg/components/ocmlib/repository/inline/testdata/legacy/resource.yaml new file mode 100644 index 000000000..30d74d258 --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/testdata/legacy/resource.yaml @@ -0,0 +1 @@ +test \ No newline at end of file diff --git a/pkg/components/ocmlib/repository/inline/testdata/spec-with-inline-resource.yaml b/pkg/components/ocmlib/repository/inline/testdata/spec-with-inline-resource.yaml new file mode 100644 index 000000000..bd4560201 --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/testdata/spec-with-inline-resource.yaml @@ -0,0 +1,61 @@ +type: inline +fileSystem: + component-descriptor1.yaml: | + meta: + schemaVersion: v2 + + component: + name: example.com/root + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + sources: [] + componentReferences: + - name: rootref + version: 1.0.0 + componentName: example.com/rootref + + resources: + - name: test + type: PlainText + version: 1.0.0 + relation: local + access: + type: localFilesystemBlob + mediaType: text/plain + filename: blob1 + + component-descriptor2.yaml: | + meta: + schemaVersion: v2 + + component: + name: example.com/rootref + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + sources: [] + componentReferences: [] + + resources: + - name: test + type: PlainText + version: 1.0.0 + relation: local + access: + type: localFilesystemBlob + mediaType: text/plain + filename: blob1 +blobFs: + blob1: | + test \ No newline at end of file diff --git a/pkg/components/ocmlib/repository/inline/testdata/spec-with-local-resource.yaml b/pkg/components/ocmlib/repository/inline/testdata/spec-with-local-resource.yaml new file mode 100644 index 000000000..90812457a --- /dev/null +++ b/pkg/components/ocmlib/repository/inline/testdata/spec-with-local-resource.yaml @@ -0,0 +1,60 @@ +type: inline +fileSystem: + component-descriptor1.yaml: | + meta: + schemaVersion: v2 + + component: + name: example.com/root + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + sources: [] + componentReferences: + - name: rootref + version: 1.0.0 + componentName: example.com/rootref + + resources: + - name: test + type: PlainText + version: 1.0.0 + relation: local + access: + type: localFilesystemBlob + mediaType: text/plain + filename: resource.yaml + + component-descriptor2.yaml: | + meta: + schemaVersion: v2 + + component: + name: example.com/rootref + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + sources: [] + componentReferences: [] + + resources: + - name: test + type: PlainText + version: 1.0.0 + relation: local + access: + type: localFilesystemBlob + mediaType: text/plain + 
filename: resource.yaml
+blobFsMode: context
+blobDirPath: /testdata/legacy
\ No newline at end of file
diff --git a/pkg/components/ocmlib/repository/internalspec.go b/pkg/components/ocmlib/repository/internalspec.go
new file mode 100644
index 000000000..b415177c1
--- /dev/null
+++ b/pkg/components/ocmlib/repository/internalspec.go
@@ -0,0 +1,127 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package repository
+
+import (
+	"github.com/gardener/landscaper/pkg/components/model"
+
+	"github.com/mandelsoft/vfs/pkg/projectionfs"
+	"github.com/mandelsoft/vfs/pkg/vfs"
+	"github.com/open-component-model/ocm/pkg/contexts/credentials"
+	"github.com/open-component-model/ocm/pkg/contexts/datacontext/attrs/vfsattr"
+	"github.com/open-component-model/ocm/pkg/contexts/ocm/compdesc"
+	"github.com/open-component-model/ocm/pkg/contexts/ocm/cpi"
+	"github.com/open-component-model/ocm/pkg/contexts/ocm/repositories/virtual"
+	"github.com/open-component-model/ocm/pkg/errors"
+	"github.com/open-component-model/ocm/pkg/runtime"
+)
+
+const (
+	// FILESYSTEM resolves blob references against the same filesystem that holds the component descriptors.
+	FILESYSTEM = "filesystem"
+	// CONTEXT resolves blob references against the filesystem attached to the OCM context (vfsattr).
+	CONTEXT = "context"
+)
+
+type ComponentDescriptorProvider interface {
+	List() ([]*compdesc.ComponentDescriptor, error)
+}
+
+type RepositorySpec struct {
+	runtime.InternalVersionedTypedObject[cpi.RepositorySpec]
+	FileSystem      vfs.FileSystem
+	CompDescDirPath string
+	BlobFs          vfs.FileSystem
+	BlobFsMode      string
+	BlobDirPath     string
+}
+
+func (r RepositorySpec) MarshalJSON() ([]byte, error) {
+	return nil, model.NotImplemented()
+	// return runtime.MarshalVersionedTypedObject(&r)
+}
+
+func (r *RepositorySpec) Key() (string, error) {
+	return "", model.NotImplemented()
+	//var fs string
+	//var blobfs string
+	//
+	//if r.FileSystem != nil {
+	//	fs = fmt.Sprintf("%p", r.FileSystem)
+	//} else {
+	//	fs = "nil"
+	//}
+	//
+	//if r.BlobFs != nil {
+	//	blobfs = fmt.Sprintf("%p", r.BlobFs)
+	//} else {
+	//	blobfs = "nil"
+	//}
+	//
+	//data, err := json.Marshal(&struct {
+	//	Type            string `json:"type"`
+	//	FileSystem      string `json:"fileSystem"`
+	//	CompDescDirPath string `json:"compDescDirPath"`
+	//	BlobFs          string `json:"blobFs"`
+	//	BlobFsMode      string `json:"blobFsMode"`
+	//	BlobDirPath     string `json:"blobDirPath"`
+	//}{
+	//	Type:            r.GetType(),
+	//	FileSystem:      fs,
+	//	CompDescDirPath: r.CompDescDirPath,
+	//	BlobFs:          blobfs,
+	//	BlobFsMode:      r.BlobFsMode,
+	//	BlobDirPath:     r.BlobDirPath,
+	//})
+	//
+	//return string(data), err
+}
+
+func NewRepository(ctx cpi.Context, provider ComponentDescriptorProvider, blobfs vfs.FileSystem) (cpi.Repository, error) {
+	acc, err := NewAccess(provider, blobfs)
+	if err != nil {
+		return nil, err
+	}
+	return virtual.NewRepository(ctx, acc), nil
+}
+
+func (r *RepositorySpec) Repository(ctx cpi.Context, creds credentials.Credentials) (cpi.Repository, error) {
+	descriptorfs := r.FileSystem
+	if descriptorfs == nil {
+		descriptorfs = vfsattr.Get(ctx)
+	}
+
+	prov := NewFilesystemCompDescProvider(r.CompDescDirPath, descriptorfs)
+	blobfs := r.BlobFs
+	if blobfs == nil {
+		switch r.BlobFsMode {
+		case "":
+			r.BlobFsMode = FILESYSTEM
+			fallthrough
+		case FILESYSTEM:
+			blobfs = descriptorfs
+		case CONTEXT:
+			blobfs = vfsattr.Get(ctx)
+		default:
+			return nil, errors.ErrInvalid("blobFsMode", r.BlobFsMode)
+		}
+	}
+
+	if r.BlobDirPath != "" {
+		exists, err := vfs.DirExists(blobfs, r.BlobDirPath)
+		if err != nil {
+			return nil, err
+		}
+		if exists {
+			blobfs, err = projectionfs.New(blobfs, r.BlobDirPath)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return NewRepository(ctx, prov, blobfs)
+}
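+
+// For illustration only — a hypothetical spec (type metadata omitted, values
+// made up for this sketch) that reads component descriptors from one
+// directory and resolves blobs against the OCM context filesystem (CONTEXT
+// mode); octx stands for an ocm cpi.Context:
+//
+//	spec := &RepositorySpec{
+//		CompDescDirPath: "/compdescs",
+//		BlobFsMode:      CONTEXT,
+//		BlobDirPath:     "/blobs",
+//	}
+//	repo, err := spec.Repository(octx, nil)
+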
+func (r *RepositorySpec) AsUniformSpec(ctx cpi.Context) *cpi.UniformRepositorySpec {
+	return nil
+}
diff --git a/pkg/components/ocmlib/repository/local/localspec.go b/pkg/components/ocmlib/repository/local/localspec.go
new file mode 100644
index 000000000..5f377f825
--- /dev/null
+++ b/pkg/components/ocmlib/repository/local/localspec.go
@@ -0,0 +1,65 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package local
+
+import (
+	"github.com/mandelsoft/filepath/pkg/filepath"
+	"github.com/mandelsoft/vfs/pkg/vfs"
+	"github.com/open-component-model/ocm/pkg/contexts/ocm/cpi"
+	. "github.com/open-component-model/ocm/pkg/exception"
+	"github.com/open-component-model/ocm/pkg/runtime"
+	"github.com/open-component-model/ocm/pkg/utils"
+
+	"github.com/gardener/landscaper/pkg/components/ocmlib/repository"
+)
+
+const (
+	Type   = repository.LocalType
+	TypeV1 = Type + runtime.VersionSeparator + "v1"
+)
+
+var versions = cpi.NewRepositoryTypeVersionScheme(Type)
+
+func init() {
+	Must(versions.Register(cpi.NewRepositoryTypeByConverter[*repository.RepositorySpec, *RepositorySpecV1](Type, &converterV1{}, nil)))
+	Must(versions.Register(cpi.NewRepositoryTypeByConverter[*repository.RepositorySpec, *RepositorySpecV1](TypeV1, &converterV1{}, nil)))
+	cpi.RegisterRepositoryTypeVersions(versions)
+}
+
+type RepositorySpecV1 struct {
+	runtime.ObjectVersionedType `json:",inline"`
+	FilePath                    string         `json:"filePath"`
+	PathFileSystem              vfs.FileSystem `json:"-"`
+}
+
+// NewRepositorySpecV1 creates a spec for a local repository. The component descriptors are expected directly in
+// filePath, the corresponding blobs in filePath/blobs.
+func NewRepositorySpecV1(filePath string, pathFileSystem ...vfs.FileSystem) (*repository.RepositorySpec, error) {
+	fs := utils.Optional(pathFileSystem...)
+	spec := &repository.RepositorySpec{
+		InternalVersionedTypedObject: runtime.NewInternalVersionedTypedObject[cpi.RepositorySpec](versions, Type),
+		FileSystem:                   fs,
+		CompDescDirPath:              filePath,
+		BlobDirPath:                  filepath.Join(filePath, "blobs"),
+	}
+	return spec, nil
+}
+
+type converterV1 struct{}
+
+func (_ converterV1) ConvertFrom(in *repository.RepositorySpec) (*RepositorySpecV1, error) {
+	return &RepositorySpecV1{
+		ObjectVersionedType: runtime.NewVersionedObjectType(in.Type),
+		PathFileSystem:      in.FileSystem,
+		FilePath:            in.CompDescDirPath,
+	}, nil
+}
+
+func (_ converterV1) ConvertTo(in *RepositorySpecV1) (*repository.RepositorySpec, error) {
+	return &repository.RepositorySpec{
+		InternalVersionedTypedObject: runtime.NewInternalVersionedTypedObject[cpi.RepositorySpec](versions, in.Type),
+		FileSystem:                   in.PathFileSystem,
+		CompDescDirPath:              in.FilePath,
+		BlobDirPath:                  filepath.Join(in.FilePath, "blobs"),
+	}, nil
+}
diff --git a/pkg/components/ocmlib/repository/local/repository_test.go b/pkg/components/ocmlib/repository/local/repository_test.go
new file mode 100644
index 000000000..c2b142fac
--- /dev/null
+++ b/pkg/components/ocmlib/repository/local/repository_test.go
@@ -0,0 +1,78 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package local_test
+
+import (
+	. 
"github.com/onsi/ginkgo/v2" + tenv "github.com/open-component-model/ocm/pkg/env" + "github.com/open-component-model/ocm/pkg/env/builder" +) + +const ( + // Random Test Names + DESCRIPTOR_PATH = "testdescriptorpath" + + // Mock Directory Paths + TAR_REPOSITORY = "testdata/tar" + + // Component Names + COMPONENT_NAME = "example.com/root" + COMPONENT_VERSION = "1.0.0" + RESOURCE_NAME = "test" +) + +// TODO +var _ = XDescribe("ocm-lib based landscaper local repository", func() { + var env *builder.Builder + + BeforeEach(func() { + env = builder.NewBuilder(tenv.NewEnvironment(tenv.TestData())) + }) + + AfterEach(func() { + _ = env.Cleanup() + }) + + It("marshal/unmarshal spec v1", func() { + //spec := Must(local.NewRepositorySpecV1(DESCRIPTOR_PATH)) + //Expect(spec).ToNot(BeNil()) + //Expect(spec.FileSystem).To(BeNil()) + //Expect(spec.BlobFs).To(BeNil()) + // + //spec = Must(local.NewRepositorySpecV1(DESCRIPTOR_PATH, env)) + //Expect(spec).ToNot(BeNil()) + //Expect(spec.FileSystem).To(Equal(env)) + // + //data := Must(json.Marshal(spec)) + //Expect(string(data)).To(Equal(fmt.Sprintf("{\"type\":\"%s\",\"filePath\":\"%s\"}", local.Type, DESCRIPTOR_PATH))) + //spec1 := Must(env.OCMContext().RepositorySpecForConfig(data, runtime.DefaultJSONEncoding)).(*repository.RepositorySpec) + //// spec will not completely equal spec1 as the filesystem cannot be serialized + //Expect(spec1.Type).To(Equal(spec.Type)) + //Expect(spec1.CompDescDirPath).To(Equal(spec.CompDescDirPath)) + //Expect(spec1.BlobDirPath).To(Equal(spec.BlobDirPath)) + }) + + It("repository (from spec v1) with resource stored as tar in blob directory", func() { + // this use case pretty much resembles a component archive + + // this has to be set if the PathFileSystem within the spec is not set + // as a filesystem cannot really be serialized, this is always the case if the spec is created from a serialized + // form (e.g. coming from the installation) + //vfsattr.Set(env.OCMContext(), env) + // TODO + //spec := Must(local.NewRepositorySpecV1(TAR_REPOSITORY, env)) + //repo := Must(spec.Repository(env.OCMContext(), nil)) + //defer Close(repo) + //cv := Must(repo.LookupComponentVersion(COMPONENT_NAME, COMPONENT_VERSION)) + //defer Close(cv) + //res := Must(cv.GetResourcesByName(RESOURCE_NAME)) + //acc := Must(res[0].AccessMethod()) + //defer Close(acc) + //bytesA := Must(acc.Get()) + // + //bytesB := Must(vfs.ReadFile(env, filepath.Join(TAR_REPOSITORY, "blobs", "sha256.3ed99e50092c619823e2c07941c175ea2452f1455f570c55510586b387ec2ff2"))) + //Expect(bytesA).To(Equal(bytesB)) + }) +}) diff --git a/pkg/components/ocmlib/repository/local/suite_test.go b/pkg/components/ocmlib/repository/local/suite_test.go new file mode 100644 index 000000000..6d96cddd4 --- /dev/null +++ b/pkg/components/ocmlib/repository/local/suite_test.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package local_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestConfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "LocalBlob Test Suite") +} diff --git a/pkg/components/ocmlib/repository/local/testdata/tar/blobs/sha256.3ed99e50092c619823e2c07941c175ea2452f1455f570c55510586b387ec2ff2 b/pkg/components/ocmlib/repository/local/testdata/tar/blobs/sha256.3ed99e50092c619823e2c07941c175ea2452f1455f570c55510586b387ec2ff2 new file mode 100644 index 000000000..42299d21c Binary files /dev/null and b/pkg/components/ocmlib/repository/local/testdata/tar/blobs/sha256.3ed99e50092c619823e2c07941c175ea2452f1455f570c55510586b387ec2ff2 differ diff --git a/pkg/components/ocmlib/repository/local/testdata/tar/component-descriptor.yaml b/pkg/components/ocmlib/repository/local/testdata/tar/component-descriptor.yaml new file mode 100755 index 000000000..07072fb89 --- /dev/null +++ b/pkg/components/ocmlib/repository/local/testdata/tar/component-descriptor.yaml @@ -0,0 +1,28 @@ +# SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors +# +# SPDX-License-Identifier: Apache-2.0 + +meta: + schemaVersion: v2 +component: + name: example.com/root + version: 1.0.0 + + provider: internal + + repositoryContexts: + - type: ociRegistry + baseUrl: "/" + + componentReferences: [] + sources: [] + + resources: + - name: test + relation: local + type: PlainText + version: 1.0.0 + access: + fileName: sha256.3ed99e50092c619823e2c07941c175ea2452f1455f570c55510586b387ec2ff2 + mediaType: text/plain + type: localFilesystemBlob diff --git a/pkg/components/ocmlib/repository/providers.go b/pkg/components/ocmlib/repository/providers.go new file mode 100644 index 000000000..5d4ad2495 --- /dev/null +++ b/pkg/components/ocmlib/repository/providers.go @@ -0,0 +1,72 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package repository + +import ( + "github.com/mandelsoft/filepath/pkg/filepath" + "github.com/mandelsoft/vfs/pkg/osfs" + "github.com/mandelsoft/vfs/pkg/vfs" + "github.com/open-component-model/ocm/pkg/contexts/ocm/compdesc" + "github.com/open-component-model/ocm/pkg/utils" +) + +type FilesystemCompDescProvider struct { + CompDescFs vfs.FileSystem + CompDescDirPath string +} + +func NewFilesystemCompDescProvider(path string, fs ...vfs.FileSystem) ComponentDescriptorProvider { + return &FilesystemCompDescProvider{ + CompDescDirPath: path, + CompDescFs: utils.Optional(fs...), + } +} + +func (f *FilesystemCompDescProvider) List() ([]*compdesc.ComponentDescriptor, error) { + p := "/" + if f.CompDescDirPath != "" { + p = f.CompDescDirPath + } + fs := f.CompDescFs + if fs == nil { + fs = osfs.New() + } + var result []*compdesc.ComponentDescriptor + entries, err := vfs.ReadDir(fs, p) + if err != nil { + return nil, err + } + for _, e := range entries { + filename := filepath.Join(p, e.Name()) + // there might e.g. 
be a blob directory in this folder
+		if ok, err := vfs.IsDir(fs, filename); ok || err != nil {
+			continue
+		}
+		data, err := vfs.ReadFile(fs, filename)
+		if err != nil {
+			return nil, err
+		}
+		cd, err := compdesc.Decode(data)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, cd)
+	}
+	return result, nil
+}
+
+type MemoryCompDescProvider struct {
+	CompDescs []*compdesc.ComponentDescriptor
+}
+
+func NewMemoryCompDescProvider(descriptors []*compdesc.ComponentDescriptor) ComponentDescriptorProvider {
+	return &MemoryCompDescProvider{
+		CompDescs: descriptors,
+	}
+}
+
+func (m *MemoryCompDescProvider) List() ([]*compdesc.ComponentDescriptor, error) {
+	return m.CompDescs, nil
+}
diff --git a/pkg/components/ocmlib/repository/repository.go b/pkg/components/ocmlib/repository/repository.go
new file mode 100644
index 000000000..47fe40661
--- /dev/null
+++ b/pkg/components/ocmlib/repository/repository.go
@@ -0,0 +1,195 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package repository
+
+import (
+	"os"
+	"path"
+	"sync"
+
+	"github.com/mandelsoft/vfs/pkg/osfs"
+	"github.com/open-component-model/ocm/pkg/mime"
+	"github.com/open-component-model/ocm/pkg/utils/tarutils"
+
+	"github.com/mandelsoft/vfs/pkg/vfs"
+
+	"github.com/open-component-model/ocm/pkg/common"
+	"github.com/open-component-model/ocm/pkg/common/accessio"
+	"github.com/open-component-model/ocm/pkg/contexts/ocm/accessmethods/localblob"
+	"github.com/open-component-model/ocm/pkg/contexts/ocm/compdesc"
+	"github.com/open-component-model/ocm/pkg/contexts/ocm/cpi"
+	"github.com/open-component-model/ocm/pkg/contexts/ocm/repositories/virtual"
+	"github.com/open-component-model/ocm/pkg/errors"
+)
+
+type Index = virtual.Index[any]
+
+type ComponentAccess struct {
+	lock               sync.Mutex
+	descriptorProvider ComponentDescriptorProvider
+	blobsFs            vfs.FileSystem
+	index              *Index
+}
+
+func NewAccess(descriptorProvider ComponentDescriptorProvider, blobFs vfs.FileSystem) (*ComponentAccess, error) {
+	a := &ComponentAccess{
+		descriptorProvider: descriptorProvider,
+		blobsFs:            blobFs,
+	}
+	err := a.Index()
+	if err != nil {
+		return nil, err
+	}
+	return a, nil
+}
+
+// Index adds all Component Descriptors in the descriptorProvider to the Repository Index (in other words, it makes them
+// known to the repository).
+// This function is used to initialize the repository, but it can also be called again to update the repository in case
+// Component Descriptors were added to the descriptorProvider.
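+//
+// For illustration only (hypothetical usage, not part of this change):
+//
+//	access, err := NewAccess(provider, blobFs) // NewAccess indexes once on construction
+//	// ... later, after further descriptors were added to the provider:
+//	err = access.Index() // rebuilds the index from provider.List()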
+func (a *ComponentAccess) Index() error { + a.lock.Lock() + defer a.lock.Unlock() + + a.index = virtual.NewIndex[any]() + + entries, err := a.descriptorProvider.List() + if err != nil { + return err + } + for _, cd := range entries { + err = a.index.Add(cd, nil) + if err != nil { + return err + } + } + return nil +} + +func (a *ComponentAccess) ComponentLister() cpi.ComponentLister { + a.lock.Lock() + defer a.lock.Unlock() + + return a.index +} + +func (a *ComponentAccess) ExistsComponentVersion(name string, version string) (bool, error) { + a.lock.Lock() + defer a.lock.Unlock() + + e := a.index.Get(name, version) + return e != nil, nil +} + +func (a *ComponentAccess) ListVersions(comp string) ([]string, error) { + a.lock.Lock() + defer a.lock.Unlock() + + return a.index.GetVersions(comp), nil +} + +func (a *ComponentAccess) GetComponentVersion(comp, version string) (virtual.VersionAccess, error) { + var cd *compdesc.ComponentDescriptor + + a.lock.Lock() + defer a.lock.Unlock() + + i := a.index.Get(comp, version) + if i == nil { + return nil, errors.ErrNotFound(cpi.KIND_COMPONENTVERSION, common.NewNameVersion(comp, version).String()) + } else { + cd = i.CD() + } + return &ComponentVersionAccess{a, cd.GetName(), cd.GetVersion(), cd.Copy()}, nil +} + +func (a *ComponentAccess) IsReadOnly() bool { + return true +} + +func (a *ComponentAccess) Close() error { + return nil +} + +var _ virtual.Access = (*ComponentAccess)(nil) + +type ComponentVersionAccess struct { + access *ComponentAccess + comp string + vers string + desc *compdesc.ComponentDescriptor +} + +func (v *ComponentVersionAccess) GetDescriptor() *compdesc.ComponentDescriptor { + return v.desc +} + +func (v *ComponentVersionAccess) GetBlob(name string) (cpi.DataAccess, error) { + if v.access.blobsFs == nil { + return nil, vfs.ErrNotExist + } + + filepath := path.Join("/", name) + + if ok, err := vfs.IsDir(v.access.blobsFs, filepath); ok { + tempfile, err := accessio.NewTempFile(osfs.New(), os.TempDir(), "TEMP_BLOB_DATA") + if err != nil { + return nil, err + } + err = tarutils.PackFsIntoTar(v.access.blobsFs, filepath, tempfile.Writer(), tarutils.TarFileSystemOptions{}) + if err != nil { + return nil, err + } + return tempfile.AsBlob(mime.MIME_TAR), nil + } else { + if err != nil { + return nil, err + } + + if ok, err := vfs.FileExists(v.access.blobsFs, filepath); ok { + return accessio.DataAccessForFile(v.access.blobsFs, filepath), nil + } else { + if err != nil { + return nil, err + } + return nil, vfs.ErrNotExist + } + } +} + +func (v *ComponentVersionAccess) AddBlob(blob cpi.BlobAccess) (string, error) { + return "", accessio.ErrReadOnly +} + +func (v *ComponentVersionAccess) Update() error { + return nil +} + +func (v *ComponentVersionAccess) Close() error { + return nil +} + +func (v *ComponentVersionAccess) IsReadOnly() bool { + return true +} + +func (v *ComponentVersionAccess) GetInexpensiveContentVersionIdentity(a cpi.AccessSpec) string { + switch a.GetKind() { //nolint:gocritic // to be extended + case localblob.Type: + blob, err := v.GetBlob(a.(*localblob.AccessSpec).LocalReference) + if err != nil { + return "" + } + defer blob.Close() + dig, err := accessio.Digest(blob) + if err != nil { + return "" + } + return dig.String() + } + return "" +} + +var _ virtual.VersionAccess = (*ComponentVersionAccess)(nil) diff --git a/pkg/components/ocmlib/repository/types.go b/pkg/components/ocmlib/repository/types.go new file mode 100644 index 000000000..c859cfaac --- /dev/null +++ b/pkg/components/ocmlib/repository/types.go @@ -0,0 
+1,10 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package repository + +const ( + LocalType = "local" + InlineType = "inline" +) diff --git a/pkg/components/ocmlib/resource.go b/pkg/components/ocmlib/resource.go new file mode 100644 index 000000000..1c02ec4f6 --- /dev/null +++ b/pkg/components/ocmlib/resource.go @@ -0,0 +1,136 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package ocmlib + +import ( + "context" + + cdv2 "github.com/gardener/component-spec/bindings-go/apis/v2" + + "github.com/open-component-model/ocm/pkg/common" + "github.com/open-component-model/ocm/pkg/contexts/oci" + "github.com/open-component-model/ocm/pkg/contexts/ocm" + "github.com/open-component-model/ocm/pkg/errors" + "github.com/open-component-model/ocm/pkg/helm" + "github.com/open-component-model/ocm/pkg/helm/loader" + "github.com/open-component-model/ocm/pkg/runtime" + + "github.com/gardener/landscaper/pkg/components/model" + "github.com/gardener/landscaper/pkg/components/model/types" + _ "github.com/gardener/landscaper/pkg/components/ocmlib/resourcetypehandlers" +) + +type Resource struct { + resourceAccess ocm.ResourceAccess + // TODO + // handlerRegistry *registries.ResourceHandlerRegistry +} + +func NewResource(access ocm.ResourceAccess) model.Resource { + // TODO + //return &Resource{ + // resourceAccess: access, + // handlerRegistry: registries.Registry, + //} + return nil +} + +func (r *Resource) GetName() string { + return r.resourceAccess.Meta().GetName() +} + +func (r *Resource) GetVersion() string { + return r.resourceAccess.Meta().GetVersion() +} + +func (r *Resource) GetType() string { + return r.resourceAccess.Meta().GetType() +} + +func (r *Resource) GetAccessType() string { + spec, err := r.resourceAccess.Access() + if err != nil { + return "" + } + return spec.GetType() +} + +func (r *Resource) GetResource() (*types.Resource, error) { + spec := r.resourceAccess.Meta() + data, err := runtime.DefaultYAMLEncoding.Marshal(spec) + if err != nil { + return nil, err + } + + accessSpec, err := r.resourceAccess.Access() + if err != nil { + return nil, err + } + accessData, err := runtime.DefaultJSONEncoding.Marshal(accessSpec) + if err != nil { + return nil, err + } + lsspec := types.Resource{} + err = runtime.DefaultYAMLEncoding.Unmarshal(data, &lsspec) + if err != nil { + return nil, err + } + + lsspec.Access = &cdv2.UnstructuredTypedObject{} + err = lsspec.Access.UnmarshalJSON(accessData) + if err != nil { + return nil, err + } + + return &lsspec, err +} + +func (r *Resource) GetTypedContent(ctx context.Context) (*model.TypedResourceContent, error) { + return nil, model.NotImplemented() + + //handler := r.handlerRegistry.Get(r.GetType()) + //if handler != nil { + // return handler.GetResourceContent(ctx, r, r.resourceAccess) + //} + //return nil, fmt.Errorf("no handler found for resource type %s", r.GetType()) +} + +func (r *Resource) GetCachingIdentity(ctx context.Context) string { + spec, err := r.resourceAccess.Access() + if err != nil { + return "" + } + return spec.GetInexpensiveContentVersionIdentity(r.resourceAccess.ComponentVersion()) +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +type HelmChartProvider struct { + ocictx oci.Context + ref string + version string + repourl string +} + +func (h *HelmChartProvider) 
GetTypedContent(ctx context.Context) (_ *model.TypedResourceContent, rerr error) {
+	access, err := helm.DownloadChart(common.NewPrinter(nil), h.ocictx, h.ref, h.version, h.repourl)
+	if err != nil {
+		return nil, err
+	}
+	defer errors.PropagateError(&rerr, access.Close)
+
+	chartLoader := loader.AccessLoader(access)
+	helmChart, err := chartLoader.Chart()
+	if err != nil {
+		return nil, err
+	}
+
+	return &model.TypedResourceContent{
+		Type:     types.HelmChartResourceType,
+		Resource: helmChart,
+	}, nil
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/pkg/components/ocmlib/resourcetypehandlers/blueprint/handler.go b/pkg/components/ocmlib/resourcetypehandlers/blueprint/handler.go
new file mode 100644
index 000000000..d3ff49e1f
--- /dev/null
+++ b/pkg/components/ocmlib/resourcetypehandlers/blueprint/handler.go
@@ -0,0 +1,85 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package blueprint
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/open-component-model/ocm/pkg/common"
+
+	"github.com/mandelsoft/filepath/pkg/filepath"
+	"github.com/mandelsoft/vfs/pkg/memoryfs"
+	"github.com/open-component-model/ocm/pkg/contexts/ocm"
+	bpdownload "github.com/open-component-model/ocm/pkg/contexts/ocm/download/handlers/blueprint"
+
+	"github.com/mandelsoft/vfs/pkg/vfs"
+
+	"github.com/gardener/landscaper/apis/mediatype"
+	"github.com/gardener/landscaper/pkg/components/cache/blueprint"
+	"github.com/gardener/landscaper/pkg/components/model"
+	"github.com/gardener/landscaper/pkg/components/ocmlib/registries"
+)
+
+func init() {
+	registries.Registry.Register(mediatype.BlueprintType, New())
+	registries.Registry.Register(mediatype.OldBlueprintType, New())
+}
+
+type BlueprintHandler struct {
+	cache *blueprint.Store
+}
+
+func New() *BlueprintHandler {
+	return &BlueprintHandler{
+		cache: blueprint.GetBlueprintStore(),
+	}
+}
+
+func (h *BlueprintHandler) GetResourceContent(ctx context.Context, r model.Resource, access ocm.ResourceAccess) (*model.TypedResourceContent, error) {
+	res, err := h.cache.Get(ctx, r.GetCachingIdentity(ctx))
+	if err != nil {
+		return nil, err
+	}
+	if res != nil {
+		return &model.TypedResourceContent{
+			Type:     r.GetType(),
+			Resource: res,
+		}, nil
+	}
+
+	fs := memoryfs.New()
+	pr := common.NewPrinter(nil)
+	ok, _, err := bpdownload.New().Download(pr, access, filepath.Join("/"), fs)
+	if err != nil {
+		return nil, err
+	}
+	if !ok {
+		return nil, fmt.Errorf("artifact does not match downloader (check config media type)")
+	}
+
+	typedResourceContent, err := h.Prepare(ctx, fs)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = h.cache.Put(ctx, r.GetCachingIdentity(ctx), typedResourceContent)
+	if err != nil {
+		return nil, err
+	}
+
+	return typedResourceContent, nil
+}
+
+func (h *BlueprintHandler) Prepare(ctx context.Context, fs vfs.FileSystem) (*model.TypedResourceContent, error) {
+	bp, err := blueprint.BuildBlueprintFromPath(fs, "/")
+	if err != nil {
+		return nil, err
+	}
+
+	return &model.TypedResourceContent{
+		Type:     mediatype.BlueprintType,
+		Resource: bp,
+	}, nil
+}
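+
+// For illustration only — a hypothetical new resource type handler would
+// follow the same pattern as above: implement GetResourceContent (plus any
+// preparation steps) and register the handler for its type in an init
+// function, e.g.:
+//
+//	func init() {
+//		registries.Registry.Register("my-resource-type", New())
+//	}
+//
+// ("my-resource-type" is a made-up type name for this sketch.)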
diff --git a/pkg/components/ocmlib/resourcetypehandlers/doc.go b/pkg/components/ocmlib/resourcetypehandlers/doc.go
new file mode 100644
index 000000000..f539dc430
--- /dev/null
+++ b/pkg/components/ocmlib/resourcetypehandlers/doc.go
@@ -0,0 +1,18 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+// Package resourcetypehandlers provides handlers for specific resource types. These handlers are responsible for
+// accessing the content of resources of the respective type. This may include caching as well as several other
+// pre- and postprocessing steps necessary to return the downloaded content in a suitable abstract data type.
+//
+// In contrast to the corresponding cnudie package, this package does not provide a specific handler for resources of
+// type helm but instead provides a separate implementation of the resource interface dedicated specifically to helm
+// charts.
+// This is because the landscaper accesses helm charts directly through an oci reference or helm repository
+// reference, without a component descriptor (or rather a component version) describing this information.
+// Since a resource, as implemented in the ocm-lib, is always tied to a component version (examine the ocm.ResourceAccess
+// struct type for a deeper understanding), a separate resource type was needed.
+//
+// After implementing a new handler, DO NOT forget to import it in init.go.
+package resourcetypehandlers
diff --git a/pkg/components/ocmlib/resourcetypehandlers/init.go b/pkg/components/ocmlib/resourcetypehandlers/init.go
new file mode 100644
index 000000000..2edc62a6a
--- /dev/null
+++ b/pkg/components/ocmlib/resourcetypehandlers/init.go
@@ -0,0 +1,10 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package resourcetypehandlers
+
+import (
+	_ "github.com/gardener/landscaper/pkg/components/ocmlib/resourcetypehandlers/blueprint"
+	_ "github.com/gardener/landscaper/pkg/components/ocmlib/resourcetypehandlers/jsonschema"
+)
diff --git a/pkg/components/ocmlib/resourcetypehandlers/jsonschema/handler.go b/pkg/components/ocmlib/resourcetypehandlers/jsonschema/handler.go
new file mode 100644
index 000000000..530c00c5c
--- /dev/null
+++ b/pkg/components/ocmlib/resourcetypehandlers/jsonschema/handler.go
@@ -0,0 +1,76 @@
+// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package jsonschema
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+
+	"github.com/open-component-model/ocm/pkg/common/compression"
+	"github.com/open-component-model/ocm/pkg/contexts/ocm"
+	"github.com/open-component-model/ocm/pkg/finalizer"
+
+	"github.com/gardener/landscaper/apis/mediatype"
+	"github.com/gardener/landscaper/pkg/components/model"
+	"github.com/gardener/landscaper/pkg/components/ocmlib/registries"
+)
+
+func init() {
+	registries.Registry.Register(mediatype.JSONSchemaType, New())
+}
+
+type SchemaHandler struct{}
+
+func New() *SchemaHandler {
+	return &SchemaHandler{}
+}
+
+func (h *SchemaHandler) GetResourceContent(ctx context.Context, r model.Resource, access ocm.ResourceAccess) (_ *model.TypedResourceContent, rerr error) {
+	var finalize finalizer.Finalizer
+	defer finalize.FinalizeWithErrorPropagationf(&rerr, "accessing (and decompressing) json schema")
+
+	m, err := access.AccessMethod()
+	if err != nil {
+		return nil, err
+	}
+	finalize.Close(m)
+
+	mimeType := m.MimeType()
+	mt, err := mediatype.Parse(mimeType)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse media type %q: %w", mimeType, err)
+	}
+	if mt.Type != mediatype.JSONSchemaArtifactsMediaTypeV1 {
+		return nil, fmt.Errorf("unknown media type %s, 
expected %s", mimeType, mediatype.JSONSchemaArtifactsMediaTypeV1) + } + + schemaRaw, err := m.Reader() + if err != nil { + return nil, err + } + finalize.Close(schemaRaw) + + schema, _, err := compression.AutoDecompress(schemaRaw) + if err != nil { + return nil, err + } + finalize.Close(schema) + + var buf bytes.Buffer + _, err = buf.ReadFrom(schema) + if err != nil { + return nil, err + } + + return h.Prepare(ctx, buf.Bytes()) +} + +func (h *SchemaHandler) Prepare(ctx context.Context, data []byte) (*model.TypedResourceContent, error) { + return &model.TypedResourceContent{ + Type: mediatype.JSONSchemaType, + Resource: data, + }, nil +} diff --git a/pkg/components/ocmlib/suite_test.go b/pkg/components/ocmlib/suite_test.go new file mode 100644 index 000000000..1133bb87c --- /dev/null +++ b/pkg/components/ocmlib/suite_test.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2023 SAP SE or an SAP affiliate company and Gardener contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package ocmlib_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestConfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Registry Access Test Suite") +} diff --git a/vendor/filippo.io/edwards25519/LICENSE b/vendor/filippo.io/edwards25519/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/filippo.io/edwards25519/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/filippo.io/edwards25519/README.md b/vendor/filippo.io/edwards25519/README.md new file mode 100644 index 000000000..2606f40b0 --- /dev/null +++ b/vendor/filippo.io/edwards25519/README.md @@ -0,0 +1,14 @@ +# filippo.io/edwards25519 + +``` +import "filippo.io/edwards25519" +``` + +This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives. +Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519). 
+ +The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality. + +Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative. + +Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements. diff --git a/vendor/filippo.io/edwards25519/doc.go b/vendor/filippo.io/edwards25519/doc.go new file mode 100644 index 000000000..d8608b067 --- /dev/null +++ b/vendor/filippo.io/edwards25519/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edwards25519 implements group logic for the twisted Edwards curve +// +// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2 +// +// This is better known as the Edwards curve equivalent to Curve25519, and is +// the curve used by the Ed25519 signature scheme. +// +// Most users don't need this package, and should instead use crypto/ed25519 for +// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or +// github.com/gtank/ristretto255 for prime order group logic. +// +// However, developers who do need to interact with low-level edwards25519 +// operations can use this package, which is an extended version of +// crypto/ed25519/internal/edwards25519 from the standard library repackaged as +// an importable module. +package edwards25519 diff --git a/vendor/filippo.io/edwards25519/edwards25519.go b/vendor/filippo.io/edwards25519/edwards25519.go new file mode 100644 index 000000000..e22a7c2db --- /dev/null +++ b/vendor/filippo.io/edwards25519/edwards25519.go @@ -0,0 +1,428 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import ( + "errors" + + "filippo.io/edwards25519/field" +) + +// Point types. + +type projP1xP1 struct { + X, Y, Z, T field.Element +} + +type projP2 struct { + X, Y, Z field.Element +} + +// Point represents a point on the edwards25519 curve. +// +// This type works similarly to math/big.Int, and all arguments and receivers +// are allowed to alias. +// +// The zero value is NOT valid, and it may be used only as a receiver. +type Point struct { + // The point is internally represented in extended coordinates (X, Y, Z, T) + // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522. + x, y, z, t field.Element + + // Make the type not comparable (i.e. used with == or as a map key), as + // equivalent points can be represented by different Go values. 
+ _ incomparable +} + +type incomparable [0]func() + +func checkInitialized(points ...*Point) { + for _, p := range points { + if p.x == (field.Element{}) && p.y == (field.Element{}) { + panic("edwards25519: use of uninitialized Point") + } + } +} + +type projCached struct { + YplusX, YminusX, Z, T2d field.Element +} + +type affineCached struct { + YplusX, YminusX, T2d field.Element +} + +// Constructors. + +func (v *projP2) Zero() *projP2 { + v.X.Zero() + v.Y.One() + v.Z.One() + return v +} + +// identity is the point at infinity. +var identity, _ = new(Point).SetBytes([]byte{ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) + +// NewIdentityPoint returns a new Point set to the identity. +func NewIdentityPoint() *Point { + return new(Point).Set(identity) +} + +// generator is the canonical curve basepoint. See TestGenerator for the +// correspondence of this encoding with the values in RFC 8032. +var generator, _ = new(Point).SetBytes([]byte{ + 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66}) + +// NewGeneratorPoint returns a new Point set to the canonical generator. +func NewGeneratorPoint() *Point { + return new(Point).Set(generator) +} + +func (v *projCached) Zero() *projCached { + v.YplusX.One() + v.YminusX.One() + v.Z.One() + v.T2d.Zero() + return v +} + +func (v *affineCached) Zero() *affineCached { + v.YplusX.One() + v.YminusX.One() + v.T2d.Zero() + return v +} + +// Assignments. + +// Set sets v = u, and returns v. +func (v *Point) Set(u *Point) *Point { + *v = *u + return v +} + +// Encoding. + +// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032, +// Section 5.1.2. +func (v *Point) Bytes() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var buf [32]byte + return v.bytes(&buf) +} + +func (v *Point) bytes(buf *[32]byte) []byte { + checkInitialized(v) + + var zInv, x, y field.Element + zInv.Invert(&v.z) // zInv = 1 / Z + x.Multiply(&v.x, &zInv) // x = X / Z + y.Multiply(&v.y, &zInv) // y = Y / Z + + out := copyFieldElement(buf, &y) + out[31] |= byte(x.IsNegative() << 7) + return out +} + +var feOne = new(field.Element).One() + +// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not +// represent a valid point on the curve, SetBytes returns nil and an error and +// the receiver is unchanged. Otherwise, SetBytes returns v. +// +// Note that SetBytes accepts all non-canonical encodings of valid points. +// That is, it follows decoding rules that match most implementations in +// the ecosystem rather than RFC 8032. +func (v *Point) SetBytes(x []byte) (*Point, error) { + // Specifically, the non-canonical encodings that are accepted are + // 1) the ones where the field element is not reduced (see the + // (*field.Element).SetBytes docs) and + // 2) the ones where the x-coordinate is zero and the sign bit is set. + // + // This is consistent with crypto/ed25519/internal/edwards25519. Read more + // at https://hdevalence.ca/blog/2020-10-04-its-25519am, specifically the + // "Canonical A, R" section. 
+ + y, err := new(field.Element).SetBytes(x) + if err != nil { + return nil, errors.New("edwards25519: invalid point encoding length") + } + + // -x² + y² = 1 + dx²y² + // x² + dx²y² = x²(dy² + 1) = y² - 1 + // x² = (y² - 1) / (dy² + 1) + + // u = y² - 1 + y2 := new(field.Element).Square(y) + u := new(field.Element).Subtract(y2, feOne) + + // v = dy² + 1 + vv := new(field.Element).Multiply(y2, d) + vv = vv.Add(vv, feOne) + + // x = +√(u/v) + xx, wasSquare := new(field.Element).SqrtRatio(u, vv) + if wasSquare == 0 { + return nil, errors.New("edwards25519: invalid point encoding") + } + + // Select the negative square root if the sign bit is set. + xxNeg := new(field.Element).Negate(xx) + xx = xx.Select(xxNeg, xx, int(x[31]>>7)) + + v.x.Set(xx) + v.y.Set(y) + v.z.One() + v.t.Multiply(xx, y) // xy = T / Z + + return v, nil +} + +func copyFieldElement(buf *[32]byte, v *field.Element) []byte { + copy(buf[:], v.Bytes()) + return buf[:] +} + +// Conversions. + +func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 { + v.X.Multiply(&p.X, &p.T) + v.Y.Multiply(&p.Y, &p.Z) + v.Z.Multiply(&p.Z, &p.T) + return v +} + +func (v *projP2) FromP3(p *Point) *projP2 { + v.X.Set(&p.x) + v.Y.Set(&p.y) + v.Z.Set(&p.z) + return v +} + +func (v *Point) fromP1xP1(p *projP1xP1) *Point { + v.x.Multiply(&p.X, &p.T) + v.y.Multiply(&p.Y, &p.Z) + v.z.Multiply(&p.Z, &p.T) + v.t.Multiply(&p.X, &p.Y) + return v +} + +func (v *Point) fromP2(p *projP2) *Point { + v.x.Multiply(&p.X, &p.Z) + v.y.Multiply(&p.Y, &p.Z) + v.z.Square(&p.Z) + v.t.Multiply(&p.X, &p.Y) + return v +} + +// d is a constant in the curve equation. +var d, _ = new(field.Element).SetBytes([]byte{ + 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75, + 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00, + 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c, + 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52}) +var d2 = new(field.Element).Add(d, d) + +func (v *projCached) FromP3(p *Point) *projCached { + v.YplusX.Add(&p.y, &p.x) + v.YminusX.Subtract(&p.y, &p.x) + v.Z.Set(&p.z) + v.T2d.Multiply(&p.t, d2) + return v +} + +func (v *affineCached) FromP3(p *Point) *affineCached { + v.YplusX.Add(&p.y, &p.x) + v.YminusX.Subtract(&p.y, &p.x) + v.T2d.Multiply(&p.t, d2) + + var invZ field.Element + invZ.Invert(&p.z) + v.YplusX.Multiply(&v.YplusX, &invZ) + v.YminusX.Multiply(&v.YminusX, &invZ) + v.T2d.Multiply(&v.T2d, &invZ) + return v +} + +// (Re)addition and subtraction. + +// Add sets v = p + q, and returns v. +func (v *Point) Add(p, q *Point) *Point { + checkInitialized(p, q) + qCached := new(projCached).FromP3(q) + result := new(projP1xP1).Add(p, qCached) + return v.fromP1xP1(result) +} + +// Subtract sets v = p - q, and returns v. 
+func (v *Point) Subtract(p, q *Point) *Point { + checkInitialized(p, q) + qCached := new(projCached).FromP3(q) + result := new(projP1xP1).Sub(p, qCached) + return v.fromP1xP1(result) +} + +func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YplusX) + MM.Multiply(&YminusX, &q.YminusX) + TT2d.Multiply(&p.t, &q.T2d) + ZZ2.Multiply(&p.z, &q.Z) + + ZZ2.Add(&ZZ2, &ZZ2) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Add(&ZZ2, &TT2d) + v.T.Subtract(&ZZ2, &TT2d) + return v +} + +func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YminusX) // flipped sign + MM.Multiply(&YminusX, &q.YplusX) // flipped sign + TT2d.Multiply(&p.t, &q.T2d) + ZZ2.Multiply(&p.z, &q.Z) + + ZZ2.Add(&ZZ2, &ZZ2) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Subtract(&ZZ2, &TT2d) // flipped sign + v.T.Add(&ZZ2, &TT2d) // flipped sign + return v +} + +func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YplusX) + MM.Multiply(&YminusX, &q.YminusX) + TT2d.Multiply(&p.t, &q.T2d) + + Z2.Add(&p.z, &p.z) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Add(&Z2, &TT2d) + v.T.Subtract(&Z2, &TT2d) + return v +} + +func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YminusX) // flipped sign + MM.Multiply(&YminusX, &q.YplusX) // flipped sign + TT2d.Multiply(&p.t, &q.T2d) + + Z2.Add(&p.z, &p.z) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Subtract(&Z2, &TT2d) // flipped sign + v.T.Add(&Z2, &TT2d) // flipped sign + return v +} + +// Doubling. + +func (v *projP1xP1) Double(p *projP2) *projP1xP1 { + var XX, YY, ZZ2, XplusYsq field.Element + + XX.Square(&p.X) + YY.Square(&p.Y) + ZZ2.Square(&p.Z) + ZZ2.Add(&ZZ2, &ZZ2) + XplusYsq.Add(&p.X, &p.Y) + XplusYsq.Square(&XplusYsq) + + v.Y.Add(&YY, &XX) + v.Z.Subtract(&YY, &XX) + + v.X.Subtract(&XplusYsq, &v.Y) + v.T.Subtract(&ZZ2, &v.Z) + return v +} + +// Negation. + +// Negate sets v = -p, and returns v. +func (v *Point) Negate(p *Point) *Point { + checkInitialized(p) + v.x.Negate(&p.x) + v.y.Set(&p.y) + v.z.Set(&p.z) + v.t.Negate(&p.t) + return v +} + +// Equal returns 1 if v is equivalent to u, and 0 otherwise. +func (v *Point) Equal(u *Point) int { + checkInitialized(v, u) + + var t1, t2, t3, t4 field.Element + t1.Multiply(&v.x, &u.z) + t2.Multiply(&u.x, &v.z) + t3.Multiply(&v.y, &u.z) + t4.Multiply(&u.y, &v.z) + + return t1.Equal(&t2) & t3.Equal(&t4) +} + +// Constant-time operations + +// Select sets v to a if cond == 1 and to b if cond == 0. +func (v *projCached) Select(a, b *projCached, cond int) *projCached { + v.YplusX.Select(&a.YplusX, &b.YplusX, cond) + v.YminusX.Select(&a.YminusX, &b.YminusX, cond) + v.Z.Select(&a.Z, &b.Z, cond) + v.T2d.Select(&a.T2d, &b.T2d, cond) + return v +} + +// Select sets v to a if cond == 1 and to b if cond == 0. 
+func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached { + v.YplusX.Select(&a.YplusX, &b.YplusX, cond) + v.YminusX.Select(&a.YminusX, &b.YminusX, cond) + v.T2d.Select(&a.T2d, &b.T2d, cond) + return v +} + +// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0. +func (v *projCached) CondNeg(cond int) *projCached { + v.YplusX.Swap(&v.YminusX, cond) + v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond) + return v +} + +// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0. +func (v *affineCached) CondNeg(cond int) *affineCached { + v.YplusX.Swap(&v.YminusX, cond) + v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond) + return v +} diff --git a/vendor/filippo.io/edwards25519/extra.go b/vendor/filippo.io/edwards25519/extra.go new file mode 100644 index 000000000..f5e590803 --- /dev/null +++ b/vendor/filippo.io/edwards25519/extra.go @@ -0,0 +1,343 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// This file contains additional functionality that is not included in the +// upstream crypto/ed25519/internal/edwards25519 package. + +import ( + "errors" + + "filippo.io/edwards25519/field" +) + +// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where +// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522. +func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. Don't change the style without making + // sure it doesn't increase the inliner cost. + var e [4]field.Element + X, Y, Z, T = v.extendedCoordinates(&e) + return +} + +func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) { + checkInitialized(v) + X = e[0].Set(&v.x) + Y = e[1].Set(&v.y) + Z = e[2].Set(&v.z) + T = e[3].Set(&v.t) + return +} + +// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where +// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522. +// +// If the coordinates are invalid or don't represent a valid point on the curve, +// SetExtendedCoordinates returns nil and an error and the receiver is +// unchanged. Otherwise, SetExtendedCoordinates returns v. +func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) { + if !isOnCurve(X, Y, Z, T) { + return nil, errors.New("edwards25519: invalid point coordinates") + } + v.x.Set(X) + v.y.Set(Y) + v.z.Set(Z) + v.t.Set(T) + return v, nil +} + +func isOnCurve(X, Y, Z, T *field.Element) bool { + var lhs, rhs field.Element + XX := new(field.Element).Square(X) + YY := new(field.Element).Square(Y) + ZZ := new(field.Element).Square(Z) + TT := new(field.Element).Square(T) + // -x² + y² = 1 + dx²y² + // -(X/Z)² + (Y/Z)² = 1 + d(T/Z)² + // -X² + Y² = Z² + dT² + lhs.Subtract(YY, XX) + rhs.Multiply(d, TT).Add(&rhs, ZZ) + if lhs.Equal(&rhs) != 1 { + return false + } + // xy = T/Z + // XY/Z² = T/Z + // XY = TZ + lhs.Multiply(X, Y) + rhs.Multiply(T, Z) + return lhs.Equal(&rhs) == 1 +} + +// BytesMontgomery converts v to a point on the birationally-equivalent +// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding +// according to RFC 7748. +// +// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode +// to the same value. 
If v is the identity point, BytesMontgomery returns 32 +// zero bytes, analogously to the X25519 function. +func (v *Point) BytesMontgomery() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var buf [32]byte + return v.bytesMontgomery(&buf) +} + +func (v *Point) bytesMontgomery(buf *[32]byte) []byte { + checkInitialized(v) + + // RFC 7748, Section 4.1 provides the bilinear map to calculate the + // Montgomery u-coordinate + // + // u = (1 + y) / (1 - y) + // + // where y = Y / Z. + + var y, recip, u field.Element + + y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z + recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y) + u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r + + return copyFieldElement(buf, &u) +} + +// MultByCofactor sets v = 8 * p, and returns v. +func (v *Point) MultByCofactor(p *Point) *Point { + checkInitialized(p) + result := projP1xP1{} + pp := (&projP2{}).FromP3(p) + result.Double(pp) + pp.FromP1xP1(&result) + result.Double(pp) + pp.FromP1xP1(&result) + result.Double(pp) + return v.fromP1xP1(&result) +} + +// Given k > 0, set s = s**(2*i). +func (s *Scalar) pow2k(k int) { + for i := 0; i < k; i++ { + s.Multiply(s, s) + } +} + +// Invert sets s to the inverse of a nonzero scalar v, and returns s. +// +// If t is zero, Invert returns zero. +func (s *Scalar) Invert(t *Scalar) *Scalar { + // Uses a hardcoded sliding window of width 4. + var table [8]Scalar + var tt Scalar + tt.Multiply(t, t) + table[0] = *t + for i := 0; i < 7; i++ { + table[i+1].Multiply(&table[i], &tt) + } + // Now table = [t**1, t**3, t**7, t**11, t**13, t**15] + // so t**k = t[k/2] for odd k + + // To compute the sliding window digits, use the following Sage script: + + // sage: import itertools + // sage: def sliding_window(w,k): + // ....: digits = [] + // ....: while k > 0: + // ....: if k % 2 == 1: + // ....: kmod = k % (2**w) + // ....: digits.append(kmod) + // ....: k = k - kmod + // ....: else: + // ....: digits.append(0) + // ....: k = k // 2 + // ....: return digits + + // Now we can compute s roughly as follows: + + // sage: s = 1 + // sage: for coeff in reversed(sliding_window(4,l-2)): + // ....: s = s*s + // ....: if coeff > 0 : + // ....: s = s*t**coeff + + // This works on one bit at a time, with many runs of zeros. + // The digits can be collapsed into [(count, coeff)] as follows: + + // sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))] + + // Entries of the form (k, 0) turn into pow2k(k) + // Entries of the form (1, coeff) turn into a squaring and then a table lookup. + // We can fold the squaring into the previous pow2k(k) as pow2k(k+1). 
+
+	*s = table[1/2]
+	s.pow2k(127 + 1)
+	s.Multiply(s, &table[1/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[9/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[11/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[13/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[15/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[7/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[15/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[5/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[1/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[15/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[15/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[7/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[3/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[11/2])
+	s.pow2k(5 + 1)
+	s.Multiply(s, &table[11/2])
+	s.pow2k(9 + 1)
+	s.Multiply(s, &table[9/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[3/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[3/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[3/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[9/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[7/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[3/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[13/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[7/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[9/2])
+	s.pow2k(3 + 1)
+	s.Multiply(s, &table[15/2])
+	s.pow2k(4 + 1)
+	s.Multiply(s, &table[11/2])
+
+	return s
+}
+
+// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends only on the lengths of the two slices, which must match.
+func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point {
+	if len(scalars) != len(points) {
+		panic("edwards25519: called MultiScalarMult with different size inputs")
+	}
+	checkInitialized(points...)
+
+	// Proceed as in the single-base case, but share doublings
+	// between each point in the multiscalar equation.
+
+	// Build lookup tables for each point
+	tables := make([]projLookupTable, len(points))
+	for i := range tables {
+		tables[i].FromP3(points[i])
+	}
+	// Compute signed radix-16 digits for each scalar
+	digits := make([][64]int8, len(scalars))
+	for i := range digits {
+		digits[i] = scalars[i].signedRadix16()
+	}
+
+	// Unwrap first loop iteration to save computing 16*identity
+	multiple := &projCached{}
+	tmp1 := &projP1xP1{}
+	tmp2 := &projP2{}
+	v.Set(NewIdentityPoint())
+	// Lookup-and-add the appropriate multiple of each input point
+	for j := range tables {
+		tables[j].SelectInto(multiple, digits[j][63])
+		tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords
+		v.fromP1xP1(tmp1)     // update v
+	}
+	tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+	for i := 62; i >= 0; i-- {
+		tmp1.Double(tmp2)    // tmp1 =  2*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 =  2*(prev) in P2 coords
+		tmp1.Double(tmp2)    // tmp1 =  4*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 =  4*(prev) in P2 coords
+		tmp1.Double(tmp2)    // tmp1 =  8*(prev) in P1xP1 coords
+		tmp2.FromP1xP1(tmp1) // tmp2 =  8*(prev) in P2 coords
+		tmp1.Double(tmp2)    // tmp1 = 16*(prev) in P1xP1 coords
+		v.fromP1xP1(tmp1)    // v = 16*(prev) in P3 coords
+		// Lookup-and-add the appropriate multiple of each input point
+		for j := range tables {
+			tables[j].SelectInto(multiple, digits[j][i])
+			tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords
+			v.fromP1xP1(tmp1)     // update v
+		}
+		tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
+	}
+	return v
+}
+
+// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
+//
+// Execution time depends on the inputs.
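The constant-time path above shares one run of four doublings per radix-16 digit across every input point, rather than a full doubling chain per point. What it computes can be sanity-checked against the naive per-point route through the public API. A minimal sketch, assuming the module is imported as filippo.io/edwards25519; the variable names are illustrative and nothing below is part of this diff:

package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// Two random scalars, reduced mod l via SetUniformBytes.
	var wide [64]byte
	rand.Read(wide[:]) // error intentionally ignored in this sketch
	a, _ := edwards25519.NewScalar().SetUniformBytes(wide[:])
	rand.Read(wide[:])
	b, _ := edwards25519.NewScalar().SetUniformBytes(wide[:])

	B := edwards25519.NewGeneratorPoint()
	P := new(edwards25519.Point).ScalarBaseMult(b) // an arbitrary second point

	// v = a*B + b*P via the shared-doubling multiscalar path.
	v := new(edwards25519.Point).MultiScalarMult(
		[]*edwards25519.Scalar{a, b},
		[]*edwards25519.Point{B, P},
	)

	// The same sum, one scalar multiplication at a time.
	aB := new(edwards25519.Point).ScalarMult(a, B)
	bP := new(edwards25519.Point).ScalarMult(b, P)
	naive := new(edwards25519.Point).Add(aB, bP)

	fmt.Println("match:", v.Equal(naive) == 1) // expected: match: true
}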
+func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point { + if len(scalars) != len(points) { + panic("edwards25519: called VarTimeMultiScalarMult with different size inputs") + } + checkInitialized(points...) + + // Generalize double-base NAF computation to arbitrary sizes. + // Here all the points are dynamic, so we only use the smaller + // tables. + + // Build lookup tables for each point + tables := make([]nafLookupTable5, len(points)) + for i := range tables { + tables[i].FromP3(points[i]) + } + // Compute a NAF for each scalar + nafs := make([][256]int8, len(scalars)) + for i := range nafs { + nafs[i] = scalars[i].nonAdjacentForm(5) + } + + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + tmp2.Zero() + + // Move from high to low bits, doubling the accumulator + // at each iteration and checking whether there is a nonzero + // coefficient to look up a multiple of. + // + // Skip trying to find the first nonzero coefficent, because + // searching might be more work than a few extra doublings. + for i := 255; i >= 0; i-- { + tmp1.Double(tmp2) + + for j := range nafs { + if nafs[j][i] > 0 { + v.fromP1xP1(tmp1) + tables[j].SelectInto(multiple, nafs[j][i]) + tmp1.Add(v, multiple) + } else if nafs[j][i] < 0 { + v.fromP1xP1(tmp1) + tables[j].SelectInto(multiple, -nafs[j][i]) + tmp1.Sub(v, multiple) + } + } + + tmp2.FromP1xP1(tmp1) + } + + v.fromP2(tmp2) + return v +} diff --git a/vendor/filippo.io/edwards25519/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go new file mode 100644 index 000000000..5518ef2b9 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe.go @@ -0,0 +1,420 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package field implements fast arithmetic modulo 2^255-19. +package field + +import ( + "crypto/subtle" + "encoding/binary" + "errors" + "math/bits" +) + +// Element represents an element of the field GF(2^255-19). Note that this +// is not a cryptographically secure group, and should only be used to interact +// with edwards25519.Point coordinates. +// +// This type works similarly to math/big.Int, and all arguments and receivers +// are allowed to alias. +// +// The zero value is a valid zero element. +type Element struct { + // An element t represents the integer + // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 + // + // Between operations, all limbs are expected to be lower than 2^52. + l0 uint64 + l1 uint64 + l2 uint64 + l3 uint64 + l4 uint64 +} + +const maskLow51Bits uint64 = (1 << 51) - 1 + +var feZero = &Element{0, 0, 0, 0, 0} + +// Zero sets v = 0, and returns v. +func (v *Element) Zero() *Element { + *v = *feZero + return v +} + +var feOne = &Element{1, 0, 0, 0, 0} + +// One sets v = 1, and returns v. +func (v *Element) One() *Element { + *v = *feOne + return v +} + +// reduce reduces v modulo 2^255 - 19 and returns it. +func (v *Element) reduce() *Element { + v.carryPropagate() + + // After the light reduction we now have a field element representation + // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. + + // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, + // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. + c := (v.l0 + 19) >> 51 + c = (v.l1 + c) >> 51 + c = (v.l2 + c) >> 51 + c = (v.l3 + c) >> 51 + c = (v.l4 + c) >> 51 + + // If v < 2^255 - 19 and c = 0, this will be a no-op. 
Otherwise, it's + // effectively applying the reduction identity to the carry. + v.l0 += 19 * c + + v.l1 += v.l0 >> 51 + v.l0 = v.l0 & maskLow51Bits + v.l2 += v.l1 >> 51 + v.l1 = v.l1 & maskLow51Bits + v.l3 += v.l2 >> 51 + v.l2 = v.l2 & maskLow51Bits + v.l4 += v.l3 >> 51 + v.l3 = v.l3 & maskLow51Bits + // no additional carry + v.l4 = v.l4 & maskLow51Bits + + return v +} + +// Add sets v = a + b, and returns v. +func (v *Element) Add(a, b *Element) *Element { + v.l0 = a.l0 + b.l0 + v.l1 = a.l1 + b.l1 + v.l2 = a.l2 + b.l2 + v.l3 = a.l3 + b.l3 + v.l4 = a.l4 + b.l4 + // Using the generic implementation here is actually faster than the + // assembly. Probably because the body of this function is so simple that + // the compiler can figure out better optimizations by inlining the carry + // propagation. + return v.carryPropagateGeneric() +} + +// Subtract sets v = a - b, and returns v. +func (v *Element) Subtract(a, b *Element) *Element { + // We first add 2 * p, to guarantee the subtraction won't underflow, and + // then subtract b (which can be up to 2^255 + 2^13 * 19). + v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 + v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 + v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 + v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 + v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 + return v.carryPropagate() +} + +// Negate sets v = -a, and returns v. +func (v *Element) Negate(a *Element) *Element { + return v.Subtract(feZero, a) +} + +// Invert sets v = 1/z mod p, and returns v. +// +// If z == 0, Invert returns v = 0. +func (v *Element) Invert(z *Element) *Element { + // Inversion is implemented as exponentiation with exponent p − 2. It uses the + // same sequence of 255 squarings and 11 multiplications as [Curve25519]. + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element + + z2.Square(z) // 2 + t.Square(&z2) // 4 + t.Square(&t) // 8 + z9.Multiply(&t, z) // 9 + z11.Multiply(&z9, &z2) // 11 + t.Square(&z11) // 22 + z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 + + t.Square(&z2_5_0) // 2^6 - 2^1 + for i := 0; i < 4; i++ { + t.Square(&t) // 2^10 - 2^5 + } + z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 + + t.Square(&z2_10_0) // 2^11 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^20 - 2^10 + } + z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 + + t.Square(&z2_20_0) // 2^21 - 2^1 + for i := 0; i < 19; i++ { + t.Square(&t) // 2^40 - 2^20 + } + t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 + + t.Square(&t) // 2^41 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^50 - 2^10 + } + z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 + + t.Square(&z2_50_0) // 2^51 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^100 - 2^50 + } + z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 + + t.Square(&z2_100_0) // 2^101 - 2^1 + for i := 0; i < 99; i++ { + t.Square(&t) // 2^200 - 2^100 + } + t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 + + t.Square(&t) // 2^201 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^250 - 2^50 + } + t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 + + t.Square(&t) // 2^251 - 2^1 + t.Square(&t) // 2^252 - 2^2 + t.Square(&t) // 2^253 - 2^3 + t.Square(&t) // 2^254 - 2^4 + t.Square(&t) // 2^255 - 2^5 + + return v.Multiply(&t, &z11) // 2^255 - 21 +} + +// Set sets v = a, and returns v. +func (v *Element) Set(a *Element) *Element { + *v = *a + return v +} + +// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is +// not of the right length, SetBytes returns nil and an error, and the +// receiver is unchanged. 
+//
+// Consistent with RFC 7748, the most significant bit (the high bit of the
+// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
+// are accepted. Note that this is laxer than specified by RFC 8032, but
+// consistent with most Ed25519 implementations.
+func (v *Element) SetBytes(x []byte) (*Element, error) {
+	if len(x) != 32 {
+		return nil, errors.New("edwards25519: invalid field element input size")
+	}
+
+	// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
+	v.l0 = binary.LittleEndian.Uint64(x[0:8])
+	v.l0 &= maskLow51Bits
+	// Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
+	v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
+	v.l1 &= maskLow51Bits
+	// Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
+	v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
+	v.l2 &= maskLow51Bits
+	// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
+	v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
+	v.l3 &= maskLow51Bits
+	// Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
+	// Note: not bytes 25:33, shift 4, to avoid overread.
+	v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
+	v.l4 &= maskLow51Bits
+
+	return v, nil
+}
+
+// Bytes returns the canonical 32-byte little-endian encoding of v.
+func (v *Element) Bytes() []byte {
+	// This function is outlined to make the allocations inline in the caller
+	// rather than happen on the heap.
+	var out [32]byte
+	return v.bytes(&out)
+}
+
+func (v *Element) bytes(out *[32]byte) []byte {
+	t := *v
+	t.reduce()
+
+	var buf [8]byte
+	for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
+		bitsOffset := i * 51
+		binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
+		for i, bb := range buf {
+			off := bitsOffset/8 + i
+			if off >= len(out) {
+				break
+			}
+			out[off] |= bb
+		}
+	}
+
+	return out[:]
+}
+
+// Equal returns 1 if v and u are equal, and 0 otherwise.
+func (v *Element) Equal(u *Element) int {
+	sa, sv := u.Bytes(), v.Bytes()
+	return subtle.ConstantTimeCompare(sa, sv)
+}
+
+// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
+func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
+
+// Select sets v to a if cond == 1, and to b if cond == 0.
+func (v *Element) Select(a, b *Element, cond int) *Element {
+	m := mask64Bits(cond)
+	v.l0 = (m & a.l0) | (^m & b.l0)
+	v.l1 = (m & a.l1) | (^m & b.l1)
+	v.l2 = (m & a.l2) | (^m & b.l2)
+	v.l3 = (m & a.l3) | (^m & b.l3)
+	v.l4 = (m & a.l4) | (^m & b.l4)
+	return v
+}
+
+// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0.
+func (v *Element) Swap(u *Element, cond int) {
+	m := mask64Bits(cond)
+	t := m & (v.l0 ^ u.l0)
+	v.l0 ^= t
+	u.l0 ^= t
+	t = m & (v.l1 ^ u.l1)
+	v.l1 ^= t
+	u.l1 ^= t
+	t = m & (v.l2 ^ u.l2)
+	v.l2 ^= t
+	u.l2 ^= t
+	t = m & (v.l3 ^ u.l3)
+	v.l3 ^= t
+	u.l3 ^= t
+	t = m & (v.l4 ^ u.l4)
+	v.l4 ^= t
+	u.l4 ^= t
+}
+
+// IsNegative returns 1 if v is negative, and 0 otherwise.
+func (v *Element) IsNegative() int {
+	return int(v.Bytes()[0] & 1)
+}
+
+// Absolute sets v to |u|, and returns v.
+func (v *Element) Absolute(u *Element) *Element {
+	return v.Select(new(Element).Negate(u), u, u.IsNegative())
+}
+
+// Multiply sets v = x * y, and returns v.
+func (v *Element) Multiply(x, y *Element) *Element {
+	feMul(v, x, y)
+	return v
+}
+
+// Square sets v = x * x, and returns v.
+func (v *Element) Square(x *Element) *Element {
+	feSquare(v, x)
+	return v
+}
+
+// Mult32 sets v = x * y, and returns v.
+func (v *Element) Mult32(x *Element, y uint32) *Element { + x0lo, x0hi := mul51(x.l0, y) + x1lo, x1hi := mul51(x.l1, y) + x2lo, x2hi := mul51(x.l2, y) + x3lo, x3hi := mul51(x.l3, y) + x4lo, x4hi := mul51(x.l4, y) + v.l0 = x0lo + 19*x4hi // carried over per the reduction identity + v.l1 = x1lo + x0hi + v.l2 = x2lo + x1hi + v.l3 = x3lo + x2hi + v.l4 = x4lo + x3hi + // The hi portions are going to be only 32 bits, plus any previous excess, + // so we can skip the carry propagation. + return v +} + +// mul51 returns lo + hi * 2⁵¹ = a * b. +func mul51(a uint64, b uint32) (lo uint64, hi uint64) { + mh, ml := bits.Mul64(a, uint64(b)) + lo = ml & maskLow51Bits + hi = (mh << 13) | (ml >> 51) + return +} + +// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. +func (v *Element) Pow22523(x *Element) *Element { + var t0, t1, t2 Element + + t0.Square(x) // x^2 + t1.Square(&t0) // x^4 + t1.Square(&t1) // x^8 + t1.Multiply(x, &t1) // x^9 + t0.Multiply(&t0, &t1) // x^11 + t0.Square(&t0) // x^22 + t0.Multiply(&t1, &t0) // x^31 + t1.Square(&t0) // x^62 + for i := 1; i < 5; i++ { // x^992 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 + t1.Square(&t0) // 2^11 - 2 + for i := 1; i < 10; i++ { // 2^20 - 2^10 + t1.Square(&t1) + } + t1.Multiply(&t1, &t0) // 2^20 - 1 + t2.Square(&t1) // 2^21 - 2 + for i := 1; i < 20; i++ { // 2^40 - 2^20 + t2.Square(&t2) + } + t1.Multiply(&t2, &t1) // 2^40 - 1 + t1.Square(&t1) // 2^41 - 2 + for i := 1; i < 10; i++ { // 2^50 - 2^10 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // 2^50 - 1 + t1.Square(&t0) // 2^51 - 2 + for i := 1; i < 50; i++ { // 2^100 - 2^50 + t1.Square(&t1) + } + t1.Multiply(&t1, &t0) // 2^100 - 1 + t2.Square(&t1) // 2^101 - 2 + for i := 1; i < 100; i++ { // 2^200 - 2^100 + t2.Square(&t2) + } + t1.Multiply(&t2, &t1) // 2^200 - 1 + t1.Square(&t1) // 2^201 - 2 + for i := 1; i < 50; i++ { // 2^250 - 2^50 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // 2^250 - 1 + t0.Square(&t0) // 2^251 - 2 + t0.Square(&t0) // 2^252 - 4 + return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) +} + +// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. +var sqrtM1 = &Element{1718705420411056, 234908883556509, + 2233514472574048, 2117202627021982, 765476049583133} + +// SqrtRatio sets r to the non-negative square root of the ratio of u and v. +// +// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio +// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, +// and returns r and 0. +func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) { + t0 := new(Element) + + // r = (u * v3) * (u * v7)^((p-5)/8) + v2 := new(Element).Square(v) + uv3 := new(Element).Multiply(u, t0.Multiply(v2, v)) + uv7 := new(Element).Multiply(uv3, t0.Square(v2)) + rr := new(Element).Multiply(uv3, t0.Pow22523(uv7)) + + check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2 + + uNeg := new(Element).Negate(u) + correctSignSqrt := check.Equal(u) + flippedSignSqrt := check.Equal(uNeg) + flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1)) + + rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r + // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) + rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI) + + r.Absolute(rr) // Choose the nonnegative square root. 
+ return r, correctSignSqrt | flippedSignSqrt +} diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go new file mode 100644 index 000000000..44dc8e8ca --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64.go @@ -0,0 +1,13 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +// +build amd64,gc,!purego + +package field + +// feMul sets out = a * b. It works like feMulGeneric. +//go:noescape +func feMul(out *Element, a *Element, b *Element) + +// feSquare sets out = a * a. It works like feSquareGeneric. +//go:noescape +func feSquare(out *Element, a *Element) diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s new file mode 100644 index 000000000..0aa1e86d9 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64.s @@ -0,0 +1,378 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +// +build amd64,gc,!purego + +#include "textflag.h" + +// func feMul(out *Element, a *Element, b *Element) +TEXT ·feMul(SB), NOSPLIT, $0-24 + MOVQ a+8(FP), CX + MOVQ b+16(FP), BX + + // r0 = a0×b0 + MOVQ (CX), AX + MULQ (BX) + MOVQ AX, DI + MOVQ DX, SI + + // r0 += 19×a1×b4 + MOVQ 8(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a2×b3 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a3×b2 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a4×b1 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 8(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r1 = a0×b1 + MOVQ (CX), AX + MULQ 8(BX) + MOVQ AX, R9 + MOVQ DX, R8 + + // r1 += a1×b0 + MOVQ 8(CX), AX + MULQ (BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a2×b4 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a3×b3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a4×b2 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r2 = a0×b2 + MOVQ (CX), AX + MULQ 16(BX) + MOVQ AX, R11 + MOVQ DX, R10 + + // r2 += a1×b1 + MOVQ 8(CX), AX + MULQ 8(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += a2×b0 + MOVQ 16(CX), AX + MULQ (BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a3×b4 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a4×b3 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r3 = a0×b3 + MOVQ (CX), AX + MULQ 24(BX) + MOVQ AX, R13 + MOVQ DX, R12 + + // r3 += a1×b2 + MOVQ 8(CX), AX + MULQ 16(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a2×b1 + MOVQ 16(CX), AX + MULQ 8(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a3×b0 + MOVQ 24(CX), AX + MULQ (BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += 19×a4×b4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r4 = a0×b4 + MOVQ (CX), AX + MULQ 32(BX) + MOVQ AX, R15 + MOVQ DX, R14 + + // r4 += a1×b3 + MOVQ 8(CX), AX + MULQ 24(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a2×b2 + MOVQ 16(CX), AX + MULQ 16(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a3×b1 + MOVQ 24(CX), AX + MULQ 8(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a4×b0 + MOVQ 32(CX), AX + MULQ (BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ 
$0x0d, DI, SI + SHLQ $0x0d, R9, R8 + SHLQ $0x0d, R11, R10 + SHLQ $0x0d, R13, R12 + SHLQ $0x0d, R15, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Second reduction chain (carryPropagate) + MOVQ DI, SI + SHRQ $0x33, SI + MOVQ R9, R8 + SHRQ $0x33, R8 + MOVQ R11, R10 + SHRQ $0x33, R10 + MOVQ R13, R12 + SHRQ $0x33, R12 + MOVQ R15, R14 + SHRQ $0x33, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Store output + MOVQ out+0(FP), AX + MOVQ DI, (AX) + MOVQ R9, 8(AX) + MOVQ R11, 16(AX) + MOVQ R13, 24(AX) + MOVQ R15, 32(AX) + RET + +// func feSquare(out *Element, a *Element) +TEXT ·feSquare(SB), NOSPLIT, $0-16 + MOVQ a+8(FP), CX + + // r0 = l0×l0 + MOVQ (CX), AX + MULQ (CX) + MOVQ AX, SI + MOVQ DX, BX + + // r0 += 38×l1×l4 + MOVQ 8(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r0 += 38×l2×l3 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 24(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r1 = 2×l0×l1 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 8(CX) + MOVQ AX, R8 + MOVQ DX, DI + + // r1 += 38×l2×l4 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r1 += 19×l3×l3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r2 = 2×l0×l2 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 16(CX) + MOVQ AX, R10 + MOVQ DX, R9 + + // r2 += l1×l1 + MOVQ 8(CX), AX + MULQ 8(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r2 += 38×l3×l4 + MOVQ 24(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r3 = 2×l0×l3 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 24(CX) + MOVQ AX, R12 + MOVQ DX, R11 + + // r3 += 2×l1×l2 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 16(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r3 += 19×l4×l4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r4 = 2×l0×l4 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 32(CX) + MOVQ AX, R14 + MOVQ DX, R13 + + // r4 += 2×l1×l3 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 24(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // r4 += l2×l2 + MOVQ 16(CX), AX + MULQ 16(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, SI, BX + SHLQ $0x0d, R8, DI + SHLQ $0x0d, R10, R9 + SHLQ $0x0d, R12, R11 + SHLQ $0x0d, R14, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Second reduction chain (carryPropagate) + MOVQ SI, BX + SHRQ $0x33, BX + MOVQ R8, DI + SHRQ $0x33, DI + MOVQ R10, R9 + SHRQ $0x33, R9 + MOVQ R12, R11 + SHRQ $0x33, R11 + MOVQ R14, R13 + SHRQ $0x33, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Store output + MOVQ out+0(FP), AX + MOVQ SI, (AX) + MOVQ R8, 8(AX) + MOVQ R10, 16(AX) + MOVQ R12, 24(AX) + MOVQ R14, 32(AX) + RET diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go new file mode 100644 index 000000000..ddb6c9b8f --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go @@ -0,0 +1,12 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || !gc || purego +// +build !amd64 !gc purego + +package field + +func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } + +func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go new file mode 100644 index 000000000..af459ef51 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64.go @@ -0,0 +1,16 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && gc && !purego +// +build arm64,gc,!purego + +package field + +//go:noescape +func carryPropagate(v *Element) + +func (v *Element) carryPropagate() *Element { + carryPropagate(v) + return v +} diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s new file mode 100644 index 000000000..751ab2ada --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64.s @@ -0,0 +1,42 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64,gc,!purego + +#include "textflag.h" + +// carryPropagate works exactly like carryPropagateGeneric and uses the +// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but +// avoids loading R0-R4 twice and uses LDP and STP. +// +// See https://golang.org/issues/43145 for the main compiler issue. +// +// func carryPropagate(v *Element) +TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 + MOVD v+0(FP), R20 + + LDP 0(R20), (R0, R1) + LDP 16(R20), (R2, R3) + MOVD 32(R20), R4 + + AND $0x7ffffffffffff, R0, R10 + AND $0x7ffffffffffff, R1, R11 + AND $0x7ffffffffffff, R2, R12 + AND $0x7ffffffffffff, R3, R13 + AND $0x7ffffffffffff, R4, R14 + + ADD R0>>51, R11, R11 + ADD R1>>51, R12, R12 + ADD R2>>51, R13, R13 + ADD R3>>51, R14, R14 + // R4>>51 * 19 + R10 -> R10 + LSR $51, R4, R21 + MOVD $19, R22 + MADD R22, R10, R21, R10 + + STP (R10, R11), 0(R20) + STP (R12, R13), 16(R20) + MOVD R14, 32(R20) + + RET diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go new file mode 100644 index 000000000..234a5b2e5 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go @@ -0,0 +1,12 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !arm64 || !gc || purego +// +build !arm64 !gc purego + +package field + +func (v *Element) carryPropagate() *Element { + return v.carryPropagateGeneric() +} diff --git a/vendor/filippo.io/edwards25519/field/fe_extra.go b/vendor/filippo.io/edwards25519/field/fe_extra.go new file mode 100644 index 000000000..699238e9e --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_extra.go @@ -0,0 +1,50 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package field + +import "errors" + +// This file contains additional functionality that is not included in the +// upstream crypto/ed25519/internal/edwards25519/field package. 
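The carry-propagation routines above and SetWideBytes below both rest on the same reduction identity, a·2²⁵⁵ + b ≡ 19·a + b (mod 2²⁵⁵ − 19), since 2²⁵⁵ ≡ 19 in this field. A standalone math/big sanity check of that identity; this is a hedged sketch, not part of the vendored code:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the field order used throughout this package.
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))

	a := big.NewInt(123456789)
	b := big.NewInt(987654321)

	// lhs = (a*2^255 + b) mod p
	lhs := new(big.Int).Lsh(a, 255)
	lhs.Add(lhs, b).Mod(lhs, p)

	// rhs = (19*a + b) mod p
	rhs := new(big.Int).Mul(a, big.NewInt(19))
	rhs.Add(rhs, b).Mod(rhs, p)

	fmt.Println("identity holds:", lhs.Cmp(rhs) == 0) // expected: true
}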
+ +// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which +// is reduced modulo the field order. If x is not of the right length, +// SetWideBytes returns nil and an error, and the receiver is unchanged. +// +// SetWideBytes is not necessary to select a uniformly distributed value, and is +// only provided for compatibility: SetBytes can be used instead as the chance +// of bias is less than 2⁻²⁵⁰. +func (v *Element) SetWideBytes(x []byte) (*Element, error) { + if len(x) != 64 { + return nil, errors.New("edwards25519: invalid SetWideBytes input size") + } + + // Split the 64 bytes into two elements, and extract the most significant + // bit of each, which is ignored by SetBytes. + lo, _ := new(Element).SetBytes(x[:32]) + loMSB := uint64(x[31] >> 7) + hi, _ := new(Element).SetBytes(x[32:]) + hiMSB := uint64(x[63] >> 7) + + // The output we want is + // + // v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹ + // + // which applying the reduction identity comes out to + // + // v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19² + // + // l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value + // (loMSB * 19), a 6 bits value (hi.l0 * 2 * 19), and a 10 bits value + // (hiMSB * 2 * 19²), so it fits in a uint64. + + v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19 + v.l1 = lo.l1 + hi.l1*2*19 + v.l2 = lo.l2 + hi.l2*2*19 + v.l3 = lo.l3 + hi.l3*2*19 + v.l4 = lo.l4 + hi.l4*2*19 + + return v.carryPropagate(), nil +} diff --git a/vendor/filippo.io/edwards25519/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go new file mode 100644 index 000000000..d6667b27b --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_generic.go @@ -0,0 +1,266 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package field + +import "math/bits" + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +// mul64 returns a * b. +func mul64(a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + return uint128{lo, hi} +} + +// addMul64 returns v + a * b. +func addMul64(v uint128, a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + lo, c := bits.Add64(lo, v.lo, 0) + hi, _ = bits.Add64(hi, v.hi, c) + return uint128{lo, hi} +} + +// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. +func shiftRightBy51(a uint128) uint64 { + return (a.hi << (64 - 51)) | (a.lo >> 51) +} + +func feMulGeneric(v, a, b *Element) { + a0 := a.l0 + a1 := a.l1 + a2 := a.l2 + a3 := a.l3 + a4 := a.l4 + + b0 := b.l0 + b1 := b.l1 + b2 := b.l2 + b3 := b.l3 + b4 := b.l4 + + // Limb multiplication works like pen-and-paper columnar multiplication, but + // with 51-bit limbs instead of digits. + // + // a4 a3 a2 a1 a0 x + // b4 b3 b2 b1 b0 = + // ------------------------ + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a4b1 a3b1 a2b1 a1b1 a0b1 + + // a4b2 a3b2 a2b2 a1b2 a0b2 + + // a4b3 a3b3 a2b3 a1b3 a0b3 + + // a4b4 a3b4 a2b4 a1b4 a0b4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to + // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5, + // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. + // + // Reduction can be carried out simultaneously to multiplication. 
For + // example, we do not compute r5: whenever the result of a multiplication + // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. + // + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a3b1 a2b1 a1b1 a0b1 19×a4b1 + + // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + + // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + + // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // Finally we add up the columns into wide, overlapping limbs. + + a1_19 := a1 * 19 + a2_19 := a2 * 19 + a3_19 := a3 * 19 + a4_19 := a4 * 19 + + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + r0 := mul64(a0, b0) + r0 = addMul64(r0, a1_19, b4) + r0 = addMul64(r0, a2_19, b3) + r0 = addMul64(r0, a3_19, b2) + r0 = addMul64(r0, a4_19, b1) + + // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) + r1 := mul64(a0, b1) + r1 = addMul64(r1, a1, b0) + r1 = addMul64(r1, a2_19, b4) + r1 = addMul64(r1, a3_19, b3) + r1 = addMul64(r1, a4_19, b2) + + // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) + r2 := mul64(a0, b2) + r2 = addMul64(r2, a1, b1) + r2 = addMul64(r2, a2, b0) + r2 = addMul64(r2, a3_19, b4) + r2 = addMul64(r2, a4_19, b3) + + // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 + r3 := mul64(a0, b3) + r3 = addMul64(r3, a1, b2) + r3 = addMul64(r3, a2, b1) + r3 = addMul64(r3, a3, b0) + r3 = addMul64(r3, a4_19, b4) + + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + r4 := mul64(a0, b4) + r4 = addMul64(r4, a1, b3) + r4 = addMul64(r4, a2, b2) + r4 = addMul64(r4, a3, b1) + r4 = addMul64(r4, a4, b0) + + // After the multiplication, we need to reduce (carry) the five coefficients + // to obtain a result with limbs that are at most slightly larger than 2⁵¹, + // to respect the Element invariant. + // + // Overall, the reduction works the same as carryPropagate, except with + // wider inputs: we take the carry for each coefficient by shifting it right + // by 51, and add it to the limb above it. The top carry is multiplied by 19 + // according to the reduction identity and added to the lowest limb. + // + // The largest coefficient (r0) will be at most 111 bits, which guarantees + // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. + // + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) + // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² + // r0 < 2⁷ × 2⁵² × 2⁵² + // r0 < 2¹¹¹ + // + // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most + // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and + // allows us to easily apply the reduction identity. + // + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + // r4 < 5 × 2⁵² × 2⁵² + // r4 < 2¹⁰⁷ + // + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + // Now all coefficients fit into 64-bit registers but are still too large to + // be passed around as a Element. We therefore do one last carry chain, + // where the carries will be small enough to fit in the wiggle room above 2⁵¹. 
+ *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +func feSquareGeneric(v, a *Element) { + l0 := a.l0 + l1 := a.l1 + l2 := a.l2 + l3 := a.l3 + l4 := a.l4 + + // Squaring works precisely like multiplication above, but thanks to its + // symmetry we get to group a few terms together. + // + // l4 l3 l2 l1 l0 x + // l4 l3 l2 l1 l0 = + // ------------------------ + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l4l1 l3l1 l2l1 l1l1 l0l1 + + // l4l2 l3l2 l2l2 l1l2 l0l2 + + // l4l3 l3l3 l2l3 l1l3 l0l3 + + // l4l4 l3l4 l2l4 l1l4 l0l4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l3l1 l2l1 l1l1 l0l1 19×l4l1 + + // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + + // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + + // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with + // only three Mul64 and four Add64, instead of five and eight. + + l0_2 := l0 * 2 + l1_2 := l1 * 2 + + l1_38 := l1 * 38 + l2_38 := l2 * 38 + l3_38 := l3 * 38 + + l3_19 := l3 * 19 + l4_19 := l4 * 19 + + // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) + r0 := mul64(l0, l0) + r0 = addMul64(r0, l1_38, l4) + r0 = addMul64(r0, l2_38, l3) + + // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 + r1 := mul64(l0_2, l1) + r1 = addMul64(r1, l2_38, l4) + r1 = addMul64(r1, l3_19, l3) + + // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 + r2 := mul64(l0_2, l2) + r2 = addMul64(r2, l1, l1) + r2 = addMul64(r2, l3_38, l4) + + // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 + r3 := mul64(l0_2, l3) + r3 = addMul64(r3, l1_2, l2) + r3 = addMul64(r3, l4_19, l4) + + // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 + r4 := mul64(l0_2, l4) + r4 = addMul64(r4, l1_2, l3) + r4 = addMul64(r4, l2, l2) + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +// carryPropagate brings the limbs below 52 bits by applying the reduction +// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. +func (v *Element) carryPropagateGeneric() *Element { + c0 := v.l0 >> 51 + c1 := v.l1 >> 51 + c2 := v.l2 >> 51 + c3 := v.l3 >> 51 + c4 := v.l4 >> 51 + + // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and + // the final l0 will be at most 52 bits. Similarly for the rest. + v.l0 = v.l0&maskLow51Bits + c4*19 + v.l1 = v.l1&maskLow51Bits + c0 + v.l2 = v.l2&maskLow51Bits + c1 + v.l3 = v.l3&maskLow51Bits + c2 + v.l4 = v.l4&maskLow51Bits + c3 + + return v +} diff --git a/vendor/filippo.io/edwards25519/scalar.go b/vendor/filippo.io/edwards25519/scalar.go new file mode 100644 index 000000000..3df2fb936 --- /dev/null +++ b/vendor/filippo.io/edwards25519/scalar.go @@ -0,0 +1,1030 @@ +// Copyright (c) 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
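carryPropagateGeneric above is the invariant-restoring step that every generic field operation funnels through. The sketch below re-implements the same 51-bit carry pass on a bare [5]uint64 and confirms, via math/big, that it preserves the value mod p while bringing every limb under 2⁵²; the helper names are mine, not the package's:

package main

import (
	"fmt"
	"math/big"
)

const maskLow51 = (1 << 51) - 1

// carry51 mirrors carryPropagateGeneric: push each limb's excess above
// 51 bits into the next limb, folding the top carry back in as 19*c4.
func carry51(l [5]uint64) [5]uint64 {
	c0, c1, c2, c3, c4 := l[0]>>51, l[1]>>51, l[2]>>51, l[3]>>51, l[4]>>51
	l[0] = l[0]&maskLow51 + c4*19
	l[1] = l[1]&maskLow51 + c0
	l[2] = l[2]&maskLow51 + c1
	l[3] = l[3]&maskLow51 + c2
	l[4] = l[4]&maskLow51 + c3
	return l
}

// toInt evaluates l0 + l1*2^51 + l2*2^102 + l3*2^153 + l4*2^204.
func toInt(l [5]uint64) *big.Int {
	v := new(big.Int)
	for i := 4; i >= 0; i-- {
		v.Lsh(v, 51)
		v.Add(v, new(big.Int).SetUint64(l[i]))
	}
	return v
}

func main() {
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))

	// Limbs deliberately above the 2^51 invariant.
	x := [5]uint64{1<<52 + 7, 1<<52 + 5, 3, 1 << 51, 1<<52 + 9}
	y := carry51(x)

	before := new(big.Int).Mod(toInt(x), p)
	after := new(big.Int).Mod(toInt(y), p)
	fmt.Println("value preserved mod p:", before.Cmp(after) == 0)                      // true
	fmt.Println("limbs below 2^52:", y[0]>>52|y[1]>>52|y[2]>>52|y[3]>>52|y[4]>>52 == 0) // true
}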
+ +package edwards25519 + +import ( + "crypto/subtle" + "encoding/binary" + "errors" +) + +// A Scalar is an integer modulo +// +// l = 2^252 + 27742317777372353535851937790883648493 +// +// which is the prime order of the edwards25519 group. +// +// This type works similarly to math/big.Int, and all arguments and +// receivers are allowed to alias. +// +// The zero value is a valid zero element. +type Scalar struct { + // s is the Scalar value in little-endian. The value is always reduced + // modulo l between operations. + s [32]byte +} + +var ( + scZero = Scalar{[32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + + scOne = Scalar{[32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + + scMinusOne = Scalar{[32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}} +) + +// NewScalar returns a new zero Scalar. +func NewScalar() *Scalar { + return &Scalar{} +} + +// MultiplyAdd sets s = x * y + z mod l, and returns s. +func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar { + scMulAdd(&s.s, &x.s, &y.s, &z.s) + return s +} + +// Add sets s = x + y mod l, and returns s. +func (s *Scalar) Add(x, y *Scalar) *Scalar { + // s = 1 * x + y mod l + scMulAdd(&s.s, &scOne.s, &x.s, &y.s) + return s +} + +// Subtract sets s = x - y mod l, and returns s. +func (s *Scalar) Subtract(x, y *Scalar) *Scalar { + // s = -1 * y + x mod l + scMulAdd(&s.s, &scMinusOne.s, &y.s, &x.s) + return s +} + +// Negate sets s = -x mod l, and returns s. +func (s *Scalar) Negate(x *Scalar) *Scalar { + // s = -1 * x + 0 mod l + scMulAdd(&s.s, &scMinusOne.s, &x.s, &scZero.s) + return s +} + +// Multiply sets s = x * y mod l, and returns s. +func (s *Scalar) Multiply(x, y *Scalar) *Scalar { + // s = x * y + 0 mod l + scMulAdd(&s.s, &x.s, &y.s, &scZero.s) + return s +} + +// Set sets s = x, and returns s. +func (s *Scalar) Set(x *Scalar) *Scalar { + *s = *x + return s +} + +// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer. +// If x is not of the right length, SetUniformBytes returns nil and an error, +// and the receiver is unchanged. +// +// SetUniformBytes can be used to set s to an uniformly distributed value given +// 64 uniformly distributed random bytes. +func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) { + if len(x) != 64 { + return nil, errors.New("edwards25519: invalid SetUniformBytes input length") + } + var wideBytes [64]byte + copy(wideBytes[:], x[:]) + scReduce(&s.s, &wideBytes) + return s, nil +} + +// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of +// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes +// returns nil and an error, and the receiver is unchanged. +func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) { + if len(x) != 32 { + return nil, errors.New("invalid scalar length") + } + ss := &Scalar{} + copy(ss.s[:], x) + if !isReduced(ss) { + return nil, errors.New("invalid scalar encoding") + } + s.s = ss.s + return s, nil +} + +// isReduced returns whether the given scalar is reduced modulo l. 
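Since every Scalar operation above reduces modulo l, algebraic identities hold exactly and can be spot-checked through the exported API, for example s · s⁻¹ = 1 using Invert from extra.go. A minimal sketch, assuming a module that imports this vendored package; nothing below is part of the diff:

package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	var wide [64]byte
	if _, err := rand.Read(wide[:]); err != nil {
		panic(err)
	}
	s, _ := edwards25519.NewScalar().SetUniformBytes(wide[:]) // random mod l

	inv := edwards25519.NewScalar().Invert(s)          // inv = s^-1 mod l
	prod := edwards25519.NewScalar().Multiply(s, inv)  // prod = s * s^-1

	oneBytes := [32]byte{1} // canonical little-endian encoding of 1
	one, _ := edwards25519.NewScalar().SetCanonicalBytes(oneBytes[:])

	fmt.Println("s * s^-1 == 1:", prod.Equal(one) == 1) // true (s != 0 w.h.p.)
}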
+func isReduced(s *Scalar) bool { + for i := len(s.s) - 1; i >= 0; i-- { + switch { + case s.s[i] > scMinusOne.s[i]: + return false + case s.s[i] < scMinusOne.s[i]: + return true + } + } + return true +} + +// SetBytesWithClamping applies the buffer pruning described in RFC 8032, +// Section 5.1.5 (also known as clamping) and sets s to the result. The input +// must be 32 bytes, and it is not modified. If x is not of the right length, +// SetBytesWithClamping returns nil and an error, and the receiver is unchanged. +// +// Note that since Scalar values are always reduced modulo the prime order of +// the curve, the resulting value will not preserve any of the cofactor-clearing +// properties that clamping is meant to provide. It will however work as +// expected as long as it is applied to points on the prime order subgroup, like +// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the +// irrelevant RFC 7748 clamping, but it is now required for compatibility. +func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) { + // The description above omits the purpose of the high bits of the clamping + // for brevity, but those are also lost to reductions, and are also + // irrelevant to edwards25519 as they protect against a specific + // implementation bug that was once observed in a generic Montgomery ladder. + if len(x) != 32 { + return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length") + } + var wideBytes [64]byte + copy(wideBytes[:], x[:]) + wideBytes[0] &= 248 + wideBytes[31] &= 63 + wideBytes[31] |= 64 + scReduce(&s.s, &wideBytes) + return s, nil +} + +// Bytes returns the canonical 32-byte little-endian encoding of s. +func (s *Scalar) Bytes() []byte { + buf := make([]byte, 32) + copy(buf, s.s[:]) + return buf +} + +// Equal returns 1 if s and t are equal, and 0 otherwise. +func (s *Scalar) Equal(t *Scalar) int { + return subtle.ConstantTimeCompare(s.s[:], t.s[:]) +} + +// scMulAdd and scReduce are ported from the public domain, “ref10” +// implementation of ed25519 from SUPERCOP. + +func load3(in []byte) int64 { + r := int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + r := int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
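scMulAdd below is the ref10-ported workhorse behind MultiplyAdd, Add, Subtract, Negate, and Multiply; its (a·b + c) mod l contract can be cross-checked against math/big. A hedged sketch with illustrative helper names, not anything from the library:

package main

import (
	"fmt"
	"math/big"

	"filippo.io/edwards25519"
)

// order is l = 2^252 + 27742317777372353535851937790883648493.
var order, _ = new(big.Int).SetString(
	"7237005577332262213973186563042994240857116359379907606001950938285454250989", 10)

// toScalar converts a big.Int in [0, l) to an edwards25519.Scalar.
func toScalar(x *big.Int) *edwards25519.Scalar {
	be := x.Bytes() // big-endian, at most 32 bytes for values < l
	var le [32]byte
	for i, b := range be {
		le[len(be)-1-i] = b // reverse into little-endian
	}
	s, err := edwards25519.NewScalar().SetCanonicalBytes(le[:])
	if err != nil {
		panic(err)
	}
	return s
}

func main() {
	a, b, c := big.NewInt(1234567), big.NewInt(7654321), big.NewInt(42)

	got := edwards25519.NewScalar().MultiplyAdd(toScalar(a), toScalar(b), toScalar(c))

	want := new(big.Int).Mul(a, b)
	want.Add(want, c).Mod(want, order)

	fmt.Println("matches big.Int:", got.Equal(toScalar(want)) == 1) // true
}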
+func scMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 
<< 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 
+= s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] 
= byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func scReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 
+ s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 
3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} + +// nonAdjacentForm computes a width-w non-adjacent form for this scalar. +// +// w must be between 2 and 8, or nonAdjacentForm will panic. +func (s *Scalar) nonAdjacentForm(w uint) [256]int8 { + // This implementation is adapted from the one + // in curve25519-dalek and is documented there: + // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871 + if s.s[31] > 127 { + panic("scalar has high bit set illegally") + } + if w < 2 { + panic("w must be at least 2 by the definition of NAF") + } else if w > 8 { + panic("NAF digits must fit in int8") + } + + var naf [256]int8 + var digits [5]uint64 + + for i := 0; i < 4; i++ { + digits[i] = binary.LittleEndian.Uint64(s.s[i*8:]) + } + + width := uint64(1 << w) + windowMask := uint64(width - 1) + + pos := uint(0) + carry := uint64(0) + for pos < 256 { + indexU64 := pos / 64 + indexBit := pos % 64 + var bitBuf uint64 + if indexBit < 64-w { + // This window's bits are contained in a single u64 + bitBuf = digits[indexU64] >> indexBit + } else { + // Combine the current 64 bits with bits from the next 64 + bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit)) + } + + // Add carry into the current window + window := carry + (bitBuf & windowMask) + + if window&1 == 0 { + // If the window value is even, preserve the carry and continue. + // Why is the carry preserved? + // If carry == 0 and window & 1 == 0, + // then the next carry should be 0 + // If carry == 1 and window & 1 == 0, + // then bit_buf & 1 == 1 so the next carry should be 1 + pos += 1 + continue + } + + if window < width/2 { + carry = 0 + naf[pos] = int8(window) + } else { + carry = 1 + naf[pos] = int8(window) - int8(width) + } + + pos += w + } + return naf +} + +func (s *Scalar) signedRadix16() [64]int8 { + if s.s[31] > 127 { + panic("scalar has high bit set illegally") + } + + var digits [64]int8 + + // Compute unsigned radix-16 digits: + for i := 0; i < 32; i++ { + digits[2*i] = int8(s.s[i] & 15) + digits[2*i+1] = int8((s.s[i] >> 4) & 15) + } + + // Recenter coefficients: + for i := 0; i < 63; i++ { + carry := (digits[i] + 8) >> 4 + digits[i] -= carry << 4 + digits[i+1] += carry + } + + return digits +} diff --git a/vendor/filippo.io/edwards25519/scalarmult.go b/vendor/filippo.io/edwards25519/scalarmult.go new file mode 100644 index 000000000..f7ca3cef9 --- /dev/null +++ b/vendor/filippo.io/edwards25519/scalarmult.go @@ -0,0 +1,214 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import "sync" + +// basepointTable is a set of 32 affineLookupTables, where table i is generated +// from 256i * basepoint. It is precomputed the first time it's used. 
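+// (Concretely, table[i] holds the multiples 1..8 of 256^i * B: the eight
+// doublings in the inner loop below advance the running point by a factor
+// of 256 between successive tables.)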
+func basepointTable() *[32]affineLookupTable { + basepointTablePrecomp.initOnce.Do(func() { + p := NewGeneratorPoint() + for i := 0; i < 32; i++ { + basepointTablePrecomp.table[i].FromP3(p) + for j := 0; j < 8; j++ { + p.Add(p, p) + } + } + }) + return &basepointTablePrecomp.table +} + +var basepointTablePrecomp struct { + table [32]affineLookupTable + initOnce sync.Once +} + +// ScalarBaseMult sets v = x * B, where B is the canonical generator, and +// returns v. +// +// The scalar multiplication is done in constant time. +func (v *Point) ScalarBaseMult(x *Scalar) *Point { + basepointTable := basepointTable() + + // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i ) + // as described in the Ed25519 paper + // + // Group even and odd coefficients + // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B + // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B + // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B + // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B) + // + // We use a lookup table for each i to get x_i*16^(2*i)*B + // and do four doublings to multiply by 16. + digits := x.signedRadix16() + + multiple := &affineCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + + // Accumulate the odd components first + v.Set(NewIdentityPoint()) + for i := 1; i < 64; i += 2 { + basepointTable[i/2].SelectInto(multiple, digits[i]) + tmp1.AddAffine(v, multiple) + v.fromP1xP1(tmp1) + } + + // Multiply by 16 + tmp2.FromP3(v) // tmp2 = v in P2 coords + tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords + v.fromP1xP1(tmp1) // now v = 16*(odd components) + + // Accumulate the even components + for i := 0; i < 64; i += 2 { + basepointTable[i/2].SelectInto(multiple, digits[i]) + tmp1.AddAffine(v, multiple) + v.fromP1xP1(tmp1) + } + + return v +} + +// ScalarMult sets v = x * q, and returns v. +// +// The scalar multiplication is done in constant time. +func (v *Point) ScalarMult(x *Scalar, q *Point) *Point { + checkInitialized(q) + + var table projLookupTable + table.FromP3(q) + + // Write x = sum(x_i * 16^i) + // so x*Q = sum( Q*x_i*16^i ) + // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... 
) + // <------compute inside out--------- + // + // We use the lookup table to get the x_i*Q values + // and do four doublings to compute 16*Q + digits := x.signedRadix16() + + // Unwrap first loop iteration to save computing 16*identity + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + table.SelectInto(multiple, digits[63]) + + v.Set(NewIdentityPoint()) + tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords + for i := 62; i >= 0; i-- { + tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords + v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords + table.SelectInto(multiple, digits[i]) + tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords + } + v.fromP1xP1(tmp1) + return v +} + +// basepointNafTable is the nafLookupTable8 for the basepoint. +// It is precomputed the first time it's used. +func basepointNafTable() *nafLookupTable8 { + basepointNafTablePrecomp.initOnce.Do(func() { + basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint()) + }) + return &basepointNafTablePrecomp.table +} + +var basepointNafTablePrecomp struct { + table nafLookupTable8 + initOnce sync.Once +} + +// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical +// generator, and returns v. +// +// Execution time depends on the inputs. +func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point { + checkInitialized(A) + + // Similarly to the single variable-base approach, we compute + // digits and use them with a lookup table. However, because + // we are allowed to do variable-time operations, we don't + // need constant-time lookups or constant-time digit + // computations. + // + // So we use a non-adjacent form of some width w instead of + // radix 16. This is like a binary representation (one digit + // for each binary place) but we allow the digits to grow in + // magnitude up to 2^{w-1} so that the nonzero digits are as + // sparse as possible. Intuitively, this "condenses" the + // "mass" of the scalar onto sparse coefficients (meaning + // fewer additions). + + basepointNafTable := basepointNafTable() + var aTable nafLookupTable5 + aTable.FromP3(A) + // Because the basepoint is fixed, we can use a wider NAF + // corresponding to a bigger table. + aNaf := a.nonAdjacentForm(5) + bNaf := b.nonAdjacentForm(8) + + // Find the first nonzero coefficient. + i := 255 + for j := i; j >= 0; j-- { + if aNaf[j] != 0 || bNaf[j] != 0 { + break + } + } + + multA := &projCached{} + multB := &affineCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + tmp2.Zero() + + // Move from high to low bits, doubling the accumulator + // at each iteration and checking whether there is a nonzero + // coefficient to look up a multiple of. + for ; i >= 0; i-- { + tmp1.Double(tmp2) + + // Only update v if we have a nonzero coeff to add in. 
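+		// NAF digits are odd and bounded (|aNaf[i]| <= 15, |bNaf[i]| <= 127),
+		// which is exactly what the nafLookupTable5/nafLookupTable8 selectors
+		// expect when they index their tables by digit/2.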
+ if aNaf[i] > 0 { + v.fromP1xP1(tmp1) + aTable.SelectInto(multA, aNaf[i]) + tmp1.Add(v, multA) + } else if aNaf[i] < 0 { + v.fromP1xP1(tmp1) + aTable.SelectInto(multA, -aNaf[i]) + tmp1.Sub(v, multA) + } + + if bNaf[i] > 0 { + v.fromP1xP1(tmp1) + basepointNafTable.SelectInto(multB, bNaf[i]) + tmp1.AddAffine(v, multB) + } else if bNaf[i] < 0 { + v.fromP1xP1(tmp1) + basepointNafTable.SelectInto(multB, -bNaf[i]) + tmp1.SubAffine(v, multB) + } + + tmp2.FromP1xP1(tmp1) + } + + v.fromP2(tmp2) + return v +} diff --git a/vendor/filippo.io/edwards25519/tables.go b/vendor/filippo.io/edwards25519/tables.go new file mode 100644 index 000000000..beec956bf --- /dev/null +++ b/vendor/filippo.io/edwards25519/tables.go @@ -0,0 +1,129 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import ( + "crypto/subtle" +) + +// A dynamic lookup table for variable-base, constant-time scalar muls. +type projLookupTable struct { + points [8]projCached +} + +// A precomputed lookup table for fixed-base, constant-time scalar muls. +type affineLookupTable struct { + points [8]affineCached +} + +// A dynamic lookup table for variable-base, variable-time scalar muls. +type nafLookupTable5 struct { + points [8]projCached +} + +// A precomputed lookup table for fixed-base, variable-time scalar muls. +type nafLookupTable8 struct { + points [64]affineCached +} + +// Constructors. + +// Builds a lookup table at runtime. Fast. +func (v *projLookupTable) FromP3(q *Point) { + // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q + // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q + v.points[0].FromP3(q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + // Compute (i+1)*Q as Q + i*Q and convert to a ProjCached + // This is needlessly complicated because the API has explicit + // receivers instead of creating stack objects and relying on RVO + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i]))) + } +} + +// This is not optimised for speed; fixed-base tables should be precomputed. +func (v *affineLookupTable) FromP3(q *Point) { + // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q + // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q + v.points[0].FromP3(q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + // Compute (i+1)*Q as Q + i*Q and convert to AffineCached + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i]))) + } +} + +// Builds a lookup table at runtime. Fast. +func (v *nafLookupTable5) FromP3(q *Point) { + // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q + // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q + v.points[0].FromP3(q) + q2 := Point{} + q2.Add(q, q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i]))) + } +} + +// This is not optimised for speed; fixed-base tables should be precomputed. +func (v *nafLookupTable8) FromP3(q *Point) { + v.points[0].FromP3(q) + q2 := Point{} + q2.Add(q, q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 63; i++ { + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i]))) + } +} + +// Selectors. + +// Set dest to x*Q, where -8 <= x <= 8, in constant time.
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) { + // Compute xabs = |x| + xmask := x >> 7 + xabs := uint8((x + xmask) ^ xmask) + + dest.Zero() + for j := 1; j <= 8; j++ { + // Set dest = j*Q if |x| = j + cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + dest.Select(&v.points[j-1], dest, cond) + } + // Now dest = |x|*Q, conditionally negate to get x*Q + dest.CondNeg(int(xmask & 1)) +} + +// Set dest to x*Q, where -8 <= x <= 8, in constant time. +func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) { + // Compute xabs = |x| + xmask := x >> 7 + xabs := uint8((x + xmask) ^ xmask) + + dest.Zero() + for j := 1; j <= 8; j++ { + // Set dest = j*Q if |x| = j + cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + dest.Select(&v.points[j-1], dest, cond) + } + // Now dest = |x|*Q, conditionally negate to get x*Q + dest.CondNeg(int(xmask & 1)) +} + +// Given odd x with 0 < x < 2^4, return x*Q (in variable time). +func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) { + *dest = v.points[x/2] +} + +// Given odd x with 0 < x < 2^7, return x*Q (in variable time). +func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) { + *dest = v.points[x/2] +} diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/LICENSE b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
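The SelectInto selectors in the vendored tables.go above combine a branch-free absolute value with a crypto/subtle scan over every table slot, so the lookup's timing is independent of the secret digit. A minimal standalone sketch of the same pattern, with plain integers standing in for cached points (the names selectAbs and table are illustrative, not part of the vendored package):

package main

import (
	"crypto/subtle"
	"fmt"
)

// selectAbs mirrors projLookupTable.SelectInto: compute |x| without
// branching, compare |x| against every slot index in constant time,
// then account for the sign.
func selectAbs(table [8]int, x int8) int {
	xmask := x >> 7                    // 0 if x >= 0, all ones if x < 0
	xabs := uint8((x + xmask) ^ xmask) // |x|, valid for -8 <= x <= 8

	dest := 0
	for j := 1; j <= 8; j++ {
		cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) // 1 iff |x| == j
		dest = subtle.ConstantTimeSelect(cond, table[j-1], dest)
	}
	// The vendored code negates in constant time via CondNeg; a plain
	// branch is used here only to keep the sketch short.
	if xmask&1 == 1 {
		dest = -dest
	}
	return dest
}

func main() {
	table := [8]int{10, 20, 30, 40, 50, 60, 70, 80} // stands in for Q, 2Q, ..., 8Q
	fmt.Println(selectAbs(table, 3))  // 30, the analogue of 3*Q
	fmt.Println(selectAbs(table, -3)) // -30, the analogue of -3*Q
	fmt.Println(selectAbs(table, 0))  // 0, the identity case
}

Scanning all eight slots on every call is the point: indexing table[j-1] directly by the digit would leak it through cache and branch timing.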
diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go new file mode 100644 index 000000000..5706fe2c7 --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go @@ -0,0 +1,51 @@ +package helper + +import ( + "fmt" + "github.com/aliyun/credentials-go/credentials" + "os" +) + +const ( + EnvRoleArn = "ALIBABA_CLOUD_ROLE_ARN" + EnvOidcProviderArn = "ALIBABA_CLOUD_OIDC_PROVIDER_ARN" + EnvOidcTokenFile = "ALIBABA_CLOUD_OIDC_TOKEN_FILE" +) + +func HaveOidcCredentialRequiredEnv() bool { + return os.Getenv(EnvRoleArn) != "" && + os.Getenv(EnvOidcProviderArn) != "" && + os.Getenv(EnvOidcTokenFile) != "" +} + +func NewOidcCredential(sessionName string) (credential credentials.Credential, err error) { + return GetOidcCredential(sessionName) +} + +// Deprecated: Use NewOidcCredential instead +func GetOidcCredential(sessionName string) (credential credentials.Credential, err error) { + roleArn := os.Getenv(EnvRoleArn) + oidcArn := os.Getenv(EnvOidcProviderArn) + tokenFile := os.Getenv(EnvOidcTokenFile) + if roleArn == "" { + return nil, fmt.Errorf("environment variable %q is missing", EnvRoleArn) + } + if oidcArn == "" { + return nil, fmt.Errorf("environment variable %q is missing", EnvOidcProviderArn) + } + if tokenFile == "" { + return nil, fmt.Errorf("environment variable %q is missing", EnvOidcTokenFile) + } + if _, err := os.Stat(tokenFile); err != nil { + return nil, fmt.Errorf("unable to read file at %q: %s", tokenFile, err) + } + + config := new(credentials.Config). + SetType("oidc_role_arn"). + SetOIDCProviderArn(oidcArn). + SetOIDCTokenFilePath(tokenFile). + SetRoleArn(roleArn). + SetRoleSessionName(sessionName) + + return credentials.NewCredential(config) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt new file mode 100644 index 000000000..05b0ebf5b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
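Returning to the Alibaba Cloud OIDC helper vendored above: it only constructs a credential when all three ALIBABA_CLOUD_* variables are set and the token file exists. A minimal usage sketch (the session name is arbitrary and the error handling abbreviated; this is illustrative, not code from the repository):

package main

import (
	"fmt"
	"log"

	helper "github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper"
)

func main() {
	// HaveOidcCredentialRequiredEnv reports whether ALIBABA_CLOUD_ROLE_ARN,
	// ALIBABA_CLOUD_OIDC_PROVIDER_ARN and ALIBABA_CLOUD_OIDC_TOKEN_FILE are all set.
	if !helper.HaveOidcCredentialRequiredEnv() {
		log.Fatal("OIDC environment variables are not set")
	}

	// NewOidcCredential wraps the deprecated GetOidcCredential and builds an
	// oidc_role_arn credential from the three environment variables.
	cred, err := helper.NewOidcCredential("example-session")
	if err != nil {
		log.Fatalf("building OIDC credential: %v", err)
	}
	fmt.Printf("credential ready: %T\n", cred)
}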
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt new file mode 100644 index 000000000..a338672ec --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt @@ -0,0 +1,29 @@ +NOTICES AND INFORMATION +Do Not Translate or Localize + +This software incorporates material from third parties. Microsoft makes certain +open source code available at https://3rdpartysource.microsoft.com, or you may +send a check or money order for US $5.00, including the product name, the open +source component name, and version number, to: + +Source Code Compliance Team +Microsoft Corporation +One Microsoft Way +Redmond, WA 98052 +USA + +Notwithstanding any other terms, you may reverse engineer this software to the +extent required to debug changes to any libraries licensed under the GNU Lesser +General Public License. + +------------------------------------------------------------------------------ + +Azure SDK for Go uses third-party libraries or other resources that may be +distributed under licenses different than the Azure SDK for Go software. + +In the event that we accidentally failed to list a required notice, please +bring it to our attention. Post an issue or email us: + + azgosdkhelp@microsoft.com + +The attached notices are provided for information only. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/CHANGELOG.md new file mode 100644 index 000000000..52911e4cc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/CHANGELOG.md @@ -0,0 +1,2 @@ +# Change History + diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/accesstokens.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/accesstokens.go new file mode 100644 index 000000000..e9bc66a39 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/accesstokens.go @@ -0,0 +1,178 @@ +package containerregistry + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// AccessTokensClient is the metadata API definition for the Azure Container Registry runtime +type AccessTokensClient struct { + BaseClient +} + +// NewAccessTokensClient creates an instance of the AccessTokensClient client. +func NewAccessTokensClient(loginURI string) AccessTokensClient { + return AccessTokensClient{New(loginURI)} +} + +// Get exchange ACR Refresh token for an ACR Access Token +// Parameters: +// service - indicates the name of your Azure container registry. +// scope - which is expected to be a valid scope, and can be specified more than once for multiple scope +// requests. 
You obtained this from the Www-Authenticate response header from the challenge. +// refreshToken - must be a valid ACR refresh token +func (client AccessTokensClient) Get(ctx context.Context, service string, scope string, refreshToken string) (result AccessToken, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccessTokensClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, service, scope, refreshToken) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client AccessTokensClient) GetPreparer(ctx context.Context, service string, scope string, refreshToken string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + formDataParameters := map[string]interface{}{ + "grant_type": "refresh_token", + "refresh_token": refreshToken, + "scope": scope, + "service": service, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPath("/oauth2/token"), + autorest.WithFormData(autorest.MapToValues(formDataParameters))) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AccessTokensClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client AccessTokensClient) GetResponder(resp *http.Response) (result AccessToken, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetFromLogin exchange Username, Password and Scope an ACR Access Token +// Parameters: +// service - indicates the name of your Azure container registry. +// scope - expected to be a valid scope, and can be specified more than once for multiple scope requests. You +// can obtain this from the Www-Authenticate response header from the challenge. 
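+// Unlike Get above, GetFromLogin does not post a refresh token: scope and
+// service travel as query parameters on a GET to /oauth2/token (see
+// GetFromLoginPreparer below).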
+func (client AccessTokensClient) GetFromLogin(ctx context.Context, service string, scope string) (result AccessToken, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccessTokensClient.GetFromLogin") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetFromLoginPreparer(ctx, service, scope) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "GetFromLogin", nil, "Failure preparing request") + return + } + + resp, err := client.GetFromLoginSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "GetFromLogin", resp, "Failure sending request") + return + } + + result, err = client.GetFromLoginResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.AccessTokensClient", "GetFromLogin", resp, "Failure responding to request") + return + } + + return +} + +// GetFromLoginPreparer prepares the GetFromLogin request. +func (client AccessTokensClient) GetFromLoginPreparer(ctx context.Context, service string, scope string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + queryParameters := map[string]interface{}{ + "scope": autorest.Encode("query", scope), + "service": autorest.Encode("query", service), + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPath("/oauth2/token"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetFromLoginSender sends the GetFromLogin request. The method will close the +// http.Response Body if it receives an error. +func (client AccessTokensClient) GetFromLoginSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetFromLoginResponder handles the response to the GetFromLogin request. The method always +// closes the http.Response Body. +func (client AccessTokensClient) GetFromLoginResponder(resp *http.Response) (result AccessToken, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/blob.go new file mode 100644 index 000000000..da512a659 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/blob.go @@ -0,0 +1,842 @@ +package containerregistry + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "io" + "net/http" +) + +// BlobClient is the metadata API definition for the Azure Container Registry runtime +type BlobClient struct { + BaseClient +} + +// NewBlobClient creates an instance of the BlobClient client. +func NewBlobClient(loginURI string) BlobClient { + return BlobClient{New(loginURI)} +} + +// CancelUpload cancel outstanding upload processes, releasing associated resources. If this is not called, the +// unfinished uploads will eventually timeout. +// Parameters: +// location - link acquired from upload start or previous chunk. Note, do not include initial / (must do +// substring(1) ) +func (client BlobClient) CancelUpload(ctx context.Context, location string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.CancelUpload") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CancelUploadPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CancelUpload", nil, "Failure preparing request") + return + } + + resp, err := client.CancelUploadSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CancelUpload", resp, "Failure sending request") + return + } + + result, err = client.CancelUploadResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CancelUpload", resp, "Failure responding to request") + return + } + + return +} + +// CancelUploadPreparer prepares the CancelUpload request. +func (client BlobClient) CancelUploadPreparer(ctx context.Context, location string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "nextBlobUuidLink": location, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/{nextBlobUuidLink}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CancelUploadSender sends the CancelUpload request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) CancelUploadSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// CancelUploadResponder handles the response to the CancelUpload request. The method always +// closes the http.Response Body. +func (client BlobClient) CancelUploadResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Check same as GET, except only the headers are returned. 
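+// A 200 (or 307 redirect) reply to this HEAD request confirms the blob
+// exists without transferring its content; see the status codes accepted
+// by CheckResponder below.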
+// Parameters: +// name - name of the image (including the namespace) +// digest - digest of a BLOB +func (client BlobClient) Check(ctx context.Context, name string, digest string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.Check") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CheckPreparer(ctx, name, digest) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Check", nil, "Failure preparing request") + return + } + + resp, err := client.CheckSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Check", resp, "Failure sending request") + return + } + + result, err = client.CheckResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Check", resp, "Failure responding to request") + return + } + + return +} + +// CheckPreparer prepares the Check request. +func (client BlobClient) CheckPreparer(ctx context.Context, name string, digest string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "digest": autorest.Encode("path", digest), + "name": autorest.Encode("path", name), + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/v2/{name}/blobs/{digest}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckSender sends the Check request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) CheckSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// CheckResponder handles the response to the Check request. The method always +// closes the http.Response Body. +func (client BlobClient) CheckResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusTemporaryRedirect), + autorest.ByClosing()) + result.Response = resp + return +} + +// CheckChunk same as GET, except only the headers are returned. +// Parameters: +// name - name of the image (including the namespace) +// digest - digest of a BLOB +// rangeParameter - format : bytes=-, HTTP Range header specifying blob chunk. 
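+// (A standard RFC 7233 range such as "bytes=0-1023" would address the first KiB of the blob.)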
+func (client BlobClient) CheckChunk(ctx context.Context, name string, digest string, rangeParameter string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.CheckChunk") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CheckChunkPreparer(ctx, name, digest, rangeParameter) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CheckChunk", nil, "Failure preparing request") + return + } + + resp, err := client.CheckChunkSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CheckChunk", resp, "Failure sending request") + return + } + + result, err = client.CheckChunkResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "CheckChunk", resp, "Failure responding to request") + return + } + + return +} + +// CheckChunkPreparer prepares the CheckChunk request. +func (client BlobClient) CheckChunkPreparer(ctx context.Context, name string, digest string, rangeParameter string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "digest": autorest.Encode("path", digest), + "name": autorest.Encode("path", name), + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/v2/{name}/blobs/{digest}", pathParameters), + autorest.WithHeader("Range", autorest.String(rangeParameter))) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckChunkSender sends the CheckChunk request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) CheckChunkSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// CheckChunkResponder handles the response to the CheckChunk request. The method always +// closes the http.Response Body. +func (client BlobClient) CheckChunkResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete removes an already uploaded blob. 
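+// A successful delete answers 200 or 202 (see DeleteResponder below); the
+// response body, if any, is handed back to the caller as a ReadCloser.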
+// Parameters: +// name - name of the image (including the namespace) +// digest - digest of a BLOB +func (client BlobClient) Delete(ctx context.Context, name string, digest string) (result ReadCloser, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.Delete") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, name, digest) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client BlobClient) DeletePreparer(ctx context.Context, name string, digest string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "digest": autorest.Encode("path", digest), + "name": autorest.Encode("path", name), + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/v2/{name}/blobs/{digest}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client BlobClient) DeleteResponder(resp *http.Response) (result ReadCloser, err error) { + result.Value = &resp.Body + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + result.Response = autorest.Response{Response: resp} + return +} + +// EndUpload complete the upload, providing all the data in the body, if necessary. A request without a body will just +// complete the upload with previously uploaded content. +// Parameters: +// digest - digest of a BLOB +// location - link acquired from upload start or previous chunk. 
Note, do not include initial / (must do +// substring(1) ) +// value - optional raw data of blob +func (client BlobClient) EndUpload(ctx context.Context, digest string, location string, value io.ReadCloser) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.EndUpload") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.EndUploadPreparer(ctx, digest, location, value) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "EndUpload", nil, "Failure preparing request") + return + } + + resp, err := client.EndUploadSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "EndUpload", resp, "Failure sending request") + return + } + + result, err = client.EndUploadResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "EndUpload", resp, "Failure responding to request") + return + } + + return +} + +// EndUploadPreparer prepares the EndUpload request. +func (client BlobClient) EndUploadPreparer(ctx context.Context, digest string, location string, value io.ReadCloser) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "nextBlobUuidLink": location, + } + + queryParameters := map[string]interface{}{ + "digest": autorest.Encode("query", digest), + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/octet-stream"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/{nextBlobUuidLink}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if value != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithFile(value)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// EndUploadSender sends the EndUpload request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) EndUploadSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// EndUploadResponder handles the response to the EndUpload request. The method always +// closes the http.Response Body. +func (client BlobClient) EndUploadResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieve the blob from the registry identified by digest. 
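+// The returned ReadCloser wraps the raw response body, and the caller is
+// responsible for closing it: GetResponder deliberately does not close the
+// body it hands back.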
+// Parameters: +// name - name of the image (including the namespace) +// digest - digest of a BLOB +func (client BlobClient) Get(ctx context.Context, name string, digest string) (result ReadCloser, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, name, digest) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client BlobClient) GetPreparer(ctx context.Context, name string, digest string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "digest": autorest.Encode("path", digest), + "name": autorest.Encode("path", name), + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/v2/{name}/blobs/{digest}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client BlobClient) GetResponder(resp *http.Response) (result ReadCloser, err error) { + result.Value = &resp.Body + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusTemporaryRedirect)) + result.Response = autorest.Response{Response: resp} + return +} + +// GetChunk retrieve the blob from the registry identified by `digest`. This endpoint may also support RFC7233 +// compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is +// returned, range requests can be used to fetch partial content. +// Parameters: +// name - name of the image (including the namespace) +// digest - digest of a BLOB +// rangeParameter - format : bytes=-, HTTP Range header specifying blob chunk. 
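+// Chunked reads succeed with 206 Partial Content (200 for a full read);
+// see GetChunkResponder below.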
+func (client BlobClient) GetChunk(ctx context.Context, name string, digest string, rangeParameter string) (result ReadCloser, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.GetChunk") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetChunkPreparer(ctx, name, digest, rangeParameter) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetChunk", nil, "Failure preparing request") + return + } + + resp, err := client.GetChunkSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetChunk", resp, "Failure sending request") + return + } + + result, err = client.GetChunkResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetChunk", resp, "Failure responding to request") + return + } + + return +} + +// GetChunkPreparer prepares the GetChunk request. +func (client BlobClient) GetChunkPreparer(ctx context.Context, name string, digest string, rangeParameter string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "digest": autorest.Encode("path", digest), + "name": autorest.Encode("path", name), + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/v2/{name}/blobs/{digest}", pathParameters), + autorest.WithHeader("Range", autorest.String(rangeParameter))) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetChunkSender sends the GetChunk request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) GetChunkSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetChunkResponder handles the response to the GetChunk request. The method always +// closes the http.Response Body. +func (client BlobClient) GetChunkResponder(resp *http.Response) (result ReadCloser, err error) { + result.Value = &resp.Body + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusPartialContent)) + result.Response = autorest.Response{Response: resp} + return +} + +// GetStatus retrieve status of upload identified by uuid. The primary purpose of this endpoint is to resolve the +// current status of a resumable upload. +// Parameters: +// location - link acquired from upload start or previous chunk. 
Note, do not include initial / (must do +// substring(1) ) +func (client BlobClient) GetStatus(ctx context.Context, location string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.GetStatus") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetStatusPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetStatus", nil, "Failure preparing request") + return + } + + resp, err := client.GetStatusSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetStatus", resp, "Failure sending request") + return + } + + result, err = client.GetStatusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "GetStatus", resp, "Failure responding to request") + return + } + + return +} + +// GetStatusPreparer prepares the GetStatus request. +func (client BlobClient) GetStatusPreparer(ctx context.Context, location string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "nextBlobUuidLink": location, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/{nextBlobUuidLink}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetStatusSender sends the GetStatus request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) GetStatusSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetStatusResponder handles the response to the GetStatus request. The method always +// closes the http.Response Body. +func (client BlobClient) GetStatusResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Mount mount a blob identified by the `mount` parameter from another repository. +// Parameters: +// name - name of the image (including the namespace) +// from - name of the source repository. +// mount - digest of blob to mount from the source repository. 
+func (client BlobClient) Mount(ctx context.Context, name string, from string, mount string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.Mount") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.MountPreparer(ctx, name, from, mount) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Mount", nil, "Failure preparing request") + return + } + + resp, err := client.MountSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Mount", resp, "Failure sending request") + return + } + + result, err = client.MountResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Mount", resp, "Failure responding to request") + return + } + + return +} + +// MountPreparer prepares the Mount request. +func (client BlobClient) MountPreparer(ctx context.Context, name string, from string, mount string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + } + + queryParameters := map[string]interface{}{ + "from": autorest.Encode("query", from), + "mount": autorest.Encode("query", mount), + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/v2/{name}/blobs/uploads/", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// MountSender sends the Mount request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) MountSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// MountResponder handles the response to the Mount request. The method always +// closes the http.Response Body. +func (client BlobClient) MountResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// StartUpload initiate a resumable blob upload with an empty request body. 
+// Parameters: +// name - name of the image (including the namespace) +func (client BlobClient) StartUpload(ctx context.Context, name string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.StartUpload") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.StartUploadPreparer(ctx, name) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "StartUpload", nil, "Failure preparing request") + return + } + + resp, err := client.StartUploadSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "StartUpload", resp, "Failure sending request") + return + } + + result, err = client.StartUploadResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "StartUpload", resp, "Failure responding to request") + return + } + + return +} + +// StartUploadPreparer prepares the StartUpload request. +func (client BlobClient) StartUploadPreparer(ctx context.Context, name string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/v2/{name}/blobs/uploads/", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StartUploadSender sends the StartUpload request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) StartUploadSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// StartUploadResponder handles the response to the StartUpload request. The method always +// closes the http.Response Body. +func (client BlobClient) StartUploadResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Upload upload a stream of data without completing the upload. +// Parameters: +// value - raw data of blob +// location - link acquired from upload start or previous chunk. 
Note, do not include initial / (must do +// substring(1) ) +func (client BlobClient) Upload(ctx context.Context, value io.ReadCloser, location string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BlobClient.Upload") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UploadPreparer(ctx, value, location) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Upload", nil, "Failure preparing request") + return + } + + resp, err := client.UploadSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Upload", resp, "Failure sending request") + return + } + + result, err = client.UploadResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.BlobClient", "Upload", resp, "Failure responding to request") + return + } + + return +} + +// UploadPreparer prepares the Upload request. +func (client BlobClient) UploadPreparer(ctx context.Context, value io.ReadCloser, location string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "nextBlobUuidLink": location, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/octet-stream"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/{nextBlobUuidLink}", pathParameters), + autorest.WithFile(value)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UploadSender sends the Upload request. The method will close the +// http.Response Body if it receives an error. +func (client BlobClient) UploadSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UploadResponder handles the response to the Upload request. The method always +// closes the http.Response Body. +func (client BlobClient) UploadResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/client.go new file mode 100644 index 000000000..d0ca3cef4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/client.go @@ -0,0 +1,33 @@ +// Package containerregistry implements the Azure ARM Containerregistry service API version 2019-08-15-preview. +// +// Metadata API definition for the Azure Container Registry runtime +package containerregistry + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
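To make the BlobClient surface above concrete: StartUpload, Upload, and EndUpload together implement the registry's resumable-upload handshake, with each response's Location header carrying the URL for the next call. The following is a minimal illustrative sketch (not part of the generated SDK) of how these methods might be chained; it assumes the NewBlobClient constructor defined earlier in this package, and the login URI, repository name, payload, and digest are placeholders.

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"strings"

	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry"
)

func main() {
	ctx := context.Background()
	// Placeholder login URI; in practice this is the registry's login server.
	client := containerregistry.NewBlobClient("https://myregistry.azurecr.io")

	// Initiate the resumable upload; the upload URL is returned in the Location header.
	start, err := client.StartUpload(ctx, "myrepo/myimage")
	if err != nil {
		panic(err)
	}
	// Per the doc comments above, the location must be passed without its leading "/".
	location := strings.TrimPrefix(start.Response.Header.Get("Location"), "/")

	// Stream an intermediate chunk (PATCH); the next location comes back the same way.
	data := []byte("example blob contents") // placeholder payload
	chunk, err := client.Upload(ctx, io.NopCloser(bytes.NewReader(data[:8])), location)
	if err != nil {
		panic(err)
	}
	location = strings.TrimPrefix(chunk.Response.Header.Get("Location"), "/")

	// Finalize with the digest of the complete blob (PUT); the digest is a placeholder.
	digest := "sha256:0000000000000000000000000000000000000000000000000000000000000000"
	if _, err = client.EndUpload(ctx, digest, location, io.NopCloser(bytes.NewReader(data[8:]))); err != nil {
		panic(err)
	}
	fmt.Println("blob uploaded")
}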
+ +import ( + "github.com/Azure/go-autorest/autorest" +) + +// BaseClient is the base client for Containerregistry. +type BaseClient struct { + autorest.Client + LoginURI string +} + +// New creates an instance of the BaseClient client. +func New(loginURI string) BaseClient { + return NewWithoutDefaults(loginURI) +} + +// NewWithoutDefaults creates an instance of the BaseClient client. +func NewWithoutDefaults(loginURI string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + LoginURI: loginURI, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/dataplane_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/dataplane_meta.json new file mode 100644 index 000000000..dc0691372 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/dataplane_meta.json @@ -0,0 +1,11 @@ +{ + "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "readme": "/_/azure-rest-api-specs/specification/containerregistry/data-plane/readme.md", + "tag": "package-2019-08", + "use": "@microsoft.azure/autorest.go@2.1.183", + "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-2019-08 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/containerregistry/data-plane/readme.md", + "additional_properties": { + "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" + } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/manifests.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/manifests.go new file mode 100644 index 000000000..645143788 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/manifests.go @@ -0,0 +1,491 @@ +package containerregistry + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ManifestsClient is the metadata API definition for the Azure Container Registry runtime +type ManifestsClient struct { + BaseClient +} + +// NewManifestsClient creates an instance of the ManifestsClient client. +func NewManifestsClient(loginURI string) ManifestsClient { + return ManifestsClient{New(loginURI)} +} + +// Create put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
+// Parameters: +// name - name of the image (including the namespace) +// reference - a tag or a digest, pointing to a specific image +// payload - manifest body, can take v1 or v2 values depending on accept header +func (client ManifestsClient) Create(ctx context.Context, name string, reference string, payload Manifest) (result SetObject, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.Create") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreatePreparer(ctx, name, reference, payload) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Create", resp, "Failure responding to request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client ManifestsClient) CreatePreparer(ctx context.Context, name string, reference string, payload Manifest) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "reference": autorest.Encode("path", reference), + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/vnd.docker.distribution.manifest.v2+json"), + autorest.AsPut(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/v2/{name}/manifests/{reference}", pathParameters), + autorest.WithJSON(payload)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client ManifestsClient) CreateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client ManifestsClient) CreateResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by +// `digest`. 
+// Parameters: +// name - name of the image (including the namespace) +// reference - a tag or a digest, pointing to a specific image +func (client ManifestsClient) Delete(ctx context.Context, name string, reference string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, name, reference) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ManifestsClient) DeletePreparer(ctx context.Context, name string, reference string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "reference": autorest.Encode("path", reference), + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/v2/{name}/manifests/{reference}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ManifestsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ManifestsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get the manifest identified by `name` and `reference` where `reference` can be a tag or digest. +// Parameters: +// name - name of the image (including the namespace) +// reference - a tag or a digest, pointing to a specific image +// accept - accept header string delimited by comma. 
For example, +// application/vnd.docker.distribution.manifest.v2+json +func (client ManifestsClient) Get(ctx context.Context, name string, reference string, accept string) (result ManifestWrapper, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, name, reference, accept) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "Get", resp, "Failure responding to request") + return + } + + return +} + +// GetPreparer prepares the Get request. +func (client ManifestsClient) GetPreparer(ctx context.Context, name string, reference string, accept string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "reference": autorest.Encode("path", reference), + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/v2/{name}/manifests/{reference}", pathParameters)) + if len(accept) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("accept", autorest.String(accept))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ManifestsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client ManifestsClient) GetResponder(resp *http.Response) (result ManifestWrapper, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAttributes get manifest attributes +// Parameters: +// name - name of the image (including the namespace) +// reference - a tag or a digest, pointing to a specific image +func (client ManifestsClient) GetAttributes(ctx context.Context, name string, reference string) (result ManifestAttributes, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.GetAttributes") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetAttributesPreparer(ctx, name, reference) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetAttributes", nil, "Failure preparing request") + return + } + + resp, err := client.GetAttributesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetAttributes", resp, "Failure sending request") + return + } + + result, err = client.GetAttributesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetAttributes", resp, "Failure responding to request") + return + } + + return +} + +// GetAttributesPreparer prepares the GetAttributes request. +func (client ManifestsClient) GetAttributesPreparer(ctx context.Context, name string, reference string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "reference": autorest.Encode("path", reference), + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/acr/v1/{name}/_manifests/{reference}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetAttributesSender sends the GetAttributes request. The method will close the +// http.Response Body if it receives an error. +func (client ManifestsClient) GetAttributesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetAttributesResponder handles the response to the GetAttributes request. The method always +// closes the http.Response Body. +func (client ManifestsClient) GetAttributesResponder(resp *http.Response) (result ManifestAttributes, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetList list manifests of a repository +// Parameters: +// name - name of the image (including the namespace) +// last - query parameter for the last item in previous query. Result set will include values lexically after +// last. 
+// n - query parameter for max number of items +// orderby - orderby query parameter +func (client ManifestsClient) GetList(ctx context.Context, name string, last string, n *int32, orderby string) (result AcrManifests, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.GetList") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetListPreparer(ctx, name, last, n, orderby) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetList", nil, "Failure preparing request") + return + } + + resp, err := client.GetListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetList", resp, "Failure sending request") + return + } + + result, err = client.GetListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "GetList", resp, "Failure responding to request") + return + } + + return +} + +// GetListPreparer prepares the GetList request. +func (client ManifestsClient) GetListPreparer(ctx context.Context, name string, last string, n *int32, orderby string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + } + + queryParameters := map[string]interface{}{} + if len(last) > 0 { + queryParameters["last"] = autorest.Encode("query", last) + } + if n != nil { + queryParameters["n"] = autorest.Encode("query", *n) + } + if len(orderby) > 0 { + queryParameters["orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/acr/v1/{name}/_manifests", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetListSender sends the GetList request. The method will close the +// http.Response Body if it receives an error. +func (client ManifestsClient) GetListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetListResponder handles the response to the GetList request. The method always +// closes the http.Response Body. 
+func (client ManifestsClient) GetListResponder(resp *http.Response) (result AcrManifests, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateAttributes update attributes of a manifest +// Parameters: +// name - name of the image (including the namespace) +// reference - a tag or a digest, pointing to a specific image +// value - repository attribute value +func (client ManifestsClient) UpdateAttributes(ctx context.Context, name string, reference string, value *ChangeableAttributes) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ManifestsClient.UpdateAttributes") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateAttributesPreparer(ctx, name, reference, value) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "UpdateAttributes", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateAttributesSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "UpdateAttributes", resp, "Failure sending request") + return + } + + result, err = client.UpdateAttributesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ManifestsClient", "UpdateAttributes", resp, "Failure responding to request") + return + } + + return +} + +// UpdateAttributesPreparer prepares the UpdateAttributes request. +func (client ManifestsClient) UpdateAttributesPreparer(ctx context.Context, name string, reference string, value *ChangeableAttributes) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "reference": autorest.Encode("path", reference), + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/acr/v1/{name}/_manifests/{reference}", pathParameters)) + if value != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(value)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateAttributesSender sends the UpdateAttributes request. The method will close the +// http.Response Body if it receives an error. +func (client ManifestsClient) UpdateAttributesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// UpdateAttributesResponder handles the response to the UpdateAttributes request. The method always +// closes the http.Response Body. 
+func (client ManifestsClient) UpdateAttributesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/models.go new file mode 100644 index 000000000..5ae491606 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/models.go @@ -0,0 +1,628 @@ +package containerregistry + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "io" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry" + +// AccessToken ... +type AccessToken struct { + autorest.Response `json:"-"` + // AccessToken - The access token for performing authenticated requests + AccessToken *string `json:"access_token,omitempty"` +} + +// AcrErrorInfo error information +type AcrErrorInfo struct { + // Code - Error code + Code *string `json:"code,omitempty"` + // Message - Error message + Message *string `json:"message,omitempty"` + // Detail - Error details + Detail interface{} `json:"detail,omitempty"` +} + +// AcrErrors acr error response describing why the operation failed +type AcrErrors struct { + // Errors - Array of detailed error + Errors *[]AcrErrorInfo `json:"errors,omitempty"` +} + +// AcrManifests manifest attributes +type AcrManifests struct { + autorest.Response `json:"-"` + // Registry - Registry name + Registry *string `json:"registry,omitempty"` + // ImageName - Image name + ImageName *string `json:"imageName,omitempty"` + // ManifestsAttributes - List of manifests + ManifestsAttributes *[]ManifestAttributesBase `json:"manifests,omitempty"` +} + +// Annotations additional information provided through arbitrary metadata. +type Annotations struct { + // AdditionalProperties - Unmatched properties from the message are deserialized this collection + AdditionalProperties map[string]interface{} `json:""` + // Created - Date and time on which the image was built (string, date-time as defined by https://tools.ietf.org/html/rfc3339#section-5.6) + Created *date.Time `json:"org.opencontainers.image.created,omitempty"` + // Authors - Contact details of the people or organization responsible for the image. + Authors *string `json:"org.opencontainers.image.authors,omitempty"` + // URL - URL to find more information on the image. + URL *string `json:"org.opencontainers.image.url,omitempty"` + // Documentation - URL to get documentation on the image. + Documentation *string `json:"org.opencontainers.image.documentation,omitempty"` + // Source - URL to get source code for building the image. + Source *string `json:"org.opencontainers.image.source,omitempty"` + // Version - Version of the packaged software. 
The version MAY match a label or tag in the source code repository, may also be Semantic versioning-compatible + Version *string `json:"org.opencontainers.image.version,omitempty"` + // Revision - Source control revision identifier for the packaged software. + Revision *string `json:"org.opencontainers.image.revision,omitempty"` + // Vendor - Name of the distributing entity, organization or individual. + Vendor *string `json:"org.opencontainers.image.vendor,omitempty"` + // Licenses - License(s) under which contained software is distributed as an SPDX License Expression. + Licenses *string `json:"org.opencontainers.image.licenses,omitempty"` + // Name - Name of the reference for a target. + Name *string `json:"org.opencontainers.image.ref.name,omitempty"` + // Title - Human-readable title of the image + Title *string `json:"org.opencontainers.image.title,omitempty"` + // Description - Human-readable description of the software packaged in the image + Description *string `json:"org.opencontainers.image.description,omitempty"` +} + +// MarshalJSON is the custom marshaler for Annotations. +func (a Annotations) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if a.Created != nil { + objectMap["org.opencontainers.image.created"] = a.Created + } + if a.Authors != nil { + objectMap["org.opencontainers.image.authors"] = a.Authors + } + if a.URL != nil { + objectMap["org.opencontainers.image.url"] = a.URL + } + if a.Documentation != nil { + objectMap["org.opencontainers.image.documentation"] = a.Documentation + } + if a.Source != nil { + objectMap["org.opencontainers.image.source"] = a.Source + } + if a.Version != nil { + objectMap["org.opencontainers.image.version"] = a.Version + } + if a.Revision != nil { + objectMap["org.opencontainers.image.revision"] = a.Revision + } + if a.Vendor != nil { + objectMap["org.opencontainers.image.vendor"] = a.Vendor + } + if a.Licenses != nil { + objectMap["org.opencontainers.image.licenses"] = a.Licenses + } + if a.Name != nil { + objectMap["org.opencontainers.image.ref.name"] = a.Name + } + if a.Title != nil { + objectMap["org.opencontainers.image.title"] = a.Title + } + if a.Description != nil { + objectMap["org.opencontainers.image.description"] = a.Description + } + for k, v := range a.AdditionalProperties { + objectMap[k] = v + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Annotations struct. 
+func (a *Annotations) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + default: + if v != nil { + var additionalProperties interface{} + err = json.Unmarshal(*v, &additionalProperties) + if err != nil { + return err + } + if a.AdditionalProperties == nil { + a.AdditionalProperties = make(map[string]interface{}) + } + a.AdditionalProperties[k] = additionalProperties + } + case "org.opencontainers.image.created": + if v != nil { + var created date.Time + err = json.Unmarshal(*v, &created) + if err != nil { + return err + } + a.Created = &created + } + case "org.opencontainers.image.authors": + if v != nil { + var authors string + err = json.Unmarshal(*v, &authors) + if err != nil { + return err + } + a.Authors = &authors + } + case "org.opencontainers.image.url": + if v != nil { + var URL string + err = json.Unmarshal(*v, &URL) + if err != nil { + return err + } + a.URL = &URL + } + case "org.opencontainers.image.documentation": + if v != nil { + var documentation string + err = json.Unmarshal(*v, &documentation) + if err != nil { + return err + } + a.Documentation = &documentation + } + case "org.opencontainers.image.source": + if v != nil { + var source string + err = json.Unmarshal(*v, &source) + if err != nil { + return err + } + a.Source = &source + } + case "org.opencontainers.image.version": + if v != nil { + var version string + err = json.Unmarshal(*v, &version) + if err != nil { + return err + } + a.Version = &version + } + case "org.opencontainers.image.revision": + if v != nil { + var revision string + err = json.Unmarshal(*v, &revision) + if err != nil { + return err + } + a.Revision = &revision + } + case "org.opencontainers.image.vendor": + if v != nil { + var vendor string + err = json.Unmarshal(*v, &vendor) + if err != nil { + return err + } + a.Vendor = &vendor + } + case "org.opencontainers.image.licenses": + if v != nil { + var licenses string + err = json.Unmarshal(*v, &licenses) + if err != nil { + return err + } + a.Licenses = &licenses + } + case "org.opencontainers.image.ref.name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + a.Name = &name + } + case "org.opencontainers.image.title": + if v != nil { + var title string + err = json.Unmarshal(*v, &title) + if err != nil { + return err + } + a.Title = &title + } + case "org.opencontainers.image.description": + if v != nil { + var description string + err = json.Unmarshal(*v, &description) + if err != nil { + return err + } + a.Description = &description + } + } + } + + return nil +} + +// ChangeableAttributes ... 
+type ChangeableAttributes struct { + // DeleteEnabled - Delete enabled + DeleteEnabled *bool `json:"deleteEnabled,omitempty"` + // WriteEnabled - Write enabled + WriteEnabled *bool `json:"writeEnabled,omitempty"` + // ListEnabled - List enabled + ListEnabled *bool `json:"listEnabled,omitempty"` + // ReadEnabled - Read enabled + ReadEnabled *bool `json:"readEnabled,omitempty"` +} + +// DeletedRepository deleted repository +type DeletedRepository struct { + autorest.Response `json:"-"` + // ManifestsDeleted - SHA of the deleted image + ManifestsDeleted *[]string `json:"manifestsDeleted,omitempty"` + // TagsDeleted - Tag of the deleted image + TagsDeleted *[]string `json:"tagsDeleted,omitempty"` +} + +// Descriptor docker V2 image layer descriptor including config and layers +type Descriptor struct { + // MediaType - Layer media type + MediaType *string `json:"mediaType,omitempty"` + // Size - Layer size + Size *int64 `json:"size,omitempty"` + // Digest - Layer digest + Digest *string `json:"digest,omitempty"` + // Urls - Specifies a list of URIs from which this object may be downloaded. + Urls *[]string `json:"urls,omitempty"` + Annotations *Annotations `json:"annotations,omitempty"` +} + +// FsLayer image layer information +type FsLayer struct { + // BlobSum - SHA of an image layer + BlobSum *string `json:"blobSum,omitempty"` +} + +// History a list of unstructured historical data for v1 compatibility +type History struct { + // V1Compatibility - The raw v1 compatibility information + V1Compatibility *string `json:"v1Compatibility,omitempty"` +} + +// ImageSignature signature of a signed manifest +type ImageSignature struct { + // Header - A JSON web signature + Header *JWK `json:"header,omitempty"` + // Signature - A signature for the image manifest, signed by a libtrust private key + Signature *string `json:"signature,omitempty"` + // Protected - The signed protected header + Protected *string `json:"protected,omitempty"` +} + +// JWK a JSON web signature +type JWK struct { + Jwk *JWKHeader `json:"jwk,omitempty"` + // Alg - The algorithm used to sign or encrypt the JWT + Alg *string `json:"alg,omitempty"` +} + +// JWKHeader JSON web key parameter +type JWKHeader struct { + // Crv - crv value + Crv *string `json:"crv,omitempty"` + // Kid - kid value + Kid *string `json:"kid,omitempty"` + // Kty - kty value + Kty *string `json:"kty,omitempty"` + // X - x value + X *string `json:"x,omitempty"` + // Y - y value + Y *string `json:"y,omitempty"` +} + +// Manifest returns the requested manifest file +type Manifest struct { + // SchemaVersion - Schema version + SchemaVersion *int32 `json:"schemaVersion,omitempty"` +} + +// ManifestAttributes manifest attributes details +type ManifestAttributes struct { + autorest.Response `json:"-"` + // Registry - Registry name + Registry *string `json:"registry,omitempty"` + // ImageName - Image name + ImageName *string `json:"imageName,omitempty"` + // Attributes - Manifest attributes + Attributes *ManifestAttributesBase `json:"manifest,omitempty"` +} + +// ManifestAttributesBase manifest details +type ManifestAttributesBase struct { + // Digest - Manifest + Digest *string `json:"digest,omitempty"` + // ImageSize - Image size + ImageSize *int64 `json:"imageSize,omitempty"` + // CreatedTime - Created time + CreatedTime *string `json:"createdTime,omitempty"` + // LastUpdateTime - Last update time + LastUpdateTime *string `json:"lastUpdateTime,omitempty"` + // Architecture - CPU architecture + Architecture *string `json:"architecture,omitempty"` + // Os - 
Operating system + Os *string `json:"os,omitempty"` + // MediaType - Media type + MediaType *string `json:"mediaType,omitempty"` + // ConfigMediaType - Config blob media type + ConfigMediaType *string `json:"configMediaType,omitempty"` + // Tags - List of tags + Tags *[]string `json:"tags,omitempty"` + // ChangeableAttributes - Changeable attributes + ChangeableAttributes *ChangeableAttributes `json:"changeableAttributes,omitempty"` +} + +// ManifestAttributesManifest list of manifest attributes +type ManifestAttributesManifest struct { + // References - List of manifest attributes details + References *[]ManifestAttributesManifestReferences `json:"references,omitempty"` + // QuarantineTag - Quarantine tag name + QuarantineTag *string `json:"quarantineTag,omitempty"` +} + +// ManifestAttributesManifestReferences manifest attributes details +type ManifestAttributesManifestReferences struct { + // Digest - Manifest digest + Digest *string `json:"digest,omitempty"` + // Architecture - CPU architecture + Architecture *string `json:"architecture,omitempty"` + // Os - Operating system + Os *string `json:"os,omitempty"` +} + +// ManifestChangeableAttributes changeable attributes +type ManifestChangeableAttributes struct { + // DeleteEnabled - Delete enabled + DeleteEnabled *bool `json:"deleteEnabled,omitempty"` + // WriteEnabled - Write enabled + WriteEnabled *bool `json:"writeEnabled,omitempty"` + // ListEnabled - List enabled + ListEnabled *bool `json:"listEnabled,omitempty"` + // ReadEnabled - Read enabled + ReadEnabled *bool `json:"readEnabled,omitempty"` + // QuarantineState - Quarantine state + QuarantineState *string `json:"quarantineState,omitempty"` + // QuarantineDetails - Quarantine details + QuarantineDetails *string `json:"quarantineDetails,omitempty"` +} + +// ManifestList returns the requested Docker multi-arch-manifest file +type ManifestList struct { + // MediaType - Media type for this Manifest + MediaType *string `json:"mediaType,omitempty"` + // Manifests - List of V2 image layer information + Manifests *[]ManifestListAttributes `json:"manifests,omitempty"` + // SchemaVersion - Schema version + SchemaVersion *int32 `json:"schemaVersion,omitempty"` +} + +// ManifestListAttributes ... +type ManifestListAttributes struct { + // MediaType - The MIME type of the referenced object. 
This will generally be application/vnd.docker.image.manifest.v2+json, but it could also be application/vnd.docker.image.manifest.v1+json + MediaType *string `json:"mediaType,omitempty"` + // Size - The size in bytes of the object + Size *int64 `json:"size,omitempty"` + // Digest - The digest of the content, as defined by the Registry V2 HTTP API Specification + Digest *string `json:"digest,omitempty"` + Platform *Platform `json:"platform,omitempty"` +} + +// ManifestWrapper returns the requested manifest file +type ManifestWrapper struct { + autorest.Response `json:"-"` + // MediaType - Media type for this Manifest + MediaType *string `json:"mediaType,omitempty"` + // Manifests - (ManifestList, OCIIndex) List of V2 image layer information + Manifests *[]ManifestListAttributes `json:"manifests,omitempty"` + // Config - (V2, OCI) Image config descriptor + Config *Descriptor `json:"config,omitempty"` + // Layers - (V2, OCI) List of V2 image layer information + Layers *[]Descriptor `json:"layers,omitempty"` + // Annotations - (OCI, OCIIndex) Additional metadata + Annotations *Annotations `json:"annotations,omitempty"` + // Architecture - (V1) CPU architecture + Architecture *string `json:"architecture,omitempty"` + // Name - (V1) Image name + Name *string `json:"name,omitempty"` + // Tag - (V1) Image tag + Tag *string `json:"tag,omitempty"` + // FsLayers - (V1) List of layer information + FsLayers *[]FsLayer `json:"fsLayers,omitempty"` + // History - (V1) Image history + History *[]History `json:"history,omitempty"` + // Signatures - (V1) Image signature + Signatures *[]ImageSignature `json:"signatures,omitempty"` + // SchemaVersion - Schema version + SchemaVersion *int32 `json:"schemaVersion,omitempty"` +} + +// OCIIndex returns the requested OCI index file +type OCIIndex struct { + // Manifests - List of OCI image layer information + Manifests *[]ManifestListAttributes `json:"manifests,omitempty"` + Annotations *Annotations `json:"annotations,omitempty"` + // SchemaVersion - Schema version + SchemaVersion *int32 `json:"schemaVersion,omitempty"` +} + +// OCIManifest returns the requested OCI Manifest file +type OCIManifest struct { + // Config - V2 image config descriptor + Config *Descriptor `json:"config,omitempty"` + // Layers - List of V2 image layer information + Layers *[]Descriptor `json:"layers,omitempty"` + Annotations *Annotations `json:"annotations,omitempty"` + // SchemaVersion - Schema version + SchemaVersion *int32 `json:"schemaVersion,omitempty"` +} + +// Platform the platform object describes the platform which the image in the manifest runs on. A full list +// of valid operating system and architecture values are listed in the Go language documentation for $GOOS +// and $GOARCH +type Platform struct { + // Architecture - Specifies the CPU architecture, for example amd64 or ppc64le. + Architecture *string `json:"architecture,omitempty"` + // Os - The os field specifies the operating system, for example linux or windows. + Os *string `json:"os,omitempty"` + // OsVersion - The optional os.version field specifies the operating system version, for example 10.0.10586. + OsVersion *string `json:"os.version,omitempty"` + // OsFeatures - The optional os.features field specifies an array of strings, each listing a required OS feature (for example on Windows win32k + OsFeatures *[]string `json:"os.features,omitempty"` + // Variant - The optional variant field specifies a variant of the CPU, for example armv6l to specify a particular CPU variant of the ARM CPU. 
+ Variant *string `json:"variant,omitempty"` + // Features - The optional features field specifies an array of strings, each listing a required CPU feature (for example sse4 or aes + Features *[]string `json:"features,omitempty"` +} + +// ReadCloser ... +type ReadCloser struct { + autorest.Response `json:"-"` + Value *io.ReadCloser `json:"value,omitempty"` +} + +// RefreshToken ... +type RefreshToken struct { + autorest.Response `json:"-"` + // RefreshToken - The refresh token to be used for generating access tokens + RefreshToken *string `json:"refresh_token,omitempty"` +} + +// Repositories list of repositories +type Repositories struct { + autorest.Response `json:"-"` + // Names - Repository names + Names *[]string `json:"repositories,omitempty"` +} + +// RepositoryAttributes repository attributes +type RepositoryAttributes struct { + autorest.Response `json:"-"` + // Registry - Registry name + Registry *string `json:"registry,omitempty"` + // ImageName - Image name + ImageName *string `json:"imageName,omitempty"` + // CreatedTime - Image created time + CreatedTime *string `json:"createdTime,omitempty"` + // LastUpdateTime - Image last update time + LastUpdateTime *string `json:"lastUpdateTime,omitempty"` + // ManifestCount - Number of the manifests + ManifestCount *int32 `json:"manifestCount,omitempty"` + // TagCount - Number of the tags + TagCount *int32 `json:"tagCount,omitempty"` + // ChangeableAttributes - Changeable attributes + ChangeableAttributes *ChangeableAttributes `json:"changeableAttributes,omitempty"` +} + +// RepositoryTags result of the request to list tags of the image +type RepositoryTags struct { + // Name - Name of the image + Name *string `json:"name,omitempty"` + // Tags - List of tags + Tags *[]string `json:"tags,omitempty"` +} + +// SetObject ... 
+type SetObject struct { + autorest.Response `json:"-"` + Value interface{} `json:"value,omitempty"` +} + +// TagAttributes tag attributes +type TagAttributes struct { + autorest.Response `json:"-"` + // Registry - Registry name + Registry *string `json:"registry,omitempty"` + // ImageName - Image name + ImageName *string `json:"imageName,omitempty"` + // Attributes - List of tag attribute details + Attributes *TagAttributesBase `json:"tag,omitempty"` +} + +// TagAttributesBase tag attribute details +type TagAttributesBase struct { + // Name - Tag name + Name *string `json:"name,omitempty"` + // Digest - Tag digest + Digest *string `json:"digest,omitempty"` + // CreatedTime - Tag created time + CreatedTime *string `json:"createdTime,omitempty"` + // LastUpdateTime - Tag last update time + LastUpdateTime *string `json:"lastUpdateTime,omitempty"` + // Signed - Is signed + Signed *bool `json:"signed,omitempty"` + // ChangeableAttributes - Changeable attributes + ChangeableAttributes *ChangeableAttributes `json:"changeableAttributes,omitempty"` +} + +// TagAttributesTag tag +type TagAttributesTag struct { + // SignatureRecord - SignatureRecord value + SignatureRecord *string `json:"signatureRecord,omitempty"` +} + +// TagList list of tag details +type TagList struct { + autorest.Response `json:"-"` + // Registry - Registry name + Registry *string `json:"registry,omitempty"` + // ImageName - Image name + ImageName *string `json:"imageName,omitempty"` + // Tags - List of tag attribute details + Tags *[]TagAttributesBase `json:"tags,omitempty"` +} + +// V1Manifest returns the requested V1 manifest file +type V1Manifest struct { + // Architecture - CPU architecture + Architecture *string `json:"architecture,omitempty"` + // Name - Image name + Name *string `json:"name,omitempty"` + // Tag - Image tag + Tag *string `json:"tag,omitempty"` + // FsLayers - List of layer information + FsLayers *[]FsLayer `json:"fsLayers,omitempty"` + // History - Image history + History *[]History `json:"history,omitempty"` + // Signatures - Image signature + Signatures *[]ImageSignature `json:"signatures,omitempty"` + // SchemaVersion - Schema version + SchemaVersion *int32 `json:"schemaVersion,omitempty"` +} + +// V2Manifest returns the requested Docker V2 Manifest file +type V2Manifest struct { + // MediaType - Media type for this Manifest + MediaType *string `json:"mediaType,omitempty"` + // Config - V2 image config descriptor + Config *Descriptor `json:"config,omitempty"` + // Layers - List of V2 image layer information + Layers *[]Descriptor `json:"layers,omitempty"` + // SchemaVersion - Schema version + SchemaVersion *int32 `json:"schemaVersion,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/refreshtokens.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/refreshtokens.go new file mode 100644 index 000000000..a3f2ef220 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/refreshtokens.go @@ -0,0 +1,111 @@ +package containerregistry + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// RefreshTokensClient is the metadata API definition for the Azure Container Registry runtime +type RefreshTokensClient struct { + BaseClient +} + +// NewRefreshTokensClient creates an instance of the RefreshTokensClient client. +func NewRefreshTokensClient(loginURI string) RefreshTokensClient { + return RefreshTokensClient{New(loginURI)} +} + +// GetFromExchange exchange AAD tokens for an ACR refresh Token +// Parameters: +// grantType - can take a value of access_token_refresh_token, or access_token, or refresh_token +// service - indicates the name of your Azure container registry. +// tenant - AAD tenant associated to the AAD credentials. +// refreshToken - AAD refresh token, mandatory when grant_type is access_token_refresh_token or refresh_token +// accessToken - AAD access token, mandatory when grant_type is access_token_refresh_token or access_token. +func (client RefreshTokensClient) GetFromExchange(ctx context.Context, grantType string, service string, tenant string, refreshToken string, accessToken string) (result RefreshToken, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RefreshTokensClient.GetFromExchange") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetFromExchangePreparer(ctx, grantType, service, tenant, refreshToken, accessToken) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RefreshTokensClient", "GetFromExchange", nil, "Failure preparing request") + return + } + + resp, err := client.GetFromExchangeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RefreshTokensClient", "GetFromExchange", resp, "Failure sending request") + return + } + + result, err = client.GetFromExchangeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RefreshTokensClient", "GetFromExchange", resp, "Failure responding to request") + return + } + + return +} + +// GetFromExchangePreparer prepares the GetFromExchange request. +func (client RefreshTokensClient) GetFromExchangePreparer(ctx context.Context, grantType string, service string, tenant string, refreshToken string, accessToken string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + formDataParameters := map[string]interface{}{ + "grant_type": grantType, + "service": service, + } + if len(tenant) > 0 { + formDataParameters["tenant"] = tenant + } + if len(refreshToken) > 0 { + formDataParameters["refresh_token"] = refreshToken + } + if len(accessToken) > 0 { + formDataParameters["access_token"] = accessToken + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPath("/oauth2/exchange"), + autorest.WithFormData(autorest.MapToValues(formDataParameters))) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetFromExchangeSender sends the GetFromExchange request. The method will close the +// http.Response Body if it receives an error. 
+func (client RefreshTokensClient) GetFromExchangeSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetFromExchangeResponder handles the response to the GetFromExchange request. The method always +// closes the http.Response Body. +func (client RefreshTokensClient) GetFromExchangeResponder(resp *http.Response) (result RefreshToken, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/repository.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/repository.go new file mode 100644 index 000000000..7b7c4e3fd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/repository.go @@ -0,0 +1,321 @@ +package containerregistry + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// RepositoryClient is the metadata API definition for the Azure Container Registry runtime +type RepositoryClient struct { + BaseClient +} + +// NewRepositoryClient creates an instance of the RepositoryClient client. +func NewRepositoryClient(loginURI string) RepositoryClient { + return RepositoryClient{New(loginURI)} +} + +// Delete delete the repository identified by `name` +// Parameters: +// name - name of the image (including the namespace) +func (client RepositoryClient) Delete(ctx context.Context, name string) (result DeletedRepository, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RepositoryClient.Delete") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, name) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client RepositoryClient) DeletePreparer(ctx context.Context, name string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/acr/v1/{name}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RepositoryClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RepositoryClient) DeleteResponder(resp *http.Response) (result DeletedRepository, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAttributes get repository attributes +// Parameters: +// name - name of the image (including the namespace) +func (client RepositoryClient) GetAttributes(ctx context.Context, name string) (result RepositoryAttributes, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RepositoryClient.GetAttributes") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetAttributesPreparer(ctx, name) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetAttributes", nil, "Failure preparing request") + return + } + + resp, err := client.GetAttributesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetAttributes", resp, "Failure sending request") + return + } + + result, err = client.GetAttributesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetAttributes", resp, "Failure responding to request") + return + } + + return +} + +// GetAttributesPreparer prepares the GetAttributes request. +func (client RepositoryClient) GetAttributesPreparer(ctx context.Context, name string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/acr/v1/{name}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetAttributesSender sends the GetAttributes request. The method will close the +// http.Response Body if it receives an error. 
+func (client RepositoryClient) GetAttributesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetAttributesResponder handles the response to the GetAttributes request. The method always +// closes the http.Response Body. +func (client RepositoryClient) GetAttributesResponder(resp *http.Response) (result RepositoryAttributes, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetList list repositories +// Parameters: +// last - query parameter for the last item in previous query. Result set will include values lexically after +// last. +// n - query parameter for max number of items +func (client RepositoryClient) GetList(ctx context.Context, last string, n *int32) (result Repositories, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RepositoryClient.GetList") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetListPreparer(ctx, last, n) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetList", nil, "Failure preparing request") + return + } + + resp, err := client.GetListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetList", resp, "Failure sending request") + return + } + + result, err = client.GetListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "GetList", resp, "Failure responding to request") + return + } + + return +} + +// GetListPreparer prepares the GetList request. +func (client RepositoryClient) GetListPreparer(ctx context.Context, last string, n *int32) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + queryParameters := map[string]interface{}{} + if len(last) > 0 { + queryParameters["last"] = autorest.Encode("query", last) + } + if n != nil { + queryParameters["n"] = autorest.Encode("query", *n) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPath("/acr/v1/_catalog"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetListSender sends the GetList request. The method will close the +// http.Response Body if it receives an error. +func (client RepositoryClient) GetListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetListResponder handles the response to the GetList request. The method always +// closes the http.Response Body. 
+func (client RepositoryClient) GetListResponder(resp *http.Response) (result Repositories, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// UpdateAttributes updates the attributes of the repository identified by `name`.
+// Parameters:
+// name - name of the image (including the namespace)
+// value - repository attribute value
+func (client RepositoryClient) UpdateAttributes(ctx context.Context, name string, value *ChangeableAttributes) (result autorest.Response, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/RepositoryClient.UpdateAttributes")
+		defer func() {
+			sc := -1
+			if result.Response != nil {
+				sc = result.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.UpdateAttributesPreparer(ctx, name, value)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "UpdateAttributes", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.UpdateAttributesSender(req)
+	if err != nil {
+		result.Response = resp
+		err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "UpdateAttributes", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.UpdateAttributesResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "containerregistry.RepositoryClient", "UpdateAttributes", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// UpdateAttributesPreparer prepares the UpdateAttributes request.
+func (client RepositoryClient) UpdateAttributesPreparer(ctx context.Context, name string, value *ChangeableAttributes) (*http.Request, error) {
+	urlParameters := map[string]interface{}{
+		"url": client.LoginURI,
+	}
+
+	pathParameters := map[string]interface{}{
+		"name": autorest.Encode("path", name),
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPatch(),
+		autorest.WithCustomBaseURL("{url}", urlParameters),
+		autorest.WithPathParameters("/acr/v1/{name}", pathParameters))
+	if value != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithJSON(value))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateAttributesSender sends the UpdateAttributes request. The method will close the
+// http.Response Body if it receives an error.
+func (client RepositoryClient) UpdateAttributesSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+}
+
+// UpdateAttributesResponder handles the response to the UpdateAttributes request. The method always
+// closes the http.Response Body.
+func (client RepositoryClient) UpdateAttributesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/tag.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/tag.go new file mode 100644 index 000000000..f53d4f36e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/tag.go @@ -0,0 +1,339 @@ +package containerregistry + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// TagClient is the metadata API definition for the Azure Container Registry runtime +type TagClient struct { + BaseClient +} + +// NewTagClient creates an instance of the TagClient client. +func NewTagClient(loginURI string) TagClient { + return TagClient{New(loginURI)} +} + +// Delete delete tag +// Parameters: +// name - name of the image (including the namespace) +// reference - tag name +func (client TagClient) Delete(ctx context.Context, name string, reference string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, name, reference) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "Delete", resp, "Failure responding to request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client TagClient) DeletePreparer(ctx context.Context, name string, reference string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "reference": autorest.Encode("path", reference), + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/acr/v1/{name}/_tags/{reference}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client TagClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client TagClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetAttributes get tag attributes by tag +// Parameters: +// name - name of the image (including the namespace) +// reference - tag name +func (client TagClient) GetAttributes(ctx context.Context, name string, reference string) (result TagAttributes, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagClient.GetAttributes") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetAttributesPreparer(ctx, name, reference) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetAttributes", nil, "Failure preparing request") + return + } + + resp, err := client.GetAttributesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetAttributes", resp, "Failure sending request") + return + } + + result, err = client.GetAttributesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetAttributes", resp, "Failure responding to request") + return + } + + return +} + +// GetAttributesPreparer prepares the GetAttributes request. +func (client TagClient) GetAttributesPreparer(ctx context.Context, name string, reference string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "reference": autorest.Encode("path", reference), + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/acr/v1/{name}/_tags/{reference}", pathParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetAttributesSender sends the GetAttributes request. The method will close the +// http.Response Body if it receives an error. +func (client TagClient) GetAttributesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetAttributesResponder handles the response to the GetAttributes request. The method always +// closes the http.Response Body. +func (client TagClient) GetAttributesResponder(resp *http.Response) (result TagAttributes, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetList list tags of a repository +// Parameters: +// name - name of the image (including the namespace) +// last - query parameter for the last item in previous query. Result set will include values lexically after +// last. 
+// n - query parameter for max number of items +// orderby - orderby query parameter +// digest - filter by digest +func (client TagClient) GetList(ctx context.Context, name string, last string, n *int32, orderby string, digest string) (result TagList, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagClient.GetList") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetListPreparer(ctx, name, last, n, orderby, digest) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetList", nil, "Failure preparing request") + return + } + + resp, err := client.GetListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetList", resp, "Failure sending request") + return + } + + result, err = client.GetListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "GetList", resp, "Failure responding to request") + return + } + + return +} + +// GetListPreparer prepares the GetList request. +func (client TagClient) GetListPreparer(ctx context.Context, name string, last string, n *int32, orderby string, digest string) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + } + + queryParameters := map[string]interface{}{} + if len(last) > 0 { + queryParameters["last"] = autorest.Encode("query", last) + } + if n != nil { + queryParameters["n"] = autorest.Encode("query", *n) + } + if len(orderby) > 0 { + queryParameters["orderby"] = autorest.Encode("query", orderby) + } + if len(digest) > 0 { + queryParameters["digest"] = autorest.Encode("query", digest) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPathParameters("/acr/v1/{name}/_tags", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetListSender sends the GetList request. The method will close the +// http.Response Body if it receives an error. +func (client TagClient) GetListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// GetListResponder handles the response to the GetList request. The method always +// closes the http.Response Body. 
+func (client TagClient) GetListResponder(resp *http.Response) (result TagList, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// UpdateAttributes updates tag attributes
+// Parameters:
+// name - name of the image (including the namespace)
+// reference - tag name
+// value - tag attribute value
+func (client TagClient) UpdateAttributes(ctx context.Context, name string, reference string, value *ChangeableAttributes) (result autorest.Response, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/TagClient.UpdateAttributes")
+		defer func() {
+			sc := -1
+			if result.Response != nil {
+				sc = result.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.UpdateAttributesPreparer(ctx, name, reference, value)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "UpdateAttributes", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.UpdateAttributesSender(req)
+	if err != nil {
+		result.Response = resp
+		err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "UpdateAttributes", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.UpdateAttributesResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "containerregistry.TagClient", "UpdateAttributes", resp, "Failure responding to request")
+		return
+	}
+
+	return
+}
+
+// UpdateAttributesPreparer prepares the UpdateAttributes request.
+func (client TagClient) UpdateAttributesPreparer(ctx context.Context, name string, reference string, value *ChangeableAttributes) (*http.Request, error) {
+	urlParameters := map[string]interface{}{
+		"url": client.LoginURI,
+	}
+
+	pathParameters := map[string]interface{}{
+		"name":      autorest.Encode("path", name),
+		"reference": autorest.Encode("path", reference),
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsContentType("application/json; charset=utf-8"),
+		autorest.AsPatch(),
+		autorest.WithCustomBaseURL("{url}", urlParameters),
+		autorest.WithPathParameters("/acr/v1/{name}/_tags/{reference}", pathParameters))
+	if value != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithJSON(value))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateAttributesSender sends the UpdateAttributes request. The method will close the
+// http.Response Body if it receives an error.
+func (client TagClient) UpdateAttributesSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+}
+
+// UpdateAttributesResponder handles the response to the UpdateAttributes request. The method always
+// closes the http.Response Body.
+func (client TagClient) UpdateAttributesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/v2support.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/v2support.go new file mode 100644 index 000000000..58fca35cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/v2support.go @@ -0,0 +1,89 @@ +package containerregistry + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// V2SupportClient is the metadata API definition for the Azure Container Registry runtime +type V2SupportClient struct { + BaseClient +} + +// NewV2SupportClient creates an instance of the V2SupportClient client. +func NewV2SupportClient(loginURI string) V2SupportClient { + return V2SupportClient{New(loginURI)} +} + +// Check tells whether this Docker Registry instance supports Docker Registry HTTP API v2 +func (client V2SupportClient) Check(ctx context.Context) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/V2SupportClient.Check") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CheckPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.V2SupportClient", "Check", nil, "Failure preparing request") + return + } + + resp, err := client.CheckSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.V2SupportClient", "Check", resp, "Failure sending request") + return + } + + result, err = client.CheckResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.V2SupportClient", "Check", resp, "Failure responding to request") + return + } + + return +} + +// CheckPreparer prepares the Check request. +func (client V2SupportClient) CheckPreparer(ctx context.Context) (*http.Request, error) { + urlParameters := map[string]interface{}{ + "url": client.LoginURI, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithCustomBaseURL("{url}", urlParameters), + autorest.WithPath("/v2/")) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckSender sends the Check request. The method will close the +// http.Response Body if it receives an error. +func (client V2SupportClient) CheckSender(req *http.Request) (*http.Response, error) { + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) +} + +// CheckResponder handles the response to the Check request. The method always +// closes the http.Response Body. 
+func (client V2SupportClient) CheckResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/version.go new file mode 100644 index 000000000..1298b0471 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry/version.go @@ -0,0 +1,19 @@ +package containerregistry + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + Version() + " containerregistry/2019-08-15-preview" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go new file mode 100644 index 000000000..bcfbb15cc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -0,0 +1,7 @@ +package version + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +// Number contains the semantic version of this SDK. +const Number = "v68.0.0" diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore new file mode 100644 index 000000000..3350aaf70 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/.gitignore @@ -0,0 +1,32 @@ +# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore) +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store +.idea/ +.vscode/ + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# go-autorest specific +vendor/ +autorest/azure/example/example diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md new file mode 100644 index 000000000..d1f596bfc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md @@ -0,0 +1,1004 @@ +# CHANGELOG + +## v14.2.0 + +- Added package comment to make `github.com/Azure/go-autorest` importable. + +## v14.1.1 + +### Bug Fixes + +- Change `x-ms-authorization-auxiliary` header value separator to comma. + +## v14.1.0 + +### New Features + +- Added `azure.SetEnvironment()` that will update the global environments map with the specified values. + +## v14.0.1 + +### Bug Fixes + +- Fix race condition when refreshing token. +- Fixed some tests to work with Go 1.14. 
+
+## v14.0.0
+
+### Breaking Changes
+
+- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`.
+
+### New Features
+
+- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap.
+
+## v13.4.0
+
+### New Features
+
+- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client.
+- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators.
+
+## v13.3.3
+
+### Bug Fixes
+
+- Fixed connection leak when retrying requests.
+- Enabled exponential back-off with a 2-minute cap when retrying on 429.
+- Fixed some cases where errors were inadvertently dropped.
+
+## v13.3.2
+
+### Bug Fixes
+
+- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation.
+
+## v13.3.1
+
+- Updated external dependencies.
+
+## v13.3.0
+
+### New Features
+
+- Added support for shared key and shared access signature token authorization.
+  - `autorest.NewSharedKeyAuthorizer()` and dependent types.
+  - `autorest.NewSASTokenAuthorizer()` and dependent types.
+- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired.
+
+### Bug Fixes
+
+- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set.
+- Support parsing error messages in XML responses.
+
+## v13.2.0
+
+### New Features
+
+- Added the following functions to replace their versions that don't take a context.
+  - `adal.InitiateDeviceAuthWithContext()`
+  - `adal.CheckForUserCompletionWithContext()`
+  - `adal.WaitForUserCompletionWithContext()`
+
+## v13.1.0
+
+### New Features
+
+- Added support for MSI authentication on Azure App Service and Azure Functions.
+
+## v13.0.2
+
+### Bug Fixes
+
+- Always retry a request even if the sender returns a non-nil error.
+
+## v13.0.1
+
+### Bug Fixes
+
+- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters.
+
+## v13.0.0
+
+### Breaking Changes
+
+The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice.
+What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED`
+environment variable will have no effect. To enable this previous behavior you must now add the following import to your source file.
+```go
+  import _ "github.com/Azure/go-autorest/tracing/opencensus"
+```
+The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added.
+The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package).
+- tracing.Transport
+- tracing.Enable()
+- tracing.EnableWithAIForwarding()
+- tracing.Disable()
+
+The following APIs and types have been added:
+- tracing.Tracer
+- tracing.Register()
+
+To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface; a minimal no-op sketch follows after the v12.4.3 entry below.
+
+## v12.4.3
+
+### Bug Fixes
+
+- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens.
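+
+The entry for v13.0.0 above leaves the `tracing.Tracer` wiring abstract. As a rough, non-authoritative sketch, assuming the interface is the `NewTransport`/`StartSpan`/`EndSpan` trio that the generated clients earlier in this diff invoke through the package-level helpers, a no-op tracer could be registered like this:
+
+```go
+package main
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+// noopTracer is a hypothetical stand-in for a real tracing provider.
+type noopTracer struct{}
+
+// NewTransport returns the base transport unchanged (no instrumentation).
+func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper { return base }
+
+// StartSpan would normally open a span; here the context passes through untouched.
+func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }
+
+// EndSpan would normally close the span opened by StartSpan.
+func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) context.Context {
+	return ctx
+}
+
+func main() {
+	// Once a tracer is registered, tracing.IsEnabled() reports true and the
+	// generated clients start routing StartSpan/EndSpan calls through it.
+	tracing.Register(noopTracer{})
+}
+```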
+
+## v12.4.2
+
+### Bug Fixes
+
+- Improvements to the fixes made in v12.4.1.
+  - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency.
+  - Switched to latest version of `ocagent` that still depends on protobuf v1.2.
+  - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum.
+
+## v12.4.1
+
+### Bug Fixes
+
+- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes.
+- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent.
+
+## v12.4.0
+
+### New Features
+
+- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context.
+
+## v12.3.0
+
+### New Features
+
+- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with
+  secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding
+  MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the request.
+  The authentication helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS
+  is set with a semicolon delimited list of tenants the multi-tenant codepath will kick in to create the appropriate authorizer.
+  See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer`
+  along with their supporting types and methods.
+- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context.
+- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries.
+
+## v12.2.0
+
+### New Features
+
+- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators.
+- Added `autorest.ByUnmarshallingBytes` response decorator.
+- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types.
+
+### Bug Fixes
+
+- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes.
+
+## v12.1.0
+
+### New Features
+
+- Added `to.ByteSlicePtr()`.
+- Added blob/queue storage resource ID to `azure.ResourceIdentifier`.
+
+## v12.0.0
+
+### Breaking Changes
+
+In preparation for modules the following deprecated content has been removed.
+
+  - async.NewFuture()
+  - async.Future.Done()
+  - async.Future.WaitForCompletion()
+  - async.DoPollForAsynchronous()
+  - The `utils` package
+  - validation.NewErrorWithValidationError()
+  - The `version` package
+
+## v11.9.0
+
+### New Features
+
+- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds.
+
+## v11.8.0
+
+### New Features
+
+- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation.
+
+## v11.7.1
+
+### Bug Fixes
+
+- Fix missing support for http(s) proxy when using the default sender.
+
+## v11.7.0
+
+### New Features
+
+- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package.
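+
+As a hedged illustration of the v11.7.0 item above (assuming the shape of the current `auth` package API; the client ID, secret, and tenant values are placeholders), obtaining a service principal token and an authorizer from a client-credentials configuration might look like this:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/Azure/go-autorest/autorest/azure/auth"
+)
+
+func main() {
+	// Placeholder credentials, for illustration only.
+	cfg := auth.NewClientCredentialsConfig("client-id", "client-secret", "tenant-id")
+
+	// ServicePrincipalToken exposes the underlying ADAL token, which can be
+	// refreshed and inspected directly.
+	spt, err := cfg.ServicePrincipalToken()
+	if err != nil {
+		fmt.Println("token configuration failed:", err)
+		return
+	}
+	_ = spt
+
+	// Authorizer wraps the same credentials for use with an autorest.Client.
+	authorizer, err := cfg.Authorizer()
+	if err != nil {
+		fmt.Println("authorizer configuration failed:", err)
+		return
+	}
+	_ = authorizer
+}
+```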
+
+## v11.6.1
+
+### Bug Fixes
+
+- Fix ACR DNS endpoint for government clouds.
+- Add Cosmos DB DNS endpoints.
+- Update dependencies to resolve build breaks in OpenCensus.
+
+## v11.6.0
+
+### New Features
+
+- Added type `autorest.BasicAuthorizer` to support Basic authentication.
+
+## v11.5.2
+
+### Bug Fixes
+
+- Fixed `GetTokenFromCLI` not working with zsh.
+
+## v11.5.1
+
+### Bug Fixes
+
+- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2.
+
+## v11.5.0
+
+### New Features
+
+- The `auth` package has been refactored so that the environment and file settings are now available.
+- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created.
+- Added support for certificate authorization for file-based config.
+
+## v11.4.0
+
+### New Features
+
+- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests.
+- Exported `adal.UserAgent()` for parity with `autorest.Client`.
+
+## v11.3.2
+
+### Bug Fixes
+
+- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline.
+
+## v11.3.1
+
+### Bug Fixes
+
+- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases.
+
+## v11.3.0
+
+### New Features
+
+- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type.
+
+## v11.2.8
+
+### Bug Fixes
+
+- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package.
+
+## v11.2.7
+
+### Bug Fixes
+
+- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`.
+  Note that for backward compatibility reasons, both will work until the next major version release of the package.
+
+## v11.2.6
+
+### Bug Fixes
+
+- If zero bytes are read from a polling response body don't attempt to unmarshal them.
+
+## v11.2.5
+
+### Bug Fixes
+
+- Removed race condition in `autorest.DoRetryForStatusCodes`.
+
+## v11.2.4
+
+### Bug Fixes
+
+- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available.
+
+## v11.2.1
+
+NOTE: Versions of Go prior to 1.10 have been removed from CI as they no
+longer work with golint.
+
+### Bug Fixes
+
+- Method `MSIConfig.Authorizer` now supports user-assigned identities.
+- The adal package now reports its own user-agent string.
+
+## v11.2.0
+
+### New Features
+
+- Added `tracing` package that enables instrumentation of HTTP and API calls.
+  Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable`
+  will start instrumenting the code for metrics and traces.
+  Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or
+  calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an
+  App Insights Local Forwarder that needs to be running. Note that if the
+  AI Local Forwarder is not running tracking will still be enabled.
+  By default, instrumentation is disabled. Once enabled, instrumentation can also
+  be programmatically disabled by calling `Disable`.
+- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated.
+
+### Bug Fixes
+
+- Don't use the initial request's context for LRO polling.
+- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if
+  it is already set.
+
+## v11.1.1
+
+### Bug Fixes
+
+- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller.
+
+## v11.1.0
+
+### New Features
+
+- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI.
+- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version.
+
+## v11.0.1
+
+### New Features
+
+- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication.
+
+## v11.0.0
+
+### Breaking Changes
+
+- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number`
+  - ExpiresIn
+  - ExpiresOn
+  - NotBefore
+
+### New Features
+
+- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource.
+- Setting a client's `PollingDuration` to zero will use the provided context to control a LRO's polling duration.
+
+## v10.15.5
+
+### Bug Fixes
+
+- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response.
+
+## v10.15.4
+
+### Bug Fixes
+
+- If a polling operation returns a failure status code return the associated error.
+
+## v10.15.3
+
+### Bug Fixes
+
+- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header.
+
+## v10.15.2
+
+### Bug Fixes
+
+- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers.
+
+## v10.15.1
+
+### Bug Fixes
+
+- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll.
+- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure.
+
+## v10.15.0
+
+### New Features
+
+- Add initial support for request/response logging via setting environment variables.
+  Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response
+  without their bodies. To include the bodies set the log level to `LogDebug`.
+  By default the logger writes to stderr; however, it can also write to stdout or a file
+  if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file
+  already exists it will be truncated.
+  IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key
+  headers. Any other secrets will _not_ be redacted.
+
+## v10.14.0
+
+### New Features
+
+- Added package version that contains version constants and user-agent data.
+
+### Bug Fixes
+
+- Add the user-agent to token requests.
+
+## v10.13.0
+
+- Added support for additionalInfo in ServiceError type.
+
+## v10.12.0
+
+### New Features
+
+- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximum number of attempts to refresh an MSI token.
+
+## v10.11.4
+
+### Bug Fixes
+
+- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult().
+- If there is no "final GET URL" return an error from Future.GetResult().
+
+## v10.11.3
+
+### Bug Fixes
+
+- In IMDS retry logic, if we don't receive a response don't retry.
+  - Renamed the retry function so it's clear it's meant for IMDS only.
+- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost.
+  - Also add the raw HTTP response to the DetailedResponse.
+- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration().
+
+## v10.11.2
+
+### Bug Fixes
+
+- Validation for integers handles int and int64 types.
+
+## v10.11.1
+
+### Bug Fixes
+
+- Adding User information to authorization config as parsed from CLI cache.
+
+## v10.11.0
+
+### New Features
+
+- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret
+- Added method ServicePrincipalToken.MarshalTokenJSON() to marshall the inner Token
+
+## v10.10.0
+
+### New Features
+
+- Most ServicePrincipalTokens can now be marshalled/unmarshalled to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported).
+- Added method ServicePrincipalToken.SetRefreshCallbacks().
+
+## v10.9.2
+
+### Bug Fixes
+
+- Refreshing a refresh token obtained from a web app authorization code now works.
+
+## v10.9.1
+
+### Bug Fixes
+
+- The retry logic for MSI token requests now uses exponential backoff per the guidelines.
+- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface.
+
+## v10.9.0
+
+### Deprecated Methods
+
+| Old Method                 |          New Method           |
+| -------------------------: | :---------------------------: |
+| azure.NewFuture()          | azure.NewFutureFromResponse() |
+| Future.WaitForCompletion() | Future.WaitForCompletionRef() |
+
+### New Features
+
+- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation.
+- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation.
+
+### Bug Fixes
+
+- Some futures failed to return their results; this should now be fixed.
+
+## v10.8.2
+
+### Bug Fixes
+
+- Add nil-guard to token retry logic.
+
+## v10.8.1
+
+### Bug Fixes
+
+- Return a TokenRefreshError if the sender fails on the initial request.
+- Don't retry on non-temporary network errors.
+
+## v10.8.0
+
+- Added NewAuthorizerFromEnvironmentWithResource() helper function.
+
+## v10.7.0
+
+### New Features
+
+- Added \*WithContext() methods to ADAL token refresh operations.
+
+## v10.6.2
+
+- Fixed a bug on device authentication.
+
+## v10.6.1
+
+- Added retries to MSI token get request.
+
+## v10.6.0
+
+- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint.
+
+## v10.5.1
+
+### Bug Fixes
+
+- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required.
+
+## v10.5.0
+
+### New Features
+
+- Added NewPollingRequestWithContext() for use with polling asynchronous operations.
+
+### Bug Fixes
+
+- Make retry logic use the request's context instead of the deprecated Cancel object.
+
+## v10.4.0
+
+### New Features
+
+- Added helper for parsing Azure Resource ID's.
+- Added deprecation message to utils.GetEnvVarOrExit()
+
+## v10.3.0
+
+### New Features
+
+- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints
+- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint
+
+## v10.2.0
+
+### New Features
+
+- Added endpoints for batch management.
+
+## v10.1.3
+
+### Bug Fixes
+
+- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization().
+- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers.
+
+## v10.1.2
+
+- Corrected comment for auth.NewAuthorizerFromFile() function.
+
+## v10.1.1
+
+- Updated version number to match current release.
+
+## v10.1.0
+
+### New Features
+
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated).
+
+## v10.0.0
+
+### New Features
+
+- Added target and innererror fields to ServiceError to comply with OData v4 spec.
+- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors).
+- Added helper methods for obtaining authorizers.
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Switched from glide to dep for dependency management.
+- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec.
+- Fixed a race condition in token refresh.
+
+### Breaking Changes
+
+- The ServiceError.Details field type has been changed to match the OData v4 spec.
+- Go v1.7 has been dropped from CI.
+- API parameter validation failures will now return a unique error type validation.Error.
+- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race).
+
+## v9.10.0
+
+- Fix the Service Bus suffix in Azure public env
+- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control)
+
+## v9.9.0
+
+### New Features
+
+- Added EventGridKeyAuthorizer for key authorization with event grid topics.
+
+### Bug Fixes
+
+- Fixed race condition when auto-refreshing service principal tokens.
+
+## v9.8.1
+
+### Bug Fixes
+
+- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations.
+- Updated runtime version info so it's current.
+
+## v9.8.0
+
+### New Features
+
+- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed.
+
+## v9.7.1
+
+### Bug Fixes
+
+- Use correct AAD and Graph endpoints for US Gov environment.
+
+## v9.7.0
+
+### New Features
+
+- Added support for application/octet-stream MIME types.
+
+## v9.6.1
+
+### Bug Fixes
+
+- Ensure Authorization header is added to request when polling for registration status.
+
+## v9.6.0
+
+### New Features
+
+- Added support for acquiring tokens via MSI with a user assigned identity.
+
+## v9.5.3
+
+### Bug Fixes
+
+- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters.
+- Set correct Content Type when using autorest.WithFormData.
+
+## v9.5.2
+
+### Bug Fixes
+
+- Check for nil \*http.Response before dereferencing it.
+
+## v9.5.1
+
+### Bug Fixes
+
+- Don't count http.StatusTooManyRequests (429) against the retry cap.
+- Use retry logic when SkipResourceProviderRegistration is set to true.
+
+## v9.5.0
+
+### New Features
+
+- Added support for username + password, API key, authorization code and cognitive services authentication.
+- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs.
+- Added utility function AsStringSlice() to convert its parameters to a string slice.
+
+### Bug Fixes
+
+- When checking for authentication failures look at the error type not the status code as it could vary.
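+
+To make the `AsStringSlice()` utility from the v9.5.0 entry above concrete, here is a minimal, hedged sketch (assuming the current behavior of the function, which converts any slice-typed value into `[]string`):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func main() {
+	// A []interface{} such as one produced by decoding untyped JSON.
+	raw := []interface{}{"eastus", "westus"}
+
+	// AsStringSlice validates that its argument is a slice or array and
+	// returns the elements as a []string.
+	values, err := autorest.AsStringSlice(raw)
+	if err != nil {
+		fmt.Println("conversion failed:", err)
+		return
+	}
+	fmt.Println(values) // expected: [eastus westus]
+}
+```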
+
+## v9.4.2
+
+### Bug Fixes
+
+- Validate parameters when creating credentials.
+- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed.
+
+## v9.4.1
+
+### Bug Fixes
+
+- Update the AccessTokensPath() to read access tokens path through AZURE_ACCESS_TOKEN_FILE. If this
+  environment variable is not set, it will fall back to use default path set by Azure CLI.
+- Use case-insensitive string comparison for polling states.
+
+## v9.4.0
+
+### New Features
+
+- Added WaitForCompletion() to Future as a default polling implementation.
+
+### Bug Fixes
+
+- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes.
+
+## v9.3.1
+
+### Bug Fixes
+
+- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error.
+
+## v9.3.0
+
+### New Features
+
+- Added PollingMethod() to Future so callers know what kind of polling mechanism is used.
+- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs).
+
+## v9.2.0
+
+### New Features
+
+- Added support for custom Azure Stack endpoints.
+- Added type azure.Future used to track the status of long-running operations.
+
+### Bug Fixes
+
+- Preserve the original error in DoRetryWithRegistration when registration fails.
+
+## v9.1.1
+
+- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`.
+
+## v9.1.0
+
+### New Features
+
+- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
+- Support for loading Azure CLI Authentication files.
+- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously.
+
+### Bug Fixes
+
+- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
+- Adding missing Apache Headers
+
+## v9.0.0
+
+> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes.
+
+Adding MSI Endpoint Support and CLI token rehydration.
+
+## v8.3.1
+
+Pick up bug fix in adal for MSI support.
+
+## v8.3.0
+
+Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
+
+## v8.2.0
+
+### New Features
+
+- Add support for bearer authentication callbacks
+- Support 429 response codes that include "Retry-After" header
+- Support validation constraint "Pattern" for map keys
+
+### Bug Fixes
+
+- Make RetriableRequest work with multiple versions of Go
+
+## v8.1.1
+
+Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
+
+## v8.1.0
+
+Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
+
+## v8.0.0
+
+ADAL refactored into its own package.
+Support for UNIX time.
+
+## v7.3.1
+
+- Version Testing now removed from production bits that are shipped with the library.
+
+## v7.3.0
+
+- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
+  to acknowledge that they do not need either the entire or a trailing portion
+  of a response body. In doing so, Go's http library can reuse HTTP
+  connections more readily.
+- Adding `PrepareDecorator` to target custom BaseURLs.
+- Adding ACR suffix to public cloud environment.
+- Updating Glide dependencies.
+
+## v7.2.5
+
+- Fixed the Active Directory endpoint for the China cloud.
+- Removes UTF-8 BOM if present in response payload.
+- Added telemetry.
+
+## v7.2.3
+
+- Fixed a bug in calls to `DelayForBackoff` that caused doubling of the delay
+  duration.
+
+## v7.2.2
+
+- autorest/azure: added ASM and ARM VM DNS suffixes.
+
+## v7.2.1
+
+- Fixed parsing of UTC times that are not RFC3339 conformant.
+
+## v7.2.0
+
+- autorest/validation: Reformat validation error for better error message.
+
+## v7.1.0
+
+- preparer: Added support for multipart form data - WithMultiPartFormData().
+- preparer: Added support for sending a file in the request body - WithFile.
+- client: Added RetryDuration parameter.
+- autorest/validation: new package for validation code for the Azure Go SDK.
+
+## v7.0.7
+
+- Add trailing / to endpoint.
+- azure: add EnvironmentFromName.
+
+## v7.0.6
+
+- Add retry logic for 408, 500, 502, 503 and 504 status codes.
+- Change URL path and query encoding logic.
+- Fix DelayForBackoff for proper exponential delay.
+- Add CookieJar in Client.
+
+## v7.0.5
+
+- Add check to start polling only when status is in [200,201,202].
+- Refactoring for unchecked errors.
+- azure/persist changes.
+- Fix 'file in use' issue in renewing token in deviceflow.
+- Store header RetryAfter for subsequent requests in polling.
+- Add attribute details in service error.
+
+## v7.0.4
+
+- Better error messages for long-running operation failures.
+
+## v7.0.3
+
+- Corrected DoPollForAsynchronous to properly handle the initial response.
+
+## v7.0.2
+
+- Corrected DoPollForAsynchronous to continue using the polling method first discovered.
+
+## v7.0.1
+
+- Fixed empty JSON input error in ByUnmarshallingJSON.
+- Fixed polling support for GET calls.
+- Changed format name from TimeRfc1123 to TimeRFC1123.
+
+## v7.0.0
+
+- Added ByCopying responder with supporting TeeReadCloser.
+- Rewrote Azure asynchronous handling.
+- Reverted to only unmarshalling JSON.
+- Corrected handling of RFC3339 time strings and added support for the RFC1123 time format.
+
+The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since
+`encoding/json` successfully deserializes all core types, and extended types normally provide
+their custom JSON serialization handlers, the code has been reverted back to using
+`json.Unmarshal`. The original change to use `json.Decoder` was made to reduce duplicate
+code; by reverting, there is no loss of function and there is a gain in accuracy.
+
+Additionally, Azure services indicate requests to be polled by multiple means. The existing code
+only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header).
+The new code correctly covers all cases and aligns with the other Azure SDKs.
+
+## v6.1.0
+
+- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
+
+## v6.0.0
+
+- Completely reworked the handling of polled and asynchronous requests.
+- Removed unnecessary routines.
+- Reworked `mocks.Sender` to replay a series of `http.Response` objects.
+- Added `PrepareDecorators` for primitive types (e.g., bool, int32).
+
+Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead, new
+`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes`
+and `azure.DoPollForAsynchronous` for examples.
+
+## v5.0.0
+
+- Added new RespondDecorators unmarshalling primitive types.
+- Corrected application of inspection and authorization PrependDecorators.
+
+## v4.0.0
+
+- Added support for Azure long-running operations.
+- Added cancellation support to all decorators and functions that may delay.
+- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
+
+## v3.1.0
+
+- Add support for OAuth Device Flow authorization.
+- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
+- Add helpers for persisting and restoring Tokens.
+- Increased code coverage in the github.com/Azure/autorest/azure package.
+
+## v3.0.0
+
+- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
+- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
+- Breaking: `Client#Send()` no longer takes a `codes ...int` argument.
+- Add: XML unmarshaling support with `ByUnmarshallingXML()`.
+- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide).
+  Applications using this library should either use Glide or vendor dependencies locally some other way.
+- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors.
+- Fix: use `net/http.DefaultClient` as base client.
+- Fix: Added missing inspection for polling responses.
+- Add: CopyAndDecode helpers.
+- Improved `./autorest/to` with `[]string` helpers.
+- Removed golint suppressions in .travis.yml.
+
+## v2.1.0
+
+- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any).
+
+## v2.0.0
+
+- Changed `to.StringMapPtr` method signature to return a pointer.
+- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificates and private keys.
+
+## v1.0.0
+
+- Added Logging inspectors to trace http.Request / Response.
+- Added support for User-Agent header.
+- Changed WithHeader PrepareDecorator to use set vs. add.
+- Added JSON to error when unmarshalling fails.
+- Added Client#Send method.
+- Corrected case of "Azure" in package paths.
+- Added "to" helpers, Azure helpers, and improved ease-of-use.
+- Corrected golint issues.
+
+## v1.0.1
+
+- Added CHANGELOG.md.
+
+## v1.1.0
+
+- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT.
+- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate.
+
+## v1.1.1
+
+- Introduce godeps and vendor dependencies introduced in v1.1.1.
diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile
new file mode 100644
index 000000000..a434e73ac
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/GNUmakefile
@@ -0,0 +1,23 @@
+DIR?=./autorest/
+
+default: build
+
+build: fmt
+	go install $(DIR)
+
+test:
+	go test $(DIR) || exit 1
+
+vet:
+	@echo "go vet ."
+	@go vet $(DIR)... ; if [ $$? -eq 1 ]; then \
+		echo ""; \
+		echo "Vet found suspicious constructs. Please check the reported constructs"; \
+		echo "and fix them if necessary before submitting the code for review."; \
+		exit 1; \
+	fi
+
+fmt:
+	gofmt -w $(DIR)
+
+.PHONY: build test vet fmt
diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock
new file mode 100644
index 000000000..dc6e3e633
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock
@@ -0,0 +1,324 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "UT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "UT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "UT" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" + +[[projects]] + digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + 
"plugin/ochttp/propagation/tracecontext", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" + version = "v0.22.2" + +[[projects]] + branch = "master" + digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" + name = "golang.org/x/crypto" + packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "UT" + revision = "e9b2fee46413994441b28dfca259d911d963dfed" + +[[projects]] + branch = "master" + digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" + +[[projects]] + branch = "master" + digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" + name = "golang.org/x/net" + packages = [ + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" + +[[projects]] + branch = "master" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + +[[projects]] + branch = "master" + digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/types/typeutil", + ] + pruneopts = "UT" + revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" + +[[projects]] + digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "8a410c21381766a810817fd6200fce8838ecb277" + version = "v0.14.0" + +[[projects]] + branch = "master" + digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "51378566eb590fa106d1025ea12835a4416dda84" + +[[projects]] + digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" + name = "google.golang.org/grpc" + packages = [ + ".", + "backoff", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", 
+ "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" + version = "v1.25.1" + +[[projects]] + digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" + version = "v2.2.7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", + "github.com/dgrijalva/jwt-go", + "github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", + "golang.org/x/crypto/pkcs12", + "golang.org/x/lint/golint", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 000000000..1fc286596 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,59 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +required = ["golang.org/x/lint/golint"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.6.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/dimchansky/utfbom" + version = "1.1.0" + +[[constraint]] + name = "github.com/mitchellh/go-homedir" + version = "1.1.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.3.0" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md
new file mode 100644
index 000000000..de1e19a44
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/README.md
@@ -0,0 +1,165 @@
+# go-autorest
+
+[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest)
+[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master)
+[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest)
+
+Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages.
+
+An authentication client tested with Azure Active Directory (AAD) is also
+provided in this repo in the package
+`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package
+is maintained only as part of the Azure Go SDK and is not related to other
+"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD).
+
+## Overview
+
+Package go-autorest implements an HTTP request pipeline suitable for use across
+multiple goroutines and provides the shared routines used by packages generated
+by [Autorest](https://github.com/Azure/autorest.go).
+
+The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
+and Responding. A typical pattern is:
+
+```go
+  req, err := Prepare(&http.Request{},
+    token.WithAuthorization())
+
+  resp, err := Send(req,
+    WithLogging(logger),
+    DoErrorIfStatusCode(http.StatusInternalServerError),
+    DoCloseIfError(),
+    DoRetryForAttempts(5, time.Second))
+
+  err = Respond(resp,
+    ByDiscardingBody(),
+    ByClosing())
+```
+
+Each phase relies on decorators to modify and/or manage processing. Decorators may first modify
+and then pass the data along, pass the data first and then modify the result, or wrap themselves
+around passing the data (such as a logger might do). Decorators run in the order provided. For
+example, the following:
+
+```go
+  req, err := Prepare(&http.Request{},
+    WithBaseURL("https://microsoft.com/"),
+    WithPath("a"),
+    WithPath("b"),
+    WithPath("c"))
+```
+
+will set the URL to:
+
+```
+  https://microsoft.com/a/b/c
+```
+
+Preparers and Responders may be shared and re-used (assuming the underlying decorators support
+sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
+shared among multiple goroutines, and a single Sender shared among multiple sending goroutines,
+all bound together by means of input/output channels.
+
+Decorators hold their passed state within a closure (such as the path components in the example
+above). Be careful to share Preparers and Responders only in a context where such held state
+applies. For example, it may not make sense to share a Preparer that applies a query string from a
+fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
+struct (e.g., `ByUnmarshallingJSON`) is likely incorrect.
+
+Errors raised by autorest objects and methods will conform to the `autorest.Error` interface.
+
+See the included examples for more detail. For details on the suggested use of this package by
+generated clients, see the Client described below.
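+
+To make the closure point above concrete, here is a minimal sketch of a custom `PrepareDecorator`
+(the decorator `withCorrelationID` and the header it sets are illustrative, not part of the
+package):
+
+```go
+import (
+  "net/http"
+
+  "github.com/Azure/go-autorest/autorest"
+)
+
+// withCorrelationID returns a PrepareDecorator that stamps every prepared
+// request with a fixed correlation-ID header. The id is held in the closure,
+// so one decorator value can safely decorate many requests with the same id.
+// (Illustrative sketch; this decorator is not shipped with go-autorest.)
+func withCorrelationID(id string) autorest.PrepareDecorator {
+  return func(p autorest.Preparer) autorest.Preparer {
+    return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+      r, err := p.Prepare(r) // run the wrapped Preparer first
+      if err == nil {
+        r.Header.Set("X-Correlation-Id", id)
+      }
+      return r, err
+    })
+  }
+}
+```
+
+Such a decorator composes with the built-in ones, e.g.
+`Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), withCorrelationID("abc-123"))`.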
+
+## Helpers
+
+### Handling Swagger Dates
+
+The Swagger specification (https://swagger.io) that drives AutoRest
+(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
+github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct
+parsing and formatting. (A short sketch appears at the end of this file.)
+
+### Handling Empty Values
+
+In JSON, missing values have different semantics than empty values. This is especially true for
+services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains
+only those values to modify. Missing values are to be left unchanged. Developers, then, require a
+means to both specify an empty value and to leave the value out of the submitted JSON.
+
+The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits
+empty values from the rendered JSON. Since Go defines default values for all base types (such as ""
+for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package
+treats default values as meaning empty, omitting them from the rendered JSON. This means that, using
+the Go base types encoded through the default JSON package, it is not possible to create JSON to
+clear a value at the server.
+
+The workaround within the Go community is to use pointers to base types in lieu of base types within
+structures that map to JSON. For example, instead of a value of type `string`, the workaround uses
+`*string`. While this enables distinguishing empty values from those to be unchanged, creating
+pointers to a base type (notably constant, in-line values) requires additional variables. For
+example, this
+
+```go
+  s := struct {
+    S *string
+  }{ S: &"foo" }
+```
+
+fails, while this
+
+```go
+  v := "foo"
+  s := struct {
+    S *string
+  }{ S: &v }
+```
+
+succeeds.
+
+To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
+Go base types which have Swagger analogs. It also provides a helper that converts between
+`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
+associated with a key should be cleared. With the helpers, the previous example becomes
+
+```go
+  s := struct {
+    S *string
+  }{ S: to.StringPtr("foo") }
+```
+
+## Install
+
+```bash
+go get github.com/Azure/go-autorest/autorest
+go get github.com/Azure/go-autorest/autorest/azure
+go get github.com/Azure/go-autorest/autorest/date
+go get github.com/Azure/go-autorest/autorest/to
+```
+
+### Using with Go Modules
+
+In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules.
+
+- autorest/adal
+- autorest/azure/auth
+- autorest/azure/cli
+- autorest/date
+- autorest/mocks
+- autorest/to
+- autorest/validation
+- autorest
+- logger
+- tracing
+
+Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules.
+
+## License
+
+See LICENSE file.
+
+-----
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
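+
+As a minimal sketch of the Swagger date handling described under Helpers above (assuming the
+`date.ParseDate` helper and the `date.Date` type from the `autorest/date` package; the struct and
+field names are illustrative):
+
+```go
+package main
+
+import (
+  "encoding/json"
+  "fmt"
+
+  "github.com/Azure/go-autorest/autorest/date"
+)
+
+func main() {
+  // date.Date wraps time.Time and marshals to the Swagger "date"
+  // form (yyyy-MM-dd) instead of a full RFC3339 timestamp.
+  d, err := date.ParseDate("2001-02-03")
+  if err != nil {
+    panic(err)
+  }
+  // Illustrative payload; the field name "start" is not part of the package.
+  b, _ := json.Marshal(struct {
+    Start date.Date `json:"start"`
+  }{Start: d})
+  fmt.Println(string(b)) // {"start":"2001-02-03"}
+}
+```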
diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
new file mode 100644
index 000000000..b11eb0788
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -0,0 +1,294 @@
+# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `adal` to `azidentity`, please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/).
+
+# Azure Active Directory authentication for Go
+
+This is a standalone package for authenticating with Azure Active
+Directory from other Go libraries and applications, in particular the [Azure SDK
+for Go](https://github.com/Azure/azure-sdk-for-go).
+
+Note: Despite the package's name it is not related to other "ADAL" libraries
+maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
+should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
+or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
+trackers.
+
+## Install
+
+```bash
+go get -u github.com/Azure/go-autorest/autorest/adal
+```
+
+## Usage
+
+An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+1. Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+    --display-name example-app \
+    --homepage https://example-app/home \
+    --identifier-uris https://example-app/app \
+    --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS#12 version of the certificate, which also contains the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+    --display-name example-app \
+    --homepage https://example-app/home \
+    --identifier-uris https://example-app/app \
+    --key-usage Verify --end-date 2018-01-01 \
+    --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
+
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step.
+* Replace the `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+    // This is called after the token is acquired
+    return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace the `TENANT_ID` with your tenant ID.
+* Replace the `APPLICATION_ID` with the value from previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+    *oauthConfig,
+    applicationID,
+    applicationSecret,
+    resource,
+    callbacks...)
+if err != nil {
+    return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+}
+```
+
+* Replace the `APPLICATION_SECRET` with the `password` value from previous section.
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+    return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from pfx file
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+if err != nil {
+    return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+    *oauthConfig,
+    applicationID,
+    certificate,
+    rsaPrivateKey,
+    resource,
+    callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+}
+```
+
+* Update the certificate path to point to the example-app.pfx file which was created in previous section.
+
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+    oauthClient,
+    *oauthConfig,
+    applicationID,
+    resource)
+if err != nil {
+    return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+    return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+    *oauthConfig,
+    applicationID,
+    resource,
+    *token,
+    callbacks...)
+
+if err == nil {
+    token := spt.Token
+}
+```
+
+#### Username and password authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
+    *oauthConfig,
+    applicationID,
+    username,
+    password,
+    resource,
+    callbacks...)
+
+if err == nil {
+    token := spt.Token
+}
+```
+
+#### Authorization code authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
+    *oauthConfig,
+    applicationID,
+    clientSecret,
+    authorizationCode,
+    redirectURI,
+    resource,
+    callbacks...)
+
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+  -applicationId string
+        application id
+  -certificatePath string
+        path to pk12/PFX application certificate
+  -mode string
+        authentication mode (device, secret, cert, refresh) (default "device")
+  -resource string
+        resource for which the token is requested
+  -secret string
+        application secret
+  -tenantId string
+        tenant id
+  -tokenCachePath string
+        location of oauth token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+    -applicationId "APPLICATION_ID" \
+    -tenantId "TENANT_ID" \
+    -resource https://management.core.windows.net/
+
+```
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
new file mode 100644
index 000000000..fa5964742
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -0,0 +1,151 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "errors"
+    "fmt"
+    "net/url"
+)
+
+const (
+    activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
+)
+
+// OAuthConfig represents the endpoints needed
+// in OAuth operations
+type OAuthConfig struct {
+    AuthorityEndpoint  url.URL `json:"authorityEndpoint"`
+    AuthorizeEndpoint  url.URL `json:"authorizeEndpoint"`
+    TokenEndpoint      url.URL `json:"tokenEndpoint"`
+    DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
+}
+
+// IsZero returns true if the OAuthConfig object is zero-initialized.
+func (oac OAuthConfig) IsZero() bool {
+    return oac == OAuthConfig{}
+}
+
+func validateStringParam(param, name string) error {
+    if len(param) == 0 {
+        return fmt.Errorf("parameter '%s' cannot be empty", name)
+    }
+    return nil
+}
+
+// NewOAuthConfig returns an OAuthConfig with tenant-specific URLs
+func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
+    apiVer := "1.0"
+    return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
+}
+
+// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant-specific URLs.
+// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
+    if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
+        return nil, err
+    }
+    api := ""
+    // it's legal for tenantID to be empty so don't validate it
+    if apiVersion != nil {
+        if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
+            return nil, err
+        }
+        api = fmt.Sprintf("?api-version=%s", *apiVersion)
+    }
+    u, err := url.Parse(activeDirectoryEndpoint)
+    if err != nil {
+        return nil, err
+    }
+    authorityURL, err := u.Parse(tenantID)
+    if err != nil {
+        return nil, err
+    }
+    authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
+    if err != nil {
+        return nil, err
+    }
+    tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
+    if err != nil {
+        return nil, err
+    }
+    deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
+    if err != nil {
+        return nil, err
+    }
+
+    return &OAuthConfig{
+        AuthorityEndpoint:  *authorityURL,
+        AuthorizeEndpoint:  *authorizeURL,
+        TokenEndpoint:      *tokenURL,
+        DeviceCodeEndpoint: *deviceCodeURL,
+    }, nil
+}
+
+// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+    PrimaryTenant() *OAuthConfig
+    AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+    APIVersion string
+}
+
+// apiVersion returns the configured API version, defaulting to "1.0".
+// The raw version is returned here; NewOAuthConfigWithAPIVersion adds the
+// "?api-version=" query string itself.
+func (c OAuthOptions) apiVersion() string {
+    if c.APIVersion != "" {
+        return c.APIVersion
+    }
+    return "1.0"
+}
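For orientation, here is a minimal sketch (editorial, not part of the vendored file) of how the constructor is used; the tenant ID is a placeholder and errors are surfaced with a panic for brevity:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Placeholder tenant; substitute a real tenant ID (or "common").
	oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "TENANT_ID")
	if err != nil {
		panic(err)
	}
	// The derived endpoints carry the tenant and the api-version query parameter.
	fmt.Println(oauthConfig.AuthorizeEndpoint.String())
	fmt.Println(oauthConfig.TokenEndpoint.String())
	fmt.Println(oauthConfig.DeviceCodeEndpoint.String())
}
```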
+
+// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
+    if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
+        return nil, errors.New("must specify one to three auxiliary tenants")
+    }
+    mtCfg := multiTenantOAuthConfig{
+        cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
+    }
+    apiVer := options.apiVersion()
+    pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
+    }
+    mtCfg.cfgs[0] = pri
+    for i := range auxiliaryTenantIDs {
+        // use the same API version for the auxiliary tenants as for the primary tenant
+        aux, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, auxiliaryTenantIDs[i], &apiVer)
+        if err != nil {
+            return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
+        }
+        mtCfg.cfgs[i+1] = aux
+    }
+    return mtCfg, nil
+}
+
+type multiTenantOAuthConfig struct {
+    // first config in the slice is the primary tenant
+    cfgs []*OAuthConfig
+}
+
+func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
+    return m.cfgs[0]
+}
+
+func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
+    return m.cfgs[1:]
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
new file mode 100644
index 000000000..9daa4b58b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
@@ -0,0 +1,273 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+  This file is largely based on rjw57/oauth2device's code, with the following differences:
+   * scope -> resource, and only allow a single one
+   * receive "Message" in the DeviceCode struct and show it to users as the prompt
+   * azure-xplat-cli has the following behavior that this emulates:
+     - does not send client_secret during the token exchange
+     - sends resource again in the token exchange request
+*/
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "strings"
+    "time"
+)
+
+const (
+    logPrefix = "autorest/adal/devicetoken:"
+)
+
+var (
+    // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
+    ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
+
+    // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
+    ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
+
+    // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
+    ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
+
+    // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
+    ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
+
+    // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
+    ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
+
+    // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
+    ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
+
+    // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
+    ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
+
+    errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
+    errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
+    errTokenSendingFails  = "Error occurred while sending request with device code for a token"
+    errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
+    errStatusNotOK        = "Error HTTP status != 200"
+)
+
+// DeviceCode is the object returned by the device auth endpoint.
+// It contains information to instruct the user to complete the auth flow.
+type DeviceCode struct {
+    DeviceCode      *string `json:"device_code,omitempty"`
+    UserCode        *string `json:"user_code,omitempty"`
+    VerificationURL *string `json:"verification_url,omitempty"`
+    ExpiresIn       *int64  `json:"expires_in,string,omitempty"`
+    Interval        *int64  `json:"interval,string,omitempty"`
+
+    Message  *string `json:"message"` // Azure specific
+    Resource string  // the following fields are stored when the flow is initiated and used during the token exchange
+    OAuthConfig OAuthConfig
+    ClientID    string
+}
+
+// TokenError is the object returned by the token exchange endpoint
+// when something is amiss.
+type TokenError struct {
+    Error            *string `json:"error,omitempty"`
+    ErrorCodes       []int   `json:"error_codes,omitempty"`
+    ErrorDescription *string `json:"error_description,omitempty"`
+    Timestamp        *string `json:"timestamp,omitempty"`
+    TraceID          *string `json:"trace_id,omitempty"`
+}
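As a rough editorial illustration (not part of the vendored file), a token-endpoint error body decodes straight into `TokenError`; the JSON sample below is made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Hypothetical error body, shaped like an AAD token-endpoint response.
	body := []byte(`{"error":"authorization_pending","error_description":"user has not signed in yet"}`)

	var te adal.TokenError
	if err := json.Unmarshal(body, &te); err != nil {
		panic(err)
	}
	if te.Error != nil && te.ErrorDescription != nil {
		fmt.Printf("error=%s description=%s\n", *te.Error, *te.ErrorDescription)
	}
}
```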
+
+// deviceToken is the object returned by the token exchange endpoint.
+// It can either look like a Token or a TokenError, so put both here
+// and check for presence of "Error" to know if we are in error state.
+type deviceToken struct {
+    Token
+    TokenError
+}
+
+// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+// Deprecated: use InitiateDeviceAuthWithContext() instead.
+func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+    return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource)
+}
+
+// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+    v := url.Values{
+        "client_id": []string{clientID},
+        "resource":  []string{resource},
+    }
+
+    s := v.Encode()
+    body := ioutil.NopCloser(strings.NewReader(s))
+
+    req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
+    }
+
+    req.ContentLength = int64(len(s))
+    req.Header.Set(contentType, mimeTypeFormPost)
+    resp, err := sender.Do(req.WithContext(ctx))
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
+    }
+    defer resp.Body.Close()
+
+    rb, err := ioutil.ReadAll(resp.Body)
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
+    }
+
+    if resp.StatusCode != http.StatusOK {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
+    }
+
+    if len(strings.Trim(string(rb), " ")) == 0 {
+        return nil, ErrDeviceCodeEmpty
+    }
+
+    var code DeviceCode
+    err = json.Unmarshal(rb, &code)
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
+    }
+
+    code.ClientID = clientID
+    code.Resource = resource
+    code.OAuthConfig = oauthConfig
+
+    return &code, nil
+}
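A minimal, hypothetical caller of the context-aware variants might look like the following editorial sketch; the tenant, client ID, and resource are placeholders, and the printed message tells the user where to enter the user code:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "TENANT_ID")
	if err != nil {
		panic(err)
	}

	// Bound the whole flow; the user has five minutes to finish signing in.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	code, err := adal.InitiateDeviceAuthWithContext(ctx, &http.Client{}, *cfg, "APPLICATION_ID", "https://management.core.windows.net/")
	if err != nil {
		panic(err)
	}
	fmt.Println(*code.Message) // instructs the user how to complete sign-in

	token, err := adal.WaitForUserCompletionWithContext(ctx, &http.Client{}, code)
	if err != nil {
		panic(err)
	}
	fmt.Println("got access token, expires:", token.Expires())
}
```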
+
+// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has been completed, timed out, or otherwise failed.
+// Deprecated: use CheckForUserCompletionWithContext() instead.
+func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+    return CheckForUserCompletionWithContext(context.Background(), sender, code)
+}
+
+// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has been completed, timed out, or otherwise failed.
+func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
+    v := url.Values{
+        "client_id":  []string{code.ClientID},
+        "code":       []string{*code.DeviceCode},
+        "grant_type": []string{OAuthGrantTypeDeviceCode},
+        "resource":   []string{code.Resource},
+    }
+
+    s := v.Encode()
+    body := ioutil.NopCloser(strings.NewReader(s))
+
+    req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
+    }
+
+    req.ContentLength = int64(len(s))
+    req.Header.Set(contentType, mimeTypeFormPost)
+    resp, err := sender.Do(req.WithContext(ctx))
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
+    }
+    defer resp.Body.Close()
+
+    rb, err := ioutil.ReadAll(resp.Body)
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
+    }
+
+    if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
+    }
+    if len(strings.Trim(string(rb), " ")) == 0 {
+        return nil, ErrOAuthTokenEmpty
+    }
+
+    var token deviceToken
+    err = json.Unmarshal(rb, &token)
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
+    }
+
+    if token.Error == nil {
+        return &token.Token, nil
+    }
+
+    switch *token.Error {
+    case "authorization_pending":
+        return nil, ErrDeviceAuthorizationPending
+    case "slow_down":
+        return nil, ErrDeviceSlowDown
+    case "access_denied":
+        return nil, ErrDeviceAccessDenied
+    case "code_expired":
+        return nil, ErrDeviceCodeExpired
+    default:
+        // return a more meaningful error message if available
+        if token.ErrorDescription != nil {
+            return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription)
+        }
+        return nil, ErrDeviceGeneric
+    }
+}
+
+// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
+// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+// Deprecated: use WaitForUserCompletionWithContext() instead.
+func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+    return WaitForUserCompletionWithContext(context.Background(), sender, code)
+}
+
+// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error
+// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletionWithContext(ctx, sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + select { + case <-time.After(waitDuration): + // noop + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go new file mode 100644 index 000000000..647a61bb8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -0,0 +1,25 @@ +//go:build modhack +// +build modhack + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 000000000..2a974a39b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,135 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "golang.org/x/crypto/pkcs12" +) + +var ( + // ErrMissingCertificate is returned when no local certificate is found in the provided PFX data. + ErrMissingCertificate = errors.New("adal: certificate missing") + + // ErrMissingPrivateKey is returned when no private key is found in the provided PFX data. 
+    ErrMissingPrivateKey = errors.New("adal: private key missing")
+)
+
+// LoadToken restores a Token object from a file located at 'path'.
+func LoadToken(path string) (*Token, error) {
+    file, err := os.Open(path)
+    if err != nil {
+        return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+    }
+    defer file.Close()
+
+    var token Token
+
+    dec := json.NewDecoder(file)
+    if err = dec.Decode(&token); err != nil {
+        return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+    }
+    return &token, nil
+}
+
+// SaveToken persists an oauth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+    dir := filepath.Dir(path)
+    err := os.MkdirAll(dir, os.ModePerm)
+    if err != nil {
+        return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+    }
+
+    newFile, err := ioutil.TempFile(dir, "token")
+    if err != nil {
+        return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+    }
+    tempPath := newFile.Name()
+
+    if err := json.NewEncoder(newFile).Encode(token); err != nil {
+        return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+    }
+    if err := newFile.Close(); err != nil {
+        return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+    }
+
+    // Atomic replace to avoid multi-writer file corruptions
+    if err := os.Rename(tempPath, path); err != nil {
+        return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+    }
+    if err := os.Chmod(path, mode); err != nil {
+        return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+    }
+    return nil
+}
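To round-trip the cache, a small editorial sketch of SaveToken followed by LoadToken might look like this; the token values and cache path are placeholders:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Illustrative token; a real one comes back from a refresh.
	tok := adal.Token{
		AccessToken: "example",
		ExpiresIn:   "0",
		ExpiresOn:   "0",
		NotBefore:   "0",
	}

	// Persist with owner-only permissions, then read it back.
	if err := adal.SaveToken("/tmp/adal-example/accessToken.json", 0600, tok); err != nil {
		panic(err)
	}
	loaded, err := adal.LoadToken("/tmp/adal-example/accessToken.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(loaded.AccessToken)
}
```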
+
+// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
+// The PFX data must contain a private key along with a certificate whose public key matches that of the
+// private key or an error is returned.
+// If the private key is not password protected pass the empty string for password.
+func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+    blocks, err := pkcs12.ToPEM(pfxData, password)
+    if err != nil {
+        return nil, nil, err
+    }
+    // first extract the private key
+    var priv *rsa.PrivateKey
+    for _, block := range blocks {
+        if block.Type == "PRIVATE KEY" {
+            priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+            if err != nil {
+                return nil, nil, err
+            }
+            break
+        }
+    }
+    if priv == nil {
+        return nil, nil, ErrMissingPrivateKey
+    }
+    // now find the certificate with the matching public key of our private key
+    var cert *x509.Certificate
+    for _, block := range blocks {
+        if block.Type == "CERTIFICATE" {
+            pcert, err := x509.ParseCertificate(block.Bytes)
+            if err != nil {
+                return nil, nil, err
+            }
+            certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
+            if !ok {
+                // keep looking
+                continue
+            }
+            if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
+                // found a match
+                cert = pcert
+                break
+            }
+        }
+    }
+    if cert == nil {
+        return nil, nil, ErrMissingCertificate
+    }
+    return cert, priv, nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 000000000..eb649bce9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,101 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "crypto/tls"
+    "net"
+    "net/http"
+    "net/http/cookiejar"
+    "sync"
+    "time"
+
+    "github.com/Azure/go-autorest/tracing"
+)
+
+const (
+    contentType      = "Content-Type"
+    mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+// DO NOT ACCESS THIS DIRECTLY. go through sender()
+var defaultSender Sender
+var defaultSenderInit = &sync.Once{}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+    Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+    return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+    return DecorateSender(sender(), decorators...)
+}
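Since any `func(Sender) Sender` is a valid SendDecorator, a hypothetical logging decorator (an editorial sketch, not part of the vendored file) can be layered onto the default sender like so:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

// logRequests is a hypothetical decorator that prints each request
// before handing it to the wrapped Sender.
func logRequests(s adal.Sender) adal.Sender {
	return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
		fmt.Printf("-> %s %s\n", r.Method, r.URL)
		return s.Do(r)
	})
}

func main() {
	// Wrap the package's default sender with the logging decorator.
	sender := adal.CreateSender(logRequests)
	_ = sender // pass this as the Sender argument of e.g. InitiateDeviceAuthWithContext
}
```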
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+    for _, decorate := range decorators {
+        s = decorate(s)
+    }
+    return s
+}
+
+func sender() Sender {
+    // note that we can't init defaultSender in init() since it will
+    // execute before calling code has had a chance to enable tracing
+    defaultSenderInit.Do(func() {
+        // copied from http.DefaultTransport with a TLS minimum version.
+        transport := &http.Transport{
+            Proxy: http.ProxyFromEnvironment,
+            DialContext: (&net.Dialer{
+                Timeout:   30 * time.Second,
+                KeepAlive: 30 * time.Second,
+            }).DialContext,
+            ForceAttemptHTTP2:     true,
+            MaxIdleConns:          100,
+            IdleConnTimeout:       90 * time.Second,
+            TLSHandshakeTimeout:   10 * time.Second,
+            ExpectContinueTimeout: 1 * time.Second,
+            TLSClientConfig: &tls.Config{
+                MinVersion: tls.VersionTLS12,
+            },
+        }
+        var roundTripper http.RoundTripper = transport
+        if tracing.IsEnabled() {
+            roundTripper = tracing.NewTransport(transport)
+        }
+        j, _ := cookiejar.New(nil)
+        defaultSender = &http.Client{Jar: j, Transport: roundTripper}
+    })
+    return defaultSender
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 000000000..c90209a94
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,1396 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "context"
+    "crypto/rand"
+    "crypto/rsa"
+    "crypto/sha1"
+    "crypto/x509"
+    "encoding/base64"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "math"
+    "net/http"
+    "net/url"
+    "os"
+    "strconv"
+    "strings"
+    "sync"
+    "time"
+
+    "github.com/Azure/go-autorest/autorest/date"
+    "github.com/Azure/go-autorest/logger"
+    "github.com/golang-jwt/jwt/v4"
+)
+
+const (
+    defaultRefresh = 5 * time.Minute
+
+    // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+    OAuthGrantTypeDeviceCode = "device_code"
+
+    // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+    OAuthGrantTypeClientCredentials = "client_credentials"
+
+    // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
+    OAuthGrantTypeUserPass = "password"
+
+    // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+    OAuthGrantTypeRefreshToken = "refresh_token"
+
+    // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
+    OAuthGrantTypeAuthorizationCode = "authorization_code"
+
+    // metadataHeader is the header required by MSI extension
+    metadataHeader = "Metadata"
+
+    // msiEndpoint is the well known endpoint for getting MSI authentication tokens
+    msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+
+    // the API version to use for the MSI endpoint
+    msiAPIVersion = "2018-02-01"
+
+    // the default number of attempts to refresh an MSI authentication token
+    defaultMaxMSIRefreshAttempts = 5
+
+    // msiEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
+    msiEndpointEnv = "MSI_ENDPOINT"
+
+    // msiSecretEnv is the environment variable used to store the request secret on App Service and Functions
+    msiSecretEnv = "MSI_SECRET"
+
+    // the API version to use for the legacy App Service MSI endpoint
+    appServiceAPIVersion2017 = "2017-09-01"
+
+    // secret header used when authenticating against app service MSI endpoint
+    secretHeader = "Secret"
+
+    // the format for expires_on in UTC with AM/PM
+    expiresOnDateFormatPM = "1/2/2006 15:04:05 PM +00:00"
+
+    // the format for expires_on in UTC without AM/PM
+    expiresOnDateFormat = "1/2/2006 15:04:05 +00:00"
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+    OAuthToken() string
+}
+
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+    PrimaryOAuthToken() string
+    AuxiliaryOAuthTokens() []string
+}
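As a small editorial illustration, any type with an `OAuthToken() string` method satisfies OAuthTokenProvider; a hypothetical static provider, handy as a stub in tests, might look like:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

// staticTokenProvider is a hypothetical provider that always returns
// the same access token.
type staticTokenProvider string

func (s staticTokenProvider) OAuthToken() string { return string(s) }

func main() {
	var p adal.OAuthTokenProvider = staticTokenProvider("dummy-access-token")
	fmt.Println(p.OAuthToken())
}
```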
+
+// TokenRefreshError is an interface used by errors returned during token refresh.
+type TokenRefreshError interface {
+    error
+    Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+    Refresh() error
+    RefreshExchange(resource string) error
+    EnsureFresh() error
+}
+
+// RefresherWithContext is an interface for token refresh functionality
+type RefresherWithContext interface {
+    RefreshWithContext(ctx context.Context) error
+    RefreshExchangeWithContext(ctx context.Context, resource string) error
+    EnsureFreshWithContext(ctx context.Context) error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// TokenRefresh is a type representing a custom callback to refresh a token
+type TokenRefresh func(ctx context.Context, resource string) (*Token, error)
+
+// Token encapsulates the access token used to authorize Azure requests.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
+type Token struct {
+    AccessToken  string `json:"access_token"`
+    RefreshToken string `json:"refresh_token"`
+
+    ExpiresIn json.Number `json:"expires_in"`
+    ExpiresOn json.Number `json:"expires_on"`
+    NotBefore json.Number `json:"not_before"`
+
+    Resource string `json:"resource"`
+    Type     string `json:"token_type"`
+}
+
+func newToken() Token {
+    return Token{
+        ExpiresIn: "0",
+        ExpiresOn: "0",
+        NotBefore: "0",
+    }
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+    return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+    s, err := t.ExpiresOn.Float64()
+    if err != nil {
+        s = -3600
+    }
+
+    expiration := date.NewUnixTimeFromSeconds(s)
+
+    return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+    return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+    return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token
+func (t *Token) OAuthToken() string {
+    return t.AccessToken
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an oAuth token.
+type ServicePrincipalSecret interface {
+    SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret
+// meaning it is not valid for fetching a fresh token. This is used by tokens
+// created with NewServicePrincipalTokenFromManualToken.
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret
+// It only returns an error for the ServicePrincipalNoSecret type
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+    return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(24 * time.Hour).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { + msiType msiType + clientResourceID string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
+func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + +// ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs. +type ServicePrincipalFederatedSecret struct { + jwt string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer. +func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + + v.Set("client_assertion", secret.jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalFederatedSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalFederatedSecret is not supported") +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. 
+type ServicePrincipalToken struct {
+    inner             servicePrincipalToken
+    refreshLock       *sync.RWMutex
+    sender            Sender
+    customRefreshFunc TokenRefresh
+    refreshCallbacks  []TokenRefreshCallback
+    // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
+    // Setting this to a value less than 1 will use the default value.
+    MaxMSIRefreshAttempts int
+}
+
+// MarshalTokenJSON returns the marshalled inner token.
+func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) {
+    return json.Marshal(spt.inner.Token)
+}
+
+// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks.
+func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) {
+    spt.refreshCallbacks = callbacks
+}
+
+// SetCustomRefreshFunc sets a custom refresh function used to refresh the token.
+func (spt *ServicePrincipalToken) SetCustomRefreshFunc(customRefreshFunc TokenRefresh) {
+    spt.customRefreshFunc = customRefreshFunc
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) {
+    return json.Marshal(spt.inner)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
+    // need to determine the token type
+    raw := map[string]interface{}{}
+    err := json.Unmarshal(data, &raw)
+    if err != nil {
+        return err
+    }
+    secret := raw["secret"].(map[string]interface{})
+    switch secret["type"] {
+    case "ServicePrincipalNoSecret":
+        spt.inner.Secret = &ServicePrincipalNoSecret{}
+    case "ServicePrincipalTokenSecret":
+        spt.inner.Secret = &ServicePrincipalTokenSecret{}
+    case "ServicePrincipalCertificateSecret":
+        return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
+    case "ServicePrincipalMSISecret":
+        return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
+    case "ServicePrincipalUsernamePasswordSecret":
+        spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
+    case "ServicePrincipalAuthorizationCodeSecret":
+        spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
+    case "ServicePrincipalFederatedSecret":
+        return errors.New("unmarshalling ServicePrincipalFederatedSecret is not supported")
+    default:
+        return fmt.Errorf("unrecognized token type '%s'", secret["type"])
+    }
+    err = json.Unmarshal(data, &spt.inner)
+    if err != nil {
+        return err
+    }
+    // Don't override the refreshLock or the sender if those have been already set.
+    if spt.refreshLock == nil {
+        spt.refreshLock = &sync.RWMutex{}
+    }
+    if spt.sender == nil {
+        spt.sender = sender()
+    }
+    return nil
+}
+
+// internal type used for marshalling/unmarshalling
+type servicePrincipalToken struct {
+    Token         Token                  `json:"token"`
+    Secret        ServicePrincipalSecret `json:"secret"`
+    OauthConfig   OAuthConfig            `json:"oauth"`
+    ClientID      string                 `json:"clientID"`
+    Resource      string                 `json:"resource"`
+    AutoRefresh   bool                   `json:"autoRefresh"`
+    RefreshWithin time.Duration          `json:"refreshWithin"`
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+    if oac.IsZero() {
+        return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+    }
+    return nil
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+    if err := validateOAuthConfig(oauthConfig); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientID, "clientID"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(secret, "secret"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(resource, "resource"); err != nil {
+        return nil, err
+    }
+    return NewServicePrincipalTokenWithSecret(
+        oauthConfig,
+        clientID,
+        resource,
+        &ServicePrincipalTokenSecret{
+            ClientSecret: secret,
+        },
+        callbacks...,
+    )
+}
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and
+// RSA private key (for example, as decoded from PKCS#12 data).
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+    if err := validateOAuthConfig(oauthConfig); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientID, "clientID"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(resource, "resource"); err != nil {
+        return nil, err
+    }
+    if certificate == nil {
+        return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
+    }
+    if privateKey == nil {
+        return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
+    }
+    return NewServicePrincipalTokenWithSecret(
+        oauthConfig,
+        clientID,
+        resource,
+        &ServicePrincipalCertificateSecret{
+            PrivateKey:  privateKey,
+            Certificate: certificate,
+        },
+        callbacks...,
+    )
+}
+
+// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+    if err := validateOAuthConfig(oauthConfig); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientID, "clientID"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(username, "username"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(password, "password"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(resource, "resource"); err != nil {
+        return nil, err
+    }
+    return NewServicePrincipalTokenWithSecret(
+        oauthConfig,
+        clientID,
+        resource,
+        &ServicePrincipalUsernamePasswordSecret{
+            Username: username,
+            Password: password,
+        },
+        callbacks...,
+    )
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+    if err := validateOAuthConfig(oauthConfig); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientID, "clientID"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(resource, "resource"); err != nil {
+        return nil, err
+    }
+
+    return NewServicePrincipalTokenWithSecret(
+        oauthConfig,
+        clientID,
+        resource,
+        &ServicePrincipalAuthorizationCodeSecret{
+            ClientSecret:      clientSecret,
+            AuthorizationCode: authorizationCode,
+            RedirectURI:       redirectURI,
+        },
+        callbacks...,
+    )
+}
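Pulling the constructors together, a hypothetical client-credentials caller (an editorial sketch; all identifiers are placeholders) might look like this:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "TENANT_ID")
	if err != nil {
		panic(err)
	}

	spt, err := adal.NewServicePrincipalToken(*cfg, "APPLICATION_ID", "APPLICATION_SECRET", "https://management.core.windows.net/")
	if err != nil {
		panic(err)
	}

	// EnsureFresh refreshes only when the token is within its refresh window.
	if err := spt.EnsureFresh(); err != nil {
		panic(err)
	}
	b, err := spt.MarshalTokenJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```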
+
+// NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT.
+func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+    if err := validateOAuthConfig(oauthConfig); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientID, "clientID"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(resource, "resource"); err != nil {
+        return nil, err
+    }
+    if jwt == "" {
+        return nil, fmt.Errorf("parameter 'jwt' cannot be empty")
+    }
+    return NewServicePrincipalTokenWithSecret(
+        oauthConfig,
+        clientID,
+        resource,
+        &ServicePrincipalFederatedSecret{
+            jwt: jwt,
+        },
+        callbacks...,
+    )
+}
+
+type msiType int
+
+const (
+    msiTypeUnavailable msiType = iota
+    msiTypeAppServiceV20170901
+    msiTypeCloudShell
+    msiTypeIMDS
+)
+
+func (m msiType) String() string {
+    switch m {
+    case msiTypeAppServiceV20170901:
+        return "AppServiceV20170901"
+    case msiTypeCloudShell:
+        return "CloudShell"
+    case msiTypeIMDS:
+        return "IMDS"
+    default:
+        return fmt.Sprintf("unhandled MSI type %d", m)
+    }
+}
+
+// returns the MSI type and endpoint, or an error
+func getMSIType() (msiType, string, error) {
+    if endpointEnvVar := os.Getenv(msiEndpointEnv); endpointEnvVar != "" {
+        // if the env var MSI_ENDPOINT is set
+        if secretEnvVar := os.Getenv(msiSecretEnv); secretEnvVar != "" {
+            // if BOTH the env vars MSI_ENDPOINT and MSI_SECRET are set the msiType is AppService
+            return msiTypeAppServiceV20170901, endpointEnvVar, nil
+        }
+        // if ONLY the env var MSI_ENDPOINT is set the msiType is CloudShell
+        return msiTypeCloudShell, endpointEnvVar, nil
+    }
+    // if MSI_ENDPOINT is NOT set assume the msiType is IMDS
+    return msiTypeIMDS, msiEndpoint, nil
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+// NOTE: this always returns the IMDS endpoint, it does not work for app services or cloud shell.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIVMEndpoint() (string, error) {
+    return msiEndpoint, nil
+}
+
+// GetMSIAppServiceEndpoint gets the MSI endpoint for App Service and Functions.
+// It will return an error when not running in an app service/functions environment.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIAppServiceEndpoint() (string, error) {
+    msiType, endpoint, err := getMSIType()
+    if err != nil {
+        return "", err
+    }
+    switch msiType {
+    case msiTypeAppServiceV20170901:
+        return endpoint, nil
+    default:
+        return "", fmt.Errorf("%s is not an app service environment", msiType)
+    }
+}
+
+// GetMSIEndpoint gets the appropriate MSI endpoint depending on the runtime environment.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIEndpoint() (string, error) {
+    _, endpoint, err := getMSIType()
+    return endpoint, err
+}
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+    return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", "", callbacks...)
+}
+}
+
+// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the client ID of the specified user-assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
+func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateStringParam(userAssignedID, "userAssignedID"); err != nil {
+		return nil, err
+	}
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, "", callbacks...)
+}
+
+// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the Azure resource ID of the user-assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
+func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateStringParam(identityResourceID, "identityResourceID"); err != nil {
+		return nil, err
+	}
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", identityResourceID, callbacks...)
+}
+
+// ManagedIdentityOptions contains optional values for configuring managed identity authentication.
+type ManagedIdentityOptions struct {
+	// ClientID is the user-assigned identity to use during authentication.
+	// It is mutually exclusive with IdentityResourceID.
+	ClientID string
+
+	// IdentityResourceID is the resource ID of the user-assigned identity to use during authentication.
+	// It is mutually exclusive with ClientID.
+	IdentityResourceID string
+}
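A migration sketch for the deprecated MSI constructors above, illustrative only; the resource and client ID values are placeholders:

	// before, via the deprecated MSI constructor:
	spt, err := adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(
		"", "https://management.azure.com/", "<user-assigned-client-id>")

	// after, via the managed identity constructor defined below
	// (pass nil options to use the system-assigned identity):
	spt, err = adal.NewServicePrincipalTokenFromManagedIdentity(
		"https://management.azure.com/",
		&adal.ManagedIdentityOptions{ClientID: "<user-assigned-client-id>"})

+// NewServicePrincipalTokenFromManagedIdentity creates a ServicePrincipalToken using a managed identity.
+// It supports the following managed identity environments:
+//   - App Service Environment (API version 2017-09-01 only)
+//   - Cloud Shell
+//   - IMDS with a system or user assigned identity
+func NewServicePrincipalTokenFromManagedIdentity(resource string, options *ManagedIdentityOptions, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if options == nil {
+		options = &ManagedIdentityOptions{}
+	}
+	return newServicePrincipalTokenFromMSI("", resource, options.ClientID, options.IdentityResourceID, callbacks...)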
+} + +func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if userAssignedID != "" && identityResourceID != "" { + return nil, errors.New("cannot specify userAssignedID and identityResourceID") + } + msiType, endpoint, err := getMSIType() + if err != nil { + logger.Instance.Writef(logger.LogError, "Error determining managed identity environment: %v\n", err) + return nil, err + } + logger.Instance.Writef(logger.LogInfo, "Managed identity environment is %s, endpoint is %s\n", msiType, endpoint) + if msiEndpoint != "" { + endpoint = msiEndpoint + logger.Instance.Writef(logger.LogInfo, "Managed identity custom endpoint is %s\n", endpoint) + } + msiEndpointURL, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + // cloud shell sends its data in the request body + if msiType != msiTypeCloudShell { + v := url.Values{} + v.Set("resource", resource) + clientIDParam := "client_id" + switch msiType { + case msiTypeAppServiceV20170901: + clientIDParam = "clientid" + v.Set("api-version", appServiceAPIVersion2017) + break + case msiTypeIMDS: + v.Set("api-version", msiAPIVersion) + } + if userAssignedID != "" { + v.Set(clientIDParam, userAssignedID) + } else if identityResourceID != "" { + v.Set("mi_res_id", identityResourceID) + } + msiEndpointURL.RawQuery = v.Encode() + } + + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: OAuthConfig{ + TokenEndpoint: *msiEndpointURL, + }, + Secret: &ServicePrincipalMSISecret{ + msiType: msiType, + clientResourceID: identityResourceID, + }, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + ClientID: userAssignedID, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, + } + + return spt, nil +} + +// internal type that implements TokenRefreshError +type tokenRefreshError struct { + message string + resp *http.Response +} + +// Error implements the error interface which is part of the TokenRefreshError interface. +func (tre tokenRefreshError) Error() string { + return tre.message +} + +// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. +func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFresh() error { + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. 
+func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + // must take the read lock when initially checking the token's expiration + if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) { + // take the write lock then check again to see if the token was already refreshed + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + return spt.refreshInternal(ctx, spt.inner.Resource) + } + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.inner.Token) + if err != nil { + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.RefreshWithContext(context.Background()) +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, spt.inner.Resource) +} + +// RefreshExchange refreshes the token, but for a different resource. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.RefreshExchangeWithContext(context.Background(), resource) +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, resource) +} + +func (spt *ServicePrincipalToken) getGrantType() string { + switch spt.inner.Secret.(type) { + case *ServicePrincipalUsernamePasswordSecret: + return OAuthGrantTypeUserPass + case *ServicePrincipalAuthorizationCodeSecret: + return OAuthGrantTypeAuthorizationCode + default: + return OAuthGrantTypeClientCredentials + } +} + +func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { + if spt.customRefreshFunc != nil { + token, err := spt.customRefreshFunc(ctx, resource) + if err != nil { + return err + } + spt.inner.Token = *token + return spt.InvokeRefreshCallbacks(spt.inner.Token) + } + req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) + if err != nil { + return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) + } + req.Header.Add("User-Agent", UserAgent()) + req = req.WithContext(ctx) + var resp *http.Response + authBodyFilter := func(b []byte) []byte { + if logger.Level() != logger.LogAuth { + return []byte("**REDACTED** authentication body") + } + return b + } + if msiSecret, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { + switch msiSecret.msiType { + case msiTypeAppServiceV20170901: + req.Method = http.MethodGet + req.Header.Set("secret", os.Getenv(msiSecretEnv)) + break + case msiTypeCloudShell: + req.Header.Set("Metadata", "true") + data := url.Values{} + data.Set("resource", spt.inner.Resource) + if spt.inner.ClientID != "" { + data.Set("client_id", spt.inner.ClientID) + } else if msiSecret.clientResourceID != "" { + data.Set("msi_res_id", msiSecret.clientResourceID) + } + req.Body = ioutil.NopCloser(strings.NewReader(data.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + break + case msiTypeIMDS: + req.Method = http.MethodGet + req.Header.Set("Metadata", "true") + break + } + logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter}) + resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) + } else { + v := url.Values{} + v.Set("client_id", spt.inner.ClientID) + v.Set("resource", resource) + + if spt.inner.Token.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.inner.Token.RefreshToken) + // web apps must specify client_secret when refreshing tokens + // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens + if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + } else { + v.Set("grant_type", spt.getGrantType()) + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + req.Body = body + logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter}) + resp, err = spt.sender.Do(req) + } + + // don't return a TokenRefreshError here; this will allow retry logic to apply + if err != nil { + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) + } else if resp == nil { + return fmt.Errorf("adal: received nil response and error") + } + + logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter}) + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v Endpoint %s", resp.StatusCode, err, req.URL.String()), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s Endpoint %s", resp.StatusCode, string(rb), req.URL.String()), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. 
Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + token := struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + // AAD returns expires_in as a string, ADFS returns it as an int + ExpiresIn json.Number `json:"expires_in"` + // expires_on can be in three formats, a UTC time stamp, or the number of seconds as a string *or* int. + ExpiresOn interface{} `json:"expires_on"` + NotBefore json.Number `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` + }{} + // return a TokenRefreshError in the follow error cases as the token is in an unexpected format + err = json.Unmarshal(rb, &token) + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)), resp) + } + expiresOn := json.Number("") + // ADFS doesn't include the expires_on field + if token.ExpiresOn != nil { + if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp) + } + } + spt.inner.Token.AccessToken = token.AccessToken + spt.inner.Token.RefreshToken = token.RefreshToken + spt.inner.Token.ExpiresIn = token.ExpiresIn + spt.inner.Token.ExpiresOn = expiresOn + spt.inner.Token.NotBefore = token.NotBefore + spt.inner.Token.Resource = token.Resource + spt.inner.Token.Type = token.Type + + return spt.InvokeRefreshCallbacks(spt.inner.Token) +} + +// converts expires_on to the number of seconds +func parseExpiresOn(s interface{}) (json.Number, error) { + // the JSON unmarshaler treats JSON numbers unmarshaled into an interface{} as float64 + asFloat64, ok := s.(float64) + if ok { + // this is the number of seconds as int case + return json.Number(strconv.FormatInt(int64(asFloat64), 10)), nil + } + asStr, ok := s.(string) + if !ok { + return "", fmt.Errorf("unexpected expires_on type %T", s) + } + // convert the expiration date to the number of seconds from the unix epoch + timeToDuration := func(t time.Time) json.Number { + return json.Number(strconv.FormatInt(t.UTC().Unix(), 10)) + } + if _, err := json.Number(asStr).Int64(); err == nil { + // this is the number of seconds case, no conversion required + return json.Number(asStr), nil + } else if eo, err := time.Parse(expiresOnDateFormatPM, asStr); err == nil { + return timeToDuration(eo), nil + } else if eo, err := time.Parse(expiresOnDateFormat, asStr); err == nil { + return timeToDuration(eo), nil + } else { + // unknown format + return json.Number(""), err + } +} + +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + 
http.StatusNetworkAuthenticationRequired)
+
+	// see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance
+
+	const maxDelay time.Duration = 60 * time.Second
+
+	attempt := 0
+	delay := time.Duration(0)
+
+	// maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made
+	if maxAttempts < 1 {
+		maxAttempts = defaultMaxMSIRefreshAttempts
+	}
+
+	for attempt < maxAttempts {
+		if resp != nil && resp.Body != nil {
+			io.Copy(ioutil.Discard, resp.Body)
+			resp.Body.Close()
+		}
+		resp, err = sender.Do(req)
+		// we want to retry if err is not nil or the status code is in the list of retry codes
+		if err == nil && !responseHasStatusCode(resp, retries...) {
+			return
+		}
+
+		// perform exponential backoff with a cap.
+		// must increment attempt before calculating delay.
+		attempt++
+		// the base value of 2 is the "delta backoff" as specified in the guidance doc
+		delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second)
+		if delay > maxDelay {
+			delay = maxDelay
+		}
+
+		select {
+		case <-time.After(delay):
+			// intentionally left blank
+		case <-req.Context().Done():
+			err = req.Context().Err()
+			return
+		}
+	}
+	return
+}
+
+func responseHasStatusCode(resp *http.Response, codes ...int) bool {
+	if resp != nil {
+		for _, i := range codes {
+			if i == resp.StatusCode {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
+func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
+	spt.inner.AutoRefresh = autoRefresh
+}
+
+// SetRefreshWithin sets the interval within which, if the token will expire, EnsureFresh will
+// refresh the token.
+func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
+	spt.inner.RefreshWithin = d
+}
+
+// SetSender sets the http.Client used when obtaining the Service Principal token. An
+// undecorated http.Client is used by default.
+func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }
+
+// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token.
+func (spt *ServicePrincipalToken) OAuthToken() string {
+	spt.refreshLock.RLock()
+	defer spt.refreshLock.RUnlock()
+	return spt.inner.Token.OAuthToken()
+}
+
+// Token returns a copy of the current token.
+func (spt *ServicePrincipalToken) Token() Token {
+	spt.refreshLock.RLock()
+	defer spt.refreshLock.RUnlock()
+	return spt.inner.Token
+}
+
+// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization.
+type MultiTenantServicePrincipalToken struct {
+	PrimaryToken    *ServicePrincipalToken
+	AuxiliaryTokens []*ServicePrincipalToken
+}
+
+// PrimaryOAuthToken returns the primary authorization token.
+func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string {
+	return mt.PrimaryToken.OAuthToken()
+}
+
+// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens.
+func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
+	tokens := make([]string, len(mt.AuxiliaryTokens))
+	for i := range mt.AuxiliaryTokens {
+		tokens[i] = mt.AuxiliaryTokens[i].OAuthToken()
+	}
+	return tokens
+}
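The multi-tenant token type above is normally built from a MultiTenantOAuthConfig. A minimal construction sketch, illustrative only; it assumes adal.NewMultiTenantOAuthConfig and adal.OAuthOptions from the adal config API, and all tenant, client, and resource values are placeholders:

	mtCfg, err := adal.NewMultiTenantOAuthConfig(
		"https://login.microsoftonline.com/", "<primary-tenant-id>",
		[]string{"<aux-tenant-id>"}, adal.OAuthOptions{})
	if err != nil {
		// handle error
	}
	mt, err := adal.NewMultiTenantServicePrincipalToken(
		mtCfg, "<client-id>", "<client-secret>", "https://management.azure.com/")

+// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.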
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// NewMultiTenantServicePrincipalTokenFromCertificate creates a new MultiTenantServicePrincipalToken with the specified certificate credentials and resource. +func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTenantOAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalTokenWithSecret( + *multiTenantCfg.PrimaryTenant(), + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalTokenWithSecret( + *auxTenants[i], + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// MSIAvailable returns true if the MSI endpoint is available for authentication. 
+func MSIAvailable(ctx context.Context, s Sender) bool { + msiType, _, err := getMSIType() + + if err != nil { + return false + } + + if msiType != msiTypeIMDS { + return true + } + + if s == nil { + s = sender() + } + + resp, err := getMSIEndpoint(ctx, s) + + if err == nil { + resp.Body.Close() + } + + return err == nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go new file mode 100644 index 000000000..89190a421 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go @@ -0,0 +1,76 @@ +//go:build go1.13 +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "fmt" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + // http.NewRequestWithContext() was added in Go 1.13 + req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go new file mode 100644 index 000000000..27ec4efad --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go @@ -0,0 +1,75 @@ +//go:build !go1.13 +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil) + req = req.WithContext(tempCtx) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 000000000..c867b3484 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. +func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 000000000..1226c4111 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,353 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/tls" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +const ( + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" + apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" + bingAPISdkHeader = "X-BingApis-SDK-Client" + golangBingAPISdkHeaderValue = "Go-SDK" + authorization = "Authorization" + basic = "Basic" +) + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. 
+type Authorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithNothing()
+}
+
+// APIKeyAuthorizer implements API Key authorization.
+type APIKeyAuthorizer struct {
+	headers         map[string]interface{}
+	queryParameters map[string]interface{}
+}
+
+// NewAPIKeyAuthorizerWithHeaders creates an APIKeyAuthorizer with the specified headers.
+func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(headers, nil)
+}
+
+// NewAPIKeyAuthorizerWithQueryParameters creates an APIKeyAuthorizer with the specified query parameters.
+func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(nil, queryParameters)
+}
+
+// NewAPIKeyAuthorizer creates an APIKeyAuthorizer with the specified headers and query parameters.
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the given HTTP headers and query parameters.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+	}
+}
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
+type CognitiveServicesAuthorizer struct {
+	subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the given subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the subscription key and Bing SDK headers.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BearerAuthorizer implements bearer authorization.
+type BearerAuthorizer struct {
+	tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider.
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+	return &BearerAuthorizer{tokenProvider: tp}
+}
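A wiring sketch, illustrative only: attaching a BearerAuthorizer to an autorest.Client so every prepared request carries an Authorization header. It assumes autorest.NewClientWithUserAgent from this package's client code, and spt is a *adal.ServicePrincipalToken obtained via the adal constructors above; the user agent string is a placeholder:

	client := autorest.NewClientWithUserAgent("example-agent")
	client.Authorizer = autorest.NewBearerAuthorizer(spt)

+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.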
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, + "Failed to refresh the Token for request to %s", r.URL) + } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) + } + return r, err + }) + } +} + +// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST. +func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider { + return ba.tokenProvider +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if s == nil { + s = sender(tls.RenegotiateNever) + } + return &BearerAuthorizerCallback{sender: s, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. 
+				rCopy := *r
+				removeRequestBody(&rCopy)
+
+				resp, err := bacb.sender.Do(&rCopy)
+				if err != nil {
+					return r, err
+				}
+				DrainResponseBody(resp)
+				if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) {
+					bc, err := newBearerChallenge(resp.Header)
+					if err != nil {
+						return r, err
+					}
+					if bacb.callback != nil {
+						ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
+						if err != nil {
+							return r, err
+						}
+						return Prepare(r, ba.WithAuthorization())
+					}
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// hasBearerChallenge returns true if the HTTP response contains a bearer challenge.
+func hasBearerChallenge(header http.Header) bool {
+	authHeader := header.Get(bearerChallengeHeader)
+	if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
+		return false
+	}
+	return true
+}
+
+type bearerChallenge struct {
+	values map[string]string
+}
+
+func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) {
+	challenge := strings.TrimSpace(header.Get(bearerChallengeHeader))
+	trimmedChallenge := challenge[len(bearer)+1:]
+
+	// challenge is a set of key=value pairs that are comma delimited
+	pairs := strings.Split(trimmedChallenge, ",")
+	if len(pairs) < 1 {
+		err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
+		return bc, err
+	}
+
+	bc.values = make(map[string]string)
+	for i := range pairs {
+		trimmedPair := strings.TrimSpace(pairs[i])
+		pair := strings.Split(trimmedPair, "=")
+		if len(pair) == 2 {
+			// remove the enclosing quotes
+			key := strings.Trim(pair[0], "\"")
+			value := strings.Trim(pair[1], "\"")
+
+			switch key {
+			case "authorization", "authorization_uri":
+				// strip the tenant ID from the authorization URL
+				asURL, err := url.Parse(value)
+				if err != nil {
+					return bc, err
+				}
+				bc.values[tenantID] = asURL.Path[1:]
+			default:
+				bc.values[key] = value
+			}
+		}
+	}
+
+	return bc, err
+}
+
+// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
+type EventGridKeyAuthorizer struct {
+	topicKey string
+}
+
+// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
+// with the specified topic key.
+func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
+	return EventGridKeyAuthorizer{topicKey: topicKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
+func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := map[string]interface{}{
+		"aeg-sas-key": egta.topicKey,
+	}
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
+// with the value "Basic <credentials>", where <credentials> is the base64-encoded username:password tuple.
+type BasicAuthorizer struct {
+	userName string
+	password string
+}
+
+// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
+func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
+	return &BasicAuthorizer{
+		userName: userName,
+		password: password,
+	}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Basic " followed by the base64-encoded username:password tuple.
+func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
+type MultiTenantServicePrincipalTokenAuthorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NewMultiTenantServicePrincipalTokenAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider.
+func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
+	return NewMultiTenantBearerAuthorizer(tp)
+}
+
+// MultiTenantBearerAuthorizer implements bearer authorization across multiple tenants.
+type MultiTenantBearerAuthorizer struct {
+	tp adal.MultitenantOAuthTokenProvider
+}
+
+// NewMultiTenantBearerAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider.
+func NewMultiTenantBearerAuthorizer(tp adal.MultitenantOAuthTokenProvider) *MultiTenantBearerAuthorizer {
+	return &MultiTenantBearerAuthorizer{tp: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
+// primary token along with the auxiliary authorization header using the auxiliary tokens.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (mt *MultiTenantBearerAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
+				err = refresher.EnsureFreshWithContext(r.Context())
+				if err != nil {
+					var resp *http.Response
+					if tokError, ok := err.(adal.TokenRefreshError); ok {
+						resp = tokError.Response()
+					}
+					return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
+						"Failed to refresh one or more Tokens for request to %s", r.URL)
+				}
+			}
+			r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
+			if err != nil {
+				return r, err
+			}
+			auxTokens := mt.tp.AuxiliaryOAuthTokens()
+			for i := range auxTokens {
+				auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
+			}
+			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", ")))
+		})
+	}
+}
+
+// TokenProvider returns the underlying MultitenantOAuthTokenProvider for this authorizer.
+func (mt *MultiTenantBearerAuthorizer) TokenProvider() adal.MultitenantOAuthTokenProvider {
+	return mt.tp
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
new file mode 100644
index 000000000..66501493b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
@@ -0,0 +1,66 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" + "strings" +) + +// SASTokenAuthorizer implements an authorization for SAS Token Authentication +// this can be used for interaction with Blob Storage Endpoints +type SASTokenAuthorizer struct { + sasToken string +} + +// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials +func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) { + if strings.TrimSpace(sasToken) == "" { + return nil, fmt.Errorf("sasToken cannot be empty") + } + + token := sasToken + if strings.HasPrefix(sasToken, "?") { + token = strings.TrimPrefix(sasToken, "?") + } + + return &SASTokenAuthorizer{ + sasToken: token, + }, nil +} + +// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the +// URI's query parameters. This can be used for the Blob, Queue, and File Services. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature +func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + if r.URL.RawQuery == "" { + r.URL.RawQuery = sas.sasToken + } else if !strings.Contains(r.URL.RawQuery, sas.sasToken) { + r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken) + } + + return Prepare(r) + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go new file mode 100644 index 000000000..2af5030a1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go @@ -0,0 +1,307 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +// SharedKeyType defines the enumeration for the various shared key types. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types. +type SharedKeyType string + +const ( + // SharedKey is used to authorize against blobs, files and queues services. + SharedKey SharedKeyType = "sharedKey" + + // SharedKeyForTable is used to authorize against the table service. + SharedKeyForTable SharedKeyType = "sharedKeyTable" + + // SharedKeyLite is used to authorize against blobs, files and queues services. 
It's provided for
+	// backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead.
+	SharedKeyLite SharedKeyType = "sharedKeyLite"
+
+	// SharedKeyLiteForTable is used to authorize against the table service. It's provided for
+	// backwards compatibility with older table API versions. Prefer SharedKeyForTable instead.
+	SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable"
+)
+
+const (
+	headerAccept            = "Accept"
+	headerAcceptCharset     = "Accept-Charset"
+	headerContentEncoding   = "Content-Encoding"
+	headerContentLength     = "Content-Length"
+	headerContentMD5        = "Content-MD5"
+	headerContentLanguage   = "Content-Language"
+	headerIfModifiedSince   = "If-Modified-Since"
+	headerIfMatch           = "If-Match"
+	headerIfNoneMatch       = "If-None-Match"
+	headerIfUnmodifiedSince = "If-Unmodified-Since"
+	headerDate              = "Date"
+	headerXMSDate           = "X-Ms-Date"
+	headerXMSVersion        = "x-ms-version"
+	headerRange             = "Range"
+)
+
+const storageEmulatorAccountName = "devstoreaccount1"
+
+// SharedKeyAuthorizer implements Shared Key authorization.
+// It can be used for interaction with Blob, File and Queue Storage endpoints.
+type SharedKeyAuthorizer struct {
+	accountName string
+	accountKey  []byte
+	keyType     SharedKeyType
+}
+
+// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type.
+func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) {
+	key, err := base64.StdEncoding.DecodeString(accountKey)
+	if err != nil {
+		return nil, fmt.Errorf("malformed storage account key: %v", err)
+	}
+	return &SharedKeyAuthorizer{
+		accountName: accountName,
+		accountKey:  key,
+		keyType:     keyType,
+	}, nil
+}
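A usage sketch, illustrative only; AsGet, WithBaseURL, and Prepare are this package's preparer helpers, and the account name, key, and URL are placeholders:

	auth, err := autorest.NewSharedKeyAuthorizer("myaccount", "<base64-account-key>", autorest.SharedKey)
	if err != nil {
		// handle error
	}
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://myaccount.blob.core.windows.net/mycontainer?restype=container"),
		auth.WithAuthorization())

+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is the shared key type followed by the computed key, e.g. "SharedKey <account name>:<signature>".
+// This can be used for the Blob, Queue, and File Services.
+//
+// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key
+// You may use Shared Key authorization to authorize a request made against the
+// 2009-09-19 version and later of the Blob and Queue services,
+// and version 2014-02-14 and later of the File services.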
+func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType) + if err != nil { + return r, err + } + return Prepare(r, WithHeader(headerAuthorization, sk)) + }) + } +} + +func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) { + canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType) + if err != nil { + return "", err + } + + if req.Header == nil { + req.Header = http.Header{} + } + + // ensure date is set + if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" { + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(headerXMSDate, date) + } + canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType) + if err != nil { + return "", err + } + return createAuthorizationHeader(accName, accKey, canString, keyType), nil +} + +func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("") + if accountName != storageEmulatorAccountName { + cr.WriteString("/") + cr.WriteString(getCanonicalizedAccountName(accountName)) + } + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if keyType == SharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func getCanonicalizedAccountName(accountName string) string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) { + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + date := headers.Get(headerDate) + if v := headers.Get(headerXMSDate); v != "" { + if keyType == SharedKey || keyType == SharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch keyType { + case SharedKey: + canString = 
strings.Join([]string{ + verb, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + canonicalizedResource, + }, "\n") + case SharedKeyLite: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("key type '%s' is not supported", keyType) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := make(map[string]string) + + for k := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = headers.Get(k) + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string { + h := hmac.New(sha256.New, accountKey) + h.Write([]byte(canonicalizedString)) + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + var key string + switch keyType { + case SharedKey, SharedKeyForTable: + key = "SharedKey" + case SharedKeyLite, SharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 000000000..211c98d1e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. 
For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 000000000..45575eedb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,995 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/logger" + "github.com/Azure/go-autorest/tracing" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// FutureAPI contains the set of methods on the Future type. +type FutureAPI interface { + // Response returns the last HTTP response. + Response() *http.Response + + // Status returns the last status message of the operation. + Status() string + + // PollingMethod returns the method used to monitor the status of the asynchronous operation. + PollingMethod() PollingMethodType + + // DoneWithContext queries the service to see if the operation has completed. + DoneWithContext(context.Context, autorest.Sender) (bool, error) + + // GetPollingDelay returns a duration the application should wait before checking + // the status of the asynchronous request and true; this value is returned from + // the service via the Retry-After response header. 
If the header wasn't returned
+	// then the function returns the zero-value time.Duration and false.
+	GetPollingDelay() (time.Duration, bool)
+
+	// WaitForCompletionRef will return when one of the following conditions is met: the long
+	// running operation has completed, the provided context is cancelled, or the client's
+	// polling duration has been exceeded. It will retry failed polling attempts based on
+	// the retry value defined in the client up to the maximum retry attempts.
+	// If no deadline is specified in the context then the client.PollingDuration will be
+	// used to determine if a default deadline should be used.
+	// If PollingDuration is greater than zero the value will be used as the context's timeout.
+	// If PollingDuration is zero then no default deadline will be used.
+	WaitForCompletionRef(context.Context, autorest.Client) error
+
+	// MarshalJSON implements the json.Marshaler interface.
+	MarshalJSON() ([]byte, error)
+
+	// UnmarshalJSON implements the json.Unmarshaler interface.
+	UnmarshalJSON([]byte) error
+
+	// PollingURL returns the URL used for retrieving the status of the long-running operation.
+	PollingURL() string
+
+	// GetResult should be called once polling has completed successfully.
+	// It makes the final GET call to retrieve the resultant payload.
+	GetResult(autorest.Sender) (*http.Response, error)
+}
+
+var _ FutureAPI = (*Future)(nil)
+
+// Future provides a mechanism to access the status and results of an asynchronous request.
+// Since futures are stateful they should be passed by value to avoid race conditions.
+type Future struct {
+	pt pollingTracker
+}
+
+// NewFutureFromResponse returns a new Future object initialized
+// with the initial response from an asynchronous operation.
+func NewFutureFromResponse(resp *http.Response) (Future, error) {
+	pt, err := createPollingTracker(resp)
+	return Future{pt: pt}, err
+}
+
+// Response returns the last HTTP response.
+func (f Future) Response() *http.Response {
+	if f.pt == nil {
+		return nil
+	}
+	return f.pt.latestResponse()
+}
+
+// Status returns the last status message of the operation.
+func (f Future) Status() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingStatus()
+}
+
+// PollingMethod returns the method used to monitor the status of the asynchronous operation.
+func (f Future) PollingMethod() PollingMethodType {
+	if f.pt == nil {
+		return PollingUnknown
+	}
+	return f.pt.pollingMethod()
+}
+
+// DoneWithContext queries the service to see if the operation has completed.
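+//
+// A hand-rolled polling loop might look like the following sketch (the
+// WaitForCompletionRef method below is the supported convenience wrapper):
+//
+//	for {
+//		done, err := future.DoneWithContext(ctx, client)
+//		if done || err != nil {
+//			break
+//		}
+//		if delay, ok := future.GetPollingDelay(); ok {
+//			time.Sleep(delay)
+//		}
+//	}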
+func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(ctx, sender); err != nil { + return false, err + } + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { + return false, err + } + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { + return 0, false + } + + retry := resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// If no deadline is specified in the context then the client.PollingDuration will be +// used to determine if a default deadline should be used. +// If PollingDuration is greater than zero the value will be used as the context's timeout. +// If PollingDuration is zero then no default deadline will be used. 
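+//
+// For example (a sketch; future is assumed to come from NewFutureFromResponse):
+//
+//	if err := future.WaitForCompletionRef(ctx, client); err != nil {
+//		// the operation failed, the context was cancelled, or polling timed out
+//	}
+//	resp, err := future.GetResult(client)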
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + cancelCtx := ctx + // if the provided context already has a deadline don't override it + _, hasDeadline := ctx.Deadline() + if d := client.PollingDuration; !hasDeadline && d != 0 { + var cancel context.CancelFunc + cancelCtx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + // if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll + if delay, ok := f.GetPollingDelay(); ok { + logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: initial polling delay") + if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed { + err = cancelCtx.Err() + return + } + } + done, err := f.DoneWithContext(ctx, client) + for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: Using client polling delay") + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. update attempts after delayAttempt to avoid off-by-one. + logger.Instance.Writef(logger.LogError, "WaitForCompletionRef: %s\n", err) + delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") + } + } + return +} + +// MarshalJSON implements the json.Marshaler interface. +func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(f.pt) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
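+//
+// Together with MarshalJSON above, this allows a Future to be persisted and
+// resumed across process restarts, e.g. (a sketch):
+//
+//	data, err := future.MarshalJSON() // save data somewhere durable
+//	...
+//	var resumed Future
+//	err = resumed.UnmarshalJSON(data) // later, rehydrate and keep polling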
+func (f *Future) UnmarshalJSON(data []byte) error {
+	// unmarshal into JSON object to determine the tracker type
+	obj := map[string]interface{}{}
+	err := json.Unmarshal(data, &obj)
+	if err != nil {
+		return err
+	}
+	if obj["method"] == nil {
+		return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property")
+	}
+	method := obj["method"].(string)
+	switch strings.ToUpper(method) {
+	case http.MethodDelete:
+		f.pt = &pollingTrackerDelete{}
+	case http.MethodPatch:
+		f.pt = &pollingTrackerPatch{}
+	case http.MethodPost:
+		f.pt = &pollingTrackerPost{}
+	case http.MethodPut:
+		f.pt = &pollingTrackerPut{}
+	default:
+		return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method)
+	}
+	// now unmarshal into the tracker
+	return json.Unmarshal(data, &f.pt)
+}
+
+// PollingURL returns the URL used for retrieving the status of the long-running operation.
+func (f Future) PollingURL() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingURL()
+}
+
+// GetResult should be called once polling has completed successfully.
+// It makes the final GET call to retrieve the resultant payload.
+func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
+	if f.pt.finalGetURL() == "" {
+		// we can end up in this situation if the async operation returns a 200
+		// with no polling URLs. in that case return the response which should
+		// contain the JSON payload (only do this for successful terminal cases).
+		if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() {
+			return lr, nil
+		}
+		return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result")
+	}
+	req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := sender.Do(req)
+	if err == nil && resp.Body != nil {
+		// copy the body and close it so callers don't have to
+		defer resp.Body.Close()
+		b, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return resp, err
+		}
+		resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+	}
+	return resp, err
+}
+
+type pollingTracker interface {
+	// these methods can differ per tracker
+
+	// checks the response headers and status code to determine the polling mechanism
+	updatePollingMethod() error
+
+	// checks the response for tracker-specific error conditions
+	checkForErrors() error
+
+	// returns true if provisioning state should be checked
+	provisioningStateApplicable() bool
+
+	// methods common to all trackers
+
+	// initializes a tracker's polling URL and method, called for each iteration.
+	// these values can be overridden by each polling tracker as required.
+ initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! 
+ pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) + req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. 
+func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { + goto Default + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return 
autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body")
+		}
+	}
+	return nil
+}
+
+// default initialization of polling URL/method. each verb tracker will update this as required.
+func (pt *pollingTrackerBase) initPollingMethod() error {
+	if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
+		return err
+	} else if ao != "" {
+		pt.URI = ao
+		pt.Pm = PollingAsyncOperation
+		return nil
+	}
+	if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
+		return err
+	} else if lh != "" {
+		pt.URI = lh
+		pt.Pm = PollingLocation
+		return nil
+	}
+	// it's ok if we didn't find a polling header, this will be handled elsewhere
+	return nil
+}
+
+// DELETE
+
+type pollingTrackerDelete struct {
+	pollingTrackerBase
+}
+
+func (pt *pollingTrackerDelete) updatePollingMethod() error {
+	// for 201 the Location header is required
+	if pt.resp.StatusCode == http.StatusCreated {
+		if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
+			return err
+		} else if lh == "" {
+			return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response")
+		} else {
+			pt.URI = lh
+		}
+		pt.Pm = PollingLocation
+		pt.FinalGetURI = pt.URI
+	}
+	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
+	if pt.resp.StatusCode == http.StatusAccepted {
+		ao, err := getURLFromAsyncOpHeader(pt.resp)
+		if err != nil {
+			return err
+		} else if ao != "" {
+			pt.URI = ao
+			pt.Pm = PollingAsyncOperation
+		}
+		// if the Location header is invalid and we already have a polling URL
+		// then we don't care if the Location header URL is malformed.
+		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
+			return err
+		} else if lh != "" {
+			if ao == "" {
+				pt.URI = lh
+				pt.Pm = PollingLocation
+			}
+			// when both headers are returned we use the value in the Location header for the final GET
+			pt.FinalGetURI = lh
+		}
+		// make sure a polling URL was found
+		if pt.URI == "" {
+			return autorest.NewError("pollingTrackerDelete", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+		}
+	}
+	return nil
+}
+
+func (pt pollingTrackerDelete) checkForErrors() error {
+	return pt.baseCheckForErrors()
+}
+
+func (pt pollingTrackerDelete) provisioningStateApplicable() bool {
+	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent
+}
+
+// PATCH
+
+type pollingTrackerPatch struct {
+	pollingTrackerBase
+}
+
+func (pt *pollingTrackerPatch) updatePollingMethod() error {
+	// by default we can use the original URL for polling and final GET
+	if pt.URI == "" {
+		pt.URI = pt.resp.Request.URL.String()
+	}
+	if pt.FinalGetURI == "" {
+		pt.FinalGetURI = pt.resp.Request.URL.String()
+	}
+	if pt.Pm == PollingUnknown {
+		pt.Pm = PollingRequestURI
+	}
+	// for 201 it's permissible for no headers to be returned
+	if pt.resp.StatusCode == http.StatusCreated {
+		if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
+			return err
+		} else if ao != "" {
+			pt.URI = ao
+			pt.Pm = PollingAsyncOperation
+		}
+	}
+	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
+	// note the absence of the "final GET" mechanism for PATCH
+	if pt.resp.StatusCode == http.StatusAccepted {
+		ao, err := getURLFromAsyncOpHeader(pt.resp)
+		if err != nil {
+			return err
+		} else if ao != "" {
+			pt.URI = ao
+			pt.Pm = PollingAsyncOperation
+		}
+		if ao == "" {
+			if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPut) checkForErrors() error { + err := pt.baseCheckForErrors() + if err != nil { + return err + } + // if there are no LRO headers then the body cannot be empty + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } + lh, err := getURLFromLocationHeader(pt.resp) + if err != nil { + return err + } + if ao == "" && lh == "" && len(pt.rawBody) == 0 { + return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") + } + return nil +} + +func (pt pollingTrackerPut) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// creates a polling tracker based on the verb of the original request +func createPollingTracker(resp *http.Response) (pollingTracker, error) { + var pt pollingTracker + switch strings.ToUpper(resp.Request.Method) { + case http.MethodDelete: + pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPatch: + pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPost: + pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPut: + pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} + default: + return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) + } + if err := pt.initializeState(); err != nil { + return pt, err + } + // this initializes the polling header values, we do this during creation in case the + // initial response send us invalid values; this way the API call will return a non-nil + // error (not doing this means the error shows up in Future.Done) + return pt, pt.updatePollingMethod() +} + +// gets the polling URL from the Azure-AsyncOperation header. +// ensures the URL is well-formed and absolute. +func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// gets the polling URL from the Location header. +// ensures the URL is well-formed and absolute. +func getURLFromLocationHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// verify that the URL is valid and absolute +func isValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// PollingMethodType defines a type used for enumerating polling mechanisms. +type PollingMethodType string + +const ( + // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. + PollingAsyncOperation PollingMethodType = "AsyncOperation" + + // PollingLocation indicates the polling method uses the Location header. 
+ PollingLocation PollingMethodType = "Location" + + // PollingRequestURI indicates the polling method uses the original request URI. + PollingRequestURI PollingMethodType = "RequestURI" + + // PollingUnknown indicates an unknown polling method and is the default value. + PollingUnknown PollingMethodType = "" +) + +// AsyncOpIncompleteError is the type that's returned from a future that has not completed. +type AsyncOpIncompleteError struct { + // FutureType is the name of the type composed of a azure.Future. + FutureType string +} + +// Error returns an error message including the originating type name of the error. +func (e AsyncOpIncompleteError) Error() string { + return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) +} + +// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. +func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { + return AsyncOpIncompleteError{ + FutureType: futureType, + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 Microsoft Corporation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md
new file mode 100644
index 000000000..05bef8a80
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/README.md
@@ -0,0 +1,152 @@
+# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `auth` to `azidentity` please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/).
+
+## Authentication
+
+Typical SDK operations must be authenticated and authorized. The `autorest.Authorizer`
+interface allows use of any auth style in requests, such as inserting an OAuth2
+Authorization header and bearer token received from Azure AD.
+
+The SDK itself provides a simple way to get an authorizer which first checks
+for OAuth client credentials in environment variables and then falls back to
+Azure's [Managed Service Identity](https://docs.microsoft.com/azure/active-directory/msi-overview)
+when available, e.g. when on an Azure VM. The following snippet from
+[the previous section](#use) demonstrates this helper.
+
+```go
+import "github.com/Azure/go-autorest/autorest/azure/auth"
+
+// create a VirtualNetworks client
+vnetClient := network.NewVirtualNetworksClient("<subscriptionID>")
+
+// create an authorizer from env vars or Azure Managed Service Identity
+authorizer, err := auth.NewAuthorizerFromEnvironment()
+if err != nil {
+    handle(err)
+}
+
+vnetClient.Authorizer = authorizer
+
+// call the VirtualNetworks CreateOrUpdate API
+vnetClient.CreateOrUpdate(context.Background(),
+// ...
+```
+
+The following environment variables help determine authentication configuration:
+
+- `AZURE_ENVIRONMENT`: Specifies the Azure Environment to use. If not set, it
+  defaults to `AzurePublicCloud`. Not applicable to authentication with Managed
+  Service Identity (MSI).
+- `AZURE_AD_RESOURCE`: Specifies the AAD resource ID to use. If not set, it
+  defaults to `ResourceManagerEndpoint` for operations with Azure Resource
+  Manager. You can also choose an alternate resource programmatically with
+  `auth.NewAuthorizerFromEnvironmentWithResource(resource string)`.
+
+### More Authentication Details
+
+The previous is the first and most recommended of several authentication
+options offered by the SDK because it allows seamless use of both service
+principals and [Azure Managed Service Identity][]. Other options are listed
+below.
+
+> Note: If you need to create a new service principal, run `az ad sp create-for-rbac -n "<app_name>"` in the
+> [azure-cli](https://github.com/Azure/azure-cli). See [these
+> docs](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli?view=azure-cli-latest)
+> for more info. Copy the new principal's ID, secret, and tenant ID for use in
+> your app, or consider the `--sdk-auth` parameter for serialized output.
+
+[azure managed service identity]: https://docs.microsoft.com/azure/active-directory/msi-overview
+
+- The `auth.NewAuthorizerFromEnvironment()` described above creates an authorizer
+  from the first available of the following configuration:
+
+  1. **Client Credentials**: Azure AD Application ID and Secret.
+
+     - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+     - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+     - `AZURE_CLIENT_SECRET`: Specifies the app secret to use.
+
+  2. **Client Certificate**: Azure AD Application ID and X.509 Certificate.
+
+     - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+     - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+     - `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use.
+     - `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use.
+
+  3. **Resource Owner Password**: Azure AD User and Password. This grant type is *not
+     recommended*, use device login instead if you need interactive login.
+
+     - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+     - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+     - `AZURE_USERNAME`: Specifies the username to use.
+     - `AZURE_PASSWORD`: Specifies the password to use.
+
+  4. **Azure Managed Service Identity**: Delegate credential management to the
+     platform. Requires that code is running in Azure, e.g. on a VM. All
+     configuration is handled by Azure. See [Azure Managed Service
+     Identity](https://docs.microsoft.com/azure/active-directory/msi-overview)
+     for more details.
+
+- The `auth.NewAuthorizerFromFile()` method creates an authorizer using
+  credentials from an auth file created by the [Azure CLI][]. Follow these
+  steps to utilize:
+
+  1. Create a service principal and output an auth file using `az ad sp create-for-rbac --sdk-auth > client_credentials.json`.
+  2. Set environment variable `AZURE_AUTH_LOCATION` to the path of the saved
+     output file.
+  3. Use the authorizer returned by `auth.NewAuthorizerFromFile()` in your
+     client as described above.
+
+- The `auth.NewAuthorizerFromCLI()` method creates an authorizer which
+  uses [Azure CLI][] to obtain its credentials.
+
+  The default audience being requested is `https://management.azure.com` (Azure ARM API).
+  To specify your own audience, export `AZURE_AD_RESOURCE` as an environment variable.
+  This is read by `auth.NewAuthorizerFromCLI()` and passed to Azure CLI to acquire the access token.
+
+  For example, to request an access token for Azure Key Vault, export
+  ```
+  AZURE_AD_RESOURCE="https://vault.azure.net"
+  ```
+
+- `auth.NewAuthorizerFromCLIWithResource(AUDIENCE_URL_OR_APPLICATION_ID)` - this method is self-contained and does
+  not require exporting environment variables. For example, to request an access token for Azure Key Vault:
+  ```
+  auth.NewAuthorizerFromCLIWithResource("https://vault.azure.net")
+  ```
+
+  To use `NewAuthorizerFromCLI()` or `NewAuthorizerFromCLIWithResource()`, follow these steps:
+
+  1. Install [Azure CLI v2.0.12](https://docs.microsoft.com/cli/azure/install-azure-cli) or later. Upgrade earlier versions.
+  2. Use `az login` to sign in to Azure.
+
+  If you receive an error, use `az account get-access-token` to verify access.
+
+  If Azure CLI is not installed to the default directory, you may receive an error
+  reporting that `az` cannot be found.
+  Use the `AzureCLIPath` environment variable to define the Azure CLI installation folder.
+
+  If you are signed in to Azure CLI using multiple accounts or your account has
+  access to multiple subscriptions, you need to specify the specific subscription
+  to be used. To do so, use:
+
+  ```
+  az account set --subscription <subscription-id>
+  ```
+
+  To verify the current account settings, use:
+
+  ```
+  az account list
+  ```
+
+[azure cli]: https://github.com/Azure/azure-cli
+
+- Finally, you can use OAuth's [Device Flow][] by calling
+  `auth.NewDeviceFlowConfig()` and extracting the Authorizer as follows:
+
+  ```go
+  config := auth.NewDeviceFlowConfig(clientID, tenantID)
+  a, err := config.Authorizer()
+  ```
+
+[device flow]: https://oauth.net/2/device-flow/
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
new file mode 100644
index 000000000..e97589dcd
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
@@ -0,0 +1,772 @@
+package auth
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
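+
+// Typical use of this package (a sketch; "client" stands for any SDK client
+// that embeds autorest.Client):
+//
+//	authorizer, err := auth.NewAuthorizerFromEnvironment()
+//	if err != nil {
+//		// handle the error; no credentials were found in the environment
+//	}
+//	client.Authorizer = authorizer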
+ +import ( + "bytes" + "context" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "unicode/utf16" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/cli" + "github.com/Azure/go-autorest/logger" + "github.com/dimchansky/utfbom" +) + +// The possible keys in the Values map. +const ( + SubscriptionID = "AZURE_SUBSCRIPTION_ID" + TenantID = "AZURE_TENANT_ID" + AuxiliaryTenantIDs = "AZURE_AUXILIARY_TENANT_IDS" + ClientID = "AZURE_CLIENT_ID" + ClientSecret = "AZURE_CLIENT_SECRET" + CertificatePath = "AZURE_CERTIFICATE_PATH" + CertificatePassword = "AZURE_CERTIFICATE_PASSWORD" + Username = "AZURE_USERNAME" + Password = "AZURE_PASSWORD" + EnvironmentName = "AZURE_ENVIRONMENT" + Resource = "AZURE_AD_RESOURCE" + ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint" + ResourceManagerEndpoint = "ResourceManagerEndpoint" + GraphResourceID = "GraphResourceID" + SQLManagementEndpoint = "SQLManagementEndpoint" + GalleryEndpoint = "GalleryEndpoint" + ManagementEndpoint = "ManagementEndpoint" +) + +// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { + logger.Instance.Writeln(logger.LogInfo, "NewAuthorizerFromEnvironment() determining authentication mechanism") + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + return settings.GetAuthorizer() +} + +// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) { + logger.Instance.Writeln(logger.LogInfo, "NewAuthorizerFromEnvironmentWithResource() determining authentication mechanism") + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + settings.Values[Resource] = resource + return settings.GetAuthorizer() +} + +// EnvironmentSettings contains the available authentication settings. +type EnvironmentSettings struct { + Values map[string]string + Environment azure.Environment +} + +// GetSettingsFromEnvironment returns the available authentication settings from the environment. +func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) { + s = EnvironmentSettings{ + Values: map[string]string{}, + } + s.setValue(SubscriptionID) + s.setValue(TenantID) + s.setValue(AuxiliaryTenantIDs) + s.setValue(ClientID) + s.setValue(ClientSecret) + s.setValue(CertificatePath) + s.setValue(CertificatePassword) + s.setValue(Username) + s.setValue(Password) + s.setValue(EnvironmentName) + s.setValue(Resource) + if v := s.Values[EnvironmentName]; v == "" { + s.Environment = azure.PublicCloud + } else { + s.Environment, err = azure.EnvironmentFromName(v) + } + if s.Values[Resource] == "" { + s.Values[Resource] = s.Environment.ResourceManagerEndpoint + } + return +} + +// GetSubscriptionID returns the available subscription ID or an empty string. 
+func (settings EnvironmentSettings) GetSubscriptionID() string { + return settings.Values[SubscriptionID] +} + +// adds the specified environment variable value to the Values map if it exists +func (settings EnvironmentSettings) setValue(key string) { + if v := os.Getenv(key); v != "" { + logger.Instance.Writef(logger.LogInfo, "GetSettingsFromEnvironment() found environment var %s\n", key) + settings.Values[key] = v + } +} + +// helper to return client and tenant IDs +func (settings EnvironmentSettings) getClientAndTenant() (string, string) { + clientID := settings.Values[ClientID] + tenantID := settings.Values[TenantID] + return clientID, tenantID +} + +// GetClientCredentials creates a config object from the available client credentials. +// An error is returned if no client credentials are available. +func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) { + secret := settings.Values[ClientSecret] + if secret == "" { + logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetClientCredentials() missing client secret") + return ClientCredentialsConfig{}, errors.New("missing client secret") + } + clientID, tenantID := settings.getClientAndTenant() + config := NewClientCredentialsConfig(clientID, secret, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + if auxTenants, ok := settings.Values[AuxiliaryTenantIDs]; ok { + config.AuxTenants = strings.Split(auxTenants, ";") + for i := range config.AuxTenants { + config.AuxTenants[i] = strings.TrimSpace(config.AuxTenants[i]) + } + } + return config, nil +} + +// GetClientCertificate creates a config object from the available certificate credentials. +// An error is returned if no certificate credentials are available. +func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) { + certPath := settings.Values[CertificatePath] + if certPath == "" { + logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetClientCertificate() missing certificate path") + return ClientCertificateConfig{}, errors.New("missing certificate path") + } + certPwd := settings.Values[CertificatePassword] + clientID, tenantID := settings.getClientAndTenant() + config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config, nil +} + +// GetUsernamePassword creates a config object from the available username/password credentials. +// An error is returned if no username/password credentials are available. +func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) { + username := settings.Values[Username] + password := settings.Values[Password] + if username == "" || password == "" { + logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetUsernamePassword() missing username and/or password") + return UsernamePasswordConfig{}, errors.New("missing username/password") + } + clientID, tenantID := settings.getClientAndTenant() + config := NewUsernamePasswordConfig(username, password, clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config, nil +} + +// GetMSI creates a MSI config object from the available client ID. 
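+// When running on Azure with a managed identity, the returned config can be
+// turned into an authorizer directly, e.g. (a sketch):
+//
+//	a, err := settings.GetMSI().Authorizer()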
+func (settings EnvironmentSettings) GetMSI() MSIConfig {
+	config := NewMSIConfig()
+	config.Resource = settings.Values[Resource]
+	config.ClientID = settings.Values[ClientID]
+	return config
+}
+
+// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs.
+func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig {
+	clientID, tenantID := settings.getClientAndTenant()
+	config := NewDeviceFlowConfig(clientID, tenantID)
+	config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+	config.Resource = settings.Values[Resource]
+	return config
+}
+
+// GetAuthorizer creates an Authorizer configured from environment variables in the order:
+// 1. Client credentials
+// 2. Client certificate
+// 3. Username password
+// 4. MSI
+func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {
+	// 1. Client credentials
+	if c, e := settings.GetClientCredentials(); e == nil {
+		logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetAuthorizer() using client secret credentials")
+		return c.Authorizer()
+	}
+
+	// 2. Client certificate
+	if c, e := settings.GetClientCertificate(); e == nil {
+		logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetAuthorizer() using client certificate credentials")
+		return c.Authorizer()
+	}
+
+	// 3. Username password
+	if c, e := settings.GetUsernamePassword(); e == nil {
+		logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetAuthorizer() using user name/password credentials")
+		return c.Authorizer()
+	}
+
+	// 4. MSI
+	if !adal.MSIAvailable(context.Background(), nil) {
+		return nil, errors.New("MSI not available")
+	}
+	logger.Instance.Writeln(logger.LogInfo, "EnvironmentSettings.GetAuthorizer() using MSI authentication")
+	return settings.GetMSI().Authorizer()
+}
+
+// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order.
+// 1. Client credentials
+// 2. Client certificate
+// The path to the configuration file must be specified in the AZURE_AUTH_LOCATION environment variable.
+// resourceBaseURI - used to determine the resource type
+func NewAuthorizerFromFile(resourceBaseURI string) (autorest.Authorizer, error) {
+	settings, err := GetSettingsFromFile()
+	if err != nil {
+		return nil, err
+	}
+	return settings.GetAuthorizer(resourceBaseURI)
+}
+
+// GetAuthorizer creates an Authorizer in the following order.
+// 1. Client credentials
+// 2. Client certificate
+// resourceBaseURI - used to determine the resource type
+func (settings FileSettings) GetAuthorizer(resourceBaseURI string) (autorest.Authorizer, error) {
+	if resourceBaseURI == "" {
+		resourceBaseURI = azure.PublicCloud.ServiceManagementEndpoint
+	}
+	if a, err := settings.ClientCredentialsAuthorizer(resourceBaseURI); err == nil {
+		return a, err
+	}
+	if a, err := settings.ClientCertificateAuthorizer(resourceBaseURI); err == nil {
+		return a, err
+	}
+	return nil, errors.New("auth file missing client and certificate credentials")
+}
+
+// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order.
+// 1. Client credentials
+// 2. Client certificate
+// The path to the configuration file must be specified in the AZURE_AUTH_LOCATION environment variable.
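
A sketch of the file-based entry point above (a fragment; the file path is a hypothetical placeholder and the auth file is assumed to exist already):

os.Setenv("AZURE_AUTH_LOCATION", "/path/to/azureauth.json") // hypothetical path
// An empty base URI falls back to azure.PublicCloud.ServiceManagementEndpoint,
// per FileSettings.GetAuthorizer above.
authorizer, err := auth.NewAuthorizerFromFile(azure.PublicCloud.ResourceManagerEndpoint)
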
+func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) { + s, err := GetSettingsFromFile() + if err != nil { + return nil, err + } + if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil { + return a, err + } + if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil { + return a, err + } + return nil, errors.New("auth file missing client and certificate credentials") +} + +// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLI() (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + + if settings.Values[Resource] == "" { + settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint + } + + return NewAuthorizerFromCLIWithResource(settings.Values[Resource]) +} + +// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) { + token, err := cli.GetTokenFromCLI(resource) + if err != nil { + return nil, err + } + + adalToken, err := token.ToADALToken() + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(&adalToken), nil +} + +// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file. +func GetSettingsFromFile() (FileSettings, error) { + s := FileSettings{} + fileLocation := os.Getenv("AZURE_AUTH_LOCATION") + if fileLocation == "" { + return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set") + } + + contents, err := ioutil.ReadFile(fileLocation) + if err != nil { + return s, err + } + + // Auth file might be encoded + decoded, err := decode(contents) + if err != nil { + return s, err + } + + authFile := map[string]interface{}{} + err = json.Unmarshal(decoded, &authFile) + if err != nil { + return s, err + } + + s.Values = map[string]string{} + s.setKeyValue(ClientID, authFile["clientId"]) + s.setKeyValue(ClientSecret, authFile["clientSecret"]) + s.setKeyValue(CertificatePath, authFile["clientCertificate"]) + s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"]) + s.setKeyValue(SubscriptionID, authFile["subscriptionId"]) + s.setKeyValue(TenantID, authFile["tenantId"]) + s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"]) + s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"]) + s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"]) + s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"]) + s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"]) + s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"]) + return s, nil +} + +// FileSettings contains the available authentication settings. +type FileSettings struct { + Values map[string]string +} + +// GetSubscriptionID returns the available subscription ID or an empty string. 
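
For local development the CLI-backed path above avoids managing credentials entirely; a sketch (assumes a prior `az login` and the auth package imported):

// The token is minted by shelling out to `az account get-access-token`.
authorizer, err := auth.NewAuthorizerFromCLIWithResource("https://management.azure.com/")
if err != nil {
	log.Fatal(err)
}
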
+func (settings FileSettings) GetSubscriptionID() string { + return settings.Values[SubscriptionID] +} + +// adds the specified value to the Values map if it isn't nil +func (settings FileSettings) setKeyValue(key string, val interface{}) { + if val != nil { + settings.Values[key] = val.(string) + } +} + +// returns the specified AAD endpoint or the public cloud endpoint if unspecified +func (settings FileSettings) getAADEndpoint() string { + if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok { + return v + } + return azure.PublicCloud.ActiveDirectoryEndpoint +} + +// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) +} + +// ClientCredentialsAuthorizer creates an authorizer from the available client credentials. +func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ClientCredentialsAuthorizerWithResource(resource) +} + +// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken +// from the available client credentials and the specified resource. +func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) { + if _, ok := settings.Values[ClientSecret]; !ok { + return nil, errors.New("missing client secret") + } + config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID]) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource) +} + +func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) { + if _, ok := settings.Values[CertificatePath]; !ok { + return ClientCertificateConfig{}, errors.New("missing certificate path") + } + cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID]) + cfg.AADEndpoint = settings.getAADEndpoint() + cfg.Resource = resource + return cfg, nil +} + +// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource. +func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) { + spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) + if err != nil { + return nil, err + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource) +} + +// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials. 
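
FileSettings mirrors the environment flow; a sketch that resolves the token resource from a base URI (a fragment, assuming AZURE_AUTH_LOCATION is set):

fs, err := auth.GetSettingsFromFile() // reads AZURE_AUTH_LOCATION
if err != nil {
	return err
}
// Maps the public-cloud Resource Manager base URI to the matching endpoint
// recorded in the auth file (see getResourceForToken below).
authorizer, err := fs.ClientCredentialsAuthorizer(azure.PublicCloud.ResourceManagerEndpoint)
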
+func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ClientCertificateAuthorizerWithResource(resource) +} + +// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) { + cfg, err := settings.clientCertificateConfigWithResource(resource) + if err != nil { + return nil, err + } + return cfg.ServicePrincipalToken() +} + +// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource. +func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) { + cfg, err := settings.clientCertificateConfigWithResource(resource) + if err != nil { + return nil, err + } + return cfg.Authorizer() +} + +func decode(b []byte) ([]byte, error) { + reader, enc := utfbom.Skip(bytes.NewReader(b)) + + switch enc { + case utfbom.UTF16LittleEndian: + u16 := make([]uint16, (len(b)/2)-1) + err := binary.Read(reader, binary.LittleEndian, &u16) + if err != nil { + return nil, err + } + return []byte(string(utf16.Decode(u16))), nil + case utfbom.UTF16BigEndian: + u16 := make([]uint16, (len(b)/2)-1) + err := binary.Read(reader, binary.BigEndian, &u16) + if err != nil { + return nil, err + } + return []byte(string(utf16.Decode(u16))), nil + } + return ioutil.ReadAll(reader) +} + +func (settings FileSettings) getResourceForToken(baseURI string) (string, error) { + // Compare default base URI from the SDK to the endpoints from the public cloud + // Base URI and token resource are the same string. This func finds the authentication + // file field that matches the SDK base URI. The SDK defines the public cloud + // endpoint as its default base URI + if !strings.HasSuffix(baseURI, "/") { + baseURI += "/" + } + switch baseURI { + case azure.PublicCloud.ServiceManagementEndpoint: + return settings.Values[ManagementEndpoint], nil + case azure.PublicCloud.ResourceManagerEndpoint: + return settings.Values[ResourceManagerEndpoint], nil + case azure.PublicCloud.ActiveDirectoryEndpoint: + return settings.Values[ActiveDirectoryEndpoint], nil + case azure.PublicCloud.GalleryEndpoint: + return settings.Values[GalleryEndpoint], nil + case azure.PublicCloud.GraphEndpoint: + return settings.Values[GraphResourceID], nil + } + return "", fmt.Errorf("auth: base URI not found in endpoints") +} + +// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig { + return ClientCredentialsConfig{ + ClientID: clientID, + ClientSecret: clientSecret, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through client certificate. +// Defaults to Public Cloud and Resource Manager Endpoint. 
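
The decode helper above exists because auth files written on Windows are frequently UTF-16 with a byte-order mark rather than plain UTF-8. A self-contained sketch of the same technique:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"unicode/utf16"

	"github.com/dimchansky/utfbom"
)

func main() {
	raw := []byte{0xFF, 0xFE, '{', 0x00, '}', 0x00} // "{}" as UTF-16LE with BOM
	reader, enc := utfbom.Skip(bytes.NewReader(raw))
	if enc == utfbom.UTF16LittleEndian {
		u16 := make([]uint16, (len(raw)/2)-1) // the BOM consumed two bytes
		if err := binary.Read(reader, binary.LittleEndian, &u16); err == nil {
			fmt.Println(string(utf16.Decode(u16))) // prints {}
		}
	}
}
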
+func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig { + return ClientCertificateConfig{ + CertificatePath: certificatePath, + CertificatePassword: certificatePassword, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewUsernamePasswordConfig creates an UsernamePasswordConfig object configured to obtain an Authorizer through username and password. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig { + return UsernamePasswordConfig{ + Username: username, + Password: password, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI. +func NewMSIConfig() MSIConfig { + return MSIConfig{ + Resource: azure.PublicCloud.ResourceManagerEndpoint, + } +} + +// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig { + return DeviceFlowConfig{ + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// AuthorizerConfig provides an authorizer from the configuration provided. +type AuthorizerConfig interface { + Authorizer() (autorest.Authorizer, error) +} + +// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials. +type ClientCredentialsConfig struct { + ClientID string + ClientSecret string + TenantID string + AuxTenants []string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from client credentials. +func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) +} + +// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client credentials. +func (ccc ClientCredentialsConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) { + oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{}) + if err != nil { + return nil, err + } + return adal.NewMultiTenantServicePrincipalToken(oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) +} + +// Authorizer gets the authorizer from client credentials. 
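
The constructors above default to the public cloud; pointing a config at another cloud, or at multiple tenants, is a matter of overriding fields before calling Authorizer(). A sketch (clientID, secret, and tenantID are caller-supplied placeholders):

cfg := auth.NewClientCredentialsConfig(clientID, secret, tenantID)
// Optional: target a sovereign cloud instead of the public-cloud defaults.
cfg.AADEndpoint = azure.USGovernmentCloud.ActiveDirectoryEndpoint
cfg.Resource = azure.USGovernmentCloud.ResourceManagerEndpoint
// Optional: auxiliary tenants route Authorizer() into the multi-tenant
// branch below, returning a multi-tenant token authorizer.
cfg.AuxTenants = []string{"<aux-tenant-id>"} // placeholder
authorizer, err := cfg.Authorizer()
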
+func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) { + if len(ccc.AuxTenants) == 0 { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get SPT from client credentials: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil + } + mtSPT, err := ccc.MultiTenantServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get multitenant SPT from client credentials: %v", err) + } + return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil +} + +// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate. +type ClientCertificateConfig struct { + ClientID string + CertificatePath string + CertificatePassword string + TenantID string + AuxTenants []string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from client certificate. +func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + certData, err := ioutil.ReadFile(ccc.CertificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) + } + certificate, rsaPrivateKey, err := adal.DecodePfxCertificateData(certData, ccc.CertificatePassword) + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) +} + +// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client certificate. +func (ccc ClientCertificateConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) { + oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{}) + if err != nil { + return nil, err + } + certData, err := ioutil.ReadFile(ccc.CertificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) + } + certificate, rsaPrivateKey, err := adal.DecodePfxCertificateData(certData, ccc.CertificatePassword) + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + return adal.NewMultiTenantServicePrincipalTokenFromCertificate(oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) +} + +// Authorizer gets an authorizer object from client certificate. +func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) { + if len(ccc.AuxTenants) == 0 { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil + } + mtSPT, err := ccc.MultiTenantServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get multitenant SPT from certificate auth: %v", err) + } + return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil +} + +// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication. +type DeviceFlowConfig struct { + ClientID string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from device flow. 
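
Device flow suits headless or shared-device sign-in; a sketch (client and tenant IDs are placeholders):

dfc := auth.NewDeviceFlowConfig("<client-id>", "<tenant-id>") // placeholders
// Logs the device-code message (via log.Println below) and blocks until
// the user completes sign-in in a browser.
authorizer, err := dfc.Authorizer()
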
+func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := dfc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ServicePrincipalToken gets the service principal token from device flow. +func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID) + if err != nil { + return nil, err + } + oauthClient := &autorest.Client{} + deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to start device auth flow: %s", err) + } + log.Println(*deviceCode.Message) + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("failed to finish device auth flow: %s", err) + } + return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token) +} + +// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password. +type UsernamePasswordConfig struct { + ClientID string + Username string + Password string + TenantID string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from username and password. +func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource) +} + +// Authorizer gets the authorizer from a username and a password. +func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := ups.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// MSIConfig provides the options to get a bearer authorizer through MSI. +type MSIConfig struct { + Resource string + ClientID string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from MSI. +func (mc MSIConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + spToken, err := adal.NewServicePrincipalTokenFromManagedIdentity(mc.Resource, &adal.ManagedIdentityOptions{ + ClientID: mc.ClientID, + }) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err) + } + return spToken, nil +} + +// Authorizer gets the authorizer from MSI. +func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := mc.ServicePrincipalToken() + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(spToken), nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go new file mode 100644 index 000000000..f7eb26fd3 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go @@ -0,0 +1,25 @@ +//go:build modhack +// +build modhack + +package auth + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
new file mode 100644
index 000000000..868345db6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -0,0 +1,388 @@
+// Package azure provides Azure-specific implementations used with AutoRest.
+// See the included examples for more detail.
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// HeaderClientID is the Azure extension header to set a user-specified request ID.
+	HeaderClientID = "x-ms-client-request-id"
+
+	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
+	// should be included in the response.
+	HeaderReturnClientID = "x-ms-return-client-request-id"
+
+	// HeaderContentType is the type of the content in the HTTP response.
+	HeaderContentType = "Content-Type"
+
+	// HeaderRequestID is the Azure extension header of the service generated request ID returned
+	// in the response.
+	HeaderRequestID = "x-ms-request-id"
+)
+
+// ServiceError encapsulates the error response from an Azure service.
+// It adheres to the OData v4 specification for error responses.
+type ServiceError struct {
+	Code           string                   `json:"code"`
+	Message        string                   `json:"message"`
+	Target         *string                  `json:"target"`
+	Details        []map[string]interface{} `json:"details"`
+	InnerError     map[string]interface{}   `json:"innererror"`
+	AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+}
+
+func (se ServiceError) Error() string {
+	result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
+
+	if se.Target != nil {
+		result += fmt.Sprintf(" Target=%q", *se.Target)
+	}
+
+	if se.Details != nil {
+		d, err := json.Marshal(se.Details)
+		if err != nil {
+			result += fmt.Sprintf(" Details=%v", se.Details)
+		} else {
+			result += fmt.Sprintf(" Details=%s", d)
+		}
+	}
+
+	if se.InnerError != nil {
+		d, err := json.Marshal(se.InnerError)
+		if err != nil {
+			result += fmt.Sprintf(" InnerError=%v", se.InnerError)
+		} else {
+			result += fmt.Sprintf(" InnerError=%s", d)
+		}
+	}
+
+	if se.AdditionalInfo != nil {
+		d, err := json.Marshal(se.AdditionalInfo)
+		if err != nil {
+			result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
+		} else {
+			result += fmt.Sprintf(" AdditionalInfo=%s", d)
+		}
+	}
+
+	return result
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
+func (se *ServiceError) UnmarshalJSON(b []byte) error {
+	// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
+
+	type serviceErrorInternal struct {
+		Code           string                   `json:"code"`
+		Message        string                   `json:"message"`
+		Target         *string                  `json:"target,omitempty"`
+		AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"`
+		// not all services conform to the OData v4 spec.
+		// the following fields are where we've seen discrepancies
+
+		// spec calls for []map[string]interface{} but have seen map[string]interface{}
+		Details interface{} `json:"details,omitempty"`
+
+		// spec calls for map[string]interface{} but have seen []map[string]interface{} and string
+		InnerError interface{} `json:"innererror,omitempty"`
+	}
+
+	sei := serviceErrorInternal{}
+	if err := json.Unmarshal(b, &sei); err != nil {
+		return err
+	}
+
+	// copy the fields we know to be correct
+	se.AdditionalInfo = sei.AdditionalInfo
+	se.Code = sei.Code
+	se.Message = sei.Message
+	se.Target = sei.Target
+
+	// converts an []interface{} to []map[string]interface{}
+	arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) {
+		arrayOf, ok := v.([]interface{})
+		if !ok {
+			return nil, false
+		}
+		final := []map[string]interface{}{}
+		for _, item := range arrayOf {
+			as, ok := item.(map[string]interface{})
+			if !ok {
+				return nil, false
+			}
+			final = append(final, as)
+		}
+		return final, true
+	}
+
+	// convert the remaining fields, falling back to raw JSON if necessary
+
+	if c, ok := arrayOfObjs(sei.Details); ok {
+		se.Details = c
+	} else if c, ok := sei.Details.(map[string]interface{}); ok {
+		se.Details = []map[string]interface{}{c}
+	} else if sei.Details != nil {
+		// stuff into Details
+		se.Details = []map[string]interface{}{
+			{"raw": sei.Details},
+		}
+	}
+
+	if c, ok := sei.InnerError.(map[string]interface{}); ok {
+		se.InnerError = c
+	} else if c, ok := arrayOfObjs(sei.InnerError); ok {
+		// if there's only one error extract it
+		if len(c) == 1 {
+			se.InnerError = c[0]
+		} else {
+			// multiple errors, stuff them into the value
+			se.InnerError = map[string]interface{}{
+				"multi": c,
+			}
+		}
+	} else if c, ok := sei.InnerError.(string); ok {
+		se.InnerError = map[string]interface{}{"error": c}
+	} else if sei.InnerError != nil {
+		// stuff into InnerError
+		se.InnerError = map[string]interface{}{
+			"raw": sei.InnerError,
+		}
+	}
+	return nil
+}
+
+// RequestError describes an error response returned by an Azure service.
+type RequestError struct {
+	autorest.DetailedError
+
+	// The error returned by the Azure service.
+	ServiceError *ServiceError `json:"error" xml:"Error"`
+
+	// The request id (from the x-ms-request-id-header) of the request.
+	RequestID string
+}
+
+// Error returns a human-friendly error message from the service error.
+func (e RequestError) Error() string {
+	return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
+		e.StatusCode, e.ServiceError)
+}
+
+// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
+func IsAzureError(e error) bool {
+	_, ok := e.(*RequestError)
+	return ok
+}
+
+// Resource contains details about an Azure resource.
+type Resource struct {
+	SubscriptionID string
+	ResourceGroup  string
+	Provider       string
+	ResourceType   string
+	ResourceName   string
+}
+
+// String returns the resource ID in its canonical string form.
+func (r Resource) String() string {
+	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName)
+}
+
+// ParseResourceID parses a resource ID into a Resource struct.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/template-functions-resource?tabs=json#resourceid.
+func ParseResourceID(resourceID string) (Resource, error) {
+
+	const resourceIDPatternText = `(?i)^/subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)$`
+	resourceIDPattern := regexp.MustCompile(resourceIDPatternText)
+	match := resourceIDPattern.FindStringSubmatch(resourceID)
+
+	if len(match) == 0 {
+		return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID)
+	}
+
+	v := strings.Split(match[5], "/")
+	resourceName := v[len(v)-1]
+
+	result := Resource{
+		SubscriptionID: match[1],
+		ResourceGroup:  match[2],
+		Provider:       match[3],
+		ResourceType:   match[4],
+		ResourceName:   resourceName,
+	}
+
+	return result, nil
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
+	if v, ok := original.(*RequestError); ok {
+		return *v
+	}
+
+	statusCode := autorest.UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+	return RequestError{
+		DetailedError: autorest.DetailedError{
+			Original:    original,
+			PackageType: packageType,
+			Method:      method,
+			StatusCode:  statusCode,
+			Message:     fmt.Sprintf(message, args...),
+		},
+	}
+}
+
+// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
+// header to true such that UUID accompanies the http.Response.
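
ParseResourceID and Resource.String round-trip a canonical ARM resource ID; for example (a fragment, with the IDs as placeholders):

r, err := azure.ParseResourceID("/subscriptions/<sub-id>/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1")
if err == nil {
	fmt.Println(r.ResourceGroup) // rg
	fmt.Println(r.ResourceName)  // vm1
	fmt.Println(r.String())      // reconstructs the original ID
}
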
+func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + encodedAs := autorest.EncodedAsJSON + if strings.Contains(resp.Header.Get("Content-Type"), "xml") { + encodedAs = autorest.EncodedAsXML + } + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. 
+ b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, decodeErr) + } + if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&e.ServiceError); err != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, err) + } + + // for example, should the API return the literal value `null` as the response + if e.ServiceError == nil { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + Details: []map[string]interface{}{ + { + "HttpResponse.Body": b.String(), + }, + }, + } + } + } + + if e.ServiceError != nil && e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body in hopes of getting something. + rawBody := map[string]interface{}{} + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&rawBody); err != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, err) + } + + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go new file mode 100644 index 000000000..50d6f0391 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go @@ -0,0 +1,25 @@ +//go:build modhack +// +build modhack + +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. 
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go new file mode 100644 index 000000000..f45c3a516 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -0,0 +1,83 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/dimchansky/utfbom" + "github.com/mitchellh/go-homedir" +) + +// Profile represents a Profile from the Azure CLI +type Profile struct { + InstallationID string `json:"installationId"` + Subscriptions []Subscription `json:"subscriptions"` +} + +// Subscription represents a Subscription from the Azure CLI +type Subscription struct { + EnvironmentName string `json:"environmentName"` + ID string `json:"id"` + IsDefault bool `json:"isDefault"` + Name string `json:"name"` + State string `json:"state"` + TenantID string `json:"tenantId"` + User *User `json:"user"` +} + +// User represents a User from the Azure CLI +type User struct { + Name string `json:"name"` + Type string `json:"type"` +} + +const azureProfileJSON = "azureProfile.json" + +func configDir() string { + return os.Getenv("AZURE_CONFIG_DIR") +} + +// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI +func ProfilePath() (string, error) { + if cfgDir := configDir(); cfgDir != "" { + return filepath.Join(cfgDir, azureProfileJSON), nil + } + return homedir.Expand("~/.azure/" + azureProfileJSON) +} + +// LoadProfile restores a Profile object from a file located at 'path'. +func LoadProfile(path string) (result Profile, err error) { + var contents []byte + contents, err = ioutil.ReadFile(path) + if err != nil { + err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + return + } + reader := utfbom.SkipOnly(bytes.NewReader(contents)) + + dec := json.NewDecoder(reader) + if err = dec.Decode(&result); err != nil { + err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) + return + } + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go new file mode 100644 index 000000000..486619111 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -0,0 +1,227 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
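
LoadProfile plus ProfilePath give read-only access to the CLI's subscription list; a sketch that finds the default subscription (a fragment, assuming the cli package is imported):

path, err := cli.ProfilePath()
if err != nil {
	return err
}
profile, err := cli.LoadProfile(path)
if err != nil {
	return err
}
for _, sub := range profile.Subscriptions {
	if sub.IsDefault {
		fmt.Printf("using %s (%s)\n", sub.Name, sub.ID)
	}
}
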
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strconv"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/mitchellh/go-homedir"
+)
+
+// Token represents an AccessToken from the Azure CLI
+type Token struct {
+	AccessToken      string `json:"accessToken"`
+	Authority        string `json:"_authority"`
+	ClientID         string `json:"_clientId"`
+	ExpiresOn        string `json:"expiresOn"`
+	IdentityProvider string `json:"identityProvider"`
+	IsMRRT           bool   `json:"isMRRT"`
+	RefreshToken     string `json:"refreshToken"`
+	Resource         string `json:"resource"`
+	TokenType        string `json:"tokenType"`
+	UserID           string `json:"userId"`
+}
+
+const accessTokensJSON = "accessTokens.json"
+
+// ToADALToken converts an Azure CLI `Token` to an `adal.Token`
+func (t Token) ToADALToken() (converted adal.Token, err error) {
+	tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn)
+	if err != nil {
+		err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err)
+		return
+	}
+
+	difference := tokenExpirationDate.Sub(date.UnixEpoch())
+
+	converted = adal.Token{
+		AccessToken:  t.AccessToken,
+		Type:         t.TokenType,
+		ExpiresIn:    "3600",
+		ExpiresOn:    json.Number(strconv.Itoa(int(difference.Seconds()))),
+		RefreshToken: t.RefreshToken,
+		Resource:     t.Resource,
+	}
+	return
+}
+
+// AccessTokensPath returns the path where access tokens are stored from the Azure CLI
+// TODO(#199): add unit test.
+func AccessTokensPath() (string, error) {
+	// Azure-CLI allows the user to customize the path of access tokens through an environment variable.
+	if accessTokenPath := os.Getenv("AZURE_ACCESS_TOKEN_FILE"); accessTokenPath != "" {
+		return accessTokenPath, nil
+	}
+
+	// Azure-CLI allows the user to customize the path to the Azure config directory through an environment variable.
+	if cfgDir := configDir(); cfgDir != "" {
+		return filepath.Join(cfgDir, accessTokensJSON), nil
+	}
+
+	// Fallback logic to the default path on non-cloud-shell environments.
+	// TODO(#200): remove the dependency on hard-coding path.
+	return homedir.Expand("~/.azure/" + accessTokensJSON)
+}
+
+// ParseExpirationDate parses either an Azure CLI or CloudShell date into a time object
+func ParseExpirationDate(input string) (*time.Time, error) {
+	// CloudShell (and potentially the Azure CLI in future)
+	expirationDate, cloudShellErr := time.Parse(time.RFC3339, input)
+	if cloudShellErr != nil {
+		// Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone)
+		const cliFormat = "2006-01-02 15:04:05.999999"
+		expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local)
+		if cliErr == nil {
+			return &expirationDate, nil
+		}
+
+		return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr)
+	}
+
+	return &expirationDate, nil
+}
+
+// LoadTokens restores a set of Token objects from a file located at 'path'.
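
ToADALToken is how CLI tokens plug into the rest of autorest; a sketch (a fragment, assuming the cli and autorest packages are imported and GetTokenFromCLI below is used to fetch the token):

tok, err := cli.GetTokenFromCLI("https://management.azure.com/")
if err != nil {
	return err
}
adalTok, err := tok.ToADALToken()
if err != nil {
	return err
}
authorizer := autorest.NewBearerAuthorizer(&adalTok)
_ = authorizer // attach to an SDK client
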
+func LoadTokens(path string) ([]Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var tokens []Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&tokens); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err) + } + + return tokens, nil +} + +// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios. +func GetTokenFromCLI(resource string) (*Token, error) { + return GetTokenFromCLIWithParams(GetAccessTokenParams{Resource: resource}) +} + +// GetAccessTokenParams is the parameter struct of GetTokenFromCLIWithParams +type GetAccessTokenParams struct { + Resource string + ResourceType string + Subscription string + Tenant string +} + +// GetTokenFromCLIWithParams gets a token using Azure CLI 2.0 for local development scenarios. +func GetTokenFromCLIWithParams(params GetAccessTokenParams) (*Token, error) { + cliCmd := GetAzureCLICommand() + + cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json") + if params.Resource != "" { + if err := validateParameter(params.Resource); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--resource", params.Resource) + } + if params.ResourceType != "" { + if err := validateParameter(params.ResourceType); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--resource-type", params.ResourceType) + } + if params.Subscription != "" { + if err := validateParameter(params.Subscription); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--subscription", params.Subscription) + } + if params.Tenant != "" { + if err := validateParameter(params.Tenant); err != nil { + return nil, err + } + cliCmd.Args = append(cliCmd.Args, "--tenant", params.Tenant) + } + + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + if stderr.Len() > 0 { + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) + } + + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", err.Error()) + } + + tokenResponse := Token{} + err = json.Unmarshal(output, &tokenResponse) + if err != nil { + return nil, err + } + + return &tokenResponse, err +} + +func validateParameter(param string) error { + // Validate parameters, since it gets sent as a command line argument to Azure CLI + const invalidResourceErrorTemplate = "Parameter %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", param) + if err != nil { + return err + } + if !match { + return fmt.Errorf(invalidResourceErrorTemplate, param) + } + return nil +} + +// GetAzureCLICommand can be used to run arbitrary Azure CLI command +func GetAzureCLICommand() *exec.Cmd { + // This is the path that a developer can set to tell this class what the install path for Azure CLI is. + const azureCLIPath = "AzureCLIPath" + + // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. 
+ azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) + + // Default path for non-Windows. + const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" + + // Execute Azure CLI to get token + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir"))) + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows)) + cliCmd.Args = append(cliCmd.Args, "/c", "az") + } else { + cliCmd = exec.Command("az") + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) + } + + return cliCmd +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 000000000..b0a53769f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,331 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. + NotAvailable = "N/A" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZURECLOUD": PublicCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENT": USGovernmentCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, //TODO: deprecate +} + +// ResourceIdentifier contains a set of Azure resource IDs. +type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + OSSRDBMS string `json:"ossRDBMS"` + Storage string `json:"storage"` + Synapse string `json:"synapse"` + ServiceBus string `json:"serviceBus"` + SQLDatabase string `json:"sqlDatabase"` + CosmosDB string `json:"cosmosDB"` + ManagedHSM string `json:"managedHSM"` + MicrosoftGraph string `json:"microsoftGraph"` +} + +// Environment represents a set of endpoints for each of Azure's Clouds. 
+type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + ManagedHSMEndpoint string `json:"managedHSMEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + MicrosoftGraphEndpoint string `json:"microsoftGraphEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + MariaDBDNSSuffix string `json:"mariaDBDNSSuffix"` + MySQLDatabaseDNSSuffix string `json:"mySqlDatabaseDNSSuffix"` + PostgresqlDatabaseDNSSuffix string `json:"postgresqlDatabaseDNSSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ManagedHSMDNSSuffix string `json:"managedHSMDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"` + SynapseEndpointSuffix string `json:"synapseEndpointSuffix"` + DatalakeSuffix string `json:"datalakeSuffix"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + ManagedHSMEndpoint: "https://managedhsm.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + MicrosoftGraphEndpoint: "https://graph.microsoft.com/", + StorageEndpointSuffix: "core.windows.net", + CosmosDBDNSSuffix: "documents.azure.com", + MariaDBDNSSuffix: "mariadb.database.azure.com", + MySQLDatabaseDNSSuffix: "mysql.database.azure.com", + PostgresqlDatabaseDNSSuffix: "postgres.database.azure.com", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ManagedHSMDNSSuffix: "managedhsm.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.azure.com/", + APIManagementHostNameSuffix: "azure-api.net", + SynapseEndpointSuffix: "dev.azuresynapse.net", + DatalakeSuffix: 
"azuredatalakestore.net", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + OSSRDBMS: "https://ossrdbms-aad.database.windows.net", + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.net", + ServiceBus: "https://servicebus.azure.net/", + SQLDatabase: "https://database.windows.net/", + CosmosDB: "https://cosmos.azure.com", + ManagedHSM: "https://managedhsm.azure.net", + MicrosoftGraph: "https://graph.microsoft.com/", + }, + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + ManagedHSMEndpoint: NotAvailable, + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + MicrosoftGraphEndpoint: "https://graph.microsoft.us/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + CosmosDBDNSSuffix: "documents.azure.us", + MariaDBDNSSuffix: "mariadb.database.usgovcloudapi.net", + MySQLDatabaseDNSSuffix: "mysql.database.usgovcloudapi.net", + PostgresqlDatabaseDNSSuffix: "postgres.database.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ManagedHSMDNSSuffix: NotAvailable, + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", + ContainerRegistryDNSSuffix: "azurecr.us", + TokenAudience: "https://management.usgovcloudapi.net/", + APIManagementHostNameSuffix: "azure-api.us", + SynapseEndpointSuffix: "dev.azuresynapse.usgovcloudapi.net", + DatalakeSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + OSSRDBMS: "https://ossrdbms-aad.database.usgovcloudapi.net", + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.usgovcloudapi.net", + ServiceBus: "https://servicebus.azure.net/", + SQLDatabase: "https://database.usgovcloudapi.net/", + CosmosDB: "https://cosmos.azure.com", + ManagedHSM: NotAvailable, + MicrosoftGraph: "https://graph.microsoft.us/", + }, + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + 
GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + ManagedHSMEndpoint: NotAvailable, + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + MicrosoftGraphEndpoint: "https://microsoftgraph.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + MariaDBDNSSuffix: "mariadb.database.chinacloudapi.cn", + MySQLDatabaseDNSSuffix: "mysql.database.chinacloudapi.cn", + PostgresqlDatabaseDNSSuffix: "postgres.database.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ManagedHSMDNSSuffix: NotAvailable, + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", + ContainerRegistryDNSSuffix: "azurecr.cn", + TokenAudience: "https://management.chinacloudapi.cn/", + APIManagementHostNameSuffix: "azure-api.cn", + SynapseEndpointSuffix: "dev.azuresynapse.azure.cn", + DatalakeSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + OSSRDBMS: "https://ossrdbms-aad.database.chinacloudapi.cn", + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.net", + ServiceBus: "https://servicebus.azure.net/", + SQLDatabase: "https://database.chinacloudapi.cn/", + CosmosDB: "https://cosmos.azure.com", + ManagedHSM: NotAvailable, + MicrosoftGraph: "https://microsoftgraph.chinacloudapi.cn", + }, + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + ManagedHSMEndpoint: NotAvailable, + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + MicrosoftGraphEndpoint: NotAvailable, + StorageEndpointSuffix: "core.cloudapi.de", + CosmosDBDNSSuffix: "documents.microsoftazure.de", + MariaDBDNSSuffix: "mariadb.database.cloudapi.de", + MySQLDatabaseDNSSuffix: "mysql.database.cloudapi.de", + PostgresqlDatabaseDNSSuffix: "postgres.database.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ManagedHSMDNSSuffix: NotAvailable, + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: NotAvailable, + TokenAudience: "https://management.microsoftazure.de/", + APIManagementHostNameSuffix: NotAvailable, + SynapseEndpointSuffix: NotAvailable, + DatalakeSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + 
Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + OSSRDBMS: "https://ossrdbms-aad.database.cloudapi.de", + Storage: "https://storage.azure.com/", + Synapse: NotAvailable, + ServiceBus: "https://servicebus.azure.net/", + SQLDatabase: "https://database.cloudapi.de/", + CosmosDB: "https://cosmos.azure.com", + ManagedHSM: NotAvailable, + MicrosoftGraph: NotAvailable, + }, + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. +func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} + +// SetEnvironment updates the environment map with the specified values. +func SetEnvironment(name string, env Environment) { + environments[strings.ToUpper(name)] = env +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 000000000..507f9e95c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
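+
+// exampleResolveEnvironment is an illustrative sketch (not part of the upstream
+// API) of how the lookup in environments.go above is typically consumed. The
+// name is matched case-insensitively, since EnvironmentFromName upper-cases it
+// before the map lookup.
+func exampleResolveEnvironment(name string) Environment {
+	env, err := EnvironmentFromName(name) // e.g. "AzurePublicCloud"
+	if err != nil {
+		return PublicCloud // fall back to the default public cloud
+	}
+	return env
+}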
+
+type audience []string
+
+type authentication struct {
+	LoginEndpoint string   `json:"loginEndpoint"`
+	Audiences     audience `json:"audiences"`
+}
+
+type environmentMetadataInfo struct {
+	GalleryEndpoint string         `json:"galleryEndpoint"`
+	GraphEndpoint   string         `json:"graphEndpoint"`
+	PortalEndpoint  string         `json:"portalEndpoint"`
+	Authentication  authentication `json:"authentication"`
+}
+
+// EnvironmentProperty represents property names that clients can override
+type EnvironmentProperty string
+
+const (
+	// EnvironmentName ...
+	EnvironmentName EnvironmentProperty = "name"
+	// EnvironmentManagementPortalURL ...
+	EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL"
+	// EnvironmentPublishSettingsURL ...
+	EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL"
+	// EnvironmentServiceManagementEndpoint ...
+	EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint"
+	// EnvironmentResourceManagerEndpoint ...
+	EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint"
+	// EnvironmentActiveDirectoryEndpoint ...
+	EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint"
+	// EnvironmentGalleryEndpoint ...
+	EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint"
+	// EnvironmentKeyVaultEndpoint ...
+	EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint"
+	// EnvironmentGraphEndpoint ...
+	EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint"
+	// EnvironmentServiceBusEndpoint ...
+	EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint"
+	// EnvironmentBatchManagementEndpoint ...
+	EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint"
+	// EnvironmentStorageEndpointSuffix ...
+	EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix"
+	// EnvironmentSQLDatabaseDNSSuffix ...
+	EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix"
+	// EnvironmentTrafficManagerDNSSuffix ...
+	EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix"
+	// EnvironmentKeyVaultDNSSuffix ...
+	EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix"
+	// EnvironmentServiceBusEndpointSuffix ...
+	EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix"
+	// EnvironmentServiceManagementVMDNSSuffix ...
+	EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix"
+	// EnvironmentResourceManagerVMDNSSuffix ...
+	EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix"
+	// EnvironmentContainerRegistryDNSSuffix ...
+	EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix"
+	// EnvironmentTokenAudience ...
+	EnvironmentTokenAudience EnvironmentProperty = "tokenAudience"
+)
+
+// OverrideProperty represents a property name and value that clients can override
+type OverrideProperty struct {
+	Key   EnvironmentProperty
+	Value string
+}
+
+// EnvironmentFromURL loads an Environment from a URL.
+// This function is particularly useful in the Hybrid Cloud model, where one may define their own
+// endpoints.
+func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case 
EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 000000000..5b52357f9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,204 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. +// It also handles request retries +func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(r) + for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + + resp, err = autorest.SendWithSender(s, rr.Request(), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return resp, err + } + + if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { + return resp, err + } + + var re RequestError + if strings.Contains(r.Header.Get("Content-Type"), "xml") { + // XML errors (e.g. Storage Data Plane) only return the inner object + err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError)) + } else { + err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re)) + } + + if err != nil { + return resp, err + } + err = re + + if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { + regErr := register(client, r, re) + if regErr != nil { + return resp, fmt.Errorf("failed auto registering Resource Provider: %s. 
Original error: %w", regErr, err)
+					}
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+func getProvider(re RequestError) (string, error) {
+	if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
+		return re.ServiceError.Details[0]["target"].(string), nil
+	}
+	return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+	subID := getSubscription(originalReq.URL.Path)
+	if subID == "" {
+		return errors.New("missing parameter subscriptionID to register resource provider")
+	}
+	providerName, err := getProvider(re)
+	if err != nil {
+		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+	}
+	newURL := url.URL{
+		Scheme: originalReq.URL.Scheme,
+		Host:   originalReq.URL.Host,
+	}
+
+	// taken from the resources SDK
+	// with almost identical code, these sections are easier to maintain
+	// It is also not a good idea to import the SDK here
+	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": autorest.Encode("path", providerName),
+		"subscriptionId":            autorest.Encode("path", subID),
+	}
+
+	const APIVersion = "2016-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(newURL.String()),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+		autorest.WithQueryParameters(queryParameters),
+	)
+
+	req, err := preparer.Prepare(&http.Request{})
+	if err != nil {
+		return err
+	}
+	req = req.WithContext(originalReq.Context())
+
+	resp, err := autorest.SendWithSender(client, req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+	)
+	if err != nil {
+		return err
+	}
+
+	type Provider struct {
+		RegistrationState *string `json:"registrationState,omitempty"`
+	}
+	var provider Provider
+
+	err = autorest.Respond(
+		resp,
+		WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&provider),
+		autorest.ByClosing(),
+	)
+	if err != nil {
+		return err
+	}
+
+	// poll for registered provisioning state
+	registrationStartTime := time.Now()
+	for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
+		// taken from the resources SDK
+		// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
+		preparer := autorest.CreatePreparer(
+			autorest.AsGet(),
+			autorest.WithBaseURL(newURL.String()),
+			autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
+			autorest.WithQueryParameters(queryParameters),
+		)
+		req, err = preparer.Prepare(&http.Request{})
+		if err != nil {
+			return err
+		}
+		req = req.WithContext(originalReq.Context())
+
+		resp, err := autorest.SendWithSender(client, req,
+			autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+		)
+		if err != nil {
+			return err
+		}
+
+		err = autorest.Respond(
+			resp,
+			WithErrorUnlessStatusCode(http.StatusOK),
+			autorest.ByUnmarshallingJSON(&provider),
+			autorest.ByClosing(),
+		)
+		if err != nil {
+			return err
+		}
+
+		if provider.RegistrationState !=
nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) + if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { + return originalReq.Context().Err() + } + } + if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 000000000..bb5f9396e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,328 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/logger" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. + DefaultPollingDelay = 30 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 + + // DefaultRetryDuration is the duration to wait between retries. + DefaultRetryDuration = 30 * time.Second +) + +var ( + // StatusCodesForRetry are a defined group of status code for which the client will retry + StatusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// Response serves as the base for all responses from generated clients. It provides access to the +// last http.Response. +type Response struct { + *http.Response `json:"-"` +} + +// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. +// If there was no response (i.e. the underlying http.Response is nil) the return value is false. 
+func (r Response) IsHTTPStatus(statusCode int) bool {
+	if r.Response == nil {
+		return false
+	}
+	return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+	return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+	Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			var body, b bytes.Buffer
+
+			defer r.Body.Close()
+
+			r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+			if err := r.Write(&b); err != nil {
+				return nil, fmt.Errorf("Failed to write request: %v", err)
+			}
+
+			li.Logger.Printf(requestFormat, b.String())
+
+			r.Body = ioutil.NopCloser(&body)
+			return p.Prepare(r)
+		})
+	}
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) ByInspecting() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			var body, b bytes.Buffer
+			defer resp.Body.Close()
+			resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
+			if err := resp.Write(&b); err != nil {
+				return fmt.Errorf("Failed to write response: %v", err)
+			}
+
+			li.Logger.Printf(responseFormat, b.String())
+
+			resp.Body = ioutil.NopCloser(&body)
+			return r.Respond(resp)
+		})
+	}
+}
+
+// Client is the base for autorest generated clients. It provides default, "do nothing"
+// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
+// standard, undecorated http.Client as a default Sender.
+//
+// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
+// return responses that compose with Response.
+//
+// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
+// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
+// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
+// sending the request by providing a decorated Sender.
+type Client struct {
+	Authorizer        Authorizer
+	Sender            Sender
+	RequestInspector  PrepareDecorator
+	ResponseInspector RespondDecorator
+
+	// PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header
+	PollingDelay time.Duration
+
+	// PollingDuration sets the maximum polling time after which an error is returned.
+	// Setting this to zero will use the provided context to control the duration.
+	PollingDuration time.Duration
+
+	// RetryAttempts sets the total number of times the client will attempt to make an HTTP request.
+	// Set the value to 1 to disable retries. DO NOT set the value to less than 1.
+	RetryAttempts int
+
+	// RetryDuration sets the delay duration for retries.
+	RetryDuration time.Duration
+
+	// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
+	// through the Do method.
+	UserAgent string
+
+	Jar http.CookieJar
+
+	// Set to true to skip attempted registration of resource providers (false by default).
+	SkipResourceProviderRegistration bool
+
+	// SendDecorators can be used to override the default chain of SendDecorators.
+	// This can be used to specify things like a custom retry SendDecorator.
+	// Set this to an empty slice to use no SendDecorators.
+	SendDecorators []SendDecorator
+}
+
+// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
+// string.
+func NewClientWithUserAgent(ua string) Client {
+	return newClient(ua, tls.RenegotiateNever)
+}
+
+// ClientOptions contains various Client configuration options.
+type ClientOptions struct {
+	// UserAgent is an optional user-agent string to append to the default user agent.
+	UserAgent string
+
+	// Renegotiation is an optional setting to control client-side TLS renegotiation.
+	Renegotiation tls.RenegotiationSupport
+}
+
+// NewClientWithOptions returns an instance of a Client with the specified values.
+func NewClientWithOptions(options ClientOptions) Client {
+	return newClient(options.UserAgent, options.Renegotiation)
+}
+
+func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
+	c := Client{
+		PollingDelay:    DefaultPollingDelay,
+		PollingDuration: DefaultPollingDuration,
+		RetryAttempts:   DefaultRetryAttempts,
+		RetryDuration:   DefaultRetryDuration,
+		UserAgent:       UserAgent(),
+	}
+	c.Sender = c.sender(renegotiation)
+	c.AddToUserAgent(ua)
+	return c
+}
+
+// AddToUserAgent adds an extension to the current user agent
+func (c *Client) AddToUserAgent(extension string) error {
+	if extension != "" {
+		c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
+		return nil
+	}
+	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
+}
+
+// Do implements the Sender interface by invoking the active Sender after applying authorization.
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
+// is set, set the User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+	if r.UserAgent() == "" {
+		r, _ = Prepare(r,
+			WithUserAgent(c.UserAgent))
+	}
+	// NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations
+	r, err := Prepare(r,
+		c.WithAuthorization(),
+		c.WithInspection())
+	if err != nil {
+		var resp *http.Response
+		if detErr, ok := err.(DetailedError); ok {
+			// if the authorization failed (e.g. invalid credentials) there will
+			// be a response associated with the error, be sure to return it.
+			resp = detErr.Response
+		}
+		return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+	}
+	logger.Instance.WriteRequest(r, logger.Filter{
+		Header: func(k string, v []string) (bool, []string) {
+			// remove the auth token from the log
+			if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
+				v = []string{"**REDACTED**"}
+			}
+			return true, v
+		},
+	})
+	resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
+	if resp == nil && err == nil {
+		err = errors.New("autorest: received nil response and error")
+	}
+	logger.Instance.WriteResponse(resp, logger.Filter{})
+	Respond(resp, c.ByInspecting())
+	return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender(renegotiation tls.RenegotiationSupport) Sender {
+	if c.Sender == nil {
+		return sender(renegotiation)
+	}
+	return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+	return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+	if c.Authorizer == nil {
+		return NullAuthorizer{}
+	}
+	return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+	if c.RequestInspector == nil {
+		return WithNothing()
+	}
+	return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
+func (c Client) ByInspecting() RespondDecorator {
+	if c.ResponseInspector == nil {
+		return ByIgnoring()
+	}
+	return c.ResponseInspector
+}
+
+// Send sends the provided http.Request using the client's Sender or the default sender.
+// It returns the http.Response and a possible error. It also accepts a, possibly empty,
+// default set of SendDecorators used when sending the request.
+// SendDecorators have the following precedence:
+//  1. In a request's context via WithSendDecorators()
+//  2. Specified on the client in SendDecorators
+//  3. The default values specified in this method
+func (c Client) Send(req *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	if c.SendDecorators != nil {
+		decorators = c.SendDecorators
+	}
+	inCtx := req.Context().Value(ctxSendDecorators{})
+	if sd, ok := inCtx.([]SendDecorator); ok {
+		decorators = sd
+	}
+	return SendWithSender(c, req, decorators...)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
new file mode 100644
index 000000000..b9d6a27ea
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 000000000..c45710656 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,96 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. 
+*/ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). +func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go new file mode 100644 index 000000000..4e0543207 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 000000000..b453fad04 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). 
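The tzOffsetRegex check above is what lets UnmarshalJSON accept both a well-formed RFC3339 timestamp and Azure's occasional Z-less UTC variant; the payloads and resource struct below are hypothetical:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/Azure/go-autorest/autorest/date"
    )

    type resource struct {
        Created date.Time `json:"created"`
    }

    func main() {
        var r resource
        // An RFC3339 timestamp with a zone suffix and Azure's suffix-less
        // UTC form both decode into the same field.
        for _, payload := range []string{
            `{"created": "2001-02-03T04:05:06Z"}`,
            `{"created": "2001-02-03T04:05:06.000000001"}`,
        } {
            if err := json.Unmarshal([]byte(payload), &r); err != nil {
                panic(err)
            }
            fmt.Println(r.Created.ToTime().UTC())
        }
    }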
+func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 000000000..48fb39ba9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). 
+func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) {
+	t.Time, err = ParseTime(rfc1123, string(data))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) MarshalBinary() ([]byte, error) {
+	return t.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
+// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
+func (t *TimeRFC1123) UnmarshalBinary(data []byte) error {
+	return t.UnmarshalText(data)
+}
+
+// ToTime returns a Time as a time.Time
+func (t TimeRFC1123) ToTime() time.Time {
+	return t.Time
+}
+
+// String returns the Time formatted as an RFC1123 date-time string (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) String() string {
+	// Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does.
+	b, err := t.MarshalText()
+	if err != nil {
+		return ""
+	}
+	return string(b)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
new file mode 100644
index 000000000..7073959b2
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
@@ -0,0 +1,123 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"time"
+)
+
+// unixEpoch is the moment in time that should be treated as timestamp 0.
+var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// UnixTime marshals and unmarshals a time that is represented as the number
+// of seconds (ignoring leap seconds) since the Unix Epoch.
+type UnixTime time.Time
+
+// Duration returns the time as a Duration since the UnixEpoch.
+func (t UnixTime) Duration() time.Duration {
+	return time.Time(t).Sub(unixEpoch)
+}
+
+// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
+func NewUnixTimeFromSeconds(seconds float64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
+}
+
+// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(nanoseconds))
+}
+
+// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
+func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
+	return UnixTime(unixEpoch.Add(dur))
+}
+
+// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'.
+func UnixEpoch() time.Time {
+	return unixEpoch
+}
+
+// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
+// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
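A UnixTime is therefore an absolute instant stored relative to the epoch, serialized as a plain JSON number of seconds (MarshalJSON follows below); the values in this sketch are hypothetical:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/Azure/go-autorest/autorest/date"
    )

    func main() {
        t := date.NewUnixTimeFromSeconds(1.5)
        fmt.Println(t.Duration()) // 1.5s since the epoch

        // MarshalJSON emits the value back as a JSON number of seconds.
        b, err := json.Marshal(t)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // 1.5
    }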
+func (t UnixTime) MarshalJSON() ([]byte, error) {
+	buffer := &bytes.Buffer{}
+	enc := json.NewEncoder(buffer)
+	err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
+	if err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of the number of seconds since
+// midnight January 1st, 1970.
+func (t *UnixTime) UnmarshalJSON(text []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(text))
+
+	var secondsSinceEpoch float64
+	if err := dec.Decode(&secondsSinceEpoch); err != nil {
+		return err
+	}
+
+	*t = NewUnixTimeFromSeconds(secondsSinceEpoch)
+
+	return nil
+}
+
+// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number.
+func (t UnixTime) MarshalText() ([]byte, error) {
+	cast := time.Time(t)
+	return cast.MarshalText()
+}
+
+// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch.
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+	var unmarshaled time.Time
+
+	if err := unmarshaled.UnmarshalText(raw); err != nil {
+		return err
+	}
+
+	*t = UnixTime(unmarshaled)
+	return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+	buf := &bytes.Buffer{}
+
+	payload := int64(t.Duration())
+
+	if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// UnmarshalBinary converts from a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime.
+func (t *UnixTime) UnmarshalBinary(raw []byte) error {
+	var nanosecondsSinceEpoch int64
+
+	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
+		return err
+	}
+	*t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
new file mode 100644
index 000000000..12addf0eb
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
@@ -0,0 +1,25 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"strings"
+	"time"
+)
+
+// ParseTime parses a time string using the specified format (the input is upper-cased first).
+func ParseTime(format string, t string) (d time.Time, err error) {
+	return time.Parse(format, strings.ToUpper(t))
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go
new file mode 100644
index 000000000..35098eda8
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/error.go
@@ -0,0 +1,103 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"net/http"
+)
+
+const (
+	// UndefinedStatusCode is used when HTTP status code is not available for an error.
+	UndefinedStatusCode = 0
+)
+
+// DetailedError encloses an error with details of the package, method, and associated HTTP
+// status code (if any).
+type DetailedError struct {
+	Original error
+
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
+	StatusCode interface{}
+
+	// Message is the error message.
+	Message string
+
+	// ServiceError is the response body of the failed API call, in bytes.
+	ServiceError []byte
+
+	// Response is the response object that was returned during failure if applicable.
+	Response *http.Response
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
+// message. message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, nil, message, args...)
+}
+
+// NewErrorWithResponse creates a new Error conforming object from the passed
+// packageType, method, statusCode of the given resp (UndefinedStatusCode if
+// resp is nil), and message. message is treated as a format string to which the
+// optional args apply.
+func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, resp, message, args...)
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	if v, ok := original.(DetailedError); ok {
+		return v
+	}
+
+	statusCode := UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+
+	return DetailedError{
+		Original:    original,
+		PackageType: packageType,
+		Method:      method,
+		StatusCode:  statusCode,
+		Message:     fmt.Sprintf(message, args...),
+		Response:    resp,
+	}
+}
+
+// Error returns a formatted error string containing all available details (i.e., PackageType,
+// Method, StatusCode, Message, and original error (if any)).
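Putting the constructors together, an illustrative sketch of raising and unwrapping a DetailedError (Error and Unwrap follow below; the package name, method name, and response are hypothetical):

    package main

    import (
        "errors"
        "fmt"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        cause := errors.New("connection reset")
        resp := &http.Response{StatusCode: http.StatusBadGateway, Status: "502 Bad Gateway"}

        err := autorest.NewErrorWithError(cause, "mypkg", "Fetch", resp, "fetching %s failed", "widgets")
        fmt.Println(err)
        // mypkg#Fetch: fetching widgets failed: StatusCode=502 -- Original Error: connection reset

        // Unwrap makes the original error visible to errors.Is / errors.As.
        fmt.Println(errors.Is(err, cause)) // true
    }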
+func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} + +// Unwrap returns the original error. +func (e DetailedError) Unwrap() error { + return e.Original +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go new file mode 100644 index 000000000..792f82d4b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go @@ -0,0 +1,25 @@ +//go:build modhack +// +build modhack + +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 000000000..121a66fa8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,549 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerAuxAuthorization = "x-ms-authorization-auxiliary" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// used as a key type in context.WithValue() +type ctxPrepareDecorators struct{} + +// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. +// If no PrepareDecorators are provided the context is unchanged. 
+func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { + if len(prepareDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) +} + +// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. +func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { + inCtx := ctx.Value(ctxPrepareDecorators{}) + if pd, ok := inCtx.([]PrepareDecorator); ok { + return pd + } + return defaultPrepareDecorators +} + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. 
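The decorator mechanics are easiest to see with a tiny hand-written PrepareDecorator; the stamp decorator and header below are hypothetical:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
    )

    // stamp is a pre-decorator: it mutates the request, then hands it on.
    func stamp(p autorest.Preparer) autorest.Preparer {
        return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
            r.Header.Set("X-Request-Id", "42")
            return p.Prepare(r)
        })
    }

    func main() {
        r, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
        r, err := autorest.Prepare(r, stamp)
        if err != nil {
            panic(err)
        }
        fmt.Println(r.Header.Get("X-Request-Id")) // 42
    }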
+func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + setHeader(r, http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to +// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before +// adding them. +func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. +func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. +func AsMerge() PrepareDecorator { return WithMethod("MERGE") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. 
+func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
+
+// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL. Query parameters will be encoded as required.
+func WithBaseURL(baseURL string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var u *url.URL
+				if u, err = url.Parse(baseURL); err != nil {
+					return r, err
+				}
+				if u.Scheme == "" {
+					return r, fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+				}
+				if u.RawQuery != "" {
+					// handle unencoded semicolons (ideally the server would send them already encoded)
+					u.RawQuery = strings.Replace(u.RawQuery, ";", "%3B", -1)
+					q, err := url.ParseQuery(u.RawQuery)
+					if err != nil {
+						return r, err
+					}
+					u.RawQuery = q.Encode()
+				}
+				r.URL = u
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBytes returns a PrepareDecorator that passes the contents of the supplied byte slice
+// directly into the request body.
+func WithBytes(input *[]byte) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if input == nil {
+					return r, fmt.Errorf("Input Bytes was nil")
+				}
+
+				r.ContentLength = int64(len(*input))
+				r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(urlParameters)
+	for key, value := range parameters {
+		baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+	}
+	return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that URL-encodes the passed url.Values (e.g.,
+// bar=baz&foo=quux) into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				s := v.Encode()
+
+				setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
+				r.ContentLength = int64(len(s))
+				r.Body = ioutil.NopCloser(strings.NewReader(s))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that encodes the passed form parameters as
+// multipart/form-data in the http.Request body.
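These decorators compose over an empty http.Request, which is the usual calling pattern; the URL and form values below are hypothetical:

    package main

    import (
        "fmt"
        "net/http"
        "net/url"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        v := url.Values{}
        v.Set("name", "widget")

        r, err := autorest.CreatePreparer(
            autorest.AsPost(),
            autorest.WithBaseURL("https://example.com/api"),
            autorest.WithFormData(v),
        ).Prepare(&http.Request{})
        if err != nil {
            panic(err)
        }
        fmt.Println(r.Method, r.URL, r.Header.Get("Content-Type"))
        // POST https://example.com/api application/x-www-form-urlencoded
    }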
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var body bytes.Buffer + writer := multipart.NewWriter(&body) + for key, value := range formDataParameters { + if rc, ok := value.(io.ReadCloser); ok { + var fd io.Writer + if fd, err = writer.CreateFormFile(key, key); err != nil { + return r, err + } + if _, err = io.Copy(fd, rc); err != nil { + return r, err + } + } else { + if err = writer.WriteField(key, ensureValueString(value)); err != nil { + return r, err + } + } + } + if err = writer.Close(); err != nil { + return r, err + } + setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) + r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + r.ContentLength = int64(body.Len()) + return r, err + } + return r, err + }) + } +} + +// WithFile returns a PrepareDecorator that sends file in request body. +func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. +func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. +func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. 
+func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the +// request and sets the Content-Length header. +func WithXML(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := xml.Marshal(v) + if err == nil { + // we have to tack on an XML header + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + + r.ContentLength = int64(len(bytesWithHeader)) + setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader))) + r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. +func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
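Path templates and query maps combine the same way; the URL, template, and parameters below are hypothetical:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        r, err := autorest.CreatePreparer(
            autorest.AsGet(),
            autorest.WithBaseURL("https://example.com"),
            autorest.WithPathParameters("/items/{id}", map[string]interface{}{"id": "42"}),
            autorest.WithQueryParameters(map[string]interface{}{"expand": "details"}),
        ).Prepare(&http.Request{})
        if err != nil {
            panic(err)
        }
        fmt.Println(r.URL) // https://example.com/items/42?expand=details
    }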
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). +func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := MapToValues(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + v := r.URL.Query() + for key, value := range parameters { + for i := range value { + d, err := url.QueryUnescape(value[i]) + if err != nil { + return r, err + } + value[i] = d + } + v[key] = value + } + r.URL.RawQuery = v.Encode() + } + return r, err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go new file mode 100644 index 000000000..349e1963a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -0,0 +1,269 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. +type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. +func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. +type RespondDecorator func(Responder) Responder + +// CreateResponder creates, decorates, and returns a Responder. 
Without decorators, the returned
+// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
+// and re-use; it depends on the applied decorators. For example, a standard decorator that closes
+// the response body is fine to share whereas a decorator that reads the body into a passed struct
+// is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+	return DecorateResponder(
+		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+		decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon
+// the response depends on whether they are a pre-decorator (react to the http.Response and then
+// pass it along) or a post-decorator (pass the http.Response along and then react).
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+	for _, decorate := range decorators {
+		r = decorate(r)
+	}
+	return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators, which it then applies to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+	if r == nil {
+		return nil
+	}
+	return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it
+// unexamined to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			return r.Respond(resp)
+		})
+	}
+}
+
+// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as
+// the Body is read.
+func ByCopying(b *bytes.Buffer) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				resp.Body = TeeReadCloser(resp.Body, b)
+			}
+			return err
+		})
+	}
+}
+
+// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
+// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
+// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
+// within the set.
+func ByDiscardingBody() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+					return fmt.Errorf("Error discarding the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
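Taken together, a response is typically handled with a short chain of these decorators; WithErrorUnlessStatusCode is defined further below, and the endpoint and widget type in this sketch are hypothetical:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
    )

    type widget struct {
        Name string `json:"name"`
    }

    func main() {
        resp, err := http.Get("https://example.com/api/widget")
        if err != nil {
            panic(err)
        }

        var w widget
        // Check the status, decode the body, then close it.
        err = autorest.Respond(resp,
            autorest.WithErrorUnlessStatusCode(http.StatusOK),
            autorest.ByUnmarshallingJSON(&w),
            autorest.ByClosing())
        fmt.Println(w.Name, err)
    }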
+func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. +func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingBytes(v *[]byte) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + bytes, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + *v = bytes + } + } + return err + }) + } +} + +// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingJSON(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + // Some responses might include a BOM, remove for successful unmarshalling + b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else if len(strings.Trim(string(b), " ")) > 0 { + errInner = json.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingXML(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + errInner = xml.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response +// StatusCode is among the set passed. On error, response body is fully read into a buffer and +// presented in the returned error, as well as in the response body. +func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !ResponseHasStatusCode(resp, codes...) 
{ + derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + if resp.Body != nil { + defer resp.Body.Close() + b, _ := ioutil.ReadAll(resp.Body) + derr.ServiceError = b + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + err = derr + } + return err + }) + } +} + +// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is +// anything other than HTTP 200. +func WithErrorUnlessOK() RespondDecorator { + return WithErrorUnlessStatusCode(http.StatusOK) +} + +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an +// empty string slice if the passed http.Response is nil or the header does not exist. +func ExtractHeader(header string, resp *http.Response) []string { + if resp != nil && resp.Header != nil { + return resp.Header[http.CanonicalHeaderKey(header)] + } + return nil +} + +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It +// returns an empty string if the passed http.Response is nil or the header does not exist. +func ExtractHeaderValue(header string, resp *http.Response) string { + h := ExtractHeader(header, resp) + if len(h) > 0 { + return h[0] + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go new file mode 100644 index 000000000..fa11dbed7 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -0,0 +1,52 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. +func NewRetriableRequest(req *http.Request) *RetriableRequest { + return &RetriableRequest{req: req} +} + +// Request returns the wrapped HTTP request. 
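A hand-rolled version of the retry loop that the DoRetryFor* SendDecorators (further below) drive through this type; the URL, payload, and attempt count are hypothetical:

    package main

    import (
        "fmt"
        "net/http"
        "strings"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        req, _ := http.NewRequest(http.MethodPost, "https://example.com", strings.NewReader("payload"))
        rr := autorest.NewRetriableRequest(req)

        for attempt := 0; attempt < 3; attempt++ {
            if err := rr.Prepare(); err != nil { // restores the body on each pass
                panic(err)
            }
            resp, err := http.DefaultClient.Do(rr.Request())
            if err == nil {
                fmt.Println(resp.Status)
                resp.Body.Close()
                return
            }
        }
    }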
+func (rr *RetriableRequest) Request() *http.Request { + return rr.req +} + +func (rr *RetriableRequest) prepareFromByteReader() (err error) { + // fall back to making a copy (only do this once) + b := []byte{} + if rr.req.ContentLength > 0 { + b = make([]byte, rr.req.ContentLength) + _, err = io.ReadFull(rr.req.Body, b) + if err != nil { + return err + } + } else { + b, err = ioutil.ReadAll(rr.req.Body) + if err != nil { + return err + } + } + rr.br = bytes.NewReader(b) + rr.req.Body = ioutil.NopCloser(rr.br) + return err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go new file mode 100644 index 000000000..4c87030e8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -0,0 +1,55 @@ +//go:build !go1.8 +// +build !go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.br != nil { + _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go new file mode 100644 index 000000000..05847c08b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go @@ -0,0 +1,67 @@ +//go:build go1.8 +// +build go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. 
+type RetriableRequest struct { + req *http.Request + rc io.ReadCloser + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.rc != nil { + rr.req.Body = rr.rc + } else if rr.br != nil { + _, err = rr.br.Seek(0, io.SeekStart) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.req.GetBody != nil { + // this will allow us to preserve the body without having to + // make a copy. note we need to do this on each iteration + rr.rc, err = rr.req.GetBody() + if err != nil { + return err + } + } else if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.GetBody = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go new file mode 100644 index 000000000..118de8141 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -0,0 +1,458 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "crypto/tls" + "fmt" + "log" + "math" + "net" + "net/http" + "net/http/cookiejar" + "strconv" + "sync" + "time" + + "github.com/Azure/go-autorest/logger" + "github.com/Azure/go-autorest/tracing" +) + +// there is one sender per TLS renegotiation type, i.e. count of tls.RenegotiationSupport enums +const defaultSendersCount = 3 + +type defaultSender struct { + sender Sender + init *sync.Once +} + +// each type of sender will be created on demand in sender() +var defaultSenders [defaultSendersCount]defaultSender + +func init() { + for i := 0; i < defaultSendersCount; i++ { + defaultSenders[i].init = &sync.Once{} + } +} + +// used as a key type in context.WithValue() +type ctxSendDecorators struct{} + +// WithSendDecorators adds the specified SendDecorators to the provided context. +// If no SendDecorators are provided the context is unchanged. +func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context { + if len(sendDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator) +} + +// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators. +func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator { + inCtx := ctx.Value(ctxSendDecorators{}) + if sd, ok := inCtx.([]SendDecorator); ok { + return sd + } + return defaultSendDecorators +} + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. 
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(tls.RenegotiateNever), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies
+// to the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(sender(tls.RenegotiateNever), r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+func sender(renengotiation tls.RenegotiationSupport) Sender {
+	// note that we can't init defaultSenders in init() since it will
+	// execute before calling code has had a chance to enable tracing
+	defaultSenders[renengotiation].init.Do(func() {
+		// copied from http.DefaultTransport with a TLS minimum version.
+func sender(renegotiation tls.RenegotiationSupport) Sender {
+	// note that we can't init defaultSenders in init() since it will
+	// execute before calling code has had a chance to enable tracing
+	defaultSenders[renegotiation].init.Do(func() {
+		// copied from http.DefaultTransport with a TLS minimum version.
+		transport := &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			DialContext: (&net.Dialer{
+				Timeout:   30 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).DialContext,
+			ForceAttemptHTTP2:     true,
+			MaxIdleConns:          100,
+			IdleConnTimeout:       90 * time.Second,
+			TLSHandshakeTimeout:   10 * time.Second,
+			ExpectContinueTimeout: 1 * time.Second,
+			TLSClientConfig: &tls.Config{
+				MinVersion:    tls.VersionTLS12,
+				Renegotiation: renegotiation,
+			},
+		}
+		var roundTripper http.RoundTripper = transport
+		if tracing.IsEnabled() {
+			roundTripper = tracing.NewTransport(transport)
+		}
+		j, _ := cookiejar.New(nil)
+		defaultSenders[renegotiation].sender = &http.Client{Jar: j, Transport: roundTripper}
+	})
+	return defaultSenders[renegotiation].sender
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by closing the optional channel on the
+// http.Request. If canceled, no further Senders are invoked.
+func AfterDelay(d time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			if !DelayForBackoff(d, 0, r.Context().Done()) {
+				return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
+			}
+			return s.Do(r)
+		})
+	}
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
+func AsIs() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return s.Do(r)
+		})
+	}
+}
+
+// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
+// it closes the response if the passed Sender returns an error and the response body exists.
+func DoCloseIfError() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err != nil {
+				Respond(resp, ByDiscardingBody(), ByClosing())
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
+// among the set passed. Since these are artificial errors, the response body may still require
+// closing.
+func DoErrorIfStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
+// StatusCode is among the set passed. Since these are artificial errors, the response body
+// may still require closing.
+func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
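A hedged usage sketch for the status-code decorators, again with a placeholder URL: DoErrorUnlessStatusCode turns any unexpected status into an artificial error while the response (and its body, which may still need closing) remains available to the caller.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "https://example.com/health", nil) // placeholder URL
	if err != nil {
		panic(err)
	}
	// Anything other than 200 or 204 becomes an artificial error; per the
	// doc comment above, the body may still require closing in that case.
	resp, err := autorest.SendWithSender(http.DefaultClient, req,
		autorest.DoErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent))
	if resp != nil {
		defer resp.Body.Close()
	}
	if err != nil {
		fmt.Println("unexpected status:", err)
	}
}
```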
+// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the
+// passed status codes. It expects the http.Response to contain a Location header providing the
+// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than
+// the supplied duration. It will delay between requests for the duration specified in the
+// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
+// closing the optional channel on the http.Request.
+func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			resp, err = s.Do(r)
+
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				r, err = NewPollingRequestWithContext(r.Context(), resp)
+
+				for err == nil && ResponseHasStatusCode(resp, codes...) {
+					Respond(resp,
+						ByDiscardingBody(),
+						ByClosing())
+					resp, err = SendWithSender(s, r,
+						AfterDelay(GetRetryAfter(resp, delay)))
+				}
+			}
+
+			return resp, err
+		})
+	}
+}
+
+// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
+// the http.Request.
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			for attempt := 0; attempt < attempts; attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				DrainResponseBody(resp)
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				logger.Instance.Writef(logger.LogError, "DoRetryForAttempts: received error for attempt %d: %v\n", attempt+1, err)
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// Count429AsRetry indicates that a 429 response should be included as a retry attempt.
+var Count429AsRetry = true
+
+// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received.
+var Max429Delay time.Duration
+
+// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by canceling the context on the http.Request.
+// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts.
+func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...)
+		})
+	}
+}
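A minimal sketch of wiring the retry decorator into a send, with placeholder values (three retries on 429/503, a one-second exponential backoff base, cancellation via the request context):

```go
package main

import (
	"context"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	req, err := http.NewRequest(http.MethodGet, "https://example.com/data", nil) // placeholder URL
	if err != nil {
		panic(err)
	}
	req = req.WithContext(ctx) // canceling ctx stops further retries

	resp, err := autorest.SendWithSender(http.DefaultClient, req,
		autorest.DoRetryForStatusCodes(3, time.Second,
			http.StatusTooManyRequests, http.StatusServiceUnavailable))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
```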
+// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the
+// specified number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater
+// than zero for cap. Retrying may be canceled by canceling the context on the http.Request.
+func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...)
+		})
+	}
+}
+
+func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) {
+	rr := NewRetriableRequest(r)
+	// Increment to add the first call (attempts denotes number of retries)
+	for attempt, delayCount := 0, 0; attempt < attempts+1; {
+		err = rr.Prepare()
+		if err != nil {
+			return
+		}
+		DrainResponseBody(resp)
+		resp, err = s.Do(rr.Request())
+		// we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
+		// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
+		if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
+			return resp, err
+		}
+		if err != nil {
+			logger.Instance.Writef(logger.LogError, "DoRetryForStatusCodes: received error for attempt %d: %v\n", attempt+1, err)
+		}
+		delayed := DelayWithRetryAfter(resp, r.Context().Done())
+		// if this was a 429 set the delay cap as specified.
+		// applicable only in the absence of a retry-after header.
+		if resp != nil && resp.StatusCode == http.StatusTooManyRequests {
+			cap = Max429Delay
+		}
+		if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) {
+			return resp, r.Context().Err()
+		}
+		// when count429 == false don't count a 429 against the number
+		// of attempts so that we continue to retry until it succeeds
+		if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
+			attempt++
+		}
+		// delay count is tracked separately from attempts to
+		// ensure that 429 participates in exponential back-off
+		delayCount++
+	}
+	return resp, err
+}
+
+// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header.
+// The value of Retry-After can be either the number of seconds or a date in RFC1123 format.
+// The function returns true after successfully waiting for the specified duration. If there is
+// no Retry-After header or the wait is canceled the return value is false.
+func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
+	if resp == nil {
+		return false
+	}
+	var dur time.Duration
+	ra := resp.Header.Get("Retry-After")
+	if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+		dur = time.Duration(retryAfter) * time.Second
+	} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+		dur = time.Until(t)
+	}
+	if dur > 0 {
+		select {
+		case <-time.After(dur):
+			return true
+		case <-cancel:
+			return false
+		}
+	}
+	return false
+}
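For reference, a self-contained sketch of the two Retry-After formats DelayWithRetryAfter accepts, mirroring the parsing above: a delta in seconds, or an RFC1123 HTTP-date. The helper name is invented for the example.

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// retryAfterDuration mirrors the header parsing in DelayWithRetryAfter:
// first try an integer number of seconds, then an RFC1123 date.
func retryAfterDuration(ra string) time.Duration {
	if secs, _ := strconv.Atoi(ra); secs > 0 {
		return time.Duration(secs) * time.Second
	}
	if t, err := time.Parse(time.RFC1123, ra); err == nil {
		return time.Until(t)
	}
	return 0
}

func main() {
	fmt.Println(retryAfterDuration("120")) // 2m0s
	date := time.Now().Add(30 * time.Second).UTC().Format(time.RFC1123)
	fmt.Println(retryAfterDuration(date)) // roughly 30s
}
```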
+// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
+// to or greater than the specified duration, exponentially backing off between requests using the
+// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the
+// optional channel on the http.Request.
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			end := time.Now().Add(d)
+			for attempt := 0; time.Now().Before(end); attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				DrainResponseBody(resp)
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				logger.Instance.Writef(logger.LogError, "DoRetryForDuration: received error for attempt %d: %v\n", attempt+1, err)
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// WithLogging returns a SendDecorator that implements simple before and after logging of the
+// request.
+func WithLogging(logger *log.Logger) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			logger.Printf("Sending %s %s", r.Method, r.URL)
+			resp, err := s.Do(r)
+			if err != nil {
+				logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
+			} else {
+				logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
+// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
+// returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+	return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
+// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap.
+// The delay may be canceled by closing the passed channel. If terminated early, returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool {
+	d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
+	if cap > 0 && d > cap {
+		d = cap
+	}
+	logger.Instance.Writef(logger.LogInfo, "DelayForBackoffWithCap: sleeping for %s\n", d)
+	select {
+	case <-time.After(d):
+		return true
+	case <-cancel:
+		return false
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
new file mode 100644
index 000000000..d35b3850a
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -0,0 +1,232 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "reflect" + "strings" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (b bytes.Buffer, err error) { + err = NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) + return +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. +func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. 
+func MapToValues(m map[string]interface{}) url.Values { + v := url.Values{} + for key, value := range m { + x := reflect.ValueOf(value) + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + for i := 0; i < x.Len(); i++ { + v.Add(key, ensureValueString(x.Index(i))) + } + } else { + v.Add(key, ensureValueString(value)) + } + } + return v +} + +// AsStringSlice method converts interface{} to []string. +// s must be of type slice or array or an error is returned. +// Each element of s will be converted to its string representation. +func AsStringSlice(s interface{}) ([]string, error) { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.") + } + stringSlice := make([]string, 0, v.Len()) + + for i := 0; i < v.Len(); i++ { + stringSlice = append(stringSlice, fmt.Sprintf("%v", v.Index(i))) + } + return stringSlice, nil +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using the separator. Note that only sep[0] will be used for +// joining if any separator is specified. +func String(v interface{}, sep ...string) string { + if len(sep) == 0 { + return ensureValueString(v) + } + stringSlice, ok := v.([]string) + if ok == false { + var err error + stringSlice, err = AsStringSlice(v) + if err != nil { + panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) + } + } + return ensureValueString(strings.Join(stringSlice, sep[0])) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) + switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). +// This is mainly useful for long-running operations that use the Azure-AsyncOperation +// header, so we change the initial PUT into a GET to retrieve the final result. +func ChangeToGet(req *http.Request) *http.Request { + req.Method = "GET" + req.Body = nil + req.ContentLength = 0 + req.Header.Del("Content-Length") + return req +} + +// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false +// if it's not. If the error doesn't implement the net.Error interface the return value is true. +func IsTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { + return true + } + return false +} + +// DrainResponseBody reads the response body then closes it. 
+func DrainResponseBody(resp *http.Response) error { + if resp != nil && resp.Body != nil { + _, err := io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + return err + } + return nil +} + +func setHeader(r *http.Request, key, value string) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(key, value) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go new file mode 100644 index 000000000..3133fcc08 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go @@ -0,0 +1,30 @@ +//go:build go1.13 +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "errors" + + "github.com/Azure/go-autorest/autorest/adal" +) + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError interface. +func IsTokenRefreshError(err error) bool { + var tre adal.TokenRefreshError + return errors.As(err, &tre) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go new file mode 100644 index 000000000..851e152db --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go @@ -0,0 +1,32 @@ +//go:build !go1.13 +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import "github.com/Azure/go-autorest/autorest/adal" + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError +// interface. If err is a DetailedError it will walk the chain of Original errors. +func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true + } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) + } + return false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 000000000..713e23581 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,41 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +const number = "v14.2.1" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return number +} diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml new file mode 100644 index 000000000..6fb8404fd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml @@ -0,0 +1,105 @@ +variables: + GOPATH: '$(system.defaultWorkingDirectory)/work' + sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' + +jobs: + - job: 'goautorest' + displayName: 'Run go-autorest CI Checks' + + strategy: + matrix: + Linux_Go113: + vm.image: 'ubuntu-18.04' + go.version: '1.13' + Linux_Go114: + vm.image: 'ubuntu-18.04' + go.version: '1.14' + + pool: + vmImage: '$(vm.image)' + + steps: + - task: GoTool@0 + inputs: + version: '$(go.version)' + displayName: "Select Go Version" + + - script: | + set -e + mkdir -p '$(GOPATH)/bin' + mkdir -p '$(sdkPath)' + shopt -s extglob + mv !(work) '$(sdkPath)' + echo '##vso[task.prependpath]$(GOPATH)/bin' + displayName: 'Create Go Workspace' + + - script: | + set -e + curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure -v + go install ./vendor/golang.org/x/lint/golint + go get github.com/jstemmer/go-junit-report + go get github.com/axw/gocov/gocov + go get github.com/AlekSi/gocov-xml + go get -u github.com/matm/gocov-html + workingDirectory: '$(sdkPath)' + displayName: 'Install Dependencies' + + - script: | + go vet ./autorest/... + go vet ./logger/... + go vet ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Vet' + + - script: | + go build -v ./autorest/... + go build -v ./logger/... + go build -v ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Build' + + - script: | + set -e + go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml + gocov convert coverage.txt > coverage.json + gocov-xml < coverage.json > coverage.xml + gocov-html < coverage.json > coverage.html + workingDirectory: '$(sdkPath)' + displayName: 'Run Tests' + + - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Copyright Header Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + gofmt -s -l -w ./autorest/. >&2 + gofmt -s -l -w ./logger/. >&2 + gofmt -s -l -w ./tracing/. >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Format Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + golint ./autorest/... >&2 + golint ./logger/... >&2 + golint ./tracing/... 
>&2 + workingDirectory: '$(sdkPath)' + displayName: 'Linter Check' + failOnStderr: true + condition: succeededOrFailed() + + - task: PublishTestResults@2 + inputs: + testRunner: JUnit + testResultsFiles: $(sdkPath)/report.xml + failTaskOnFailedTests: true + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(sdkPath)/coverage.xml + additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go new file mode 100644 index 000000000..99ae6ca98 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/doc.go @@ -0,0 +1,18 @@ +/* +Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. +*/ +package go_autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go new file mode 100644 index 000000000..0aa27680d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. 
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 000000000..2f5d8cc1a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,337 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug + + // LogAuth is a special case of LogDebug, it tells a logger to also log the body of an authentication request and response. + // NOTE: this can disclose sensitive information, use with care. + LogAuth +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logAuth = "AUTH" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. +func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + case logAuth: + lt = LogAuth + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. 
+func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + case LogAuth: + return logAuth + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. + Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no request content is excluded. + WriteRequest(req *http.Request, filter Filter) + + // WriteResponse writes the specified HTTP response to the logger if the log level is greater than + // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no response content is excluded. + WriteResponse(resp *http.Response, filter Filter) +} + +// Instance is the default log writer initialized during package init. +// This can be replaced with a custom implementation as required. +var Instance Writer + +// default log level +var logLevel = LogNone + +// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. +// If no value was specified the default value is LogNone. +// Custom loggers can call this to retrieve the configured log level. 
+func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. +type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to 
read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go new file mode 100644 index 000000000..e163975cd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package tracing + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 000000000..0e7a6e962 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,67 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" +) + +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + +var ( + tracer Tracer +) + +// Register will register the provided Tracer. Pass nil to unregister a Tracer. +func Register(t Tracer) { + tracer = t +} + +// IsEnabled returns true if a Tracer has been registered. +func IsEnabled() bool { + return tracer != nil +} + +// NewTransport creates a new instrumenting http.RoundTripper for the +// registered Tracer. If no Tracer has been registered it returns nil. 
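+// The returned transport would typically be installed on an http.Client so that
+// outgoing requests are traced, e.g.
+// &http.Client{Transport: tracing.NewTransport(http.DefaultTransport.(*http.Transport))}.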
+func NewTransport(base *http.Transport) http.RoundTripper {
+	if tracer != nil {
+		return tracer.NewTransport(base)
+	}
+	return nil
+}
+
+// StartSpan starts a trace span with the specified name, associating it with the
+// provided context. Has no effect if a Tracer has not been registered.
+func StartSpan(ctx context.Context, name string) context.Context {
+	if tracer != nil {
+		return tracer.StartSpan(ctx, name)
+	}
+	return ctx
+}
+
+// EndSpan ends a previously started span stored in the context.
+// Has no effect if a Tracer has not been registered.
+func EndSpan(ctx context.Context, httpStatusCode int, err error) {
+	if tracer != nil {
+		tracer.EndSpan(ctx, httpStatusCode, err)
+	}
+}
diff --git a/vendor/github.com/DataDog/gostackparse/CONTRIBUTING.md b/vendor/github.com/DataDog/gostackparse/CONTRIBUTING.md
new file mode 100644
index 000000000..d44f02915
--- /dev/null
+++ b/vendor/github.com/DataDog/gostackparse/CONTRIBUTING.md
@@ -0,0 +1,34 @@
+# Contributing
+
+Contributions are welcome. For smaller changes feel free to send a PR directly, but for big stuff it probably makes sense to open an issue first to discuss it.
+
+Please always try to clearly describe the problem you are trying to solve (your intent) rather than just describing the change you are trying to make.
+
+## Runbook
+
+Below are various commands and procedures that might be useful for people trying to contribute to this package.
+
+```
+# run all tests
+go test -v
+
+# run all benchmarks
+cp panicparse_test.go.disabled panicparse_test.go && go get -t . && go test -bench .
+
+### FUZZING ###
+# generate fuzz-corpus directory (takes > 10s, generates 434MiB of data)
+FUZZ_CORPUS=true go test
+# alternative to the above corpus generation (doesn't seem to work as well)
+mkdir -p corpus && cp test-fixtures/*.txt corpus
+# install/update go-fuzz
+go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
+# build the test program with necessary instrumentation
+go-fuzz-build
+# start fuzzing, then wait for a while to see if it finds any issues
+go-fuzz
+# cleanup
+rm -rf gostackparse-fuzz.zip corpus
+
+# refresh the go.dev cache by visiting this URL with the latest version
+https://pkg.go.dev/github.com/DataDog/gostackparse@v0.4.0
+```
diff --git a/vendor/github.com/DataDog/gostackparse/LICENSE b/vendor/github.com/DataDog/gostackparse/LICENSE
new file mode 100644
index 000000000..127c27300
--- /dev/null
+++ b/vendor/github.com/DataDog/gostackparse/LICENSE
@@ -0,0 +1,232 @@
+This work is dual-licensed under Apache 2.0 or BSD3.
+You may select, at your option, one of the above-listed licenses.
+
+`SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause`
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +--- + +Copyright (c) 2021-Present, Datadog +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Datadog nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/DataDog/gostackparse/LICENSE-3rdparty.csv b/vendor/github.com/DataDog/gostackparse/LICENSE-3rdparty.csv new file mode 100644 index 000000000..7e96bfdef --- /dev/null +++ b/vendor/github.com/DataDog/gostackparse/LICENSE-3rdparty.csv @@ -0,0 +1 @@ +Component,Origin,License,Copyright diff --git a/vendor/github.com/DataDog/gostackparse/LICENSE-APACHE b/vendor/github.com/DataDog/gostackparse/LICENSE-APACHE new file mode 100644 index 000000000..bff56b543 --- /dev/null +++ b/vendor/github.com/DataDog/gostackparse/LICENSE-APACHE @@ -0,0 +1,200 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Datadog, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/DataDog/gostackparse/LICENSE-BSD3 b/vendor/github.com/DataDog/gostackparse/LICENSE-BSD3 new file mode 100644 index 000000000..923732099 --- /dev/null +++ b/vendor/github.com/DataDog/gostackparse/LICENSE-BSD3 @@ -0,0 +1,24 @@ +Copyright (c) 2016-Present, Datadog +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Datadog nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/DataDog/gostackparse/README.md b/vendor/github.com/DataDog/gostackparse/README.md
new file mode 100644
index 000000000..2aff03bb2
--- /dev/null
+++ b/vendor/github.com/DataDog/gostackparse/README.md
@@ -0,0 +1,91 @@
+[![ci test status](https://img.shields.io/github/workflow/status/DataDog/gostackparse/Go?label=tests)](https://github.com/DataDog/gostackparse/actions/workflows/go.yml?query=branch%3Amain)
+[![documentation](http://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/DataDog/gostackparse)
+
+# gostackparse
+
+Package gostackparse parses goroutines stack traces as produced by [`panic()`](https://golang.org/pkg/builtin/#panic) or [`debug.Stack()`](https://golang.org/pkg/runtime/debug/#Stack) at ~300 MiB/s.
+
+Parsing this data can be useful for [Goroutine Profiling](https://github.com/DataDog/go-profiler-notes/blob/main/goroutine.md#feature-matrix) or analyzing crashes from log files.
+
+## Usage
+
+The package provides a simple [Parse()](https://pkg.go.dev/github.com/DataDog/gostackparse#Parse) API. You can use it like [this](./example):
+
+```go
+import (
+	"bytes"
+	"encoding/json"
+	"os"
+	"runtime/debug"
+
+	"github.com/DataDog/gostackparse"
+)
+
+func main() {
+	// Get a text-based stack trace
+	stack := debug.Stack()
+	// Parse it
+	goroutines, _ := gostackparse.Parse(bytes.NewReader(stack))
+	// Output the results
+	json.NewEncoder(os.Stdout).Encode(goroutines)
+}
+```
+
+The result is a simple list of [Goroutine](https://pkg.go.dev/github.com/DataDog/gostackparse#Goroutine) structs:
+
+```
+[
+  {
+    "ID": 1,
+    "State": "running",
+    "Wait": 0,
+    "LockedToThread": false,
+    "Stack": [
+      {
+        "Func": "runtime/debug.Stack",
+        "File": "/usr/local/Cellar/go/1.16/libexec/src/runtime/debug/stack.go",
+        "Line": 24
+      },
+      {
+        "Func": "main.main",
+        "File": "/home/go/src/github.com/DataDog/gostackparse/example/main.go",
+        "Line": 18
+      }
+    ],
+    "FramesElided": false,
+    "CreatedBy": null
+  }
+]
+```
+
+## Design Goals
+
+1. Safe: No panics should be thrown.
+2. Simple: Keep this pkg small and easy to modify.
+3. Forgiving: Favor producing partial results over no results, even if the input data is different than expected.
+4. Efficient: Parse several hundred MiB/s.
+
+## Testing
+
+gostackparse has been tested using a combination of hand picked [test-fixtures](./test-fixtures), [property based testing](https://github.com/DataDog/gostackparse/search?q=TestParse_PropertyBased), and [fuzzing](https://github.com/DataDog/gostackparse/search?q=Fuzz).
+
+## Comparison to panicparse
+
+[panicparse](https://github.com/maruel/panicparse) is a popular library implementing similar functionality.
+
+gostackparse was created to provide a subset of the functionality (only the parsing) using ~10x less code while achieving > 100x faster performance.
If you like fast minimalistic code, you might prefer it. If you're looking for more features and a larger community, use panicparse.
+
+## Benchmarks
+
+gostackparse includes a small benchmark that shows that it can parse [test-fixtures/waitsince.txt](./test-fixtures/waitsince.txt) at ~300 MiB/s, and how that compares to panicparse.
+
+```
+$ cp panicparse_test.go.disabled panicparse_test.go
+$ go get -t .
+$ go test -bench .
+goos: darwin
+goarch: amd64
+pkg: github.com/DataDog/gostackparse
+cpu: Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz
+BenchmarkGostackparse-12    45456      26275 ns/op   302.34 MiB/s     17243 B/op     306 allocs/op
+BenchmarkPanicparse-12         76   15943320 ns/op     0.50 MiB/s   5274247 B/op  116049 allocs/op
+PASS
+ok      github.com/DataDog/gostackparse 3.634s
+```
+
+## License
+
+This work is dual-licensed under Apache 2.0 or BSD3. See [LICENSE](./LICENSE).
diff --git a/vendor/github.com/DataDog/gostackparse/fuzz.go b/vendor/github.com/DataDog/gostackparse/fuzz.go
new file mode 100644
index 000000000..6df2a29d4
--- /dev/null
+++ b/vendor/github.com/DataDog/gostackparse/fuzz.go
@@ -0,0 +1,20 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//+build gofuzz
+
+package gostackparse
+
+import "bytes"
+
+// Fuzz implements fuzzing using https://github.com/dvyukov/go-fuzz. See
+// TestFuzzCorpus for generating an initial test corpus.
+func Fuzz(data []byte) int {
+	goroutines, _ := Parse(bytes.NewReader(data))
+	if len(goroutines) > 0 {
+		return 1
+	}
+	return 0
+}
diff --git a/vendor/github.com/DataDog/gostackparse/gostackparse.go b/vendor/github.com/DataDog/gostackparse/gostackparse.go
new file mode 100644
index 000000000..4b2adb7da
--- /dev/null
+++ b/vendor/github.com/DataDog/gostackparse/gostackparse.go
@@ -0,0 +1,344 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+// Package gostackparse parses goroutines stack traces as produced by panic()
+// or debug.Stack() at ~300 MiB/s.
+package gostackparse
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type parserState int
+
+const (
+	stateHeader = iota
+	stateStackFunc
+	stateStackFile
+	stateCreatedBy
+	stateCreatedByFunc
+	stateCreatedByFile
+	stateOriginatingFrom
+)
+
+// Parse parses a goroutines stack trace dump as produced by runtime.Stack().
+// The parser is forgiving and will continue parsing even when encountering
+// unexpected data. When this happens it will try to discard the entire
+// goroutine that encountered the problem and continue with the next one. It
+// will also return an error for every goroutine that couldn't be parsed. If
+// all goroutines were parsed successfully, the []error slice is empty.
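+//
+// A typical invocation (illustrative) parses the output of debug.Stack():
+//
+//	goroutines, errs := gostackparse.Parse(bytes.NewReader(debug.Stack()))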
+func Parse(r io.Reader) ([]*Goroutine, []error) {
+	var (
+		sc      = bufio.NewScanner(r)
+		state   parserState
+		lineNum int
+		line    []byte
+
+		goroutines []*Goroutine
+		errs       []error
+		g          *Goroutine
+		f          *Frame
+
+		abortGoroutine = func(msg string) {
+			err := fmt.Errorf(
+				"%s on line %d: %q",
+				msg,
+				lineNum,
+				line,
+			)
+			errs = append(errs, err)
+			goroutines = goroutines[0 : len(goroutines)-1]
+			state = stateHeader
+		}
+	)
+
+	// This for loop implements a simple line based state machine for parsing
+	// the goroutines dump. The state != stateHeader makes sure we handle an
+	// unexpected EOF the same way we'd handle an empty line.
+	for sc.Scan() || state != stateHeader {
+		line = sc.Bytes()
+		lineNum++
+
+	statemachine:
+		switch state {
+		case stateHeader, stateOriginatingFrom:
+			// Ignore lines that don't look like goroutine headers. This helps with
+			// leading/trailing whitespace but also in case we encountered an error
+			// during the previous goroutine and need to seek to the beginning of the
+			// next one.
+			if state == stateHeader {
+				if !bytes.HasPrefix(line, goroutinePrefix) {
+					continue
+				}
+				g = parseGoroutineHeader(line[len(goroutinePrefix):])
+				goroutines = append(goroutines, g)
+			}
+			if state == stateOriginatingFrom {
+				ancestorIDStr := line[len(originatingFromPrefix) : len(line)-2]
+				ancestorID, err := strconv.Atoi(string(ancestorIDStr))
+				if err != nil {
+					abortGoroutine(err.Error())
+					continue
+				}
+				ancestorG := &Goroutine{ID: ancestorID}
+				g.Ancestor = ancestorG
+				g = ancestorG
+			}
+
+			state = stateStackFunc
+			if g == nil {
+				abortGoroutine("invalid goroutine header")
+			}
+		case stateStackFunc, stateCreatedByFunc:
+			f = parseFunc(line, state)
+			if f == nil {
+				if bytes.Equal(line, framesElided) {
+					g.FramesElided = true
+					state = stateCreatedBy
+					continue
+				}
+				if bytes.HasPrefix(line, originatingFromPrefix) {
+					state = stateOriginatingFrom
+					goto statemachine
+				}
+				abortGoroutine("invalid function call")
+				continue
+			}
+			if state == stateStackFunc {
+				g.Stack = append(g.Stack, f)
+				state = stateStackFile
+			} else {
+				g.CreatedBy = f
+				state = stateCreatedByFile
+			}
+		case stateStackFile, stateCreatedByFile:
+			if !parseFile(line, f) {
+				abortGoroutine("invalid file:line ref")
+				continue
+			}
+			state = stateCreatedBy
+		case stateCreatedBy:
+			if bytes.HasPrefix(line, createdByPrefix) {
+				line = line[len(createdByPrefix):]
+				state = stateCreatedByFunc
+				goto statemachine
+			} else if len(line) == 0 {
+				state = stateHeader
+			} else {
+				state = stateStackFunc
+				goto statemachine
+			}
+		}
+	}
+
+	if err := sc.Err(); err != nil {
+		errs = append(errs, err)
+	}
+	return goroutines, errs
+}
+
+var (
+	goroutinePrefix       = []byte("goroutine ")
+	createdByPrefix       = []byte("created by ")
+	originatingFromPrefix = []byte("[originating from goroutine ")
+	framesElided          = []byte("...additional frames elided...")
+)
+
+var goroutineHeader = regexp.MustCompile(
+	`^(\d+) \[([^,]+)(?:, (\d+) minutes)?(, locked to thread)?\]:$`,
+)
+
+// parseGoroutineHeader parses a goroutine header line and returns a new
+// Goroutine on success or nil on error. It expects the `goroutinePrefix` to
+// be trimmed from the beginning of the line.
+//
+// Example Input:
+// 1 [chan receive, 6883 minutes]:
+//
+// Example Output:
+// &Goroutine{ID: 1, State: "chan receive", Wait: 6883 * time.Minute}
+func parseGoroutineHeader(line []byte) *Goroutine {
+	// TODO(fg) would probably be faster if we didn't use a regexp for this, but
+	// might be more hassle than it's worth.
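+	// m holds the full match plus four submatches: the goroutine id, the state,
+	// the optional wait time in minutes, and the optional "locked to thread"
+	// marker.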
+	m := goroutineHeader.FindSubmatch(line)
+	if len(m) != 5 {
+		return nil
+	}
+	var (
+		id          = m[1]
+		state       = m[2]
+		waitminutes = m[3]
+		locked      = m[4]
+
+		g   = &Goroutine{State: string(state), LockedToThread: len(locked) > 0}
+		err error
+	)
+
+	// regex currently sucks `abc minutes` into the state string if abc is
+	// non-numeric, let's not consider this a valid goroutine.
+	if strings.HasSuffix(g.State, " minutes") {
+		return nil
+	}
+	if g.ID, err = strconv.Atoi(string(id)); err != nil {
+		// should be impossible to end up here
+		return nil
+	}
+	if len(waitminutes) > 0 {
+		min, err := strconv.Atoi(string(waitminutes))
+		if err != nil {
+			// should be impossible to end up here
+			return nil
+		}
+		g.Wait = time.Duration(min) * time.Minute
+	}
+	return g
+}
+
+// parseFunc parses a func call with potential argument addresses and returns a
+// new Frame for it on success or nil on error.
+//
+// Example Input:
+// runtime/pprof.writeGoroutineStacks(0x2b016e0, 0xc0995cafc0, 0xc00468e150, 0x0)
+//
+// Example Output:
+// &Frame{Func: "runtime/pprof.writeGoroutineStacks"}
+func parseFunc(line []byte, state parserState) *Frame {
+	// createdBy func calls don't have trailing parens, just return them as-is.
+	if state == stateCreatedByFunc {
+		return &Frame{Func: string(line)}
+	}
+
+	// A valid func call is supposed to have at least one matched pair of parens.
+	// Multiple matched pairs are allowed, but nesting is not.
+	var (
+		openIndex  = -1
+		closeIndex = -1
+	)
+	for i, r := range line {
+		switch r {
+		case '(':
+			if openIndex != -1 && closeIndex == -1 {
+				return nil
+			}
+			openIndex = i
+			closeIndex = -1
+		case ')':
+			if openIndex == -1 || closeIndex != -1 {
+				return nil
+			}
+			closeIndex = i
+		}
+	}
+
+	if openIndex == -1 || closeIndex == -1 || openIndex == 0 {
+		return nil
+	}
+	return &Frame{Func: string(line[0:openIndex])}
+}
+
+// parseFile parses a file line and updates f accordingly or returns false on
+// error.
+//
+// Example Input:
+// \t/root/go1.15.6.linux.amd64/src/net/http/server.go:2969 +0x36c
+//
+// Example Update:
+// &Frame{File: "/root/go1.15.6.linux.amd64/src/net/http/server.go", Line: 2969}
+//
+// Note: The +0x36c is the relative pc offset from the function entry pc. It's
+// omitted if it's 0.
+func parseFile(line []byte, f *Frame) (ret bool) {
+	if len(line) < 2 || line[0] != '\t' {
+		return false
+	}
+	line = line[1:]
+
+	const (
+		stateFilename = iota
+		stateColon
+		stateLine
+	)
+
+	var state = stateFilename
+	for i, c := range line {
+		switch state {
+		case stateFilename:
+			if c == ':' {
+				state = stateColon
+			}
+		case stateColon:
+			if isDigit(c) {
+				f.File = string(line[0 : i-1])
+				f.Line = int(c - '0')
+				state = stateLine
+				ret = true
+			} else {
+				state = stateFilename
+			}
+		case stateLine:
+			if c == ' ' {
+				return true
+			} else if !isDigit(c) {
+				return false
+			}
+			f.Line = f.Line*10 + int(c-'0')
+		}
+
+	}
+	return
+}
+
+func isDigit(c byte) bool {
+	return c >= '0' && c <= '9'
+}
+
+// Goroutine represents a single goroutine and its stack after extracting it
+// from the runtime.Stack() text format. See [1] for more info.
+// [1] https://github.com/felixge/go-profiler-notes/blob/main/goroutine.md
+type Goroutine struct {
+	// ID is the goroutine id (aka `goid`).
+	ID int
+	// State is the `atomicstatus` of the goroutine, or if "waiting" the
+	// `waitreason`.
+	State string
+	// Wait is the approximate duration a goroutine has been waiting or in a
+	// syscall as determined by the first gc after the wait started. Aka
+	// `waitsince`.
+	Wait time.Duration
+	// LockedToThread is true if the goroutine is locked by a thread, aka
+	// `lockedm`.
+	LockedToThread bool
+	// Stack is the stack trace of the goroutine.
+	Stack []*Frame
+	// FramesElided is true if the stack trace contains a message indicating that
+	// additional frames were elided. This happens when the stack depth exceeds
+	// 100.
+	FramesElided bool
+	// CreatedBy is the frame that created this goroutine, nil for main().
+	CreatedBy *Frame
+	// Ancestor is the goroutine that created this goroutine, if known.
+	// See GODEBUG=tracebackancestors=n in https://pkg.go.dev/runtime.
+	Ancestor *Goroutine `json:"Ancestor,omitempty"`
+}
+
+// Frame is a single call frame on the stack.
+type Frame struct {
+	// Func is the name of the function, including package name, e.g. "main.main"
+	// or "net/http.(*Server).Serve".
+	Func string
+	// File is the absolute path of the source file, e.g.
+	// "/go/src/example.org/example/main.go".
+	File string
+	// Line is the line number inside the source file that was active when
+	// the sample was taken.
+	Line int
+}
diff --git a/vendor/github.com/DataDog/gostackparse/panicparse_test.go.disabled b/vendor/github.com/DataDog/gostackparse/panicparse_test.go.disabled
new file mode 100644
index 000000000..da9251e4f
--- /dev/null
+++ b/vendor/github.com/DataDog/gostackparse/panicparse_test.go.disabled
@@ -0,0 +1,46 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2021 Datadog, Inc.
+
+// NOTE(fg): To run this benchmark you have to drop the ".disabled" suffix from
+// the filename. This is done to avoid the panicparse library ending up in the
+// go.mod file of this package. If anybody has better ideas, please let me know
+// : ).
+
+package gostackparse
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/maruel/panicparse/v2/stack"
+	"github.com/stretchr/testify/require"
+)
+
+func BenchmarkPanicparse(b *testing.B) {
+	data, err := ioutil.ReadFile(filepath.Join("test-fixtures", "waitsince.txt"))
+	require.NoError(b, err)
+
+	b.ResetTimer()
+	b.ReportAllocs()
+
+	start := time.Now()
+	parsedBytes := 0
+	for i := 0; i < b.N; i++ {
+		parsedBytes += len(data)
+		s, _, err := stack.ScanSnapshot(bytes.NewReader(data), io.Discard, stack.DefaultOpts())
+		if err != nil && err != io.EOF {
+			b.Fatal(err)
+		} else if l := len(s.Goroutines); l != 9 {
+			b.Fatal(l)
+		}
+	}
+
+	mbPerSec := float64(parsedBytes) / time.Since(start).Seconds() / 1024 / 1024
+	b.ReportMetric(mbPerSec, "MiB/s")
+}
diff --git a/vendor/github.com/Masterminds/squirrel/LICENSE b/vendor/github.com/Masterminds/squirrel/LICENSE
new file mode 100644
index 000000000..b459007fd
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/LICENSE
@@ -0,0 +1,23 @@
+MIT License
+
+Squirrel: The Masterminds
+Copyright (c) 2014-2015, Lann Martin. Copyright (C) 2015-2016, Google. Copyright (C) 2015, Matt Farina and Matt Butcher.
+
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/Masterminds/squirrel/select.go b/vendor/github.com/Masterminds/squirrel/select.go
index b585344ce..d55ce4c74 100644
--- a/vendor/github.com/Masterminds/squirrel/select.go
+++ b/vendor/github.com/Masterminds/squirrel/select.go
@@ -262,6 +262,13 @@ func (b SelectBuilder) Columns(columns ...string) SelectBuilder {
 	return builder.Extend(b, "Columns", parts).(SelectBuilder)
 }
 
+// RemoveColumns removes all columns from the query. New columns must be added
+// afterwards with the Column or Columns methods, otherwise building the query
+// returns an error.
+func (b SelectBuilder) RemoveColumns() SelectBuilder {
+	return builder.Delete(b, "Columns").(SelectBuilder)
+}
+
 // Column adds a result column to the query.
 // Unlike Columns, Column accepts args which will be bound to placeholders in
 // the columns string, for example:
diff --git a/vendor/github.com/Masterminds/squirrel/update.go b/vendor/github.com/Masterminds/squirrel/update.go
index 8d658d721..eb2a9c4dd 100644
--- a/vendor/github.com/Masterminds/squirrel/update.go
+++ b/vendor/github.com/Masterminds/squirrel/update.go
@@ -16,6 +16,7 @@ type updateData struct {
 	Prefixes   []Sqlizer
 	Table      string
 	SetClauses []setClause
+	From       Sqlizer
 	WhereParts []Sqlizer
 	OrderBys   []string
 	Limit      string
@@ -100,6 +101,14 @@ func (d *updateData) ToSql() (sqlStr string, args []interface{}, err error) {
 	}
 	sql.WriteString(strings.Join(setSqls, ", "))
 
+	if d.From != nil {
+		sql.WriteString(" FROM ")
+		args, err = appendToSql([]Sqlizer{d.From}, sql, "", args)
+		if err != nil {
+			return
+		}
+	}
+
 	if len(d.WhereParts) > 0 {
 		sql.WriteString(" WHERE ")
 		args, err = appendToSql(d.WhereParts, sql, " AND ", args)
@@ -233,6 +242,19 @@ func (b UpdateBuilder) SetMap(clauses map[string]interface{}) UpdateBuilder {
 	return b
 }
 
+// From adds a FROM clause to the query.
+// FROM is a valid construct in PostgreSQL only.
+func (b UpdateBuilder) From(from string) UpdateBuilder {
+	return builder.Set(b, "From", newPart(from)).(UpdateBuilder)
+}
+
+// FromSelect sets a subquery into the FROM clause of the query.
+func (b UpdateBuilder) FromSelect(from SelectBuilder, alias string) UpdateBuilder {
+	// Prevent misnumbered parameters in nested selects (#183).
+	from = from.PlaceholderFormat(Question)
+	return builder.Set(b, "From", Alias(from, alias)).(UpdateBuilder)
+}
+
 // Where adds WHERE expressions to the query.
 //
 // See SelectBuilder.Where for more information.
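The `From`/`FromSelect` methods above enable PostgreSQL-style `UPDATE ... FROM` statements. A minimal sketch of how the added API could be used (table and column names are hypothetical, not from this changeset):

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// UPDATE ... FROM is PostgreSQL-only, so use $n placeholders.
	sql, args, err := sq.Update("employees"). // hypothetical table
		Set("commission", sq.Expr("commission + ?", 100)).
		From("accounts").
		Where("accounts.sales_person = employees.id").
		Where(sq.Eq{"accounts.name": "Acme"}).
		PlaceholderFormat(sq.Dollar).
		ToSql()
	if err != nil {
		panic(err)
	}
	fmt.Println(sql)  // UPDATE employees SET commission = commission + $1 FROM accounts WHERE ...
	fmt.Println(args) // [100 Acme]
}
```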
diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes
new file mode 100644
index 000000000..94f480de9
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitattributes
@@ -0,0 +1 @@
+* text=auto eol=lf
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore
new file mode 100644
index 000000000..815e20660
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitignore
@@ -0,0 +1,10 @@
+.vscode/
+
+*.exe
+
+# testing
+testdata
+
+# go workspaces
+go.work
+go.work.sum
diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml
new file mode 100644
index 000000000..7b503d26a
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml
@@ -0,0 +1,149 @@
+run:
+  skip-dirs:
+    - pkg/etw/sample
+
+linters:
+  enable:
+    # style
+    - containedctx # struct contains a context
+    - dupl # duplicate code
+    - errname # errors are named correctly
+    - nolintlint # "//nolint" directives are properly explained
+    - revive # golint replacement
+    - unconvert # unnecessary conversions
+    - wastedassign
+
+    # bugs, performance, unused, etc ...
+    - contextcheck # function uses a non-inherited context
+    - errorlint # errors not wrapped for 1.13
+    - exhaustive # check exhaustiveness of enum switch statements
+    - gofmt # files are gofmt'ed
+    - gosec # security
+    - nilerr # returns nil even with non-nil error
+    - unparam # unused function params
+
+issues:
+  exclude-rules:
+    # err is very often shadowed in nested scopes
+    - linters:
+        - govet
+      text: '^shadow: declaration of "err" shadows declaration'
+
+    # ignore long lines for skip autogen directives
+    - linters:
+        - revive
+      text: "^line-length-limit: "
+      source: "^//(go:generate|sys) "
+
+    #TODO: remove after upgrading to go1.18
+    # ignore comment spacing for nolint and sys directives
+    - linters:
+        - revive
+      text: "^comment-spacings: no space between comment delimiter and comment text"
+      source: "//(cspell:|nolint:|sys |todo)"
+
+    # not on go 1.18 yet, so no any
+    - linters:
+        - revive
+      text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
+
+    # allow unjustified ignores of error checks in defer statements
+    - linters:
+        - nolintlint
+      text: "^directive `//nolint:errcheck` should provide explanation"
+      source: '^\s*defer '
+
+    # allow unjustified ignores of error lints for io.EOF
+    - linters:
+        - nolintlint
+      text: "^directive `//nolint:errorlint` should provide explanation"
+      source: '[=|!]= io.EOF'
+
+
+linters-settings:
+  exhaustive:
+    default-signifies-exhaustive: true
+  govet:
+    enable-all: true
+    disable:
+      # struct order is often for Win32 compat
+      # also, ignore pointer bytes/GC issues for now until performance becomes an issue
+      - fieldalignment
+    check-shadowing: true
+  nolintlint:
+    allow-leading-space: false
+    require-explanation: true
+    require-specific: true
+  revive:
+    # revive is more configurable than static check, so likely the preferred alternative to static-check
+    # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
+    enable-all-rules:
+      true
+    # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
+    rules:
+      # rules with required arguments
+      - name: argument-limit
+        disabled: true
+      - name: banned-characters
+        disabled: true
+      - name: cognitive-complexity
+        disabled: true
+      - name: cyclomatic
+        disabled: true
+      - name: file-header
+        disabled: true
+      - name: function-length
+        disabled: true
+      - name: function-result-limit
+        disabled: true
+      - name: max-public-structs
+        disabled: true
+      # generally annoying rules
+      - name: add-constant # complains about any and all strings and integers
+        disabled: true
+      - name: confusing-naming # we frequently use "Foo()" and "foo()" together
+        disabled: true
+      - name: flag-parameter # excessive, and a common idiom we use
+        disabled: true
+      - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead
+        disabled: true
+      # general config
+      - name: line-length-limit
+        arguments:
+          - 140
+      - name: var-naming
+        arguments:
+          - []
+          - - CID
+            - CRI
+            - CTRD
+            - DACL
+            - DLL
+            - DOS
+            - ETW
+            - FSCTL
+            - GCS
+            - GMSA
+            - HCS
+            - HV
+            - IO
+            - LCOW
+            - LDAP
+            - LPAC
+            - LTSC
+            - MMIO
+            - NT
+            - OCI
+            - PMEM
+            - PWSH
+            - RX
+            - SACl
+            - SID
+            - SMB
+            - TX
+            - VHD
+            - VHDX
+            - VMID
+            - VPCI
+            - WCOW
+            - WIM
diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS
new file mode 100644
index 000000000..ae1b4942b
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/CODEOWNERS
@@ -0,0 +1 @@
+ * @microsoft/containerplat
diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE
new file mode 100644
index 000000000..b8b569d77
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md
new file mode 100644
index 000000000..7474b4f0b
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/README.md
@@ -0,0 +1,89 @@
+# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
+
+This repository contains utilities for efficiently performing Win32 IO operations in
+Go. Currently, this is focused on accessing named pipes and other file handles, and
+for using named pipes as a net transport.
+
+This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
+to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
+newer operating systems. This is similar to the implementation of network sockets in Go's net
+package.
+
+Please see the LICENSE file for licensing information.
+
+## Contributing
+
+This project welcomes contributions and suggestions.
+Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
+you have the right to, and actually do, grant us the rights to use your contribution.
+For details, visit [Microsoft CLA](https://cla.microsoft.com).
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to
+provide a CLA and decorate the PR appropriately (e.g., label, comment).
+Simply follow the instructions provided by the bot.
+You will only need to do this once across all repos using our CLA.
+
+Additionally, the pull request pipeline requires the following steps to be performed before
+merging.
+
+### Code Sign-Off
+
+We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
+to certify they either authored the work themselves or otherwise have permission to use it in this project.
+
+A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
+
+Please see [the developer certificate](https://developercertificate.org) for more info,
+as well as to make sure that you can attest to the rules listed.
+Our CI uses the DCO GitHub app to ensure that all commits in a given PR are signed-off.
+
+### Linting
+
+Code must pass a linting stage, which uses [`golangci-lint`][lint].
+The linting settings are stored in [`.golangci.yml`](./.golangci.yml), and can be run
+automatically with VSCode by adding the following to your workspace or folder settings:
+
+```json
+    "go.lintTool": "golangci-lint",
+    "go.lintOnSave": "package",
+```
+
+Additional editor [integration options are also available][lint-ide].
+
+Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
+
+```shell
+# use . or specify a path to only lint a package
+# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
+> golangci-lint run ./...
+```
+
+### Go Generate
+
+The pipeline checks that auto-generated code, via `go generate`, is up to date.
+
+This can be done for the entire repo:
+
+```shell
+> go generate ./...
+```
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Special Thanks
+
+Thanks to [natefinch][natefinch] for the inspiration for this library.
+See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
+ +[lint]: https://golangci-lint.run/ +[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration +[lint-install]: https://golangci-lint.run/usage/install/#local-installation + +[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s +[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff + +[natefinch]: https://github.com/natefinch diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md new file mode 100644 index 000000000..869fdfe2b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
+ + diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 000000000..09621c884 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,290 @@ +//go:build windows +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "runtime" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId //revive:disable-line:var-naming ID, not Id + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +//nolint:revive // var-naming: ALL_CAPS +const ( + WRITE_DAC = windows.WRITE_DAC + WRITE_OWNER = windows.WRITE_OWNER + ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY +) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + //revive:disable-next-line:var-naming ID, not Id + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamID struct { + StreamID uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. +func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. + if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(io.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamID + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamID, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamID == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. 
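+//
+// A minimal iteration sketch; src is an assumed io.Reader carrying BackupRead
+// output and dst an assumed io.Writer (error handling abbreviated):
+//
+//	br := winio.NewBackupStreamReader(src)
+//	for {
+//		hdr, err := br.Next()
+//		if err != nil {
+//			break // io.EOF signals the end of the stream sequence
+//		}
+//		if hdr.Id == winio.BackupData {
+//			_, _ = io.Copy(dst, br) // Read yields exactly hdr.Size bytes
+//		}
+//		// any unread stream data is skipped by the next call to Next
+//	}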
+func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamID{ + StreamID: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). +func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. 
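+//
+// A minimal sketch of copying one file's backup streams to another, assuming src
+// and dst are *os.File values opened with backup semantics (e.g. via OpenForBackup):
+//
+//	r := winio.NewBackupFileReader(src, true)
+//	defer r.Close()
+//	w := winio.NewBackupFileWriter(dst, true)
+//	defer w.Close()
+//	if _, err := io.Copy(w, r); err != nil {
+//		// handle error
+//	}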
+func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
+	w := &BackupFileWriter{f, includeSecurity, 0}
+	return w
+}
+
+// Write restores a portion of the file using the provided backup stream.
+func (w *BackupFileWriter) Write(b []byte) (int, error) {
+	var bytesWritten uint32
+	err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+	if err != nil {
+		return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
+	}
+	runtime.KeepAlive(w.f)
+	if int(bytesWritten) != len(b) {
+		return int(bytesWritten), errors.New("not all bytes could be written")
+	}
+	return len(b), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileWriter. It does not
+// close the underlying file.
+func (w *BackupFileWriter) Close() error {
+	if w.ctx != 0 {
+		_ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+		runtime.KeepAlive(w.f)
+		w.ctx = 0
+	}
+	return nil
+}
+
+// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
+// or restore privileges have been acquired.
+//
+// If the file opened was a directory, it cannot be used with Readdir().
+func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
+	winPath, err := syscall.UTF16FromString(path)
+	if err != nil {
+		return nil, err
+	}
+	h, err := syscall.CreateFile(&winPath[0],
+		access,
+		share,
+		nil,
+		createmode,
+		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT,
+		0)
+	if err != nil {
+		err = &os.PathError{Op: "open", Path: path, Err: err}
+		return nil, err
+	}
+	return os.NewFile(uintptr(h), path), nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go
new file mode 100644
index 000000000..1f5bfe2d5
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/doc.go
@@ -0,0 +1,22 @@
+// This package provides utilities for efficiently performing Win32 IO operations in Go.
+// Currently, this package provides support for general IO and management of
+//   - named pipes
+//   - files
+//   - [Hyper-V sockets]
+//
+// This code is similar to Go's [net] package, and uses IO completion ports to avoid
+// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
+//
+// This limits support to Windows Vista and newer operating systems.
+//
+// Additionally, this package provides support for:
+//   - creating and managing GUIDs
+//   - writing to [ETW]
+//   - opening and managing VHDs
+//   - parsing [Windows Image files]
+//   - auto-generating Win32 API code
+//
+// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
+// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
+// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
+package winio
diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go
new file mode 100644
index 000000000..e104dbdfd
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/ea.go
@@ -0,0 +1,137 @@
+package winio
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+)
+
+type fileFullEaInformation struct {
+	NextEntryOffset uint32
+	Flags           uint8
+	NameLength      uint8
+	ValueLength     uint16
+}
+
+var (
+	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
+
+	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
+	errEaNameTooLarge  = errors.New("extended attribute name too large")
+	errEaValueTooLarge = errors.New("extended attribute value too large")
+)
+
+// ExtendedAttribute represents a single Windows EA.
+type ExtendedAttribute struct {
+	Name  string
+	Value []byte
+	Flags uint8
+}
+
+func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
+	var info fileFullEaInformation
+	err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
+	if err != nil {
+		err = errInvalidEaBuffer
+		return ea, nb, err
+	}
+
+	nameOffset := fileFullEaInformationSize
+	nameLen := int(info.NameLength)
+	valueOffset := nameOffset + int(info.NameLength) + 1
+	valueLen := int(info.ValueLength)
+	nextOffset := int(info.NextEntryOffset)
+	if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
+		err = errInvalidEaBuffer
+		return ea, nb, err
+	}
+
+	ea.Name = string(b[nameOffset : nameOffset+nameLen])
+	ea.Value = b[valueOffset : valueOffset+valueLen]
+	ea.Flags = info.Flags
+	if info.NextEntryOffset != 0 {
+		nb = b[info.NextEntryOffset:]
+	}
+	return ea, nb, err
+}
+
+// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
+// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
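+//
+// A minimal round-trip sketch, assuming buf was produced by
+// EncodeExtendedAttributes or read from an EA backup stream:
+//
+//	eas, err := winio.DecodeExtendedAttributes(buf)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, ea := range eas {
+//		fmt.Printf("%s: %d bytes\n", ea.Name, len(ea.Value))
+//	}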
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return eas, err +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 000000000..175a99d3f --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,331 @@ +//go:build windows +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } + +//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (*timeoutError) Error() string { return "i/o timeout" } +func (*timeoutError) Timeout() bool { return true } +func (*timeoutError) Temporary() bool { 
return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation. +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO. +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIO() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. +type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + socket bool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle. +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIO) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil +} + +// closeHandle closes the resources associated with a Win32 handle. +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + _ = cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// IsClosed checks if the file has been closed. +func (f *win32File) IsClosed() bool { + return f.closing.isSet() +} + +// prepareIO prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIO() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever. 
+func ioCompletionProcessor(h syscall.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// todo: helsaawy - create an asyncIO version that takes a context + +// asyncIO processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno + return int(bytes), err + } + + if f.closing.isSet() { + _ = cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if f.closing.isSet() { + err = ErrFileClosed + } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) + } + case <-timeout: + _ = cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. +func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIO() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIO(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. 
+func (f *win32File) Write(b []byte) (int, error) {
+	c, err := f.prepareIO()
+	if err != nil {
+		return 0, err
+	}
+	defer f.wg.Done()
+
+	if f.writeDeadline.timedout.isSet() {
+		return 0, ErrTimeout
+	}
+
+	var bytes uint32
+	err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
+	n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
+	runtime.KeepAlive(b)
+	return n, err
+}
+
+func (f *win32File) SetReadDeadline(deadline time.Time) error {
+	return f.readDeadline.set(deadline)
+}
+
+func (f *win32File) SetWriteDeadline(deadline time.Time) error {
+	return f.writeDeadline.set(deadline)
+}
+
+func (f *win32File) Flush() error {
+	return syscall.FlushFileBuffers(f.handle)
+}
+
+func (f *win32File) Fd() uintptr {
+	return uintptr(f.handle)
+}
+
+func (d *deadlineHandler) set(deadline time.Time) error {
+	d.setLock.Lock()
+	defer d.setLock.Unlock()
+
+	if d.timer != nil {
+		if !d.timer.Stop() {
+			<-d.channel
+		}
+		d.timer = nil
+	}
+	d.timedout.setFalse()
+
+	select {
+	case <-d.channel:
+		d.channelLock.Lock()
+		d.channel = make(chan struct{})
+		d.channelLock.Unlock()
+	default:
+	}
+
+	if deadline.IsZero() {
+		return nil
+	}
+
+	timeoutIO := func() {
+		d.timedout.setTrue()
+		close(d.channel)
+	}
+
+	now := time.Now()
+	duration := deadline.Sub(now)
+	if deadline.After(now) {
+		// Deadline is in the future, set a timer to wait
+		d.timer = time.AfterFunc(duration, timeoutIO)
+	} else {
+		// Deadline is in the past. Cancel all pending IO now.
+		timeoutIO()
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go
new file mode 100644
index 000000000..702950e72
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go
@@ -0,0 +1,92 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+	"os"
+	"runtime"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+// FileBasicInfo contains file access time and file attributes information.
+type FileBasicInfo struct {
+	CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
+	FileAttributes                                          uint32
+	_                                                       uint32 // padding
+}
+
+// GetFileBasicInfo retrieves times and attributes for a file.
+func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
+	bi := &FileBasicInfo{}
+	if err := windows.GetFileInformationByHandleEx(
+		windows.Handle(f.Fd()),
+		windows.FileBasicInfo,
+		(*byte)(unsafe.Pointer(bi)),
+		uint32(unsafe.Sizeof(*bi)),
+	); err != nil {
+		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	return bi, nil
+}
+
+// SetFileBasicInfo sets times and attributes for a file.
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+	if err := windows.SetFileInformationByHandle(
+		windows.Handle(f.Fd()),
+		windows.FileBasicInfo,
+		(*byte)(unsafe.Pointer(bi)),
+		uint32(unsafe.Sizeof(*bi)),
+	); err != nil {
+		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	return nil
+}
+
+// FileStandardInfo contains extended information for the file.
+// FILE_STANDARD_INFO in WinBase.h
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
+type FileStandardInfo struct {
+	AllocationSize, EndOfFile int64
+	NumberOfLinks             uint32
+	DeletePending, Directory  bool
+}
+
+// GetFileStandardInfo retrieves extended information for the file.
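+//
+// A minimal usage sketch, assuming f is an open *os.File:
+//
+//	si, err := winio.GetFileStandardInfo(f)
+//	if err != nil {
+//		// handle error
+//	}
+//	if si.DeletePending {
+//		// the file is marked for deletion
+//	}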
+func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { + si := &FileStandardInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), + windows.FileStandardInfo, + (*byte)(unsafe.Pointer(si)), + uint32(unsafe.Sizeof(*si))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return si, nil +} + +// FileIDInfo contains the volume serial number and file ID for a file. This pair should be +// unique on a system. +type FileIDInfo struct { + VolumeSerialNumber uint64 + FileID [16]byte +} + +// GetFileID retrieves the unique (volume, file ID) pair for a file. +func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := windows.GetFileInformationByHandleEx( + windows.Handle(f.Fd()), + windows.FileIdInfo, + (*byte)(unsafe.Pointer(fileID)), + uint32(unsafe.Sizeof(*fileID)), + ); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 000000000..c88191658 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,575 @@ +//go:build windows +// +build windows + +package winio + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/socket" + "github.com/Microsoft/go-winio/pkg/guid" +) + +const afHVSock = 34 // AF_HYPERV + +// Well known Service and VM IDs +// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards + +// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. +func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 + return guid.GUID{} +} + +// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. +func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff + return guid.GUID{ + Data1: 0xffffffff, + Data2: 0xffff, + Data3: 0xffff, + Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + } +} + +// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector. +func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838 + return guid.GUID{ + Data1: 0xe0e16197, + Data2: 0xdd56, + Data3: 0x4a10, + Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38}, + } +} + +// HvsockGUIDSiloHost is the address of a silo's host partition: +// - The silo host of a hosted silo is the utility VM. +// - The silo host of a silo on a physical host is the physical host. +func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568 + return guid.GUID{ + Data1: 0x36bd0c5c, + Data2: 0x7276, + Data3: 0x4223, + Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68}, + } +} + +// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions. +func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd + return guid.GUID{ + Data1: 0x90db8b89, + Data2: 0xd35, + Data3: 0x4f79, + Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd}, + } +} + +// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition. 
+// Listening on this VmId accepts connections from:
+//   - Inside silos: silo host partition.
+//   - Inside hosted silo: host of the VM.
+//   - Inside VM: VM host.
+//   - Physical host: Not supported.
+func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
+	return guid.GUID{
+		Data1: 0xa42e7cda,
+		Data2: 0xd03f,
+		Data3: 0x480c,
+		Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
+	}
+}
+
+// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
+func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
+	return guid.GUID{
+		Data2: 0xfacb,
+		Data3: 0x11e6,
+		Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
+	}
+}
+
+// An HvsockAddr is an address for an AF_HYPERV socket.
+type HvsockAddr struct {
+	VMID      guid.GUID
+	ServiceID guid.GUID
+}
+
+type rawHvsockAddr struct {
+	Family    uint16
+	_         uint16
+	VMID      guid.GUID
+	ServiceID guid.GUID
+}
+
+var _ socket.RawSockaddr = &rawHvsockAddr{}
+
+// Network returns the address's network name, "hvsock".
+func (*HvsockAddr) Network() string {
+	return "hvsock"
+}
+
+func (addr *HvsockAddr) String() string {
+	return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
+}
+
+// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
+func VsockServiceID(port uint32) guid.GUID {
+	g := hvsockVsockServiceTemplate() // make a copy
+	g.Data1 = port
+	return g
+}
+
+func (addr *HvsockAddr) raw() rawHvsockAddr {
+	return rawHvsockAddr{
+		Family:    afHVSock,
+		VMID:      addr.VMID,
+		ServiceID: addr.ServiceID,
+	}
+}
+
+func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
+	addr.VMID = raw.VMID
+	addr.ServiceID = raw.ServiceID
+}
+
+// Sockaddr returns a pointer to and the size of this struct.
+//
+// Implements the [socket.RawSockaddr] interface, and allows use in
+// [socket.Bind] and [socket.ConnectEx].
+func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
+	return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
+}
+
+// FromBytes populates the rawHvsockAddr from the raw socket address bytes in b.
+func (r *rawHvsockAddr) FromBytes(b []byte) error {
+	n := int(unsafe.Sizeof(rawHvsockAddr{}))
+
+	if len(b) < n {
+		return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
+	}
+
+	copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
+	if r.Family != afHVSock {
+		return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
+	}
+
+	return nil
+}
+
+// HvsockListener is a socket listener for the AF_HYPERV address family.
+type HvsockListener struct {
+	sock *win32File
+	addr HvsockAddr
+}
+
+var _ net.Listener = &HvsockListener{}
+
+// HvsockConn is a connected socket of the AF_HYPERV address family.
+type HvsockConn struct {
+	sock          *win32File
+	local, remote HvsockAddr
+}
+
+var _ net.Conn = &HvsockConn{}
+
+func newHVSocket() (*win32File, error) {
+	fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1)
+	if err != nil {
+		return nil, os.NewSyscallError("socket", err)
+	}
+	f, err := makeWin32File(fd)
+	if err != nil {
+		syscall.Close(fd)
+		return nil, err
+	}
+	f.socket = true
+	return f, nil
+}
+
+// ListenHvsock listens for connections on the specified hvsock address.
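+//
+// A minimal server sketch; the VSOCK-style port 80 is an assumed example value:
+//
+//	addr := winio.HvsockAddr{
+//		VMID:      winio.HvsockGUIDWildcard(), // accept from any partition
+//		ServiceID: winio.VsockServiceID(80),
+//	}
+//	l, err := winio.ListenHvsock(&addr)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer l.Close()
+//	conn, err := l.Accept()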
+func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
+	l := &HvsockListener{addr: *addr}
+	sock, err := newHVSocket()
+	if err != nil {
+		return nil, l.opErr("listen", err)
+	}
+	sa := addr.raw()
+	err = socket.Bind(windows.Handle(sock.handle), &sa)
+	if err != nil {
+		return nil, l.opErr("listen", os.NewSyscallError("socket", err))
+	}
+	err = syscall.Listen(sock.handle, 16)
+	if err != nil {
+		return nil, l.opErr("listen", os.NewSyscallError("listen", err))
+	}
+	return &HvsockListener{sock: sock, addr: *addr}, nil
+}
+
+func (l *HvsockListener) opErr(op string, err error) error {
+	return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
+}
+
+// Addr returns the listener's network address.
+func (l *HvsockListener) Addr() net.Addr {
+	return &l.addr
+}
+
+// Accept waits for the next connection and returns it.
+func (l *HvsockListener) Accept() (_ net.Conn, err error) {
+	sock, err := newHVSocket()
+	if err != nil {
+		return nil, l.opErr("accept", err)
+	}
+	defer func() {
+		if sock != nil {
+			sock.Close()
+		}
+	}()
+	c, err := l.sock.prepareIO()
+	if err != nil {
+		return nil, l.opErr("accept", err)
+	}
+	defer l.sock.wg.Done()
+
+	// AcceptEx, per documentation, requires an extra 16 bytes per address.
+	//
+	// https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
+	const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
+	var addrbuf [addrlen * 2]byte
+
+	var bytes uint32
+	err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
+	if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
+		return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
+	}
+
+	conn := &HvsockConn{
+		sock: sock,
+	}
+	// The local address returned in the AcceptEx buffer is the same as the Listener socket's
+	// address. However, the service GUID reported by GetSockName is different from the Listener's
+	// socket, and is sometimes the same as the local address of the socket that dialed the
+	// address, with the service GUID.Data1 incremented, but other times is different.
+	// todo: does the local address matter? is the listener's address or the actual address appropriate?
+	conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
+	conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
+
+	// initialize the accepted socket and update its properties with those of the listening socket
+	if err = windows.Setsockopt(windows.Handle(sock.handle),
+		windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
+		(*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
+		return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
+	}
+
+	sock = nil
+	return conn, nil
+}
+
+// Close closes the listener, causing any pending Accept calls to fail.
+func (l *HvsockListener) Close() error {
+	return l.sock.Close()
+}
+
+// HvsockDialer configures and dials a Hyper-V Socket (i.e., [HvsockConn]).
+type HvsockDialer struct {
+	// Deadline is the time the Dial operation must connect before erroring.
+	Deadline time.Time
+
+	// Retries is the number of additional connects to try if the connection times out, is refused,
+	// or the host is unreachable
+	Retries uint
+
+	// RetryWait is the time to wait after a connection error to retry
+	RetryWait time.Duration
+
+	rt *time.Timer // redial wait timer
+}
+
+// Dial the Hyper-V socket at addr.
+//
+// See [HvsockDialer.Dial] for more information.
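+//
+// A minimal client sketch, assuming vmID identifies the target partition and the
+// service listens on the VSOCK-style port 80:
+//
+//	addr := winio.HvsockAddr{VMID: vmID, ServiceID: winio.VsockServiceID(80)}
+//	conn, err := winio.Dial(context.Background(), &addr)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer conn.Close()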
+func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + return (&HvsockDialer{}).Dial(ctx, addr) +} + +// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. +// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between +// retries. +// +// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. +func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + op := "dial" + // create the conn early to use opErr() + conn = &HvsockConn{ + remote: *addr, + } + + if !d.Deadline.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, d.Deadline) + defer cancel() + } + + // preemptive timeout/cancellation check + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) + } + + sock, err := newHVSocket() + if err != nil { + return nil, conn.opErr(op, err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + + sa := addr.raw() + err = socket.Bind(windows.Handle(sock.handle), &sa) + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("bind", err)) + } + + c, err := sock.prepareIO() + if err != nil { + return nil, conn.opErr(op, err) + } + defer sock.wg.Done() + var bytes uint32 + for i := uint(0); i <= d.Retries; i++ { + err = socket.ConnectEx( + windows.Handle(sock.handle), + &sa, + nil, // sendBuf + 0, // sendDataLen + &bytes, + (*windows.Overlapped)(unsafe.Pointer(&c.o))) + _, err = sock.asyncIO(c, nil, bytes, err) + if i < d.Retries && canRedial(err) { + if err = d.redialWait(ctx); err == nil { + continue + } + } + break + } + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) + } + + // update the connection properties, so shutdown can be used + if err = windows.Setsockopt( + windows.Handle(sock.handle), + windows.SOL_SOCKET, + windows.SO_UPDATE_CONNECT_CONTEXT, + nil, // optvalue + 0, // optlen + ); err != nil { + return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) + } + + // get the local name + var sal rawHvsockAddr + err = socket.GetSockName(windows.Handle(sock.handle), &sal) + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) + } + conn.local.fromRaw(&sal) + + // one last check for timeout, since asyncIO doesn't check the context + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) + } + + conn.sock = sock + sock = nil + + return conn, nil +} + +// redialWait waits before attempting to redial, resetting the timer as appropriate. +func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { + if d.RetryWait == 0 { + return nil + } + + if d.rt == nil { + d.rt = time.NewTimer(d.RetryWait) + } else { + // should already be stopped and drained + d.rt.Reset(d.RetryWait) + } + + select { + case <-ctx.Done(): + case <-d.rt.C: + return nil + } + + // stop and drain the timer + if !d.rt.Stop() { + <-d.rt.C + } + return ctx.Err() +} + +// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. 
+func canRedial(err error) bool { + //nolint:errorlint // guaranteed to be an Errno + switch err { + case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, + windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: + return true + default: + return false + } +} + +func (conn *HvsockConn) opErr(op string, err error) error { + // translate from "file closed" to "socket closed" + if errors.Is(err, ErrFileClosed) { + err = socket.ErrSocketClosed + } + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIO() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsarecv", eno) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIO() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsasend", eno) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. +func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + +// shutdown disables sending or receiving on a socket. +func (conn *HvsockConn) shutdown(how int) error { + if conn.IsClosed() { + return socket.ErrSocketClosed + } + + err := syscall.Shutdown(conn.sock.handle, how) + if err != nil { + // If the connection was closed, shutdowns fail with "not connected" + if errors.Is(err, windows.WSAENOTCONN) || + errors.Is(err, windows.WSAESHUTDOWN) { + err = socket.ErrSocketClosed + } + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket, preventing future read operations. +func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(syscall.SHUT_RD) + if err != nil { + return conn.opErr("closeread", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(syscall.SHUT_WR) + if err != nil { + return conn.opErr("closewrite", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. 
+func (conn *HvsockConn) RemoteAddr() net.Addr {
+	return &conn.remote
+}
+
+// SetDeadline implements the net.Conn SetDeadline method.
+func (conn *HvsockConn) SetDeadline(t time.Time) error {
+	// todo: implement `SetDeadline` for `win32File`
+	if err := conn.SetReadDeadline(t); err != nil {
+		return fmt.Errorf("set read deadline: %w", err)
+	}
+	if err := conn.SetWriteDeadline(t); err != nil {
+		return fmt.Errorf("set write deadline: %w", err)
+	}
+	return nil
+}
+
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
+	return conn.sock.SetReadDeadline(t)
+}
+
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
+	return conn.sock.SetWriteDeadline(t)
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
new file mode 100644
index 000000000..1f6538817
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
@@ -0,0 +1,2 @@
+// This package contains Win32 filesystem functionality.
+package fs
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
new file mode 100644
index 000000000..509b3ec64
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
@@ -0,0 +1,202 @@
+//go:build windows
+
+package fs
+
+import (
+	"golang.org/x/sys/windows"
+
+	"github.com/Microsoft/go-winio/internal/stringbuffer"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go
+
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
+//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
+
+const NullHandle windows.Handle = 0
+
+// AccessMask defines standard, specific, and generic rights.
+//
+// Bitmask:
+//	 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+//	 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+//	+---------------+---------------+-------------------------------+
+//	|G|G|G|G|Resvd|A| StandardRights|        SpecificRights         |
+//	|R|W|E|A|     |S|               |                               |
+//	+-+-------------+---------------+-------------------------------+
+//
+//	GR     Generic Read
+//	GW     Generic Write
+//	GE     Generic Execute
+//	GA     Generic All
+//	Resvd  Reserved
+//	AS     Access Security System
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
+//
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
+type AccessMask = windows.ACCESS_MASK
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// Not actually any.
+ // + // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device" + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters + FILE_ANY_ACCESS AccessMask = 0 + + // Specific Object Access + // from ntioapi.h + + FILE_READ_DATA AccessMask = (0x0001) // file & pipe + FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory + + FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe + FILE_ADD_FILE AccessMask = (0x0002) // directory + + FILE_APPEND_DATA AccessMask = (0x0004) // file + FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory + FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe + + FILE_READ_EA AccessMask = (0x0008) // file & directory + FILE_READ_PROPERTIES AccessMask = FILE_READ_EA + + FILE_WRITE_EA AccessMask = (0x0010) // file & directory + FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA + + FILE_EXECUTE AccessMask = (0x0020) // file + FILE_TRAVERSE AccessMask = (0x0020) // directory + + FILE_DELETE_CHILD AccessMask = (0x0040) // directory + + FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all + + FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all + + FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF) + FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE) + FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE) + FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE) + + SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF + + // Standard Access + // from ntseapi.h + + DELETE AccessMask = 0x0001_0000 + READ_CONTROL AccessMask = 0x0002_0000 + WRITE_DAC AccessMask = 0x0004_0000 + WRITE_OWNER AccessMask = 0x0008_0000 + SYNCHRONIZE AccessMask = 0x0010_0000 + + STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000 + + STANDARD_RIGHTS_READ AccessMask = READ_CONTROL + STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL + STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL + + STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000 +) + +type FileShareMode uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + FILE_SHARE_NONE FileShareMode = 0x00 + FILE_SHARE_READ FileShareMode = 0x01 + FILE_SHARE_WRITE FileShareMode = 0x02 + FILE_SHARE_DELETE FileShareMode = 0x04 + FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 +) + +type FileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // from winbase.h + + CREATE_NEW FileCreationDisposition = 0x01 + CREATE_ALWAYS FileCreationDisposition = 0x02 + OPEN_EXISTING FileCreationDisposition = 0x03 + OPEN_ALWAYS FileCreationDisposition = 0x04 + TRUNCATE_EXISTING FileCreationDisposition = 0x05 +) + +// CreateFile and co. take flags or attributes together as one parameter. +// Define alias until we can use generics to allow both + +// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants +type FileFlagOrAttribute uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
+const ( // from winnt.h
+	FILE_FLAG_WRITE_THROUGH       FileFlagOrAttribute = 0x8000_0000
+	FILE_FLAG_OVERLAPPED          FileFlagOrAttribute = 0x4000_0000
+	FILE_FLAG_NO_BUFFERING        FileFlagOrAttribute = 0x2000_0000
+	FILE_FLAG_RANDOM_ACCESS       FileFlagOrAttribute = 0x1000_0000
+	FILE_FLAG_SEQUENTIAL_SCAN     FileFlagOrAttribute = 0x0800_0000
+	FILE_FLAG_DELETE_ON_CLOSE     FileFlagOrAttribute = 0x0400_0000
+	FILE_FLAG_BACKUP_SEMANTICS    FileFlagOrAttribute = 0x0200_0000
+	FILE_FLAG_POSIX_SEMANTICS     FileFlagOrAttribute = 0x0100_0000
+	FILE_FLAG_OPEN_REPARSE_POINT  FileFlagOrAttribute = 0x0020_0000
+	FILE_FLAG_OPEN_NO_RECALL      FileFlagOrAttribute = 0x0010_0000
+	FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
+)
+
+type FileSQSFlag = FileFlagOrAttribute
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const ( // from winbase.h
+	SECURITY_ANONYMOUS      FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
+	SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
+	SECURITY_IMPERSONATION  FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
+	SECURITY_DELEGATION     FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)
+
+	SECURITY_SQOS_PRESENT     FileSQSFlag = 0x00100000
+	SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000
+)
+
+// GetFinalPathNameByHandle flags
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters
+type GetFinalPathFlag uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	GetFinalPathDefaultFlag GetFinalPathFlag = 0x0
+
+	FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0
+	FILE_NAME_OPENED     GetFinalPathFlag = 0x8
+
+	VOLUME_NAME_DOS  GetFinalPathFlag = 0x0
+	VOLUME_NAME_GUID GetFinalPathFlag = 0x1
+	VOLUME_NAME_NT   GetFinalPathFlag = 0x2
+	VOLUME_NAME_NONE GetFinalPathFlag = 0x4
+)
+
+// GetFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle
+// with the given handle and flags. It transparently takes care of creating a buffer of the
+// correct size for the call.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew
+func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) {
+	b := stringbuffer.NewWString()
+	//TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n?
+	for {
+		n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags))
+		if err != nil {
+			return "", err
+		}
+		// If the buffer wasn't large enough, n will be the total size needed (including null terminator).
+		// Resize and try again.
+		if n > b.Cap() {
+			b.ResizeTo(n)
+			continue
+		}
+		// If the buffer is large enough, n will be the size not including the null terminator.
+		// Convert to a Go string and return.
+ return b.String(), nil + } +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go new file mode 100644 index 000000000..81760ac67 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go @@ -0,0 +1,12 @@ +package fs + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level +type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` + +// Impersonation levels +const ( + SecurityAnonymous SecurityImpersonationLevel = 0 + SecurityIdentification SecurityImpersonationLevel = 1 + SecurityImpersonation SecurityImpersonationLevel = 2 + SecurityDelegation SecurityImpersonationLevel = 3 +) diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go new file mode 100644 index 000000000..e2f7bb24e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -0,0 +1,64 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package fs + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procCreateFileW = modkernel32.NewProc("CreateFileW") +) + +func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go new file mode 100644 index 000000000..7e82f9afa --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go @@ -0,0 +1,20 @@ +package socket + +import ( + "unsafe" +) + +// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. 
The
+// struct must meet the Win32 sockaddr requirements specified here:
+// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
+//
+// Specifically, the struct size must be at least as large as an int16 (unsigned short)
+// for the address family.
+type RawSockaddr interface {
+	// Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
+	// for the RawSockaddr's data to be overwritten by syscalls (if necessary).
+	//
+	// It is the caller's responsibility to validate that the values are valid; invalid
+	// pointers or sizes can cause a panic.
+	Sockaddr() (unsafe.Pointer, int32, error)
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
new file mode 100644
index 000000000..aeb7b7250
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
@@ -0,0 +1,179 @@
+//go:build windows
+
+package socket
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"syscall"
+	"unsafe"
+
+	"github.com/Microsoft/go-winio/pkg/guid"
+	"golang.org/x/sys/windows"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
+
+//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
+//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
+//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
+
+const socketError = uintptr(^uint32(0))
+
+var (
+	// todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
+
+	ErrBufferSize     = errors.New("buffer size")
+	ErrAddrFamily     = errors.New("address family")
+	ErrInvalidPointer = errors.New("invalid pointer")
+	ErrSocketClosed   = fmt.Errorf("socket closed: %w", net.ErrClosed)
+)
+
+// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)
+
+// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
+// If rsa is not large enough, [windows.WSAEFAULT] is returned.
+func GetSockName(s windows.Handle, rsa RawSockaddr) error {
+	ptr, l, err := rsa.Sockaddr()
+	if err != nil {
+		return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+	}
+
+	// although getsockname returns WSAEFAULT if the buffer is too small, it does not set
+	// &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy
+	return getsockname(s, ptr, &l)
+}
+
+// GetPeerName returns the remote address the socket is connected to.
+//
+// See [GetSockName] for more information.
+func GetPeerName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return getpeername(s, ptr, &l) +} + +func Bind(s windows.Handle, rsa RawSockaddr) (err error) { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return bind(s, ptr, l) +} + +// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the +// their sockaddr interface, so they cannot be used with HvsockAddr +// Replicate functionality here from +// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go + +// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at +// runtime via a WSAIoctl call: +// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks + +type runtimeFunc struct { + id guid.GUID + once sync.Once + addr uintptr + err error +} + +func (f *runtimeFunc) Load() error { + f.once.Do(func() { + var s windows.Handle + s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP) + if f.err != nil { + return + } + defer windows.CloseHandle(s) //nolint:errcheck + + var n uint32 + f.err = windows.WSAIoctl(s, + windows.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&f.id)), + uint32(unsafe.Sizeof(f.id)), + (*byte)(unsafe.Pointer(&f.addr)), + uint32(unsafe.Sizeof(f.addr)), + &n, + nil, // overlapped + 0, // completionRoutine + ) + }) + return f.err +} + +var ( + // todo: add `AcceptEx` and `GetAcceptExSockaddrs` + WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS + Data1: 0x25a207b9, + Data2: 0xddf3, + Data3: 0x4660, + Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, + } + + connectExFunc = runtimeFunc{id: WSAID_CONNECTEX} +) + +func ConnectEx( + fd windows.Handle, + rsa RawSockaddr, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) error { + if err := connectExFunc.Load(); err != nil { + return fmt.Errorf("failed to load ConnectEx function pointer: %w", err) + } + ptr, n, err := rsa.Sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +// BOOL LpfnConnectex( +// [in] SOCKET s, +// [in] const sockaddr *name, +// [in] int namelen, +// [in, optional] PVOID lpSendBuffer, +// [in] DWORD dwSendDataLength, +// [out] LPDWORD lpdwBytesSent, +// [in] LPOVERLAPPED lpOverlapped +// ) + +func connectEx( + s windows.Handle, + name unsafe.Pointer, + namelen int32, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) (err error) { + // todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, + 7, + uintptr(s), + uintptr(name), + uintptr(namelen), + uintptr(unsafe.Pointer(sendBuf)), + uintptr(sendDataLen), + uintptr(unsafe.Pointer(bytesSent)), + uintptr(unsafe.Pointer(overlapped)), + 0, + 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go new file mode 100644 index 000000000..6d2e1a9e4 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -0,0 
+1,72 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package socket + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procbind = modws2_32.NewProc("bind") + procgetpeername = modws2_32.NewProc("getpeername") + procgetsockname = modws2_32.NewProc("getsockname") +) + +func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go new file mode 100644 index 000000000..7ad505702 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -0,0 +1,132 @@ +package stringbuffer + +import ( + "sync" + "unicode/utf16" +) + +// TODO: worth exporting and using in mkwinsyscall? + +// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate +// large path strings: +// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310. +const MinWStringCap = 310 + +// use *[]uint16 since []uint16 creates an extra allocation where the slice header +// is copied to heap and then referenced via pointer in the interface header that sync.Pool +// stores. +var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly + New: func() interface{} { + b := make([]uint16, MinWStringCap) + return &b + }, +} + +func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) } + +// freeBuffer copies the slice header data, and puts a pointer to that in the pool. +// This avoids taking a pointer to the slice header in WString, which can be set to nil. +func freeBuffer(b []uint16) { pathPool.Put(&b) } + +// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings +// for interacting with Win32 APIs. +// Sizes are specified as uint32 and not int. +// +// It is not thread safe. +type WString struct { + // type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future. 
+
+	// raw buffer
+	b []uint16
+}
+
+// NewWString returns a [WString] allocated from a shared pool with an
+// initial capacity of at least [MinWStringCap].
+// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
+//
+// The buffer should be freed via [WString.Free].
+func NewWString() *WString {
+	return &WString{
+		b: newBuffer(),
+	}
+}
+
+func (b *WString) Free() {
+	if b.empty() {
+		return
+	}
+	freeBuffer(b.b)
+	b.b = nil
+}
+
+// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
+// previous buffer back into the pool.
+func (b *WString) ResizeTo(c uint32) uint32 {
+	// already sufficient (or c is 0)
+	if c <= b.Cap() {
+		return b.Cap()
+	}
+
+	if c <= MinWStringCap {
+		c = MinWStringCap
+	}
+	// allocate at least double the buffer size, as is done in [bytes.Buffer] and other places
+	if c <= 2*b.Cap() {
+		c = 2 * b.Cap()
+	}
+
+	b2 := make([]uint16, c)
+	if !b.empty() {
+		copy(b2, b.b)
+		freeBuffer(b.b)
+	}
+	b.b = b2
+	return c
+}
+
+// Buffer returns the underlying []uint16 buffer.
+func (b *WString) Buffer() []uint16 {
+	if b.empty() {
+		return nil
+	}
+	return b.b
+}
+
+// Pointer returns a pointer to the first uint16 in the buffer.
+// If [WString.Free] has already been called, the pointer will be nil.
+func (b *WString) Pointer() *uint16 {
+	if b.empty() {
+		return nil
+	}
+	return &b.b[0]
+}
+
+// String returns the UTF-8 encoding of the UTF-16 string in the buffer.
+//
+// It assumes that the data is null-terminated.
+func (b *WString) String() string {
+	// Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
+	// and would make this code Windows-only, which makes no sense.
+	// So copy UTF16ToString code into here.
+	// If other windows-specific code is added, switch to [windows.UTF16ToString]
+
+	s := b.b
+	for i, v := range s {
+		if v == 0 {
+			s = s[:i]
+			break
+		}
+	}
+	return string(utf16.Decode(s))
+}
+
+// Cap returns the underlying buffer capacity.
+func (b *WString) Cap() uint32 { + if b.empty() { + return 0 + } + return b.cap() +} + +func (b *WString) cap() uint32 { return uint32(cap(b.b)) } +func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 000000000..25cc81103 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,525 @@ +//go:build windows +// +build windows + +package winio + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "runtime" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/fs" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl + Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl +} + +type ntStatus int32 + +func (status ntStatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. 
+ ErrPipeListenerClosed = net.ErrClosed + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + if err := f.SetReadDeadline(t); err != nil { + return err + } + return f.SetWriteDeadline(t) +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { //nolint:errorlint + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. + err = nil + } + return n, err +} + +func (pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. +func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { + for { + select { + case <-ctx.Done(): + return syscall.Handle(0), ctx.Err() + default: + wh, err := fs.CreateFile(*path, + access, + 0, // mode + nil, // security attributes + fs.OPEN_EXISTING, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, + 0, // template file handle + ) + h := syscall.Handle(wh) + if err == nil { + return h, nil + } + if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(10 * time.Millisecond) + } + } +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) 
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(2 * time.Second) + } + ctx, cancel := context.WithDeadline(context.Background(), absTimeout) + defer cancel() + conn, err := DialPipeContext(ctx, path) + if errors.Is(err, context.DeadlineExceeded) { + return nil, ErrTimeout + } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. +func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { + return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) +} + +// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` +// cancellation or timeout. +func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + var err error + var h syscall.Handle + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) + if err != nil { + return nil, err + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&windows.PIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], + &ntPath, + 0, + 0, + ).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + oa.Attributes = windows.OBJ_CASE_INSENSITIVE + + // The security descriptor is only needed for the first pipe. + if first { + if sd != nil { + l := uint32(len(sd)) + sdb := localAlloc(0, l) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. 
+ var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %w", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: windows.SE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } + } + + typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) + if c.MessageMode { + typ |= windows.FILE_PIPE_MESSAGE_TYPE + } + + disposition := uint32(windows.FILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = windows.FILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. + access = syscall.SYNCHRONIZE + } + + timeout := int64(-50 * 10000) // 50ms + + var ( + h syscall.Handle + iosb ioStatusBlock + ) + err = ntCreateNamedPipeFile(&h, + access, + &oa, + &iosb, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, + disposition, + 0, + typ, + 0, + 0, + 0xffffffff, + uint32(c.InputBufferSize), + uint32(c.OutputBufferSize), + &timeout).Err() + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + runtime.KeepAlive(ntPath) + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, nil, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { + p, err := l.makeServerPipe() + if err != nil { + return nil, err + } + + // Wait for the client to connect. + ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. + p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno + err = ErrPipeListenerClosed + } + } + return p, err +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + var ( + p *win32File + err error + ) + for { + p, err = l.makeConnectedServerPipe() + // If the connection was immediately closed by the client, try + // again. + if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno + break + } + } + responseCh <- acceptResponse{p, err} + closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno + } + } + syscall.Close(l.firstHandle) + l.firstHandle = 0 + // Notify Close() and Accept() callers that the handle has been closed. + close(l.doneCh) +} + +// PipeConfig contain configuration for the pipe listener. +type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. 
+ MessageMode bool + + // InputBufferSize specifies the size of the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size of the output buffer, in bytes. + OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. +func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + l := &win32PipeListener{ + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIO() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIO(c, nil, 0, err) + if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 000000000..48ce4e924 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,232 @@ +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" //nolint:gosec // not used for secure application + "encoding" + "encoding/binary" + "fmt" + "strconv" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122 section 4.1.1. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 // RFC 4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. 
+type Version uint8 + +func (v Version) String() string { + return strconv.FormatUint(uint64(v), 10) +} + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. +func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() //nolint:gosec // not used for secure application + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. +func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. 
+func FromString(s string) (GUID, error) {
+	if len(s) != 36 {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+
+	var g GUID
+
+	data1, err := strconv.ParseUint(s[0:8], 16, 32)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data1 = uint32(data1)
+
+	data2, err := strconv.ParseUint(s[9:13], 16, 16)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data2 = uint16(data2)
+
+	data3, err := strconv.ParseUint(s[14:18], 16, 16)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data3 = uint16(data3)
+
+	for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
+		v, err := strconv.ParseUint(s[x:x+2], 16, 8)
+		if err != nil {
+			return GUID{}, fmt.Errorf("invalid GUID %q", s)
+		}
+		g.Data4[i] = uint8(v)
+	}
+
+	return g, nil
+}
+
+func (g *GUID) setVariant(v Variant) {
+	d := g.Data4[0]
+	switch v {
+	case VariantNCS:
+		d = (d & 0x7f)
+	case VariantRFC4122:
+		d = (d & 0x3f) | 0x80
+	case VariantMicrosoft:
+		d = (d & 0x1f) | 0xc0
+	case VariantFuture:
+		d = (d & 0x0f) | 0xe0
+	case VariantUnknown:
+		fallthrough
+	default:
+		panic(fmt.Sprintf("invalid variant: %d", v))
+	}
+	g.Data4[0] = d
+}
+
+// Variant returns the GUID variant, as defined in RFC 4122.
+func (g GUID) Variant() Variant {
+	b := g.Data4[0]
+	if b&0x80 == 0 {
+		return VariantNCS
+	} else if b&0xc0 == 0x80 {
+		return VariantRFC4122
+	} else if b&0xe0 == 0xc0 {
+		return VariantMicrosoft
+	} else if b&0xe0 == 0xe0 {
+		return VariantFuture
+	}
+	return VariantUnknown
+}
+
+func (g *GUID) setVersion(v Version) {
+	g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
+}
+
+// Version returns the GUID version, as defined in RFC 4122.
+func (g GUID) Version() Version {
+	return Version((g.Data3 & 0xF000) >> 12)
+}
+
+// MarshalText returns the textual representation of the GUID.
+func (g GUID) MarshalText() ([]byte, error) {
+	return []byte(g.String()), nil
+}
+
+// UnmarshalText takes the textual representation of a GUID, and unmarshals it
+// into this GUID.
+func (g *GUID) UnmarshalText(text []byte) error {
+	g2, err := FromString(string(text))
+	if err != nil {
+		return err
+	}
+	*g = g2
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
new file mode 100644
index 000000000..805bd3548
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
@@ -0,0 +1,16 @@
+//go:build !windows
+// +build !windows
+
+package guid
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type because golang.org/x/sys/windows.GUID
+// is only available to builds targeted at `windows`. The representation matches
+// that used by native Windows code.
+type GUID struct {
+	Data1 uint32
+	Data2 uint16
+	Data3 uint16
+	Data4 [8]byte
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
new file mode 100644
index 000000000..27e45ee5c
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
@@ -0,0 +1,13 @@
+//go:build windows
+// +build windows
+
+package guid
+
+import "golang.org/x/sys/windows"
+
+// GUID represents a GUID/UUID.
It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go new file mode 100644 index 000000000..4076d3132 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT. + +package guid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[VariantUnknown-0] + _ = x[VariantNCS-1] + _ = x[VariantRFC4122-2] + _ = x[VariantMicrosoft-3] + _ = x[VariantFuture-4] +} + +const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture" + +var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33} + +func (i Variant) String() string { + if i >= Variant(len(_Variant_index)-1) { + return "Variant(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Variant_name[_Variant_index[i]:_Variant_index[i+1]] +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 000000000..0ff9dac90 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,197 @@ +//go:build windows +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + //revive:disable-next-line:var-naming ALL_CAPS + SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED + + //revive:disable-next-line:var-naming ALL_CAPS + ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" + SeSecurityPrivilege = "SeSecurityPrivilege" +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. 
+type PrivilegeError struct {
+	privileges []uint64
+}
+
+func (e *PrivilegeError) Error() string {
+	s := "Could not enable privilege "
+	if len(e.privileges) > 1 {
+		s = "Could not enable privileges "
+	}
+	for i, p := range e.privileges {
+		if i != 0 {
+			s += ", "
+		}
+		s += `"`
+		s += getPrivilegeName(p)
+		s += `"`
+	}
+	return s
+}
+
+// RunWithPrivilege enables a single privilege for a function call.
+func RunWithPrivilege(name string, fn func() error) error {
+	return RunWithPrivileges([]string{name}, fn)
+}
+
+// RunWithPrivileges enables privileges for a function call.
+func RunWithPrivileges(names []string, fn func() error) error {
+	privileges, err := mapPrivileges(names)
+	if err != nil {
+		return err
+	}
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+	token, err := newThreadToken()
+	if err != nil {
+		return err
+	}
+	defer releaseThreadToken(token)
+	err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
+	if err != nil {
+		return err
+	}
+	return fn()
+}
+
+func mapPrivileges(names []string) ([]uint64, error) {
+	privileges := make([]uint64, 0, len(names))
+	privNameMutex.Lock()
+	defer privNameMutex.Unlock()
+	for _, name := range names {
+		p, ok := privNames[name]
+		if !ok {
+			err := lookupPrivilegeValue("", name, &p)
+			if err != nil {
+				return nil, err
+			}
+			privNames[name] = p
+		}
+		privileges = append(privileges, p)
+	}
+	return privileges, nil
+}
+
+// EnableProcessPrivileges enables privileges globally for the process.
+func EnableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
+}
+
+// DisableProcessPrivileges disables privileges globally for the process.
+func DisableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, 0)
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error {
+	privileges, err := mapPrivileges(names)
+	if err != nil {
+		return err
+	}
+
+	p := windows.CurrentProcess()
+	var token windows.Token
+	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+	if err != nil {
+		return err
+	}
+
+	defer token.Close()
+	return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
+	var b bytes.Buffer
+	_ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+	for _, p := range privileges {
+		_ = binary.Write(&b, binary.LittleEndian, p)
+		_ = binary.Write(&b, binary.LittleEndian, action)
+	}
+	prevState := make([]byte, b.Len())
+	reqSize := uint32(0)
+	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+	if !success {
+		return err
+	}
+	if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
+		return &PrivilegeError{privileges}
+	}
+	return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+	var nameBuffer [256]uint16
+	bufSize := uint32(len(nameBuffer))
+	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %d>", luid)
+	}
+
+	var displayNameBuffer [256]uint16
+	displayBufSize := uint32(len(displayNameBuffer))
+	var langID uint32
+	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+	}
+
+	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) {
+	err :=
impersonateSelf(windows.SecurityImpersonation) + if err != nil { + return 0, err + } + + var token windows.Token + err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + if err != nil { + rerr := revertToSelf() + if rerr != nil { + panic(rerr) + } + return 0, err + } + return token, nil +} + +func releaseThreadToken(h windows.Token) { + err := revertToSelf() + if err != nil { + panic(err) + } + h.Close() +} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go new file mode 100644 index 000000000..67d1a104a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/reparse.go @@ -0,0 +1,131 @@ +//go:build windows +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "strings" + "unicode/utf16" + "unsafe" +) + +const ( + reparseTagMountPoint = 0xA0000003 + reparseTagSymlink = 0xA000000C +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 +} + +// ReparsePoint describes a Win32 symlink or mount point. +type ReparsePoint struct { + Target string + IsMountPoint bool +} + +// UnsupportedReparsePointError is returned when trying to decode a non-symlink or +// mount point reparse point. +type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. +func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. 
+ target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + _ = binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + _ = binary.Write(&b, binary.LittleEndian, flags) + } + + _ = binary.Write(&b, binary.LittleEndian, ntTarget16) + _ = binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 000000000..5550ef6b6 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,144 @@ +//go:build windows +// +build windows + +package winio + +import ( + "errors" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch { + case errors.Is(e.Err, windows.ERROR_INVALID_SID): + s = "the security ID structure is invalid" + case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +func (e *AccountLookupError) Unwrap() error { return e.Err } + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +func (e *SddlConversionError) Unwrap() error { return e.Err } + +// LookupSidByName looks up the SID of an account by name +// +//revive:disable-next-line:var-naming SID, not Sid +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + 
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + localFree(uintptr(unsafe.Pointer(strBuffer))) + return sid, nil +} + +// LookupNameBySid looks up the name of an account by SID +// +//revive:disable-next-line:var-naming SID, not Sid +func LookupNameBySid(sid string) (name string, err error) { + if sid == "" { + return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} + } + + sidBuffer, err := windows.UTF16PtrFromString(sid) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + var sidPtr *byte + if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { + return "", &AccountLookupError{sid, err} + } + defer localFree(uintptr(unsafe.Pointer(sidPtr))) + + var nameSize, refDomainSize, sidNameUse uint32 + err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + return "", &AccountLookupError{sid, err} + } + + nameBuffer := make([]uint16, nameSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + name = windows.UTF16ToString(nameBuffer) + return name, nil +} + +func SddlToSecurityDescriptor(sddl string) ([]byte, error) { + var sdBuffer uintptr + err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + if err != nil { + return nil, &SddlConversionError{sddl, err} + } + defer localFree(sdBuffer) + sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) + copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) + return sd, nil +} + +func SecurityDescriptorToSddl(sd []byte) (string, error) { + var sddl *uint16 + // The returned string length seems to include an arbitrary number of terminating NULs. + // Don't use it. 
+ err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 000000000..a6ca111b3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,5 @@ +//go:build windows + +package winio + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/vendor/github.com/Microsoft/go-winio/tools.go b/vendor/github.com/Microsoft/go-winio/tools.go new file mode 100644 index 000000000..2aa045843 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package winio + +import _ "golang.org/x/tools/cmd/stringer" diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 000000000..469b16f63 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,419 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") +) + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + err = errnoErr(e1) + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + err = 
errnoErr(e1) + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSidToSid(str *uint16, sid **byte) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := 
syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + err = errnoErr(e1) + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + 
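The wrappers in this file are machine-generated: the package declares each Windows API it needs as a `//sys` annotation, and mkwinsyscall (golang.org/x/sys/windows/mkwinsyscall) emits the lazy proc table plus one thin wrapper per annotation, translating a zero (FALSE) return into a Go error. A minimal sketch of that correspondence, using a hypothetical FlushFileBuffers wrapper that is not part of this file:

// Hypothetical //sys annotation as it would appear in the package source:
//
//sys flushFileBuffers(h syscall.Handle) (err error) = kernel32.FlushFileBuffers
//
// mkwinsyscall would then emit a proc handle and a wrapper shaped exactly
// like the functions above: resolve the proc lazily, forward the arguments
// through syscall.Syscall, and map a zero return value to an errno.

var procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers")

func flushFileBuffers(h syscall.Handle) (err error) {
	r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(h), 0, 0)
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}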
+func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntStatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntStatus(r0) + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntStatus(r0) + return +} + +func rtlNtStatusToDosError(status ntStatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/AUTHORS b/vendor/github.com/ProtonMail/go-crypto/AUTHORS new file mode 100644 index 000000000..2b00ddba0 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS new file mode 100644 index 000000000..1fbd3e976 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/ProtonMail/go-crypto/LICENSE b/vendor/github.com/ProtonMail/go-crypto/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ProtonMail/go-crypto/PATENTS b/vendor/github.com/ProtonMail/go-crypto/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go new file mode 100644 index 000000000..c85e6befe --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go @@ -0,0 +1,381 @@ +package bitcurves + +// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2011 ThePiachu. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bitelliptic implements several Koblitz elliptic curves over prime +// fields. + +// This package operates, internally, on Jacobian coordinates. For a given +// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1) +// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole +// calculation can be performed within the transform (as in ScalarMult and +// ScalarBaseMult). 
But even for Add and Double, it's faster to apply and
+// reverse the transform than to operate in affine coordinates.
+
+import (
+	"crypto/elliptic"
+	"io"
+	"math/big"
+	"sync"
+)
+
+// A BitCurve represents a Koblitz Curve with a=0.
+// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
+type BitCurve struct {
+	Name    string
+	P       *big.Int // the order of the underlying field
+	N       *big.Int // the order of the base point
+	B       *big.Int // the constant of the BitCurve equation
+	Gx, Gy  *big.Int // (x,y) of the base point
+	BitSize int      // the size of the underlying field
+}
+
+// Params returns the parameters of the given BitCurve (see BitCurve struct)
+func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) {
+	cp = new(elliptic.CurveParams)
+	cp.Name = bitCurve.Name
+	cp.P = bitCurve.P
+	cp.N = bitCurve.N
+	cp.Gx = bitCurve.Gx
+	cp.Gy = bitCurve.Gy
+	cp.BitSize = bitCurve.BitSize
+	return cp
+}
+
+// IsOnCurve returns true if the given (x,y) lies on the BitCurve.
+func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool {
+	// y² = x³ + b
+	y2 := new(big.Int).Mul(y, y) //y²
+	y2.Mod(y2, bitCurve.P)       //y²%P
+
+	x3 := new(big.Int).Mul(x, x) //x²
+	x3.Mul(x3, x)                //x³
+
+	x3.Add(x3, bitCurve.B) //x³+B
+	x3.Mod(x3, bitCurve.P) //(x³+B)%P
+
+	return x3.Cmp(y2) == 0
+}
+
+// affineFromJacobian reverses the Jacobian transform. See the comment at the
+// top of the file.
+func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
+	if z.Cmp(big.NewInt(0)) == 0 {
+		panic("bitcurve: Can't convert to affine with Jacobian Z = 0")
+	}
+	// x = X/Z² mod P
+	zinv := new(big.Int).ModInverse(z, bitCurve.P)
+	zinvsq := new(big.Int).Mul(zinv, zinv)
+
+	xOut = new(big.Int).Mul(x, zinvsq)
+	xOut.Mod(xOut, bitCurve.P)
+	// y = Y/Z³ mod P
+	zinvsq.Mul(zinvsq, zinv)
+	yOut = new(big.Int).Mul(y, zinvsq)
+	yOut.Mod(yOut, bitCurve.P)
+	return xOut, yOut
+}
+
+// Add returns the sum of (x1,y1) and (x2,y2)
+func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+	z := new(big.Int).SetInt64(1)
+	x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z)
+	return bitCurve.affineFromJacobian(x, y, z)
+}
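Because the exported operations convert through Jacobian form internally, a caller only ever sees affine coordinates. A short usage sketch (illustrative only, not part of the vendored file; it relies on the S256 constructor defined near the end of this file):

// exampleSecp256k1OnCurve checks that the secp256k1 generator, and a small
// scalar multiple of it, satisfy the curve equation y² = x³ + 7.
func exampleSecp256k1OnCurve() bool {
	curve := S256()
	if !curve.IsOnCurve(curve.Gx, curve.Gy) {
		return false // the generator itself must lie on the curve
	}
	// ScalarBaseMult interprets its argument as a big-endian integer,
	// so []byte{2} computes 2*G.
	x, y := curve.ScalarBaseMult([]byte{2})
	return curve.IsOnCurve(x, y)
}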
+// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
+// (x2, y2, z2) and returns their sum, also in Jacobian form.
+func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
+	// See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
+	z1z1 := new(big.Int).Mul(z1, z1)
+	z1z1.Mod(z1z1, bitCurve.P)
+	z2z2 := new(big.Int).Mul(z2, z2)
+	z2z2.Mod(z2z2, bitCurve.P)
+
+	u1 := new(big.Int).Mul(x1, z2z2)
+	u1.Mod(u1, bitCurve.P)
+	u2 := new(big.Int).Mul(x2, z1z1)
+	u2.Mod(u2, bitCurve.P)
+	h := new(big.Int).Sub(u2, u1)
+	if h.Sign() == -1 {
+		h.Add(h, bitCurve.P)
+	}
+	i := new(big.Int).Lsh(h, 1)
+	i.Mul(i, i)
+	j := new(big.Int).Mul(h, i)
+
+	s1 := new(big.Int).Mul(y1, z2)
+	s1.Mul(s1, z2z2)
+	s1.Mod(s1, bitCurve.P)
+	s2 := new(big.Int).Mul(y2, z1)
+	s2.Mul(s2, z1z1)
+	s2.Mod(s2, bitCurve.P)
+	r := new(big.Int).Sub(s2, s1)
+	if r.Sign() == -1 {
+		r.Add(r, bitCurve.P)
+	}
+	r.Lsh(r, 1)
+	v := new(big.Int).Mul(u1, i)
+
+	x3 := new(big.Int).Set(r)
+	x3.Mul(x3, x3)
+	x3.Sub(x3, j)
+	x3.Sub(x3, v)
+	x3.Sub(x3, v)
+	x3.Mod(x3, bitCurve.P)
+
+	y3 := new(big.Int).Set(r)
+	v.Sub(v, x3)
+	y3.Mul(y3, v)
+	s1.Mul(s1, j)
+	s1.Lsh(s1, 1)
+	y3.Sub(y3, s1)
+	y3.Mod(y3, bitCurve.P)
+
+	z3 := new(big.Int).Add(z1, z2)
+	z3.Mul(z3, z3)
+	z3.Sub(z3, z1z1)
+	if z3.Sign() == -1 {
+		z3.Add(z3, bitCurve.P)
+	}
+	z3.Sub(z3, z2z2)
+	if z3.Sign() == -1 {
+		z3.Add(z3, bitCurve.P)
+	}
+	z3.Mul(z3, h)
+	z3.Mod(z3, bitCurve.P)
+
+	return x3, y3, z3
+}
+
+// Double returns 2*(x,y)
+func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
+	z1 := new(big.Int).SetInt64(1)
+	return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1))
+}
+
+// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
+// returns its double, also in Jacobian form.
+func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
+	// See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
+
+	a := new(big.Int).Mul(x, x) //X1²
+	b := new(big.Int).Mul(y, y) //Y1²
+	c := new(big.Int).Mul(b, b) //B²
+
+	d := new(big.Int).Add(x, b) //X1+B
+	d.Mul(d, d)                 //(X1+B)²
+	d.Sub(d, a)                 //(X1+B)²-A
+	d.Sub(d, c)                 //(X1+B)²-A-C
+	d.Mul(d, big.NewInt(2))     //2*((X1+B)²-A-C)
+
+	e := new(big.Int).Mul(big.NewInt(3), a) //3*A
+	f := new(big.Int).Mul(e, e)             //E²
+
+	x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D
+	x3.Sub(f, x3)                            //F-2*D
+	x3.Mod(x3, bitCurve.P)
+
+	y3 := new(big.Int).Sub(d, x3)                  //D-X3
+	y3.Mul(e, y3)                                  //E*(D-X3)
+	y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C
+	y3.Mod(y3, bitCurve.P)
+
+	z3 := new(big.Int).Mul(y, z) //Y1*Z1
+	z3.Mul(big.NewInt(2), z3)    //2*Y1*Z1
+	z3.Mod(z3, bitCurve.P)
+
+	return x3, y3, z3
+}
+
+// TODO: double check if it is okay
+// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
+func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
+	// We have a slight problem in that the identity of the group (the
+	// point at infinity) cannot be represented in (x, y) form on a finite
+	// machine. Thus the standard add/double algorithm has to be tweaked
+	// slightly: our initial state is not the identity, but x, and we
+	// ignore the first true bit in |k|. If we don't find any true bits in
+	// |k|, then we return nil, nil, because we cannot return the identity
+	// element.
+ + Bz := new(big.Int).SetInt64(1) + x := Bx + y := By + z := Bz + + seenFirstTrue := false + for _, byte := range k { + for bitNum := 0; bitNum < 8; bitNum++ { + if seenFirstTrue { + x, y, z = bitCurve.doubleJacobian(x, y, z) + } + if byte&0x80 == 0x80 { + if !seenFirstTrue { + seenFirstTrue = true + } else { + x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z) + } + } + byte <<= 1 + } + } + + if !seenFirstTrue { + return nil, nil + } + + return bitCurve.affineFromJacobian(x, y, z) +} + +// ScalarBaseMult returns k*G, where G is the base point of the group and k is +// an integer in big-endian form. +func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k) +} + +var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f} + +// TODO: double check if it is okay +// GenerateKey returns a public/private key pair. The private key is generated +// using the given reader, which must return random data. +func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) { + byteLen := (bitCurve.BitSize + 7) >> 3 + priv = make([]byte, byteLen) + + for x == nil { + _, err = io.ReadFull(rand, priv) + if err != nil { + return + } + // We have to mask off any excess bits in the case that the size of the + // underlying field is not a whole number of bytes. + priv[0] &= mask[bitCurve.BitSize%8] + // This is because, in tests, rand will return all zeros and we don't + // want to get the point at infinity and loop forever. + priv[1] ^= 0x42 + x, y = bitCurve.ScalarBaseMult(priv) + } + return +} + +// Marshal converts a point into the form specified in section 4.3.6 of ANSI +// X9.62. +func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte { + byteLen := (bitCurve.BitSize + 7) >> 3 + + ret := make([]byte, 1+2*byteLen) + ret[0] = 4 // uncompressed point + + xBytes := x.Bytes() + copy(ret[1+byteLen-len(xBytes):], xBytes) + yBytes := y.Bytes() + copy(ret[1+2*byteLen-len(yBytes):], yBytes) + return ret +} + +// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On +// error, x = nil. 
+func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { + byteLen := (bitCurve.BitSize + 7) >> 3 + if len(data) != 1+2*byteLen { + return + } + if data[0] != 4 { // uncompressed form + return + } + x = new(big.Int).SetBytes(data[1 : 1+byteLen]) + y = new(big.Int).SetBytes(data[1+byteLen:]) + return +} + +//curve parameters taken from: +//http://www.secg.org/collateral/sec2_final.pdf + +var initonce sync.Once +var secp160k1 *BitCurve +var secp192k1 *BitCurve +var secp224k1 *BitCurve +var secp256k1 *BitCurve + +func initAll() { + initS160() + initS192() + initS224() + initS256() +} + +func initS160() { + // See SEC 2 section 2.4.1 + secp160k1 = new(BitCurve) + secp160k1.Name = "secp160k1" + secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16) + secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16) + secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16) + secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16) + secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16) + secp160k1.BitSize = 160 +} + +func initS192() { + // See SEC 2 section 2.5.1 + secp192k1 = new(BitCurve) + secp192k1.Name = "secp192k1" + secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16) + secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16) + secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16) + secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16) + secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16) + secp192k1.BitSize = 192 +} + +func initS224() { + // See SEC 2 section 2.6.1 + secp224k1 = new(BitCurve) + secp224k1.Name = "secp224k1" + secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16) + secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16) + secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16) + secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16) + secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16) + secp224k1.BitSize = 224 +} + +func initS256() { + // See SEC 2 section 2.7.1 + secp256k1 = new(BitCurve) + secp256k1.Name = "secp256k1" + secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16) + secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16) + secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16) + secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16) + secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16) + secp256k1.BitSize = 256 +} + +// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1) +func S160() *BitCurve { + initonce.Do(initAll) + return secp160k1 +} + +// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1) +func S192() *BitCurve { + initonce.Do(initAll) + return secp192k1 +} + +// S224 returns a BitCurve which 
implements secp224k1 (see SEC 2 section 2.6.1)
+func S224() *BitCurve {
+	initonce.Do(initAll)
+	return secp224k1
+}
+
+// S256 returns a BitCurve which implements secp256k1 (see SEC 2 section 2.7.1)
+func S256() *BitCurve {
+	initonce.Do(initAll)
+	return secp256k1
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go
new file mode 100644
index 000000000..cb6676de2
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go
@@ -0,0 +1,134 @@
+// Package brainpool implements Brainpool elliptic curves.
+// Implementation of rcurves is from github.com/ebfe/brainpool
+// Note that these curves are implemented with naive, non-constant time operations
+// and are likely not suitable for environments where timing attacks are a concern.
+package brainpool
+
+import (
+	"crypto/elliptic"
+	"math/big"
+	"sync"
+)
+
+var (
+	once                   sync.Once
+	p256t1, p384t1, p512t1 *elliptic.CurveParams
+	p256r1, p384r1, p512r1 *rcurve
+)
+
+func initAll() {
+	initP256t1()
+	initP384t1()
+	initP512t1()
+	initP256r1()
+	initP384r1()
+	initP512r1()
+}
+
+func initP256t1() {
+	p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"}
+	p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16)
+	p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16)
+	p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16)
+	p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16)
+	p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16)
+	p256t1.BitSize = 256
+}
+
+func initP256r1() {
+	twisted := p256t1
+	params := &elliptic.CurveParams{
+		Name:    "brainpoolP256r1",
+		P:       twisted.P,
+		N:       twisted.N,
+		BitSize: twisted.BitSize,
+	}
+	params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16)
+	params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16)
+	z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16)
+	p256r1 = newrcurve(twisted, params, z)
+}
+
+func initP384t1() {
+	p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"}
+	p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16)
+	p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16)
+	p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16)
+	p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16)
+	p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16)
+	p384t1.BitSize = 384
+}
+
+func initP384r1() {
+	twisted := p384t1
+	params := &elliptic.CurveParams{
+		Name:    "brainpoolP384r1",
+		P:       twisted.P,
+		N:       twisted.N,
+		BitSize: twisted.BitSize,
+	}
+	params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16)
+	params.Gy, _ =
new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) + z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) + p384r1 = newrcurve(twisted, params, z) +} + +func initP512t1() { + p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} + p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) + p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) + p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) + p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) + p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) + p512t1.BitSize = 512 +} + +func initP512r1() { + twisted := p512t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP512r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) + params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) + z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) + p512r1 = newrcurve(twisted, params, z) +} + +// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) +func P256t1() elliptic.Curve { + once.Do(initAll) + return p256t1 +} + +// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) +func P256r1() elliptic.Curve { + once.Do(initAll) + return p256r1 +} + +// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) +func P384t1() elliptic.Curve { + once.Do(initAll) + return p384t1 +} + +// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) +func P384r1() elliptic.Curve { + once.Do(initAll) + return p384r1 +} + +// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) +func P512t1() elliptic.Curve { + once.Do(initAll) + return p512t1 +} + +// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) +func P512r1() elliptic.Curve { + once.Do(initAll) + return p512r1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go new file mode 100644 index 000000000..7e291d6aa --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go @@ -0,0 +1,83 @@ +package brainpool + +import ( + "crypto/elliptic" + "math/big" +) + +var _ elliptic.Curve = (*rcurve)(nil) + +type rcurve struct { + twisted elliptic.Curve + params *elliptic.CurveParams + z *big.Int + zinv *big.Int + z2 *big.Int + z3 *big.Int + zinv2 *big.Int + zinv3 *big.Int +} + 
+var ( + two = big.NewInt(2) + three = big.NewInt(3) +) + +func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { + zinv := new(big.Int).ModInverse(z, params.P) + return &rcurve{ + twisted: twisted, + params: params, + z: z, + zinv: zinv, + z2: new(big.Int).Exp(z, two, params.P), + z3: new(big.Int).Exp(z, three, params.P), + zinv2: new(big.Int).Exp(zinv, two, params.P), + zinv3: new(big.Int).Exp(zinv, three, params.P), + } +} + +func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { + var tx, ty big.Int + tx.Mul(x, curve.z2) + tx.Mod(&tx, curve.params.P) + ty.Mul(y, curve.z3) + ty.Mod(&ty, curve.params.P) + return &tx, &ty +} + +func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { + var x, y big.Int + x.Mul(tx, curve.zinv2) + x.Mod(&x, curve.params.P) + y.Mul(ty, curve.zinv3) + y.Mod(&y, curve.params.P) + return &x, &y +} + +func (curve *rcurve) Params() *elliptic.CurveParams { + return curve.params +} + +func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { + return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) +} + +func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + tx2, ty2 := curve.toTwisted(x2, y2) + return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) +} + +func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) +} + +func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) +} + +func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go new file mode 100644 index 000000000..3ae91d594 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go @@ -0,0 +1,162 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package eax provides an implementation of the EAX +// (encrypt-authenticate-translate) mode of operation, as described in +// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS +// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY." +// In FSE'04, volume 3017 of LNCS, 2004 +package eax + +import ( + "crypto/cipher" + "crypto/subtle" + "errors" + "github.com/ProtonMail/go-crypto/internal/byteutil" +) + +const ( + defaultTagSize = 16 + defaultNonceSize = 16 +) + +type eax struct { + block cipher.Block // Only AES-{128, 192, 256} supported + tagSize int // At least 12 bytes recommended + nonceSize int +} + +func (e *eax) NonceSize() int { + return e.nonceSize +} + +func (e *eax) Overhead() int { + return e.tagSize +} + +// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and +// tag lengths. Supports {128, 192, 256}- bit key length. +func NewEAX(block cipher.Block) (cipher.AEAD, error) { + return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and +// given nonce and tag lengths in bytes. Panics on zero nonceSize and +// exceedingly long tags. +// +// It is recommended to use at least 12 bytes as tag length (see, for instance, +// NIST SP 800-38D). +// +// Only to be used for compatibility with existing cryptosystems with +// non-standard parameters. For all other cases, prefer NewEAX. 
+func NewEAXWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if nonceSize < 1 { + return nil, eaxError("Cannot initialize EAX with nonceSize = 0") + } + if tagSize > block.BlockSize() { + return nil, eaxError("Custom tag length exceeds blocksize") + } + return &eax{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + }, nil +} + +func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+e.tagSize) + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + + // Encrypt message using CTR mode and omacNonce as IV + ctr := cipher.NewCTR(e.block, omacNonce) + ciphertextData := out[:len(plaintext)] + ctr.XORKeyStream(ciphertextData, plaintext) + + omacCiphertext := e.omacT(2, ciphertextData) + + tag := out[len(plaintext):] + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + return ret +} + +func (e *eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + if len(ciphertext) < e.tagSize { + return nil, eaxError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - e.tagSize + + // Compute tag + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + omacCiphertext := e.omacT(2, ciphertext[:sep]) + + tag := make([]byte, e.tagSize) + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + + // Compare tags + if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 { + return nil, eaxError("Tag authentication failed") + } + + // Decrypt ciphertext + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ctr := cipher.NewCTR(e.block, omacNonce) + ctr.XORKeyStream(out, ciphertext[:sep]) + + return ret[:sep], nil +} + +// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext) +func (e *eax) omacT(t byte, plaintext []byte) []byte { + blockSize := e.block.BlockSize() + byteT := make([]byte, blockSize) + byteT[blockSize-1] = t + concat := append(byteT, plaintext...) + return e.omac(concat) +} + +func (e *eax) omac(plaintext []byte) []byte { + blockSize := e.block.BlockSize() + // L ← E_K(0^n); B ← 2L; P ← 4L + L := make([]byte, blockSize) + e.block.Encrypt(L, L) + B := byteutil.GfnDouble(L) + P := byteutil.GfnDouble(B) + + // CBC with IV = 0 + cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize)) + padded := e.pad(plaintext, B, P) + cbcCiphertext := make([]byte, len(padded)) + cbc.CryptBlocks(cbcCiphertext, padded) + + return cbcCiphertext[len(cbcCiphertext)-blockSize:] +} + +func (e *eax) pad(plaintext, B, P []byte) []byte { + // if |M| in {n, 2n, 3n, ...} + blockSize := e.block.BlockSize() + if len(plaintext) != 0 && len(plaintext)%blockSize == 0 { + return byteutil.RightXor(plaintext, B) + } + + // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P + ending := make([]byte, blockSize-len(plaintext)%blockSize) + ending[0] = 0x80 + padded := append(plaintext, ending...) 
+ return byteutil.RightXor(padded, P) +} + +func eaxError(err string) error { + return errors.New("crypto/eax: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go new file mode 100644 index 000000000..ddb53d079 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go @@ -0,0 +1,58 @@ +package eax + +// Test vectors from +// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf +var testVectors = []struct { + msg, key, nonce, header, ciphertext string +}{ + {"", + "233952DEE4D5ED5F9B9C6D6FF80FF478", + "62EC67F9C3A4A407FCB2A8C49031A8B3", + "6BFB914FD07EAE6B", + "E037830E8389F27B025A2D6527E79D01"}, + {"F7FB", + "91945D3F4DCBEE0BF45EF52255F095A4", + "BECAF043B0A23D843194BA972C66DEBD", + "FA3BFD4806EB53FA", + "19DD5C4C9331049D0BDAB0277408F67967E5"}, + {"1A47CB4933", + "01F74AD64077F2E704C0F60ADA3DD523", + "70C3DB4F0D26368400A10ED05D2BFF5E", + "234A3463C1264AC6", + "D851D5BAE03A59F238A23E39199DC9266626C40F80"}, + {"481C9E39B1", + "D07CF6CBB7F313BDDE66B727AFD3C5E8", + "8408DFFF3C1A2B1292DC199E46B7D617", + "33CCE2EABFF5A79D", + "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"}, + {"40D0C07DA5E4", + "35B6D0580005BBC12B0587124557D2C2", + "FDB6B06676EEDC5C61D74276E1F8E816", + "AEB96EAEBE2970E9", + "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"}, + {"4DE3B35C3FC039245BD1FB7D", + "BD8E6E11475E60B268784C38C62FEB22", + "6EAC5C93072D8E8513F750935E46DA1B", + "D4482D1CA78DCE0F", + "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"}, + {"8B0A79306C9CE7ED99DAE4F87F8DD61636", + "7C77D6E813BED5AC98BAA417477A2E7D", + "1A8C98DCD73D38393B2BF1569DEEFC19", + "65D2017990D62528", + "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"}, + {"1BDA122BCE8A8DBAF1877D962B8592DD2D56", + "5FFF20CAFAB119CA2FC73549E20F5B0D", + "DDE59B97D722156D4D9AFF2BC7559826", + "54B9F04E6A09189A", + "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"}, + {"6CF36720872B8513F6EAB1A8A44438D5EF11", + "A4A4782BCFFD3EC5E7EF6D8C34A56123", + "B781FCF2F75FA5A8DE97A9CA48E522EC", + "899A175897561D7E", + "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"}, + {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7", + "8395FCF1E95BEBD697BD010BC766AAC3", + "22E7ADD93CFC6393C57EC0B3C17D6B44", + "126735FCC320D25A", + "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go new file mode 100644 index 000000000..4eb19f28d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go @@ -0,0 +1,131 @@ +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
+ +package eax + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + {"DFDE093F36B0356E5A81F609786982E3", + "1D8AC604419001816905BA72B14CED7E", + "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6", + "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727", + "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"}, + {"F10619EF02E5D94D7550EB84ED364A21", + "8DC0D4F2F745BBAE835CC5574B942D20", + "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B", + "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5", + "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"}, + {"429F514EFC64D98A698A9247274CFF45", + "976AA5EB072F912D126ACEBC954FEC38", + "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421", + "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8", + "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"}, + {"398138F309085F47F8457CDF53895A63", + "F8A8A7F2D28E5FFF7BBC2F24353F7A36", + "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6", + "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268", + "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"}, + {"7A4151EBD3901B42CBA45DAFB2E931BA", + "0FC88ACEE74DD538040321C330974EB8", + "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9", + "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE", + "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"}, + {"BFB147E1CD5459424F8C0271FC0E0DC5", + "EABCC126442BF373969EA3015988CC45", + "4C0880E1D71AA2C7", + "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE", + "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"}, + {"595DD6F52D18BC2CA8EB4EDAA18D9FA3", + "0F84B5D36CF4BC3B863313AF3B4D2E97", + "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23", + "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368", + "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"}, + {"44E6F2DC8FDC778AD007137D11410F50", + "270A237AD977F7187AA6C158A0BAB24F", + "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78", + 
"8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848", + "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"}, + {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452", + "8B4BBE26CCD9859DCD84884159D6B0A4", + "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED", + "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A", + "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"}, + {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E", + "92745C018AA708ECFEB1667E9F3F1B01", + "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677", + "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083", + "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"}, + {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1", + "0F45CF7F0BF31CCEB85D9DA10F4D749F", + "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05", + "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8", + "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"}, + {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D", + "5B11EA1D6008EBB41CF892FCA5B943D1", + "BAF4FF5C8242", + "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C", + "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"}, + {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A", + "FD82B5B31804FF47D44199B533D0CF84", + "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1", + "B804", + "164BB965C05EBE0931A1A63293EDF9C38C27"}, + {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068", + "343BE00DA9483F05C14F2E9EB8EA6AE8", + "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA", + "CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86", + 
"E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"}, + {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE", + "E280E13D7367042E3AA09A80111B6184", + "21486C9D7A9647", + "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906", + "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"}, + {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5", + "656B41CB3F9CF8C08BAD7EBFC80BD225", + "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4", + "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D", + "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"}, + {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153", + "DA0F3A40983D92F2D4C01FED33C7A192", + "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853", + "F37326A80E08", + "83519E53E321D334F7C10B568183775C0E9AAE55F806"}, + {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5", + "3EE1182AEBB19A02B128F28E1D5F7F99", + "D9F35ABB16D776CE", + "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5", + "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"}, + {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A", + "336F5D681E0410FAE7B607246092C6DC", + "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611", + "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235", + "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"}, + {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC", + "AB91F7245DDCE3F1C747872D47BE0A8A", + "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B", + "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32", + "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"}, + {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A", + "C691294EF67CD04D1B9242AF83DD1421", + "879334DAE3", + "1E17F46A98FEF5CBB40759D95354", + "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"}, + {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB", + "4228DA0678CA3534588859E77DFF014C", + "D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7", + 
"23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570", + "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"}, + {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446", + "943188480C99437495958B0AE4831AA9", + "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A", + "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A", + "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"}, + {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84", + "B0490F5BD68A52459556B3749ACDF40E", + "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8", + "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F", + "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go new file mode 100644 index 000000000..affb74a76 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go @@ -0,0 +1,90 @@ +// Copyright (C) 2019 ProtonTech AG +// This file contains necessary tools for the aex and ocb packages. +// +// These functions SHOULD NOT be used elsewhere, since they are optimized for +// specific input nature in the EAX and OCB modes of operation. + +package byteutil + +// GfnDouble computes 2 * input in the field of 2^n elements. +// The irreducible polynomial in the finite field for n=128 is +// x^128 + x^7 + x^2 + x + 1 (equals 0x87) +// Constant-time execution in order to avoid side-channel attacks +func GfnDouble(input []byte) []byte { + if len(input) != 16 { + panic("Doubling in GFn only implemented for n = 128") + } + // If the first bit is zero, return 2L = L << 1 + // Else return (L << 1) xor 0^120 10000111 + shifted := ShiftBytesLeft(input) + shifted[15] ^= ((input[0] >> 7) * 0x87) + return shifted +} + +// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary. +func ShiftBytesLeft(x []byte) []byte { + l := len(x) + dst := make([]byte, l) + for i := 0; i < l-1; i++ { + dst[i] = (x[i] << 1) | (x[i+1] >> 7) + } + dst[l-1] = x[l-1] << 1 + return dst +} + +// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary. +func ShiftNBytesLeft(dst, x []byte, n int) { + // Erase first n / 8 bytes + copy(dst, x[n/8:]) + + // Shift the remaining n % 8 bits + bits := uint(n % 8) + l := len(dst) + for i := 0; i < l-1; i++ { + dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8-bits)) + } + dst[l-1] = dst[l-1] << bits + + // Append trailing zeroes + dst = append(dst, make([]byte, n/8)...) 
+} + +// XorBytesMut assumes equal input length, replaces X with X XOR Y +func XorBytesMut(X, Y []byte) { + for i := 0; i < len(X); i++ { + X[i] ^= Y[i] + } +} + +// XorBytes assumes equal input length, puts X XOR Y into Z +func XorBytes(Z, X, Y []byte) { + for i := 0; i < len(X); i++ { + Z[i] = X[i] ^ Y[i] + } +} + +// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X) +func RightXor(X, Y []byte) []byte { + offset := len(X) - len(Y) + xored := make([]byte, len(X)) + copy(xored, X) + for i := 0; i < len(Y); i++ { + xored[offset+i] ^= Y[i] + } + return xored +} + +// SliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func SliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go new file mode 100644 index 000000000..1a6f73502 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -0,0 +1,317 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package ocb provides an implementation of the OCB (offset codebook) mode of +// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare, +// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT +// AUTHENTICATED ENCRYPTION (2003). +// Security considerations (from RFC-7253): A private key MUST NOT be used to +// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a +// brute-force forging adversary succeeds after 2^{tag length} attempts). A +// single key SHOULD NOT be used to decrypt ciphertext with different tag +// lengths. Nonces need not be secret, but MUST NOT be reused. +// This package only supports underlying block ciphers with 128-bit blocks, +// such as AES-{128, 192, 256}, but may be extended to other sizes. +package ocb + +import ( + "bytes" + "crypto/cipher" + "crypto/subtle" + "errors" + "github.com/ProtonMail/go-crypto/internal/byteutil" + "math/bits" +) + +type ocb struct { + block cipher.Block + tagSize int + nonceSize int + mask mask + // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop' + // internal variable can be reused for en/decrypting with nonces sharing + // all but the last 6 bits with N. The prefix of the first nonce used to + // compute the new Ktop, and the Ktop value itself, are stored in + // reusableKtop. If using incremental nonces, this saves one block cipher + // call every 63 out of 64 OCB encryptions, and stores one nonce and one + // output of the block cipher in memory only. + reusableKtop reusableKtop +} + +type mask struct { + // L_*, L_$, (L_i)_{i ∈ N} + lAst []byte + lDol []byte + L [][]byte +} + +type reusableKtop struct { + noncePrefix []byte + Ktop []byte +} + +const ( + defaultTagSize = 16 + defaultNonceSize = 15 +) + +const ( + enc = iota + dec +) + +func (o *ocb) NonceSize() int { + return o.nonceSize +} + +func (o *ocb) Overhead() int { + return o.tagSize +} + +// NewOCB returns an OCB instance with the given block cipher and default +// tag and nonce sizes. 
+func NewOCB(block cipher.Block) (cipher.AEAD, error) {
+	return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize)
+}
+
+// NewOCBWithNonceAndTagSize returns an OCB instance with the given block
+// cipher, nonce length, and tag length. It returns an error if the block
+// cipher does not have 128-bit blocks, if the nonce length is zero or not
+// smaller than the block size, or if the tag length exceeds the block size.
+//
+// It is recommended to use at least 12 bytes as tag length.
+func NewOCBWithNonceAndTagSize(
+	block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) {
+	if block.BlockSize() != 16 {
+		return nil, ocbError("Block cipher must have 128-bit blocks")
+	}
+	if nonceSize < 1 {
+		return nil, ocbError("Incorrect nonce length")
+	}
+	if nonceSize >= block.BlockSize() {
+		return nil, ocbError("Nonce length exceeds blocksize - 1")
+	}
+	if tagSize > block.BlockSize() {
+		return nil, ocbError("Custom tag length exceeds blocksize")
+	}
+	return &ocb{
+		block:     block,
+		tagSize:   tagSize,
+		nonceSize: nonceSize,
+		mask:      initializeMaskTable(block),
+		reusableKtop: reusableKtop{
+			noncePrefix: nil,
+			Ktop:        nil,
+		},
+	}, nil
+}
+
+func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte {
+	if len(nonce) > o.nonceSize {
+		panic("crypto/ocb: Incorrect nonce length given to OCB")
+	}
+	ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize)
+	o.crypt(enc, out, nonce, adata, plaintext)
+	return ret
+}
+
+func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
+	if len(nonce) > o.nonceSize {
+		panic("Nonce too long for this instance")
+	}
+	if len(ciphertext) < o.tagSize {
+		return nil, ocbError("Ciphertext shorter than tag length")
+	}
+	sep := len(ciphertext) - o.tagSize
+	ret, out := byteutil.SliceForAppend(dst, len(ciphertext))
+	ciphertextData := ciphertext[:sep]
+	tag := ciphertext[sep:]
+	o.crypt(dec, out, nonce, adata, ciphertextData)
+	if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 {
+		ret = ret[:sep]
+		return ret, nil
+	}
+	for i := range out {
+		out[i] = 0
+	}
+	return nil, ocbError("Tag authentication failed")
+}
+
+// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt)
+// function. It returns the resulting plain/ciphertext with the tag appended.
+func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte {
+	//
+	// Consider X as a sequence of 128-bit blocks
+	//
+	// Note: For encryption (resp. decryption), X is the plaintext (resp., the
+	// ciphertext without the tag).
+	blockSize := o.block.BlockSize()
+
+	//
+	// Nonce-dependent and per-encryption variables
+	//
+	// Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop
+	// is already computed.
+	truncatedNonce := make([]byte, len(nonce))
+	copy(truncatedNonce, nonce)
+	truncatedNonce[len(truncatedNonce)-1] &= 192
+	Ktop := make([]byte, blockSize)
+	if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) {
+		Ktop = o.reusableKtop.Ktop
+	} else {
+		// Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N
+		paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1)
+		paddedNonce = append(paddedNonce, truncatedNonce...)
+		paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1)
+		// Last 6 bits of paddedNonce are already zero.
Encrypt into Ktop + paddedNonce[blockSize-1] &= 192 + Ktop = paddedNonce + o.block.Encrypt(Ktop, Ktop) + o.reusableKtop.noncePrefix = truncatedNonce + o.reusableKtop.Ktop = Ktop + } + + // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8)) + xorHalves := make([]byte, blockSize/2) + byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2]) + stretch := append(Ktop, xorHalves...) + bottom := int(nonce[len(nonce)-1] & 63) + offset := make([]byte, len(stretch)) + byteutil.ShiftNBytesLeft(offset, stretch, bottom) + offset = offset[:blockSize] + + // + // Process any whole blocks + // + // Note: For encryption Y is ciphertext || tag, for decryption Y is + // plaintext || tag. + checksum := make([]byte, blockSize) + m := len(X) / blockSize + for i := 0; i < m; i++ { + index := bits.TrailingZeros(uint(i + 1)) + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) + blockX := X[i*blockSize : (i+1)*blockSize] + blockY := Y[i*blockSize : (i+1)*blockSize] + byteutil.XorBytes(blockY, blockX, offset) + switch instruction { + case enc: + o.block.Encrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockX) + case dec: + o.block.Decrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockY) + } + } + // + // Process any final partial block and compute raw tag + // + tag := make([]byte, blockSize) + if len(X)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + pad := make([]byte, blockSize) + o.block.Encrypt(pad, offset) + chunkX := X[blockSize*m:] + chunkY := Y[blockSize*m : len(X)] + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) + switch instruction { + case enc: + paddedY := append(chunkX, byte(128)) + paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) + byteutil.XorBytesMut(checksum, paddedY) + case dec: + paddedX := append(chunkY, byte(128)) + paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) + byteutil.XorBytesMut(checksum, paddedX) + } + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) + } else { + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m:], tag[:o.tagSize]) + } + return Y +} + +// This hash function is used to compute the tag. Per design, on empty input it +// returns a slice of zeros, of the same length as the underlying block cipher +// block size. 
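+// (This is the HASH(K, A) function of RFC 7253, which absorbs the associated
+// data so it can be folded into the final tag.)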
+func (o *ocb) hash(adata []byte) []byte { + // + // Consider A as a sequence of 128-bit blocks + // + A := make([]byte, len(adata)) + copy(A, adata) + blockSize := o.block.BlockSize() + + // + // Process any whole blocks + // + sum := make([]byte, blockSize) + offset := make([]byte, blockSize) + m := len(A) / blockSize + for i := 0; i < m; i++ { + chunk := A[blockSize*i : blockSize*(i+1)] + index := bits.TrailingZeros(uint(i + 1)) + // If the mask table is too short + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[index]) + byteutil.XorBytesMut(chunk, offset) + o.block.Encrypt(chunk, chunk) + byteutil.XorBytesMut(sum, chunk) + } + + // + // Process any final partial block; compute final hash value + // + if len(A)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + // Pad block with 1 || 0 ^ 127 - bitlength(a) + ending := make([]byte, blockSize-len(A)%blockSize) + ending[0] = 0x80 + encrypted := append(A[blockSize*m:], ending...) + byteutil.XorBytesMut(encrypted, offset) + o.block.Encrypt(encrypted, encrypted) + byteutil.XorBytesMut(sum, encrypted) + } + return sum +} + +func initializeMaskTable(block cipher.Block) mask { + // + // Key-dependent variables + // + lAst := make([]byte, block.BlockSize()) + block.Encrypt(lAst, lAst) + lDol := byteutil.GfnDouble(lAst) + L := make([][]byte, 1) + L[0] = byteutil.GfnDouble(lDol) + + return mask{ + lAst: lAst, + lDol: lDol, + L: L, + } +} + +// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1]) +func (m *mask) extendTable(limit int) { + for i := len(m.L); i <= limit; i++ { + m.L = append(m.L, byteutil.GfnDouble(m.L[i-1])) + } +} + +func ocbError(err string) error { + return errors.New("crypto/ocb: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go new file mode 100644 index 000000000..0efaf344f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go @@ -0,0 +1,136 @@ +// In the test vectors provided by RFC 7253, the "bottom" +// internal variable, which defines "offset" for the first time, does not +// exceed 15. However, it can attain values up to 63. + +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
+ +package ocb + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + + {"9438C5D599308EAF13F800D2D31EA7F0", + "C38EE4801BEBFFA1CD8635BE", + "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536", + "962D227786FB8913A8BAD5DC3250", + "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"}, + {"BA7DE631C7D6712167C6724F5B9A2B1D", + "35263EBDA05765DC0E71F1F5", + "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83", + "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A", + "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"}, + {"2E74B25289F6FD3E578C24866E9C72A5", + "FD912F15025AF8414642BA1D1D", + "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A", + "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB", + "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"}, + {"E7EC507C802528F790AFF5303A017B17", + "4B97A7A568940A9E3CE7A99E93031E", + "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA", + "9455B3EA706B74", + "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"}, + {"6C928AA3224736F28EE7378DE0090191", + "8936138E2E4C6A13280017A1622D", + "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C", + "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55", + "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"}, + {"ECEA315CA4B3F425B0C9957A17805EA4", + "664CDAE18403F4F9BA13015A44FC", + "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5", + "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB", + "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"}, + {"4EE616C4A58AAA380878F71A373461F6", + "91B8C9C176D9C385E9C47E52", + "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F", + "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC", + "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"}, + {"DCD475773136C830D5E3D0C5FE05B7FF", 
+ "BB8E1FBB483BE7616A922C4A", + "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B", + "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B", + "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"}, + {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07", + "FACF804A4BEBF998505FF9DE", + "8213B9263B2971A5BDA18DBD02208EE1", + "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB", + "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"}, + {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1", + "0B0EF53D4606B28D1398355F", + "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761", + "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC", + "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"}, + {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41", + "5C83F4896D0738E1366B1836", + "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5", + "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87", + "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"}, + {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5", + "36B88F63ADBB5668588181D774", + "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80", + "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694", + "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"}, + {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D", + "BC099AEB637361BAC536B57618", + "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2", + "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8", + "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"}, + {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED", + "B1B0AAF373B8B026EB80422051D8", + "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1", + 
"189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A", + "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"}, + {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72", + "2F2F17CECF7E5A756D10785A3CB9DB", + "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671", + "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF", + "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"}, + {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18", + "BCA625674F41D1E3AB47672DC0C3", + "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43", + "E525422519ECE070E82C", + "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"}, + {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464", + "36D2EC25ADD33CDEDF495205BBC923", + "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90", + "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F", + "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"}, + {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71", + "81691D5D20EC818FCFF24B33DECC", + "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED", + "EB861165DAF7625F827C6B574ED703F03215", + "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"}, + {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282", + "D2A7B94DD12CDACA909D3AD7", + "E021A78F374FC271389AB9A3E97077D755", + "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461", + "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"}, + {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD", + "65ED3D63F79F90BBFD19775E", + "336A8C0B7243582A46B221AA677647FCAE91", + "134A8B34824A290E7B", + "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"}, + {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15", + "C391A856B9FE234E14BA1AC7BB40FF", + "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3", + "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE", + 
"938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"}, + {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB", + "B15B61C8BB39261A8F55AB178EC3", + "D0729B6B75BB", + "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651", + "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"}, + {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9", + "01D7BDB1133E9C347486C1EFA6", + "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978", + "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120", + "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"}, + {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4", + "0C52B3D11D636E5910A4DD76D32C", + "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609", + "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25", + "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go new file mode 100644 index 000000000..330309ff5 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go @@ -0,0 +1,78 @@ +package ocb + +import ( + "encoding/hex" +) + +// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is +// shared across tests. 
+var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F") + +var rfc7253testVectors = []struct { + nonce, header, plaintext, ciphertext string +}{ + {"BBAA99887766554433221100", + "", + "", + "785407BFFFC8AD9EDCC5520AC9111EE6"}, + {"BBAA99887766554433221101", + "0001020304050607", + "0001020304050607", + "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"}, + {"BBAA99887766554433221102", + "0001020304050607", + "", + "81017F8203F081277152FADE694A0A00"}, + {"BBAA99887766554433221103", + "", + "0001020304050607", + "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"}, + {"BBAA99887766554433221104", + "000102030405060708090A0B0C0D0E0F", + "000102030405060708090A0B0C0D0E0F", + "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"}, + {"BBAA99887766554433221105", + "000102030405060708090A0B0C0D0E0F", + "", + "8CF761B6902EF764462AD86498CA6B97"}, + {"BBAA99887766554433221106", + "", + "000102030405060708090A0B0C0D0E0F", + "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"}, + {"BBAA99887766554433221107", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"}, + {"BBAA99887766554433221108", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "", + "6DC225A071FC1B9F7C69F93B0F1E10DE"}, + {"BBAA99887766554433221109", + "", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"}, + {"BBAA9988776655443322110A", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"}, + {"BBAA9988776655443322110B", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "", + "FE80690BEE8A485D11F32965BC9D2A32"}, + {"BBAA9988776655443322110C", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"}, + {"BBAA9988776655443322110D", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"}, + {"BBAA9988776655443322110E", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "", + "C5CD9D1850C141E358649994EE701B68"}, + {"BBAA9988776655443322110F", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go new file mode 100644 index 000000000..14a3c336f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go @@ -0,0 +1,25 @@ +package ocb + +// Second set of test vectors from https://tools.ietf.org/html/rfc7253 +var rfc7253TestVectorTaglen96 = struct { + key, nonce, header, plaintext, ciphertext string +}{"0F0E0D0C0B0A09080706050403020100", + "BBAA9988776655443322110D", + 
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"} + +var rfc7253AlgorithmTest = []struct { + KEYLEN, TAGLEN int + OUTPUT string +}{ + {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"}, + {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"}, + {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"}, + {128, 96, "77A3D8E73589158D25D01209"}, + {192, 96, "05D56EAD2752C86BE6932C5E"}, + {256, 96, "5458359AC23B0CBA9E6330DD"}, + {128, 64, "192C9B7BD90BA06A"}, + {192, 64, "0066BC6E0EF34E24"}, + {256, 64, "7D4EA5D445501CBE"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go new file mode 100644 index 000000000..3c6251d1c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go @@ -0,0 +1,153 @@ +// Copyright 2014 Matthew Endsley +// All rights reserved +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted providing that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// Package keywrap is an implementation of the RFC 3394 AES key wrapping +// algorithm. This is used in OpenPGP with elliptic curve keys. +package keywrap + +import ( + "crypto/aes" + "encoding/binary" + "errors" +) + +var ( + // ErrWrapPlaintext is returned if the plaintext is not a multiple + // of 64 bits. + ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits") + + // ErrUnwrapCiphertext is returned if the ciphertext is not a + // multiple of 64 bits. + ErrUnwrapCiphertext = errors.New("keywrap: cipherText must by a multiple of 64 bits") + + // ErrUnwrapFailed is returned if unwrapping a key fails. + ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key") + + // NB: the AES NewCipher call only fails if the key is an invalid length. + + // ErrInvalidKey is returned when the AES key is invalid. + ErrInvalidKey = errors.New("keywrap: invalid AES key") +) + +// Wrap a key using the RFC 3394 AES Key Wrap Algorithm. 
+func Wrap(key, plainText []byte) ([]byte, error) {
+	if len(plainText)%8 != 0 {
+		return nil, ErrWrapPlaintext
+	}
+
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, ErrInvalidKey
+	}
+
+	nblocks := len(plainText) / 8
+
+	// 1) Initialize variables.
+	var block [aes.BlockSize]byte
+	// - Set A = IV, an initial value (see 2.2.3)
+	for ii := 0; ii < 8; ii++ {
+		block[ii] = 0xA6
+	}
+
+	// - For i = 1 to n
+	//   - Set R[i] = P[i]
+	intermediate := make([]byte, len(plainText))
+	copy(intermediate, plainText)
+
+	// 2) Calculate intermediate values.
+	for ii := 0; ii < 6; ii++ {
+		for jj := 0; jj < nblocks; jj++ {
+			// - B = AES(K, A | R[i])
+			copy(block[8:], intermediate[jj*8:jj*8+8])
+			c.Encrypt(block[:], block[:])
+
+			// - A = MSB(64, B) ^ t where t = (n*j)+i
+			t := uint64(ii*nblocks + jj + 1)
+			val := binary.BigEndian.Uint64(block[:8]) ^ t
+			binary.BigEndian.PutUint64(block[:8], val)
+
+			// - R[i] = LSB(64, B)
+			copy(intermediate[jj*8:jj*8+8], block[8:])
+		}
+	}
+
+	// 3) Output results.
+	// - Set C[0] = A
+	// - For i = 1 to n
+	//   - C[i] = R[i]
+	return append(block[:8], intermediate...), nil
+}
+
+// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm.
+func Unwrap(key, cipherText []byte) ([]byte, error) {
+	if len(cipherText)%8 != 0 {
+		return nil, ErrUnwrapCiphertext
+	}
+
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, ErrInvalidKey
+	}
+
+	nblocks := len(cipherText)/8 - 1
+
+	// 1) Initialize variables.
+	var block [aes.BlockSize]byte
+	// - Set A = C[0]
+	copy(block[:8], cipherText[:8])
+
+	// - For i = 1 to n
+	//   - Set R[i] = C[i]
+	intermediate := make([]byte, len(cipherText)-8)
+	copy(intermediate, cipherText[8:])
+
+	// 2) Compute intermediate values.
+	for jj := 5; jj >= 0; jj-- {
+		for ii := nblocks - 1; ii >= 0; ii-- {
+			// - B = AES-1(K, (A ^ t) | R[i]) where t = (n*j)+i
+			// - A = MSB(64, B)
+			t := uint64(jj*nblocks + ii + 1)
+			val := binary.BigEndian.Uint64(block[:8]) ^ t
+			binary.BigEndian.PutUint64(block[:8], val)
+
+			copy(block[8:], intermediate[ii*8:ii*8+8])
+			c.Decrypt(block[:], block[:])
+
+			// - R[i] = LSB(64, B)
+			copy(intermediate[ii*8:ii*8+8], block[8:])
+		}
+	}
+
+	// 3) Output results.
+	// - If A is an appropriate initial value (see 2.2.3),
+	for ii := 0; ii < 8; ii++ {
+		if block[ii] != 0xA6 {
+			return nil, ErrUnwrapFailed
+		}
+	}
+
+	// - For i = 1 to n
+	//   - P[i] = R[i]
+	return intermediate, nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
new file mode 100644
index 000000000..0d6c5f3f9
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
@@ -0,0 +1,226 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
+// very similar to PEM except that it has an additional CRC checksum.
+package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor"
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"io"
+)
+
+// A Block represents an OpenPGP armored structure.
+//
+// The encoded form is:
+//
+//	-----BEGIN Type-----
+//	Headers
+//
+//	base64-encoded Bytes
+//	'=' base64 encoded checksum
+//	-----END Type-----
+//
+// where Headers is a possibly empty sequence of Key: Value lines.
+// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 + crcSet bool +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if bytes.HasPrefix(line, armorEnd) { + l.eof = true + return 0, io.EOF + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + l.crcSet = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. 
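+//
+// A minimal usage sketch (illustrative only; errors elided, armoredInput is
+// any reader over an armored stream):
+//
+//	block, _ := armor.Decode(armoredInput)
+//	payload, _ := io.ReadAll(block.Body) // decoded body; CRC verified at EOF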
+func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimSpace(line) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(": ")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + p.Header[lastKey] = string(line[i+2:]) + } + + p.lReader.in = r + p.oReader.currentCRC = crc24Init + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go new file mode 100644 index 000000000..5b6e16c19 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -0,0 +1,161 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +// writeSlices writes its arguments to the given Writer. +func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. +type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. 
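+// (The checksum is the same 24-bit CRC computed by crc24 in armor.go; see
+// RFC 4880, section 6.1.)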
+// +// It's built into a stack of io.Writers: +// +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go new file mode 100644 index 000000000..a94f6150c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go @@ -0,0 +1,65 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "hash" + "io" +) + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { + start := 0 + for i, c := range buf { + switch *s { + case 0: + if c == '\r' { + *s = 1 + } else if c == '\n' { + cw.Write(buf[start:i]) + cw.Write(newline) + start = i + 1 + } + case 1: + *s = 0 + } + } + + cw.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + return writeCanonical(cth.h, buf, &cth.s) +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go new file mode 100644 index 000000000..c895bad6b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go @@ -0,0 +1,210 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ecdh implements ECDH encryption, suitable for OpenPGP, +// as specified in RFC 6637, section 8. +package ecdh + +import ( + "bytes" + "errors" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" +) + +type KDF struct { + Hash algorithm.Hash + Cipher algorithm.Cipher +} + +type PublicKey struct { + curve ecc.ECDHCurve + Point []byte + KDF +} + +type PrivateKey struct { + PublicKey + D []byte +} + +func NewPublicKey(curve ecc.ECDHCurve, kdfHash algorithm.Hash, kdfCipher algorithm.Cipher) *PublicKey { + return &PublicKey{ + curve: curve, + KDF: KDF{ + Hash: kdfHash, + Cipher: kdfCipher, + }, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.ECDHCurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalBytePoint(pk.Point) +} + +func (pk *PublicKey) UnmarshalPoint(p []byte) error { + pk.Point = pk.curve.UnmarshalBytePoint(p) + if pk.Point == nil { + return errors.New("ecdh: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalByteSecret() []byte { + return sk.curve.MarshalByteSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error { + sk.D = sk.curve.UnmarshalByteSecret(d) + + if sk.D == nil { + return errors.New("ecdh: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.ECDHCurve, kdf KDF) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.KDF = kdf + priv.PublicKey.Point, priv.D, err = c.GenerateECDH(rand) + return +} + +func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { + if len(msg) > 40 { + return nil, nil, errors.New("ecdh: message too long") + } + // the sender MAY use 21, 13, and 5 bytes of padding for AES-128, + // AES-192, and AES-256, respectively, to provide the same number of + // octets, 40 total, as an input to the key wrapping method. + padding := make([]byte, 40-len(msg)) + for i := range padding { + padding[i] = byte(40 - len(msg)) + } + m := append(msg, padding...) + + ephemeral, zb, err := pub.curve.Encaps(random, pub.Point) + if err != nil { + return nil, nil, err + } + + vsG = pub.curve.MarshalBytePoint(ephemeral) + + z, err := buildKey(pub, zb, curveOID, fingerprint, false, false) + if err != nil { + return nil, nil, err + } + + if c, err = keywrap.Wrap(z, m); err != nil { + return nil, nil, err + } + + return vsG, c, nil + +} + +func Decrypt(priv *PrivateKey, vsG, c, curveOID, fingerprint []byte) (msg []byte, err error) { + var m []byte + zb, err := priv.PublicKey.curve.Decaps(priv.curve.UnmarshalBytePoint(vsG), priv.D) + + // Try buildKey three times to workaround an old bug, see comments in buildKey. + for i := 0; i < 3; i++ { + var z []byte + // RFC6637 §8: "Compute Z = KDF( S, Z_len, Param );" + z, err = buildKey(&priv.PublicKey, zb, curveOID, fingerprint, i == 1, i == 2) + if err != nil { + return nil, err + } + + // RFC6637 §8: "Compute C = AESKeyWrap( Z, c ) as per [RFC3394]" + m, err = keywrap.Unwrap(z, c) + if err == nil { + break + } + } + + // Only return an error after we've tried all (required) variants of buildKey. 
+ if err != nil { + return nil, err + } + + // RFC6637 §8: "m = symm_alg_ID || session key || checksum || pkcs5_padding" + // The last byte should be the length of the padding, as per PKCS5; strip it off. + return m[:len(m)-int(m[len(m)-1])], nil +} + +func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) { + // Param = curve_OID_len || curve_OID || public_key_alg_ID || 03 + // || 01 || KDF_hash_ID || KEK_alg_ID for AESKeyWrap + // || "Anonymous Sender " || recipient_fingerprint; + param := new(bytes.Buffer) + if _, err := param.Write(curveOID); err != nil { + return nil, err + } + algKDF := []byte{18, 3, 1, pub.KDF.Hash.Id(), pub.KDF.Cipher.Id()} + if _, err := param.Write(algKDF); err != nil { + return nil, err + } + if _, err := param.Write([]byte("Anonymous Sender ")); err != nil { + return nil, err + } + // For v5 keys, the 20 leftmost octets of the fingerprint are used. + if _, err := param.Write(fingerprint[:20]); err != nil { + return nil, err + } + if param.Len()-len(curveOID) != 45 { + return nil, errors.New("ecdh: malformed KDF Param") + } + + // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param ); + h := pub.KDF.Hash.New() + if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil { + return nil, err + } + zbLen := len(zb) + i := 0 + j := zbLen - 1 + if stripLeading { + // Work around old go crypto bug where the leading zeros are missing. + for i < zbLen && zb[i] == 0 { + i++ + } + } + if stripTrailing { + // Work around old OpenPGP.js bug where insignificant trailing zeros in + // this little-endian number are missing. + // (See https://github.com/openpgpjs/openpgpjs/pull/853.) + for j >= 0 && zb[j] == 0 { + j-- + } + } + if _, err := h.Write(zb[i : j+1]); err != nil { + return nil, err + } + if _, err := h.Write(param.Bytes()); err != nil { + return nil, err + } + mb := h.Sum(nil) + + return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB. + +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateECDH(priv.Point, priv.D) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go new file mode 100644 index 000000000..f94ae1b2f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go @@ -0,0 +1,80 @@ +// Package ecdsa implements ECDSA signature, suitable for OpenPGP, +// as specified in RFC 6637, section 5. 
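+//
+// A typical round trip (illustrative only; errors elided, curve is any
+// ecc.ECDSACurve and digest a hash of the message):
+//
+//	priv, _ := ecdsa.GenerateKey(rand.Reader, curve)
+//	r, s, _ := ecdsa.Sign(rand.Reader, priv, digest)
+//	ok := ecdsa.Verify(&priv.PublicKey, digest, r, s)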
+package ecdsa + +import ( + "errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "io" + "math/big" +) + +type PublicKey struct { + X, Y *big.Int + curve ecc.ECDSACurve +} + +type PrivateKey struct { + PublicKey + D *big.Int +} + +func NewPublicKey(curve ecc.ECDSACurve) *PublicKey { + return &PublicKey{ + curve: curve, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.ECDSACurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalIntegerPoint(pk.X, pk.Y) +} + +func (pk *PublicKey) UnmarshalPoint(p []byte) error { + pk.X, pk.Y = pk.curve.UnmarshalIntegerPoint(p) + if pk.X == nil { + return errors.New("ecdsa: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalIntegerSecret() []byte { + return sk.curve.MarshalIntegerSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalIntegerSecret(d []byte) error { + sk.D = sk.curve.UnmarshalIntegerSecret(d) + + if sk.D == nil { + return errors.New("ecdsa: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.ECDSACurve) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.X, priv.PublicKey.Y, priv.D, err = c.GenerateECDSA(rand) + return +} + +func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) { + return priv.PublicKey.curve.Sign(rand, priv.X, priv.Y, priv.D, hash) +} + +func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool { + return pub.curve.Verify(pub.X, pub.Y, hash, r, s) +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateECDSA(priv.X, priv.Y, priv.D.Bytes()) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go new file mode 100644 index 000000000..99ecfc7f1 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go @@ -0,0 +1,91 @@ +// Package eddsa implements EdDSA signature, suitable for OpenPGP, as specified in +// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 +package eddsa + +import ( + "errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "io" +) + +type PublicKey struct { + X []byte + curve ecc.EdDSACurve +} + +type PrivateKey struct { + PublicKey + D []byte +} + +func NewPublicKey(curve ecc.EdDSACurve) *PublicKey { + return &PublicKey{ + curve: curve, + } +} + +func NewPrivateKey(key PublicKey) *PrivateKey { + return &PrivateKey{ + PublicKey: key, + } +} + +func (pk *PublicKey) GetCurve() ecc.EdDSACurve { + return pk.curve +} + +func (pk *PublicKey) MarshalPoint() []byte { + return pk.curve.MarshalBytePoint(pk.X) +} + +func (pk *PublicKey) UnmarshalPoint(x []byte) error { + pk.X = pk.curve.UnmarshalBytePoint(x) + + if pk.X == nil { + return errors.New("eddsa: failed to parse EC point") + } + return nil +} + +func (sk *PrivateKey) MarshalByteSecret() []byte { + return sk.curve.MarshalByteSecret(sk.D) +} + +func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error { + sk.D = sk.curve.UnmarshalByteSecret(d) + + if sk.D == nil { + return errors.New("eddsa: failed to parse scalar") + } + return nil +} + +func GenerateKey(rand io.Reader, c ecc.EdDSACurve) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.curve = c + priv.PublicKey.X, priv.D, err = c.GenerateEdDSA(rand) + return +} + +func Sign(priv *PrivateKey, message []byte) (r, s []byte, err 
error) { + sig, err := priv.PublicKey.curve.Sign(priv.PublicKey.X, priv.D, message) + if err != nil { + return nil, nil, err + } + + r, s = priv.PublicKey.curve.MarshalSignature(sig) + return +} + +func Verify(pub *PublicKey, message, r, s []byte) bool { + sig := pub.curve.UnmarshalSignature(r, s) + if sig == nil { + return false + } + + return pub.curve.Verify(pub.X, message, sig) +} + +func Validate(priv *PrivateKey) error { + return priv.curve.ValidateEdDSA(priv.PublicKey.X, priv.D) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 000000000..bad277434 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,124 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, +// n. 4, 1985, pp. 469-472. +// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal" + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. +func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { + pLen := (pub.P.BitLen() + 7) / 8 + if len(msg) > pLen-11 { + err = errors.New("elgamal: message too long") + return + } + + // EM = 0x02 || PS || 0x00 || M + em := make([]byte, pLen-1) + em[0] = 2 + ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] + err = nonZeroRandomBytes(ps, random) + if err != nil { + return + } + em[len(em)-len(msg)-1] = 0 + copy(mm, msg) + + m := new(big.Int).SetBytes(em) + + k, err := rand.Int(random, pub.P) + if err != nil { + return + } + + c1 = new(big.Int).Exp(pub.G, k, pub.P) + s := new(big.Int).Exp(pub.Y, k, pub.P) + c2 = s.Mul(s, m) + c2.Mod(c2, pub.P) + + return +} + +// Decrypt takes two integers, resulting from an ElGamal encryption, and +// returns the plaintext of the message. An error can result only if the +// ciphertext is invalid. Users should keep in mind that this is a padding +// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can +// be used to break the cryptosystem. 
See “Chosen Ciphertext Attacks
+// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel
+// Bleichenbacher, Advances in Cryptology (Crypto '98).
+func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
+	s := new(big.Int).Exp(c1, priv.X, priv.P)
+	if s.ModInverse(s, priv.P) == nil {
+		return nil, errors.New("elgamal: invalid private key")
+	}
+	s.Mul(s, c2)
+	s.Mod(s, priv.P)
+	em := s.Bytes()
+
+	firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
+
+	// The remainder of the plaintext must be a string of non-zero random
+	// octets, followed by a 0, followed by the message.
+	//   lookingForIndex: 1 iff we are still looking for the zero.
+	//   index: the offset of the first zero byte.
+	var lookingForIndex, index int
+	lookingForIndex = 1
+
+	for i := 1; i < len(em); i++ {
+		equals0 := subtle.ConstantTimeByteEq(em[i], 0)
+		index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
+		lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
+	}
+
+	if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
+		return nil, errors.New("elgamal: decryption error")
+	}
+	return em[index+1:], nil
+}
+
+// nonZeroRandomBytes fills the given slice with non-zero random octets.
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
+	_, err = io.ReadFull(rand, s)
+	if err != nil {
+		return
+	}
+
+	for i := 0; i < len(s); i++ {
+		for s[i] == 0 {
+			_, err = io.ReadFull(rand, s[i:i+1])
+			if err != nil {
+				return
+			}
+		}
+	}
+
+	return
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
new file mode 100644
index 000000000..17e2bcfed
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
@@ -0,0 +1,116 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errors contains common error types for the OpenPGP packages.
+package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors"
+
+import (
+	"strconv"
+)
+
+// A StructuralError is returned when OpenPGP data is found to be syntactically
+// invalid.
+type StructuralError string
+
+func (s StructuralError) Error() string {
+	return "openpgp: invalid data: " + string(s)
+}
+
+// UnsupportedError indicates that, although the OpenPGP data is valid, it
+// makes use of currently unimplemented features.
+type UnsupportedError string
+
+func (s UnsupportedError) Error() string {
+	return "openpgp: unsupported feature: " + string(s)
+}
+
+// InvalidArgumentError indicates that the caller is in error and passed an
+// incorrect value.
+type InvalidArgumentError string
+
+func (i InvalidArgumentError) Error() string {
+	return "openpgp: invalid argument: " + string(i)
+}
+
+// SignatureError indicates that a syntactically valid signature failed to
+// validate.
+type SignatureError string
+
+func (b SignatureError) Error() string {
+	return "openpgp: invalid signature: " + string(b)
+}
+
+var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch")
+var ErrMDCMissing error = SignatureError("MDC packet not found")
+
+type signatureExpiredError int
+
+func (se signatureExpiredError) Error() string {
+	return "openpgp: signature expired"
+}
+
+var ErrSignatureExpired error = signatureExpiredError(0)
+
+type keyExpiredError int
+
+func (ke keyExpiredError) Error() string {
+	return "openpgp: key expired"
+}
+
+var ErrKeyExpired error = keyExpiredError(0)
+
+type keyIncorrectError int
+
+func (ki keyIncorrectError) Error() string {
+	return "openpgp: incorrect key"
+}
+
+var ErrKeyIncorrect error = keyIncorrectError(0)
+
+// KeyInvalidError indicates that the public key parameters are invalid
+// as they do not match the private ones.
+type KeyInvalidError string
+
+func (e KeyInvalidError) Error() string {
+	return "openpgp: invalid key: " + string(e)
+}
+
+type unknownIssuerError int
+
+func (unknownIssuerError) Error() string {
+	return "openpgp: signature made by unknown entity"
+}
+
+var ErrUnknownIssuer error = unknownIssuerError(0)
+
+type keyRevokedError int
+
+func (keyRevokedError) Error() string {
+	return "openpgp: signature made by revoked key"
+}
+
+var ErrKeyRevoked error = keyRevokedError(0)
+
+type UnknownPacketTypeError uint8
+
+func (upte UnknownPacketTypeError) Error() string {
+	return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
+}
+
+// AEADError indicates that there is a problem when initializing or using an
+// AEAD instance, configuration struct, nonces or index values.
+type AEADError string
+
+func (ae AEADError) Error() string {
+	return "openpgp: aead error: " + string(ae)
+}
+
+// ErrDummyPrivateKey results when operations are attempted on a private key
+// that is just a dummy key. See
+// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
+type ErrDummyPrivateKey string
+
+func (dke ErrDummyPrivateKey) Error() string {
+	return "openpgp: s2k GNU dummy key: " + string(dke)
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go
new file mode 100644
index 000000000..526bd7777
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go
@@ -0,0 +1,24 @@
+package openpgp
+
+import (
+	"crypto"
+
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+)
+
+// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
+// hash id.
+func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
+	return algorithm.HashIdToHash(id)
+}
+
+// HashIdToString returns the name of the hash function corresponding to the
+// given OpenPGP hash id.
+func HashIdToString(id byte) (name string, ok bool) {
+	return algorithm.HashIdToString(id)
+}
+
+// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
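+// (For example, crypto.SHA256 corresponds to id 8 in the OpenPGP registry;
+// see RFC 4880, section 9.4.)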
+func HashToHashId(h crypto.Hash) (id byte, ok bool) { + return algorithm.HashToHashId(h) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go new file mode 100644 index 000000000..d06706518 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019 ProtonTech AG + +package algorithm + +import ( + "crypto/cipher" + "github.com/ProtonMail/go-crypto/eax" + "github.com/ProtonMail/go-crypto/ocb" +) + +// AEADMode defines the Authenticated Encryption with Associated Data mode of +// operation. +type AEADMode uint8 + +// Supported modes of operation (see RFC4880bis [EAX] and RFC7253) +const ( + AEADModeEAX = AEADMode(1) + AEADModeOCB = AEADMode(2) + AEADModeGCM = AEADMode(3) +) + +// TagLength returns the length in bytes of authentication tags. +func (mode AEADMode) TagLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 16 + case AEADModeGCM: + return 16 + default: + return 0 + } +} + +// NonceLength returns the length in bytes of nonces. +func (mode AEADMode) NonceLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 15 + case AEADModeGCM: + return 12 + default: + return 0 + } +} + +// New returns a fresh instance of the given mode. +func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) { + var err error + switch mode { + case AEADModeEAX: + alg, err = eax.NewEAX(block) + case AEADModeOCB: + alg, err = ocb.NewOCB(block) + case AEADModeGCM: + alg, err = cipher.NewGCM(block) + } + if err != nil { + panic(err.Error()) + } + return alg +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go new file mode 100644 index 000000000..5760cff80 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go @@ -0,0 +1,107 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + + "golang.org/x/crypto/cast5" +) + +// Cipher is an official symmetric key cipher algorithm. See RFC 4880, +// section 9.2. +type Cipher interface { + // Id returns the algorithm ID, as a byte, of the cipher. + Id() uint8 + // KeySize returns the key size, in bytes, of the cipher. + KeySize() int + // BlockSize returns the block size, in bytes, of the cipher. + BlockSize() int + // New returns a fresh instance of the given cipher. + New(key []byte) cipher.Block +} + +// The following constants mirror the OpenPGP standard (RFC 4880). +const ( + TripleDES = CipherFunction(2) + CAST5 = CipherFunction(3) + AES128 = CipherFunction(7) + AES192 = CipherFunction(8) + AES256 = CipherFunction(9) +) + +// CipherById represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +var CipherById = map[uint8]Cipher{ + TripleDES.Id(): TripleDES, + CAST5.Id(): CAST5, + AES128.Id(): AES128, + AES192.Id(): AES192, + AES256.Id(): AES256, +} + +type CipherFunction uint8 + +// Id returns the algorithm ID, as a byte, of cipher.
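+//
+// For example (editor's sketch), AES128.Id() returns 7, matching the
+// RFC 4880 registry mirrored by the constants above.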
+func (sk CipherFunction) Id() uint8 { + return uint8(sk) +} + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case TripleDES: + return 24 + case CAST5: + return cast5.KeySize + case AES128: + return 16 + case AES192: + return 24 + case AES256: + return 32 + } + return 0 +} + +// BlockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) BlockSize() int { + switch cipher { + case TripleDES: + return des.BlockSize + case CAST5: + return 8 + case AES128, AES192, AES256: + return 16 + } + return 0 +} + +// New returns a fresh instance of the given cipher. +func (cipher CipherFunction) New(key []byte) (block cipher.Block) { + var err error + switch cipher { + case TripleDES: + block, err = des.NewTripleDESCipher(key) + case CAST5: + block, err = cast5.NewCipher(key) + case AES128, AES192, AES256: + block, err = aes.NewCipher(key) + } + if err != nil { + panic(err.Error()) + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go new file mode 100644 index 000000000..d1a00fc74 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go @@ -0,0 +1,143 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto" + "fmt" + "hash" +) + +// Hash is an official hash function algorithm. See RFC 4880, section 9.4. +type Hash interface { + // Id returns the algorithm ID, as a byte, of Hash. + Id() uint8 + // Available reports whether the given hash function is linked into the binary. + Available() bool + // HashFunc simply returns the value of h so that Hash implements SignerOpts. + HashFunc() crypto.Hash + // New returns a new hash.Hash calculating the given hash function. New + // panics if the hash function is not linked into the binary. + New() hash.Hash + // Size returns the length, in bytes, of a digest resulting from the given + // hash function. It doesn't require that the hash function in question be + // linked into the program. + Size() int + // String is the name of the hash function corresponding to the given + // OpenPGP hash id. + String() string +} + +// The following vars mirror the crypto.Hash supported hash functions. +var ( + SHA1 Hash = cryptoHash{2, crypto.SHA1} + SHA256 Hash = cryptoHash{8, crypto.SHA256} + SHA384 Hash = cryptoHash{9, crypto.SHA384} + SHA512 Hash = cryptoHash{10, crypto.SHA512} + SHA224 Hash = cryptoHash{11, crypto.SHA224} + SHA3_256 Hash = cryptoHash{12, crypto.SHA3_256} + SHA3_512 Hash = cryptoHash{14, crypto.SHA3_512} +) + +// HashById represents the different hash functions specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14 +var ( + HashById = map[uint8]Hash{ + SHA256.Id(): SHA256, + SHA384.Id(): SHA384, + SHA512.Id(): SHA512, + SHA224.Id(): SHA224, + SHA3_256.Id(): SHA3_256, + SHA3_512.Id(): SHA3_512, + } +) + +// cryptoHash contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. +type cryptoHash struct { + id uint8 + crypto.Hash +} + +// Id returns the algorithm ID, as a byte, of cryptoHash.
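+//
+// For example (editor's sketch), SHA256.Id() returns 8, the id paired with
+// crypto.SHA256 in the vars above.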
+func (h cryptoHash) Id() uint8 { + return h.id +} + +var hashNames = map[uint8]string{ + SHA256.Id(): "SHA256", + SHA384.Id(): "SHA384", + SHA512.Id(): "SHA512", + SHA224.Id(): "SHA224", + SHA3_256.Id(): "SHA3-256", + SHA3_512.Id(): "SHA3-512", +} + +func (h cryptoHash) String() string { + s, ok := hashNames[h.id] + if !ok { + panic(fmt.Sprintf("Unsupported hash function %d", h.id)) + } + return s +} + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.HashFunc(), true + } + return 0, false +} + +// HashIdToHashWithSha1 returns a crypto.Hash which corresponds to the given OpenPGP +// hash id, allowing SHA1. +func HashIdToHashWithSha1(id byte) (h crypto.Hash, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.HashFunc(), true + } + + if id == SHA1.Id() { + return SHA1.HashFunc(), true + } + + return 0, false +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. +func HashIdToString(id byte) (name string, ok bool) { + if hash, ok := HashById[id]; ok { + return hash.String(), true + } + return "", false +} + +// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + for id, hash := range HashById { + if hash.HashFunc() == h { + return id, true + } + } + + return 0, false +} + +// HashToHashIdWithSha1 returns an OpenPGP hash id which corresponds to the given Hash, +// allowing instances of SHA1. +func HashToHashIdWithSha1(h crypto.Hash) (id byte, ok bool) { + for id, hash := range HashById { + if hash.HashFunc() == h { + return id, true + } + } + + if h == SHA1.HashFunc() { + return SHA1.Id(), true + } + + return 0, false +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go new file mode 100644 index 000000000..888767c4e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go @@ -0,0 +1,171 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + x25519lib "github.com/cloudflare/circl/dh/x25519" +) + +type curve25519 struct{} + +func NewCurve25519() *curve25519 { + return &curve25519{} +} + +func (c *curve25519) GetCurveName() string { + return "curve25519" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *curve25519) MarshalBytePoint(point []byte) []byte { + return append([]byte{0x40}, point...) +} + +// UnmarshalBytePoint decodes the public point to native format, removing the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *curve25519) UnmarshalBytePoint(point []byte) []byte { + if len(point) != x25519lib.Size+1 { + return nil + } + + // Remove prefix + return point[1:] +} + +// MarshalByteSecret encodes the secret scalar from native format. +// Note that the EC secret scalar differs from the definition of public keys in +// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is +// more uniform with how big integers are represented in OpenPGP, and (2) the +// leading zeros are truncated.
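+// For instance (editor's sketch), a native little-endian scalar s[0..31] is
+// emitted as the big-endian reversal d[i] = s[31-i]; the clamping below
+// (d[0] &= 127; d[0] |= 64; d[31] &= 248) then enforces the required form
+// 2^{254} + 8 * [0, 2^{251}).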
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1 +// Note that leading zero bytes are stripped later when encoding as an MPI. +func (c *curve25519) MarshalByteSecret(secret []byte) []byte { + d := make([]byte, x25519lib.Size) + copyReversed(d, secret) + + // The following ensures that the private key is a number of the form + // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of + // the curve. + // + // This masking is done internally in the underlying lib and so is unnecessary + // for security, but OpenPGP implementations require that private keys be + // pre-masked. + d[0] &= 127 + d[0] |= 64 + d[31] &= 248 + + return d +} + +// UnmarshalByteSecret decodes the secret scalar from native format. +// Note that the EC secret scalar differs from the definition of public keys in +// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is +// more uniform with how big integers are represented in OpenPGP, and (2) the +// leading zeros are truncated. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1 +func (c *curve25519) UnmarshalByteSecret(d []byte) []byte { + if len(d) > x25519lib.Size { + return nil + } + + // Ensure truncated leading bytes are re-added + secret := make([]byte, x25519lib.Size) + copyReversed(secret, d) + + return secret +} + +// generateKeyPairBytes generates a private-public key-pair. +// 'priv' is a private key; a little-endian scalar belonging to the set +// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the +// curve. 'pub' is simply 'priv' * G where G is the base point. +// See https://cr.yp.to/ecdh.html and RFC7748, sec 5. +func (c *curve25519) generateKeyPairBytes(rand io.Reader) (priv, pub x25519lib.Key, err error) { + _, err = io.ReadFull(rand, priv[:]) + if err != nil { + return + } + + x25519lib.KeyGen(&pub, &priv) + return +} + +func (c *curve25519) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) { + priv, pub, err := c.generateKeyPairBytes(rand) + if err != nil { + return + } + + return pub[:], priv[:], nil +} + +func (c *curve25519) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + // RFC6637 §8: "Generate an ephemeral key pair {v, V=vG}" + // ephemeralPrivate corresponds to `v`. + // ephemeralPublic corresponds to `V`. + ephemeralPrivate, ephemeralPublic, err := c.generateKeyPairBytes(rand) + if err != nil { + return nil, nil, err + } + + // RFC6637 §8: "Obtain the authenticated recipient public key R" + // pubKey corresponds to `R`. + var pubKey x25519lib.Key + copy(pubKey[:], point) + + // RFC6637 §8: "Compute the shared point S = vR" + // "VB = convert point V to the octet string" + // sharedPoint corresponds to `VB`. + var sharedPoint x25519lib.Key + x25519lib.Shared(&sharedPoint, &ephemeralPrivate, &pubKey) + + return ephemeralPublic[:], sharedPoint[:], nil +} + +func (c *curve25519) Decaps(vsG, secret []byte) (sharedSecret []byte, err error) { + var ephemeralPublic, decodedPrivate, sharedPoint x25519lib.Key + // RFC6637 §8: "The decryption is the inverse of the method given." + // All quoted descriptions in comments below describe encryption, and + // the reverse is performed. + // vsG corresponds to `VB` in RFC6637 §8.
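+ // Round-trip property (editor's sketch): for a recipient key pair (r, R)
+ // with R = rG, Encaps(rand, R) returns (V, S) with S = vR; Decaps(V, r)
+ // recomputes the same S, since rV = rvG = vR.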
+ + // RFC6637 §8: "VB = convert point V to the octet string" + copy(ephemeralPublic[:], vsG) + + // decodedPrivate corresponds to `r` in RFC6637 §8. + copy(decodedPrivate[:], secret) + + // RFC6637 §8: "Note that the recipient obtains the shared secret by calculating + // S = rV = rvG, where (r,R) is the recipient's key pair." + // sharedPoint corresponds to `S`. + x25519lib.Shared(&sharedPoint, &decodedPrivate, &ephemeralPublic) + + return sharedPoint[:], nil +} + +func (c *curve25519) ValidateECDH(point []byte, secret []byte) (err error) { + var pk, sk x25519lib.Key + copy(sk[:], secret) + x25519lib.KeyGen(&pk, &sk) + + if subtle.ConstantTimeCompare(point, pk[:]) == 0 { + return errors.KeyInvalidError("ecc: invalid curve25519 public point") + } + + return nil +} + +func copyReversed(out []byte, in []byte) { + l := len(in) + for i := 0; i < l; i++ { + out[i] = in[l-i-1] + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go new file mode 100644 index 000000000..35751034d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go @@ -0,0 +1,140 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "bytes" + "crypto/elliptic" + "github.com/ProtonMail/go-crypto/bitcurves" + "github.com/ProtonMail/go-crypto/brainpool" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +type CurveInfo struct { + GenName string + Oid *encoding.OID + Curve Curve +} + +var Curves = []CurveInfo{ + { + // NIST P-256 + GenName: "P256", + Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}), + Curve: NewGenericCurve(elliptic.P256()), + }, + { + // NIST P-384 + GenName: "P384", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}), + Curve: NewGenericCurve(elliptic.P384()), + }, + { + // NIST P-521 + GenName: "P521", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}), + Curve: NewGenericCurve(elliptic.P521()), + }, + { + // SecP256k1 + GenName: "SecP256k1", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}), + Curve: NewGenericCurve(bitcurves.S256()), + }, + { + // Curve25519 + GenName: "Curve25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), + Curve: NewCurve25519(), + }, + { + // X448 + GenName: "Curve448", + Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}), + Curve: NewX448(), + }, + { + // Ed25519 + GenName: "Curve25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), + Curve: NewEd25519(), + }, + { + // Ed448 + GenName: "Curve448", + Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x71}), + Curve: NewEd448(), + }, + { + // BrainpoolP256r1 + GenName: "BrainpoolP256", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}), + Curve: NewGenericCurve(brainpool.P256r1()), + }, + { + // BrainpoolP384r1 + GenName: "BrainpoolP384", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}), + Curve: NewGenericCurve(brainpool.P384r1()), + }, + { + // BrainpoolP512r1 + GenName: "BrainpoolP512", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}), + Curve: NewGenericCurve(brainpool.P512r1()), + }, +} + +func FindByCurve(curve Curve) *CurveInfo { + for _, curveInfo := range Curves { + if curveInfo.Curve.GetCurveName() == curve.GetCurveName() { + return &curveInfo + } + } + return nil +} + +func 
FindByOid(oid encoding.Field) *CurveInfo { + var rawBytes = oid.Bytes() + for _, curveInfo := range Curves { + if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) { + return &curveInfo + } + } + return nil +} + +func FindEdDSAByGenName(curveGenName string) EdDSACurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(EdDSACurve) + if ok { + return curve + } + } + } + return nil +} + +func FindECDSAByGenName(curveGenName string) ECDSACurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(ECDSACurve) + if ok { + return curve + } + } + } + return nil +} + +func FindECDHByGenName(curveGenName string) ECDHCurve { + for _, curveInfo := range Curves { + if curveInfo.GenName == curveGenName { + curve, ok := curveInfo.Curve.(ECDHCurve) + if ok { + return curve + } + } + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go new file mode 100644 index 000000000..5ed9c93b3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go @@ -0,0 +1,48 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "io" + "math/big" +) + +type Curve interface { + GetCurveName() string +} + +type ECDSACurve interface { + Curve + MarshalIntegerPoint(x, y *big.Int) []byte + UnmarshalIntegerPoint([]byte) (x, y *big.Int) + MarshalIntegerSecret(d *big.Int) []byte + UnmarshalIntegerSecret(d []byte) *big.Int + GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) + Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) + Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool + ValidateECDSA(x, y *big.Int, secret []byte) error +} + +type EdDSACurve interface { + Curve + MarshalBytePoint(x []byte) []byte + UnmarshalBytePoint([]byte) (x []byte) + MarshalByteSecret(d []byte) []byte + UnmarshalByteSecret(d []byte) []byte + MarshalSignature(sig []byte) (r, s []byte) + UnmarshalSignature(r, s []byte) (sig []byte) + GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) + Sign(publicKey, privateKey, message []byte) (sig []byte, err error) + Verify(publicKey, message, sig []byte) bool + ValidateEdDSA(publicKey, privateKey []byte) (err error) +} +type ECDHCurve interface { + Curve + MarshalBytePoint([]byte) (encoded []byte) + UnmarshalBytePoint(encoded []byte) []byte + MarshalByteSecret(d []byte) []byte + UnmarshalByteSecret(d []byte) []byte + GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) + Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) + Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) + ValidateECDH(public []byte, secret []byte) error +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go new file mode 100644 index 000000000..54a08a8a3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go @@ -0,0 +1,112 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. 
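+//
+// Hypothetical usage sketch (editor's illustration): curves are normally
+// obtained via the lookup helpers in curve_info.go rather than the
+// constructors, e.g.
+//
+//	if c := FindECDHByGenName("Curve25519"); c != nil {
+//		point, secret, err := c.GenerateECDH(rand.Reader) // rand is crypto/rand
+//		_, _, _ = point, secret, err
+//	}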
+package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed25519lib "github.com/cloudflare/circl/sign/ed25519" +) + +const ed25519Size = 32 + +type ed25519 struct{} + +func NewEd25519() *ed25519 { + return &ed25519{} +} + +func (c *ed25519) GetCurveName() string { + return "ed25519" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) MarshalBytePoint(x []byte) []byte { + return append([]byte{0x40}, x...) +} + +// UnmarshalBytePoint decodes a point from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) UnmarshalBytePoint(point []byte) (x []byte) { + if len(point) != ed25519lib.PublicKeySize+1 { + return nil + } + + // Return unprefixed + return point[1:] +} + +// MarshalByteSecret encodes a scalar in native format. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) MarshalByteSecret(d []byte) []byte { + return d +} + +// UnmarshalByteSecret decodes a scalar in native format and re-adds the stripped leading zeroes +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed25519) UnmarshalByteSecret(s []byte) (d []byte) { + if len(s) > ed25519lib.SeedSize { + return nil + } + + // Handle stripped leading zeroes + d = make([]byte, ed25519lib.SeedSize) + copy(d[ed25519lib.SeedSize-len(s):], s) + return +} + +// MarshalSignature splits a signature in R and S. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1 +func (c *ed25519) MarshalSignature(sig []byte) (r, s []byte) { + return sig[:ed25519Size], sig[ed25519Size:] +} + +// UnmarshalSignature decodes R and S in the native format, re-adding the stripped leading zeroes +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1 +func (c *ed25519) UnmarshalSignature(r, s []byte) (sig []byte) { + // Check size + if len(r) > 32 || len(s) > 32 { + return nil + } + + sig = make([]byte, ed25519lib.SignatureSize) + + // Handle stripped leading zeroes + copy(sig[ed25519Size-len(r):ed25519Size], r) + copy(sig[ed25519lib.SignatureSize-len(s):], s) + return sig +} + +func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { + pk, sk, err := ed25519lib.GenerateKey(rand) + + if err != nil { + return nil, nil, err + } + + return pk, sk[:ed25519lib.SeedSize], nil +} + +func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey { + return append(privateKey, publicKey...) 
+} + +func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { + sig = ed25519lib.Sign(getEd25519Sk(publicKey, privateKey), message) + return sig, nil +} + +func (c *ed25519) Verify(publicKey, message, sig []byte) bool { + return ed25519lib.Verify(publicKey, message, sig) +} + +func (c *ed25519) ValidateEdDSA(publicKey, privateKey []byte) (err error) { + priv := getEd25519Sk(publicKey, privateKey) + expectedPriv := ed25519lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 { + return errors.KeyInvalidError("ecc: invalid ed25519 secret") + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go new file mode 100644 index 000000000..18cd80434 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go @@ -0,0 +1,111 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + ed448lib "github.com/cloudflare/circl/sign/ed448" +) + +type ed448 struct{} + +func NewEd448() *ed448 { + return &ed448{} +} + +func (c *ed448) GetCurveName() string { + return "ed448" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) MarshalBytePoint(x []byte) []byte { + // Return prefixed + return append([]byte{0x40}, x...) +} + +// UnmarshalBytePoint decodes a point from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) UnmarshalBytePoint(point []byte) (x []byte) { + if len(point) != ed448lib.PublicKeySize+1 { + return nil + } + + // Strip prefix + return point[1:] +} + +// MarshalByteSecret encodes a scalar from native format to prefixed. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) MarshalByteSecret(d []byte) []byte { + // Return prefixed + return append([]byte{0x40}, d...) +} + +// UnmarshalByteSecret decodes a scalar from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5 +func (c *ed448) UnmarshalByteSecret(s []byte) (d []byte) { + // Check prefixed size + if len(s) != ed448lib.SeedSize+1 { + return nil + } + + // Strip prefix + return s[1:] +} + +// MarshalSignature splits a signature in R and S, where R is in prefixed native format and +// S is an MPI with value zero. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2 +func (c *ed448) MarshalSignature(sig []byte) (r, s []byte) { + return append([]byte{0x40}, sig...), []byte{} +} + +// UnmarshalSignature decodes R and S in the native format. Only R is used, in prefixed native format.
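+// As a sketch (editor's note): MarshalSignature(sig) above returns
+// r = append([]byte{0x40}, sig...) and an empty s, so UnmarshalSignature
+// below only strips the 0x40 prefix from r and ignores s.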
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2 +func (c *ed448) UnmarshalSignature(r, s []byte) (sig []byte) { + if len(r) != ed448lib.SignatureSize+1 { + return nil + } + + return r[1:] +} + +func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) { + pk, sk, err := ed448lib.GenerateKey(rand) + + if err != nil { + return nil, nil, err + } + + return pk, sk[:ed448lib.SeedSize], nil +} + +func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey { + return append(privateKey, publicKey...) +} + +func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 + sig = ed448lib.Sign(getEd448Sk(publicKey, privateKey), message, "") + + return sig, nil +} + +func (c *ed448) Verify(publicKey, message, sig []byte) bool { + // Ed448 is used with the empty string as a context string. + // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7 + return ed448lib.Verify(publicKey, message, sig, "") +} + +func (c *ed448) ValidateEdDSA(publicKey, privateKey []byte) (err error) { + priv := getEd448Sk(publicKey, privateKey) + expectedPriv := ed448lib.NewKeyFromSeed(priv.Seed()) + if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 { + return errors.KeyInvalidError("ecc: invalid ed448 secret") + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go new file mode 100644 index 000000000..e28d7c710 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go @@ -0,0 +1,149 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. 
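+//
+// ECDSA sketch over a generic curve (editor's illustration; digest is an
+// assumed precomputed hash and rand is crypto/rand):
+//
+//	c := NewGenericCurve(elliptic.P256())
+//	x, y, d, _ := c.GenerateECDSA(rand.Reader)
+//	r, s, _ := c.Sign(rand.Reader, x, y, d, digest)
+//	ok := c.Verify(x, y, digest, r, s)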
+package ecc + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "fmt" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "io" + "math/big" +) + +type genericCurve struct { + Curve elliptic.Curve +} + +func NewGenericCurve(c elliptic.Curve) *genericCurve { + return &genericCurve{ + Curve: c, + } +} + +func (c *genericCurve) GetCurveName() string { + return c.Curve.Params().Name +} + +func (c *genericCurve) MarshalBytePoint(point []byte) []byte { + return point +} + +func (c *genericCurve) UnmarshalBytePoint(point []byte) []byte { + return point +} + +func (c *genericCurve) MarshalIntegerPoint(x, y *big.Int) []byte { + return elliptic.Marshal(c.Curve, x, y) +} + +func (c *genericCurve) UnmarshalIntegerPoint(point []byte) (x, y *big.Int) { + return elliptic.Unmarshal(c.Curve, point) +} + +func (c *genericCurve) MarshalByteSecret(d []byte) []byte { + return d +} + +func (c *genericCurve) UnmarshalByteSecret(d []byte) []byte { + return d +} + +func (c *genericCurve) MarshalIntegerSecret(d *big.Int) []byte { + return d.Bytes() +} + +func (c *genericCurve) UnmarshalIntegerSecret(d []byte) *big.Int { + return new(big.Int).SetBytes(d) +} + +func (c *genericCurve) GenerateECDH(rand io.Reader) (point, secret []byte, err error) { + secret, x, y, err := elliptic.GenerateKey(c.Curve, rand) + if err != nil { + return nil, nil, err + } + + point = elliptic.Marshal(c.Curve, x, y) + return point, secret, nil +} + +func (c *genericCurve) GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) { + priv, err := ecdsa.GenerateKey(c.Curve, rand) + if err != nil { + return + } + + return priv.X, priv.Y, priv.D, nil +} + +func (c *genericCurve) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + xP, yP := elliptic.Unmarshal(c.Curve, point) + if xP == nil { + panic("invalid point") + } + + d, x, y, err := elliptic.GenerateKey(c.Curve, rand) + if err != nil { + return nil, nil, err + } + + vsG := elliptic.Marshal(c.Curve, x, y) + zbBig, _ := c.Curve.ScalarMult(xP, yP, d) + + byteLen := (c.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + return vsG, zb, nil +} + +func (c *genericCurve) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) { + x, y := elliptic.Unmarshal(c.Curve, ephemeral) + zbBig, _ := c.Curve.ScalarMult(x, y, secret) + byteLen := (c.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + return zb, nil +} + +func (c *genericCurve) Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) { + priv := &ecdsa.PrivateKey{D: d, PublicKey: ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}} + return ecdsa.Sign(rand, priv, hash) +} + +func (c *genericCurve) Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool { + pub := &ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve} + return ecdsa.Verify(pub, hash, r, s) +} + +func (c *genericCurve) validate(xP, yP *big.Int, secret []byte) error { + // the public point should not be at infinity (0,0) + zero := new(big.Int) + if xP.Cmp(zero) == 0 && yP.Cmp(zero) == 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", c.Curve.Params().Name)) + } + + // re-derive the public point Q' = (X,Y) = dG + // to compare to declared Q in public key + expectedX, expectedY := c.Curve.ScalarBaseMult(secret) + if xP.Cmp(expectedX) != 0 || yP.Cmp(expectedY) != 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", 
c.Curve.Params().Name)) + } + + return nil +} + +func (c *genericCurve) ValidateECDSA(xP, yP *big.Int, secret []byte) error { + return c.validate(xP, yP, secret) +} + +func (c *genericCurve) ValidateECDH(point []byte, secret []byte) error { + xP, yP := elliptic.Unmarshal(c.Curve, point) + if xP == nil { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name)) + } + + return c.validate(xP, yP, secret) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go new file mode 100644 index 000000000..ffdd51513 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go @@ -0,0 +1,105 @@ +// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA. +package ecc + +import ( + "crypto/subtle" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + x448lib "github.com/cloudflare/circl/dh/x448" +) + +type x448 struct{} + +func NewX448() *x448 { + return &x448{} +} + +func (c *x448) GetCurveName() string { + return "x448" +} + +// MarshalBytePoint encodes the public point from native format, adding the prefix. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *x448) MarshalBytePoint(point []byte) []byte { + return append([]byte{0x40}, point...) +} + +// UnmarshalBytePoint decodes a point from prefixed format to native. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6 +func (c *x448) UnmarshalBytePoint(point []byte) []byte { + if len(point) != x448lib.Size+1 { + return nil + } + + return point[1:] +} + +// MarshalByteSecret encodes a scalar from native format to prefixed. +// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2 +func (c *x448) MarshalByteSecret(d []byte) []byte { + return append([]byte{0x40}, d...) +} + +// UnmarshalByteSecret decodes a scalar from prefixed format to native.
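+// Sketch (editor's note): the input must be the 57-byte form 0x40 followed
+// by the 56-byte native scalar; any other length yields nil below.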
+// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2 +func (c *x448) UnmarshalByteSecret(d []byte) []byte { + if len(d) != x448lib.Size+1 { + return nil + } + + // Store without prefix + return d[1:] +} + +func (c *x448) generateKeyPairBytes(rand io.Reader) (sk, pk x448lib.Key, err error) { + if _, err = rand.Read(sk[:]); err != nil { + return + } + + x448lib.KeyGen(&pk, &sk) + return +} + +func (c *x448) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) { + priv, pub, err := c.generateKeyPairBytes(rand) + if err != nil { + return + } + + return pub[:], priv[:], nil +} + +func (c *x448) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) { + var pk, ss x448lib.Key + seed, e, err := c.generateKeyPairBytes(rand) + if err != nil { + return nil, nil, err + } + + copy(pk[:], point) + x448lib.Shared(&ss, &seed, &pk) + + return e[:], ss[:], nil +} + +func (c *x448) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) { + var ss, sk, e x448lib.Key + + copy(sk[:], secret) + copy(e[:], ephemeral) + x448lib.Shared(&ss, &sk, &e) + + return ss[:], nil +} + +func (c *x448) ValidateECDH(point []byte, secret []byte) error { + var sk, pk, expectedPk x448lib.Key + + copy(pk[:], point) + copy(sk[:], secret) + x448lib.KeyGen(&expectedPk, &sk) + + if subtle.ConstantTimeCompare(expectedPk[:], pk[:]) == 0 { + return errors.KeyInvalidError("ecc: invalid x448 public point") + } + + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go new file mode 100644 index 000000000..6c921481b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package encoding implements openpgp packet field encodings as specified in +// RFC 4880 and 6637. +package encoding + +import "io" + +// Field is an encoded field of an openpgp packet. +type Field interface { + // Bytes returns the decoded data. + Bytes() []byte + + // BitLength is the size in bits of the decoded data. + BitLength() uint16 + + // EncodedBytes returns the encoded data. + EncodedBytes() []byte + + // EncodedLength is the size in bytes of the encoded data. + EncodedLength() uint16 + + // ReadFrom reads the next Field from r. + ReadFrom(r io.Reader) (int64, error) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go new file mode 100644 index 000000000..02e5e695c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go @@ -0,0 +1,91 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package encoding + +import ( + "io" + "math/big" + "math/bits" +) + +// An MPI is used to store the contents of a big integer, along with the bit +// length that was specified in the original input. This allows the MPI to be +// reserialized exactly. +type MPI struct { + bytes []byte + bitLength uint16 +} + +// NewMPI returns an MPI initialized with bytes.
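+//
+// Worked example (editor's sketch): NewMPI([]byte{0x00, 0x01}) strips the
+// leading zero, leaving bytes {0x01} with a bit length of 1, so
+// EncodedBytes() yields {0x00, 0x01, 0x01}.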
+func NewMPI(bytes []byte) *MPI { + for len(bytes) != 0 && bytes[0] == 0 { + bytes = bytes[1:] + } + if len(bytes) == 0 { + bitLength := uint16(0) + return &MPI{bytes, bitLength} + } + bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0])) + return &MPI{bytes, bitLength} +} + +// Bytes returns the decoded data. +func (m *MPI) Bytes() []byte { + return m.bytes +} + +// BitLength is the size in bits of the decoded data. +func (m *MPI) BitLength() uint16 { + return m.bitLength +} + +// EncodedBytes returns the encoded data. +func (m *MPI) EncodedBytes() []byte { + return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...) +} + +// EncodedLength is the size in bytes of the encoded data. +func (m *MPI) EncodedLength() uint16 { + return uint16(2 + len(m.bytes)) +} + +// ReadFrom reads into m the next MPI from r. +func (m *MPI) ReadFrom(r io.Reader) (int64, error) { + var buf [2]byte + n, err := io.ReadFull(r, buf[0:]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return int64(n), err + } + + m.bitLength = uint16(buf[0])<<8 | uint16(buf[1]) + m.bytes = make([]byte, (int(m.bitLength)+7)/8) + + nn, err := io.ReadFull(r, m.bytes) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + + // remove leading zero bytes from malformed GnuPG encoded MPIs: + // https://bugs.gnupg.org/gnupg/issue1853 + // for _, b := range m.bytes { + // if b != 0 { + // break + // } + // m.bytes = m.bytes[1:] + // m.bitLength -= 8 + // } + + return int64(n) + int64(nn), err +} + +// SetBig initializes m with the bits from n. +func (m *MPI) SetBig(n *big.Int) *MPI { + m.bytes = n.Bytes() + m.bitLength = uint16(n.BitLen()) + return m +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go new file mode 100644 index 000000000..c9df9fe23 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go @@ -0,0 +1,88 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package encoding + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// OID is used to store a variable-length field with a one-octet size +// prefix. See https://tools.ietf.org/html/rfc6637#section-9. +type OID struct { + bytes []byte +} + +const ( + // maxOID is the maximum number of bytes in an OID. + maxOID = 254 + // reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC + // specifies are reserved. + reservedOIDLength1 = 0 + reservedOIDLength2 = 0xff +) + +// NewOID returns an OID initialized with bytes. +func NewOID(bytes []byte) *OID { + switch len(bytes) { + case reservedOIDLength1, reservedOIDLength2: + panic("encoding: NewOID argument length is reserved") + default: + if len(bytes) > maxOID { + panic("encoding: NewOID argument too large") + } + } + + return &OID{ + bytes: bytes, + } +} + +// Bytes returns the decoded data. +func (o *OID) Bytes() []byte { + return o.bytes +} + +// BitLength is the size in bits of the decoded data. +func (o *OID) BitLength() uint16 { + return uint16(len(o.bytes) * 8) +} + +// EncodedBytes returns the encoded data. +func (o *OID) EncodedBytes() []byte { + return append([]byte{byte(len(o.bytes))}, o.bytes...) +} + +// EncodedLength is the size in bytes of the encoded data.
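+//
+// For example (editor's sketch), the 10-byte Curve25519 OID used in
+// curve_info.go encodes to 11 bytes: the length octet 0x0A followed by the
+// OID bytes, so EncodedLength() returns 11.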
+func (o *OID) EncodedLength() uint16 { + return uint16(1 + len(o.bytes)) +} + +// ReadFrom reads into o the next OID from r. +func (o *OID) ReadFrom(r io.Reader) (int64, error) { + var buf [1]byte + n, err := io.ReadFull(r, buf[:]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return int64(n), err + } + + switch buf[0] { + case reservedOIDLength1, reservedOIDLength2: + return int64(n), errors.UnsupportedError("reserved for future extensions") + } + + o.bytes = make([]byte, buf[0]) + + nn, err := io.ReadFull(r, o.bytes) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + + return int64(n) + int64(nn), err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go new file mode 100644 index 000000000..0e71934cd --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -0,0 +1,389 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + goerrors "errors" + "io" + "math/big" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. +func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + // Generate a primary signing key + primaryPrivRaw, err := newSigner(config) + if err != nil { + return nil, err + } + primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) + if config != nil && config.V5Keys { + primary.UpgradeToV5() + } + + e := &Entity{ + PrimaryKey: &primary.PublicKey, + PrivateKey: primary, + Identities: make(map[string]*Identity), + Subkeys: []Subkey{}, + } + + err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) + if err != nil { + return nil, err + } + + // NOTE: No key expiry here, but we will not return this subkey in EncryptionKey() + // if the primary/master key has expired.
+ err = e.addEncryptionSubkey(config, creationTime, 0) + if err != nil { + return nil, err + } + + return e, nil +} + +func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs) +} + +func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return errors.InvalidArgumentError("user id field contained invalid characters") + } + + if _, ok := t.Identities[uid.Id]; ok { + return errors.InvalidArgumentError("user id already exists") + } + + primary := t.PrivateKey + + isPrimaryId := len(t.Identities) == 0 + + selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config) + selfSignature.CreationTime = creationTime + selfSignature.KeyLifetimeSecs = &keyLifetimeSecs + selfSignature.IsPrimaryId = &isPrimaryId + selfSignature.FlagsValid = true + selfSignature.FlagSign = true + selfSignature.FlagCertify = true + selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14 + selfSignature.SEIPDv2 = config.AEAD() != nil + + // Set the PreferredHash for the SelfSignature from the packet.Config. + // If it is not the must-implement algorithm from rfc4880bis, append that. + hash, ok := algorithm.HashToHashId(config.Hash()) + if !ok { + return errors.UnsupportedError("unsupported preferred hash function") + } + + selfSignature.PreferredHash = []uint8{hash} + if config.Hash() != crypto.SHA256 { + selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256)) + } + + // Likewise for DefaultCipher. + selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())} + if config.Cipher() != packet.CipherAES128 { + selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) + } + + // We set CompressionNone as the preferred compression algorithm because + // of compression side channel attacks, then append the configured + // DefaultCompressionAlgo if any is set (to signal support for cases + // where the application knows that using compression is safe). + selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)} + if config.Compression() != packet.CompressionNone { + selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression())) + } + + // And for DefaultMode. + modes := []uint8{uint8(config.AEAD().Mode())} + if config.AEAD().Mode() != packet.AEADModeOCB { + modes = append(modes, uint8(packet.AEADModeOCB)) + } + + // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB) + for _, cipher := range selfSignature.PreferredSymmetric { + for _, mode := range modes { + selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode}) + } + } + + // User ID binding signature + err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) + if err != nil { + return err + } + t.Identities[uid.Id] = &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: selfSignature, + Signatures: []*packet.Signature{selfSignature}, + } + return nil +} + +// AddSigningSubkey adds a signing keypair as a subkey to the Entity. +// If config is nil, sensible defaults will be used.
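+//
+// Usage sketch (editor's illustration, assuming a previously generated
+// entity):
+//
+//	if err := entity.AddSigningSubkey(nil); err != nil {
+//		// handle error
+//	}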
+func (e *Entity) AddSigningSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + subPrivRaw, err := newSigner(config) + if err != nil { + return err + } + sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) + sub.IsSubkey = true + if config != nil && config.V5Keys { + sub.UpgradeToV5() + } + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + } + subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config) + subkey.Sig.CreationTime = creationTime + subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs + subkey.Sig.FlagsValid = true + subkey.Sig.FlagSign = true + subkey.Sig.EmbeddedSignature = createSignaturePacket(subkey.PublicKey, packet.SigTypePrimaryKeyBinding, config) + subkey.Sig.EmbeddedSignature.CreationTime = creationTime + + err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config) + if err != nil { + return err + } + + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity. +// If config is nil, sensible defaults will be used. +func (e *Entity) AddEncryptionSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + return e.addEncryptionSubkey(config, creationTime, keyLifetimeSecs) +} + +func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error { + subPrivRaw, err := newDecrypter(config) + if err != nil { + return err + } + sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) + sub.IsSubkey = true + if config != nil && config.V5Keys { + sub.UpgradeToV5() + } + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + } + subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config) + subkey.Sig.CreationTime = creationTime + subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs + subkey.Sig.FlagsValid = true + subkey.Sig.FlagEncryptStorage = true + subkey.Sig.FlagEncryptCommunications = true + + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// Generates a signing key +func newSigner(config *packet.Config) (signer interface{}, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA: + curve := ecc.FindEdDSAByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + + priv, err := eddsa.GenerateKey(config.Random(), curve) + if err != nil { + return nil, err + } + return priv, nil + case packet.PubKeyAlgoECDSA: + curve := ecc.FindECDSAByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + + priv, err := ecdsa.GenerateKey(config.Random(), curve) + if err != nil { + return nil, err + } + return priv, nil + default: + return nil, 
errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +// Generates an encryption/decryption key +func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA: + fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey + case packet.PubKeyAlgoECDH: + var kdf = ecdh.KDF{ + Hash: algorithm.SHA512, + Cipher: algorithm.AES256, + } + curve := ecc.FindECDHByGenName(string(config.CurveName())) + if curve == nil { + return nil, errors.InvalidArgumentError("unsupported curve") + } + return ecdh.GenerateKey(config.Random(), curve, kdf) + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +var bigOne = big.NewInt(1) + +// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the +// given bit size, using the given random source and prepopulated primes. +func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { + priv := new(rsa.PrivateKey) + priv.E = 65537 + + if nprimes < 2 { + return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2") + } + + if bits < 1024 { + return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024") + } + + primes := make([]*big.Int, nprimes) + +NextSetOfPrimes: + for { + todo := bits + // crypto/rand should set the top two bits in each prime. + // Thus each prime has the form + // p_i = 2^bitlen(p_i) × 0.11... (in base 2). + // And the product is: + // P = 2^todo × α + // where α is the product of nprimes numbers of the form 0.11... + // + // If α < 1/2 (which can happen for nprimes > 2), we need to + // shift todo to compensate for lost bits: the mean value of 0.11... + // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 + // will give good results. + if nprimes >= 7 { + todo += (nprimes - 2) / 5 + } + for i := 0; i < nprimes; i++ { + var err error + if len(prepopulatedPrimes) == 0 { + primes[i], err = rand.Prime(random, todo/(nprimes-i)) + if err != nil { + return nil, err + } + } else { + primes[i] = prepopulatedPrimes[0] + prepopulatedPrimes = prepopulatedPrimes[1:] + } + + todo -= primes[i].BitLen() + } + + // Make sure that primes is pairwise unequal. + for i, prime := range primes { + for j := 0; j < i; j++ { + if prime.Cmp(primes[j]) == 0 { + continue NextSetOfPrimes + } + } + } + + n := new(big.Int).Set(bigOne) + totient := new(big.Int).Set(bigOne) + pminus1 := new(big.Int) + for _, prime := range primes { + n.Mul(n, prime) + pminus1.Sub(prime, bigOne) + totient.Mul(totient, pminus1) + } + if n.BitLen() != bits { + // This should never happen for nprimes == 2 because + // crypto/rand should set the top two bits in each prime. + // For nprimes > 2 we hope it does not happen often. 
+ continue NextSetOfPrimes + } + + priv.D = new(big.Int) + e := big.NewInt(int64(priv.E)) + ok := priv.D.ModInverse(e, totient) + + if ok != nil { + priv.Primes = primes + priv.N = n + break + } + } + + priv.Precompute() + return priv, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go new file mode 100644 index 000000000..7283ca91c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -0,0 +1,842 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + goerrors "errors" + "io" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) <email@example.com>" + UserId *packet.UserId + SelfSignature *packet.Signature + Revocations []*packet.Signature + Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature + Revocations []*packet.Signature +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. +type Key struct { + Entity *Entity + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + SelfSignature *packet.Signature + Revocations []*packet.Signature +} + +// A KeyRing provides access to public and private keys. +type KeyRing interface { + // KeysById returns the set of keys that have the given key id. + KeysById(id uint64) []Key + // KeysByIdUsage returns the set of keys with the given id + // that also meet the key usage given by requiredUsage. + // The requiredUsage is expressed as the bitwise-OR of + // packet.KeyFlag* values. + KeysByIdUsage(id uint64, requiredUsage byte) []Key + // DecryptionKeys returns all private keys that are valid for + // decryption. + DecryptionKeys() []Key +} + +// PrimaryIdentity returns an Identity, preferring non-revoked identities, +// identities marked as primary, or the latest-created identity, in that order.
+func (e *Entity) PrimaryIdentity() *Identity {
+ var primaryIdentity *Identity
+ for _, ident := range e.Identities {
+ if shouldPreferIdentity(primaryIdentity, ident) {
+ primaryIdentity = ident
+ }
+ }
+ return primaryIdentity
+}
+
+func shouldPreferIdentity(existingId, potentialNewId *Identity) bool {
+ if existingId == nil {
+ return true
+ }
+
+ if len(existingId.Revocations) > len(potentialNewId.Revocations) {
+ return true
+ }
+
+ if len(existingId.Revocations) < len(potentialNewId.Revocations) {
+ return false
+ }
+
+ if existingId.SelfSignature == nil {
+ return true
+ }
+
+ if existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId &&
+ !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) {
+ return false
+ }
+
+ if !(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) &&
+ potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId {
+ return true
+ }
+
+ return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime)
+}
+
+// EncryptionKey returns the best candidate Key for encrypting a message to the
+// given Entity.
+func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
+ // Fail to find any encryption key if the...
+ i := e.PrimaryIdentity()
+ if i.SelfSignature == nil || // user ID has no self-signature
+ e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired
+ i.SelfSignature.SigExpired(now) || // user ID self-signature has expired
+ e.Revoked(now) || // primary key has been revoked
+ i.Revoked(now) { // user ID has been revoked
+ return Key{}, false
+ }
+
+ // Iterate the keys to find the newest, unexpired one
+ candidateSubkey := -1
+ var maxTime time.Time
+ for i, subkey := range e.Subkeys {
+ if subkey.Sig.FlagsValid &&
+ subkey.Sig.FlagEncryptCommunications &&
+ subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
+ !subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
+ !subkey.Sig.SigExpired(now) &&
+ !subkey.Revoked(now) &&
+ (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
+ candidateSubkey = i
+ maxTime = subkey.Sig.CreationTime
+ }
+ }
+
+ if candidateSubkey != -1 {
+ subkey := e.Subkeys[candidateSubkey]
+ return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
+ }
+
+ // If we don't have any subkeys for encryption and the primary key
+ // is marked as OK to encrypt with, then we can use it.
+ if i.SelfSignature.FlagsValid && i.SelfSignature.FlagEncryptCommunications &&
+ e.PrimaryKey.PubKeyAlgo.CanEncrypt() {
+ return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true
+ }
+
+ return Key{}, false
+}
+
+// CertificationKey returns the best candidate Key for certifying a key with this
+// Entity.
+func (e *Entity) CertificationKey(now time.Time) (Key, bool) {
+ return e.CertificationKeyById(now, 0)
+}
+
+// CertificationKeyById returns the Key for key certification with this
+// Entity and keyID.
+func (e *Entity) CertificationKeyById(now time.Time, id uint64) (Key, bool) {
+ return e.signingKeyByIdUsage(now, id, packet.KeyFlagCertify)
+}
+
+// SigningKey returns the best candidate Key for signing a message with this
+// Entity.
+func (e *Entity) SigningKey(now time.Time) (Key, bool) {
+ return e.SigningKeyById(now, 0)
+}
+
+// SigningKeyById returns the Key for signing a message with this
+// Entity and keyID.
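+//
+// An illustrative sketch (now and keyID are assumed to be supplied by the
+// caller; an id of 0 matches any key, which is how SigningKey uses it):
+//
+//	if key, ok := e.SigningKeyById(now, keyID); ok {
+//		// key.PrivateKey is the signing candidate, key.Revocations its revocations
+//	}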
+func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) {
+ return e.signingKeyByIdUsage(now, id, packet.KeyFlagSign)
+}
+
+func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) {
+ // Fail to find any signing key if the...
+ i := e.PrimaryIdentity()
+ if i.SelfSignature == nil || // user ID has no self-signature
+ e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired
+ i.SelfSignature.SigExpired(now) || // user ID self-signature has expired
+ e.Revoked(now) || // primary key has been revoked
+ i.Revoked(now) { // user ID has been revoked
+ return Key{}, false
+ }
+
+ // Iterate the keys to find the newest, unexpired one
+ candidateSubkey := -1
+ var maxTime time.Time
+ for idx, subkey := range e.Subkeys {
+ if subkey.Sig.FlagsValid &&
+ (flags&packet.KeyFlagCertify == 0 || subkey.Sig.FlagCertify) &&
+ (flags&packet.KeyFlagSign == 0 || subkey.Sig.FlagSign) &&
+ subkey.PublicKey.PubKeyAlgo.CanSign() &&
+ !subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
+ !subkey.Sig.SigExpired(now) &&
+ !subkey.Revoked(now) &&
+ (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) &&
+ (id == 0 || subkey.PublicKey.KeyId == id) {
+ candidateSubkey = idx
+ maxTime = subkey.Sig.CreationTime
+ }
+ }
+
+ if candidateSubkey != -1 {
+ subkey := e.Subkeys[candidateSubkey]
+ return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
+ }
+
+ // If we don't have any subkeys for signing and the primary key
+ // is marked as OK to sign with, then we can use it.
+ if i.SelfSignature.FlagsValid &&
+ (flags&packet.KeyFlagCertify == 0 || i.SelfSignature.FlagCertify) &&
+ (flags&packet.KeyFlagSign == 0 || i.SelfSignature.FlagSign) &&
+ e.PrimaryKey.PubKeyAlgo.CanSign() &&
+ (id == 0 || e.PrimaryKey.KeyId == id) {
+ return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, e.Revocations}, true
+ }
+
+ // No keys with a valid Signing Flag or no keys matched the id passed in
+ return Key{}, false
+}
+
+func revoked(revocations []*packet.Signature, now time.Time) bool {
+ for _, revocation := range revocations {
+ if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised {
+ // If the key is compromised, the key is considered revoked even before the revocation date.
+ return true
+ }
+ if !revocation.SigExpired(now) {
+ return true
+ }
+ }
+ return false
+}
+
+// Revoked returns whether the entity has any direct key revocation signatures.
+// Note that third-party revocation signatures are not supported.
+// Note also that Identity and Subkey revocation should be checked separately.
+func (e *Entity) Revoked(now time.Time) bool {
+ return revoked(e.Revocations, now)
+}
+
+// EncryptPrivateKeys encrypts all non-encrypted keys in the entity with the same key
+// derived from the provided passphrase. Public keys and dummy keys are ignored,
+// and don't cause an error to be returned.
+func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) error {
+ var keysToEncrypt []*packet.PrivateKey
+ // Add entity private key to encrypt.
+ if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted {
+ keysToEncrypt = append(keysToEncrypt, e.PrivateKey)
+ }
+
+ // Add subkeys to encrypt.
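+ // (as with the primary key above, GNU dummy keys and keys that are
+ // already encrypted are skipped)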
+ for _, sub := range e.Subkeys {
+ if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && !sub.PrivateKey.Encrypted {
+ keysToEncrypt = append(keysToEncrypt, sub.PrivateKey)
+ }
+ }
+ return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config)
+}
+
+// DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase.
+// Avoids recomputation of similar s2k key derivations. Public keys and dummy keys are ignored,
+// and don't cause an error to be returned.
+func (e *Entity) DecryptPrivateKeys(passphrase []byte) error {
+ var keysToDecrypt []*packet.PrivateKey
+ // Add entity private key to decrypt.
+ if e.PrivateKey != nil && !e.PrivateKey.Dummy() && e.PrivateKey.Encrypted {
+ keysToDecrypt = append(keysToDecrypt, e.PrivateKey)
+ }
+
+ // Add subkeys to decrypt.
+ for _, sub := range e.Subkeys {
+ if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted {
+ keysToDecrypt = append(keysToDecrypt, sub.PrivateKey)
+ }
+ }
+ return packet.DecryptPrivateKeys(keysToDecrypt, passphrase)
+}
+
+// Revoked returns whether the identity has been revoked by a self-signature.
+// Note that third-party revocation signatures are not supported.
+func (i *Identity) Revoked(now time.Time) bool {
+ return revoked(i.Revocations, now)
+}
+
+// Revoked returns whether the subkey has been revoked by a self-signature.
+// Note that third-party revocation signatures are not supported.
+func (s *Subkey) Revoked(now time.Time) bool {
+ return revoked(s.Revocations, now)
+}
+
+// Revoked returns whether the key or subkey has been revoked by a self-signature.
+// Note that third-party revocation signatures are not supported.
+// Note also that Identity revocation should be checked separately.
+// Normally, it's not necessary to call this function, except on keys returned by
+// KeysById or KeysByIdUsage.
+func (key *Key) Revoked(now time.Time) bool {
+ return revoked(key.Revocations, now)
+}
+
+// An EntityList contains one or more Entities.
+type EntityList []*Entity
+
+// KeysById returns the set of keys that have the given key id.
+func (el EntityList) KeysById(id uint64) (keys []Key) {
+ for _, e := range el {
+ if e.PrimaryKey.KeyId == id {
+ ident := e.PrimaryIdentity()
+ selfSig := ident.SelfSignature
+ keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations})
+ }
+
+ for _, subKey := range e.Subkeys {
+ if subKey.PublicKey.KeyId == id {
+ keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
+ }
+ }
+ }
+ return
+}
+
+// KeysByIdUsage returns the set of keys with the given id that also meet
+// the key usage given by requiredUsage. The requiredUsage is expressed as
+// the bitwise-OR of packet.KeyFlag* values.
+func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
+ for _, key := range el.KeysById(id) {
+ if requiredUsage != 0 {
+ if key.SelfSignature == nil || !key.SelfSignature.FlagsValid {
+ continue
+ }
+
+ var usage byte
+ if key.SelfSignature.FlagCertify {
+ usage |= packet.KeyFlagCertify
+ }
+ if key.SelfSignature.FlagSign {
+ usage |= packet.KeyFlagSign
+ }
+ if key.SelfSignature.FlagEncryptCommunications {
+ usage |= packet.KeyFlagEncryptCommunications
+ }
+ if key.SelfSignature.FlagEncryptStorage {
+ usage |= packet.KeyFlagEncryptStorage
+ }
+ if usage&requiredUsage != requiredUsage {
+ continue
+ }
+ }
+
+ keys = append(keys, key)
+ }
+ return
+}
+
+// DecryptionKeys returns all private keys that are valid for decryption.
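+//
+// Sketch of typical use (el is assumed to come from ReadArmoredKeyRing below):
+//
+//	for _, k := range el.DecryptionKeys() {
+//		// try k.PrivateKey against the message's encrypted session keys
+//	}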
+func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && subKey.Sig.FlagsValid && (subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var revocations []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + if err := addUserID(e, packets, pkt); err != nil { + return nil, err + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + // TODO: RFC4880 5.2.1 permits signatures + // directly on keys (eg. 
to bind additional + // revocation keys). + } + // Else, ignoring the signature as it does not follow anything + // we would know to attach it to. + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { + // Make a new Identity object, that we might wind up throwing away. + // We'll only add it if we get a valid self-signature over this + // userID. + identity := new(Identity) + identity.Name = pkt.Id + identity.UserId = pkt + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeGenericCert && + sig.SigType != packet.SigTypePersonaCert && + sig.SigType != packet.SigTypeCasualCert && + sig.SigType != packet.SigTypePositiveCert && + sig.SigType != packet.SigTypeCertificationRevocation { + return errors.StructuralError("user ID signature with wrong type") + } + + if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + if sig.SigType == packet.SigTypeCertificationRevocation { + identity.Revocations = append(identity.Revocations, sig) + } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { + identity.SelfSignature = sig + } + identity.Signatures = append(identity.Signatures, sig) + e.Identities[pkt.Id] = identity + } else { + identity.Signatures = append(identity.Signatures, sig) + } + } + + return nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + + if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + switch sig.SigType { + case packet.SigTypeSubkeyRevocation: + subKey.Revocations = append(subKey.Revocations, sig) + case packet.SigTypeSubkeyBinding: + if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) { + subKey.Sig = sig + } + } + } + + if 
subKey.Sig == nil {
+ return errors.StructuralError("subkey packet not followed by signature")
+ }
+
+ e.Subkeys = append(e.Subkeys, subKey)
+
+ return nil
+}
+
+// SerializePrivate serializes an Entity, including private key material, but
+// excluding signatures from other entities, to the given Writer.
+// Identities and subkeys are re-signed in case they changed since NewEntity.
+// If config is nil, sensible defaults will be used.
+func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
+ if e.PrivateKey.Dummy() {
+ return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities")
+ }
+ return e.serializePrivate(w, config, true)
+}
+
+// SerializePrivateWithoutSigning serializes an Entity, including private key
+// material, but excluding signatures from other entities, to the given Writer.
+// Self-signatures of identities and subkeys are not re-signed. This is useful
+// when serializing GNU dummy keys, among other things.
+// If config is nil, sensible defaults will be used.
+func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) {
+ return e.serializePrivate(w, config, false)
+}
+
+func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) {
+ if e.PrivateKey == nil {
+ return goerrors.New("openpgp: private key is missing")
+ }
+ err = e.PrivateKey.Serialize(w)
+ if err != nil {
+ return
+ }
+ for _, revocation := range e.Revocations {
+ err := revocation.Serialize(w)
+ if err != nil {
+ return err
+ }
+ }
+ for _, ident := range e.Identities {
+ err = ident.UserId.Serialize(w)
+ if err != nil {
+ return
+ }
+ if reSign {
+ if ident.SelfSignature == nil {
+ return goerrors.New("openpgp: can't re-sign identity without valid self-signature")
+ }
+ err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
+ if err != nil {
+ return
+ }
+ }
+ for _, sig := range ident.Signatures {
+ err = sig.Serialize(w)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for _, subkey := range e.Subkeys {
+ err = subkey.PrivateKey.Serialize(w)
+ if err != nil {
+ return
+ }
+ if reSign {
+ err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
+ if err != nil {
+ return
+ }
+ if subkey.Sig.EmbeddedSignature != nil {
+ err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey,
+ subkey.PrivateKey, config)
+ if err != nil {
+ return
+ }
+ }
+ }
+ for _, revocation := range subkey.Revocations {
+ err := revocation.Serialize(w)
+ if err != nil {
+ return err
+ }
+ }
+ err = subkey.Sig.Serialize(w)
+ if err != nil {
+ return
+ }
+ }
+ return nil
+}
+
+// Serialize writes the public part of the given Entity to w, including
+// signatures from other entities. No private key material will be output.
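+//
+// A hedged sketch of exporting an armored public key with the armor package
+// imported above (out is an assumed io.Writer):
+//
+//	aw, err := armor.Encode(out, PublicKeyType, nil)
+//	if err == nil {
+//		err = e.Serialize(aw)
+//		aw.Close()
+//	}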
+func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, revocation := range e.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + for _, revocation := range subkey.Revocations { + err := revocation.Serialize(w) + if err != nil { + return err + } + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. +func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + certificationKey, ok := signer.CertificationKey(config.Now()) + if !ok { + return errors.InvalidArgumentError("no valid certification key found") + } + + if certificationKey.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := createSignaturePacket(certificationKey.PublicKey, packet.SigTypeGenericCert, config) + + signingUserID := config.SigningUserId() + if signingUserID != "" { + if _, ok := signer.Identities[signingUserID]; !ok { + return errors.InvalidArgumentError("signer identity string not found in signer Entity") + } + sig.SignerUserId = &signingUserID + } + + if err := sig.SignUserId(identity, e.PrimaryKey, certificationKey.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} + +// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the +// specified reason code and text (RFC4880 section-5.2.3.23). +// If config is nil, sensible defaults will be used. +func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { + revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeKeyRevocation, config) + revSig.RevocationReason = &reason + revSig.RevocationReasonText = reasonText + + if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil { + return err + } + e.Revocations = append(e.Revocations, revSig) + return nil +} + +// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for +// a subkey with the specified reason code and text (RFC4880 section-5.2.3.23). +// If config is nil, sensible defaults will be used. 
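+//
+// Sketch (sk is assumed to point at one of e.Subkeys; the reason constants
+// live in the packet package):
+//
+//	err := e.RevokeSubkey(sk, packet.KeyCompromised, "key material exposed", nil)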
+func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { + if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil { + return errors.InvalidArgumentError("given subkey is not associated with this key") + } + + revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyRevocation, config) + revSig.RevocationReason = &reason + revSig.RevocationReasonText = reasonText + + if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + sk.Revocations = append(sk.Revocations, revSig) + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go new file mode 100644 index 000000000..108fd096f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -0,0 +1,538 @@ +package openpgp + +const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c0053242c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9adb9b970cd9e586690e6e0756705432306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e775
4a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021b0c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56" +const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d
730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" +const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" +const revokedSubkeyHex = 
"988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a002
1b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" + +const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv +2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR +bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL +C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP +WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y +MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA +EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ +MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N +1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm ++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N +lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW +CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF +4artDmrG +=7FfJ +-----END PGP PUBLIC KEY BLOCK-----` + +const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + 
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ +UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe +iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK +FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8 +R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh ++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA +EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO +52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb +u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl +w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep +54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+ +YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL +bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E +i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB +DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1 +8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY +s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745 +U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL +6LCg2mg= +=Dhm4 +-----END PGP PUBLIC KEY BLOCK-----` + +const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo +7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom +lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0 +E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw +6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH +7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv +X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7 +GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl +y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw +R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW +CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+ +LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO +aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx +yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl +BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr +Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK +CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp +C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ +SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/ +MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70= +=vtbN +-----END PGP PUBLIC KEY BLOCK-----` + 
+const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e +DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/ +uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW +ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx +nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ +x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg +PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy +9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ +1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2 +depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl +aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2 +DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa +XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU +8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2 +b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD +BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG +0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N +s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb +tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0 +BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE +/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7 +kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z +VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa +PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ +snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi +bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8 +K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X +8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+ +TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb +OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l +QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V +yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U +heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB +7qTZOahrETw= +=IKnw +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: OpenPGP.js v4.10.10 +Comment: https://openpgpjs.org + +xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q +lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN +91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO +XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb +naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX +8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB +AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK +ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS +gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT +6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ +q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy ++I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY +bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j +SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk +kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu +Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC +GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW 
+IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1 +mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG +LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK +zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/ +BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ +aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD +rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm +yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2 +UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb +YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt +amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM +u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/ +Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS +gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB +cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N +nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN +kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF +I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb +hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z +FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw= +=+2T8 +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll +JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+ +iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0 +yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e +/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI +q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V +xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw +ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB +B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh +BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u +8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA +mcG3/TwG5GSQUUPkrDsGDA== +=mFWy +-----END PGP PUBLIC KEY BLOCK----- +` + +const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR +s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL +EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/ +lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7 +2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0 +69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9 +Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b +wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW +FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR +AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM +vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx +22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj +7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY +AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044 +tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX +JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl +mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1 +8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC 
+0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b +4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl +Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY +=3fWu +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L +plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz +r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ +sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+ +3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm +k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s +GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G +HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT +CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR +AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN +MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j +UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS +YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs +nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/ +U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es +HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK +lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg +AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV +UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM +0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX +12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa +3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi +wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6 +Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G +1dh1WoUCyREZsJQg2YoIpWIcvw+a +=bNRo +-----END PGP PUBLIC KEY BLOCK----- +` + +const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1 + +lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0 +GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1 +I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB +/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ +CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl +rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ +6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD +ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX +M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED +Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f +yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk +UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH +e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl +AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r +oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo +UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS +YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ +kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC +WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW +n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8 
+/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3 +R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE +VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix +7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg== +=F/T0 +-----END PGP PRIVATE KEY BLOCK-----` + +const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii +B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS +PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6 +lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA +aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD +Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v +Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S +rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH +AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA +iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+ +Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ +AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou +b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw== +=KLN8 +-----END PGP PRIVATE KEY BLOCK-----` + +const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd +oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk +/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+ +gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r +njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU +iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK +Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m +kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR +MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI +zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A +rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP +CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr +MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c +bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd +hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ +3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS +ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL +Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7 +Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz +YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r +sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV +JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5 +EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt +Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw +vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ +iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg +UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y +HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL +Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy +bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP +Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic +BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3 +v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV 
+oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c +wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R +fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa +4g== +=XZm8 +-----END PGP PRIVATE KEY BLOCK-----` + +// https://tests.sequoia-pgp.org/#Certificate_expiration +// P _ U p +const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w +bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe +ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY +495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ +mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t +yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG +Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN +/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm +pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR +eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH +DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05 +ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ +UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT +74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3 +ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/ +i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj +LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/ +iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc +selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n +TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+ +Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk +jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6 +6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO// +rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm +U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR +LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw +sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx +1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld +I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ== +=AmgT +-----END PGP PUBLIC KEY BLOCK-----` + +const rsa2048PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4 + +lQPGBGL07P0BCADL0etN8efyAXA6sL2WfQvHe5wEKYXPWeN2+jiqSppfeRZAOlzP +kZ3U+cloeJriplYvVJwI3ID2aw52Z/TRn8iKRP5eOUFrEgcgl06lazLtOndK7o7p +oBV5mLtHEirFHm6W61fNt10jzM0jx0PV6nseLhFB2J42F1cmU/aBgFo41wjLSZYr +owR+v+O9S5sUXblQF6sEDcY01sBEu09zrIgT49VFwQ1Cvdh9XZEOTQBfdiugoj5a +DS3fAqAka3r1VoQK4eR7/upnYSgSACGeaQ4pUelKku5rpm50gdWTY8ppq0k9e1eT +y2x0OQcW3hWE+j4os1ca0ZEADMdqr/99MOxrABEBAAH+BwMCJWxU4VOZOJ7/I6vX 
+FxdfBhIBEXlJ52FM3S/oYtXqLhkGyrtmZOeEazVvUtuCe3M3ScHI8xCthcmE8E0j +bi+ZEHPS2NiBZtgHFF27BLn7zZuTc+oD5WKduZdK3463egnyThTqIIMl25WZBuab +k5ycwYrWwBH0jfA4gwJ13ai4pufKC2RM8qIu6YAVPglYBKFLKGvvJHa5vI+LuA0E +K+k35hIic7yVUcQneNnAF2598X5yWiieYnOZpmHlRw1zfbMwOJr3ZNj2v94u7b+L +sTa/1Uv9887Vb6sJp0c2Sh4cwEccoPYkvMqFn3ZrJUr3UdDu1K2vWohPtswzhrYV ++RdPZE5RLoCQufKvlPezk0Pzhzb3bBU7XjUbdGY1nH/EyQeBNp+Gw6qldKvzcBaB +cyOK1c6hPSszpJX93m5UxCN55IeifmcNjmbDh8vGCCdajy6d56qV2n4F3k7vt1J1 +0UlxIGhqijJoaTCX66xjLMC6VXkSz6aHQ35rnXosm/cqPcQshsZTdlfSyWkorfdr +4Hj8viBER26mjYurTMLBKDtUN724ZrR0Ev5jorX9uoKlgl87bDZHty2Ku2S+vR68 +VAvnj6Fi1BYNclnDoqxdRB2z5T9JbWE52HuG83/QsplhEqXxESDxriTyTHMbNxEe +88soVCDh4tgflZFa2ucUr6gEKJKij7jgahARnyaXfPZlQBUAS1YUeILYmN+VR+M/ +sHENpwDWc7TInn8VN638nJV+ScZGMih3AwWZTIoiLju3MMt1K0YZ3NuiqwGH4Jwg +/BbEdTWeCci9y3NEQHQ3uZZ5p6j2CwFVlK11idemCMvAiTVxF+gKdaLMkeCwKxru +J3YzhKEo+iDVYbPYBYizx/EHBn2U5kITQ5SBXzjTaaFMNZJEf9JYsL1ybPB6HOFY +VNVB2KT8CGVwtCJHb2xhbmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iQFO +BBMBCgA4FiEEC6K7U7f4qesybTnqSkra7gHusm0FAmL07P0CGwMFCwkIBwIGFQoJ +CAsCBBYCAwECHgECF4AACgkQSkra7gHusm1MvwgAxpClWkeSqIhMQfbiuz0+lOkE +89y1DCFw8bHjZoUf4/4K8hFA3dGkk+q72XFgiyaCpfXxMt6Gi+dN47t+tTv9NIqC +sukbaoJBmJDhN6+djmJOgOYy+FWsW2LAk2LOwKYulpnBZdcA5rlMAhBg7gevQpF+ +ruSU69P7UUaFJl/DC7hDmaIcj+4cjBE/HO26SnVQjoTfjZT82rDh1Wsuf8LnkJUk +b3wezBLpXKjDvdHikdv4gdlR4AputVM38aZntYYglh/EASo5TneyZ7ZscdLNRdcF +r5O2fKqrOJLOdaoYRFZZWOvP5GtEVFDU7WGivOSVfiszBE0wZR3dgZRJipHCXJ0D +xgRi9Oz9AQgAtMJcJqLLVANJHl90tWuoizDkm+Imcwq2ubQAjpclnNrODnDK+7o4 +pBsWmXbZSdkC4gY+LhOQA6bPDD0JEHM58DOnrm49BddxXAyK0HPsk4sGGt2SS86B +OawWNdfJVyqw4bAiHWDmQg4PcjBbt3ocOIxAR6I5kBSiQVxuGQs9T+Zvg3G1r3Or +fS6DzlgY3HFUML5YsGH4lOxNSOoKAP68GIH/WNdUZ+feiRg9knIib6I3Hgtf5eO8 +JRH7aWE/TD7eNu36bLLjT5TZPq5r6xaD2plbtPOyXbNPWs9qI1yG+VnErfaLY0w8 +Qo0aqzbgID+CTZVomXSOpOcQseaFKw8ZfQARAQAB/gcDArha6+/+d4OY/w9N32K9 +hFNYt4LufTETMQ+k/sBeaMuAVzmT47DlAXzkrZhGW4dZOtXMu1rXaUwHlqkhEyzL +L4MYEWVXfD+LbZNEK3MEFss6RK+UAMeT/PTV9aA8cXQVPcSJYzfBXHQ1U1hnOgrO +apn92MN8RmkhX8wJLyeWTMMuP4lXByJMmmGo8WvifeRD2kFY4y0WVBDAXJAV4Ljf +Di/bBiwoc5a+gxHuZT2W9ZSxBQJNXdt4Un2IlyZuo58s5MLx2N0EaNJ8PwRUE6fM +RZYO8aZCEPUtINE4njbvsWOMCtrblsMPwZ1B0SiIaWmLaNyGdCNKea+fCIW7kasC +JYMhnLumpUTXg5HNexkCsl7ABWj0PYBflOE61h8EjWpnQ7JBBVKS2ua4lMjwHRX7 +5o5yxym9k5UZNFdGoXVL7xpizCcdGawxTJvwhs3vBqu1ZWYCegOAZWDrOkCyhUpq +8uKMROZFbn+FwE+7tjt+v2ed62FVEvD6g4V3ThCA6mQqeOARfJWN8GZY8BDm8lht +crOXriUkrx+FlrgGtm2CkwjW5/9Xd7AhFpHnQdFeozOHyq1asNSgJF9sNi9Lz94W +skQSVRi0IExxSXYGI3Y0nnAZUe2BAQflYPJdEveSr3sKlUqXiETTA1VXsTPK3kOC +92CbLzj/Hz199jZvywwyu53I+GKMpF42rMq7zxr2oa61YWY4YE/GDezwwys/wLx/ +QpCW4X3ppI7wJjCSSqEV0baYZSSli1ayheS6dxi8QnSpX1Bmpz6gU7m/M9Sns+hl +J7ZvgpjCAiV7KJTjtclr5/S02zP78LTVkoTWoz/6MOTROwaP63VBUXX8pbJhf/vu +DLmNnDk8joMJxoDXWeNU0EnNl4hP7Z/jExRBOEO4oAnUf/Sf6gCWQhL5qcajtg6w +tGv7vx3f2IkBNgQYAQoAIBYhBAuiu1O3+KnrMm056kpK2u4B7rJtBQJi9Oz9AhsM +AAoJEEpK2u4B7rJt6lgIAMBWqP4BCOGnQXBbgJ0+ACVghpkFUXZTb/tXJc8UUvTM +8uov6k/RsqDGZrvhhufD7Wwt7j9v7dD7VPp7bPyjVWyimglQzWguTUUqLDGlstYH +5uYv1pzma0ZsAGNqFeGlTLsKOSGKFMH4rB2KfN2n51L8POvtp1y7GKZQbWIWneaB +cZr3BINU5GMvYYU7pAYcoR+mJPdJx5Up3Ocn+bn8Tu1sy9C/ArtCQucazGnoE9u1 +HhNLrh0CdzzX7TNH6TQ8LwPOvq0K5l/WqbN9lE0WBBhMv2HydxhluO8AhU+A5GqC +C+wET7nVDnhoOm/fstIeb7/LN7OYejKPeHdFBJEL9GA= +=u442 +-----END PGP PRIVATE KEY BLOCK-----` + +const curve25519PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4 + +lFgEYvTtQBYJKwYBBAHaRw8BAQdAxsNXLbrk5xOjpO24VhOMvQ0/F+JcyIkckMDH +X3FIGxcAAQDFOlunZWYuPsCx5JLp78vKqUTfgef9TGG4oD6I/Sa0zBMstCJHb2xh 
+bmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iJAEExYIADgWIQSFQHEOazmo +h1ldII4MvfnLQ4JBNwUCYvTtQAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK +CRAMvfnLQ4JBN5yeAQCKdry8B5ScCPrev2+UByMCss7Sdu5RhomCFsHdNPLcKAEA +8ugei+1owHsV+3cGwWWzKk6sLa8ZN87i3SKuOGp9DQycXQRi9O1AEgorBgEEAZdV +AQUBAQdA5CubPp8l7lrVQ25h7Hx5XN2C8xanRnnpcjzEooCaEA0DAQgHAAD/Rpc+ +sOZUXrFk9HOWB1XU41LoWbDBoG8sP8RWAVYwD5AQRYh4BBgWCAAgFiEEhUBxDms5 +qIdZXSCODL35y0OCQTcFAmL07UACGwwACgkQDL35y0OCQTcvdwEA7lb5g/YisrEf +iq660uwMGoepLUfvtqKzuQ6heYe83y0BAN65Ffg5HYOJzUEi0kZQRf7OhdtuL2kJ +SRXn8DmCTfEB +=cELM +-----END PGP PRIVATE KEY BLOCK-----` + +const curve448PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Comment: C1DB 65D5 80D7 B922 7254 4B1E A699 9895 FABA CE52 + +xYUEYV2UmRYDK2VxAc9AFyxgh5xnSbyt50TWl558mw9xdMN+/UBLr5+UMP8IsrvV +MdXuTIE8CyaUQKSotHtH2RkYEXj5nsMAAAHPQIbTMSzjIWug8UFECzAex5FHgAgH +gYF3RK+TS8D24wX8kOu2C/NoVxwGY+p+i0JHaB+7yljriSKAGxs6wsBEBB8WCgCD +BYJhXZSZBYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlv +bnMuc2VxdW9pYS1wZ3Aub3Jn5wSpIutJ5HncJWk4ruUV8GzQF390rR5+qWEAnAoY +akcDFQoIApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAALzdA5dA/fsgYg/J +qaQriYKaPUkyHL7EB3BXhV2d1h/gk+qJLvXQuU2WEJ/XSs3GrsBRiiZwvPH4o+7b +mleAxjy5wpS523vqrrBR2YZ5FwIku7WS4litSdn4AtVam/TlLdMNIf41CtFeZKBe +c5R5VNdQy8y7qy8AAADNEUN1cnZlNDQ4IE9wdGlvbiA4wsBHBBMWCgCGBYJhXZSZ +BYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlvbnMuc2Vx +dW9pYS1wZ3Aub3JnD55UsYMzE6OACP+mgw5zvT+BBgol8/uFQjHg4krjUCMDFQoI +ApkBApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAPQJA5dA0Xqwzn/0uwCq +RlsOVCB3f5NOj1exKnlBvRw0xT1VBee1yxvlUt5eIAoCxWoRlWBJob3TTkhm9AEA +8dyhwPmyGfWHzPw5NFG3xsXrZdNXNvit9WMVAPcmsyR7teXuDlJItxRAdJJc/qfJ +YVbBFoaNrhYAAADHhQRhXZSZFgMrZXEBz0BL7THZ9MnCLfSPJ1FMLim9eGkQ3Bfn +M3he5rOwO3t14QI1LjI96OjkeJipMgcFAmEP1Bq/ZHGO7oAAAc9AFnE8iNBaT3OU +EFtxkmWHXtdaYMmGGRdopw9JPXr/UxuunDln5o9dxPxf7q7z26zXrZen+qed/Isa +HsDCwSwEGBYKAWsFgmFdlJkFiQWkj70JEKaZmJX6us5SRxQAAAAAAB4AIHNhbHRA +bm90YXRpb25zLnNlcXVvaWEtcGdwLm9yZxREUizdTcepBzgSMOv2VWQCWbl++3CZ +EbgAWDryvSsyApsCwDGgBBkWCgBvBYJhXZSZCRBKo3SL4S5djkcUAAAAAAAeACBz +YWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmemoGTDjmNQiIzw6HOEddvS0OB7 +UZ/P07jM/EVmnYxTlBYhBAxsnkGpx1UCiH6gUUqjdIvhLl2OAAALYQOXQAMB1oKq +OWxSFmvmgCKNcbAAyA3piF5ERIqs4z07oJvqDYrOWt75UsEIH/04gU/vHc4EmfG2 +JDLJgOLlyTUPkL/08f0ydGZPofFQBhn8HkuFFjnNtJ5oz3GIP4cdWMQFaUw0uvjb +PM9Tm3ptENGd6Ts1AAAAFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAGpTA5dATR6i +U2GrpUcQgpG+JqfAsGmF4yAOhgFxc1UfidFk3nTup3fLgjipkYY170WLRNbyKkVO +Sodx93GAs58rizO1acDAWiLq3cyEPBFXbyFThbcNPcLl+/77Uk/mgkYrPQFAQWdK +1kSRm4SizDBK37K8ChAAAADHhwRhXZSZEgMrZW8Bx0DMhzvhQo+OsXeqQ6QVw4sF +CaexHh6rLohh7TzL3hQSjoJ27fV6JBkIWdn0LfrMlJIDbSv2SLdlgQMBCgkAAcdA +MO7Dc1myF6Co1fAH+EuP+OxhxP/7V6ljuSCZENDfA49tQkzTta+PniG+pOVB2LHb +huyaKBkqiaogo8LAOQQYFgoAeAWCYV2UmQWJBaSPvQkQppmYlfq6zlJHFAAAAAAA +HgAgc2FsdEBub3RhdGlvbnMuc2VxdW9pYS1wZ3Aub3JnEjBMQAmc/2u45u5FQGmB +QAytjSG2LM3JQN+PPVl5vEkCmwwWIQTB22XVgNe5InJUSx6mmZiV+rrOUgAASdYD +l0DXEHQ9ykNP2rZP35ET1dmiFagFtTj/hLQcWlg16LqvJNGqOgYXuqTerbiOOt02 +XLCBln+wdewpU4ChEffMUDRBfqfQco/YsMqWV7bHJHAO0eC/DMKCjyU90xdH7R/d +QgqsfguR1PqPuJxpXV4bSr6CGAAAAA== +=MSvh +-----END PGP PRIVATE KEY BLOCK-----` + +const keyWithNotation = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xVgEY9gIshYJKwYBBAHaRw8BAQdAF25fSM8OpFlXZhop4Qpqo5ywGZ4jgWlR +ppjhIKDthREAAQC+LFpzFcMJYcjxGKzBGHN0Px2jU4d04YSRnFAik+lVVQ6u +zRdUZXN0IDx0ZXN0QGV4YW1wbGUuY29tPsLACgQQFgoAfAUCY9gIsgQLCQcI +CRD/utJOCym8pR0UgAAAAAAQAAR0ZXh0QGV4YW1wbGUuY29tdGVzdB8UAAAA +AAASAARiaW5hcnlAZXhhbXBsZS5jb20AAQIDAxUICgQWAAIBAhkBAhsDAh4B +FiEEEMCQTUVGKgCX5rDQ/7rSTgspvKUAAPl5AP9Npz90LxzrB97Qr2DrGwfG 
+wuYn4FSYwtuPfZHHeoIabwD/QEbvpQJ/NBb9EAZuow4Rirlt1yv19mmnF+j5 +8yUzhQjHXQRj2AiyEgorBgEEAZdVAQUBAQdARXAo30DmKcyUg6co7OUm0RNT +z9iqFbDBzA8A47JEt1MDAQgHAAD/XKK3lBm0SqMR558HLWdBrNG6NqKuqb5X +joCML987ZNgRD8J4BBgWCAAqBQJj2AiyCRD/utJOCym8pQIbDBYhBBDAkE1F +RioAl+aw0P+60k4LKbylAADRxgEAg7UfBDiDPp5LHcW9D+SgFHk6+GyEU4ev +VppQxdtxPvAA/34snHBX7Twnip1nMt7P4e2hDiw/hwQ7oqioOvc6jMkP +=Z8YJ +-----END PGP PRIVATE KEY BLOCK----- +` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go new file mode 100644 index 000000000..fec41a0e7 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go @@ -0,0 +1,67 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import "math/bits" + +// CipherSuite contains a combination of Cipher and Mode +type CipherSuite struct { + // The cipher function + Cipher CipherFunction + // The AEAD mode of operation. + Mode AEADMode +} + +// AEADConfig collects a number of AEAD parameters along with sensible defaults. +// A nil AEADConfig is valid and results in all default values. +type AEADConfig struct { + // The AEAD mode of operation. + DefaultMode AEADMode + // Amount of octets in each chunk of data + ChunkSize uint64 +} + +// Mode returns the AEAD mode of operation. +func (conf *AEADConfig) Mode() AEADMode { + // If no preference is specified, OCB is used (which is mandatory to implement). + if conf == nil || conf.DefaultMode == 0 { + return AEADModeOCB + } + + mode := conf.DefaultMode + if mode != AEADModeEAX && mode != AEADModeOCB && mode != AEADModeGCM { + panic("AEAD mode unsupported") + } + return mode +} + +// ChunkSizeByte returns the byte indicating the chunk size. The effective +// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6) +// limit to 16 = 4 MiB +// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 +func (conf *AEADConfig) ChunkSizeByte() byte { + if conf == nil || conf.ChunkSize == 0 { + return 12 // 1 << (12 + 6) == 262144 bytes + } + + chunkSize := conf.ChunkSize + exponent := bits.Len64(chunkSize) - 1 + switch { + case exponent < 6: + exponent = 6 + case exponent > 16: + exponent = 16 + } + + return byte(exponent - 6) +} + +// decodeAEADChunkSize returns the effective chunk size. In 32-bit systems, the +// maximum returned value is 1 << 30. +func decodeAEADChunkSize(c byte) int { + size := uint64(1 << (c + 6)) + if size != uint64(int(size)) { + return 1 << 30 + } + return int(size) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go new file mode 100644 index 000000000..cee83bdc7 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go @@ -0,0 +1,264 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import ( + "bytes" + "crypto/cipher" + "encoding/binary" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// aeadCrypter is an AEAD opener/sealer, its configuration, and data for en/decryption. 
+type aeadCrypter struct {
+	aead           cipher.AEAD
+	chunkSize      int
+	initialNonce   []byte
+	associatedData []byte       // Chunk-independent associated data
+	chunkIndex     []byte       // Chunk counter
+	packetTag      packetType   // SEIP packet (v2) or AEAD Encrypted Data packet
+	bytesProcessed int          // Amount of plaintext bytes encrypted/decrypted
+	buffer         bytes.Buffer // Buffered bytes across chunks
+}
+
+// computeNextNonce takes the incremental chunk index and computes an
+// exclusive OR (XOR) with the least significant 8 bytes of the receiver's
+// initial nonce (see sec. 5.16.1 and 5.16.2). It returns the resulting nonce.
+func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
+	if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected {
+		return append(wo.initialNonce, wo.chunkIndex...)
+	}
+
+	nonce = make([]byte, len(wo.initialNonce))
+	copy(nonce, wo.initialNonce)
+	offset := len(wo.initialNonce) - 8
+	for i := 0; i < 8; i++ {
+		nonce[i+offset] ^= wo.chunkIndex[i]
+	}
+	return
+}
+
+// incrementIndex increments by one the big-endian integer represented by the
+// slice, modifying it in place.
+func (wo *aeadCrypter) incrementIndex() error {
+	index := wo.chunkIndex
+	if len(index) == 0 {
+		return errors.AEADError("Index has length 0")
+	}
+	for i := len(index) - 1; i >= 0; i-- {
+		if index[i] < 255 {
+			index[i]++
+			return nil
+		}
+		index[i] = 0
+	}
+	return errors.AEADError("cannot further increment index")
+}
+
+// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when
+// necessary, similar to aeadEncrypter.
+type aeadDecrypter struct {
+	aeadCrypter           // Embedded ciphertext opener
+	reader      io.Reader // 'reader' is a partialLengthReader
+	peekedBytes []byte    // Used to detect last chunk
+	eof         bool
+}
+
+// Read decrypts bytes and reads them into dst. It decrypts when necessary and
+// buffers extra decrypted bytes. It returns the number of bytes copied into dst
+// and an error.
+func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
+	// Return buffered plaintext bytes from previous calls
+	if ar.buffer.Len() > 0 {
+		return ar.buffer.Read(dst)
+	}
+
+	// Return EOF if we've previously validated the final tag
+	if ar.eof {
+		return 0, io.EOF
+	}
+
+	// Read a chunk
+	tagLen := ar.aead.Overhead()
+	cipherChunkBuf := new(bytes.Buffer)
+	_, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen))
+	cipherChunk := cipherChunkBuf.Bytes()
+	if errRead != nil && errRead != io.EOF {
+		return 0, errRead
+	}
+	decrypted, errChunk := ar.openChunk(cipherChunk)
+	if errChunk != nil {
+		return 0, errChunk
+	}
+
+	// Return decrypted bytes, buffering if necessary
+	if len(dst) < len(decrypted) {
+		n = copy(dst, decrypted[:len(dst)])
+		ar.buffer.Write(decrypted[len(dst):])
+	} else {
+		n = copy(dst, decrypted)
+	}
+
+	// Check final authentication tag
+	if errRead == io.EOF {
+		errChunk := ar.validateFinalTag(ar.peekedBytes)
+		if errChunk != nil {
+			return n, errChunk
+		}
+		ar.eof = true // Mark EOF for when we've returned all buffered data
+	}
+	return
+}
+
+// Close is a no-op. The final authentication tag of the stream was already
+// checked in the last Read call. In the future, this function could be used to
+// wipe the reader and peeked, decrypted bytes, if necessary.
+func (ar *aeadDecrypter) Close() (err error) {
+	return nil
+}
+
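An editorial aside, not part of the vendored file: the per-chunk nonce derivation above is easy to sanity-check in isolation. The sketch below mirrors what computeNextNonce does for the AEAD Encrypted Data packet, XORing the 8-byte big-endian chunk counter into the trailing bytes of a made-up initial nonce, so no two chunks under the same key share a nonce.

package main

import (
	"encoding/binary"
	"fmt"
)

// chunkNonce mirrors computeNextNonce for the AEAD Encrypted Data packet:
// XOR the big-endian chunk index into the last 8 bytes of the initial nonce.
// (For the v2 SEIP packet the index is appended instead of XORed.)
func chunkNonce(initial []byte, index uint64) []byte {
	nonce := make([]byte, len(initial))
	copy(nonce, initial)
	var idx [8]byte
	binary.BigEndian.PutUint64(idx[:], index)
	offset := len(nonce) - 8
	for i := 0; i < 8; i++ {
		nonce[offset+i] ^= idx[i]
	}
	return nonce
}

func main() {
	// 16-byte EAX IV, made up for illustration.
	iv := []byte{0xde, 0xad, 0xbe, 0xef, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	fmt.Printf("chunk 0: %x\n", chunkNonce(iv, 0))
	fmt.Printf("chunk 1: %x\n", chunkNonce(iv, 1))
}

+// openChunk decrypts and checks integrity of an encrypted chunk, returning
+// the underlying plaintext and an error. It accesses peeked bytes from the
+// next chunk to identify the last chunk and decrypt/validate accordingly.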
+func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) { + tagLen := ar.aead.Overhead() + // Restore carried bytes from last call + chunkExtra := append(ar.peekedBytes, data...) + // 'chunk' contains encrypted bytes, followed by an authentication tag. + chunk := chunkExtra[:len(chunkExtra)-tagLen] + ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:] + + adata := ar.associatedData + if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { + adata = append(ar.associatedData, ar.chunkIndex...) + } + + nonce := ar.computeNextNonce() + plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata) + if err != nil { + return nil, err + } + ar.bytesProcessed += len(plainChunk) + if err = ar.aeadCrypter.incrementIndex(); err != nil { + return nil, err + } + return plainChunk, nil +} + +// Checks the summary tag. It takes into account the total decrypted bytes into +// the associated data. It returns an error, or nil if the tag is valid. +func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { + // Associated: tag, version, cipher, aead, chunk size, ... + amountBytes := make([]byte, 8) + binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed)) + + adata := ar.associatedData + if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { + // ... index ... + adata = append(ar.associatedData, ar.chunkIndex...) + } + + // ... and total number of encrypted octets + adata = append(adata, amountBytes...) + nonce := ar.computeNextNonce() + _, err := ar.aead.Open(nil, nonce, tag, adata) + if err != nil { + return err + } + return nil +} + +// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according +// to the AEAD block size, and buffers the extra encrypted bytes for next write. +type aeadEncrypter struct { + aeadCrypter // Embedded plaintext sealer + writer io.WriteCloser // 'writer' is a partialLengthWriter +} + +// Write encrypts and writes bytes. It encrypts when necessary and buffers extra +// plaintext bytes for next call. When the stream is finished, Close() MUST be +// called to append the final tag. +func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { + // Append plaintextBytes to existing buffered bytes + n, err = aw.buffer.Write(plaintextBytes) + if err != nil { + return n, err + } + // Encrypt and write chunks + for aw.buffer.Len() >= aw.chunkSize { + plainChunk := aw.buffer.Next(aw.chunkSize) + encryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return n, err + } + _, err = aw.writer.Write(encryptedChunk) + if err != nil { + return n, err + } + } + return +} + +// Close encrypts and writes the remaining buffered plaintext if any, appends +// the final authentication tag, and closes the embedded writer. This function +// MUST be called at the end of a stream. +func (aw *aeadEncrypter) Close() (err error) { + // Encrypt and write a chunk if there's buffered data left, or if we haven't + // written any chunks yet. + if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { + plainChunk := aw.buffer.Bytes() + lastEncryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return err + } + _, err = aw.writer.Write(lastEncryptedChunk) + if err != nil { + return err + } + } + // Compute final tag (associated data: packet tag, version, cipher, aead, + // chunk size... + adata := aw.associatedData + + if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted { + // ... index ... + adata = append(aw.associatedData, aw.chunkIndex...) + } + + // ... 
and total number of encrypted octets
+	amountBytes := make([]byte, 8)
+	binary.BigEndian.PutUint64(amountBytes, uint64(aw.bytesProcessed))
+	adata = append(adata, amountBytes...)
+
+	nonce := aw.computeNextNonce()
+	finalTag := aw.aead.Seal(nil, nonce, nil, adata)
+	_, err = aw.writer.Write(finalTag)
+	if err != nil {
+		return err
+	}
+	return aw.writer.Close()
+}
+
+// sealChunk encrypts and authenticates the given chunk.
+func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
+	if len(data) > aw.chunkSize {
+		return nil, errors.AEADError("chunk exceeds maximum length")
+	}
+	if aw.associatedData == nil {
+		return nil, errors.AEADError("can't seal without headers")
+	}
+	adata := aw.associatedData
+	if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted {
+		adata = append(aw.associatedData, aw.chunkIndex...)
+	}
+
+	nonce := aw.computeNextNonce()
+	encrypted := aw.aead.Seal(nil, nonce, data, adata)
+	aw.bytesProcessed += len(data)
+	if err := aw.aeadCrypter.incrementIndex(); err != nil {
+		return nil, err
+	}
+	return encrypted, nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
new file mode 100644
index 000000000..98bd876bf
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
@@ -0,0 +1,96 @@
+// Copyright (C) 2019 ProtonTech AG
+
+package packet
+
+import (
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+)
+
+// AEADEncrypted represents an AEAD Encrypted Packet.
+// See https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t
+type AEADEncrypted struct {
+	cipher        CipherFunction
+	mode          AEADMode
+	chunkSizeByte byte
+	Contents      io.Reader // Encrypted chunks and tags
+	initialNonce  []byte    // Referred to as IV in RFC4880-bis
+}
+
+// Only currently defined version
+const aeadEncryptedVersion = 1
+
+func (ae *AEADEncrypted) parse(buf io.Reader) error {
+	headerData := make([]byte, 4)
+	if n, err := io.ReadFull(buf, headerData); n < 4 {
+		return errors.AEADError("could not read aead header:" + err.Error())
+	}
+	// Read initial nonce
+	mode := AEADMode(headerData[2])
+	nonceLen := mode.IvLength()
+
+	// This packet supports only EAX and OCB
+	// https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t
+	if nonceLen == 0 || mode > AEADModeOCB {
+		return errors.AEADError("unknown mode")
+	}
+
+	initialNonce := make([]byte, nonceLen)
+	if n, err := io.ReadFull(buf, initialNonce); n < nonceLen {
+		return errors.AEADError("could not read aead nonce:" + err.Error())
+	}
+	ae.Contents = buf
+	ae.initialNonce = initialNonce
+	c := headerData[1]
+	if _, ok := algorithm.CipherById[c]; !ok {
+		return errors.UnsupportedError("unknown cipher: " + string(c))
+	}
+	ae.cipher = CipherFunction(c)
+	ae.mode = mode
+	ae.chunkSizeByte = headerData[3]
+	return nil
+}
+
+// Decrypt returns an io.ReadCloser from which decrypted bytes can be read, or
+// an error.
+func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) {
+	return ae.decrypt(key)
+}
+
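As an editorial illustration, not part of the vendored sources: the four header octets parsed above, prefixed with the packet tag 0xD4, are exactly the associated data bound to every chunk; compare associatedData() below. The algorithm identifiers in this sketch are example values.

package main

import "fmt"

func main() {
	// Associated data for each chunk of an AEAD Encrypted Data packet.
	adata := []byte{
		0xD4, // packet tag for AEAD Encrypted Data
		1,    // aeadEncryptedVersion
		9,    // cipher (9 = AES-256 in RFC 4880's registry)
		2,    // mode (2 = OCB in the draft's AEAD numbering)
		12,   // chunk-size byte: chunks of 1<<(12+6) = 256 KiB
	}
	fmt.Printf("% x\n", adata)
}

+// decrypt prepares an aeadCrypter and returns a ReadCloser from which
+// decrypted bytes can be read (see aeadDecrypter.Read()).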
+func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) {
+	blockCipher := ae.cipher.new(key)
+	aead := ae.mode.new(blockCipher)
+	// Carry the first tagLen bytes
+	tagLen := ae.mode.TagLength()
+	peekedBytes := make([]byte, tagLen)
+	n, err := io.ReadFull(ae.Contents, peekedBytes)
+	if n < tagLen || (err != nil && err != io.EOF) {
+		return nil, errors.AEADError("Not enough data to decrypt:" + err.Error())
+	}
+	chunkSize := decodeAEADChunkSize(ae.chunkSizeByte)
+	return &aeadDecrypter{
+		aeadCrypter: aeadCrypter{
+			aead:           aead,
+			chunkSize:      chunkSize,
+			initialNonce:   ae.initialNonce,
+			associatedData: ae.associatedData(),
+			chunkIndex:     make([]byte, 8),
+			packetTag:      packetTypeAEADEncrypted,
+		},
+		reader:      ae.Contents,
+		peekedBytes: peekedBytes}, nil
+}
+
+// associatedData for chunks: tag, version, cipher, mode, chunk size byte
+func (ae *AEADEncrypted) associatedData() []byte {
+	return []byte{
+		0xD4,
+		aeadEncryptedVersion,
+		byte(ae.cipher),
+		byte(ae.mode),
+		ae.chunkSizeByte}
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
new file mode 100644
index 000000000..2f5cad71d
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
@@ -0,0 +1,125 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"compress/bzip2"
+	"compress/flate"
+	"compress/zlib"
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"io"
+	"strconv"
+)
+
+// Compressed represents a compressed OpenPGP packet. The decompressed contents
+// will contain more OpenPGP packets. See RFC 4880, section 5.6.
+type Compressed struct {
+	Body io.Reader
+}
+
+const (
+	NoCompression      = flate.NoCompression
+	BestSpeed          = flate.BestSpeed
+	BestCompression    = flate.BestCompression
+	DefaultCompression = flate.DefaultCompression
+)
+
+// CompressionConfig contains compressor configuration settings.
+type CompressionConfig struct {
+	// Level is the compression level to use. It must be set to
+	// between -1 and 9, with -1 causing the compressor to use the
+	// default compression level, 0 causing the compressor to use
+	// no compression and 1 to 9 representing increasing (better,
+	// slower) compression levels. If Level is less than -1 or
+	// more than 9, a non-nil error will be returned during
+	// encryption. See the constants above for convenient common
+	// settings for Level.
+	Level int
+}
+
+func (c *Compressed) parse(r io.Reader) error {
+	var buf [1]byte
+	_, err := readFull(r, buf[:])
+	if err != nil {
+		return err
+	}
+
+	switch buf[0] {
+	case 0:
+		c.Body = r
+	case 1:
+		c.Body = flate.NewReader(r)
+	case 2:
+		c.Body, err = zlib.NewReader(r)
+	case 3:
+		c.Body = bzip2.NewReader(r)
+	default:
+		err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
+	}
+
+	return err
+}
+
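A usage sketch, editorial only: callers compress inner packets by writing them through the io.WriteCloser returned from SerializeCompressed (defined below); closing it flushes both the compressor and the stream header. The buffer target here is just for illustration.

package main

import (
	"bytes"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// nopCloser adapts a plain writer to the io.WriteCloser that
// SerializeCompressed expects.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	cc := &packet.CompressionConfig{Level: packet.BestCompression}
	w, err := packet.SerializeCompressed(nopCloser{&buf}, packet.CompressionZLIB, cc)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("inner OpenPGP packets would go here")); err != nil {
		panic(err)
	}
	// MUST be closed: flushes the compressor and the stream header.
	if err := w.Close(); err != nil {
		panic(err)
	}
}

+// compressedWriteCloser represents the serialized compression stream
+// header and the compressor. Its Close() method ensures that both the
+// compressor and serialized stream header are closed. Its Write()
+// method writes to the compressor.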
+type compressedWriteCloser struct { + sh io.Closer // Stream Header + c io.WriteCloser // Compressor +} + +func (cwc compressedWriteCloser) Write(p []byte) (int, error) { + return cwc.c.Write(p) +} + +func (cwc compressedWriteCloser) Close() (err error) { + err = cwc.c.Close() + if err != nil { + return err + } + + return cwc.sh.Close() +} + +// SerializeCompressed serializes a compressed data packet to w and +// returns a WriteCloser to which the literal data packets themselves +// can be written and which MUST be closed on completion. If cc is +// nil, sensible defaults will be used to configure the compression +// algorithm. +func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { + compressed, err := serializeStreamHeader(w, packetTypeCompressed) + if err != nil { + return + } + + _, err = compressed.Write([]byte{uint8(algo)}) + if err != nil { + return + } + + level := DefaultCompression + if cc != nil { + level = cc.Level + } + + var compressor io.WriteCloser + switch algo { + case CompressionZIP: + compressor, err = flate.NewWriter(compressed, level) + case CompressionZLIB: + compressor, err = zlib.NewWriterLevel(compressed, level) + default: + s := strconv.Itoa(int(algo)) + err = errors.UnsupportedError("Unsupported compression algorithm: " + s) + } + if err != nil { + return + } + + literaldata = compressedWriteCloser{compressed, compressor} + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go new file mode 100644 index 000000000..04994bec9 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go @@ -0,0 +1,248 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/rand" + "io" + "math/big" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +// Config collects a number of parameters along with sensible defaults. +// A nil *Config is valid and results in all default values. +type Config struct { + // Rand provides the source of entropy. + // If nil, the crypto/rand Reader is used. + Rand io.Reader + // DefaultHash is the default hash function to be used. + // If zero, SHA-256 is used. + DefaultHash crypto.Hash + // DefaultCipher is the cipher to be used. + // If zero, AES-128 is used. + DefaultCipher CipherFunction + // Time returns the current time as the number of seconds since the + // epoch. If Time is nil, time.Now is used. + Time func() time.Time + // DefaultCompressionAlgo is the compression algorithm to be + // applied to the plaintext before encryption. If zero, no + // compression is done. + DefaultCompressionAlgo CompressionAlgo + // CompressionConfig configures the compression settings. + CompressionConfig *CompressionConfig + // S2K (String to Key) config, used for key derivation in the context of secret key encryption + // and password-encrypted data. + // If nil, the default configuration is used + S2KConfig *s2k.Config + // Iteration count for Iterated S2K (String to Key). + // Only used if sk2.Mode is nil. + // This value is duplicated here from s2k.Config for backwards compatibility. + // It determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 65536 and 65011712, inclusive. 
If Config
+	// is nil or S2KCount is 0, the value 16777216 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	//
+	// Deprecated: S2KCount should be configured in S2KConfig instead.
+	S2KCount int
+	// RSABits is the number of bits in new RSA keys made with NewEntity.
+	// If zero, then 2048 bit keys are created.
+	RSABits int
+	// The public key algorithm to use - will always create a signing primary
+	// key and encryption subkey.
+	Algorithm PublicKeyAlgorithm
+	// Some known primes that are optionally prepopulated by the caller
+	RSAPrimes []*big.Int
+	// Curve configures the desired packet.Curve if the Algorithm is PubKeyAlgoECDSA,
+	// PubKeyAlgoEdDSA, or PubKeyAlgoECDH. If empty Curve25519 is used.
+	Curve Curve
+	// AEADConfig configures the use of the new AEAD Encrypted Data Packet,
+	// defined in the draft of the next version of the OpenPGP specification.
+	// If a non-nil AEADConfig is passed, usage of this packet is enabled. By
+	// default, it is disabled. See the documentation of AEADConfig for more
+	// configuration options related to AEAD.
+	// **Note: using this option may break compatibility with other OpenPGP
+	// implementations, as well as future versions of this library.**
+	AEADConfig *AEADConfig
+	// V5Keys configures version 5 key generation. If false, this package still
+	// supports version 5 keys, but produces version 4 keys.
+	V5Keys bool
+	// "The validity period of the key. This is the number of seconds after
+	// the key creation time that the key expires. If this is not present
+	// or has a value of zero, the key never expires. This is found only on
+	// a self-signature."
+	// https://tools.ietf.org/html/rfc4880#section-5.2.3.6
+	KeyLifetimeSecs uint32
+	// "The validity period of the signature. This is the number of seconds
+	// after the signature creation time that the signature expires. If
+	// this is not present or has a value of zero, it never expires."
+	// https://tools.ietf.org/html/rfc4880#section-5.2.3.10
+	SigLifetimeSecs uint32
+	// SigningKeyId is used to specify the signing key to use (by Key ID).
+	// By default, the signing key is selected automatically, preferring
+	// signing subkeys if available.
+	SigningKeyId uint64
+	// SigningIdentity is used to specify a user ID (packet Signer's User ID, type 28)
+	// when producing a generic certification signature onto an existing user ID.
+	// The identity must be present in the signer Entity.
+	SigningIdentity string
+	// InsecureAllowUnauthenticatedMessages controls whether it is tolerated to read
+	// encrypted messages without Modification Detection Code (MDC).
+	// MDC is mandated by the IETF OpenPGP Crypto Refresh draft and has long been implemented
+	// in most OpenPGP implementations. Messages without MDC are considered unnecessarily
+	// insecure and should be prevented whenever possible.
+	// In case one needs to deal with messages from very old OpenPGP implementations, there
+	// might be no other way than to tolerate the missing MDC. Setting this flag allows this
+	// mode of operation. It should be considered a measure of last resort.
+	InsecureAllowUnauthenticatedMessages bool
+	// KnownNotations is a map of Notation Data names to bools, which controls
+	// the notation names that are allowed to be present in critical Notation Data
+	// signature subpackets.
+	KnownNotations map[string]bool
+	// SignatureNotations is a list of Notations to be added to any signatures.
+	SignatureNotations []*Notation
+}
+
+func (c *Config) Random() io.Reader {
+	if c == nil || c.Rand == nil {
+		return rand.Reader
+	}
+	return c.Rand
+}
+
+func (c *Config) Hash() crypto.Hash {
+	if c == nil || uint(c.DefaultHash) == 0 {
+		return crypto.SHA256
+	}
+	return c.DefaultHash
+}
+
+func (c *Config) Cipher() CipherFunction {
+	if c == nil || uint8(c.DefaultCipher) == 0 {
+		return CipherAES128
+	}
+	return c.DefaultCipher
+}
+
+func (c *Config) Now() time.Time {
+	if c == nil || c.Time == nil {
+		return time.Now().Truncate(time.Second)
+	}
+	return c.Time().Truncate(time.Second)
+}
+
+// KeyLifetime returns the validity period of the key.
+func (c *Config) KeyLifetime() uint32 {
+	if c == nil {
+		return 0
+	}
+	return c.KeyLifetimeSecs
+}
+
+// SigLifetime returns the validity period of the signature.
+func (c *Config) SigLifetime() uint32 {
+	if c == nil {
+		return 0
+	}
+	return c.SigLifetimeSecs
+}
+
+func (c *Config) Compression() CompressionAlgo {
+	if c == nil {
+		return CompressionNone
+	}
+	return c.DefaultCompressionAlgo
+}
+
+func (c *Config) RSAModulusBits() int {
+	if c == nil || c.RSABits == 0 {
+		return 2048
+	}
+	return c.RSABits
+}
+
+func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm {
+	if c == nil || c.Algorithm == 0 {
+		return PubKeyAlgoRSA
+	}
+	return c.Algorithm
+}
+
+func (c *Config) CurveName() Curve {
+	if c == nil || c.Curve == "" {
+		return Curve25519
+	}
+	return c.Curve
+}
+
+// Deprecated: The hash iterations should now be queried via the S2K() method.
+func (c *Config) PasswordHashIterations() int {
+	if c == nil || c.S2KCount == 0 {
+		return 0
+	}
+	return c.S2KCount
+}
+
+func (c *Config) S2K() *s2k.Config {
+	if c == nil {
+		return nil
+	}
+	// for backwards compatibility
+	if c.S2KCount > 0 && c.S2KConfig == nil {
+		return &s2k.Config{
+			S2KCount: c.S2KCount,
+		}
+	}
+	return c.S2KConfig
+}
+
+func (c *Config) AEAD() *AEADConfig {
+	if c == nil {
+		return nil
+	}
+	return c.AEADConfig
+}
+
+func (c *Config) SigningKey() uint64 {
+	if c == nil {
+		return 0
+	}
+	return c.SigningKeyId
+}
+
+func (c *Config) SigningUserId() string {
+	if c == nil {
+		return ""
+	}
+	return c.SigningIdentity
+}
+
+func (c *Config) AllowUnauthenticatedMessages() bool {
+	if c == nil {
+		return false
+	}
+	return c.InsecureAllowUnauthenticatedMessages
+}
+
+func (c *Config) KnownNotation(notationName string) bool {
+	if c == nil {
+		return false
+	}
+	return c.KnownNotations[notationName]
+}
+
+func (c *Config) Notations() []*Notation {
+	if c == nil {
+		return nil
+	}
+	return c.SignatureNotations
+}
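Editorial note: because every accessor above tolerates a nil receiver, a nil *Config is a complete "use all defaults" configuration. A minimal sketch of that behavior, assuming the public github.com/ProtonMail/go-crypto/openpgp/packet import path:

package main

import (
	"crypto"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// A nil *Config is valid: every accessor falls back to its default.
	var cfg *packet.Config
	fmt.Println(cfg.Hash() == crypto.SHA256)         // true
	fmt.Println(cfg.Cipher() == packet.CipherAES128) // true
	fmt.Println(cfg.RSAModulusBits())                // 2048

	// Overrides are per-field; unset fields keep their defaults.
	cfg = &packet.Config{DefaultHash: crypto.SHA512}
	fmt.Println(cfg.Hash() == crypto.SHA512) // true
	fmt.Println(cfg.RSAModulusBits())        // still 2048
}

diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
new file mode 100644
index 000000000..eeff2902c
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
@@ -0,0 +1,286 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.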
+ +package packet + +import ( + "crypto" + "crypto/rsa" + "encoding/binary" + "io" + "math/big" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +const encryptedKeyVersion = 3 + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. +type EncryptedKey struct { + KeyId uint64 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet + Key []byte // only valid after a successful Decrypt + + encryptedMPI1, encryptedMPI2 encoding.Field +} + +func (e *EncryptedKey) parse(r io.Reader) (err error) { + var buf [10]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != encryptedKeyVersion { + return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + e.KeyId = binary.BigEndian.Uint64(buf[1:9]) + e.Algo = PublicKeyAlgorithm(buf[9]) + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoElGamal: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.MPI) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoECDH: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.OID) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + } + _, err = consumeAll(r) + return +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +// Decrypt decrypts an encrypted session key with the given private key. The +// private key must have been decrypted first. +// If config is nil, sensible defaults will be used. +func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { + if e.KeyId != 0 && e.KeyId != priv.KeyId { + return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16)) + } + if e.Algo != priv.PubKeyAlgo { + return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + + var err error + var b []byte + + // TODO(agl): use session key decryption routines here to avoid + // padding oracle attacks. 
+ switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + // Supports both *rsa.PrivateKey and crypto.Decrypter + k := priv.PrivateKey.(crypto.Decrypter) + b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil) + case PubKeyAlgoElGamal: + c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes()) + c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes()) + b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) + case PubKeyAlgoECDH: + vsG := e.encryptedMPI1.Bytes() + m := e.encryptedMPI2.Bytes() + oid := priv.PublicKey.oid.EncodedBytes() + b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:]) + default: + err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + + if err != nil { + return err + } + + e.CipherFunc = CipherFunction(b[0]) + if !e.CipherFunc.IsSupported() { + return errors.UnsupportedError("unsupported encryption function") + } + + e.Key = b[1 : len(b)-2] + expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) + checksum := checksumKeyMaterial(e.Key) + if checksum != expectedChecksum { + return errors.StructuralError("EncryptedKey checksum incorrect") + } + + return nil +} + +// Serialize writes the encrypted key packet, e, to w. +func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + case PubKeyAlgoElGamal: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + case PubKeyAlgoECDH: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + if err != nil { + return err + } + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + _, err := w.Write(e.encryptedMPI1.EncodedBytes()) + return err + case PubKeyAlgoElGamal: + if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { + return err + } + _, err := w.Write(e.encryptedMPI2.EncodedBytes()) + return err + case PubKeyAlgoECDH: + if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { + return err + } + _, err := w.Write(e.encryptedMPI2.EncodedBytes()) + return err + default: + panic("internal error") + } +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If config is nil, sensible defaults will be used. 
+func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoECDH: + return serializeEncryptedKeyECDH(w, config.Random(), buf, pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + cipherMPI := encoding.NewMPI(cipherText) + packetLen := 10 /* header length */ + int(cipherMPI.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + _, err = w.Write(cipherMPI.EncodedBytes()) + return err +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil { + return err + } + _, err = w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes()) + return err +} + +func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { + vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint) + if err != nil { + return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error()) + } + + g := encoding.NewMPI(vsG) + m := encoding.NewOID(c) + + packetLen := 10 /* header length */ + packetLen += int(g.EncodedLength()) + int(m.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(g.EncodedBytes()); err != nil { + return err + } + _, err = w.Write(m.EncodedBytes()) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go 
b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
new file mode 100644
index 000000000..4be987609
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
@@ -0,0 +1,91 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"encoding/binary"
+	"io"
+)
+
+// LiteralData represents the contents of a literal data packet. See RFC 4880,
+// section 5.9.
+type LiteralData struct {
+	Format   uint8
+	IsBinary bool
+	FileName string
+	Time     uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
+	Body     io.Reader
+}
+
+// ForEyesOnly returns whether the contents of the LiteralData have been marked
+// as especially sensitive.
+func (l *LiteralData) ForEyesOnly() bool {
+	return l.FileName == "_CONSOLE"
+}
+
+func (l *LiteralData) parse(r io.Reader) (err error) {
+	var buf [256]byte
+
+	_, err = readFull(r, buf[:2])
+	if err != nil {
+		return
+	}
+
+	l.Format = buf[0]
+	l.IsBinary = l.Format == 'b'
+	fileNameLen := int(buf[1])
+
+	_, err = readFull(r, buf[:fileNameLen])
+	if err != nil {
+		return
+	}
+
+	l.FileName = string(buf[:fileNameLen])
+
+	_, err = readFull(r, buf[:4])
+	if err != nil {
+		return
+	}
+
+	l.Time = binary.BigEndian.Uint32(buf[:4])
+	l.Body = r
+	return
+}
+
+// SerializeLiteral serializes a literal data packet to w and returns a
+// WriteCloser to which the data itself can be written and which MUST be closed
+// on completion. The fileName is truncated to 255 bytes.
+func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
+	var buf [4]byte
+	buf[0] = 't'
+	if isBinary {
+		buf[0] = 'b'
+	}
+	if len(fileName) > 255 {
+		fileName = fileName[:255]
+	}
+	buf[1] = byte(len(fileName))
+
+	inner, err := serializeStreamHeader(w, packetTypeLiteralData)
+	if err != nil {
+		return
+	}
+
+	_, err = inner.Write(buf[:2])
+	if err != nil {
+		return
+	}
+	_, err = inner.Write([]byte(fileName))
+	if err != nil {
+		return
+	}
+	binary.BigEndian.PutUint32(buf[:], time)
+	_, err = inner.Write(buf[:])
+	if err != nil {
+		return
+	}
+
+	plaintext = inner
+	return
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go
new file mode 100644
index 000000000..2c3e3f50b
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go
@@ -0,0 +1,29 @@
+package packet
+
+// Notation type represents a Notation Data subpacket.
+// See https://tools.ietf.org/html/rfc4880#section-5.2.3.16
+type Notation struct {
+	Name            string
+	Value           []byte
+	IsCritical      bool
+	IsHumanReadable bool
+}
+
+func (notation *Notation) getData() []byte {
+	nameData := []byte(notation.Name)
+	nameLen := len(nameData)
+	valueLen := len(notation.Value)
+
+	data := make([]byte, 8+nameLen+valueLen)
+	if notation.IsHumanReadable {
+		data[0] = 0x80
+	}
+
+	data[4] = byte(nameLen >> 8)
+	data[5] = byte(nameLen)
+	data[6] = byte(valueLen >> 8)
+	data[7] = byte(valueLen)
+	copy(data[8:8+nameLen], nameData)
+	copy(data[8+nameLen:], notation.Value)
+	return data
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go
new file mode 100644
index 000000000..4f26d0a00
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go
@@ -0,0 +1,137 @@
+// Copyright 2010 The Go 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 + +package packet + +import ( + "crypto/cipher" +) + +type ocfbEncrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// An OCFBResyncOption determines if the "resynchronization step" of OCFB is +// performed. +type OCFBResyncOption bool + +const ( + OCFBResync OCFBResyncOption = true + OCFBNoResync OCFBResyncOption = false +) + +// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block, and an initial amount of +// ciphertext. randData must be random bytes and be the same length as the +// cipher.Block's block size. Resync determines if the "resynchronization step" +// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on +// this point. +func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. On successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. 
+func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 000000000..033fb2d7e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "io" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. +type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. 
+func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go new file mode 100644 index 000000000..4f8204079 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go @@ -0,0 +1,171 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = ioutil.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. +type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. +func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + EncodedLength []byte // Store the original encoded length for signature verifications. + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. 
+func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + var encodedLength []byte + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + encodedLength = contents[0:1] + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + encodedLength = contents[0:2] + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + encodedLength = contents[0:5] + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.EncodedLength = encodedLength + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + copy(buf, osp.EncodedLength) + n := len(osp.EncodedLength) + + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go new file mode 100644 index 000000000..4d86a7da8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -0,0 +1,551 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "github.com/ProtonMail/go-crypto/openpgp/packet" + +import ( + "bytes" + "crypto/cipher" + "crypto/rsa" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. +func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
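+// One octet encodes 0..191 directly, two octets encode 192..8383 as
+// ((b0-192)<<8)+b1+192, octets 224..254 encode a partial body length of
+// 1<<(b0&0x1F), and 0xFF introduces a four-octet big-endian length:
+//
+//	0xC0 0x00                -> 192
+//	0xE2                     -> partial length 4, further chunks follow
+//	0xFF 0x00 0x00 0x01 0x00 -> 256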
+func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + buf bytes.Buffer + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + bufLen := w.buf.Len() + if bufLen > 512 { + for power := uint(30); ; power-- { + l := 1 << power + if bufLen >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(w.buf.Next(l)) + if err != nil { + return + } + if m != l { + return 0, io.ErrShortWrite + } + break + } + } + } + return w.buf.Write(p) +} + +func (w *partialLengthWriter) Close() (err error) { + len := w.buf.Len() + err = serializeLength(w.w, len) + if err != nil { + return err + } + _, err = w.buf.WriteTo(w.w) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
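+// The tag octet always has bit 7 set; bit 6 selects the new format (tag in
+// bits 0-5, length encoded as in readLength) over the old format (tag in bits
+// 2-5, length type in bits 0-1, where type 3 means "until EOF"). For example,
+// 0xC2 is a new-format signature packet (0x80|0x40|2), while 0x88 is an
+// old-format signature packet (0x80|2<<2) with a one-octet length.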
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + return serializeLength(w, length) +} + +// serializeType writes an OpenPGP packet type to w. See RFC 4880, section +// 4.2. +func serializeType(w io.Writer, ptype packetType) (err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + return +} + +// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section +// 4.2.2. +func serializeLength(w io.Writer, length int) (err error) { + var buf [5]byte + var n int + + if length < 192 { + buf[0] = byte(length) + n = 1 + } else if length < 8384 { + length -= 192 + buf[0] = 192 + byte(length>>8) + buf[1] = byte(length) + n = 2 + } else { + buf[0] = 255 + buf[1] = byte(length >> 24) + buf[2] = byte(length >> 16) + buf[3] = byte(length >> 8) + buf[4] = byte(length) + n = 5 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18 + packetTypeAEADEncrypted packetType = 20 +) + +// EncryptedDataPacket holds encrypted data. It is currently implemented by +// SymmetricallyEncrypted and AEADEncrypted. +type EncryptedDataPacket interface { + Decrypt(CipherFunction, []byte) (io.ReadCloser, error) +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + isSubkey := tag == packetTypePublicSubkey + p = &PublicKey{IsSubkey: isSubkey} + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedIntegrityProtected: + se := new(SymmetricallyEncrypted) + se.IntegrityProtected = true + p = se + case packetTypeAEADEncrypted: + p = new(AEADEncrypted) + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0x00 + SigTypeText = 0x01 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 + SigTypeCertificationRevocation = 0x30 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. 
+	PubKeyAlgoECDH  PublicKeyAlgorithm = 18
+	PubKeyAlgoECDSA PublicKeyAlgorithm = 19
+	// https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt
+	PubKeyAlgoEdDSA PublicKeyAlgorithm = 22
+
+	// Deprecated in RFC 4880, Section 13.5. Use key flags instead.
+	PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
+	PubKeyAlgoRSASignOnly    PublicKeyAlgorithm = 3
+)
+
+// CanEncrypt returns true if it's possible to encrypt a message to a public
+// key of the given type.
+func (pka PublicKeyAlgorithm) CanEncrypt() bool {
+	switch pka {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH:
+		return true
+	}
+	return false
+}
+
+// CanSign returns true if it's possible for a public key of the given type to
+// sign a message.
+func (pka PublicKeyAlgorithm) CanSign() bool {
+	switch pka {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
+		return true
+	}
+	return false
+}
+
+// CipherFunction represents the different block ciphers specified for OpenPGP. See
+// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
+type CipherFunction algorithm.CipherFunction
+
+const (
+	Cipher3DES   CipherFunction = 2
+	CipherCAST5  CipherFunction = 3
+	CipherAES128 CipherFunction = 7
+	CipherAES192 CipherFunction = 8
+	CipherAES256 CipherFunction = 9
+)
+
+// KeySize returns the key size, in bytes, of cipher.
+func (cipher CipherFunction) KeySize() int {
+	return algorithm.CipherFunction(cipher).KeySize()
+}
+
+// IsSupported returns true if the cipher is supported by the library.
+func (cipher CipherFunction) IsSupported() bool {
+	return algorithm.CipherFunction(cipher).KeySize() > 0
+}
+
+// blockSize returns the block size, in bytes, of cipher.
+func (cipher CipherFunction) blockSize() int {
+	return algorithm.CipherFunction(cipher).BlockSize()
+}
+
+// new returns a fresh instance of the given cipher.
+func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
+	return algorithm.CipherFunction(cipher).New(key)
+}
+
+// padToKeySize left-pads an MPI with zeroes to match the length of the
+// specified RSA public key.
+func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
+	k := (pub.N.BitLen() + 7) / 8
+	if len(b) >= k {
+		return b
+	}
+	bb := make([]byte, k)
+	copy(bb[len(bb)-len(b):], b)
+	return bb
+}
+
+// CompressionAlgo represents the different compression algorithms
+// supported by OpenPGP (except for BZIP2, which is not currently
+// supported). See Section 9.3 of RFC 4880.
+type CompressionAlgo uint8
+
+const (
+	CompressionNone CompressionAlgo = 0
+	CompressionZIP  CompressionAlgo = 1
+	CompressionZLIB CompressionAlgo = 2
+)
+
+// AEADMode represents the different Authenticated Encryption with Associated
+// Data specified for OpenPGP.
+// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6
+type AEADMode algorithm.AEADMode
+
+const (
+	AEADModeEAX AEADMode = 1
+	AEADModeOCB AEADMode = 2
+	AEADModeGCM AEADMode = 3
+)
+
+// IvLength returns the length, in bytes, of the nonce (IV) used by mode.
+func (mode AEADMode) IvLength() int {
+	return algorithm.AEADMode(mode).NonceLength()
+}
+
+// TagLength returns the length, in bytes, of the authentication tag produced by mode.
+func (mode AEADMode) TagLength() int {
+	return algorithm.AEADMode(mode).TagLength()
+}
+
+// new returns a fresh instance of the given mode.
+func (mode AEADMode) new(block cipher.Block) cipher.AEAD {
+	return algorithm.AEADMode(mode).New(block)
+}
+
+// ReasonForRevocation represents a revocation reason code as per RFC4880
+// section 5.2.3.23.
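+// The reason octet travels alongside a human-readable string in the Reason
+// for Revocation signature subpacket; for instance, KeyCompromised (2) below
+// is the appropriate code when the secret key is known to have leaked.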
+type ReasonForRevocation uint8 + +const ( + NoReason ReasonForRevocation = 0 + KeySuperseded ReasonForRevocation = 1 + KeyCompromised ReasonForRevocation = 2 + KeyRetired ReasonForRevocation = 3 +) + +// Curve is a mapping to supported ECC curves for key generation. +// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats +type Curve string + +const ( + Curve25519 Curve = "Curve25519" + Curve448 Curve = "Curve448" + CurveNistP256 Curve = "P256" + CurveNistP384 Curve = "P384" + CurveNistP521 Curve = "P521" + CurveSecP256k1 Curve = "SecP256k1" + CurveBrainpoolP256 Curve = "BrainpoolP256" + CurveBrainpoolP384 Curve = "BrainpoolP384" + CurveBrainpoolP512 Curve = "BrainpoolP512" +) + +// TrustLevel represents a trust level per RFC4880 5.2.3.13 +type TrustLevel uint8 + +// TrustAmount represents a trust amount per RFC4880 5.2.3.13 +type TrustAmount uint8 diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go new file mode 100644 index 000000000..2fc438643 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go @@ -0,0 +1,837 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/cipher" + "crypto/dsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "io" + "io/ioutil" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519}.PrivateKey or + // crypto.Signer/crypto.Decrypter (Decryptor RSA only). 
+	PrivateKey   interface{}
+	sha1Checksum bool
+	iv           []byte
+
+	// Type of encryption of the S2K packet
+	// Allowed values are 0 (Not encrypted), 254 (SHA1), or
+	// 255 (2-byte checksum)
+	s2kType S2KType
+	// Full parameters of the S2K packet
+	s2kParams *s2k.Params
+}
+
+// S2KType is the S2K usage octet of a secret-key packet.
+type S2KType uint8
+
+const (
+	// S2KNON means the key material is stored unencrypted
+	S2KNON S2KType = 0
+	// S2KSHA1 means the key material is protected by a SHA-1 checksum
+	S2KSHA1 S2KType = 254
+	// S2KCHECKSUM means the key material is protected by a 2-byte additive checksum
+	S2KCHECKSUM S2KType = 255
+)
+
+func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewEdDSAPrivateKey(creationTime time.Time, priv *eddsa.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewEdDSAPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey {
+	pk := new(PrivateKey)
+	pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey)
+	pk.PrivateKey = priv
+	return pk
+}
+
+// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
+// implements RSA, ECDSA or EdDSA.
+func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey {
+	pk := new(PrivateKey)
+	// In general, the public keys should be used as pointers. We still
+	// type-switch on the values, for backwards-compatibility.
+	switch pubkey := signer.(type) {
+	case *rsa.PrivateKey:
+		pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey)
+	case rsa.PrivateKey:
+		pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey)
+	case *ecdsa.PrivateKey:
+		pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey)
+	case ecdsa.PrivateKey:
+		pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey)
+	case *eddsa.PrivateKey:
+		pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
+	case eddsa.PrivateKey:
+		pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
+	default:
+		panic("openpgp: unknown signer type in NewSignerPrivateKey")
+	}
+	pk.PrivateKey = signer
+	return pk
+}
+
+// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh}.PrivateKey.
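+// For illustration (rsaPriv stands for any *rsa.PrivateKey the caller already
+// holds):
+//
+//	pk := NewDecrypterPrivateKey(time.Now(), rsaPriv)
+//	// pk.PublicKey is derived from rsaPriv.PublicKey, and pk can decrypt
+//	// session keys addressed to that key.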
+func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey { + pk := new(PrivateKey) + switch priv := decrypter.(type) { + case *rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + case *elgamal.PrivateKey: + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + case *ecdh.PrivateKey: + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + default: + panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey") + } + pk.PrivateKey = decrypter + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + v5 := pk.PublicKey.Version == 5 + + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.s2kType = S2KType(buf[0]) + var optCount [1]byte + if v5 { + if _, err = readFull(r, optCount[:]); err != nil { + return + } + } + + switch pk.s2kType { + case S2KNON: + pk.s2k = nil + pk.Encrypted = false + case S2KSHA1, S2KCHECKSUM: + if v5 && pk.s2kType == S2KCHECKSUM { + return errors.StructuralError("wrong s2k identifier for version 5") + } + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + if pk.cipher != 0 && !pk.cipher.IsSupported() { + return errors.UnsupportedError("unsupported cipher function in private key") + } + pk.s2kParams, err = s2k.ParseIntoParams(r) + if err != nil { + return + } + if pk.s2kParams.Dummy() { + return + } + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return + } + pk.Encrypted = true + if pk.s2kType == S2KSHA1 { + pk.sha1Checksum = true + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + var privateKeyData []byte + if v5 { + var n [4]byte /* secret material four octet count */ + _, err = readFull(r, n[:]) + if err != nil { + return + } + count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3])) + if !pk.Encrypted { + count = count + 2 /* two octet checksum */ + } + privateKeyData = make([]byte, count) + _, err = readFull(r, privateKeyData) + if err != nil { + return + } + } else { + privateKeyData, err = ioutil.ReadAll(r) + if err != nil { + return + } + } + if !pk.Encrypted { + if len(privateKeyData) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(privateKeyData)-2; i++ { + sum += uint16(privateKeyData[i]) + } + if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) || + privateKeyData[len(privateKeyData)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + privateKeyData = privateKeyData[:len(privateKeyData)-2] + return pk.parsePrivateKey(privateKeyData) + } + + pk.encryptedData = privateKeyData + return +} + +// Dummy returns true if the private key is a dummy key. This is a GNU extension. 
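+// GnuPG writes such stubs (its "gnu-dummy" S2K extension) when the secret key
+// material lives elsewhere, for example on a smartcard, so callers should
+// check Dummy before attempting to decrypt or sign with the key.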
+func (pk *PrivateKey) Dummy() bool { + return pk.s2kParams.Dummy() +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + contents := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(contents) + if err != nil { + return + } + if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil { + return + } + + optional := bytes.NewBuffer(nil) + if pk.Encrypted || pk.Dummy() { + optional.Write([]byte{uint8(pk.cipher)}) + if err := pk.s2kParams.Serialize(optional); err != nil { + return err + } + if pk.Encrypted { + optional.Write(pk.iv) + } + } + if pk.Version == 5 { + contents.Write([]byte{uint8(optional.Len())}) + } + io.Copy(contents, optional) + + if !pk.Dummy() { + l := 0 + var priv []byte + if !pk.Encrypted { + buf := bytes.NewBuffer(nil) + err = pk.serializePrivateKey(buf) + if err != nil { + return err + } + l = buf.Len() + checksum := mod64kHash(buf.Bytes()) + buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) + priv = buf.Bytes() + } else { + priv, l = pk.encryptedData, len(pk.encryptedData) + } + + if pk.Version == 5 { + contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}) + } + contents.Write(priv) + } + + ptype := packetTypePrivateKey + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, contents.Len()) + if err != nil { + return + } + _, err = io.Copy(w, contents) + if err != nil { + return + } + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil { + return err + } + _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes()) + return err +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.MarshalIntegerSecret()).EncodedBytes()) + return err +} + +func serializeEdDSAPrivateKey(w io.Writer, priv *eddsa.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes()) + return err +} + +func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes()) + return err +} + +// decrypt decrypts an encrypted private key using a decryption key. 
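+// The encrypted blob is CFB-decrypted with the derived key and pk.iv; the
+// plaintext then ends either in a 20-octet SHA-1 digest (S2K usage 254) or a
+// 2-octet additive checksum (usage 255), which is verified and stripped before
+// the algorithm-specific fields are parsed.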
+func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
+	if pk.Dummy() {
+		return errors.ErrDummyPrivateKey("dummy key found")
+	}
+	if !pk.Encrypted {
+		return nil
+	}
+
+	block := pk.cipher.new(decryptionKey)
+	cfb := cipher.NewCFBDecrypter(block, pk.iv)
+
+	data := make([]byte, len(pk.encryptedData))
+	cfb.XORKeyStream(data, pk.encryptedData)
+
+	if pk.sha1Checksum {
+		if len(data) < sha1.Size {
+			return errors.StructuralError("truncated private key data")
+		}
+		h := sha1.New()
+		h.Write(data[:len(data)-sha1.Size])
+		sum := h.Sum(nil)
+		if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
+			return errors.StructuralError("private key checksum failure")
+		}
+		data = data[:len(data)-sha1.Size]
+	} else {
+		if len(data) < 2 {
+			return errors.StructuralError("truncated private key data")
+		}
+		var sum uint16
+		for i := 0; i < len(data)-2; i++ {
+			sum += uint16(data[i])
+		}
+		if data[len(data)-2] != uint8(sum>>8) ||
+			data[len(data)-1] != uint8(sum) {
+			return errors.StructuralError("private key checksum failure")
+		}
+		data = data[:len(data)-2]
+	}
+
+	err := pk.parsePrivateKey(data)
+	if _, ok := err.(errors.KeyInvalidError); ok {
+		return errors.KeyInvalidError("invalid key parameters")
+	}
+	if err != nil {
+		return err
+	}
+
+	// Mark key as unencrypted
+	pk.s2kType = S2KNON
+	pk.s2k = nil
+	pk.Encrypted = false
+	pk.encryptedData = nil
+
+	return nil
+}
+
+func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) error {
+	if pk.Dummy() {
+		return errors.ErrDummyPrivateKey("dummy key found")
+	}
+	if !pk.Encrypted {
+		return nil
+	}
+
+	key, err := keyCache.GetOrComputeDerivedKey(passphrase, pk.s2kParams, pk.cipher.KeySize())
+	if err != nil {
+		return err
+	}
+	return pk.decrypt(key)
+}
+
+// Decrypt decrypts an encrypted private key using a passphrase.
+func (pk *PrivateKey) Decrypt(passphrase []byte) error {
+	if pk.Dummy() {
+		return errors.ErrDummyPrivateKey("dummy key found")
+	}
+	if !pk.Encrypted {
+		return nil
+	}
+
+	key := make([]byte, pk.cipher.KeySize())
+	pk.s2k(key, passphrase)
+	return pk.decrypt(key)
+}
+
+// DecryptPrivateKeys decrypts all encrypted keys with the given passphrase.
+// Avoids recomputation of similar s2k key derivations.
+func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error {
+	// Create a cache to avoid recomputation of key derivations for the same passphrase.
+	s2kCache := &s2k.Cache{}
+	for _, key := range keys {
+		if key != nil && !key.Dummy() && key.Encrypted {
+			err := key.decryptWithCache(passphrase, s2kCache)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// encrypt encrypts an unencrypted private key.
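+// The supplied key must already have the size demanded by cipherFunction
+// (for example 32 bytes for AES-256); deriving it from a passphrase is the
+// caller's job, as EncryptWithConfig below does via the configured S2K
+// parameters.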
+func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction CipherFunction) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if pk.Encrypted { + return nil + } + // check if encryptionKey has the correct size + if len(key) != cipherFunction.KeySize() { + return errors.InvalidArgumentError("supplied encryption key has the wrong size") + } + + priv := bytes.NewBuffer(nil) + err := pk.serializePrivateKey(priv) + if err != nil { + return err + } + + pk.cipher = cipherFunction + pk.s2kParams = params + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return err + } + + privateKeyBytes := priv.Bytes() + pk.sha1Checksum = true + block := pk.cipher.new(key) + pk.iv = make([]byte, pk.cipher.blockSize()) + _, err = rand.Read(pk.iv) + if err != nil { + return err + } + cfb := cipher.NewCFBEncrypter(block, pk.iv) + + if pk.sha1Checksum { + pk.s2kType = S2KSHA1 + h := sha1.New() + h.Write(privateKeyBytes) + sum := h.Sum(nil) + privateKeyBytes = append(privateKeyBytes, sum...) + } else { + pk.s2kType = S2KCHECKSUM + var sum uint16 + for _, b := range privateKeyBytes { + sum += uint16(b) + } + priv.Write([]byte{uint8(sum >> 8), uint8(sum)}) + } + + pk.encryptedData = make([]byte, len(privateKeyBytes)) + cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) + pk.Encrypted = true + pk.PrivateKey = nil + return err +} + +// EncryptWithConfig encrypts an unencrypted private key using the passphrase and the config. +func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error { + params, err := s2k.Generate(config.Random(), config.S2K()) + if err != nil { + return err + } + // Derive an encryption key with the configured s2k function. + key := make([]byte, config.Cipher().KeySize()) + s2k, err := params.Function() + if err != nil { + return err + } + s2k(key, passphrase) + // Encrypt the private key with the derived encryption key. + return pk.encrypt(key, params, config.Cipher()) +} + +// EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase. +// Only derives one key from the passphrase, which is then used to encrypt each key. +func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) error { + params, err := s2k.Generate(config.Random(), config.S2K()) + if err != nil { + return err + } + // Derive an encryption key with the configured s2k function. + encryptionKey := make([]byte, config.Cipher().KeySize()) + s2k, err := params.Function() + if err != nil { + return err + } + s2k(encryptionKey, passphrase) + for _, key := range keys { + if key != nil && !key.Dummy() && !key.Encrypted { + err = key.encrypt(encryptionKey, params, config.Cipher()) + if err != nil { + return err + } + } + } + return nil +} + +// Encrypt encrypts an unencrypted private key using a passphrase. 
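+// A minimal round trip, assuming pk holds parsed, unencrypted key material:
+//
+//	if err := pk.Encrypt([]byte("passphrase")); err != nil {
+//		// handle the error
+//	}
+//	// ...later, before pk.PrivateKey is needed again:
+//	if err := pk.Decrypt([]byte("passphrase")); err != nil {
+//		// handle the error
+//	}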
+func (pk *PrivateKey) Encrypt(passphrase []byte) error { + // Default config of private key encryption + config := &Config{ + S2KConfig: &s2k.Config{ + S2KMode: s2k.IteratedSaltedS2K, + S2KCount: 65536, + Hash: crypto.SHA256, + } , + DefaultCipher: CipherAES256, + } + return pk.EncryptWithConfig(passphrase, config) +} + +func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(w, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(w, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(w, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(w, priv) + case *eddsa.PrivateKey: + err = serializeEdDSAPrivateKey(w, priv) + case *ecdh.PrivateKey: + err = serializeECDHPrivateKey(w, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + return +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + case PubKeyAlgoECDH: + return pk.parseECDHPrivateKey(data) + case PubKeyAlgoEdDSA: + return pk.parseEdDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + p := new(encoding.MPI) + if _, err := p.ReadFrom(buf); err != nil { + return err + } + + q := new(encoding.MPI) + if _, err := q.ReadFrom(buf); err != nil { + return err + } + + rsaPriv.D = new(big.Int).SetBytes(d.Bytes()) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes()) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes()) + if err := rsaPriv.Validate(); err != nil { + return errors.KeyInvalidError(err.Error()) + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + dsaPriv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateDSAParameters(dsaPriv); err != nil { + return err + } + pk.PrivateKey = dsaPriv + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + priv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateElGamalParameters(priv); err != nil { + return err + } + pk.PrivateKey = priv + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + ecdsaPriv := ecdsa.NewPrivateKey(*ecdsaPub) + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err 
+ } + + if err := ecdsaPriv.UnmarshalIntegerSecret(d.Bytes()); err != nil { + return err + } + if err := ecdsa.Validate(ecdsaPriv); err != nil { + return err + } + pk.PrivateKey = ecdsaPriv + + return nil +} + +func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { + ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) + ecdhPriv := ecdh.NewPrivateKey(*ecdhPub) + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + if err := ecdhPriv.UnmarshalByteSecret(d.Bytes()); err != nil { + return err + } + + if err := ecdh.Validate(ecdhPriv); err != nil { + return err + } + + pk.PrivateKey = ecdhPriv + + return nil +} + +func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { + eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey) + eddsaPriv := eddsa.NewPrivateKey(*eddsaPub) + eddsaPriv.PublicKey = *eddsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + if err = eddsaPriv.UnmarshalByteSecret(d.Bytes()); err != nil { + return err + } + + if err := eddsa.Validate(eddsaPriv); err != nil { + return err + } + + pk.PrivateKey = eddsaPriv + + return nil +} + +func validateDSAParameters(priv *dsa.PrivateKey) error { + p := priv.P // group prime + q := priv.Q // subgroup order + g := priv.G // g has order q mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("dsa: invalid group") + } + // expect p > q + if p.Cmp(q) <= 0 { + return errors.KeyInvalidError("dsa: invalid group prime") + } + // q should be large enough and divide p-1 + pSub1 := new(big.Int).Sub(p, one) + if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 { + return errors.KeyInvalidError("dsa: invalid order") + } + // confirm that g has order q mod p + if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 { + return errors.KeyInvalidError("dsa: invalid order") + } + // check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("dsa: mismatching values") + } + + return nil +} + +func validateElGamalParameters(priv *elgamal.PrivateKey) error { + p := priv.P // group prime + g := priv.G // g has order p-1 mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // Expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + if p.BitLen() < 1024 { + return errors.KeyInvalidError("elgamal: group order too small") + } + pSub1 := new(big.Int).Sub(p, one) + if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + // Since p-1 is not prime, g might have a smaller order that divides p-1. + // We cannot confirm the exact order of g, but we make sure it is not too small. 
+ gExpI := new(big.Int).Set(g) + i := 1 + threshold := 2 << 17 // we want order > threshold + for i < threshold { + i++ // we check every order to make sure key validation is not easily bypassed by guessing y' + gExpI.Mod(new(big.Int).Mul(gExpI, g), p) + if gExpI.Cmp(one) == 0 { + return errors.KeyInvalidError("elgamal: order too small") + } + } + // Check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("elgamal: mismatching values") + } + + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go new file mode 100644 index 000000000..029b8f1aa --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go @@ -0,0 +1,12 @@ +package packet + +// Generated with `gpg --export-secret-keys "Test Key 2"` +const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" + +// Generated by `gpg --export-secret-keys` followed by a manual extraction of +// the ElGamal subkey from the packets. +const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc" + +// pkcs1PrivKeyHex is a PKCS#1, RSA private key. 
+// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p` +const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7" diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go new file mode 100644 index 000000000..ec903ee95 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -0,0 +1,802 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/dsa" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +type kdfHashFunction byte +type kdfAlgorithm byte + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + Version int + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey + Fingerprint []byte + KeyId uint64 + IsSubkey bool + + // RFC 4880 fields + n, e, p, q, g, y encoding.Field + + // RFC 6637 fields + // oid contains the OID byte sequence identifying the elliptic curve used + oid encoding.Field + + // kdf stores key derivation function parameters + // used for ECDH encryption. See RFC 6637, Section 9. + kdf encoding.Field +} + +// UpgradeToV5 updates the version of the key to v5, and updates all necessary +// fields. +func (pk *PublicKey) UpgradeToV5() { + pk.Version = 5 + pk.setFingerprintAndKeyId() +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. 
+type signingKey interface { + SerializeForHash(io.Writer) error + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: new(encoding.MPI).SetBig(pub.N), + e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + q: new(encoding.MPI).SetBig(pub.Q), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. +func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + p: encoding.NewMPI(pub.MarshalPoint()), + } + + curveInfo := ecc.FindByCurve(pub.GetCurve()) + if curveInfo == nil { + panic("unknown elliptic curve") + } + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { + var pk *PublicKey + var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()}) + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + p: encoding.NewMPI(pub.MarshalPoint()), + kdf: kdf, + } + + curveInfo := ecc.FindByCurve(pub.GetCurve()) + + if curveInfo == nil { + panic("unknown elliptic curve") + } + + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey { + curveInfo := ecc.FindByCurve(pub.GetCurve()) + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEdDSA, + PublicKey: pub, + oid: curveInfo.Oid, + // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B + p: encoding.NewMPI(pub.MarshalPoint()), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 && buf[0] != 5 { + return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0]))) + } + + pk.Version = int(buf[0]) + if pk.Version == 5 { + var n [4]byte + _, err = readFull(r, n[:]) + if err != nil { + return + } + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err 
= pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + err = pk.parseECDSA(r) + case PubKeyAlgoECDH: + err = pk.parseECDH(r) + case PubKeyAlgoEdDSA: + err = pk.parseEdDSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerprintAndKeyId() + return +} + +func (pk *PublicKey) setFingerprintAndKeyId() { + // RFC 4880, section 12.2 + if pk.Version == 5 { + fingerprint := sha256.New() + pk.SerializeForHash(fingerprint) + pk.Fingerprint = make([]byte, 32) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8]) + } else { + fingerprint := sha1.New() + pk.SerializeForHash(fingerprint) + pk.Fingerprint = make([]byte, 20) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) + } +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n = new(encoding.MPI) + if _, err = pk.n.ReadFrom(r); err != nil { + return + } + pk.e = new(encoding.MPI) + if _, err = pk.e.ReadFrom(r); err != nil { + return + } + + if len(pk.e.Bytes()) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.Bytes()), + E: 0, + } + for i := 0; i < len(pk.e.Bytes()); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.Bytes()[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.q = new(encoding.MPI) + if _, err = pk.q.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.Bytes()) + dsa.Q = new(big.Int).SetBytes(pk.q.Bytes()) + dsa.G = new(big.Int).SetBytes(pk.g.Bytes()) + dsa.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.Bytes()) + elgamal.G = new(big.Int).SetBytes(pk.g.Bytes()) + elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = elgamal + return +} + +// parseECDSA parses ECDSA public key material from the given Reader. See +// RFC 6637, Section 9. 
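+// On the wire this is a one-octet-length curve OID followed by an MPI holding
+// the SEC1-encoded point; for NIST P-256, for example, that is 0x08 plus the
+// eight OID octets, then the MPI of 0x04 || X || Y.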
+func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + c, ok := curveInfo.Curve.(ecc.ECDSACurve) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + ecdsaKey := ecdsa.NewPublicKey(c) + err = ecdsaKey.UnmarshalPoint(pk.p.Bytes()) + pk.PublicKey = ecdsaKey + + return +} + +// parseECDH parses ECDH public key material from the given Reader. See +// RFC 6637, Section 9. +func (pk *PublicKey) parseECDH(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.kdf = new(encoding.OID) + if _, err = pk.kdf.ReadFrom(r); err != nil { + return + } + + curveInfo := ecc.FindByOid(pk.oid) + + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + c, ok := curveInfo.Curve.(ecc.ECDHCurve) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + if kdfLen := len(pk.kdf.Bytes()); kdfLen < 3 { + return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + if reserved := pk.kdf.Bytes()[0]; reserved != 0x01 { + return errors.UnsupportedError("unsupported KDF reserved field: " + strconv.Itoa(int(reserved))) + } + kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]] + if !ok { + return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1]))) + } + kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]] + if !ok { + return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2]))) + } + + ecdhKey := ecdh.NewPublicKey(c, kdfHash, kdfCipher) + err = ecdhKey.UnmarshalPoint(pk.p.Bytes()) + pk.PublicKey = ecdhKey + + return +} + +func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid)) + } + + c, ok := curveInfo.Curve.(ecc.EdDSACurve) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + pub := eddsa.NewPublicKey(c) + + switch flag := pk.p.Bytes()[0]; flag { + case 0x04: + // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt + return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) + case 0x40: + err = pub.UnmarshalPoint(pk.p.Bytes()) + default: + return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) + } + + pk.PublicKey = pub + return +} + +// SerializeForHash serializes the PublicKey to w with the special packet +// header format needed for hashing. +func (pk *PublicKey) SerializeForHash(w io.Writer) error { + pk.SerializeSignaturePrefix(w) + return pk.serializeWithoutHeaders(w) +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. 
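+// Concretely, the v4 prefix is 0x99 followed by a two-octet big-endian body
+// length, and the v5 prefix is 0x9A followed by a four-octet length, with the
+// body length computed below from algorithmSpecificByteCount.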
+func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) { + var pLength = pk.algorithmSpecificByteCount() + if pk.Version == 5 { + pLength += 10 // version, timestamp (4), algorithm, key octet count (4). + w.Write([]byte{ + 0x9A, + byte(pLength >> 24), + byte(pLength >> 16), + byte(pLength >> 8), + byte(pLength), + }) + return + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + length += pk.algorithmSpecificByteCount() + if pk.Version == 5 { + length += 4 // octet key count + } + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +func (pk *PublicKey) algorithmSpecificByteCount() int { + length := 0 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += int(pk.n.EncodedLength()) + length += int(pk.e.EncodedLength()) + case PubKeyAlgoDSA: + length += int(pk.p.EncodedLength()) + length += int(pk.q.EncodedLength()) + length += int(pk.g.EncodedLength()) + length += int(pk.y.EncodedLength()) + case PubKeyAlgoElGamal: + length += int(pk.p.EncodedLength()) + length += int(pk.g.EncodedLength()) + length += int(pk.y.EncodedLength()) + case PubKeyAlgoECDSA: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + case PubKeyAlgoECDH: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + length += int(pk.kdf.EncodedLength()) + case PubKeyAlgoEdDSA: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + default: + panic("unknown public key algorithm") + } + return length +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. 
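+// The body is one octet of version, four octets of big-endian creation time,
+// one octet of algorithm ID, a four-octet count of the key material (v5 keys
+// only), and finally the algorithm-specific fields in the order accounted for
+// by algorithmSpecificByteCount.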
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + t := uint32(pk.CreationTime.Unix()) + if _, err = w.Write([]byte{ + byte(pk.Version), + byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t), + byte(pk.PubKeyAlgo), + }); err != nil { + return + } + + if pk.Version == 5 { + n := pk.algorithmSpecificByteCount() + if _, err = w.Write([]byte{ + byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n), + }); err != nil { + return + } + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + if _, err = w.Write(pk.n.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.e.EncodedBytes()) + return + case PubKeyAlgoDSA: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.q.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoElGamal: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoECDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + case PubKeyAlgoECDH: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.kdf.EncodedBytes()) + return + case PubKeyAlgoEdDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + if sig.Version == 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes())) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
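+		// DSA cannot consume a digest longer than the subgroup order Q, so
+		// only the leftmost Q-sized prefix of the digest is used.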
+ subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + case PubKeyAlgoEdDSA: + eddsaPublicKey := pk.PublicKey.(*eddsa.PublicKey) + if !eddsa.Verify(eddsaPublicKey, hashBytes, sig.EdDSASigR.Bytes(), sig.EdDSASigS.Bytes()) { + return errors.SignatureError("EdDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + err = pk.SerializeForHash(h) + if err != nil { + return nil, err + } + + err = signed.SerializeForHash(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + err = pk.SerializeForHash(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, +// made by this public key, of signed. 
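+// The hash is computed over the primary key followed by the subkey, the same
+// construction used for subkey binding signatures.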
+func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) {
+	h, err := keySignatureHash(pk, signed, sig.Hash)
+	if err != nil {
+		return err
+	}
+	return pk.VerifySignature(h, sig)
+}
+
+// userIdSignatureHash returns a Hash of the message that needs to be signed
+// to assert that pk is a valid key for id.
+func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
+	if !hashFunc.Available() {
+		return nil, errors.UnsupportedError("hash function")
+	}
+	h = hashFunc.New()
+
+	// RFC 4880, section 5.2.4
+	pk.SerializeSignaturePrefix(h)
+	pk.serializeWithoutHeaders(h)
+
+	var buf [5]byte
+	buf[0] = 0xb4
+	buf[1] = byte(len(id) >> 24)
+	buf[2] = byte(len(id) >> 16)
+	buf[3] = byte(len(id) >> 8)
+	buf[4] = byte(len(id))
+	h.Write(buf[:])
+	h.Write([]byte(id))
+
+	return
+}
+
+// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
+// public key, that id is the identity of pub.
+func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
+	h, err := userIdSignatureHash(id, pub, sig.Hash)
+	if err != nil {
+		return err
+	}
+	return pk.VerifySignature(h, sig)
+}
+
+// KeyIdString returns the public key's key ID (the last eight bytes of its
+// fingerprint) in capital hex (e.g. "6C7EE1B8621CC013").
+func (pk *PublicKey) KeyIdString() string {
+	return fmt.Sprintf("%X", pk.Fingerprint[12:20])
+}
+
+// KeyIdShortString returns the short form of the public key's key ID
+// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
+func (pk *PublicKey) KeyIdShortString() string {
+	return fmt.Sprintf("%X", pk.Fingerprint[16:20])
+}
+
+// BitLength returns the bit length for the given public key.
+func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
+	switch pk.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+		bitLength = pk.n.BitLength()
+	case PubKeyAlgoDSA:
+		bitLength = pk.p.BitLength()
+	case PubKeyAlgoElGamal:
+		bitLength = pk.p.BitLength()
+	case PubKeyAlgoECDSA:
+		bitLength = pk.p.BitLength()
+	case PubKeyAlgoECDH:
+		bitLength = pk.p.BitLength()
+	case PubKeyAlgoEdDSA:
+		bitLength = pk.p.BitLength()
+	default:
+		err = errors.InvalidArgumentError("bad public-key algorithm")
+	}
+	return
+}
+
+// KeyExpired returns whether sig is a self-signature of a key that has
+// expired or is created in the future.
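+// Expiry is CreationTime plus the KeyLifetimeSecs carried in sig; a nil or
+// zero lifetime means the key does not expire.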
+func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool { + if pk.CreationTime.After(currentTime) { + return true + } + if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 { + return false + } + expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go new file mode 100644 index 000000000..b255f1f6f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go @@ -0,0 +1,24 @@ +package packet + +const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" + +const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" + +const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" + +const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" + +const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b" + +const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4" + +const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946" + +const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909" + +const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8" + +const eddsaPkDataHex = "98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203" + +// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key +const ecc384PubHex = 
`99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go new file mode 100644 index 000000000..10215fe5f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go @@ -0,0 +1,86 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + // TODO: Add strict mode that rejects unknown packets, instead of ignoring them. + if _, ok := err.(errors.UnknownPacketTypeError); ok { + continue + } + if _, ok := err.(errors.UnsupportedError); ok { + switch p.(type) { + case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData: + return nil, err + } + continue + } + return nil, err + } + + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. 
Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. +func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go new file mode 100644 index 000000000..6c58c86fa --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -0,0 +1,1084 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "encoding/binary" + "hash" + "io" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdsa" + "github.com/ProtonMail/go-crypto/openpgp/eddsa" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage + KeyFlagSplitKey + KeyFlagAuthenticate + _ + KeyFlagGroupKey +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + Version int + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + + // Metadata includes format, filename and time, and is protected by v5 + // signatures of type 0x00 or 0x01. This metadata is included into the hash + // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4. + Metadata *LiteralData + + CreationTime time.Time + + RSASignature encoding.Field + DSASigR, DSASigS encoding.Field + ECDSASigR, ECDSASigS encoding.Field + EdDSASigR, EdDSASigS encoding.Field + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + PreferredCipherSuites [][2]uint8 + IssuerKeyId *uint64 + IssuerFingerprint []byte + SignerUserId *string + IsPrimaryId *bool + Notations []*Notation + + // TrustLevel and TrustAmount can be set by the signer to assert that + // the key is not only valid but also trustworthy at the specified + // level. + // See RFC 4880, section 5.2.3.13 for details. + TrustLevel TrustLevel + TrustAmount TrustAmount + + // TrustRegularExpression can be used in conjunction with trust Signature + // packets to limit the scope of the trust that is extended. + // See RFC 4880, section 5.2.3.14 for details. + TrustRegularExpression *string + + // PolicyURI can be set to the URI of a document that describes the + // policy under which the signature was issued. 
See RFC 4880, section
+	// 5.2.3.20 for details.
+	PolicyURI string
+
+	// FlagsValid is set if any flags were given. See RFC 4880, section
+	// 5.2.3.21 for details.
+	FlagsValid bool
+	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage, FlagSplitKey, FlagAuthenticate, FlagGroupKey bool
+
+	// RevocationReason is set if this signature has been revoked.
+	// See RFC 4880, section 5.2.3.23 for details.
+	RevocationReason     *ReasonForRevocation
+	RevocationReasonText string
+
+	// In a self-signature, these flags are set if there is a features
+	// subpacket indicating that the issuer implementation supports these
+	// features; see
+	// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#features-subpacket
+	SEIPDv1, SEIPDv2 bool
+
+	// EmbeddedSignature, if non-nil, is a signature of the parent key, by
+	// this key. This prevents an attacker from claiming another's signing
+	// subkey as their own.
+	EmbeddedSignature *Signature
+
+	outSubpackets []outputSubpacket
+}
+
+func (sig *Signature) parse(r io.Reader) (err error) {
+	// RFC 4880, section 5.2.3
+	var buf [5]byte
+	_, err = readFull(r, buf[:1])
+	if err != nil {
+		return
+	}
+	if buf[0] != 4 && buf[0] != 5 {
+		err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
+		return
+	}
+	sig.Version = int(buf[0])
+	_, err = readFull(r, buf[:5])
+	if err != nil {
+		return
+	}
+	sig.SigType = SignatureType(buf[0])
+	sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
+	default:
+		err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
+		return
+	}
+
+	var ok bool
+
+	if sig.Version < 5 {
+		sig.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
+	} else {
+		sig.Hash, ok = algorithm.HashIdToHash(buf[2])
+	}
+
+	if !ok {
+		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
+	}
+
+	hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
+	hashedSubpackets := make([]byte, hashedSubpacketsLength)
+	_, err = readFull(r, hashedSubpackets)
+	if err != nil {
+		return
+	}
+	err = sig.buildHashSuffix(hashedSubpackets)
+	if err != nil {
+		return
+	}
+
+	err = parseSignatureSubpackets(sig, hashedSubpackets, true)
+	if err != nil {
+		return
+	}
+
+	_, err = readFull(r, buf[:2])
+	if err != nil {
+		return
+	}
+	unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
+	unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
+	_, err = readFull(r, unhashedSubpackets)
+	if err != nil {
+		return
+	}
+	err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
+	if err != nil {
+		return
+	}
+
+	_, err = readFull(r, sig.HashTag[:2])
+	if err != nil {
+		return
+	}
+
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		sig.RSASignature = new(encoding.MPI)
+		_, err = sig.RSASignature.ReadFrom(r)
+	case PubKeyAlgoDSA:
+		sig.DSASigR = new(encoding.MPI)
+		if _, err = sig.DSASigR.ReadFrom(r); err != nil {
+			return
+		}
+
+		sig.DSASigS = new(encoding.MPI)
+		_, err = sig.DSASigS.ReadFrom(r)
+	case PubKeyAlgoECDSA:
+		sig.ECDSASigR = new(encoding.MPI)
+		if _, err = sig.ECDSASigR.ReadFrom(r); err != nil {
+			return
+		}
+
+		sig.ECDSASigS = new(encoding.MPI)
+		_, err = sig.ECDSASigS.ReadFrom(r)
+	case PubKeyAlgoEdDSA:
+		sig.EdDSASigR = new(encoding.MPI)
+		if _, err = sig.EdDSASigR.ReadFrom(r); err != nil {
+			return
+		}
+
+		sig.EdDSASigS = new(encoding.MPI)
+		if _, err = sig.EdDSASigS.ReadFrom(r); err != nil {
+			return
+		}
+
default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. +func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + trustSubpacket signatureSubpacketType = 5 + regularExpressionSubpacket signatureSubpacketType = 6 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + notationDataSubpacket signatureSubpacketType = 20 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + policyUriSubpacket signatureSubpacketType = 26 + keyFlagsSubpacket signatureSubpacketType = 27 + signerUserIdSubpacket signatureSubpacketType = 28 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 + issuerFingerprintSubpacket signatureSubpacketType = 33 + prefCipherSuitesSubpacket signatureSubpacketType = 39 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + if !isHashed && + packetType != issuerSubpacket && + packetType != issuerFingerprintSubpacket && + packetType != embeddedSignatureSubpacket { + return + } + switch packetType { + case creationTimeSubpacket: + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + 
*sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case trustSubpacket: + if len(subpacket) != 2 { + err = errors.StructuralError("trust subpacket with bad length") + return + } + // Trust level and amount, section 5.2.3.13 + sig.TrustLevel = TrustLevel(subpacket[0]) + sig.TrustAmount = TrustAmount(subpacket[1]) + case regularExpressionSubpacket: + if len(subpacket) == 0 { + err = errors.StructuralError("regexp subpacket with bad length") + return + } + // Trust regular expression, section 5.2.3.14 + // RFC specifies the string should be null-terminated; remove a null byte from the end + if subpacket[len(subpacket)-1] != 0x00 { + err = errors.StructuralError("expected regular expression to be null-terminated") + return + } + trustRegularExpression := string(subpacket[:len(subpacket)-1]) + sig.TrustRegularExpression = &trustRegularExpression + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if sig.Version > 4 { + err = errors.StructuralError("issuer subpacket found in v5 key") + return + } + if len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case notationDataSubpacket: + // Notation data, section 5.2.3.16 + if len(subpacket) < 8 { + err = errors.StructuralError("notation data subpacket with bad length") + return + } + + nameLength := uint32(subpacket[4])<<8 | uint32(subpacket[5]) + valueLength := uint32(subpacket[6])<<8 | uint32(subpacket[7]) + if len(subpacket) != int(nameLength)+int(valueLength)+8 { + err = errors.StructuralError("notation data subpacket with bad length") + return + } + + notation := Notation{ + IsHumanReadable: (subpacket[0] & 0x80) == 0x80, + Name: string(subpacket[8:(nameLength + 8)]), + Value: subpacket[(nameLength + 8):(valueLength + nameLength + 8)], + IsCritical: isCritical, + } + + sig.Notations = append(sig.Notations, ¬ation) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + if 
subpacket[0]&KeyFlagSplitKey != 0 { + sig.FlagSplitKey = true + } + if subpacket[0]&KeyFlagAuthenticate != 0 { + sig.FlagAuthenticate = true + } + if subpacket[0]&KeyFlagGroupKey != 0 { + sig.FlagGroupKey = true + } + case signerUserIdSubpacket: + userId := string(subpacket) + sig.SignerUserId = &userId + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(ReasonForRevocation) + *sig.RevocationReason = ReasonForRevocation(subpacket[0]) + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. + if len(subpacket) > 0 { + if subpacket[0]&0x01 != 0 { + sig.SEIPDv1 = true + } + // 0x02 and 0x04 are reserved + if subpacket[0]&0x08 != 0 { + sig.SEIPDv2 = true + } + } + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. + if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + case policyUriSubpacket: + // Policy URI, section 5.2.3.20 + sig.PolicyURI = string(subpacket) + case issuerFingerprintSubpacket: + if len(subpacket) == 0 { + err = errors.StructuralError("empty issuer fingerprint subpacket") + return + } + v, l := subpacket[0], len(subpacket[1:]) + if v == 5 && l != 32 || v != 5 && l != 20 { + return nil, errors.StructuralError("bad fingerprint length") + } + sig.IssuerFingerprint = make([]byte, l) + copy(sig.IssuerFingerprint, subpacket[1:]) + sig.IssuerKeyId = new(uint64) + if v == 5 { + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9]) + } else { + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21]) + } + case prefCipherSuitesSubpacket: + // Preferred AEAD cipher suites + // See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites + if len(subpacket)%2 != 0 { + err = errors.StructuralError("invalid aead cipher suite length") + return + } + + sig.PreferredCipherSuites = make([][2]byte, len(subpacket)/2) + + for i := 0; i < len(subpacket)/2; i++ { + sig.PreferredCipherSuites[i] = [2]uint8{subpacket[2*i], subpacket[2*i+1]} + } + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. 
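+// Subpacket lengths use the RFC 4880, section 5.2.3.1 encoding implemented
+// below: one octet for lengths below 192, two octets (with a 192 bias) for
+// lengths up to 16319, and five octets (an 0xff marker plus a four-octet
+// big-endian scalar) beyond that.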
+func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool { + if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 { + return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint) + } + return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. + if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + if subpacket.isCritical { + to[n] |= 0x80 + } + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// SigExpired returns whether sig is a signature that has expired or is created +// in the future. +func (sig *Signature) SigExpired(currentTime time.Time) bool { + if sig.CreationTime.After(currentTime) { + return true + } + if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. 
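+// The suffix holds the version, signature type, public key algorithm and
+// hash octets, the two-octet hashed subpacket length, the hashed subpacket
+// area, and a trailer: 0x04 0xff plus a four-octet count for v4 signatures,
+// or 0x05 0xff plus an eight-octet count for v5 signatures.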
+func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { + var hashId byte + var ok bool + + if sig.Version < 5 { + hashId, ok = algorithm.HashToHashIdWithSha1(sig.Hash) + } else { + hashId, ok = algorithm.HashToHashId(sig.Hash) + } + + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + + hashedFields := bytes.NewBuffer([]byte{ + uint8(sig.Version), + uint8(sig.SigType), + uint8(sig.PubKeyAlgo), + uint8(hashId), + uint8(len(hashedSubpackets) >> 8), + uint8(len(hashedSubpackets)), + }) + hashedFields.Write(hashedSubpackets) + + var l uint64 = uint64(6 + len(hashedSubpackets)) + if sig.Version == 5 { + hashedFields.Write([]byte{0x05, 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } else { + hashedFields.Write([]byte{0x04, 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } + sig.HashSuffix = make([]byte, hashedFields.Len()) + copy(sig.HashSuffix, hashedFields.Bytes()) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + hashedSubpackets := make([]byte, hashedSubpacketsLen) + serializeSubpackets(hashedSubpackets, sig.outSubpackets, true) + err = sig.buildHashSuffix(hashedSubpackets) + if err != nil { + return + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + sig.Version = priv.PublicKey.Version + sig.IssuerFingerprint = priv.PublicKey.Fingerprint + sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey) + if err != nil { + return err + } + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + // supports both *rsa.PrivateKey and crypto.Signer + sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + sig.RSASignature = encoding.NewMPI(sigdata) + } + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
+ subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err == nil { + sig.DSASigR = new(encoding.MPI).SetBig(r) + sig.DSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoECDSA: + sk := priv.PrivateKey.(*ecdsa.PrivateKey) + r, s, err := ecdsa.Sign(config.Random(), sk, digest) + + if err == nil { + sig.ECDSASigR = new(encoding.MPI).SetBig(r) + sig.ECDSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoEdDSA: + sk := priv.PrivateKey.(*eddsa.PrivateKey) + r, s, err := eddsa.Sign(sk, digest) + if err == nil { + sig.EdDSASigR = encoding.NewMPI(r) + sig.EdDSASigS = encoding.NewMPI(s) + } + default: + err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) + } + + return +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, +// the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, + config *Config) error { + h, err := keySignatureHash(hashKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, signingKey, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// RevokeKey computes a revocation signature of pub using priv. On success, the signature is +// stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := keyRevocationHash(pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// RevokeSubkey computes a subkey revocation signature of pub using priv. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error { + // Identical to a subkey binding signature + return sig.SignKey(pub, priv, config) +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. 
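+// An illustrative flow (error handling elided, variable names hypothetical):
+//
+//	sig := &Signature{SigType: SigTypeBinary, PubKeyAlgo: priv.PubKeyAlgo,
+//		Hash: crypto.SHA256, CreationTime: time.Now()}
+//	_ = sig.Sign(h, priv, nil) // h holds the hashed message
+//	_ = sig.Serialize(w)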
+func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = int(sig.RSASignature.EncodedLength()) + case PubKeyAlgoDSA: + sigLength = int(sig.DSASigR.EncodedLength()) + sigLength += int(sig.DSASigS.EncodedLength()) + case PubKeyAlgoECDSA: + sigLength = int(sig.ECDSASigR.EncodedLength()) + sigLength += int(sig.ECDSASigS.EncodedLength()) + case PubKeyAlgoEdDSA: + sigLength = int(sig.EdDSASigR.EncodedLength()) + sigLength += int(sig.EdDSASigS.EncodedLength()) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + if sig.Version == 5 { + length -= 4 // eight-octet instead of four-octet big endian + } + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + err = sig.serializeBody(w) + if err != nil { + return err + } + return +} + +func (sig *Signature) serializeBody(w io.Writer) (err error) { + hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5]) + fields := sig.HashSuffix[:6+hashedSubpacketsLen] + _, err = w.Write(fields) + if err != nil { + return + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + _, err = w.Write(sig.RSASignature.EncodedBytes()) + case PubKeyAlgoDSA: + if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.DSASigS.EncodedBytes()) + case PubKeyAlgoECDSA: + if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.ECDSASigS.EncodedBytes()) + case PubKeyAlgoEdDSA: + if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.EdDSASigS.EncodedBytes()) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. 
+ subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil && sig.Version == 4 { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId}) + } + if sig.IssuerFingerprint != nil { + contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) + subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version == 5, contents}) + } + if sig.SignerUserId != nil { + subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)}) + } + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. + + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + if sig.FlagSplitKey { + flags |= KeyFlagSplitKey + } + if sig.FlagAuthenticate { + flags |= KeyFlagAuthenticate + } + if sig.FlagGroupKey { + flags |= KeyFlagGroupKey + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + for _, notation := range sig.Notations { + subpackets = append( + subpackets, + outputSubpacket{ + true, + notationDataSubpacket, + notation.IsCritical, + notation.getData(), + }) + } + + // The following subpackets may only appear in self-signatures. 
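+	// In the features subpacket, bit 0x01 advertises support for the v1
+	// Symmetrically Encrypted Integrity Protected Data packet (MDC) and bit
+	// 0x08 for the v2 AEAD variant; bits 0x02 and 0x04 are reserved.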
+ + var features = byte(0x00) + if sig.SEIPDv1 { + features |= 0x01 + } + if sig.SEIPDv2 { + features |= 0x08 + } + + if features != 0x00 { + subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}}) + } + + if sig.TrustLevel != 0 { + subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}}) + } + + if sig.TrustRegularExpression != nil { + // RFC specifies the string should be null-terminated; add a null byte to the end + subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")}) + } + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + if len(sig.PolicyURI) > 0 { + subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)}) + } + + if len(sig.PreferredCipherSuites) > 0 { + serialized := make([]byte, len(sig.PreferredCipherSuites)*2) + for i, cipherSuite := range sig.PreferredCipherSuites { + serialized[2*i] = cipherSuite[0] + serialized[2*i+1] = cipherSuite[1] + } + subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized}) + } + + // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. + if sig.RevocationReason != nil { + subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, + append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)}) + } + + // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. + if sig.EmbeddedSignature != nil { + var buf bytes.Buffer + err = sig.EmbeddedSignature.serializeBody(&buf) + if err != nil { + return + } + subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()}) + } + + return +} + +// AddMetadataToHashSuffix modifies the current hash suffix to include metadata +// (format, filename, and time). Version 5 keys protect this data including it +// in the hash computation. See section 5.2.4. +func (sig *Signature) AddMetadataToHashSuffix() { + if sig == nil || sig.Version != 5 { + return + } + if sig.SigType != 0x00 && sig.SigType != 0x01 { + return + } + lit := sig.Metadata + if lit == nil { + // This will translate into six 0x00 bytes. 
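+		// (one zero format octet, one zero filename-length octet, and a
+		// four-octet zero time)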
+ lit = &LiteralData{} + } + + // Extract the current byte count + n := sig.HashSuffix[len(sig.HashSuffix)-8:] + l := uint64( + uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 | + uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7])) + + suffix := bytes.NewBuffer(nil) + suffix.Write(sig.HashSuffix[:l]) + + // Add the metadata + var buf [4]byte + buf[0] = lit.Format + fileName := lit.FileName + if len(lit.FileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + suffix.Write(buf[:2]) + suffix.Write([]byte(lit.FileName)) + binary.BigEndian.PutUint32(buf[:], lit.Time) + suffix.Write(buf[:]) + + // Update the counter and restore trailing bytes + l = uint64(suffix.Len()) + suffix.Write([]byte{0x05, 0xff}) + suffix.Write([]byte{ + uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + sig.HashSuffix = suffix.Bytes() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go new file mode 100644 index 000000000..a8abf2ff7 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go @@ -0,0 +1,303 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/cipher" + "crypto/sha256" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/s2k" + "golang.org/x/crypto/hkdf" +) + +// This is the largest session key that we'll support. Since at most 256-bit cipher +// is supported in OpenPGP, this is large enough to contain also the auth tag. +const maxSessionKeySizeInBytes = 64 + +// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC +// 4880, section 5.3. 
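+// Version 4 packets protect the session key with CFB under an S2K-derived
+// key; version 5 packets use an AEAD mode as described in the crypto-refresh
+// draft referenced below.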
+type SymmetricKeyEncrypted struct {
+	Version      int
+	CipherFunc   CipherFunction
+	Mode         AEADMode
+	s2k          func(out, in []byte)
+	iv           []byte
+	encryptedKey []byte // Also contains the authentication tag for AEAD
+}
+
+// parse parses a SymmetricKeyEncrypted packet as specified in
+// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-symmetric-key-encrypted-ses
+func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
+	var buf [1]byte
+
+	// Version
+	if _, err := readFull(r, buf[:]); err != nil {
+		return err
+	}
+	ske.Version = int(buf[0])
+	if ske.Version != 4 && ske.Version != 5 {
+		return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
+	}
+
+	if ske.Version == 5 {
+		// Scalar octet count
+		if _, err := readFull(r, buf[:]); err != nil {
+			return err
+		}
+	}
+
+	// Cipher function
+	if _, err := readFull(r, buf[:]); err != nil {
+		return err
+	}
+	ske.CipherFunc = CipherFunction(buf[0])
+	if !ske.CipherFunc.IsSupported() {
+		return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0])))
+	}
+
+	if ske.Version == 5 {
+		// AEAD mode
+		if _, err := readFull(r, buf[:]); err != nil {
+			return errors.StructuralError("cannot read AEAD octet from packet")
+		}
+		ske.Mode = AEADMode(buf[0])
+
+		// Scalar octet count
+		if _, err := readFull(r, buf[:]); err != nil {
+			return err
+		}
+	}
+
+	var err error
+	if ske.s2k, err = s2k.Parse(r); err != nil {
+		if _, ok := err.(errors.ErrDummyPrivateKey); ok {
+			return errors.UnsupportedError("missing key GNU extension in session key")
+		}
+		return err
+	}
+
+	if ske.Version == 5 {
+		// AEAD IV
+		iv := make([]byte, ske.Mode.IvLength())
+		_, err := readFull(r, iv)
+		if err != nil {
+			return errors.StructuralError("cannot read AEAD IV")
+		}
+
+		ske.iv = iv
+	}
+
+	encryptedKey := make([]byte, maxSessionKeySizeInBytes)
+	// The session key may follow. We just have to try and read to find
+	// out. If it exists then we limit it to maxSessionKeySizeInBytes.
+	n, err := readFull(r, encryptedKey)
+	if err != nil && err != io.ErrUnexpectedEOF {
+		return err
+	}
+
+	if n != 0 {
+		if n == maxSessionKeySizeInBytes {
+			return errors.UnsupportedError("oversized encrypted session key")
+		}
+		ske.encryptedKey = encryptedKey[:n]
+	}
+	return nil
+}
+
+// Decrypt attempts to decrypt an encrypted session key and returns the key and
+// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
+// packet.
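+// For a v5 (AEAD) packet no cipher octet is encrypted alongside the session
+// key, so the returned CipherFunction is 0; the symmetric cipher is then
+// carried by the v2 Symmetrically Encrypted Integrity Protected Data packet.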
+func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + switch ske.Version { + case 4: + plaintextKey, cipherFunc, err := ske.decryptV4(key) + return plaintextKey, cipherFunc, err + case 5: + plaintextKey, err := ske.decryptV5(key) + return plaintextKey, CipherFunction(0), err + } + err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version") + return nil, CipherFunction(0), err +} + +func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) { + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError( + "unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if len(plaintextKey) != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError( + "length of decrypted key not equal to cipher keysize") + } + return plaintextKey, cipherFunc, nil +} + +func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) { + adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)} + aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata) + + plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata) + if err != nil { + return nil, err + } + return plaintextKey, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. +// The packet contains a random session key, encrypted by a key derived from +// the given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + + sessionKey := make([]byte, cipherFunc.KeySize()) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + + err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config) + if err != nil { + return + } + + key = sessionKey + return +} + +// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w. +// The packet contains the given session key, encrypted by a key derived from +// the given passphrase. The returned session key must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { + var version int + if config.AEAD() != nil { + version = 5 + } else { + version = 4 + } + cipherFunc := config.Cipher() + // cipherFunc must be AES + if !cipherFunc.IsSupported() || cipherFunc < CipherAES128 || cipherFunc > CipherAES256 { + return errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(cipherFunc))) + } + + keySize := cipherFunc.KeySize() + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, config.S2K()) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + var packetLength int + switch version { + case 4: + packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + case 5: + ivLen := config.AEAD().Mode().IvLength() + tagLen := config.AEAD().Mode().TagLength() + packetLength = 5 + len(s2kBytes) + ivLen + keySize + tagLen + } + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + // Symmetric Key Encrypted Version + buf := []byte{byte(version)} + + if version == 5 { + // Scalar octet count + buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength())) + } + + // Cipher function + buf = append(buf, byte(cipherFunc)) + + if version == 5 { + // AEAD mode + buf = append(buf, byte(config.AEAD().Mode())) + + // Scalar octet count + buf = append(buf, byte(len(s2kBytes))) + } + _, err = w.Write(buf) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + switch version { + case 4: + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + case 5: + mode := config.AEAD().Mode() + adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)} + aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata) + + // Sample iv using random reader + iv := make([]byte, config.AEAD().Mode().IvLength()) + _, err = io.ReadFull(config.Random(), iv) + if err != nil { + return + } + // Seal and write (encryptedData includes auth. tag) + + encryptedData := aead.Seal(nil, iv, sessionKey, adata) + _, err = w.Write(iv) + if err != nil { + return + } + _, err = w.Write(encryptedData) + if err != nil { + return + } + } + + return +} + +func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte) (aead cipher.AEAD) { + hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData) + + encryptionKey := make([]byte, c.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + + blockCipher := c.new(encryptionKey) + return mode.new(blockCipher) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 000000000..609e9fc1f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,90 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +const aeadSaltSize = 32 + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted Contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + Version int + Contents io.Reader // contains tag for version 2 + IntegrityProtected bool // If true it is type 18 (with MDC or AEAD). 
False is packet type 9 + + // Specific to version 1 + prefix []byte + + // Specific to version 2 + cipher CipherFunction + mode AEADMode + chunkSizeByte byte + salt [aeadSaltSize]byte +} + +const ( + symmetricallyEncryptedVersionMdc = 1 + symmetricallyEncryptedVersionAead = 2 +) + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.IntegrityProtected { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + + switch buf[0] { + case symmetricallyEncryptedVersionMdc: + se.Version = symmetricallyEncryptedVersionMdc + case symmetricallyEncryptedVersionAead: + se.Version = symmetricallyEncryptedVersionAead + if err := se.parseAead(r); err != nil { + return err + } + default: + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.Contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted Contents of the +// packet can be read. An incorrect key will only be detected after trying +// to decrypt the entire data. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + if se.Version == symmetricallyEncryptedVersionAead { + return se.decryptAead(key) + } + + return se.decryptMdc(c, key) +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) { + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedIntegrityProtected) + if err != nil { + return + } + + if aeadSupported { + return serializeSymmetricallyEncryptedAead(ciphertext, cipherSuite, config.AEADConfig.ChunkSizeByte(), config.Random(), key) + } + + return serializeSymmetricallyEncryptedMdc(ciphertext, c, key, config) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go new file mode 100644 index 000000000..969c42236 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go @@ -0,0 +1,155 @@ +// Copyright 2023 Proton AG. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
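+
+// Illustrative sketch of how the pieces above fit together when encrypting
+// symmetrically (assumed usage, not upstream code; w, passphrase, aeadSupported,
+// cipherSuite and config are placeholders): a random session key is first
+// wrapped under a passphrase, then used to encrypt the data packets.
+//
+//	key, err := SerializeSymmetricKeyEncrypted(w, passphrase, config)
+//	if err != nil {
+//		return err
+//	}
+//	pt, err := SerializeSymmetricallyEncrypted(w, config.Cipher(), aeadSupported, cipherSuite, key, config)
+//	// ... write the literal/compressed packets to pt, then Close it so the
+//	// trailing data (AEAD final chunk and tag, or the MDC trailer) is emitted.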
+ +package packet + +import ( + "crypto/cipher" + "crypto/sha256" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "golang.org/x/crypto/hkdf" + "io" +) + +// parseAead parses a V2 SEIPD packet (AEAD) as specified in +// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 +func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error { + headerData := make([]byte, 3) + if n, err := io.ReadFull(r, headerData); n < 3 { + return errors.StructuralError("could not read aead header: " + err.Error()) + } + + // Cipher + se.cipher = CipherFunction(headerData[0]) + // cipherFunc must have block size 16 to use AEAD + if se.cipher.blockSize() != 16 { + return errors.UnsupportedError("invalid aead cipher: " + string(se.cipher)) + } + + // Mode + se.mode = AEADMode(headerData[1]) + if se.mode.TagLength() == 0 { + return errors.UnsupportedError("unknown aead mode: " + string(se.mode)) + } + + // Chunk size + se.chunkSizeByte = headerData[2] + if se.chunkSizeByte > 16 { + return errors.UnsupportedError("invalid aead chunk size byte: " + string(se.chunkSizeByte)) + } + + // Salt + if n, err := io.ReadFull(r, se.salt[:]); n < aeadSaltSize { + return errors.StructuralError("could not read aead salt: " + err.Error()) + } + + return nil +} + +// associatedData for chunks: tag, version, cipher, mode, chunk size byte +func (se *SymmetricallyEncrypted) associatedData() []byte { + return []byte{ + 0xD2, + symmetricallyEncryptedVersionAead, + byte(se.cipher), + byte(se.mode), + se.chunkSizeByte, + } +} + +// decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in +// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 +func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) { + aead, nonce := getSymmetricallyEncryptedAeadInstance(se.cipher, se.mode, inputKey, se.salt[:], se.associatedData()) + + // Carry the first tagLen bytes + tagLen := se.mode.TagLength() + peekedBytes := make([]byte, tagLen) + n, err := io.ReadFull(se.Contents, peekedBytes) + if n < tagLen || (err != nil && err != io.EOF) { + return nil, errors.StructuralError("not enough data to decrypt:" + err.Error()) + } + + return &aeadDecrypter{ + aeadCrypter: aeadCrypter{ + aead: aead, + chunkSize: decodeAEADChunkSize(se.chunkSizeByte), + initialNonce: nonce, + associatedData: se.associatedData(), + chunkIndex: make([]byte, 8), + packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, + }, + reader: se.Contents, + peekedBytes: peekedBytes, + }, nil +} + +// serializeSymmetricallyEncryptedAead encrypts to a writer a V2 SEIPD packet (AEAD) as specified in +// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2 +func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite CipherSuite, chunkSizeByte byte, rand io.Reader, inputKey []byte) (Contents io.WriteCloser, err error) { + // cipherFunc must have block size 16 to use AEAD + if cipherSuite.Cipher.blockSize() != 16 { + return nil, errors.InvalidArgumentError("invalid aead cipher function") + } + + if cipherSuite.Cipher.KeySize() != len(inputKey) { + return nil, errors.InvalidArgumentError("error in aead serialization: bad key length") + } + + // Data for en/decryption: tag, version, cipher, aead mode, chunk size + prefix := []byte{ + 0xD2, + symmetricallyEncryptedVersionAead, + byte(cipherSuite.Cipher), + byte(cipherSuite.Mode), + chunkSizeByte, + } + + // Write header (that correspond to prefix except first byte) + n, err := 
ciphertext.Write(prefix[1:]) + if err != nil || n < 4 { + return nil, err + } + + // Random salt + salt := make([]byte, aeadSaltSize) + if _, err := rand.Read(salt); err != nil { + return nil, err + } + + if _, err := ciphertext.Write(salt); err != nil { + return nil, err + } + + aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix) + + return &aeadEncrypter{ + aeadCrypter: aeadCrypter{ + aead: aead, + chunkSize: decodeAEADChunkSize(chunkSizeByte), + associatedData: prefix, + chunkIndex: make([]byte, 8), + initialNonce: nonce, + packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, + }, + writer: ciphertext, + }, nil +} + +func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inputKey, salt, associatedData []byte) (aead cipher.AEAD, nonce []byte) { + hkdfReader := hkdf.New(sha256.New, inputKey, salt, associatedData) + + encryptionKey := make([]byte, c.KeySize()) + _, _ = readFull(hkdfReader, encryptionKey) + + // Last 64 bits of nonce are the counter + nonce = make([]byte, mode.IvLength()-8) + + _, _ = readFull(hkdfReader, nonce) + + blockCipher := c.new(encryptionKey) + aead = mode.new(blockCipher) + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go new file mode 100644 index 000000000..fa26bebe3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go @@ -0,0 +1,256 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// seMdcReader wraps an io.Reader with a no-op Close method. +type seMdcReader struct { + in io.Reader +} + +func (ser seMdcReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seMdcReader) Close() error { + return nil +} + +func (se *SymmetricallyEncrypted) decryptMdc(c CipherFunction, key []byte) (io.ReadCloser, error) { + if !c.IsSupported() { + return nil, errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(c))) + } + + if len(key) != c.KeySize() { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.Contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.IntegrityProtected { + // MDC packets use a different form of OCFB mode. + ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + + plaintext := cipher.StreamReader{S: s, R: se.Contents} + + if se.IntegrityProtected { + // IntegrityProtected packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. 
+ return seMdcReader{plaintext}, nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous Contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (Integrity Protected) packet. +const mdcPacketTagByte = byte(0x80) | 0x40 | 19 + +func (ser *seMDCReader) Close() error { + if ser.error { + return errors.ErrMDCMissing + } + + for !ser.eof { + // We haven't seen EOF so we need to read to the end + var buf [1024]byte + _, err := ser.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return errors.ErrMDCMissing + } + } + + ser.h.Write(ser.trailer[:2]) + + final := ser.h.Sum(nil) + if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { + return errors.ErrMDCHashMismatch + } + // The hash already includes the MDC header, but we still check its value + // to confirm encryption correctness + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.ErrMDCMissing + } + return nil +} + +// An seMDCWriter writes through to an io.WriteCloser while maintains a running +// hash of the data written. On close, it emits an MDC packet containing the +// running hash. +type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. 
+type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) { + // Disallow old cipher suites + if !c.IsSupported() || c < CipherAES128 { + return nil, errors.InvalidArgumentError("invalid mdc cipher function") + } + + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("error in mdc serialization: bad key length") + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersionMdc}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + Contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go new file mode 100644 index 000000000..88ec72c6c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go @@ -0,0 +1,101 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "image" + "image/jpeg" + "io" + "io/ioutil" +) + +const UserAttrImageSubpacket = 1 + +// UserAttribute is capable of storing other types of data about a user +// beyond name, email and a text comment. In practice, user attributes are typically used +// to store a signed thumbnail photo JPEG image of the user. +// See RFC 4880, section 5.12. +type UserAttribute struct { + Contents []*OpaqueSubpacket +} + +// NewUserAttributePhoto creates a user attribute packet +// containing the given images. +func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { + uat = new(UserAttribute) + for _, photo := range photos { + var buf bytes.Buffer + // RFC 4880, Section 5.12.1. + data := []byte{ + 0x10, 0x00, // Little-endian image header length (16 bytes) + 0x01, // Image header version 1 + 0x01, // JPEG + 0, 0, 0, 0, // 12 reserved octets, must be all zero. + 0, 0, 0, 0, + 0, 0, 0, 0} + if _, err = buf.Write(data); err != nil { + return + } + if err = jpeg.Encode(&buf, photo, nil); err != nil { + return + } + + lengthBuf := make([]byte, 5) + n := serializeSubpacketLength(lengthBuf, len(buf.Bytes())+1) + lengthBuf = lengthBuf[:n] + + uat.Contents = append(uat.Contents, &OpaqueSubpacket{ + SubType: UserAttrImageSubpacket, + EncodedLength: lengthBuf, + Contents: buf.Bytes(), + }) + } + return +} + +// NewUserAttribute creates a new user attribute packet containing the given subpackets. +func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { + return &UserAttribute{Contents: contents} +} + +func (uat *UserAttribute) parse(r io.Reader) (err error) { + // RFC 4880, section 5.13 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uat.Contents, err = OpaqueSubpackets(b) + return +} + +// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including +// header. 
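+//
+// For example (illustrative sketch, not upstream text; img is an assumed
+// image.Image and w an io.Writer):
+//
+//	uat, err := NewUserAttributePhoto(img)
+//	if err == nil {
+//		err = uat.Serialize(w)
+//	}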
+func (uat *UserAttribute) Serialize(w io.Writer) (err error) { + var buf bytes.Buffer + for _, sp := range uat.Contents { + err = sp.Serialize(&buf) + if err != nil { + return err + } + } + if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { + return err + } + _, err = w.Write(buf.Bytes()) + return +} + +// ImageData returns zero or more byte slices, each containing +// JPEG File Interchange Format (JFIF), for each photo in the +// user attribute packet. +func (uat *UserAttribute) ImageData() (imageData [][]byte) { + for _, sp := range uat.Contents { + if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { + imageData = append(imageData, sp.Contents[16:]) + } + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go new file mode 100644 index 000000000..614fbafd5 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -0,0 +1,167 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + "io/ioutil" + "strings" +) + +// UserId contains text that is intended to represent the name and email +// address of the key holder. See RFC 4880, section 5.11. By convention, this +// takes the form "Full Name (Comment) <email@example.com>" +type UserId struct { + Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below. + + Name, Comment, Email string +} + +func hasInvalidCharacters(s string) bool { + for _, c := range s { + switch c { + case '(', ')', '<', '>', 0: + return true + } + } + return false +} + +// NewUserId returns a UserId or nil if any of the arguments contain invalid +// characters. The invalid characters are '\x00', '(', ')', '<' and '>' +func NewUserId(name, comment, email string) *UserId { + // RFC 4880 doesn't deal with the structure of userid strings; the + // name, comment and email form is just a convention. However, there's + // no convention about escaping the metacharacters and GPG just refuses + // to create user ids where, say, the name contains a '('. We mirror + // this behaviour. + + if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { + return nil + } + + uid := new(UserId) + uid.Name, uid.Comment, uid.Email = name, comment, email + uid.Id = name + if len(comment) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "(" + uid.Id += comment + uid.Id += ")" + } + if len(email) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "<" + uid.Id += email + uid.Id += ">" + } + return uid +} + +func (uid *UserId) parse(r io.Reader) (err error) { + // RFC 4880, section 5.11 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uid.Id = string(b) + uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) + return +} + +// Serialize marshals uid to w in the form of an OpenPGP packet, including +// header. +func (uid *UserId) Serialize(w io.Writer) error { + err := serializeHeader(w, packetTypeUserId, len(uid.Id)) + if err != nil { + return err + } + _, err = w.Write([]byte(uid.Id)) + return err +} + +// parseUserId extracts the name, comment and email from a user id string that +// is formatted as "Full Name (Comment) <email@example.com>".
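+//
+// For example (illustrative):
+//
+//	parseUserId("Full Name (Comment) <email@example.com>")
+//	// returns ("Full Name", "Comment", "email@example.com")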
+func parseUserId(id string) (name, comment, email string) { + var n, c, e struct { + start, end int + } + var state int + + for offset, rune := range id { + switch state { + case 0: + // Entering name + n.start = offset + state = 1 + fallthrough + case 1: + // In name + if rune == '(' { + state = 2 + n.end = offset + } else if rune == '<' { + state = 5 + n.end = offset + } + case 2: + // Entering comment + c.start = offset + state = 3 + fallthrough + case 3: + // In comment + if rune == ')' { + state = 4 + c.end = offset + } + case 4: + // Between comment and email + if rune == '<' { + state = 5 + } + case 5: + // Entering email + e.start = offset + state = 6 + fallthrough + case 6: + // In email + if rune == '>' { + state = 7 + e.end = offset + } + default: + // After email + } + } + switch state { + case 1: + // ended in the name + n.end = len(id) + case 3: + // ended in comment + c.end = len(id) + case 6: + // ended in email + e.end = len(id) + } + + name = strings.TrimSpace(id[n.start:n.end]) + comment = strings.TrimSpace(id[c.start:c.end]) + email = strings.TrimSpace(id[e.start:e.end]) + + // RFC 2822 3.4: alternate simple form of a mailbox + if email == "" && strings.ContainsRune(name, '@') { + email = name + name = "" + } + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go new file mode 100644 index 000000000..8499c7379 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -0,0 +1,592 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package openpgp implements high level operations on OpenPGP messages. +package openpgp // import "github.com/ProtonMail/go-crypto/openpgp" + +import ( + "crypto" + _ "crypto/sha256" + _ "crypto/sha512" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/packet" + _ "golang.org/x/crypto/sha3" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. 
The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + Signature *packet.Signature // the signature packet itself. + SignatureError error // nil if the signature is good. + UnverifiedSignatures []*packet.Signature // all other unverified signature packets. + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. +func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted + var edp packet.EncryptedDataPacket + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. 
+ md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: + break + default: + continue + } + if keyring != nil { + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + } + case *packet.SymmetricallyEncrypted: + if !p.IntegrityProtected && !config.AllowUnauthenticatedMessages() { + return nil, errors.UnsupportedError("message is not integrity protected") + } + edp = p + break ParsePackets + case *packet.AEADEncrypted: + edp = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring, config) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. +FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if len(pk.encryptedKey.Key) == 0 { + errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + if errDec != nil { + continue + } + } + // Try to decrypt symmetrically encrypted + decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + // In v4, on wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc: + // only for < 5% of cases we will proceed to decrypt the data + if err == nil { + decrypted, err = edp.Decrypt(cipherFunc, key) + if err != nil { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config) + if sensitiveParsingErr != nil { + return nil, errors.StructuralError("parsing error") + } + return mdFinal, nil +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. 
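+//
+// ReadMessage above is the exported entry point; e.g. (illustrative sketch,
+// not upstream text; r and keyring are assumed):
+//
+//	md, err := ReadMessage(r, keyring, func(keys []Key, symmetric bool) ([]byte, error) {
+//		return []byte("passphrase"), nil
+//	}, nil)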
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash + var prevLast bool +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if prevLast { + return nil, errors.UnsupportedError("nested signature packets") + } + + if p.IsLast { + prevLast = true + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md.SignatureError = err + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + if keyring != nil { + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.IsSigned && md.SignatureError == nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok { + return nil, nil, errors.UnsupportedError("unsupported hash function") + } + if !hashFunc.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc))) + } + h := hashFunc.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) + if sensitiveParsingError == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. 
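+//
+// This is why callers must drain UnverifiedBody to EOF before trusting the
+// plaintext, e.g. (illustrative sketch, not upstream text):
+//
+//	body, err := io.ReadAll(md.UnverifiedBody)
+//	if err != nil || md.SignatureError != nil {
+//		// reject body
+//	}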
+type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails + config *packet.Config +} + +func (scr *signatureCheckReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf) + + // Hash only if required + if scr.md.SignedBy != nil { + scr.wrappedHash.Write(buf[:n]) + } + + if sensitiveParsingError == io.EOF { + var p packet.Packet + var readError error + var sig *packet.Signature + + p, readError = scr.packets.Next() + for readError == nil { + var ok bool + if sig, ok = p.(*packet.Signature); ok { + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.Metadata = scr.md.LiteralData + } + + // If signature KeyID matches + if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { + key := scr.md.SignedBy + signatureError := key.PublicKey.VerifySignature(scr.h, sig) + if signatureError == nil { + signatureError = checkSignatureDetails(key, sig, scr.config) + } + scr.md.Signature = sig + scr.md.SignatureError = signatureError + } else { + scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) + } + } + + p, readError = scr.packets.Next() + } + + if scr.md.SignedBy != nil && scr.md.Signature == nil { + if scr.md.UnverifiedSignatures == nil { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature") + } else { + scr.md.SignatureError = errors.StructuralError("No matching signature found") + } + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + } + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// VerifyDetachedSignature takes a signed file and a detached signature and +// returns the signature packet and the entity the signature was signed by, +// if any, and a possible signature verification error. +// If the signer isn't known, ErrUnknownIssuer is returned. +func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { + var expectedHashes []crypto.Hash + return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) +} + +// VerifyDetachedSignatureAndHash performs the same actions as +// VerifyDetachedSignature and checks that the expected hash functions were used. +func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { + return verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the entity the signature was signed by, if any, and a possible +// signature verification error. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + var expectedHashes []crypto.Hash + return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config) +} + +// CheckDetachedSignatureAndHash performs the same actions as +// CheckDetachedSignature and checks that the expected hash functions were used. 
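+//
+// For example (illustrative sketch, not upstream text; keyring, signedFile
+// and sigFile are assumed):
+//
+//	signer, err := CheckDetachedSignature(keyring, signedFile, sigFile, nil)
+//	if err == nil {
+//		// the signature is valid and was made by signer
+//	}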
+func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { + _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, config) + return +} + +func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + expectedHashesLen := len(expectedHashes) + packets := packet.NewReader(signature) + for { + p, err = packets.Next() + if err == io.EOF { + return nil, nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, nil, err + } + + var ok bool + sig, ok = p.(*packet.Signature) + if !ok { + return nil, nil, errors.StructuralError("non signature packet found") + } + if sig.IssuerKeyId == nil { + return nil, nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + + for i, expectedHash := range expectedHashes { + if hashFunc == expectedHash { + break + } + if i+1 == expectedHashesLen { + return nil, nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers") + } + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, nil, err + } + + for _, key := range keys { + err = key.PublicKey.VerifySignature(h, sig) + if err == nil { + return sig, key.Entity, checkSignatureDetails(&key, sig, config) + } + } + + return nil, nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. +func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + + return CheckDetachedSignature(keyring, signed, body, config) +} + +// checkSignatureDetails returns an error if: +// - The signature (or one of the binding signatures mentioned below) +// has a unknown critical notation data subpacket +// - The primary key of the signing entity is revoked +// - The primary identity is revoked +// - The signature is expired +// - The primary key of the signing entity is expired according to the +// primary identity binding signature +// +// ... or, if the signature was signed by a subkey and: +// - The signing subkey is revoked +// - The signing subkey is expired according to the subkey binding signature +// - The signing subkey binding signature is expired +// - The signing subkey cross-signature is expired +// +// NOTE: The order of these checks is important, as the caller may choose to +// ignore ErrSignatureExpired or ErrKeyExpired errors, but should never +// ignore any other errors. 
+// +// TODO: Also return an error if: +// - The primary key is expired according to a direct-key signature +// - (For V5 keys only:) The direct-key signature (exists and) is expired +func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error { + now := config.Now() + primaryIdentity := key.Entity.PrimaryIdentity() + signedBySubKey := key.PublicKey != key.Entity.PrimaryKey + sigsToCheck := []*packet.Signature{signature, primaryIdentity.SelfSignature} + if signedBySubKey { + sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature) + } + for _, sig := range sigsToCheck { + for _, notation := range sig.Notations { + if notation.IsCritical && !config.KnownNotation(notation.Name) { + return errors.SignatureError("unknown critical notation: " + notation.Name) + } + } + } + if key.Entity.Revoked(now) || // primary key is revoked + (signedBySubKey && key.Revoked(now)) || // subkey is revoked + primaryIdentity.Revoked(now) { // primary identity is revoked + return errors.ErrKeyRevoked + } + if key.Entity.PrimaryKey.KeyExpired(primaryIdentity.SelfSignature, now) { // primary key is expired + return errors.ErrKeyExpired + } + if signedBySubKey { + if key.PublicKey.KeyExpired(key.SelfSignature, now) { // subkey is expired + return errors.ErrKeyExpired + } + } + for _, sig := range sigsToCheck { + if sig.SigExpired(now) { // any of the relevant signatures are expired + return errors.ErrSignatureExpired + } + } + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go new file mode 100644 index 000000000..db6dad5c0 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -0,0 +1,274 @@ +package openpgp + +const testKey1KeyId uint64 = 0xA34D7E18C20C31BB +const testKey3KeyId uint64 = 0x338934250CCC0360 +const testKeyP256KeyId uint64 = 0xd44a2c495918513e + +const signedInput = "Signed message\nline 2\nline 3\n" +const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" + +const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b" + +const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77" + +const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" + +const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" + +const detachedSignatureP256Hex = 
"885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817" + +// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt +const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw==" + +const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" + +const testKeys1And2PrivateHex = 
"9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8
ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" + +const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" + +const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" + +const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" + +const signedEncryptedMessageHex = "c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29" + +const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const unverifiedSignatureEncryptedMessageHex = 
"c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3" + +const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377" + +const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857" + +const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const dsaTestKeyPrivateHex = 
"9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp +idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn 
+vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB +AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X +0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL +IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk +VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn +gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 +TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx +q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz +dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA +CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 +ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ +eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid +AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV +bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK +/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA +A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX +TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc +lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 +rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN +oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 +QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU +nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC +AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp +BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad +AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL +VrM0m72/jnpKo04= +=zNCn +-----END PGP PRIVATE KEY BLOCK-----` + +const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4 +sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk +Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/ +AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD +24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX ++WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8 +B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX +fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA +FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9 +ex7En5r7rHR5xwX82Msc+Rq9dSyO +=7MrZ +-----END PGP PUBLIC KEY BLOCK-----` + +const dsaKeyWithSHA512 = 
`9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` + +const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` + +const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Comment: GPGTools - https://gpgtools.org + +mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY +BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z +tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 +JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV +/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ +K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H +JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx +YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1 +b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi +UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M +pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM +AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz +786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd +EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB +=RZia +-----END PGP PUBLIC KEY BLOCK----- +` + +const signedMessageV3 = `-----BEGIN PGP MESSAGE----- +Comment: GPGTools - https://gpgtools.org + +owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP +q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka +uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka +DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d +iT57d/OhWwA= +=hG7R +-----END PGP MESSAGE----- +` + +// 
https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/ +const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd +fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA +Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC +X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI +CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9 +M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA +MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD +AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF +GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb +DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7 +TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== +=IiS2 +-----END PGP PRIVATE KEY BLOCK-----` + +// Generated with the above private key +const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE----- +Version: OpenPGP.js v4.10.7 +Comment: https://openpgpjs.org + +xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf +bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G +y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv +UQdl5MlBka1QSNbMq2Bz7XwNPg4= +=6lbM +-----END PGP MESSAGE-----` + +const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w +bGU+wsEABBMBCgATBYJeO2eVAgsJAxUICgKbAQIeAQAhCRD7/MgqAV5zMBYhBNGm +bhojsYLJmA94jPv8yCoBXnMwKWUMAJ3FKZfJ2mXvh+GFqgymvK4NoKkDRPB0CbUN +aDdG7ZOizQrWXo7Da2MYIZ6eZUDqBKLdhZ5gZfVnisDfu/yeCgpENaKib1MPHpA8 +nZQjnPejbBDomNqY8HRzr5jvXNlwywBpjWGtegCKUY9xbSynjbfzIlMrWL4S+Rfl ++bOOQKRyYJWXmECmVyqY8cz2VUYmETjNcwC8VCDUxQnhtcCJ7Aej22hfYwVEPb/J +BsJBPq8WECCiGfJ9Y2y6TF+62KzG9Kfs5hqUeHhQy8V4TSi479ewwL7DH86XmIIK +chSANBS+7iyMtctjNZfmF9zYdGJFvjI/mbBR/lK66E515Inuf75XnL8hqlXuwqvG +ni+i03Aet1DzULZEIio4uIU6ioc1lGO9h7K2Xn4S7QQH1QoISNMWqXibUR0RCGjw +FsEDTt2QwJl8XXxoJCooM7BCcCQo+rMNVUHDjIwrdoQjPld3YZsUQQRcqH6bLuln +cfn5ufl8zTGWKydoj/iTz8KcjZ7w187AzQRdpZzyAQwA1jC/XGxjK6ddgrRfW9j+ +s/U00++EvIsgTs2kr3Rg0GP7FLWV0YNtR1mpl55/bEl7yAxCDTkOgPUMXcaKlnQh +6zrlt6H53mF6Bvs3inOHQvOsGtU0dqvb1vkTF0juLiJgPlM7pWv+pNQ6IA39vKoQ +sTMBv4v5vYNXP9GgKbg8inUNT17BxzZYHfw5+q63ectgDm2on1e8CIRCZ76oBVwz +dkVxoy3gjh1eENlk2D4P0uJNZzF1Q8GV67yLANGMCDICE/OkWn6daipYDzW4iJQt +YPUWP4hWhjdm+CK+hg6IQUEn2Vtvi16D2blRP8BpUNNa4fNuylWVuJV76rIHvsLZ +1pbM3LHpRgE8s6jivS3Rz3WRs0TmWCNnvHPqWizQ3VTy+r3UQVJ5AmhJDrZdZq9i +aUIuZ01PoE1+CHiJwuxPtWvVAxf2POcm1M/F1fK1J0e+lKlQuyonTXqXR22Y41wr +fP2aPk3nPSTW2DUAf3vRMZg57ZpRxLEhEMxcM4/LMR+PABEBAAHCwrIEGAEKAAkF +gl8sAVYCmwIB3QkQ+/zIKgFeczDA+qAEGQEKAAwFgl47Z5UFgwB4TOAAIQkQfC+q +Tfk8N7IWIQQd3OFfCSF87i87N2B8L6pN+Tw3st58C/0exp0X2U4LqicSHEOSqHZj +jiysdqIELHGyo5DSPv92UFPp36aqjF9OFgtNNwSa56fmAVCD4+hor/fKARRIeIjF +qdIC5Y/9a4B10NQFJa5lsvB38x/d39LI2kEoglZnqWgdJskROo3vNQF4KlIcm6FH +dn4WI8UkC5oUUcrpZVMSKoacIaxLwqnXT42nIVgYYuqrd/ZagZZjG5WlrTOd5+NI +zi/l0fWProcPHGLjmAh4Thu8i7omtVw1nQaMnq9I77ffg3cPDgXknYrLL+q8xXh/ 
+0mEJyIhnmPwllWCSZuLv9DrD5pOexFfdlwXhf6cLzNpW6QhXD/Tf5KrqIPr9aOv8 +9xaEEXWh0vEby2kIsI2++ft+vfdIyxYw/wKqx0awTSnuBV1rG3z1dswX4BfoY66x +Bz3KOVqlz9+mG/FTRQwrgPvR+qgLCHbuotxoGN7fzW+PI75hQG5JQAqhsC9sHjQH +UrI21/VUNwzfw3v5pYsWuFb5bdQ3ASJetICQiMy7IW8WIQTRpm4aI7GCyZgPeIz7 +/MgqAV5zMG6/C/wLpPl/9e6Hf5wmXIUwpZNQbNZvpiCcyx9sXsHXaycOQVxn3McZ +nYOUP9/mobl1tIeDQyTNbkxWjU0zzJl8XQsDZerb5098pg+x7oGIL7M1vn5s5JMl +owROourqF88JEtOBxLMxlAM7X4hB48xKQ3Hu9hS1GdnqLKki4MqRGl4l5FUwyGOM +GjyS3TzkfiDJNwQxybQiC9n57ij20ieNyLfuWCMLcNNnZUgZtnF6wCctoq/0ZIWu +a7nvuA/XC2WW9YjEJJiWdy5109pqac+qWiY11HWy/nms4gpMdxVpT0RhrKGWq4o0 +M5q3ZElOoeN70UO3OSbU5EVrG7gB1GuwF9mTHUVlV0veSTw0axkta3FGT//XfSpD +lRrCkyLzwq0M+UUHQAuYpAfobDlDdnxxOD2jm5GyTzak3GSVFfjW09QFVO6HlGp5 +01/jtzkUiS6nwoHHkfnyn0beZuR8X6KlcrzLB0VFgQFLmkSM9cSOgYhD0PTu9aHb +hW1Hj9AO8lzggBQ= +=Nt+N +-----END PGP PUBLIC KEY BLOCK----- +` + +const sigFromKeyWithExpiredCrossSig = `-----BEGIN PGP SIGNATURE----- + +wsDzBAABCgAGBYJfLAFsACEJEHwvqk35PDeyFiEEHdzhXwkhfO4vOzdgfC+qTfk8 +N7KiqwwAts4QGB7v9bABCC2qkTxJhmStC0wQMcHRcjL/qAiVnmasQWmvE9KVsdm3 +AaXd8mIx4a37/RRvr9dYrY2eE4uw72cMqPxNja2tvVXkHQvk1oEUqfkvbXs4ypKI +NyeTWjXNOTZEbg0hbm3nMy+Wv7zgB1CEvAsEboLDJlhGqPcD+X8a6CJGrBGUBUrv +KVmZr3U6vEzClz3DBLpoddCQseJRhT4YM1nKmBlZ5quh2LFgTSpajv5OsZheqt9y +EZAPbqmLhDmWRQwGzkWHKceKS7nZ/ox2WK6OS7Ob8ZGZkM64iPo6/EGj5Yc19vQN +AGiIaPEGszBBWlOpHTPhNm0LB0nMWqqaT87oNYwP8CQuuxDb6rKJ2lffCmZH27Lb +UbQZcH8J+0UhpeaiadPZxH5ATJAcenmVtVVMLVOFnm+eIlxzov9ntpgGYt8hLdXB +ITEG9mMgp3TGS9ZzSifMZ8UGtHdp9QdBg8NEVPFzDOMGxpc/Bftav7RRRuPiAER+ +7A5CBid5 +=aQkm +-----END PGP SIGNATURE----- +` + +const signedMessageWithCriticalNotation = `-----BEGIN PGP MESSAGE----- + +owGbwMvMwMH4oOW7S46CznTG09xJDDE3Wl1KUotLuDousDAwcjBYiSmyXL+48d6x +U1PSGUxcj8IUszKBVMpMaWAAAgEGZpAeh9SKxNyCnFS95PzcytRiBi5OAZjyXXzM +f8WYLqv7TXP61Sa4rqT12CI3xaN73YS2pt089f96odCKaEPnWJ3iSGmzJaW/ug10 +2Zo8Wj2k4s7t8wt4H3HtTu+y5UZfV3VOO+l//sdE/o+Lsub8FZH7/eOq7OnbNp4n +vwjE8mqJXetNMfj8r2SCyvkEnlVRYR+/mnge+ib56FdJ8uKtqSxyvgA= +=fRXs +-----END PGP MESSAGE-----` + +const criticalNotationSigner = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EUmEvTgEEANyWtQQMOybQ9JltDqmaX0WnNPJeLILIM36sw6zL0nfTQ5zXSS3+ +fIF6P29lJFxpblWk02PSID5zX/DYU9/zjM2xPO8Oa4xo0cVTOTLj++Ri5mtr//f5 +GLsIXxFrBJhD/ghFsL3Op0GXOeLJ9A5bsOn8th7x6JucNKuaRB6bQbSPABEBAAG0 +JFRlc3QgTWNUZXN0aW5ndG9uIDx0ZXN0QGV4YW1wbGUuY29tPoi5BBMBAgAjBQJS +YS9OAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQSmNhOk1uQJQwDAP6 +AgrTyqkRlJVqz2pb46TfbDM2TDF7o9CBnBzIGoxBhlRwpqALz7z2kxBDmwpQa+ki +Bq3jZN/UosY9y8bhwMAlnrDY9jP1gdCo+H0sD48CdXybblNwaYpwqC8VSpDdTndf +9j2wE/weihGp/DAdy/2kyBCaiOY1sjhUfJ1GogF49rC4jQRSYS9OAQQA6R/PtBFa +JaT4jq10yqASk4sqwVMsc6HcifM5lSdxzExFP74naUMMyEsKHP53QxTF0Grqusag +Qg/ZtgT0CN1HUM152y7ACOdp1giKjpMzOTQClqCoclyvWOFB+L/SwGEIJf7LSCEr +woBuJifJc8xAVr0XX0JthoW+uP91eTQ3XpsAEQEAAYkBPQQYAQIACQUCUmEvTgIb +LgCoCRBKY2E6TW5AlJ0gBBkBAgAGBQJSYS9OAAoJEOCE90RsICyXuqIEANmmiRCA +SF7YK7PvFkieJNwzeK0V3F2lGX+uu6Y3Q/Zxdtwc4xR+me/CSBmsURyXTO29OWhP +GLszPH9zSJU9BdDi6v0yNprmFPX/1Ng0Abn/sCkwetvjxC1YIvTLFwtUL/7v6NS2 +bZpsUxRTg9+cSrMWWSNjiY9qUKajm1tuzPDZXAUEAMNmAN3xXN/Kjyvj2OK2ck0X +W748sl/tc3qiKPMJ+0AkMF7Pjhmh9nxqE9+QCEl7qinFqqBLjuzgUhBU4QlwX1GD +AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY +hz3tYjKhoFTKEIq3y3Pp +=h/aX +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go new file mode 100644 index 000000000..a43695964 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go @@ -0,0 +1,407 @@ +// 
Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2k implements the various OpenPGP string-to-key transforms as
+// specified in RFC 4880 section 3.7.1, and Argon2 specified in
+// draft-ietf-openpgp-crypto-refresh-08 section 3.7.1.4.
+package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k"
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+	"golang.org/x/crypto/argon2"
+)
+
+type Mode uint8
+
+// Defines the default S2KMode constants
+//
+// 0 (simple), 1(salted), 3(iterated), 4(argon2)
+const (
+	SimpleS2K         Mode = 0
+	SaltedS2K         Mode = 1
+	IteratedSaltedS2K Mode = 3
+	Argon2S2K         Mode = 4
+	GnuS2K            Mode = 101
+)
+
+const Argon2SaltSize int = 16
+
+// Params contains all the parameters of the s2k packet
+type Params struct {
+	// mode is the mode of the s2k function.
+	// It can be 0 (simple), 1 (salted), 3 (iterated) or 4 (argon2);
+	// 2 is reserved and 100-110 are private/experimental.
+	mode Mode
+	// hashId is the ID of the hash function used in any of the modes
+	hashId byte
+	// salt is a byte array to use as a salt in the hashing process or argon2
+	saltBytes [Argon2SaltSize]byte
+	// countByte is used to determine how many rounds of hashing are to
+	// be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3.
+	countByte byte
+	// passes is a parameter in Argon2 to determine the number of iterations.
+	// See the crypto refresh, Section 3.7.1.4.
+	passes byte
+	// parallelism is a parameter in Argon2 to determine the degree of parallelism.
+	// See the crypto refresh, Section 3.7.1.4.
+	parallelism byte
+	// memoryExp is a parameter in Argon2 to determine the memory usage,
+	// i.e., 2 ** memoryExp kibibytes.
+	// See the crypto refresh, Section 3.7.1.4.
+	memoryExp byte
+}
+
+// encodeCount converts an iterative "count" in the range 65536 to
+// 65011712, inclusive, to an encoded count. The return value is the
+// octet that is actually stored in the GPG file. encodeCount panics
+// if i is not in the above range (Config.EncodedCount takes care to
+// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
+func encodeCount(i int) uint8 {
+	if i < 65536 || i > 65011712 {
+		panic("count arg i outside the required range")
+	}
+
+	for encoded := 96; encoded < 256; encoded++ {
+		count := decodeCount(uint8(encoded))
+		if count >= i {
+			return uint8(encoded)
+		}
+	}
+
+	return 255
+}
+
+// decodeCount returns the s2k mode 3 iterative "count" corresponding to
+// the encoded octet c.
+func decodeCount(c uint8) int {
+	return (16 + int(c&15)) << (uint32(c>>4) + 6)
+}
+
+// encodeMemory converts the Argon2 "memory" in the range parallelism*8 to
+// 2**31, inclusive, to an encoded memory. The return value is the
+// octet that is actually stored in the GPG file. encodeMemory panics
+// if memory is not in the above range.
+// See OpenPGP crypto refresh Section 3.7.1.4.
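+// For illustration: decodeMemory(16) is 1<<16 KiB, i.e. 64 MiB, so
+// encodeMemory(64*1024, 4) returns 16, matching the default that
+// Argon2Config.EncodedMemory returns in s2k_config.go.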
+func encodeMemory(memory uint32, parallelism uint8) uint8 { + if memory < (8 * uint32(parallelism)) || memory > uint32(2147483648) { + panic("Memory argument memory is outside the required range") + } + + for exp := 3; exp < 31; exp++ { + compare := decodeMemory(uint8(exp)) + if compare >= memory { + return uint8(exp) + } + } + + return 31 +} + +// decodeMemory computes the decoded memory in kibibytes as 2**memoryExponent +func decodeMemory(memoryExponent uint8) uint32 { + return uint32(1) << memoryExponent +} + +// Simple writes to out the result of computing the Simple S2K function (RFC +// 4880, section 3.7.1.1) using the given hash and input passphrase. +func Simple(out []byte, h hash.Hash, in []byte) { + Salted(out, h, in, nil) +} + +var zero [1]byte + +// Salted writes to out the result of computing the Salted S2K function (RFC +// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. +func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { + done := 0 + var digest []byte + + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + h.Write(salt) + h.Write(in) + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Iterated writes to out the result of computing the Iterated and Salted S2K +// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, +// salt and iteration count. +func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Argon2 writes to out the key derived from the password (in) with the Argon2 +// function (the crypto refresh, section 3.7.1.4) +func Argon2(out []byte, in []byte, salt []byte, passes uint8, paralellism uint8, memoryExp uint8) { + key := argon2.IDKey(in, salt, uint32(passes), decodeMemory(memoryExp), paralellism, uint32(len(out))) + copy(out[:], key) +} + +// Generate generates valid parameters from given configuration. +// It will enforce the Iterated and Salted or Argon2 S2K method. 
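+// A minimal usage sketch (hypothetical values, imports and error
+// handling elided):
+//
+//	params, _ := Generate(rand.Reader, &Config{S2KMode: IteratedSaltedS2K, S2KCount: 65536})
+//	f, _ := params.Function()
+//	key := make([]byte, 32)
+//	f(key, []byte("passphrase"))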
+func Generate(rand io.Reader, c *Config) (*Params, error) { + var params *Params + if c != nil && c.Mode() == Argon2S2K { + // handle Argon2 case + argonConfig := c.Argon2() + params = &Params{ + mode: Argon2S2K, + passes: argonConfig.Passes(), + parallelism: argonConfig.Parallelism(), + memoryExp: argonConfig.EncodedMemory(), + } + } else if c != nil && c.PassphraseIsHighEntropy && c.Mode() == SaltedS2K { // Allow SaltedS2K if PassphraseIsHighEntropy + hashId, ok := algorithm.HashToHashId(c.hash()) + if !ok { + return nil, errors.UnsupportedError("no such hash") + } + + params = &Params{ + mode: SaltedS2K, + hashId: hashId, + } + } else { // Enforce IteratedSaltedS2K method otherwise + hashId, ok := algorithm.HashToHashId(c.hash()) + if !ok { + return nil, errors.UnsupportedError("no such hash") + } + if c != nil { + c.S2KMode = IteratedSaltedS2K + } + params = &Params{ + mode: IteratedSaltedS2K, + hashId: hashId, + countByte: c.EncodedCount(), + } + } + if _, err := io.ReadFull(rand, params.salt()); err != nil { + return nil, err + } + return params, nil +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. If the S2K is a special +// GNU extension that indicates that the private key is missing, then the error +// returned is errors.ErrDummyPrivateKey. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + params, err := ParseIntoParams(r) + if err != nil { + return nil, err + } + + return params.Function() +} + +// ParseIntoParams reads a binary specification for a string-to-key +// transformation from r and returns a struct describing the s2k parameters. +func ParseIntoParams(r io.Reader) (params *Params, err error) { + var buf [Argon2SaltSize + 3]byte + + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + + params = &Params{ + mode: Mode(buf[0]), + } + + switch params.mode { + case SimpleS2K: + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return nil, err + } + params.hashId = buf[0] + return params, nil + case SaltedS2K: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return nil, err + } + params.hashId = buf[0] + copy(params.salt(), buf[1:9]) + return params, nil + case IteratedSaltedS2K: + _, err = io.ReadFull(r, buf[:10]) + if err != nil { + return nil, err + } + params.hashId = buf[0] + copy(params.salt(), buf[1:9]) + params.countByte = buf[9] + return params, nil + case Argon2S2K: + _, err = io.ReadFull(r, buf[:Argon2SaltSize+3]) + if err != nil { + return nil, err + } + copy(params.salt(), buf[:Argon2SaltSize]) + params.passes = buf[Argon2SaltSize] + params.parallelism = buf[Argon2SaltSize+1] + params.memoryExp = buf[Argon2SaltSize+2] + return params, nil + case GnuS2K: + // This is a GNU extension. 
See + // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 + if _, err = io.ReadFull(r, buf[:5]); err != nil { + return nil, err + } + params.hashId = buf[0] + if buf[1] == 'G' && buf[2] == 'N' && buf[3] == 'U' && buf[4] == 1 { + return params, nil + } + return nil, errors.UnsupportedError("GNU S2K extension") + } + + return nil, errors.UnsupportedError("S2K function") +} + +func (params *Params) Dummy() bool { + return params != nil && params.mode == GnuS2K +} + +func (params *Params) salt() []byte { + switch params.mode { + case SaltedS2K, IteratedSaltedS2K: return params.saltBytes[:8] + case Argon2S2K: return params.saltBytes[:Argon2SaltSize] + default: return nil + } +} + +func (params *Params) Function() (f func(out, in []byte), err error) { + if params.Dummy() { + return nil, errors.ErrDummyPrivateKey("dummy key found") + } + var hashObj crypto.Hash + if params.mode != Argon2S2K { + var ok bool + hashObj, ok = algorithm.HashIdToHashWithSha1(params.hashId) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId))) + } + if !hashObj.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj))) + } + } + + switch params.mode { + case SimpleS2K: + f := func(out, in []byte) { + Simple(out, hashObj.New(), in) + } + + return f, nil + case SaltedS2K: + f := func(out, in []byte) { + Salted(out, hashObj.New(), in, params.salt()) + } + + return f, nil + case IteratedSaltedS2K: + f := func(out, in []byte) { + Iterated(out, hashObj.New(), in, params.salt(), decodeCount(params.countByte)) + } + + return f, nil + case Argon2S2K: + f := func(out, in []byte) { + Argon2(out, in, params.salt(), params.passes, params.parallelism, params.memoryExp) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +func (params *Params) Serialize(w io.Writer) (err error) { + if _, err = w.Write([]byte{uint8(params.mode)}); err != nil { + return + } + if params.mode != Argon2S2K { + if _, err = w.Write([]byte{params.hashId}); err != nil { + return + } + } + if params.Dummy() { + _, err = w.Write(append([]byte("GNU"), 1)) + return + } + if params.mode > 0 { + if _, err = w.Write(params.salt()); err != nil { + return + } + if params.mode == IteratedSaltedS2K { + _, err = w.Write([]byte{params.countByte}) + } + if params.mode == Argon2S2K { + _, err = w.Write([]byte{params.passes, params.parallelism, params.memoryExp}) + } + } + return +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. 
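+// A minimal sketch of the round trip (hypothetical key size, imports and
+// error handling elided):
+//
+//	var buf bytes.Buffer
+//	key := make([]byte, 32)
+//	_ = Serialize(&buf, key, rand.Reader, []byte("passphrase"), nil)
+//	f, _ := Parse(&buf) // reads back the descriptor just written
+//	same := make([]byte, 32)
+//	f(same, []byte("passphrase")) // same now equals key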
+func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
+	params, err := Generate(rand, c)
+	if err != nil {
+		return err
+	}
+	err = params.Serialize(w)
+	if err != nil {
+		return err
+	}
+
+	f, err := params.Function()
+	if err != nil {
+		return err
+	}
+	f(key, passphrase)
+	return nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go
new file mode 100644
index 000000000..25a4442df
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go
@@ -0,0 +1,26 @@
+package s2k

+// Cache stores keys derived with s2k functions from one passphrase
+// to avoid recomputation if multiple items are encrypted with
+// the same parameters.
+type Cache map[Params][]byte
+
+// GetOrComputeDerivedKey tries to retrieve the key
+// for the given s2k parameters from the cache.
+// If there is no hit, it derives the key with the s2k function from the passphrase,
+// updates the cache, and returns the key.
+func (c *Cache) GetOrComputeDerivedKey(passphrase []byte, params *Params, expectedKeySize int) ([]byte, error) {
+	key, found := (*c)[*params]
+	if !found || len(key) != expectedKeySize {
+		var err error
+		derivedKey := make([]byte, expectedKeySize)
+		s2k, err := params.Function()
+		if err != nil {
+			return nil, err
+		}
+		s2k(derivedKey, passphrase)
+		(*c)[*params] = derivedKey
+		return derivedKey, nil
+	}
+	return key, nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go
new file mode 100644
index 000000000..b40be5228
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go
@@ -0,0 +1,129 @@
+package s2k
+
+import "crypto"
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values.
+type Config struct {
+	// S2K (String to Key) mode, used for key derivation in the context of secret key encryption
+	// and passphrase-encrypted data. Either s2k.Argon2S2K or s2k.IteratedSaltedS2K may be used.
+	// If the passphrase is a high-entropy key, indicated by setting PassphraseIsHighEntropy to true,
+	// s2k.SaltedS2K can also be used.
+	// Note: Argon2 is the strongest option but not all OpenPGP implementations are compatible with it
+	// (pending standardisation).
+	// 0 (simple), 1(salted), 3(iterated), 4(argon2)
+	// 2(reserved) 100-110(private/experimental).
+	S2KMode Mode
+	// Only relevant if S2KMode is not set to s2k.Argon2S2K.
+	// Hash is the default hash function to be used. If
+	// zero, SHA256 is used.
+	Hash crypto.Hash
+	// Argon2 parameters for S2K (String to Key).
+	// Only relevant if S2KMode is set to s2k.Argon2S2K.
+	// If nil, default parameters are used.
+	// For more details on the choice of parameters, see https://tools.ietf.org/html/rfc9106#section-4.
+	Argon2Config *Argon2Config
+	// Only relevant if S2KMode is set to s2k.IteratedSaltedS2K.
+	// Iteration count for Iterated S2K (String to Key). It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 65536 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 16777216 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+	// Indicates whether the passphrase passed by the application is a
+	// high-entropy key (e.g. it's randomly generated or derived from
+	// another passphrase using a strong key derivation function).
+	// When true, allows the S2KMode to be s2k.SaltedS2K.
+	// When the passphrase is not a high-entropy key, using SaltedS2K is
+	// insecure, and not allowed by draft-ietf-openpgp-crypto-refresh-08.
+	PassphraseIsHighEntropy bool
+}
+
+// Argon2Config stores the Argon2 parameters.
+// A nil *Argon2Config is valid and results in all default values.
+type Argon2Config struct {
+	NumberOfPasses      uint8
+	DegreeOfParallelism uint8
+	// The memory parameter for Argon2 specifies desired memory usage in kibibytes.
+	// For example, memory=64*1024 sets the memory cost to 64 MiB.
+	Memory uint32
+}
+
+func (c *Config) Mode() Mode {
+	if c == nil {
+		return IteratedSaltedS2K
+	}
+	return c.S2KMode
+}
+
+func (c *Config) hash() crypto.Hash {
+	if c == nil || uint(c.Hash) == 0 {
+		return crypto.SHA256
+	}
+
+	return c.Hash
+}
+
+func (c *Config) Argon2() *Argon2Config {
+	if c == nil || c.Argon2Config == nil {
+		return nil
+	}
+	return c.Argon2Config
+}
+
+// EncodedCount gets the encoded iteration count.
+func (c *Config) EncodedCount() uint8 {
+	if c == nil || c.S2KCount == 0 {
+		return 224 // The common case. Corresponding to 16777216
+	}
+
+	i := c.S2KCount
+
+	switch {
+	case i < 65536:
+		i = 65536
+	case i > 65011712:
+		i = 65011712
+	}
+
+	return encodeCount(i)
+}
+
+func (c *Argon2Config) Passes() uint8 {
+	if c == nil || c.NumberOfPasses == 0 {
+		return 3
+	}
+	return c.NumberOfPasses
+}
+
+func (c *Argon2Config) Parallelism() uint8 {
+	if c == nil || c.DegreeOfParallelism == 0 {
+		return 4
+	}
+	return c.DegreeOfParallelism
+}
+
+func (c *Argon2Config) EncodedMemory() uint8 {
+	if c == nil || c.Memory == 0 {
+		return 16 // 64 MiB of RAM
+	}
+
+	memory := c.Memory
+	lowerBound := uint32(c.Parallelism()) * 8
+	upperBound := uint32(2147483648)
+
+	switch {
+	case memory < lowerBound:
+		memory = lowerBound
+	case memory > upperBound:
+		memory = upperBound
+	}
+
+	return encodeMemory(memory, c.Parallelism())
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
new file mode 100644
index 000000000..864d8ca6b
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
@@ -0,0 +1,583 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+	"time"
+
+	"github.com/ProtonMail/go-crypto/openpgp/armor"
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+	"github.com/ProtonMail/go-crypto/openpgp/packet"
+)
+
+// DetachSign signs message with the private key from signer (which must
+// already have been decrypted) and writes the signature to w.
+// If config is nil, sensible defaults will be used.
+func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
+	return detachSign(w, signer, message, packet.SigTypeBinary, config)
+}
+
+// ArmoredDetachSign signs message with the private key from signer (which
+// must already have been decrypted) and writes an armored signature to w.
+// If config is nil, sensible defaults will be used.
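+// A minimal sketch (assuming signer is an already decrypted *Entity;
+// imports and error handling elided):
+//
+//	var sig bytes.Buffer
+//	err := ArmoredDetachSign(&sig, signer, strings.NewReader("hello world"), nil)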
+func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return errors.InvalidArgumentError("no valid signing keys") + } + if signingKey.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signingKey.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + if _, ok := algorithm.HashToHashId(config.Hash()); !ok { + return errors.InvalidArgumentError("invalid hash function") + } + + sig := createSignaturePacket(signingKey.PublicKey, sigType, config) + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + if _, err = io.Copy(wrappedHash, message); err != nil { + return err + } + + err = sig.Sign(h, signingKey.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. 
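+// A minimal sketch (hypothetical passphrase, imports and error handling
+// elided):
+//
+//	pt, _ := SymmetricallyEncrypt(&out, []byte("passphrase"), nil, nil)
+//	pt.Write([]byte("secret data"))
+//	pt.Close() // flushes the final encrypted packet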
+func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+	if hints == nil {
+		hints = &FileHints{}
+	}
+
+	key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
+	if err != nil {
+		return
+	}
+
+	var w io.WriteCloser
+	cipherSuite := packet.CipherSuite{
+		Cipher: config.Cipher(),
+		Mode:   config.AEAD().Mode(),
+	}
+	w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), config.AEAD() != nil, cipherSuite, key, config)
+	if err != nil {
+		return
+	}
+
+	literalData := w
+	if algo := config.Compression(); algo != packet.CompressionNone {
+		var compConfig *packet.CompressionConfig
+		if config != nil {
+			compConfig = config.CompressionConfig
+		}
+		literalData, err = packet.SerializeCompressed(w, algo, compConfig)
+		if err != nil {
+			return
+		}
+	}
+
+	var epochSeconds uint32
+	if !hints.ModTime.IsZero() {
+		epochSeconds = uint32(hints.ModTime.Unix())
+	}
+	return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds)
+}
+
+// intersectPreferences mutates and returns a prefix of a that contains only
+// the values in the intersection of a and b. The order of a is preserved.
+func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
+	var j int
+	for _, v := range a {
+		for _, v2 := range b {
+			if v == v2 {
+				a[j] = v
+				j++
+				break
+			}
+		}
+	}
+
+	return a[:j]
+}
+
+// intersectCipherSuites mutates and returns a prefix of a that contains only
+// the values in the intersection of a and b. The order of a is preserved.
+func intersectCipherSuites(a [][2]uint8, b [][2]uint8) (intersection [][2]uint8) {
+	var j int
+	for _, v := range a {
+		for _, v2 := range b {
+			if v[0] == v2[0] && v[1] == v2[1] {
+				a[j] = v
+				j++
+				break
+			}
+		}
+	}
+
+	return a[:j]
+}
+
+func hashToHashId(h crypto.Hash) uint8 {
+	v, ok := algorithm.HashToHashId(h)
+	if !ok {
+		panic("tried to convert unknown hash")
+	}
+	return v
+}
+
+// EncryptText encrypts a message to a number of recipients and, optionally,
+// signs it. Optional information is contained in 'hints', also encrypted, that
+// aids the recipients in processing the message. The resulting WriteCloser
+// must be closed after the contents of the file have been written. If config
+// is nil, sensible defaults will be used. The signing is done in text mode.
+func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+	return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config)
+}
+
+// Encrypt encrypts a message to a number of recipients and, optionally, signs
+// it. hints contains optional information, that is also encrypted, that aids
+// the recipients in processing the message. The resulting WriteCloser must
+// be closed after the contents of the file have been written.
+// If config is nil, sensible defaults will be used.
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+	return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config)
+}
+
+// EncryptSplit encrypts a message to a number of recipients and, optionally, signs
+// it. hints contains optional information, that is also encrypted, that aids
+// the recipients in processing the message. The resulting WriteCloser must
+// be closed after the contents of the file have been written.
+// If config is nil, sensible defaults will be used. +func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) +} + +// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config) +} + +// writeAndSign writes the data as a payload package and, optionally, signs +// it. hints contains optional information, that is also encrypted, +// that aids the recipients in processing the message. The resulting +// WriteCloser must be closed after the contents of the file have been +// written. If config is nil, sensible defaults will be used. +func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := algorithm.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") + } + + if signer != nil { + ops := &packet.OnePassSignature{ + SigType: sigType, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if err := ops.Serialize(payload); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := payload + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. 
+ w = noOpCloser{w} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + h, wrappedHash, err := hashForSignature(hash, sigType) + if err != nil { + return nil, err + } + metadata := &packet.LiteralData{ + Format: 't', + FileName: hints.FileName, + Time: epochSeconds, + } + if hints.IsBinary { + metadata.Format = 'b' + } + return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil + } + return literalData, nil +} + +// encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + if len(to) == 0 { + return nil, errors.InvalidArgumentError("no encryption recipient provided") + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES256), + uint8(packet.CipherAES128), + } + + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA3_256), + hashToHashId(crypto.SHA3_512), + } + + // Prefer GCM if everyone supports it + candidateCipherSuites := [][2]uint8{ + {uint8(packet.CipherAES256), uint8(packet.AEADModeGCM)}, + {uint8(packet.CipherAES256), uint8(packet.AEADModeEAX)}, + {uint8(packet.CipherAES256), uint8(packet.AEADModeOCB)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeGCM)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeEAX)}, + {uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}, + } + + candidateCompression := []uint8{ + uint8(packet.CompressionNone), + uint8(packet.CompressionZIP), + uint8(packet.CompressionZLIB), + } + + encryptKeys := make([]Key, len(to)) + + // AEAD is used only if config enables it and every key supports it + aeadSupported := config.AEAD() != nil + + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys") + } + + sig := to[i].PrimaryIdentity().SelfSignature + if sig.SEIPDv2 == false { + aeadSupported = false + } + + candidateCiphers = intersectPreferences(candidateCiphers, sig.PreferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, sig.PreferredHash) + candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, sig.PreferredCipherSuites) + candidateCompression = intersectPreferences(candidateCompression, sig.PreferredCompression) + } + + // In the event that the intersection of supported algorithms is empty we use the ones + // labelled as MUST that every implementation supports. 
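+	// (Per the linked sections, these fallbacks are AES-128, SHA-256, and
+	// AES-128 with OCB.)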
+ if len(candidateCiphers) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.3 + candidateCiphers = []uint8{uint8(packet.CipherAES128)} + } + if len(candidateHashes) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#hash-algos + candidateHashes = []uint8{hashToHashId(crypto.SHA256)} + } + if len(candidateCipherSuites) == 0 { + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6 + candidateCipherSuites = [][2]uint8{{uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}} + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + aeadCipherSuite := packet.CipherSuite{ + Cipher: packet.CipherFunction(candidateCipherSuites[0][0]), + Mode: packet.AEADMode(candidateCipherSuites[0][1]), + } + + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil { + return nil, err + } + } + + var payload io.WriteCloser + payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, aeadSupported, aeadCipherSuite, symKey, config) + if err != nil { + return + } + + payload, err = handleCompression(payload, candidateCompression, config) + if err != nil { + return nil, err + } + + return writeAndSign(payload, candidateHashes, signed, hints, sigType, config) +} + +// Sign signs a message. The resulting WriteCloser must be closed after the +// contents of the file have been written. hints contains optional information +// that aids the recipients in processing the message. +// If config is nil, sensible defaults will be used. +func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { + if signed == nil { + return nil, errors.InvalidArgumentError("no signer provided") + } + + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA3_256), + hashToHashId(crypto.SHA3_512), + } + defaultHashes := candidateHashes[0:1] + preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + if len(candidateHashes) == 0 { + return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes") + } + + return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. 
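+// It is the io.WriteCloser returned by writeAndSign when a signer is present.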
+type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + wrappedHash hash.Hash + h hash.Hash + signer *packet.PrivateKey + sigType packet.SignatureType + config *packet.Config + metadata *packet.LiteralData // V5 signatures protect document metadata +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.wrappedHash.Write(data) + switch s.sigType { + case packet.SigTypeBinary: + return s.literalData.Write(data) + case packet.SigTypeText: + flag := 0 + return writeCanonical(s.literalData, data, &flag) + } + return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType))) +} + +func (s signatureWriter) Close() error { + sig := createSignaturePacket(&s.signer.PublicKey, s.sigType, s.config) + sig.Hash = s.hashType + sig.Metadata = s.metadata + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature { + sigLifetimeSecs := config.SigLifetime() + return &packet.Signature{ + Version: signer.Version, + SigType: sigType, + PubKeyAlgo: signer.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.KeyId, + IssuerFingerprint: signer.Fingerprint, + Notations: config.Notations(), + SigLifetimeSecs: &sigLifetimeSecs, + } +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) { + data = compressed + confAlgo := config.Compression() + if confAlgo == packet.CompressionNone { + return + } + + // Set algorithm labelled as MUST as fallback + // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.4 + finalAlgo := packet.CompressionNone + // if compression specified by config available we will use it + for _, c := range candidateCompression { + if uint8(confAlgo) == c { + finalAlgo = confAlgo + break + } + } + + if finalAlgo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig) + if err != nil { + return + } + } + return data, nil +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/.gitignore b/vendor/github.com/ThalesIgnite/crypto11/.gitignore new file mode 100644 index 000000000..8e2feb5cd --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/.gitignore @@ -0,0 +1,6 @@ +*~ +crypto11.test +demo/*.pem +demo/demo +/.idea/ +/vendor/ diff --git a/vendor/github.com/ThalesIgnite/crypto11/.travis.yml b/vendor/github.com/ThalesIgnite/crypto11/.travis.yml new file mode 100644 index 000000000..d383d67d8 --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/.travis.yml @@ -0,0 +1,27 @@ +dist: xenial +language: go + +go: + - "1.13.x" + +# Xenial comes with v2.0.0 SoftHSM2, which seems to have issues with ECDSA +# 
code points
+addons:
+  apt:
+    sources:
+    - sourceline: 'ppa:pkg-opendnssec/ppa'
+    packages:
+    - softhsm2
+
+env:
+  - GO111MODULE=on
+
+
+script:
+  - echo directories.tokendir = `pwd`/tokens > softhsm2.conf
+  - echo objectstore.backend = file >> softhsm2.conf
+  - cat softhsm2.conf
+  - mkdir tokens
+  - export SOFTHSM2_CONF=`pwd`/softhsm2.conf
+  - softhsm2-util --init-token --slot 0 --label token1 --so-pin sopassword --pin password
+  - go test -mod readonly -v -bench .
diff --git a/vendor/github.com/ThalesIgnite/crypto11/LICENCE.txt b/vendor/github.com/ThalesIgnite/crypto11/LICENCE.txt
new file mode 100644
index 000000000..2992f5e9c
--- /dev/null
+++ b/vendor/github.com/ThalesIgnite/crypto11/LICENCE.txt
@@ -0,0 +1,22 @@
+MIT License.
+
+Copyright 2016, 2017 Thales e-Security, Inc
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/ThalesIgnite/crypto11/README.md b/vendor/github.com/ThalesIgnite/crypto11/README.md
new file mode 100644
index 000000000..4fccaec34
--- /dev/null
+++ b/vendor/github.com/ThalesIgnite/crypto11/README.md
@@ -0,0 +1,208 @@
+Crypto11
+========
+
+[![GoDoc](https://godoc.org/github.com/ThalesIgnite/crypto11?status.svg)](https://godoc.org/github.com/ThalesIgnite/crypto11)
+[![Build Status](https://travis-ci.com/ThalesIgnite/crypto11.svg?branch=master)](https://travis-ci.com/ThalesIgnite/crypto11)
+
+This is an implementation of the standard Golang crypto interfaces that
+uses [PKCS#11](http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/errata01/os/pkcs11-base-v2.40-errata01-os-complete.html) as a backend. The supported features are:
+
+* Generation and retrieval of RSA, DSA and ECDSA keys.
+* Importing and retrieval of x509 certificates.
+* PKCS#1 v1.5 signing.
+* PKCS#1 PSS signing.
+* PKCS#1 v1.5 decryption.
+* PKCS#1 OAEP decryption.
+* ECDSA signing.
+* DSA signing.
+* Random number generation.
+* AES and DES3 encryption and decryption.
+* HMAC support.
+
+Signing is done through the
+[crypto.Signer](https://golang.org/pkg/crypto/#Signer) interface and
+decryption through
+[crypto.Decrypter](https://golang.org/pkg/crypto/#Decrypter).
+
+To verify signatures or encrypt messages, retrieve the public key and do it in software.
+
+See [the documentation](https://godoc.org/github.com/ThalesIgnite/crypto11) for details of various limitations,
+especially regarding symmetric crypto.
+
+
+Installation
+============
+
+Since v1.0.0, crypto11 requires Go v1.11+.
Install the library by running:
+
+```bash
+go get github.com/ThalesIgnite/crypto11
+```
+
+The crypto11 library needs to be configured with information about your PKCS#11 installation. This is either done programmatically
+(see the `Config` struct in [the documentation](https://godoc.org/github.com/ThalesIgnite/crypto11)) or via a configuration
+file. The configuration file is a JSON representation of the `Config` struct.
+
+A minimal configuration file looks like this:
+
+```json
+{
+    "Path" : "/usr/lib/softhsm/libsofthsm2.so",
+    "TokenLabel": "token1",
+    "Pin" : "password"
+}
+```
+
+- `Path` points to the library from your PKCS#11 vendor.
+- `TokenLabel` is the `CKA_LABEL` of the token you wish to use.
+- `Pin` is the password for the `CKU_USER` user.
+
+Testing Guidance
+================
+
+Disabling tests
+---------------
+
+To disable specific tests, set the environment variable `CRYPTO11_SKIP=<flags>` where `<flags>` is a comma-separated
+list of the following options:
+
+* `CERTS` - disables certificate-related tests. Needed for AWS CloudHSM, which doesn't support certificates.
+* `OAEP_LABEL` - disables RSA OAEP encryption tests that use the source data encoding parameter (also known as a 'label'
+in some crypto libraries). Needed for AWS CloudHSM.
+* `DSA` - disables DSA tests. Needed for AWS CloudHSM (and any other tokens not supporting DSA).
+
+Testing with Thales Luna HSM
+----------------------------
+
+
+
+
+Testing with AWS CloudHSM
+-------------------------
+
+A minimal configuration file for CloudHSM will look like this:
+
+```json
+{
+    "Path" : "/opt/cloudhsm/lib/libcloudhsm_pkcs11_standard.so",
+    "TokenLabel": "cavium",
+    "Pin" : "username:password",
+    "UseGCMIVFromHSM" : true
+}
+```
+
+To run the test suite you must skip unsupported tests:
+
+```
+CRYPTO11_SKIP=CERTS,OAEP_LABEL,DSA go test -v
+```
+
+Be sure to take note of the supported mechanisms, key types and other idiosyncrasies described at
+https://docs.aws.amazon.com/cloudhsm/latest/userguide/pkcs11-library.html. Here's a collection of things we
+noticed when testing with the v2.0.4 PKCS#11 library:
+
+- 1024-bit RSA keys don't appear to be supported, despite what `C_GetMechanismInfo` tells you.
+- The `CKM_RSA_PKCS_OAEP` mechanism doesn't support source data. I.e. when constructing a `CK_RSA_PKCS_OAEP_PARAMS`,
+one must set `pSourceData` to `NULL` and `ulSourceDataLen` to zero.
+- CloudHSM will generate its own IV for GCM mode. This is described in their documentation; see footnote 4 on
+https://docs.aws.amazon.com/cloudhsm/latest/userguide/pkcs11-mechanisms.html.
+- It appears that `CKA_ID` values must be unique, otherwise you get a `CKR_ATTRIBUTE_VALUE_INVALID` error.
+- Very rapid session opening can trigger the following error:
+  ```
+  C_OpenSession failed with error CKR_ARGUMENTS_BAD : 0x00000007
+  HSM error 8c: HSM Error: Already maximum number of sessions are issued
+  ```
+
+Testing with SoftHSM2
+---------------------
+
+To set up a slot:
+
+    $ cat softhsm2.conf
+    directories.tokendir = /home/rjk/go/src/github.com/ThalesIgnite/crypto11/tokens
+    objectstore.backend = file
+    log.level = INFO
+    $ mkdir tokens
+    $ export SOFTHSM2_CONF=`pwd`/softhsm2.conf
+    $ softhsm2-util --init-token --slot 0 --label test
+    === SO PIN (4-255 characters) ===
+    Please enter SO PIN: ********
+    Please reenter SO PIN: ********
+    === User PIN (4-255 characters) ===
+    Please enter user PIN: ********
+    Please reenter user PIN: ********
+    The token has been initialized.
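+
+If you prefer to configure crypto11 programmatically rather than via a file,
+a minimal sketch looks like this (the `Config` field names mirror the JSON
+keys above; the module path is an assumption for a SoftHSM2 install):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/ThalesIgnite/crypto11"
+)
+
+func main() {
+	// Connect to the token initialized above.
+	ctx, err := crypto11.Configure(&crypto11.Config{
+		Path:       "/usr/lib/softhsm/libsofthsm2.so", // assumed SoftHSM2 module path
+		TokenLabel: "test",
+		Pin:        "password",
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer ctx.Close()
+
+	// The Context can now be used to generate or find keys.
+}
+```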
+
+The configuration looks like this:
+
+    $ cat config
+    {
+      "Path" : "/usr/lib/softhsm/libsofthsm2.so",
+      "TokenLabel": "test",
+      "Pin" : "password"
+    }
+
+(At the time of writing) OAEP support is only partial and HMAC is unsupported, so expect test skips.
+
+Testing with nCipher nShield
+----------------------------
+
+In all cases, it's worth enabling nShield PKCS#11 log output:
+
+    export CKNFAST_DEBUG=2
+
+To protect keys with a 1/N operator cardset:
+
+    $ cat config
+    {
+      "Path" : "/opt/nfast/toolkits/pkcs11/libcknfast.so",
+      "TokenLabel": "rjk",
+      "Pin" : "password"
+    }
+
+You can also identify the token by serial number, which in this case
+means the first 16 hex digits of the operator cardset's token hash:
+
+    $ cat config
+    {
+      "Path" : "/opt/nfast/toolkits/pkcs11/libcknfast.so",
+      "TokenSerial": "1d42780caa22efd5",
+      "Pin" : "password"
+    }
+
+A card from the cardset must be in the slot when you run `go test`.
+
+To protect keys with the module only, use the 'accelerator' token:
+
+    $ cat config
+    {
+      "Path" : "/opt/nfast/toolkits/pkcs11/libcknfast.so",
+      "TokenLabel": "accelerator",
+      "Pin" : "password"
+    }
+
+(At the time of writing) GCM is not implemented, so expect test skips.
+
+Limitations
+===========
+
+ * The [PKCS1v15DecryptOptions SessionKeyLen](https://golang.org/pkg/crypto/rsa/#PKCS1v15DecryptOptions) field
+is not implemented and an error is returned if it is nonzero.
+The reason for this is that it is not possible for crypto11 to guarantee the constant-time behavior in the specification.
+See [issue #5](https://github.com/ThalesIgnite/crypto11/issues/5) for further discussion.
+ * Symmetric crypto support via [cipher.Block](https://golang.org/pkg/crypto/cipher/#Block) is very slow.
+You can use the `BlockModeCloser` API
+(over 400 times as fast on my computer)
+but you must call its Close() method
+(which is not part of [cipher.BlockMode](https://golang.org/pkg/crypto/cipher/#BlockMode)).
+See [issue #6](https://github.com/ThalesIgnite/crypto11/issues/6) for further discussion.
+
+Contributions
+=============
+
+Contributions are gratefully received. Before beginning work on sizeable changes, please open an issue first to
+discuss.
+
+Here are some topics we'd like to cover:
+
+* Full test instructions for additional PKCS#11 implementations.
diff --git a/vendor/github.com/ThalesIgnite/crypto11/aead.go b/vendor/github.com/ThalesIgnite/crypto11/aead.go
new file mode 100644
index 000000000..2b0438d47
--- /dev/null
+++ b/vendor/github.com/ThalesIgnite/crypto11/aead.go
@@ -0,0 +1,189 @@
+// Copyright 2018 Thales e-Security, Inc
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package crypto11 + +import ( + "crypto/cipher" + "errors" + "fmt" + + "github.com/miekg/pkcs11" +) + +// cipher.AEAD ---------------------------------------------------------- + +// A PaddingMode is used by a block cipher (see NewCBC). +type PaddingMode int + +const ( + // PaddingNone represents a block cipher with no padding. + PaddingNone PaddingMode = iota + + // PaddingPKCS represents a block cipher used with PKCS#7 padding. + PaddingPKCS +) + +var errBadGCMNonceSize = errors.New("nonce slice too small to hold IV") + +type genericAead struct { + key *SecretKey + + overhead int + + nonceSize int + + // Note - if the GCMParams result is non-nil, the caller must call Free() on the params when + // finished. + makeMech func(nonce []byte, additionalData []byte, encrypt bool) ([]*pkcs11.Mechanism, *pkcs11.GCMParams, error) +} + +// NewGCM returns a given cipher wrapped in Galois Counter Mode, with the standard +// nonce length. +// +// This depends on the HSM supporting the CKM_*_GCM mechanism. If it is not supported +// then you must use cipher.NewGCM; it will be slow. +func (key *SecretKey) NewGCM() (cipher.AEAD, error) { + if key.Cipher.GCMMech == 0 { + return nil, fmt.Errorf("GCM not implemented for key type %#x", key.Cipher.GenParams[0].KeyType) + } + + g := genericAead{ + key: key, + overhead: 16, + nonceSize: key.context.cfg.GCMIVLength, + makeMech: func(nonce []byte, additionalData []byte, encrypt bool) ([]*pkcs11.Mechanism, *pkcs11.GCMParams, error) { + var params *pkcs11.GCMParams + + if (encrypt && key.context.cfg.UseGCMIVFromHSM && + !key.context.cfg.GCMIVFromHSMControl.SupplyIvForHSMGCMEncrypt) || (!encrypt && + key.context.cfg.UseGCMIVFromHSM && !key.context.cfg.GCMIVFromHSMControl.SupplyIvForHSMGCMDecrypt) { + params = pkcs11.NewGCMParams(nil, additionalData, 16*8 /*bits*/) + } else { + params = pkcs11.NewGCMParams(nonce, additionalData, 16*8 /*bits*/) + } + return []*pkcs11.Mechanism{pkcs11.NewMechanism(key.Cipher.GCMMech, params)}, params, nil + }, + } + return g, nil +} + +// NewCBC returns a given cipher wrapped in CBC mode. +// +// Despite the cipher.AEAD return type, there is no support for additional data and no authentication. +// This method exists to provide a convenient way to do bulk (possibly padded) CBC encryption. +// Think carefully before passing the cipher.AEAD to any consumer that expects authentication. 
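+// In particular, Open only decrypts (and, with PaddingPKCS, the token strips
+// padding); it performs no integrity check on the ciphertext.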
+func (key *SecretKey) NewCBC(paddingMode PaddingMode) (cipher.AEAD, error) { + + var pkcsMech uint + + switch paddingMode { + case PaddingNone: + pkcsMech = key.Cipher.CBCMech + case PaddingPKCS: + pkcsMech = key.Cipher.CBCPKCSMech + default: + return nil, errors.New("unrecognized padding mode") + } + + g := genericAead{ + key: key, + overhead: 0, + nonceSize: key.BlockSize(), + makeMech: func(nonce []byte, additionalData []byte, encrypt bool) ([]*pkcs11.Mechanism, *pkcs11.GCMParams, error) { + if len(additionalData) > 0 { + return nil, nil, errors.New("additional data not supported for CBC mode") + } + + return []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcsMech, nonce)}, nil, nil + }, + } + + return g, nil +} + +func (g genericAead) NonceSize() int { + return g.nonceSize +} + +func (g genericAead) Overhead() int { + return g.overhead +} + +func (g genericAead) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + + var result []byte + if err := g.key.context.withSession(func(session *pkcs11Session) (err error) { + mech, params, err := g.makeMech(nonce, additionalData, true) + + if err != nil { + return err + } + defer params.Free() + + if err = session.ctx.EncryptInit(session.handle, mech, g.key.handle); err != nil { + err = fmt.Errorf("C_EncryptInit: %v", err) + return + } + if result, err = session.ctx.Encrypt(session.handle, plaintext); err != nil { + err = fmt.Errorf("C_Encrypt: %v", err) + return + } + + if g.key.context.cfg.UseGCMIVFromHSM && g.key.context.cfg.GCMIVFromHSMControl.SupplyIvForHSMGCMEncrypt { + if len(nonce) != len(params.IV()) { + return errBadGCMNonceSize + } + } + + return + }); err != nil { + panic(err) + } else { + dst = append(dst, result...) + } + return dst +} + +func (g genericAead) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + var result []byte + if err := g.key.context.withSession(func(session *pkcs11Session) (err error) { + mech, params, err := g.makeMech(nonce, additionalData, false) + if err != nil { + return + } + defer params.Free() + + if err = session.ctx.DecryptInit(session.handle, mech, g.key.handle); err != nil { + err = fmt.Errorf("C_DecryptInit: %v", err) + return + } + if result, err = session.ctx.Decrypt(session.handle, ciphertext); err != nil { + err = fmt.Errorf("C_Decrypt: %v", err) + return + } + return + }); err != nil { + return nil, err + } + dst = append(dst, result...) + return dst, nil +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/attributes.go b/vendor/github.com/ThalesIgnite/crypto11/attributes.go new file mode 100644 index 000000000..17ae57525 --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/attributes.go @@ -0,0 +1,510 @@ +package crypto11 + +import ( + "errors" + "fmt" + "strings" + + "github.com/miekg/pkcs11" +) + +// AttributeType represents a PKCS#11 CK_ATTRIBUTE value. +type AttributeType = uint + +// Attribute represents a PKCS#11 CK_ATTRIBUTE type. 
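+// Since Attribute is declared below as a type alias rather than a distinct type,
+// values built with pkcs11.NewAttribute can be used wherever an Attribute is expected.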
+type Attribute = pkcs11.Attribute + +//noinspection GoUnusedConst,GoDeprecation +const ( + CkaClass = AttributeType(0x00000000) + CkaToken = AttributeType(0x00000001) + CkaPrivate = AttributeType(0x00000002) + CkaLabel = AttributeType(0x00000003) + CkaApplication = AttributeType(0x00000010) + CkaValue = AttributeType(0x00000011) + CkaObjectId = AttributeType(0x00000012) + CkaCertificateType = AttributeType(0x00000080) + CkaIssuer = AttributeType(0x00000081) + CkaSerialNumber = AttributeType(0x00000082) + CkaAcIssuer = AttributeType(0x00000083) + CkaOwner = AttributeType(0x00000084) + CkaAttrTypes = AttributeType(0x00000085) + CkaTrusted = AttributeType(0x00000086) + CkaCertificateCategory = AttributeType(0x00000087) + CkaJavaMIDPSecurityDomain = AttributeType(0x00000088) + CkaUrl = AttributeType(0x00000089) + CkaHashOfSubjectPublicKey = AttributeType(0x0000008A) + CkaHashOfIssuerPublicKey = AttributeType(0x0000008B) + CkaNameHashAlgorithm = AttributeType(0x0000008C) + CkaCheckValue = AttributeType(0x00000090) + + CkaKeyType = AttributeType(0x00000100) + CkaSubject = AttributeType(0x00000101) + CkaId = AttributeType(0x00000102) + CkaSensitive = AttributeType(0x00000103) + CkaEncrypt = AttributeType(0x00000104) + CkaDecrypt = AttributeType(0x00000105) + CkaWrap = AttributeType(0x00000106) + CkaUnwrap = AttributeType(0x00000107) + CkaSign = AttributeType(0x00000108) + CkaSignRecover = AttributeType(0x00000109) + CkaVerify = AttributeType(0x0000010A) + CkaVerifyRecover = AttributeType(0x0000010B) + CkaDerive = AttributeType(0x0000010C) + CkaStartDate = AttributeType(0x00000110) + CkaEndDate = AttributeType(0x00000111) + CkaModulus = AttributeType(0x00000120) + CkaModulusBits = AttributeType(0x00000121) + CkaPublicExponent = AttributeType(0x00000122) + CkaPrivateExponent = AttributeType(0x00000123) + CkaPrime1 = AttributeType(0x00000124) + CkaPrime2 = AttributeType(0x00000125) + CkaExponent1 = AttributeType(0x00000126) + CkaExponent2 = AttributeType(0x00000127) + CkaCoefficient = AttributeType(0x00000128) + CkaPublicKeyInfo = AttributeType(0x00000129) + CkaPrime = AttributeType(0x00000130) + CkaSubprime = AttributeType(0x00000131) + CkaBase = AttributeType(0x00000132) + + CkaPrimeBits = AttributeType(0x00000133) + CkaSubprimeBits = AttributeType(0x00000134) + /* (To retain backwards-compatibility) */ + CkaSubPrimeBits = CkaSubprimeBits + + CkaValueBits = AttributeType(0x00000160) + CkaValueLen = AttributeType(0x00000161) + CkaExtractable = AttributeType(0x00000162) + CkaLocal = AttributeType(0x00000163) + CkaNeverExtractable = AttributeType(0x00000164) + CkaAlwaysSensitive = AttributeType(0x00000165) + CkaKeyGenMechanism = AttributeType(0x00000166) + + CkaModifiable = AttributeType(0x00000170) + CkaCopyable = AttributeType(0x00000171) + + /* new for v2.40 */ + CkaDestroyable = AttributeType(0x00000172) + + /* CKA_ECDSA_PARAMS is deprecated in v2.11, + * CKA_EC_PARAMS is preferred. */ + CkaEcdsaParams = AttributeType(0x00000180) + CkaEcParams = AttributeType(0x00000180) + + CkaEcPoint = AttributeType(0x00000181) + + /* CKA_SECONDARY_AUTH, CKA_AUTH_PIN_FLAGS, + * are new for v2.10. Deprecated in v2.11 and onwards. 
*/ + CkaSecondaryAuth = AttributeType(0x00000200) /* Deprecated */ + CkaAuthPinFlags = AttributeType(0x00000201) /* Deprecated */ + + CkaAlwaysAuthenticate = AttributeType(0x00000202) + + CkaWrapWithTrusted = AttributeType(0x00000210) + + ckfArrayAttribute = AttributeType(0x40000000) + + CkaWrapTemplate = ckfArrayAttribute | AttributeType(0x00000211) + CkaUnwrapTemplate = ckfArrayAttribute | AttributeType(0x00000212) + + CkaOtpFormat = AttributeType(0x00000220) + CkaOtpLength = AttributeType(0x00000221) + CkaOtpTimeInterval = AttributeType(0x00000222) + CkaOtpUserFriendlyMode = AttributeType(0x00000223) + CkaOtpChallengeRequirement = AttributeType(0x00000224) + CkaOtpTimeRequirement = AttributeType(0x00000225) + CkaOtpCounterRequirement = AttributeType(0x00000226) + CkaOtpPinRequirement = AttributeType(0x00000227) + CkaOtpCounter = AttributeType(0x0000022E) + CkaOtpTime = AttributeType(0x0000022F) + CkaOtpUserIdentifier = AttributeType(0x0000022A) + CkaOtpServiceIdentifier = AttributeType(0x0000022B) + CkaOtpServiceLogo = AttributeType(0x0000022C) + CkaOtpServiceLogoType = AttributeType(0x0000022D) + + CkaGOSTR3410Params = AttributeType(0x00000250) + CkaGOSTR3411Params = AttributeType(0x00000251) + CkaGOST28147Params = AttributeType(0x00000252) + + CkaHwFeatureType = AttributeType(0x00000300) + CkaResetOnInit = AttributeType(0x00000301) + CkaHasReset = AttributeType(0x00000302) + + CkaPixelX = AttributeType(0x00000400) + CkaPixelY = AttributeType(0x00000401) + CkaResolution = AttributeType(0x00000402) + CkaCharRows = AttributeType(0x00000403) + CkaCharColumns = AttributeType(0x00000404) + CkaColor = AttributeType(0x00000405) + CkaBitsPerPixel = AttributeType(0x00000406) + CkaCharSets = AttributeType(0x00000480) + CkaEncodingMethods = AttributeType(0x00000481) + CkaMimeTypes = AttributeType(0x00000482) + CkaMechanismType = AttributeType(0x00000500) + CkaRequiredCmsAttributes = AttributeType(0x00000501) + CkaDefaultCmsAttributes = AttributeType(0x00000502) + CkaSupportedCmsAttributes = AttributeType(0x00000503) + CkaAllowedMechanisms = ckfArrayAttribute | AttributeType(0x00000600) +) + +// NewAttribute is a helper function that populates a new Attribute for common data types. This function will +// return an error if value is not of type bool, int, uint, string, []byte or time.Time (or is nil). +func NewAttribute(attributeType AttributeType, value interface{}) (a *Attribute, err error) { + // catch any panics from the pkcs11.NewAttribute() call to handle the error cleanly + defer func() { + if r := recover(); r != nil { + err = errors.New(fmt.Sprintf("failed creating Attribute: %v", r)) + } + }() + + pAttr := pkcs11.NewAttribute(attributeType, value) + return pAttr, nil +} + +// CopyAttribute returns a deep copy of the given Attribute. +func CopyAttribute(a *Attribute) *Attribute { + var value []byte + if a.Value != nil && len(a.Value) > 0 { + value = append([]byte(nil), a.Value...) + } + return &pkcs11.Attribute{ + Type: a.Type, + Value: value, + } +} + +// An AttributeSet groups together operations that are common for a collection of Attributes. +type AttributeSet map[AttributeType]*Attribute + +// NewAttributeSet creates an empty AttributeSet. +func NewAttributeSet() AttributeSet { + return make(AttributeSet) +} + +// Set stores a new attribute in the AttributeSet. Any existing value will be overwritten. This function will return an +// error if value is not of type bool, int, uint, string, []byte or time.Time (or is nil). 
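+// For example, attrs.Set(CkaToken, true) marks an object as a persistent token object.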
+func (a AttributeSet) Set(attributeType AttributeType, value interface{}) error {
+	attr, err := NewAttribute(attributeType, value)
+	if err != nil {
+		return err
+	}
+	a[attributeType] = attr
+	return nil
+}
+
+// cloneFrom makes this AttributeSet a clone of the supplied set. Values are deep copied.
+func (a AttributeSet) cloneFrom(set AttributeSet) {
+	for key := range a {
+		delete(a, key)
+	}
+
+	// Use Copy to do the deep cloning for us
+	c := set.Copy()
+	for k, v := range c {
+		a[k] = v
+	}
+}
+
+// AddIfNotPresent adds the attributes if the Attribute Type is not already present in the AttributeSet.
+func (a AttributeSet) AddIfNotPresent(additional []*Attribute) {
+	for _, additionalAttr := range additional {
+		// Only add the attribute if it is not already present in the Attribute map
+		if _, ok := a[additionalAttr.Type]; !ok {
+			a[additionalAttr.Type] = additionalAttr
+		}
+	}
+}
+
+// ToSlice returns a deep copy of Attributes contained in the AttributeSet.
+func (a AttributeSet) ToSlice() []*Attribute {
+	var attributes []*Attribute
+	for _, v := range a {
+		duplicateAttr := CopyAttribute(v)
+		attributes = append(attributes, duplicateAttr)
+	}
+	return attributes
+}
+
+// Copy returns a deep copy of the AttributeSet.
+func (a AttributeSet) Copy() AttributeSet {
+	b := NewAttributeSet()
+	for _, v := range a {
+		b[v.Type] = CopyAttribute(v)
+	}
+	return b
+}
+
+// Unset removes an attribute from the attribute set. If the set does not contain the attribute, this
+// is a no-op.
+func (a AttributeSet) Unset(attributeType AttributeType) {
+	delete(a, attributeType)
+}
+
+func (a AttributeSet) String() string {
+	result := new(strings.Builder)
+	for attr, value := range a {
+		_, _ = fmt.Fprintf(result, "%s: %x\n", attributeTypeString(attr), value.Value)
+	}
+	return result.String()
+}
+
+// NewAttributeSetWithID is a helper function that populates a new AttributeSet with the provided ID.
+// This function returns an error if the ID is an empty slice.
+func NewAttributeSetWithID(id []byte) (AttributeSet, error) {
+	if err := notNilBytes(id, "id"); err != nil {
+		return nil, err
+	}
+	a := NewAttributeSet()
+	_ = a.Set(CkaId, id) // error not possible for []byte
+	return a, nil
+}
+
+// NewAttributeSetWithIDAndLabel is a helper function that populates a new AttributeSet with the
+// provided ID and Label. This function returns an error if either the ID or the Label is an empty slice.
+func NewAttributeSetWithIDAndLabel(id, label []byte) (a AttributeSet, err error) { + if a, err = NewAttributeSetWithID(id); err != nil { + return nil, err + } + + if err := notNilBytes(label, "label"); err != nil { + return nil, err + } + + _ = a.Set(CkaLabel, label) // error not possible with []byte + return a, nil +} + +func attributeTypeString(a AttributeType) string { + //noinspection GoDeprecation + switch a { + case CkaClass: + return "CkaClass" + case CkaToken: + return "CkaToken" + case CkaPrivate: + return "CkaPrivate" + case CkaLabel: + return "CkaLabel" + case CkaApplication: + return "CkaApplication" + case CkaValue: + return "CkaValue" + case CkaObjectId: + return "CkaObjectId" + case CkaCertificateType: + return "CkaCertificateType" + case CkaIssuer: + return "CkaIssuer" + case CkaSerialNumber: + return "CkaSerialNumber" + case CkaAcIssuer: + return "CkaAcIssuer" + case CkaOwner: + return "CkaOwner" + case CkaAttrTypes: + return "CkaAttrTypes" + case CkaTrusted: + return "CkaTrusted" + case CkaCertificateCategory: + return "CkaCertificateCategory" + case CkaJavaMIDPSecurityDomain: + return "CkaJavaMIDPSecurityDomain" + case CkaUrl: + return "CkaUrl" + case CkaHashOfSubjectPublicKey: + return "CkaHashOfSubjectPublicKey" + case CkaHashOfIssuerPublicKey: + return "CkaHashOfIssuerPublicKey" + case CkaNameHashAlgorithm: + return "CkaNameHashAlgorithm" + case CkaCheckValue: + return "CkaCheckValue" + + case CkaKeyType: + return "CkaKeyType" + case CkaSubject: + return "CkaSubject" + case CkaId: + return "CkaId" + case CkaSensitive: + return "CkaSensitive" + case CkaEncrypt: + return "CkaEncrypt" + case CkaDecrypt: + return "CkaDecrypt" + case CkaWrap: + return "CkaWrap" + case CkaUnwrap: + return "CkaUnwrap" + case CkaSign: + return "CkaSign" + case CkaSignRecover: + return "CkaSignRecover" + case CkaVerify: + return "CkaVerify" + case CkaVerifyRecover: + return "CkaVerifyRecover" + case CkaDerive: + return "CkaDerive" + case CkaStartDate: + return "CkaStartDate" + case CkaEndDate: + return "CkaEndDate" + case CkaModulus: + return "CkaModulus" + case CkaModulusBits: + return "CkaModulusBits" + case CkaPublicExponent: + return "CkaPublicExponent" + case CkaPrivateExponent: + return "CkaPrivateExponent" + case CkaPrime1: + return "CkaPrime1" + case CkaPrime2: + return "CkaPrime2" + case CkaExponent1: + return "CkaExponent1" + case CkaExponent2: + return "CkaExponent2" + case CkaCoefficient: + return "CkaCoefficient" + case CkaPublicKeyInfo: + return "CkaPublicKeyInfo" + case CkaPrime: + return "CkaPrime" + case CkaSubprime: + return "CkaSubprime" + case CkaBase: + return "CkaBase" + + case CkaPrimeBits: + return "CkaPrimeBits" + case CkaSubprimeBits: + return "CkaSubprimeBits" + + case CkaValueBits: + return "CkaValueBits" + case CkaValueLen: + return "CkaValueLen" + case CkaExtractable: + return "CkaExtractable" + case CkaLocal: + return "CkaLocal" + case CkaNeverExtractable: + return "CkaNeverExtractable" + case CkaAlwaysSensitive: + return "CkaAlwaysSensitive" + case CkaKeyGenMechanism: + return "CkaKeyGenMechanism" + + case CkaModifiable: + return "CkaModifiable" + case CkaCopyable: + return "CkaCopyable" + + case CkaDestroyable: + return "CkaDestroyable" + + case CkaEcParams: + return "CkaEcParams" + + case CkaEcPoint: + return "CkaEcPoint" + + case CkaSecondaryAuth: + return "CkaSecondaryAuth" + case CkaAuthPinFlags: + return "CkaAuthPinFlags" + + case CkaAlwaysAuthenticate: + return "CkaAlwaysAuthenticate" + + case CkaWrapWithTrusted: + return "CkaWrapWithTrusted" + + case 
ckfArrayAttribute: + return "ckfArrayAttribute" + + case CkaWrapTemplate: + return "CkaWrapTemplate" + case CkaUnwrapTemplate: + return "CkaUnwrapTemplate" + + case CkaOtpFormat: + return "CkaOtpFormat" + case CkaOtpLength: + return "CkaOtpLength" + case CkaOtpTimeInterval: + return "CkaOtpTimeInterval" + case CkaOtpUserFriendlyMode: + return "CkaOtpUserFriendlyMode" + case CkaOtpChallengeRequirement: + return "CkaOtpChallengeRequirement" + case CkaOtpTimeRequirement: + return "CkaOtpTimeRequirement" + case CkaOtpCounterRequirement: + return "CkaOtpCounterRequirement" + case CkaOtpPinRequirement: + return "CkaOtpPinRequirement" + case CkaOtpCounter: + return "CkaOtpCounter" + case CkaOtpTime: + return "CkaOtpTime" + case CkaOtpUserIdentifier: + return "CkaOtpUserIdentifier" + case CkaOtpServiceIdentifier: + return "CkaOtpServiceIdentifier" + case CkaOtpServiceLogo: + return "CkaOtpServiceLogo" + case CkaOtpServiceLogoType: + return "CkaOtpServiceLogoType" + + case CkaGOSTR3410Params: + return "CkaGOSTR3410Params" + case CkaGOSTR3411Params: + return "CkaGOSTR3411Params" + case CkaGOST28147Params: + return "CkaGOST28147Params" + + case CkaHwFeatureType: + return "CkaHwFeatureType" + case CkaResetOnInit: + return "CkaResetOnInit" + case CkaHasReset: + return "CkaHasReset" + + case CkaPixelX: + return "CkaPixelX" + case CkaPixelY: + return "CkaPixelY" + case CkaResolution: + return "CkaResolution" + case CkaCharRows: + return "CkaCharRows" + case CkaCharColumns: + return "CkaCharColumns" + case CkaColor: + return "CkaColor" + case CkaBitsPerPixel: + return "CkaBitsPerPixel" + case CkaCharSets: + return "CkaCharSets" + case CkaEncodingMethods: + return "CkaEncodingMethods" + case CkaMimeTypes: + return "CkaMimeTypes" + case CkaMechanismType: + return "CkaMechanismType" + case CkaRequiredCmsAttributes: + return "CkaRequiredCmsAttributes" + case CkaDefaultCmsAttributes: + return "CkaDefaultCmsAttributes" + case CkaSupportedCmsAttributes: + return "CkaSupportedCmsAttributes" + case CkaAllowedMechanisms: + return "CkaAllowedMechanisms" + default: + return "Unknown" + } +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/block.go b/vendor/github.com/ThalesIgnite/crypto11/block.go new file mode 100644 index 000000000..6c612c948 --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/block.go @@ -0,0 +1,91 @@ +// Copyright 2018 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +package crypto11 + +import ( + "fmt" + + "github.com/miekg/pkcs11" +) + +// cipher.Block --------------------------------------------------------- + +// BlockSize returns the cipher's block size in bytes. +func (key *SecretKey) BlockSize() int { + return key.Cipher.BlockSize +} + +// Decrypt decrypts the first block in src into dst. +// Dst and src must overlap entirely or not at all. +// +// Using this method for bulk operation is very inefficient, as it makes a round trip to the HSM +// (which may be network-connected) for each block. +// For more efficient operation, see NewCBCDecrypterCloser, NewCBCDecrypter or NewCBC. +func (key *SecretKey) Decrypt(dst, src []byte) { + var result []byte + if err := key.context.withSession(func(session *pkcs11Session) (err error) { + mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(key.Cipher.ECBMech, nil)} + if err = session.ctx.DecryptInit(session.handle, mech, key.handle); err != nil { + return + } + if result, err = session.ctx.Decrypt(session.handle, src[:key.Cipher.BlockSize]); err != nil { + return + } + if len(result) != key.Cipher.BlockSize { + err = fmt.Errorf("C_Decrypt: returned %v bytes, wanted %v", len(result), key.Cipher.BlockSize) + return + } + return + }); err != nil { + panic(err) + } else { + copy(dst[:key.Cipher.BlockSize], result) + } +} + +// Encrypt encrypts the first block in src into dst. +// Dst and src must overlap entirely or not at all. +// +// Using this method for bulk operation is very inefficient, as it makes a round trip to the HSM +// (which may be network-connected) for each block. +// For more efficient operation, see NewCBCEncrypterCloser, NewCBCEncrypter or NewCBC. +func (key *SecretKey) Encrypt(dst, src []byte) { + var result []byte + if err := key.context.withSession(func(session *pkcs11Session) (err error) { + mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(key.Cipher.ECBMech, nil)} + if err = session.ctx.EncryptInit(session.handle, mech, key.handle); err != nil { + return + } + if result, err = session.ctx.Encrypt(session.handle, src[:key.Cipher.BlockSize]); err != nil { + return + } + if len(result) != key.Cipher.BlockSize { + err = fmt.Errorf("C_Encrypt: unexpectedly returned %v bytes, wanted %v", len(result), key.Cipher.BlockSize) + return + } + return + }); err != nil { + panic(err) + } else { + copy(dst[:key.Cipher.BlockSize], result) + } +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/blockmode.go b/vendor/github.com/ThalesIgnite/crypto11/blockmode.go new file mode 100644 index 000000000..f9cea6ab1 --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/blockmode.go @@ -0,0 +1,202 @@ +// Copyright 2018 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package crypto11
+
+import (
+	"crypto/cipher"
+	"runtime"
+
+	"github.com/miekg/pkcs11"
+)
+
+// cipher.BlockMode -----------------------------------------------------
+
+// BlockModeCloser represents a block cipher running in a block-based mode (e.g. CBC).
+//
+// BlockModeCloser embeds cipher.BlockMode, and can be used as such.
+// However, if it is only ever used through the cipher.BlockMode interface
+// (or if the Close() method is not explicitly called for any other reason),
+// resources allocated to it may remain live indefinitely.
+type BlockModeCloser interface {
+	cipher.BlockMode
+
+	// Close() releases resources associated with the block mode.
+	Close()
+}
+
+const (
+	modeEncrypt = iota // blockModeCloser is in encrypt mode
+	modeDecrypt        // blockModeCloser is in decrypt mode
+)
+
+// NewCBCEncrypter returns a cipher.BlockMode which encrypts in cipher block chaining mode, using the given key.
+// The length of iv must be the same as the key's block size.
+//
+// The new BlockMode acquires persistent resources which are released (eventually) by a finalizer.
+// If this is a problem for your application then use NewCBCEncrypterCloser instead.
+//
+// If that is not possible then adding calls to runtime.GC() may help.
+func (key *SecretKey) NewCBCEncrypter(iv []byte) (cipher.BlockMode, error) {
+	return key.newBlockModeCloser(key.Cipher.CBCMech, modeEncrypt, iv, true)
+}
+
+// NewCBCDecrypter returns a cipher.BlockMode which decrypts in cipher block chaining mode, using the given key.
+// The length of iv must be the same as the key's block size and must match the iv used to encrypt the data.
+//
+// The new BlockMode acquires persistent resources which are released (eventually) by a finalizer.
+// If this is a problem for your application then use NewCBCDecrypterCloser instead.
+//
+// If that is not possible then adding calls to runtime.GC() may help.
+func (key *SecretKey) NewCBCDecrypter(iv []byte) (cipher.BlockMode, error) {
+	return key.newBlockModeCloser(key.Cipher.CBCMech, modeDecrypt, iv, true)
+}
+
+// NewCBCEncrypterCloser returns a BlockModeCloser which encrypts in cipher block chaining mode, using the given key.
+// The length of iv must be the same as the key's block size.
+//
+// Use of NewCBCEncrypterCloser rather than NewCBCEncrypter represents a commitment to call the Close() method
+// of the returned BlockModeCloser.
+func (key *SecretKey) NewCBCEncrypterCloser(iv []byte) (BlockModeCloser, error) {
+	return key.newBlockModeCloser(key.Cipher.CBCMech, modeEncrypt, iv, false)
+}
+
+// NewCBCDecrypterCloser returns a BlockModeCloser which decrypts in cipher block chaining mode, using the given key.
+// The length of iv must be the same as the key's block size and must match the iv used to encrypt the data.
+//
+// Use of NewCBCDecrypterCloser rather than NewCBCDecrypter represents a commitment to call the Close() method
+// of the returned BlockModeCloser.
+func (key *SecretKey) NewCBCDecrypterCloser(iv []byte) (BlockModeCloser, error) {
+	return key.newBlockModeCloser(key.Cipher.CBCMech, modeDecrypt, iv, false)
+}
+
+// blockModeCloser is a concrete implementation of BlockModeCloser supporting CBC.
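+// It holds a session from the pool for its whole lifetime, which is why Close
+// (or the finalizer) must run to return the session.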
+type blockModeCloser struct {
+	// PKCS#11 session to use
+	session *pkcs11Session
+
+	// Cipher block size
+	blockSize int
+
+	// modeDecrypt or modeEncrypt
+	mode int
+
+	// Cleanup function
+	cleanup func()
+}
+
+// newBlockModeCloser creates a new blockModeCloser for the chosen mechanism and mode.
+func (key *SecretKey) newBlockModeCloser(mech uint, mode int, iv []byte, setFinalizer bool) (*blockModeCloser, error) {
+
+	session, err := key.context.getSession()
+	if err != nil {
+		return nil, err
+	}
+
+	bmc := &blockModeCloser{
+		session:   session,
+		blockSize: key.Cipher.BlockSize,
+		mode:      mode,
+		cleanup: func() {
+			key.context.pool.Put(session)
+		},
+	}
+	mechDescription := []*pkcs11.Mechanism{pkcs11.NewMechanism(mech, iv)}
+
+	switch mode {
+	case modeDecrypt:
+		err = session.ctx.DecryptInit(session.handle, mechDescription, key.handle)
+	case modeEncrypt:
+		err = session.ctx.EncryptInit(session.handle, mechDescription, key.handle)
+	default:
+		panic("unexpected mode")
+	}
+	if err != nil {
+		bmc.cleanup()
+		return nil, err
+	}
+	if setFinalizer {
+		runtime.SetFinalizer(bmc, finalizeBlockModeCloser)
+	}
+
+	return bmc, nil
+}
+
+func finalizeBlockModeCloser(obj interface{}) {
+	obj.(*blockModeCloser).Close()
+}
+
+func (bmc *blockModeCloser) BlockSize() int {
+	return bmc.blockSize
+}
+
+func (bmc *blockModeCloser) CryptBlocks(dst, src []byte) {
+	if len(dst) < len(src) {
+		panic("destination buffer too small")
+	}
+	if len(src)%bmc.blockSize != 0 {
+		panic("input is not a whole number of blocks")
+	}
+	var result []byte
+	var err error
+	switch bmc.mode {
+	case modeDecrypt:
+		result, err = bmc.session.ctx.DecryptUpdate(bmc.session.handle, src)
+	case modeEncrypt:
+		result, err = bmc.session.ctx.EncryptUpdate(bmc.session.handle, src)
+	}
+	if err != nil {
+		panic(err)
+	}
+	// PKCS#11 2.40 s5.2 says that the operation must produce as much output
+	// as possible, so for CBC we should always get back exactly as much as we submitted.
+	// This could be different for other modes but we don't implement any yet.
+	if len(result) != len(src) {
+		panic("unexpected output length from *Update operation")
+	}
+	copy(dst[:len(result)], result)
+	runtime.KeepAlive(bmc)
+}
+
+func (bmc *blockModeCloser) Close() {
+	if bmc.session == nil {
+		return
+	}
+	var result []byte
+	var err error
+	switch bmc.mode {
+	case modeDecrypt:
+		result, err = bmc.session.ctx.DecryptFinal(bmc.session.handle)
+	case modeEncrypt:
+		result, err = bmc.session.ctx.EncryptFinal(bmc.session.handle)
+	}
+	bmc.session = nil
+	bmc.cleanup()
+	if err != nil {
+		panic(err)
+	}
+	// PKCS#11 2.40 s5.2 says that the operation must produce as much output
+	// as possible, so we should never have any left over for CBC.
+	// This could be different for other modes but we don't implement any yet.
+	if len(result) > 0 {
+		panic("nontrivial result from *Final operation")
+	}
+}
diff --git a/vendor/github.com/ThalesIgnite/crypto11/certificates.go b/vendor/github.com/ThalesIgnite/crypto11/certificates.go
new file mode 100644
index 000000000..e430ecce2
--- /dev/null
+++ b/vendor/github.com/ThalesIgnite/crypto11/certificates.go
@@ -0,0 +1,299 @@
+// Copyright 2019 Thales e-Security, Inc
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package crypto11
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/asn1"
+	"math/big"
+
+	"github.com/miekg/pkcs11"
+	"github.com/pkg/errors"
+)
+
+// findCertificate retrieves a previously imported certificate. Any combination of id, label
+// and serial can be provided. An error is returned if all are nil.
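+// Matching is performed on CKA_ID, CKA_LABEL and the DER-encoded
+// CKA_SERIAL_NUMBER respectively.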
+func findCertificate(session *pkcs11Session, id []byte, label []byte, serial *big.Int) (cert *x509.Certificate, err error) {
+
+	rawCertificate, err := findRawCertificate(session, id, label, serial)
+	if err != nil {
+		return nil, err
+	}
+
+	if rawCertificate != nil {
+		cert, err = x509.ParseCertificate(rawCertificate)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return cert, err
+}
+
+func findRawCertificate(session *pkcs11Session, id []byte, label []byte, serial *big.Int) (rawCertificate []byte, err error) {
+	if id == nil && label == nil && serial == nil {
+		return nil, errors.New("id, label and serial cannot all be nil")
+	}
+
+	var template []*pkcs11.Attribute
+
+	if id != nil {
+		template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, id))
+	}
+	if label != nil {
+		template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label))
+	}
+	if serial != nil {
+		derSerial, err := asn1.Marshal(serial)
+		if err != nil {
+			return nil, errors.WithMessage(err, "failed to encode serial")
+		}
+
+		template = append(template, pkcs11.NewAttribute(pkcs11.CKA_SERIAL_NUMBER, derSerial))
+	}
+
+	template = append(template, pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE))
+
+	if err = session.ctx.FindObjectsInit(session.handle, template); err != nil {
+		return nil, err
+	}
+	defer func() {
+		finalErr := session.ctx.FindObjectsFinal(session.handle)
+		if err == nil {
+			err = finalErr
+		}
+	}()
+
+	handles, _, err := session.ctx.FindObjects(session.handle, 1)
+	if err != nil {
+		return nil, err
+	}
+	if len(handles) == 0 {
+		return nil, nil
+	}
+
+	attributes := []*pkcs11.Attribute{
+		pkcs11.NewAttribute(pkcs11.CKA_VALUE, 0),
+	}
+
+	if attributes, err = session.ctx.GetAttributeValue(session.handle, handles[0], attributes); err != nil {
+		return nil, err
+	}
+
+	rawCertificate = attributes[0].Value
+
+	return
+}
+
+// FindCertificate retrieves a previously imported certificate. Any combination of id, label
+// and serial can be provided. An error is returned if all are nil.
+func (c *Context) FindCertificate(id []byte, label []byte, serial *big.Int) (*x509.Certificate, error) {
+
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	var cert *x509.Certificate
+	err := c.withSession(func(session *pkcs11Session) (err error) {
+		cert, err = findCertificate(session, id, label, serial)
+		return err
+	})
+
+	return cert, err
+}
+
+func (c *Context) FindAllPairedCertificates() (certificates []tls.Certificate, err error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	err = c.withSession(func(session *pkcs11Session) error {
+		// Add the private key class to the template to find the private half
+		privAttributes := AttributeSet{}
+		err = privAttributes.Set(CkaClass, pkcs11.CKO_PRIVATE_KEY)
+		if err != nil {
+			return err
+		}
+
+		privHandles, err := findKeysWithAttributes(session, privAttributes.ToSlice())
+		if err != nil {
+			return err
+		}
+
+		for _, privHandle := range privHandles {
+
+			privateKey, certificate, err := c.makeKeyPair(session, &privHandle)
+
+			if err == errNoCkaId || err == errNoPublicHalf {
+				continue
+			}
+
+			if err != nil {
+				return err
+			}
+
+			if certificate == nil {
+				continue
+			}
+
+			tlsCert := tls.Certificate{
+				Leaf:       certificate,
+				PrivateKey: privateKey,
+			}
+
+			tlsCert.Certificate = append(tlsCert.Certificate, certificate.Raw)
+			certificates = append(certificates, tlsCert)
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return
+}
+
+// ImportCertificate imports a certificate onto the token.
The id parameter is used to
+// set CKA_ID and must be non-nil.
+func (c *Context) ImportCertificate(id []byte, certificate *x509.Certificate) error {
+	if c.closed.Get() {
+		return errClosed
+	}
+
+	if err := notNilBytes(id, "id"); err != nil {
+		return err
+	}
+
+	template, err := NewAttributeSetWithID(id)
+	if err != nil {
+		return err
+	}
+	return c.ImportCertificateWithAttributes(template, certificate)
+}
+
+// ImportCertificateWithLabel imports a certificate onto the token. The id and label parameters are used to
+// set CKA_ID and CKA_LABEL respectively and must be non-nil.
+func (c *Context) ImportCertificateWithLabel(id []byte, label []byte, certificate *x509.Certificate) error {
+	if c.closed.Get() {
+		return errClosed
+	}
+
+	if err := notNilBytes(id, "id"); err != nil {
+		return err
+	}
+	if err := notNilBytes(label, "label"); err != nil {
+		return err
+	}
+
+	template, err := NewAttributeSetWithIDAndLabel(id, label)
+	if err != nil {
+		return err
+	}
+	return c.ImportCertificateWithAttributes(template, certificate)
+}
+
+// ImportCertificateWithAttributes imports a certificate onto the token. After this function returns, template
+// will contain the attributes applied to the certificate. If required attributes are missing, they will be set to a
+// default value.
+func (c *Context) ImportCertificateWithAttributes(template AttributeSet, certificate *x509.Certificate) error {
+	if c.closed.Get() {
+		return errClosed
+	}
+
+	if certificate == nil {
+		return errors.New("certificate cannot be nil")
+	}
+
+	serial, err := asn1.Marshal(certificate.SerialNumber)
+	if err != nil {
+		return err
+	}
+
+	template.AddIfNotPresent([]*pkcs11.Attribute{
+		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE),
+		pkcs11.NewAttribute(pkcs11.CKA_CERTIFICATE_TYPE, pkcs11.CKC_X_509),
+		pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
+		pkcs11.NewAttribute(pkcs11.CKA_PRIVATE, false),
+		pkcs11.NewAttribute(pkcs11.CKA_SUBJECT, certificate.RawSubject),
+		pkcs11.NewAttribute(pkcs11.CKA_ISSUER, certificate.RawIssuer),
+		pkcs11.NewAttribute(pkcs11.CKA_SERIAL_NUMBER, serial),
+		pkcs11.NewAttribute(pkcs11.CKA_VALUE, certificate.Raw),
+	})
+
+	err = c.withSession(func(session *pkcs11Session) error {
+		_, err = session.ctx.CreateObject(session.handle, template.ToSlice())
+		return err
+	})
+
+	return err
+}
+
+// DeleteCertificate destroys a previously imported certificate. It will return
+// nil if it succeeds or if the certificate does not exist. Any combination of id,
+// label and serial can be provided. An error is returned if all are nil.
+func (c *Context) DeleteCertificate(id []byte, label []byte, serial *big.Int) error { + if id == nil && label == nil && serial == nil { + return errors.New("id, label and serial cannot all be nil") + } + + template := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE), + } + + if id != nil { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, id)) + } + if label != nil { + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label)) + } + if serial != nil { + asn1Serial, err := asn1.Marshal(serial) + if err != nil { + return err + } + template = append(template, pkcs11.NewAttribute(pkcs11.CKA_SERIAL_NUMBER, asn1Serial)) + } + + err := c.withSession(func(session *pkcs11Session) error { + err := session.ctx.FindObjectsInit(session.handle, template) + if err != nil { + return err + } + handles, _, err := session.ctx.FindObjects(session.handle, 1) + finalErr := session.ctx.FindObjectsFinal(session.handle) + if err != nil { + return err + } + if finalErr != nil { + return finalErr + } + if len(handles) == 0 { + return nil + } + return session.ctx.DestroyObject(session.handle, handles[0]) + }) + + return err +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/common.go b/vendor/github.com/ThalesIgnite/crypto11/common.go new file mode 100644 index 000000000..1823c17ac --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/common.go @@ -0,0 +1,125 @@ +// Copyright 2017 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package crypto11 + +import ( + "C" + "encoding/asn1" + "math/big" + "unsafe" + + "github.com/miekg/pkcs11" + "github.com/pkg/errors" +) + +func ulongToBytes(n uint) []byte { + return C.GoBytes(unsafe.Pointer(&n), C.sizeof_ulong) // ugh! 
+} + +func bytesToUlong(bs []byte) (n uint) { + sliceSize := len(bs) + if sliceSize == 0 { + return 0 + } + + value := *(*uint)(unsafe.Pointer(&bs[0])) + if sliceSize > C.sizeof_ulong { + return value + } + + // truncate the value to the # of bits present in the byte slice since + // the unsafe pointer will always grab/convert ULONG # of bytes + var mask uint + for i := 0; i < sliceSize; i++ { + mask |= 0xff << uint(i * 8) + } + return value & mask +} + +func concat(slices ...[]byte) []byte { + n := 0 + for _, slice := range slices { + n += len(slice) + } + r := make([]byte, n) + n = 0 + for _, slice := range slices { + n += copy(r[n:], slice) + } + return r +} + +// Representation of a *DSA signature +type dsaSignature struct { + R, S *big.Int +} + +// Populate a dsaSignature from a raw byte sequence +func (sig *dsaSignature) unmarshalBytes(sigBytes []byte) error { + if len(sigBytes) == 0 || len(sigBytes)%2 != 0 { + return errors.New("DSA signature length is invalid from token") + } + n := len(sigBytes) / 2 + sig.R, sig.S = new(big.Int), new(big.Int) + sig.R.SetBytes(sigBytes[:n]) + sig.S.SetBytes(sigBytes[n:]) + return nil +} + +// Populate a dsaSignature from DER encoding +func (sig *dsaSignature) unmarshalDER(sigDER []byte) error { + if rest, err := asn1.Unmarshal(sigDER, sig); err != nil { + return errors.WithMessage(err, "DSA signature contains invalid ASN.1 data") + } else if len(rest) > 0 { + return errors.New("unexpected data found after DSA signature") + } + return nil +} + +// Return the DER encoding of a dsaSignature +func (sig *dsaSignature) marshalDER() ([]byte, error) { + return asn1.Marshal(*sig) +} + +// Compute *DSA signature and marshal the result in DER form +func (c *Context) dsaGeneric(key pkcs11.ObjectHandle, mechanism uint, digest []byte) ([]byte, error) { + var err error + var sigBytes []byte + var sig dsaSignature + mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(mechanism, nil)} + err = c.withSession(func(session *pkcs11Session) error { + if err = c.ctx.SignInit(session.handle, mech, key); err != nil { + return err + } + sigBytes, err = c.ctx.Sign(session.handle, digest) + return err + }) + if err != nil { + return nil, err + } + err = sig.unmarshalBytes(sigBytes) + if err != nil { + return nil, err + } + + return sig.marshalDER() +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/config b/vendor/github.com/ThalesIgnite/crypto11/config new file mode 100644 index 000000000..c51f9c585 --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/config @@ -0,0 +1,5 @@ +{ + "Path" : "/usr/lib/softhsm/libsofthsm2.so", + "TokenLabel": "token1", + "Pin" : "password" +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/crypto11.go b/vendor/github.com/ThalesIgnite/crypto11/crypto11.go new file mode 100644 index 000000000..f68fd93a9 --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/crypto11.go @@ -0,0 +1,485 @@ +// Copyright 2016 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package crypto11 enables access to cryptographic keys from PKCS#11 using Go crypto API. +// +// Configuration +// +// PKCS#11 tokens are accessed via Context objects. Each Context connects to one token. +// +// Context objects are created by calling Configure or ConfigureFromFile. +// In the latter case, the file should contain a JSON representation of +// a Config. +// +// Key Generation and Usage +// +// There is support for generating DSA, RSA and ECDSA keys. These keys +// can be found later using FindKeyPair. All three key types implement +// the crypto.Signer interface and the RSA keys also implement crypto.Decrypter. +// +// RSA keys obtained through FindKeyPair will need a type assertion to be +// used for decryption. Assert either crypto.Decrypter or SignerDecrypter, as you +// prefer. +// +// Symmetric keys can also be generated. These are found later using FindKey. +// See the documentation for SecretKey for further information. +// +// Sessions and concurrency +// +// Note that PKCS#11 session handles must not be used concurrently +// from multiple threads. Consumers of the Signer interface know +// nothing of this and expect to be able to sign from multiple threads +// without constraint. We address this as follows. +// +// 1. When a Context is created, a session is created and the user is +// logged in. This session remains open until the Context is closed, +// to ensure all object handles remain valid and to avoid repeatedly +// calling C_Login. +// +// 2. The Context also maintains a pool of read-write sessions. The pool expands +// dynamically as needed, but never beyond the maximum number of r/w sessions +// supported by the token (as reported by C_GetInfo). If other applications +// are using the token, a lower limit should be set in the Config. +// +// 3. Each operation transiently takes a session from the pool. They +// have exclusive use of the session, meeting PKCS#11's concurrency +// requirements. Sessions are returned to the pool afterwards and may +// be re-used. +// +// Behaviour of the pool can be tweaked via Config fields: +// +// - PoolWaitTimeout controls how long an operation can block waiting on a +// session from the pool. A zero value means there is no limit. Timeouts +// occur if the pool is fully used and additional operations are requested. +// +// - MaxSessions sets an upper bound on the number of sessions. If this value is zero, +// a default maximum is used (see DefaultMaxSessions). In every case the maximum +// supported sessions as reported by the token is obeyed. +// +// Limitations +// +// The PKCS1v15DecryptOptions SessionKeyLen field is not implemented +// and an error is returned if it is nonzero. +// The reason for this is that it is not possible for crypto11 to guarantee the constant-time behavior in the specification. +// See https://github.com/thalesignite/crypto11/issues/5 for further discussion. +// +// Symmetric crypto support via cipher.Block is very slow. 
+// You can use the BlockModeCloser API
+// but you must call the Close() method (not found in cipher.BlockMode).
+// See https://github.com/ThalesIgnite/crypto11/issues/6 for further discussion.
+package crypto11
+
+import (
+	"crypto"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/miekg/pkcs11"
+	"github.com/pkg/errors"
+	"github.com/thales-e-security/pool"
+)
+
+const (
+	// DefaultMaxSessions controls the maximum number of concurrent sessions to
+	// open, unless otherwise specified in the Config object.
+	DefaultMaxSessions = 1024
+
+	// DefaultGCMIVLength controls the expected length of IVs generated by the token
+	DefaultGCMIVLength = 16
+
+	// CryptoUser is the Thales vendor constant for CKU_CRYPTO_USER
+	CryptoUser      = 0x80000001
+	DefaultUserType = 1 // 1 -> CKU_USER
+)
+
+// errTokenNotFound represents the failure to find the requested PKCS#11 token
+var errTokenNotFound = errors.New("could not find PKCS#11 token")
+
+// errClosed is returned if a Context is used after a call to Close.
+var errClosed = errors.New("cannot use closed Context")
+
+// pkcs11Object contains a reference to a loaded PKCS#11 object.
+type pkcs11Object struct {
+	// The PKCS#11 object handle.
+	handle pkcs11.ObjectHandle
+
+	// The PKCS#11 context. This is used to find a session handle that can
+	// access this object.
+	context *Context
+}
+
+func (o *pkcs11Object) Delete() error {
+	return o.context.withSession(func(session *pkcs11Session) error {
+		err := session.ctx.DestroyObject(session.handle, o.handle)
+		return errors.WithMessage(err, "failed to destroy key")
+	})
+}
+
+// pkcs11PrivateKey contains a reference to a loaded PKCS#11 private key object.
+type pkcs11PrivateKey struct {
+	pkcs11Object
+
+	// pubKeyHandle is a handle to the public key.
+	pubKeyHandle pkcs11.ObjectHandle
+
+	// pubKey is an exported copy of the public key. We pre-export the key material because crypto.Signer.Public
+	// doesn't allow us to return errors.
+	pubKey crypto.PublicKey
+}
+
+// Delete implements Signer.Delete.
+func (k *pkcs11PrivateKey) Delete() error {
+	err := k.pkcs11Object.Delete()
+	if err != nil {
+		return err
+	}
+
+	return k.context.withSession(func(session *pkcs11Session) error {
+		err := session.ctx.DestroyObject(session.handle, k.pubKeyHandle)
+		return errors.WithMessage(err, "failed to destroy public key")
+	})
+}
+
+// A Context stores the connection state to a PKCS#11 token. Use Configure or ConfigureFromFile to create a new
+// Context. Call Close when finished with the token, to free up resources.
+//
+// All functions, except Close, are safe to call from multiple goroutines.
+type Context struct {
+	// Atomic fields must be at top (according to the package owners)
+	closed pool.AtomicBool
+
+	ctx *pkcs11.Ctx
+	cfg *Config
+
+	token *pkcs11.TokenInfo
+	slot  uint
+	pool  *pool.ResourcePool
+
+	// persistentSession is a session held open so we can be confident handles and login status
+	// persist for the duration of this context
+	persistentSession pkcs11.SessionHandle
+}
+
+// Signer is a PKCS#11 key that implements crypto.Signer.
+type Signer interface {
+	crypto.Signer
+
+	// Delete deletes the key pair from the token.
+	Delete() error
+}
+
+// SignerDecrypter is a PKCS#11 key that implements crypto.Signer and crypto.Decrypter.
+type SignerDecrypter interface {
+	Signer
+
+	// Decrypt implements crypto.Decrypter.
+	Decrypt(rand io.Reader, msg []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error)
+}
+
+// findToken finds a token given exactly one of serial, label or slotNumber
+func (c *Context) findToken(slots []uint, serial, label string, slotNumber *int) (uint, *pkcs11.TokenInfo, error) {
+	for _, slot := range slots {
+
+		tokenInfo, err := c.ctx.GetTokenInfo(slot)
+		if err != nil {
+			return 0, nil, err
+		}
+
+		if (slotNumber != nil && uint(*slotNumber) == slot) ||
+			(tokenInfo.SerialNumber != "" && tokenInfo.SerialNumber == serial) ||
+			(tokenInfo.Label != "" && tokenInfo.Label == label) {
+
+			return slot, &tokenInfo, nil
+		}
+
+	}
+	return 0, nil, errTokenNotFound
+}
+
+// Config holds PKCS#11 configuration information.
+//
+// A token may be selected by label, serial number or slot number. It is an error to specify
+// more than one way to select the token.
+//
+// Supply this to Configure(), or alternatively use ConfigureFromFile().
+type Config struct {
+	// Full path to PKCS#11 library.
+	Path string
+
+	// Token serial number.
+	TokenSerial string
+
+	// Token label.
+	TokenLabel string
+
+	// SlotNumber identifies a token to use by the slot containing it.
+	SlotNumber *int
+
+	// User PIN (password).
+	Pin string
+
+	// Maximum number of concurrent sessions to open. If zero, DefaultMaxSessions is used.
+	// Otherwise, the value specified must be at least 2.
+	MaxSessions int
+
+	// UserType identifies the user type logging in. If zero, DefaultUserType is used.
+	UserType int
+
+	// Maximum time to wait for a session from the sessions pool. Zero means wait indefinitely.
+	PoolWaitTimeout time.Duration
+
+	// LoginNotSupported should be set to true for tokens that do not support logging in.
+	LoginNotSupported bool
+
+	// UseGCMIVFromHSM should be set to true for tokens such as CloudHSM, which ignore the supplied IV for
+	// GCM mode and generate their own. In this case, the token will write the IV used into the CK_GCM_PARAMS.
+	// If UseGCMIVFromHSM is true, we will copy this IV and overwrite the 'nonce' slice passed to Seal and Open. It
+	// is therefore necessary that the nonce is the correct length (12 bytes for CloudHSM).
+	UseGCMIVFromHSM bool
+
+	// GCMIVLength is the length of IVs to use in GCM mode. Refer to NIST SP 800-38D for guidance on the length of
+	// RBG-based IVs in GCM mode. When the UseGCMIVFromHSM parameter is true, this must match the IV length that
+	// the token itself generates (12 bytes for CloudHSM).
+	GCMIVLength int
+
+	// GCMIVFromHSMControl tunes how IVs are supplied to the token when UseGCMIVFromHSM is in effect.
+	GCMIVFromHSMControl GCMIVFromHSMConfig
+}
+
+type GCMIVFromHSMConfig struct {
+
+	// SupplyIvForHSMGCMEncrypt controls the supply of a non-nil IV for GCM use during C_EncryptInit
+	SupplyIvForHSMGCMEncrypt bool
+
+	// SupplyIvForHSMGCMDecrypt controls the supply of a non-nil IV for GCM use during C_DecryptInit
+	SupplyIvForHSMGCMDecrypt bool
+}
+
+// refCount counts the number of contexts using a particular P11 library. It must not be read or modified
+// without holding refCountMutex.
+var refCount = map[string]int{}
+var refCountMutex = sync.Mutex{}
+
+// Configure creates a new Context based on the supplied PKCS#11 configuration.
+func Configure(config *Config) (*Context, error) {
+	// Have we been given exactly one way to select a token?
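+	// (Editorial example, not upstream code: with the sample "config" file vendored
+	// above, the equivalent in-code configuration would be
+	//
+	//	&Config{Path: "/usr/lib/softhsm/libsofthsm2.so", TokenLabel: "token1", Pin: "password"}
+	//
+	// which selects the token by label only; additionally setting TokenSerial or
+	// SlotNumber would be rejected by the checks below.)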
+ var fields []string + if config.SlotNumber != nil { + fields = append(fields, "slot number") + } + if config.TokenLabel != "" { + fields = append(fields, "token label") + } + if config.TokenSerial != "" { + fields = append(fields, "token serial number") + } + if len(fields) == 0 { + return nil, fmt.Errorf("config must specify exactly one way to select a token: none given") + } else if len(fields) > 1 { + return nil, fmt.Errorf("config must specify exactly one way to select a token: %v given", strings.Join(fields, ", ")) + } + + if config.MaxSessions == 0 { + config.MaxSessions = DefaultMaxSessions + } + if config.MaxSessions == 1 { + return nil, errors.New("MaxSessions must be larger than 1") + } + + if config.UserType == 0 { + config.UserType = DefaultUserType + } + + if config.GCMIVLength == 0 { + config.GCMIVLength = DefaultGCMIVLength + } + + instance := &Context{ + cfg: config, + ctx: pkcs11.New(config.Path), + } + + if instance.ctx == nil { + return nil, errors.New("could not open PKCS#11") + } + + // Check how many contexts are currently using this library + refCountMutex.Lock() + defer refCountMutex.Unlock() + numExistingContexts := refCount[config.Path] + + // Only Initialize if we are the first Context using the library + if numExistingContexts == 0 { + if err := instance.ctx.Initialize(); err != nil { + instance.ctx.Destroy() + return nil, errors.WithMessage(err, "failed to initialize PKCS#11 library") + } + } + slots, err := instance.ctx.GetSlotList(true) + if err != nil { + _ = instance.ctx.Finalize() + instance.ctx.Destroy() + return nil, errors.WithMessage(err, "failed to list PKCS#11 slots") + } + + instance.slot, instance.token, err = instance.findToken(slots, config.TokenSerial, config.TokenLabel, config.SlotNumber) + if err != nil { + _ = instance.ctx.Finalize() + instance.ctx.Destroy() + return nil, err + } + + // Create the session pool. + maxSessions := instance.cfg.MaxSessions + tokenMaxSessions := instance.token.MaxRwSessionCount + if tokenMaxSessions != pkcs11.CK_EFFECTIVELY_INFINITE && tokenMaxSessions != pkcs11.CK_UNAVAILABLE_INFORMATION { + maxSessions = min(maxSessions, castDown(tokenMaxSessions)) + } + + // We will use one session to keep state alive, so the pool gets maxSessions - 1 + instance.pool = pool.NewResourcePool(instance.resourcePoolFactoryFunc, maxSessions-1, maxSessions-1, 0, 0) + + // Create a long-term session and log it in (if supported). This session won't be used by callers, instead it is + // used to keep a connection alive to the token to ensure object handles and the log in status remain accessible. + instance.persistentSession, err = instance.ctx.OpenSession(instance.slot, pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION) + if err != nil { + _ = instance.ctx.Finalize() + instance.ctx.Destroy() + return nil, errors.WithMessagef(err, "failed to create long term session") + } + + if !config.LoginNotSupported { + // Try to log in our persistent session. This may fail with CKR_USER_ALREADY_LOGGED_IN if another instance + // already exists. 
+		if instance.cfg.UserType == 1 {
+			err = instance.ctx.Login(instance.persistentSession, pkcs11.CKU_USER, instance.cfg.Pin)
+		} else {
+			err = instance.ctx.Login(instance.persistentSession, CryptoUser, instance.cfg.Pin)
+		}
+		if err != nil {
+
+			pErr, isP11Error := err.(pkcs11.Error)
+
+			if !isP11Error || pErr != pkcs11.CKR_USER_ALREADY_LOGGED_IN {
+				_ = instance.ctx.Finalize()
+				instance.ctx.Destroy()
+				return nil, errors.WithMessagef(err, "failed to log into long term session")
+			}
+		}
+	}
+
+	// Increment the reference count
+	refCount[config.Path] = numExistingContexts + 1
+
+	return instance, nil
+}
+
+func min(a, b int) int {
+	if b < a {
+		return b
+	}
+	return a
+}
+
+// castDown returns orig as a signed integer. If an overflow would have occurred,
+// the maximum possible value is returned.
+func castDown(orig uint) int {
+	// From https://stackoverflow.com/a/6878625/474189
+	const maxUint = ^uint(0)
+	const maxInt = int(maxUint >> 1)
+
+	if orig > uint(maxInt) {
+		return maxInt
+	}
+
+	return int(orig)
+}
+
+// ConfigureFromFile is a convenience method, which parses the configuration file
+// and calls Configure. The configuration file should be a JSON representation
+// of a Config object.
+func ConfigureFromFile(configLocation string) (*Context, error) {
+	config, err := loadConfigFromFile(configLocation)
+	if err != nil {
+		return nil, err
+	}
+
+	return Configure(config)
+}
+
+// loadConfigFromFile reads a Config struct from a file.
+func loadConfigFromFile(configLocation string) (config *Config, err error) {
+	file, err := os.Open(configLocation)
+	if err != nil {
+		return nil, errors.WithMessagef(err, "could not open config file: %s", configLocation)
+	}
+	defer func() {
+		// With named return values, a failure to close the file is reported to the caller.
+		closeErr := file.Close()
+		if err == nil {
+			err = closeErr
+		}
+	}()
+
+	configDecoder := json.NewDecoder(file)
+	config = &Config{}
+	err = configDecoder.Decode(config)
+	return config, errors.WithMessage(err, "could not decode config file")
+}
+
+// Close releases resources used by the Context and unloads the PKCS #11 library if there are no other
+// Contexts using it. Close blocks until existing operations have finished. A closed Context cannot be reused.
+func (c *Context) Close() error {
+
+	// Take lock on the reference count
+	refCountMutex.Lock()
+	defer refCountMutex.Unlock()
+
+	c.closed.Set(true)
+
+	// Block until all resources returned to pool
+	c.pool.Close()
+
+	// Close our long-term session. We ignore any returned error,
+	// since we plan to kill our connection to the library anyway.
+ _ = c.ctx.CloseSession(c.persistentSession) + + count, found := refCount[c.cfg.Path] + if !found || count == 0 { + // We have somehow lost track of reference counts, this is very bad + panic("invalid reference count for PKCS#11 library") + } + + refCount[c.cfg.Path] = count - 1 + + // If we were the last Context, finalize the library + if count == 1 { + err := c.ctx.Finalize() + if err != nil { + return err + } + } + + c.ctx.Destroy() + return nil +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/dsa.go b/vendor/github.com/ThalesIgnite/crypto11/dsa.go new file mode 100644 index 000000000..562f3dd0e --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/dsa.go @@ -0,0 +1,175 @@ +// Copyright 2016, 2017 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package crypto11 + +import ( + "crypto" + "crypto/dsa" + "io" + "math/big" + + "github.com/pkg/errors" + + pkcs11 "github.com/miekg/pkcs11" +) + +// pkcs11PrivateKeyDSA contains a reference to a loaded PKCS#11 DSA private key object. +type pkcs11PrivateKeyDSA struct { + pkcs11PrivateKey +} + +// Export the public key corresponding to a private DSA key. +func exportDSAPublicKey(session *pkcs11Session, pubHandle pkcs11.ObjectHandle) (crypto.PublicKey, error) { + template := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_PRIME, nil), + pkcs11.NewAttribute(pkcs11.CKA_SUBPRIME, nil), + pkcs11.NewAttribute(pkcs11.CKA_BASE, nil), + pkcs11.NewAttribute(pkcs11.CKA_VALUE, nil), + } + exported, err := session.ctx.GetAttributeValue(session.handle, pubHandle, template) + if err != nil { + return nil, err + } + var p, q, g, y big.Int + p.SetBytes(exported[0].Value) + q.SetBytes(exported[1].Value) + g.SetBytes(exported[2].Value) + y.SetBytes(exported[3].Value) + result := dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: &p, + Q: &q, + G: &g, + }, + Y: &y, + } + return &result, nil +} + +func notNilBytes(obj []byte, name string) error { + if obj == nil { + return errors.Errorf("%s cannot be nil", name) + } + return nil +} + +// GenerateDSAKeyPair creates a DSA key pair on the token. The id parameter is used to +// set CKA_ID and must be non-nil. +func (c *Context) GenerateDSAKeyPair(id []byte, params *dsa.Parameters) (Signer, error) { + if c.closed.Get() { + return nil, errClosed + } + + public, err := NewAttributeSetWithID(id) + if err != nil { + return nil, err + } + // Copy the AttributeSet to allow modifications. 
+	private := public.Copy()
+
+	return c.GenerateDSAKeyPairWithAttributes(public, private, params)
+}
+
+// GenerateDSAKeyPairWithLabel creates a DSA key pair on the token. The id and label parameters are used to
+// set CKA_ID and CKA_LABEL respectively and must be non-nil.
+func (c *Context) GenerateDSAKeyPairWithLabel(id, label []byte, params *dsa.Parameters) (Signer, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	public, err := NewAttributeSetWithIDAndLabel(id, label)
+	if err != nil {
+		return nil, err
+	}
+	// Copy the AttributeSet to allow modifications.
+	private := public.Copy()
+
+	return c.GenerateDSAKeyPairWithAttributes(public, private, params)
+}
+
+// GenerateDSAKeyPairWithAttributes creates a DSA key pair on the token. After this function returns, public and private
+// will contain the attributes applied to the key pair. If required attributes are missing, they will be set to a
+// default value.
+func (c *Context) GenerateDSAKeyPairWithAttributes(public, private AttributeSet, params *dsa.Parameters) (Signer, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	var k Signer
+	err := c.withSession(func(session *pkcs11Session) error {
+		p := params.P.Bytes()
+		q := params.Q.Bytes()
+		g := params.G.Bytes()
+
+		public.AddIfNotPresent([]*pkcs11.Attribute{
+			pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PUBLIC_KEY),
+			pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_DSA),
+			pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
+			pkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),
+			pkcs11.NewAttribute(pkcs11.CKA_PRIME, p),
+			pkcs11.NewAttribute(pkcs11.CKA_SUBPRIME, q),
+			pkcs11.NewAttribute(pkcs11.CKA_BASE, g),
+		})
+		private.AddIfNotPresent([]*pkcs11.Attribute{
+			pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
+			pkcs11.NewAttribute(pkcs11.CKA_SIGN, true),
+			pkcs11.NewAttribute(pkcs11.CKA_SENSITIVE, true),
+			pkcs11.NewAttribute(pkcs11.CKA_EXTRACTABLE, false),
+		})
+
+		mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_DSA_KEY_PAIR_GEN, nil)}
+		pubHandle, privHandle, err := session.ctx.GenerateKeyPair(session.handle,
+			mech,
+			public.ToSlice(),
+			private.ToSlice())
+		if err != nil {
+			return err
+		}
+		pub, err := exportDSAPublicKey(session, pubHandle)
+		if err != nil {
+			return err
+		}
+		k = &pkcs11PrivateKeyDSA{
+			pkcs11PrivateKey: pkcs11PrivateKey{
+				pkcs11Object: pkcs11Object{
+					handle:  privHandle,
+					context: c,
+				},
+				pubKeyHandle: pubHandle,
+				pubKey:       pub,
+			}}
+		return nil
+
+	})
+	return k, err
+}
+
+// Sign signs a message using a DSA key.
+//
+// This completes the implementation of crypto.Signer for pkcs11PrivateKeyDSA.
+//
+// PKCS#11 expects to pick its own random data for signatures, so the rand argument is ignored.
+//
+// The return value is a DER-encoded byte block.
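+//
+// Editorial sketch, not upstream code: assuming signer was returned by
+// GenerateDSAKeyPair or FindKeyPair, and that the digest length suits the key
+// (imports crypto, crypto/rand and crypto/sha1):
+//
+//	digest := sha1.Sum(message)
+//	der, err := signer.Sign(rand.Reader, digest[:], crypto.SHA1)
+//	// der holds the ASN.1 DER encoding of the DSA (r, s) pair.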
+func (signer *pkcs11PrivateKeyDSA) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + return signer.context.dsaGeneric(signer.handle, pkcs11.CKM_DSA, digest) +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/ecdsa.go b/vendor/github.com/ThalesIgnite/crypto11/ecdsa.go new file mode 100644 index 000000000..c09423256 --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/ecdsa.go @@ -0,0 +1,302 @@ +// Copyright 2016, 2017 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package crypto11 + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/asn1" + "io" + "math/big" + + "github.com/miekg/pkcs11" + "github.com/pkg/errors" +) + +// errUnsupportedEllipticCurve is returned when an elliptic curve +// unsupported by crypto11 is specified. Note that the error behavior +// for an elliptic curve unsupported by the underlying PKCS#11 +// implementation will be different. +var errUnsupportedEllipticCurve = errors.New("unsupported elliptic curve") + +// pkcs11PrivateKeyECDSA contains a reference to a loaded PKCS#11 ECDSA private key object. +type pkcs11PrivateKeyECDSA struct { + pkcs11PrivateKey +} + +// Information about an Elliptic Curve +type curveInfo struct { + // ASN.1 marshaled OID + oid []byte + + // Curve definition in Go form + curve elliptic.Curve +} + +// ASN.1 marshal some value and panic on error +func mustMarshal(val interface{}) []byte { + if b, err := asn1.Marshal(val); err != nil { + panic(err) + } else { + return b + } +} + +// Note: some of these are outside what crypto/elliptic currently +// knows about. So I'm making a (reasonable) assumption about what +// they will be called if they are either added or if someone +// specifies them explicitly. +// +// For public key export, the curve has to be a known one, otherwise +// you're stuffed. This is probably better fixed by adding well-known +// curves to crypto/elliptic rather than having a private copy here. 
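+//
+// (Editorial note: each curveInfo below stores the DER encoding of the curve's
+// OBJECT IDENTIFIER as produced by mustMarshal; for example the P-256 OID
+// 1.2.840.10045.3.1.7 marshals to 06 08 2A 86 48 CE 3D 03 01 07, and this is
+// the byte string matched against CKA_ECDSA_PARAMS.)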
+var wellKnownCurves = map[string]curveInfo{
+	"P-192": {
+		mustMarshal(asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 1}),
+		nil,
+	},
+	"P-224": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 33}),
+		elliptic.P224(),
+	},
+	"P-256": {
+		mustMarshal(asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}),
+		elliptic.P256(),
+	},
+	"P-384": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 34}),
+		elliptic.P384(),
+	},
+	"P-521": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 35}),
+		elliptic.P521(),
+	},
+
+	"K-163": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 1}),
+		nil,
+	},
+	"K-233": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 26}),
+		nil,
+	},
+	"K-283": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 16}),
+		nil,
+	},
+	"K-409": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 36}),
+		nil,
+	},
+	"K-571": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 38}),
+		nil,
+	},
+
+	"B-163": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 15}),
+		nil,
+	},
+	"B-233": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 27}),
+		nil,
+	},
+	"B-283": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 17}),
+		nil,
+	},
+	"B-409": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 37}),
+		nil,
+	},
+	"B-571": {
+		mustMarshal(asn1.ObjectIdentifier{1, 3, 132, 0, 39}),
+		nil,
+	},
+}
+
+func marshalEcParams(c elliptic.Curve) ([]byte, error) {
+	if ci, ok := wellKnownCurves[c.Params().Name]; ok {
+		return ci.oid, nil
+	}
+	// TODO use ANSI X9.62 ECParameters representation instead
+	return nil, errUnsupportedEllipticCurve
+}
+
+func unmarshalEcParams(b []byte) (elliptic.Curve, error) {
+	// See if it's a well-known curve
+	for _, ci := range wellKnownCurves {
+		if bytes.Equal(b, ci.oid) {
+			if ci.curve != nil {
+				return ci.curve, nil
+			}
+			return nil, errUnsupportedEllipticCurve
+		}
+	}
+	// TODO try ANSI X9.62 ECParameters representation
+	return nil, errUnsupportedEllipticCurve
+}
+
+func unmarshalEcPoint(b []byte, c elliptic.Curve) (*big.Int, *big.Int, error) {
+	var pointBytes []byte
+	extra, err := asn1.Unmarshal(b, &pointBytes)
+	if err != nil {
+		return nil, nil, errors.WithMessage(err, "elliptic curve point is invalid ASN.1")
+	}
+
+	if len(extra) > 0 {
+		// We weren't expecting extra data
+		return nil, nil, errors.New("unexpected data found when parsing elliptic curve point")
+	}
+
+	x, y := elliptic.Unmarshal(c, pointBytes)
+	if x == nil || y == nil {
+		return nil, nil, errors.New("failed to parse elliptic curve point")
+	}
+	return x, y, nil
+}
+
+// Export the public key corresponding to a private ECDSA key.
+func exportECDSAPublicKey(session *pkcs11Session, pubHandle pkcs11.ObjectHandle) (crypto.PublicKey, error) {
+	var err error
+	var attributes []*pkcs11.Attribute
+	var pub ecdsa.PublicKey
+	template := []*pkcs11.Attribute{
+		pkcs11.NewAttribute(pkcs11.CKA_ECDSA_PARAMS, nil),
+		pkcs11.NewAttribute(pkcs11.CKA_EC_POINT, nil),
+	}
+	if attributes, err = session.ctx.GetAttributeValue(session.handle, pubHandle, template); err != nil {
+		return nil, err
+	}
+	if pub.Curve, err = unmarshalEcParams(attributes[0].Value); err != nil {
+		return nil, err
+	}
+	if pub.X, pub.Y, err = unmarshalEcPoint(attributes[1].Value, pub.Curve); err != nil {
+		return nil, err
+	}
+	return &pub, nil
+}
+
+// GenerateECDSAKeyPair creates an ECDSA key pair on the token using curve c. The id parameter is used to
+// set CKA_ID and must be non-nil. Only a limited set of named elliptic curves is supported. The
+// underlying PKCS#11 implementation may impose further restrictions.
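+//
+// Editorial sketch, not upstream code ("ec-key" is an arbitrary CKA_ID):
+//
+//	signer, err := ctx.GenerateECDSAKeyPair([]byte("ec-key"), elliptic.P256())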
+func (c *Context) GenerateECDSAKeyPair(id []byte, curve elliptic.Curve) (Signer, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	public, err := NewAttributeSetWithID(id)
+	if err != nil {
+		return nil, err
+	}
+	// Copy the AttributeSet to allow modifications.
+	private := public.Copy()
+
+	return c.GenerateECDSAKeyPairWithAttributes(public, private, curve)
+}
+
+// GenerateECDSAKeyPairWithLabel creates an ECDSA key pair on the token using curve c. The id and label parameters are used to
+// set CKA_ID and CKA_LABEL respectively and must be non-nil. Only a limited set of named elliptic curves is supported. The
+// underlying PKCS#11 implementation may impose further restrictions.
+func (c *Context) GenerateECDSAKeyPairWithLabel(id, label []byte, curve elliptic.Curve) (Signer, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	public, err := NewAttributeSetWithIDAndLabel(id, label)
+	if err != nil {
+		return nil, err
+	}
+	// Copy the AttributeSet to allow modifications.
+	private := public.Copy()
+
+	return c.GenerateECDSAKeyPairWithAttributes(public, private, curve)
+}
+
+// GenerateECDSAKeyPairWithAttributes generates an ECDSA key pair on the token. After this function returns, public and
+// private will contain the attributes applied to the key pair. If required attributes are missing, they will be set to
+// a default value.
+func (c *Context) GenerateECDSAKeyPairWithAttributes(public, private AttributeSet, curve elliptic.Curve) (Signer, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	var k Signer
+	err := c.withSession(func(session *pkcs11Session) error {
+
+		parameters, err := marshalEcParams(curve)
+		if err != nil {
+			return err
+		}
+		public.AddIfNotPresent([]*pkcs11.Attribute{
+			pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PUBLIC_KEY),
+			pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_ECDSA),
+			pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
+			pkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),
+			pkcs11.NewAttribute(pkcs11.CKA_ECDSA_PARAMS, parameters),
+		})
+		private.AddIfNotPresent([]*pkcs11.Attribute{
+			pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
+			pkcs11.NewAttribute(pkcs11.CKA_SIGN, true),
+			pkcs11.NewAttribute(pkcs11.CKA_SENSITIVE, true),
+			pkcs11.NewAttribute(pkcs11.CKA_EXTRACTABLE, false),
+		})
+
+		mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_ECDSA_KEY_PAIR_GEN, nil)}
+		pubHandle, privHandle, err := session.ctx.GenerateKeyPair(session.handle,
+			mech,
+			public.ToSlice(),
+			private.ToSlice())
+		if err != nil {
+			return err
+		}
+
+		pub, err := exportECDSAPublicKey(session, pubHandle)
+		if err != nil {
+			return err
+		}
+		k = &pkcs11PrivateKeyECDSA{
+			pkcs11PrivateKey: pkcs11PrivateKey{
+				pkcs11Object: pkcs11Object{
+					handle:  privHandle,
+					context: c,
+				},
+				pubKeyHandle: pubHandle,
+				pubKey:       pub,
+			}}
+		return nil
+	})
+	return k, err
+}
+
+// Sign signs a message using an ECDSA key.
+//
+// This completes the implementation of crypto.Signer for pkcs11PrivateKeyECDSA.
+//
+// PKCS#11 expects to pick its own random data where necessary for signatures, so the rand argument is ignored.
+//
+// The return value is a DER-encoded byte block.
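+//
+// Editorial sketch, not upstream code: the DER output can be verified with the
+// standard library when the public half is an *ecdsa.PublicKey
+// (imports crypto, crypto/ecdsa, crypto/rand and crypto/sha256):
+//
+//	digest := sha256.Sum256(message)
+//	der, err := signer.Sign(rand.Reader, digest[:], crypto.SHA256)
+//	ok := ecdsa.VerifyASN1(signer.Public().(*ecdsa.PublicKey), digest[:], der)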
+func (signer *pkcs11PrivateKeyECDSA) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) { + return signer.context.dsaGeneric(signer.handle, pkcs11.CKM_ECDSA, digest) +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/hmac.go b/vendor/github.com/ThalesIgnite/crypto11/hmac.go new file mode 100644 index 000000000..16b741501 --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/hmac.go @@ -0,0 +1,215 @@ +// Copyright 2018 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package crypto11 + +import ( + "errors" + "hash" + + "github.com/miekg/pkcs11" +) + +const ( + // NFCK_VENDOR_NCIPHER distinguishes nShield vendor-specific mechanisms. + NFCK_VENDOR_NCIPHER = 0xde436972 + + // CKM_NCIPHER is the base for nShield vendor-specific mechanisms. 
+	CKM_NCIPHER = (pkcs11.CKM_VENDOR_DEFINED | NFCK_VENDOR_NCIPHER)
+
+	// CKM_NC_MD5_HMAC_KEY_GEN is the nShield-specific HMACMD5 key-generation mechanism
+	CKM_NC_MD5_HMAC_KEY_GEN = (CKM_NCIPHER + 0x6)
+
+	// CKM_NC_SHA_1_HMAC_KEY_GEN is the nShield-specific HMACSHA1 key-generation mechanism
+	CKM_NC_SHA_1_HMAC_KEY_GEN = (CKM_NCIPHER + 0x3)
+
+	// CKM_NC_SHA224_HMAC_KEY_GEN is the nShield-specific HMACSHA224 key-generation mechanism
+	CKM_NC_SHA224_HMAC_KEY_GEN = (CKM_NCIPHER + 0x24)
+
+	// CKM_NC_SHA256_HMAC_KEY_GEN is the nShield-specific HMACSHA256 key-generation mechanism
+	CKM_NC_SHA256_HMAC_KEY_GEN = (CKM_NCIPHER + 0x25)
+
+	// CKM_NC_SHA384_HMAC_KEY_GEN is the nShield-specific HMACSHA384 key-generation mechanism
+	CKM_NC_SHA384_HMAC_KEY_GEN = (CKM_NCIPHER + 0x26)
+
+	// CKM_NC_SHA512_HMAC_KEY_GEN is the nShield-specific HMACSHA512 key-generation mechanism
+	CKM_NC_SHA512_HMAC_KEY_GEN = (CKM_NCIPHER + 0x27)
+)
+
+type hmacImplementation struct {
+	// PKCS#11 session to use
+	session *pkcs11Session
+
+	// Signing key
+	key *SecretKey
+
+	// Hash size
+	size int
+
+	// Block size
+	blockSize int
+
+	// PKCS#11 mechanism information
+	mechDescription []*pkcs11.Mechanism
+
+	// Cleanup function
+	cleanup func()
+
+	// Count of updates
+	updates uint64
+
+	// Result, or nil if we don't have the answer yet
+	result []byte
+}
+
+type hmacInfo struct {
+	size      int
+	blockSize int
+	general   bool
+}
+
+var hmacInfos = map[int]*hmacInfo{
+	pkcs11.CKM_MD5_HMAC:                {16, 64, false}, // MD5 digests are 16 bytes
+	pkcs11.CKM_MD5_HMAC_GENERAL:        {16, 64, true},
+	pkcs11.CKM_SHA_1_HMAC:              {20, 64, false},
+	pkcs11.CKM_SHA_1_HMAC_GENERAL:      {20, 64, true},
+	pkcs11.CKM_SHA224_HMAC:             {28, 64, false},
+	pkcs11.CKM_SHA224_HMAC_GENERAL:     {28, 64, true},
+	pkcs11.CKM_SHA256_HMAC:             {32, 64, false},
+	pkcs11.CKM_SHA256_HMAC_GENERAL:     {32, 64, true},
+	pkcs11.CKM_SHA384_HMAC:             {48, 64, false},
+	pkcs11.CKM_SHA384_HMAC_GENERAL:     {48, 64, true},
+	pkcs11.CKM_SHA512_HMAC:             {64, 128, false},
+	pkcs11.CKM_SHA512_HMAC_GENERAL:     {64, 128, true},
+	pkcs11.CKM_SHA512_224_HMAC:         {28, 128, false},
+	pkcs11.CKM_SHA512_224_HMAC_GENERAL: {28, 128, true},
+	pkcs11.CKM_SHA512_256_HMAC:         {32, 128, false},
+	pkcs11.CKM_SHA512_256_HMAC_GENERAL: {32, 128, true},
+	pkcs11.CKM_RIPEMD160_HMAC:          {20, 64, false},
+	pkcs11.CKM_RIPEMD160_HMAC_GENERAL:  {20, 64, true},
+}
+
+// errHmacClosed is returned if an HMAC is updated after it has finished.
+var errHmacClosed = errors.New("already called Sum()")
+
+// NewHMAC returns a new HMAC hash using the given PKCS#11 mechanism
+// and key.
+// length specifies the output size, for _GENERAL mechanisms.
+//
+// If the mechanism is not in the built-in list of known mechanisms then the
+// Size() function will return whatever length was given, even if it is wrong.
+// BlockSize() will always return 0 in this case.
+//
+// The Reset() method is not implemented.
+// After Sum() is called no new data may be added.
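+//
+// Editorial sketch, not upstream code: key is a *SecretKey (e.g. from FindKey),
+// and for a non-_GENERAL mechanism the length argument is ignored:
+//
+//	h, err := key.NewHMAC(pkcs11.CKM_SHA256_HMAC, 0)
+//	...
+//	h.Write(data)
+//	mac := h.Sum(nil) // 32 bytes for SHA-256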
+func (key *SecretKey) NewHMAC(mech int, length int) (hash.Hash, error) { + hi := hmacImplementation{ + key: key, + } + var params []byte + if info, ok := hmacInfos[mech]; ok { + hi.blockSize = info.blockSize + if info.general { + hi.size = length + params = ulongToBytes(uint(length)) + } else { + hi.size = info.size + } + } else { + hi.size = length + } + hi.mechDescription = []*pkcs11.Mechanism{pkcs11.NewMechanism(uint(mech), params)} + if err := hi.initialize(); err != nil { + return nil, err + } + return &hi, nil +} + +func (hi *hmacImplementation) initialize() (err error) { + session, err := hi.key.context.getSession() + if err != nil { + return err + } + + hi.session = session + hi.cleanup = func() { + hi.key.context.pool.Put(session) + hi.session = nil + } + if err = hi.session.ctx.SignInit(hi.session.handle, hi.mechDescription, hi.key.handle); err != nil { + hi.cleanup() + return + } + hi.updates = 0 + hi.result = nil + return +} + +func (hi *hmacImplementation) Write(p []byte) (n int, err error) { + if hi.result != nil { + if len(p) > 0 { + err = errHmacClosed + } + return + } + if err = hi.session.ctx.SignUpdate(hi.session.handle, p); err != nil { + return + } + hi.updates++ + n = len(p) + return +} + +func (hi *hmacImplementation) Sum(b []byte) []byte { + if hi.result == nil { + var err error + if hi.updates == 0 { + // http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/os/pkcs11-base-v2.40-os.html#_Toc322855304 + // We must ensure that C_SignUpdate is called _at least once_. + if err = hi.session.ctx.SignUpdate(hi.session.handle, []byte{}); err != nil { + panic(err) + } + } + hi.result, err = hi.session.ctx.SignFinal(hi.session.handle) + hi.cleanup() + if err != nil { + panic(err) + } + } + return append(b, hi.result...) +} + +func (hi *hmacImplementation) Reset() { + hi.Sum(nil) // Clean up + + // Assign the error to "_" to indicate we are knowingly ignoring this. It may have been + // sensible to panic at this stage, but we cannot add a panic without breaking backwards + // compatibility. + _ = hi.initialize() +} + +func (hi *hmacImplementation) Size() int { + return hi.size +} + +func (hi *hmacImplementation) BlockSize() int { + return hi.blockSize +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/keys.go b/vendor/github.com/ThalesIgnite/crypto11/keys.go new file mode 100644 index 000000000..998663a5b --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/keys.go @@ -0,0 +1,613 @@ +// Copyright 2016, 2017 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package crypto11
+
+import (
+	"crypto"
+	"crypto/x509"
+	"github.com/miekg/pkcs11"
+	"github.com/pkg/errors"
+)
+
+const maxHandlePerFind = 20
+
+// errNoCkaId is returned if a private key is found which has no CKA_ID attribute
+var errNoCkaId = errors.New("private key has no CKA_ID")
+
+// errNoPublicHalf is returned if a public half cannot be found to match a given private key
+var errNoPublicHalf = errors.New("could not find public key to match private key")
+
+func findKeysWithAttributes(session *pkcs11Session, template []*pkcs11.Attribute) (handles []pkcs11.ObjectHandle, err error) {
+	if err = session.ctx.FindObjectsInit(session.handle, template); err != nil {
+		return nil, err
+	}
+	defer func() {
+		finalErr := session.ctx.FindObjectsFinal(session.handle)
+		if err == nil {
+			err = finalErr
+		}
+	}()
+
+	newhandles, _, err := session.ctx.FindObjects(session.handle, maxHandlePerFind)
+	if err != nil {
+		return nil, err
+	}
+
+	for len(newhandles) > 0 {
+		handles = append(handles, newhandles...)
+
+		newhandles, _, err = session.ctx.FindObjects(session.handle, maxHandlePerFind)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return handles, nil
+}
+
+// Find key objects. For asymmetric keys this only finds one half so
+// callers will call it twice. Returns nil if the key does not exist on the token.
+func findKeys(session *pkcs11Session, id []byte, label []byte, keyclass *uint, keytype *uint) (handles []pkcs11.ObjectHandle, err error) {
+	var template []*pkcs11.Attribute
+
+	if keyclass != nil {
+		template = append(template, pkcs11.NewAttribute(pkcs11.CKA_CLASS, *keyclass))
+	}
+	if keytype != nil {
+		template = append(template, pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, *keytype))
+	}
+	if id != nil {
+		template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, id))
+	}
+	if label != nil {
+		template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label))
+	}
+
+	if handles, err = findKeysWithAttributes(session, template); err != nil {
+		return nil, err
+	}
+
+	return handles, nil
+}
+
+// Find a key object. For asymmetric keys this only finds one half so
+// callers will call it twice. Returns nil if the key does not exist on the token.
+func findKey(session *pkcs11Session, id []byte, label []byte, keyclass *uint, keytype *uint) (obj *pkcs11.ObjectHandle, err error) {
+	handles, err := findKeys(session, id, label, keyclass, keytype)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(handles) == 0 {
+		return nil, nil
+	}
+	return &handles[0], nil
+}
+
+// Takes a handle to the private half of a keypair, locates the public half with the matching CKA_ID and CKA_LABEL
+// values and constructs a keypair object from them both.
+func (c *Context) makeKeyPair(session *pkcs11Session, privHandle *pkcs11.ObjectHandle) (signer Signer, certificate *x509.Certificate, err error) { + attributes := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_ID, nil), + pkcs11.NewAttribute(pkcs11.CKA_LABEL, nil), + pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, 0), + } + if attributes, err = session.ctx.GetAttributeValue(session.handle, *privHandle, attributes); err != nil { + return nil, nil, err + } + id := attributes[0].Value + label := attributes[1].Value + keyType := bytesToUlong(attributes[2].Value) + + // Ensure the private key actually has a non-empty CKA_ID to match on + if id == nil || len(id) == 0 { + return nil, nil, errNoCkaId + } + + var pubHandle *pkcs11.ObjectHandle + + // Find the public half which has a matching CKA_ID + pubHandle, err = findKey(session, id, label, uintPtr(pkcs11.CKO_PUBLIC_KEY), &keyType) + if err != nil { + p11Err, ok := err.(pkcs11.Error) + + if len(label) == 0 && ok && p11Err == pkcs11.CKR_TEMPLATE_INCONSISTENT { + // This probably means we are using a token that doesn't like us passing empty attributes in a template. + // For instance CloudHSM cannot search for a key with CKA_LABEL="". So if the private key doesn't have a + // label, we need to pass nil into findKeys, then match against the first key without a label. + + pubHandles, err := findKeys(session, id, nil, uintPtr(pkcs11.CKO_PUBLIC_KEY), &keyType) + if err != nil { + return nil, nil, err + } + + for _, handle := range pubHandles { + template := []*pkcs11.Attribute{pkcs11.NewAttribute(pkcs11.CKA_LABEL, nil)} + template, err = session.ctx.GetAttributeValue(session.handle, handle, template) + if err != nil { + return nil, nil, err + } + if len(template[0].Value) == 0 { + pubHandle = &handle + break + } + } + } else { + return nil, nil, err + } + } + + if pubHandle == nil { + // Try harder to find a matching public key, based on CKA_ID alone + pubHandle, err = findKey(session, id, nil, uintPtr(pkcs11.CKO_PUBLIC_KEY), &keyType) + } + + resultPkcs11PrivateKey := pkcs11PrivateKey{ + pkcs11Object: pkcs11Object{ + handle: *privHandle, + context: c, + }, + } + + var pub crypto.PublicKey + certificate, _ = findCertificate(session, id, nil, nil) + if certificate != nil && pubHandle == nil { + pub = certificate.PublicKey + } + + if pub == nil && pubHandle == nil { + // We can't return a Signer if we don't have private and public key. Treat it as an error. 
+ return nil, nil, errNoPublicHalf + } + + switch keyType { + case pkcs11.CKK_DSA: + result := &pkcs11PrivateKeyDSA{pkcs11PrivateKey: resultPkcs11PrivateKey} + if pubHandle != nil { + if pub, err = exportDSAPublicKey(session, *pubHandle); err != nil { + return nil, nil, err + } + result.pkcs11PrivateKey.pubKeyHandle = *pubHandle + } + + result.pkcs11PrivateKey.pubKey = pub + return result, certificate, nil + + case pkcs11.CKK_RSA: + result := &pkcs11PrivateKeyRSA{pkcs11PrivateKey: resultPkcs11PrivateKey} + if pubHandle != nil { + if pub, err = exportRSAPublicKey(session, *pubHandle); err != nil { + return nil, nil, err + } + result.pkcs11PrivateKey.pubKeyHandle = *pubHandle + } + + result.pkcs11PrivateKey.pubKey = pub + return result, certificate, nil + + case pkcs11.CKK_ECDSA: + result := &pkcs11PrivateKeyECDSA{pkcs11PrivateKey: resultPkcs11PrivateKey} + if pubHandle != nil { + if pub, err = exportECDSAPublicKey(session, *pubHandle); err != nil { + return nil, nil, err + } + result.pkcs11PrivateKey.pubKeyHandle = *pubHandle + } + + result.pkcs11PrivateKey.pubKey = pub + return result, certificate, nil + + default: + return nil, nil, errors.Errorf("unsupported key type: %X", keyType) + } +} + +// FindKeyPair retrieves a previously created asymmetric key pair, or nil if it cannot be found. +// +// At least one of id and label must be specified. +// Only private keys that have a non-empty CKA_ID will be found, as this is required to locate the matching public key. +// If the private key is found, but the public key with a corresponding CKA_ID is not, the key is not returned +// because we cannot implement crypto.Signer without the public key. +func (c *Context) FindKeyPair(id []byte, label []byte) (Signer, error) { + if c.closed.Get() { + return nil, errClosed + } + + result, err := c.FindKeyPairs(id, label) + if err != nil { + return nil, err + } + + if len(result) == 0 { + return nil, nil + } + + return result[0], nil +} + +// FindKeyPairs retrieves all matching asymmetric key pairs, or a nil slice if none can be found. +// +// At least one of id and label must be specified. +// Only private keys that have a non-empty CKA_ID will be found, as this is required to locate the matching public key. +// If the private key is found, but the public key with a corresponding CKA_ID is not, the key is not returned +// because we cannot implement crypto.Signer without the public key. +func (c *Context) FindKeyPairs(id []byte, label []byte) (signer []Signer, err error) { + if c.closed.Get() { + return nil, errClosed + } + + if id == nil && label == nil { + return nil, errors.New("id and label cannot both be nil") + } + + attributes := NewAttributeSet() + + if id != nil { + err = attributes.Set(CkaId, id) + if err != nil { + return nil, err + } + } + if label != nil { + err = attributes.Set(CkaLabel, label) + if err != nil { + return nil, err + } + } + + return c.FindKeyPairsWithAttributes(attributes) +} + +// FindKeyPairWithAttributes retrieves a previously created asymmetric key pair, or nil if it cannot be found. +// The given attributes are matched against the private half only. Then the public half with a matching CKA_ID +// and CKA_LABEL values is found. +// +// Only private keys that have a non-empty CKA_ID will be found, as this is required to locate the matching public key. +// If the private key is found, but the public key with a corresponding CKA_ID is not, the key is not returned +// because we cannot implement crypto.Signer without the public key. 
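+//
+// Editorial sketch, not upstream code ("signing-key" is an arbitrary label):
+//
+//	attrs := NewAttributeSet()
+//	if err := attrs.Set(CkaLabel, []byte("signing-key")); err != nil { ... }
+//	signer, err := ctx.FindKeyPairWithAttributes(attrs)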
+func (c *Context) FindKeyPairWithAttributes(attributes AttributeSet) (Signer, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	result, err := c.FindKeyPairsWithAttributes(attributes)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(result) == 0 {
+		return nil, nil
+	}
+
+	return result[0], nil
+}
+
+// FindKeyPairsWithAttributes retrieves previously created asymmetric key pairs, or nil if none can be found.
+// The given attributes are matched against the private half only. Then the public half with a matching CKA_ID
+// and CKA_LABEL values is found.
+//
+// Only private keys that have a non-empty CKA_ID will be found, as this is required to locate the matching public key.
+// If the private key is found, but the public key with a corresponding CKA_ID is not, the key is not returned
+// because we cannot implement crypto.Signer without the public key.
+func (c *Context) FindKeyPairsWithAttributes(attributes AttributeSet) (signer []Signer, err error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	var keys []Signer
+
+	if _, ok := attributes[CkaClass]; ok {
+		return nil, errors.Errorf("keypair attribute set must not contain CkaClass")
+	}
+
+	err = c.withSession(func(session *pkcs11Session) error {
+		// Add the private key class to the template to find the private half
+		privAttributes := attributes.Copy()
+		err = privAttributes.Set(CkaClass, pkcs11.CKO_PRIVATE_KEY)
+		if err != nil {
+			return err
+		}
+
+		privHandles, err := findKeysWithAttributes(session, privAttributes.ToSlice())
+		if err != nil {
+			return err
+		}
+
+		for _, privHandle := range privHandles {
+			k, _, err := c.makeKeyPair(session, &privHandle)
+
+			if err == errNoCkaId || err == errNoPublicHalf {
+				continue
+			}
+			if err != nil {
+				return err
+			}
+
+			keys = append(keys, k)
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return keys, nil
+}
+
+// FindAllKeyPairs retrieves all existing asymmetric key pairs, or a nil slice if none can be found.
+//
+// If a private key is found, but the corresponding public key is not, the key is not returned because we cannot
+// implement crypto.Signer without the public key.
+func (c *Context) FindAllKeyPairs() ([]Signer, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	return c.FindKeyPairsWithAttributes(NewAttributeSet())
+}
+
+// Public returns the public half of a private key.
+//
+// This partially implements the crypto.Signer and crypto.Decrypter interfaces for
+// pkcs11PrivateKey. (The remainder of the implementation is in the
+// key-specific types.)
+func (k pkcs11PrivateKey) Public() crypto.PublicKey {
+	return k.pubKey
+}
+
+// FindKey retrieves a previously created symmetric key, or nil if it cannot be found.
+//
+// Either (but not both) of id and label may be nil, in which case they are ignored.
+func (c *Context) FindKey(id []byte, label []byte) (*SecretKey, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	result, err := c.FindKeys(id, label)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(result) == 0 {
+		return nil, nil
+	}
+
+	return result[0], nil
+}
+
+// FindKeys retrieves all matching symmetric keys, or a nil slice if none can be found.
+//
+// At least one of id and label must be specified.
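+//
+// Editorial sketch, not upstream code: find every symmetric key labelled
+// "data-key", regardless of CKA_ID:
+//
+//	keys, err := ctx.FindKeys(nil, []byte("data-key"))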
+func (c *Context) FindKeys(id []byte, label []byte) (key []*SecretKey, err error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	if id == nil && label == nil {
+		return nil, errors.New("id and label cannot both be nil")
+	}
+
+	attributes := NewAttributeSet()
+
+	if id != nil {
+		err = attributes.Set(CkaId, id)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if label != nil {
+		err = attributes.Set(CkaLabel, label)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return c.FindKeysWithAttributes(attributes)
+}
+
+// FindKeyWithAttributes retrieves a previously created symmetric key, or nil if it cannot be found.
+func (c *Context) FindKeyWithAttributes(attributes AttributeSet) (*SecretKey, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	result, err := c.FindKeysWithAttributes(attributes)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(result) == 0 {
+		return nil, nil
+	}
+
+	return result[0], nil
+}
+
+// FindKeysWithAttributes retrieves previously created symmetric keys, or a nil slice if none can be found.
+func (c *Context) FindKeysWithAttributes(attributes AttributeSet) ([]*SecretKey, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	var keys []*SecretKey
+
+	if _, ok := attributes[CkaClass]; ok {
+		return nil, errors.Errorf("key attribute set must not contain CkaClass")
+	}
+
+	err := c.withSession(func(session *pkcs11Session) error {
+		// Add the secret key class to the template
+		privAttributes := attributes.Copy()
+		err := privAttributes.Set(CkaClass, pkcs11.CKO_SECRET_KEY)
+		if err != nil {
+			return err
+		}
+
+		privHandles, err := findKeysWithAttributes(session, privAttributes.ToSlice())
+		if err != nil {
+			return err
+		}
+
+		for _, privHandle := range privHandles {
+			attributes := []*pkcs11.Attribute{
+				pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, 0),
+			}
+			if attributes, err = session.ctx.GetAttributeValue(session.handle, privHandle, attributes); err != nil {
+				return err
+			}
+			keyType := bytesToUlong(attributes[0].Value)
+
+			if cipher, ok := Ciphers[int(keyType)]; ok {
+				k := &SecretKey{pkcs11Object{privHandle, c}, cipher}
+				keys = append(keys, k)
+			} else {
+				return errors.Errorf("unsupported key type: %X", keyType)
+			}
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	return keys, nil
+}
+
+// FindAllKeys retrieves all existing symmetric keys, or a nil slice if none can be found.
+func (c *Context) FindAllKeys() ([]*SecretKey, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	return c.FindKeysWithAttributes(NewAttributeSet())
+}
+
+func uintPtr(i uint) *uint { return &i }
+
+func (c *Context) getAttributes(handle pkcs11.ObjectHandle, attributes []AttributeType) (a AttributeSet, err error) {
+	values := NewAttributeSet()
+
+	err = c.withSession(func(session *pkcs11Session) error {
+		var attrs []*pkcs11.Attribute
+		for _, a := range attributes {
+			attrs = append(attrs, pkcs11.NewAttribute(a, nil))
+		}
+
+		p11values, err := session.ctx.GetAttributeValue(session.handle, handle, attrs)
+		if err != nil {
+			return err
+		}
+
+		values.AddIfNotPresent(p11values)
+
+		return nil
+	})
+
+	return values, err
+}
+
+// GetAttributes gets the values of the specified attributes on the given key or keypair.
+// If the key is asymmetric, then the attributes are retrieved from the private half.
+//
+// If the object is not a crypto11 key or keypair then an error is returned.
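+//
+// Editorial sketch, not upstream code: read CKA_LABEL from a found key pair
+// (Attribute exposes the raw bytes via its Value field, as in miekg/pkcs11):
+//
+//	attrs, err := ctx.GetAttributes(signer, []AttributeType{CkaLabel})
+//	label := string(attrs[CkaLabel].Value)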
+func (c *Context) GetAttributes(key interface{}, attributes []AttributeType) (a AttributeSet, err error) { + if c.closed.Get() { + return nil, errClosed + } + + var handle pkcs11.ObjectHandle + + switch k := (key).(type) { + case *pkcs11PrivateKeyDSA: + handle = k.handle + case *pkcs11PrivateKeyRSA: + handle = k.handle + case *pkcs11PrivateKeyECDSA: + handle = k.handle + case *SecretKey: + handle = k.handle + default: + return nil, errors.Errorf("not a PKCS#11 key") + } + + return c.getAttributes(handle, attributes) +} + +// GetAttribute gets the value of the specified attribute on the given key or keypair. +// If the key is asymmetric, then the attribute is retrieved from the private half. +// +// If the object is not a crypto11 key or keypair then an error is returned. +func (c *Context) GetAttribute(key interface{}, attribute AttributeType) (a *Attribute, err error) { + if c.closed.Get() { + return nil, errClosed + } + + set, err := c.GetAttributes(key, []AttributeType{attribute}) + if err != nil { + return nil, err + } + + return set[attribute], nil +} + +// GetPubAttributes gets the values of the specified attributes on the public half of the given keypair. +// +// If the object is not a crypto11 keypair then an error is returned. +func (c *Context) GetPubAttributes(key interface{}, attributes []AttributeType) (a AttributeSet, err error) { + if c.closed.Get() { + return nil, errClosed + } + + var handle pkcs11.ObjectHandle + + switch k := (key).(type) { + case *pkcs11PrivateKeyDSA: + handle = k.pubKeyHandle + case *pkcs11PrivateKeyRSA: + handle = k.pubKeyHandle + case *pkcs11PrivateKeyECDSA: + handle = k.pubKeyHandle + default: + return nil, errors.Errorf("not an asymmetric PKCS#11 key") + } + + return c.getAttributes(handle, attributes) +} + +// GetPubAttribute gets the value of the specified attribute on the public half of the given key. +// +// If the object is not a crypto11 keypair then an error is returned. +func (c *Context) GetPubAttribute(key interface{}, attribute AttributeType) (a *Attribute, err error) { + if c.closed.Get() { + return nil, errClosed + } + + set, err := c.GetPubAttributes(key, []AttributeType{attribute}) + if err != nil { + return nil, err + } + + return set[attribute], nil +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/rand.go b/vendor/github.com/ThalesIgnite/crypto11/rand.go new file mode 100644 index 000000000..c820e552c --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/rand.go @@ -0,0 +1,54 @@ +// Copyright 2016, 2017 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package crypto11 + +import ( + "io" +) + +// NewRandomReader returns a reader for the random number generator on the token. +func (c *Context) NewRandomReader() (io.Reader, error) { + if c.closed.Get() { + return nil, errClosed + } + + return pkcs11RandReader{c}, nil +} + +// pkcs11RandReader is a random number reader that uses PKCS#11. +type pkcs11RandReader struct { + context *Context +} + +// This implements the Reader interface for pkcs11RandReader. +func (r pkcs11RandReader) Read(data []byte) (n int, err error) { + var result []byte + + if err = r.context.withSession(func(session *pkcs11Session) error { + result, err = r.context.ctx.GenerateRandom(session.handle, len(data)) + return err + }); err != nil { + return 0, err + } + copy(data, result) + return len(result), err +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/rsa.go b/vendor/github.com/ThalesIgnite/crypto11/rsa.go new file mode 100644 index 000000000..234fd0a0b --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/rsa.go @@ -0,0 +1,325 @@ +// Copyright 2016, 2017 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package crypto11 + +import ( + "crypto" + "crypto/rsa" + "errors" + "io" + "math/big" + + "github.com/miekg/pkcs11" +) + +// errMalformedRSAPublicKey is returned when an RSA public key is not in a suitable form. +// +// Currently this means that the public exponent is either bigger than +// 32 bits, or less than 2. +var errMalformedRSAPublicKey = errors.New("malformed RSA public key") + +// errUnsupportedRSAOptions is returned when an unsupported RSA option is requested. +// +// Currently this means a nontrivial SessionKeyLen when decrypting; or +// an unsupported hash function; or crypto.rsa.PSSSaltLengthAuto was +// requested. +var errUnsupportedRSAOptions = errors.New("unsupported RSA option value") + +// pkcs11PrivateKeyRSA contains a reference to a loaded PKCS#11 RSA private key object. +type pkcs11PrivateKeyRSA struct { + pkcs11PrivateKey +} + +// Export the public key corresponding to a private RSA key. 
+func exportRSAPublicKey(session *pkcs11Session, pubHandle pkcs11.ObjectHandle) (crypto.PublicKey, error) { + template := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_MODULUS, nil), + pkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, nil), + } + exported, err := session.ctx.GetAttributeValue(session.handle, pubHandle, template) + if err != nil { + return nil, err + } + var modulus = new(big.Int) + modulus.SetBytes(exported[0].Value) + var bigExponent = new(big.Int) + bigExponent.SetBytes(exported[1].Value) + if bigExponent.BitLen() > 32 { + return nil, errMalformedRSAPublicKey + } + if bigExponent.Sign() < 1 { + return nil, errMalformedRSAPublicKey + } + exponent := int(bigExponent.Uint64()) + result := rsa.PublicKey{ + N: modulus, + E: exponent, + } + if result.E < 2 { + return nil, errMalformedRSAPublicKey + } + return &result, nil +} + +// GenerateRSAKeyPair creates an RSA key pair on the token. The id parameter is used to +// set CKA_ID and must be non-nil. RSA private keys are generated with both sign and decrypt +// permissions, and a public exponent of 65537. +func (c *Context) GenerateRSAKeyPair(id []byte, bits int) (SignerDecrypter, error) { + if c.closed.Get() { + return nil, errClosed + } + + public, err := NewAttributeSetWithID(id) + if err != nil { + return nil, err + } + // Copy the AttributeSet to allow modifications. + private := public.Copy() + + return c.GenerateRSAKeyPairWithAttributes(public, private, bits) +} + +// GenerateRSAKeyPairWithLabel creates an RSA key pair on the token. The id and label parameters are used to +// set CKA_ID and CKA_LABEL respectively and must be non-nil. RSA private keys are generated with both sign and decrypt +// permissions, and a public exponent of 65537. +func (c *Context) GenerateRSAKeyPairWithLabel(id, label []byte, bits int) (SignerDecrypter, error) { + if c.closed.Get() { + return nil, errClosed + } + + public, err := NewAttributeSetWithIDAndLabel(id, label) + if err != nil { + return nil, err + } + // Copy the AttributeSet to allow modifications. + private := public.Copy() + + return c.GenerateRSAKeyPairWithAttributes(public, private, bits) +} + +// GenerateRSAKeyPairWithAttributes generates an RSA key pair on the token. After this function returns, public and +// private will contain the attributes applied to the key pair. If required attributes are missing, they will be set to +// a default value. 
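+//
+// A minimal sketch of preparing the two attribute sets (assuming ctx is a
+// *crypto11.Context; the id bytes are arbitrary):
+//
+//	public, err := crypto11.NewAttributeSetWithID([]byte{1, 2, 3})
+//	if err != nil {
+//		// handle err
+//	}
+//	private := public.Copy()
+//	signer, err := ctx.GenerateRSAKeyPairWithAttributes(public, private, 2048)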
+func (c *Context) GenerateRSAKeyPairWithAttributes(public, private AttributeSet, bits int) (SignerDecrypter, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	var k SignerDecrypter
+
+	err := c.withSession(func(session *pkcs11Session) error {
+
+		public.AddIfNotPresent([]*pkcs11.Attribute{
+			pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PUBLIC_KEY),
+			pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_RSA),
+			pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
+			pkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),
+			pkcs11.NewAttribute(pkcs11.CKA_ENCRYPT, true),
+			pkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, []byte{1, 0, 1}),
+			pkcs11.NewAttribute(pkcs11.CKA_MODULUS_BITS, bits),
+		})
+		private.AddIfNotPresent([]*pkcs11.Attribute{
+			pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
+			pkcs11.NewAttribute(pkcs11.CKA_SIGN, true),
+			pkcs11.NewAttribute(pkcs11.CKA_DECRYPT, true),
+			pkcs11.NewAttribute(pkcs11.CKA_SENSITIVE, true),
+			pkcs11.NewAttribute(pkcs11.CKA_EXTRACTABLE, false),
+		})
+
+		mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_KEY_PAIR_GEN, nil)}
+		pubHandle, privHandle, err := session.ctx.GenerateKeyPair(session.handle,
+			mech,
+			public.ToSlice(),
+			private.ToSlice())
+		if err != nil {
+			return err
+		}
+
+		pub, err := exportRSAPublicKey(session, pubHandle)
+		if err != nil {
+			return err
+		}
+		k = &pkcs11PrivateKeyRSA{
+			pkcs11PrivateKey: pkcs11PrivateKey{
+				pkcs11Object: pkcs11Object{
+					handle:  privHandle,
+					context: c,
+				},
+				pubKeyHandle: pubHandle,
+				pubKey:       pub,
+			}}
+		return nil
+	})
+	return k, err
+}
+
+// Decrypt decrypts a message using an RSA key.
+//
+// This completes the implementation of crypto.Decrypter for pkcs11PrivateKeyRSA.
+//
+// Note that the SessionKeyLen option (for PKCS#1v1.5 decryption) is not supported.
+//
+// The underlying PKCS#11 implementation may impose further restrictions.
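+//
+// For instance, an illustrative sketch (assuming priv is the SignerDecrypter
+// returned by one of the GenerateRSAKeyPair... functions; the rand argument is not
+// used by this implementation):
+//
+//	plaintext, err := priv.Decrypt(rand.Reader, ciphertext, &rsa.OAEPOptions{Hash: crypto.SHA256})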
+func (priv *pkcs11PrivateKeyRSA) Decrypt(rand io.Reader, ciphertext []byte, options crypto.DecrypterOpts) (plaintext []byte, err error) { + err = priv.context.withSession(func(session *pkcs11Session) error { + if options == nil { + plaintext, err = decryptPKCS1v15(session, priv, ciphertext, 0) + } else { + switch o := options.(type) { + case *rsa.PKCS1v15DecryptOptions: + plaintext, err = decryptPKCS1v15(session, priv, ciphertext, o.SessionKeyLen) + case *rsa.OAEPOptions: + plaintext, err = decryptOAEP(session, priv, ciphertext, o.Hash, o.Label) + default: + err = errUnsupportedRSAOptions + } + } + return err + }) + return plaintext, err +} + +func decryptPKCS1v15(session *pkcs11Session, key *pkcs11PrivateKeyRSA, ciphertext []byte, sessionKeyLen int) ([]byte, error) { + if sessionKeyLen != 0 { + return nil, errUnsupportedRSAOptions + } + mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS, nil)} + if err := session.ctx.DecryptInit(session.handle, mech, key.handle); err != nil { + return nil, err + } + return session.ctx.Decrypt(session.handle, ciphertext) +} + +func decryptOAEP(session *pkcs11Session, key *pkcs11PrivateKeyRSA, ciphertext []byte, hashFunction crypto.Hash, + label []byte) ([]byte, error) { + + hashAlg, mgfAlg, _, err := hashToPKCS11(hashFunction) + if err != nil { + return nil, err + } + + mech := pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, + pkcs11.NewOAEPParams(hashAlg, mgfAlg, pkcs11.CKZ_DATA_SPECIFIED, label)) + + err = session.ctx.DecryptInit(session.handle, []*pkcs11.Mechanism{mech}, key.handle) + if err != nil { + return nil, err + } + return session.ctx.Decrypt(session.handle, ciphertext) +} + +func hashToPKCS11(hashFunction crypto.Hash) (hashAlg uint, mgfAlg uint, hashLen uint, err error) { + switch hashFunction { + case crypto.SHA1: + return pkcs11.CKM_SHA_1, pkcs11.CKG_MGF1_SHA1, 20, nil + case crypto.SHA224: + return pkcs11.CKM_SHA224, pkcs11.CKG_MGF1_SHA224, 28, nil + case crypto.SHA256: + return pkcs11.CKM_SHA256, pkcs11.CKG_MGF1_SHA256, 32, nil + case crypto.SHA384: + return pkcs11.CKM_SHA384, pkcs11.CKG_MGF1_SHA384, 48, nil + case crypto.SHA512: + return pkcs11.CKM_SHA512, pkcs11.CKG_MGF1_SHA512, 64, nil + default: + return 0, 0, 0, errUnsupportedRSAOptions + } +} + +func signPSS(session *pkcs11Session, key *pkcs11PrivateKeyRSA, digest []byte, opts *rsa.PSSOptions) ([]byte, error) { + var hMech, mgf, hLen, sLen uint + var err error + if hMech, mgf, hLen, err = hashToPKCS11(opts.Hash); err != nil { + return nil, err + } + switch opts.SaltLength { + case rsa.PSSSaltLengthAuto: // parseltongue constant + // TODO we could (in principle) work out the biggest + // possible size from the key, but until someone has + // the effort to do that... 
+		return nil, errUnsupportedRSAOptions
+	case rsa.PSSSaltLengthEqualsHash:
+		sLen = hLen
+	default:
+		sLen = uint(opts.SaltLength)
+	}
+	// TODO this is pretty horrible, maybe the PKCS#11 wrapper
+	// could be improved to help us out here
+	parameters := concat(ulongToBytes(hMech),
+		ulongToBytes(mgf),
+		ulongToBytes(sLen))
+	mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_PSS, parameters)}
+	if err = session.ctx.SignInit(session.handle, mech, key.handle); err != nil {
+		return nil, err
+	}
+	return session.ctx.Sign(session.handle, digest)
+}
+
+var pkcs1Prefix = map[crypto.Hash][]byte{
+	crypto.SHA1:   {0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14},
+	crypto.SHA224: {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c},
+	crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20},
+	crypto.SHA384: {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30},
+	crypto.SHA512: {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40},
+}
+
+func signPKCS1v15(session *pkcs11Session, key *pkcs11PrivateKeyRSA, digest []byte, hash crypto.Hash) (signature []byte, err error) {
+	/* Calculate T for EMSA-PKCS1-v1_5. */
+	oid := pkcs1Prefix[hash]
+	T := make([]byte, len(oid)+len(digest))
+	copy(T[0:len(oid)], oid)
+	copy(T[len(oid):], digest)
+	mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS, nil)}
+	err = session.ctx.SignInit(session.handle, mech, key.handle)
+	if err == nil {
+		signature, err = session.ctx.Sign(session.handle, T)
+	}
+	return
+}
+
+// Sign signs a message using an RSA key.
+//
+// This completes the implementation of crypto.Signer for pkcs11PrivateKeyRSA.
+//
+// PKCS#11 expects to pick its own random data where necessary for signatures, so the rand argument is ignored.
+//
+// Note that (at present) the crypto.rsa.PSSSaltLengthAuto option is
+// not supported. The caller must either use
+// crypto.rsa.PSSSaltLengthEqualsHash (recommended) or pass an
+// explicit salt length. Moreover the underlying PKCS#11
+// implementation may impose further restrictions.
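+//
+// For example, an illustrative sketch (assuming digest already holds the SHA-256
+// hash of the message to be signed; the rand argument may be nil since it is
+// ignored):
+//
+//	sig, err := priv.Sign(nil, digest, &rsa.PSSOptions{
+//		SaltLength: rsa.PSSSaltLengthEqualsHash,
+//		Hash:       crypto.SHA256,
+//	})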
+func (priv *pkcs11PrivateKeyRSA) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {
+	err = priv.context.withSession(func(session *pkcs11Session) error {
+		switch opts.(type) {
+		case *rsa.PSSOptions:
+			signature, err = signPSS(session, priv, digest, opts.(*rsa.PSSOptions))
+		default: /* PKCS1-v1_5 */
+			signature, err = signPKCS1v15(session, priv, digest, opts.HashFunc())
+		}
+		return err
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return signature, err
+}
diff --git a/vendor/github.com/ThalesIgnite/crypto11/sessions.go b/vendor/github.com/ThalesIgnite/crypto11/sessions.go
new file mode 100644
index 000000000..af86b7efe
--- /dev/null
+++ b/vendor/github.com/ThalesIgnite/crypto11/sessions.go
@@ -0,0 +1,88 @@
+// Copyright 2016, 2017 Thales e-Security, Inc
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package crypto11
+
+import (
+	"context"
+	"errors"
+
+	"github.com/miekg/pkcs11"
+	"github.com/thales-e-security/pool"
+)
+
+// pkcs11Session wraps a PKCS#11 session handle so we can use it in a resource pool.
+type pkcs11Session struct {
+	ctx    *pkcs11.Ctx
+	handle pkcs11.SessionHandle
+}
+
+// Close is required to satisfy the pool.Resource interface. It closes the session, but swallows any
+// errors that occur.
+func (s pkcs11Session) Close() {
+	// We cannot return an error, so we swallow it
+	_ = s.ctx.CloseSession(s.handle)
+}
+
+// withSession executes a function with a session.
+func (c *Context) withSession(f func(session *pkcs11Session) error) error {
+	session, err := c.getSession()
+	if err != nil {
+		return err
+	}
+	defer c.pool.Put(session)
+
+	return f(session)
+}
+
+// getSession retrieves a session from the pool, respecting the timeout defined in the Context config.
+// Callers are responsible for putting this session back in the pool.
+func (c *Context) getSession() (*pkcs11Session, error) {
+	ctx := context.Background()
+
+	if c.cfg.PoolWaitTimeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(context.Background(), c.cfg.PoolWaitTimeout)
+		defer cancel()
+	}
+
+	resource, err := c.pool.Get(ctx)
+	if err == pool.ErrClosed {
+		// Our Context must have been closed, return a nicer error.
+		// We don't use errClosed to ensure our tests identify functions that aren't checking for closure
+		// correctly.
+ return nil, errors.New("context is closed") + } + if err != nil { + return nil, err + } + + return resource.(*pkcs11Session), nil +} + +// resourcePoolFactoryFunc is called by the resource pool when a new session is needed. +func (c *Context) resourcePoolFactoryFunc() (pool.Resource, error) { + session, err := c.ctx.OpenSession(c.slot, pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION) + if err != nil { + return nil, err + } + return &pkcs11Session{c.ctx, session}, nil +} diff --git a/vendor/github.com/ThalesIgnite/crypto11/symmetric.go b/vendor/github.com/ThalesIgnite/crypto11/symmetric.go new file mode 100644 index 000000000..4a01248b7 --- /dev/null +++ b/vendor/github.com/ThalesIgnite/crypto11/symmetric.go @@ -0,0 +1,366 @@ +// Copyright 2018 Thales e-Security, Inc +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package crypto11 + +import ( + "errors" + + "github.com/miekg/pkcs11" +) + +// SymmetricGenParams holds a consistent (key type, mechanism) key generation pair. +type SymmetricGenParams struct { + // Key type (CKK_...) + KeyType uint + + // Key generation mechanism (CKM_..._KEY_GEN) + GenMech uint +} + +// SymmetricCipher represents information about a symmetric cipher. +type SymmetricCipher struct { + // Possible key generation parameters + // (For HMAC this varies between PKCS#11 implementations.) + GenParams []SymmetricGenParams + + // Block size in bytes + BlockSize int + + // True if encryption supported + Encrypt bool + + // True if MAC supported + MAC bool + + // ECB mechanism (CKM_..._ECB) + ECBMech uint + + // CBC mechanism (CKM_..._CBC) + CBCMech uint + + // CBC mechanism with PKCS#7 padding (CKM_..._CBC) + CBCPKCSMech uint + + // GCM mechanism (CKM_..._GCM) + GCMMech uint +} + +// CipherAES describes the AES cipher. Use this with the +// GenerateSecretKey... functions. +var CipherAES = &SymmetricCipher{ + GenParams: []SymmetricGenParams{ + { + KeyType: pkcs11.CKK_AES, + GenMech: pkcs11.CKM_AES_KEY_GEN, + }, + }, + BlockSize: 16, + Encrypt: true, + MAC: false, + ECBMech: pkcs11.CKM_AES_ECB, + CBCMech: pkcs11.CKM_AES_CBC, + CBCPKCSMech: pkcs11.CKM_AES_CBC_PAD, + GCMMech: pkcs11.CKM_AES_GCM, +} + +// CipherDES3 describes the three-key triple-DES cipher. Use this with the +// GenerateSecretKey... functions. 
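+//
+// An illustrative sketch (assuming ctx is a *crypto11.Context; a bit length of 0 is
+// passed since CKM_DES3_KEY_GEN produces keys of a fixed length, so no CKA_VALUE_LEN
+// is needed):
+//
+//	key, err := ctx.GenerateSecretKey([]byte("my-des3-key"), 0, crypto11.CipherDES3)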
+var CipherDES3 = &SymmetricCipher{ + GenParams: []SymmetricGenParams{ + { + KeyType: pkcs11.CKK_DES3, + GenMech: pkcs11.CKM_DES3_KEY_GEN, + }, + }, + BlockSize: 8, + Encrypt: true, + MAC: false, + ECBMech: pkcs11.CKM_DES3_ECB, + CBCMech: pkcs11.CKM_DES3_CBC, + CBCPKCSMech: pkcs11.CKM_DES3_CBC_PAD, + GCMMech: 0, +} + +// CipherGeneric describes the CKK_GENERIC_SECRET key type. Use this with the +// GenerateSecretKey... functions. +// +// The spec promises that this mechanism can be used to perform HMAC +// operations, although implementations vary; +// CipherHMACSHA1 and so on may give better results. +var CipherGeneric = &SymmetricCipher{ + GenParams: []SymmetricGenParams{ + { + KeyType: pkcs11.CKK_GENERIC_SECRET, + GenMech: pkcs11.CKM_GENERIC_SECRET_KEY_GEN, + }, + }, + BlockSize: 64, + Encrypt: false, + MAC: true, + ECBMech: 0, + CBCMech: 0, + GCMMech: 0, +} + +// CipherHMACSHA1 describes the CKK_SHA_1_HMAC key type. Use this with the +// GenerateSecretKey... functions. +var CipherHMACSHA1 = &SymmetricCipher{ + GenParams: []SymmetricGenParams{ + { + KeyType: pkcs11.CKK_SHA_1_HMAC, + GenMech: CKM_NC_SHA_1_HMAC_KEY_GEN, + }, + { + KeyType: pkcs11.CKK_GENERIC_SECRET, + GenMech: pkcs11.CKM_GENERIC_SECRET_KEY_GEN, + }, + }, + BlockSize: 64, + Encrypt: false, + MAC: true, + ECBMech: 0, + CBCMech: 0, + GCMMech: 0, +} + +// CipherHMACSHA224 describes the CKK_SHA224_HMAC key type. Use this with the +// GenerateSecretKey... functions. +var CipherHMACSHA224 = &SymmetricCipher{ + GenParams: []SymmetricGenParams{ + { + KeyType: pkcs11.CKK_SHA224_HMAC, + GenMech: CKM_NC_SHA224_HMAC_KEY_GEN, + }, + { + KeyType: pkcs11.CKK_GENERIC_SECRET, + GenMech: pkcs11.CKM_GENERIC_SECRET_KEY_GEN, + }, + }, + BlockSize: 64, + Encrypt: false, + MAC: true, + ECBMech: 0, + CBCMech: 0, + GCMMech: 0, +} + +// CipherHMACSHA256 describes the CKK_SHA256_HMAC key type. Use this with the +// GenerateSecretKey... functions. +var CipherHMACSHA256 = &SymmetricCipher{ + GenParams: []SymmetricGenParams{ + { + KeyType: pkcs11.CKK_SHA256_HMAC, + GenMech: CKM_NC_SHA256_HMAC_KEY_GEN, + }, + { + KeyType: pkcs11.CKK_GENERIC_SECRET, + GenMech: pkcs11.CKM_GENERIC_SECRET_KEY_GEN, + }, + }, + BlockSize: 64, + Encrypt: false, + MAC: true, + ECBMech: 0, + CBCMech: 0, + GCMMech: 0, +} + +// CipherHMACSHA384 describes the CKK_SHA384_HMAC key type. Use this with the +// GenerateSecretKey... functions. +var CipherHMACSHA384 = &SymmetricCipher{ + GenParams: []SymmetricGenParams{ + { + KeyType: pkcs11.CKK_SHA384_HMAC, + GenMech: CKM_NC_SHA384_HMAC_KEY_GEN, + }, + { + KeyType: pkcs11.CKK_GENERIC_SECRET, + GenMech: pkcs11.CKM_GENERIC_SECRET_KEY_GEN, + }, + }, + BlockSize: 64, + Encrypt: false, + MAC: true, + ECBMech: 0, + CBCMech: 0, + GCMMech: 0, +} + +// CipherHMACSHA512 describes the CKK_SHA512_HMAC key type. Use this with the +// GenerateSecretKey... functions. +var CipherHMACSHA512 = &SymmetricCipher{ + GenParams: []SymmetricGenParams{ + { + KeyType: pkcs11.CKK_SHA512_HMAC, + GenMech: CKM_NC_SHA512_HMAC_KEY_GEN, + }, + { + KeyType: pkcs11.CKK_GENERIC_SECRET, + GenMech: pkcs11.CKM_GENERIC_SECRET_KEY_GEN, + }, + }, + BlockSize: 128, + Encrypt: false, + MAC: true, + ECBMech: 0, + CBCMech: 0, + GCMMech: 0, +} + +// Ciphers is a map of PKCS#11 key types (CKK_...) to symmetric cipher information. 
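+//
+// For example (an illustrative lookup):
+//
+//	if cipher, ok := crypto11.Ciphers[pkcs11.CKK_AES]; ok {
+//		fmt.Println(cipher.BlockSize) // prints 16
+//	}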
+var Ciphers = map[int]*SymmetricCipher{
+	pkcs11.CKK_AES:            CipherAES,
+	pkcs11.CKK_DES3:           CipherDES3,
+	pkcs11.CKK_GENERIC_SECRET: CipherGeneric,
+	pkcs11.CKK_SHA_1_HMAC:     CipherHMACSHA1,
+	pkcs11.CKK_SHA224_HMAC:    CipherHMACSHA224,
+	pkcs11.CKK_SHA256_HMAC:    CipherHMACSHA256,
+	pkcs11.CKK_SHA384_HMAC:    CipherHMACSHA384,
+	pkcs11.CKK_SHA512_HMAC:    CipherHMACSHA512,
+}
+
+// SecretKey contains a reference to a loaded PKCS#11 symmetric key object.
+//
+// A *SecretKey implements the cipher.Block interface, allowing it to be used
+// as the argument to cipher.NewCBCEncrypter and similar methods.
+// For bulk operation this is very inefficient;
+// using NewCBCEncrypterCloser, NewCBCEncrypter or NewCBC from this package is
+// much faster.
+type SecretKey struct {
+	pkcs11Object
+
+	// Symmetric cipher information
+	Cipher *SymmetricCipher
+}
+
+// GenerateSecretKey creates a secret key of given length and type. The id parameter is used to
+// set CKA_ID and must be non-nil.
+func (c *Context) GenerateSecretKey(id []byte, bits int, cipher *SymmetricCipher) (*SecretKey, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	template, err := NewAttributeSetWithID(id)
+	if err != nil {
+		return nil, err
+	}
+	return c.GenerateSecretKeyWithAttributes(template, bits, cipher)
+}
+
+// GenerateSecretKeyWithLabel creates a secret key of given length and type. The id and label parameters are used to
+// set CKA_ID and CKA_LABEL respectively and must be non-nil.
+func (c *Context) GenerateSecretKeyWithLabel(id, label []byte, bits int, cipher *SymmetricCipher) (*SecretKey, error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	template, err := NewAttributeSetWithIDAndLabel(id, label)
+	if err != nil {
+		return nil, err
+	}
+	return c.GenerateSecretKeyWithAttributes(template, bits, cipher)
+}
+
+// GenerateSecretKeyWithAttributes creates a secret key of given length and type. After this function returns, template
+// will contain the attributes applied to the key. If required attributes are missing, they will be set to a default
+// value.
+func (c *Context) GenerateSecretKeyWithAttributes(template AttributeSet, bits int, cipher *SymmetricCipher) (k *SecretKey, err error) {
+	if c.closed.Get() {
+		return nil, errClosed
+	}
+
+	err = c.withSession(func(session *pkcs11Session) error {
+
+		// CKK_*_HMAC exists but there is no specific corresponding CKM_*_KEY_GEN
+		// mechanism. Therefore we attempt both CKM_GENERIC_SECRET_KEY_GEN and
+		// vendor-specific mechanisms.
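+		// The loop below tries each entry of cipher.GenParams in order and falls
+		// through to the next entry when the token rejects the CKK/CKM combination.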
+
+		template.AddIfNotPresent([]*pkcs11.Attribute{
+			pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_SECRET_KEY),
+			pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
+			pkcs11.NewAttribute(pkcs11.CKA_SIGN, cipher.MAC),
+			pkcs11.NewAttribute(pkcs11.CKA_VERIFY, cipher.MAC),
+			pkcs11.NewAttribute(pkcs11.CKA_ENCRYPT, cipher.Encrypt), // Not supported on CloudHSM
+			pkcs11.NewAttribute(pkcs11.CKA_DECRYPT, cipher.Encrypt), // Not supported on CloudHSM
+			pkcs11.NewAttribute(pkcs11.CKA_SENSITIVE, true),
+			pkcs11.NewAttribute(pkcs11.CKA_EXTRACTABLE, false),
+		})
+		if bits > 0 {
+			_ = template.Set(pkcs11.CKA_VALUE_LEN, bits/8) // safe for an int
+		}
+
+		for n, genMech := range cipher.GenParams {
+
+			_ = template.Set(CkaKeyType, genMech.KeyType)
+
+			mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(genMech.GenMech, nil)}
+
+			privHandle, err := session.ctx.GenerateKey(session.handle, mech, template.ToSlice())
+			if err == nil {
+				k = &SecretKey{pkcs11Object{privHandle, c}, cipher}
+				return nil
+			}
+
+			// As a special case, AWS CloudHSM does not accept CKA_ENCRYPT and CKA_DECRYPT on a
+			// Generic Secret key. If we are in that special case, try again without those attributes.
+			if e, ok := err.(pkcs11.Error); ok && e == pkcs11.CKR_ARGUMENTS_BAD && genMech.GenMech == pkcs11.CKM_GENERIC_SECRET_KEY_GEN {
+				adjustedTemplate := template.Copy()
+				adjustedTemplate.Unset(CkaEncrypt)
+				adjustedTemplate.Unset(CkaDecrypt)
+
+				privHandle, err = session.ctx.GenerateKey(session.handle, mech, adjustedTemplate.ToSlice())
+				if err == nil {
+					// Store the actual attributes
+					template.cloneFrom(adjustedTemplate)
+
+					k = &SecretKey{pkcs11Object{privHandle, c}, cipher}
+					return nil
+				}
+			}
+
+			if n == len(cipher.GenParams)-1 {
+				// If we have tried all available gen params, we should return a sensible error. So we skip the
+				// retry logic below and return directly.
+				return err
+			}
+
+			// nShield returns CKR_TEMPLATE_INCONSISTENT if it doesn't like the CKK/CKM combination.
+			// AWS CloudHSM returns CKR_ATTRIBUTE_VALUE_INVALID in the same circumstances.
+			if e, ok := err.(pkcs11.Error); ok &&
+				(e == pkcs11.CKR_TEMPLATE_INCONSISTENT || e == pkcs11.CKR_ATTRIBUTE_VALUE_INVALID) {
+				continue
+			}
+
+			return err
+		}
+
+		// We can only get here if there were no GenParams
+		return errors.New("cipher must have GenParams")
+	})
+	return
+}
+
+// Delete deletes the secret key from the token.
+func (key *SecretKey) Delete() error {
+	return key.pkcs11Object.Delete()
+}
diff --git a/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/LICENSE b/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/LICENSE
new file mode 100644
index 000000000..0c44dcefe
--- /dev/null
+++ b/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/client/client.go b/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/client/client.go new file mode 100644 index 000000000..1d47c93aa --- /dev/null +++ b/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/client/client.go @@ -0,0 +1,305 @@ +// This file is auto-generated, don't edit it. Thanks. +package client + +import ( + "io" + + "github.com/alibabacloud-go/tea/tea" + credential "github.com/aliyun/credentials-go/credentials" +) + +type InterceptorContext struct { + Request *InterceptorContextRequest `json:"request,omitempty" xml:"request,omitempty" require:"true" type:"Struct"` + Configuration *InterceptorContextConfiguration `json:"configuration,omitempty" xml:"configuration,omitempty" require:"true" type:"Struct"` + Response *InterceptorContextResponse `json:"response,omitempty" xml:"response,omitempty" require:"true" type:"Struct"` +} + +func (s InterceptorContext) String() string { + return tea.Prettify(s) +} + +func (s InterceptorContext) GoString() string { + return s.String() +} + +func (s *InterceptorContext) SetRequest(v *InterceptorContextRequest) *InterceptorContext { + s.Request = v + return s +} + +func (s *InterceptorContext) SetConfiguration(v *InterceptorContextConfiguration) *InterceptorContext { + s.Configuration = v + return s +} + +func (s *InterceptorContext) SetResponse(v *InterceptorContextResponse) *InterceptorContext { + s.Response = v + return s +} + +type InterceptorContextRequest struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty"` + Query map[string]*string `json:"query,omitempty" xml:"query,omitempty"` + Body interface{} `json:"body,omitempty" xml:"body,omitempty"` + Stream io.Reader `json:"stream,omitempty" xml:"stream,omitempty"` + HostMap map[string]*string `json:"hostMap,omitempty" xml:"hostMap,omitempty"` + Pathname *string `json:"pathname,omitempty" xml:"pathname,omitempty" require:"true"` + ProductId *string `json:"productId,omitempty" xml:"productId,omitempty" require:"true"` + Action *string `json:"action,omitempty" xml:"action,omitempty" require:"true"` + Version *string `json:"version,omitempty" xml:"version,omitempty" require:"true"` + Protocol *string `json:"protocol,omitempty" xml:"protocol,omitempty" require:"true"` + Method *string `json:"method,omitempty" xml:"method,omitempty" require:"true"` + AuthType *string `json:"authType,omitempty" xml:"authType,omitempty" require:"true"` + BodyType *string `json:"bodyType,omitempty" xml:"bodyType,omitempty" require:"true"` + ReqBodyType *string `json:"reqBodyType,omitempty" xml:"reqBodyType,omitempty" require:"true"` + Style *string `json:"style,omitempty" xml:"style,omitempty"` + Credential credential.Credential `json:"credential,omitempty" xml:"credential,omitempty" require:"true"` + SignatureVersion *string `json:"signatureVersion,omitempty" xml:"signatureVersion,omitempty"` + SignatureAlgorithm *string `json:"signatureAlgorithm,omitempty" xml:"signatureAlgorithm,omitempty"` + UserAgent *string `json:"userAgent,omitempty" xml:"userAgent,omitempty" require:"true"` +} + +func (s InterceptorContextRequest) String() string { + return tea.Prettify(s) +} + +func (s InterceptorContextRequest) GoString() string { + return s.String() +} + +func (s *InterceptorContextRequest) SetHeaders(v map[string]*string) *InterceptorContextRequest { + s.Headers = v + return s +} + +func (s *InterceptorContextRequest) SetQuery(v map[string]*string) *InterceptorContextRequest { + s.Query = v + 
return s +} + +func (s *InterceptorContextRequest) SetBody(v interface{}) *InterceptorContextRequest { + s.Body = v + return s +} + +func (s *InterceptorContextRequest) SetStream(v io.Reader) *InterceptorContextRequest { + s.Stream = v + return s +} + +func (s *InterceptorContextRequest) SetHostMap(v map[string]*string) *InterceptorContextRequest { + s.HostMap = v + return s +} + +func (s *InterceptorContextRequest) SetPathname(v string) *InterceptorContextRequest { + s.Pathname = &v + return s +} + +func (s *InterceptorContextRequest) SetProductId(v string) *InterceptorContextRequest { + s.ProductId = &v + return s +} + +func (s *InterceptorContextRequest) SetAction(v string) *InterceptorContextRequest { + s.Action = &v + return s +} + +func (s *InterceptorContextRequest) SetVersion(v string) *InterceptorContextRequest { + s.Version = &v + return s +} + +func (s *InterceptorContextRequest) SetProtocol(v string) *InterceptorContextRequest { + s.Protocol = &v + return s +} + +func (s *InterceptorContextRequest) SetMethod(v string) *InterceptorContextRequest { + s.Method = &v + return s +} + +func (s *InterceptorContextRequest) SetAuthType(v string) *InterceptorContextRequest { + s.AuthType = &v + return s +} + +func (s *InterceptorContextRequest) SetBodyType(v string) *InterceptorContextRequest { + s.BodyType = &v + return s +} + +func (s *InterceptorContextRequest) SetReqBodyType(v string) *InterceptorContextRequest { + s.ReqBodyType = &v + return s +} + +func (s *InterceptorContextRequest) SetStyle(v string) *InterceptorContextRequest { + s.Style = &v + return s +} + +func (s *InterceptorContextRequest) SetCredential(v credential.Credential) *InterceptorContextRequest { + s.Credential = v + return s +} + +func (s *InterceptorContextRequest) SetSignatureVersion(v string) *InterceptorContextRequest { + s.SignatureVersion = &v + return s +} + +func (s *InterceptorContextRequest) SetSignatureAlgorithm(v string) *InterceptorContextRequest { + s.SignatureAlgorithm = &v + return s +} + +func (s *InterceptorContextRequest) SetUserAgent(v string) *InterceptorContextRequest { + s.UserAgent = &v + return s +} + +type InterceptorContextConfiguration struct { + RegionId *string `json:"regionId,omitempty" xml:"regionId,omitempty" require:"true"` + Endpoint *string `json:"endpoint,omitempty" xml:"endpoint,omitempty"` + EndpointRule *string `json:"endpointRule,omitempty" xml:"endpointRule,omitempty"` + EndpointMap map[string]*string `json:"endpointMap,omitempty" xml:"endpointMap,omitempty"` + EndpointType *string `json:"endpointType,omitempty" xml:"endpointType,omitempty"` + Network *string `json:"network,omitempty" xml:"network,omitempty"` + Suffix *string `json:"suffix,omitempty" xml:"suffix,omitempty"` +} + +func (s InterceptorContextConfiguration) String() string { + return tea.Prettify(s) +} + +func (s InterceptorContextConfiguration) GoString() string { + return s.String() +} + +func (s *InterceptorContextConfiguration) SetRegionId(v string) *InterceptorContextConfiguration { + s.RegionId = &v + return s +} + +func (s *InterceptorContextConfiguration) SetEndpoint(v string) *InterceptorContextConfiguration { + s.Endpoint = &v + return s +} + +func (s *InterceptorContextConfiguration) SetEndpointRule(v string) *InterceptorContextConfiguration { + s.EndpointRule = &v + return s +} + +func (s *InterceptorContextConfiguration) SetEndpointMap(v map[string]*string) *InterceptorContextConfiguration { + s.EndpointMap = v + return s +} + +func (s *InterceptorContextConfiguration) SetEndpointType(v string) 
*InterceptorContextConfiguration { + s.EndpointType = &v + return s +} + +func (s *InterceptorContextConfiguration) SetNetwork(v string) *InterceptorContextConfiguration { + s.Network = &v + return s +} + +func (s *InterceptorContextConfiguration) SetSuffix(v string) *InterceptorContextConfiguration { + s.Suffix = &v + return s +} + +type InterceptorContextResponse struct { + StatusCode *int `json:"statusCode,omitempty" xml:"statusCode,omitempty"` + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty"` + Body io.Reader `json:"body,omitempty" xml:"body,omitempty"` + DeserializedBody interface{} `json:"deserializedBody,omitempty" xml:"deserializedBody,omitempty"` +} + +func (s InterceptorContextResponse) String() string { + return tea.Prettify(s) +} + +func (s InterceptorContextResponse) GoString() string { + return s.String() +} + +func (s *InterceptorContextResponse) SetStatusCode(v int) *InterceptorContextResponse { + s.StatusCode = &v + return s +} + +func (s *InterceptorContextResponse) SetHeaders(v map[string]*string) *InterceptorContextResponse { + s.Headers = v + return s +} + +func (s *InterceptorContextResponse) SetBody(v io.Reader) *InterceptorContextResponse { + s.Body = v + return s +} + +func (s *InterceptorContextResponse) SetDeserializedBody(v interface{}) *InterceptorContextResponse { + s.DeserializedBody = v + return s +} + +type AttributeMap struct { + Attributes map[string]interface{} `json:"attributes,omitempty" xml:"attributes,omitempty" require:"true"` + Key map[string]*string `json:"key,omitempty" xml:"key,omitempty" require:"true"` +} + +func (s AttributeMap) String() string { + return tea.Prettify(s) +} + +func (s AttributeMap) GoString() string { + return s.String() +} + +func (s *AttributeMap) SetAttributes(v map[string]interface{}) *AttributeMap { + s.Attributes = v + return s +} + +func (s *AttributeMap) SetKey(v map[string]*string) *AttributeMap { + s.Key = v + return s +} + +type ClientInterface interface { + ModifyConfiguration(context *InterceptorContext, attributeMap *AttributeMap) error + ModifyRequest(context *InterceptorContext, attributeMap *AttributeMap) error + ModifyResponse(context *InterceptorContext, attributeMap *AttributeMap) error +} + +type Client struct { +} + +func NewClient() (*Client, error) { + client := new(Client) + err := client.Init() + return client, err +} + +func (client *Client) Init() (_err error) { + return nil +} + +func (client *Client) ModifyConfiguration(context *InterceptorContext, attributeMap *AttributeMap) (_err error) { + panic("No Support!") +} + +func (client *Client) ModifyRequest(context *InterceptorContext, attributeMap *AttributeMap) (_err error) { + panic("No Support!") +} + +func (client *Client) ModifyResponse(context *InterceptorContext, attributeMap *AttributeMap) (_err error) { + panic("No Support!") +} diff --git a/vendor/github.com/alibabacloud-go/cr-20160607/client/client.go b/vendor/github.com/alibabacloud-go/cr-20160607/client/client.go new file mode 100644 index 000000000..d2179d673 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/cr-20160607/client/client.go @@ -0,0 +1,2513 @@ +// This file is auto-generated, don't edit it. Thanks. 
+/** + * + */ +package client + +import ( + openapi "github.com/alibabacloud-go/darabonba-openapi/client" + endpointutil "github.com/alibabacloud-go/endpoint-util/service" + openapiutil "github.com/alibabacloud-go/openapi-util/service" + util "github.com/alibabacloud-go/tea-utils/service" + "github.com/alibabacloud-go/tea/tea" +) + +type CancelRepoBuildResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` +} + +func (s CancelRepoBuildResponse) String() string { + return tea.Prettify(s) +} + +func (s CancelRepoBuildResponse) GoString() string { + return s.String() +} + +func (s *CancelRepoBuildResponse) SetHeaders(v map[string]*string) *CancelRepoBuildResponse { + s.Headers = v + return s +} + +type CreateNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` +} + +func (s CreateNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateNamespaceResponse) GoString() string { + return s.String() +} + +func (s *CreateNamespaceResponse) SetHeaders(v map[string]*string) *CreateNamespaceResponse { + s.Headers = v + return s +} + +type CreateRepoResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` +} + +func (s CreateRepoResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoResponse) SetHeaders(v map[string]*string) *CreateRepoResponse { + s.Headers = v + return s +} + +type CreateRepoBuildRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` +} + +func (s CreateRepoBuildRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoBuildRuleResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoBuildRuleResponse) SetHeaders(v map[string]*string) *CreateRepoBuildRuleResponse { + s.Headers = v + return s +} + +type CreateRepoWebhookResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` +} + +func (s CreateRepoWebhookResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoWebhookResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoWebhookResponse) SetHeaders(v map[string]*string) *CreateRepoWebhookResponse { + s.Headers = v + return s +} + +type CreateUserInfoResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` +} + +func (s CreateUserInfoResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateUserInfoResponse) GoString() string { + return s.String() +} + +func (s *CreateUserInfoResponse) SetHeaders(v map[string]*string) *CreateUserInfoResponse { + s.Headers = v + return s +} + +type DeleteImageResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` +} + +func (s DeleteImageResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteImageResponse) GoString() string { + return s.String() +} + +func (s *DeleteImageResponse) SetHeaders(v map[string]*string) *DeleteImageResponse { + s.Headers = v + return s +} + +type DeleteNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` +} + +func (s DeleteNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s 
DeleteNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *DeleteNamespaceResponse) SetHeaders(v map[string]*string) *DeleteNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+type DeleteRepoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s DeleteRepoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s DeleteRepoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *DeleteRepoResponse) SetHeaders(v map[string]*string) *DeleteRepoResponse {
+	s.Headers = v
+	return s
+}
+
+type DeleteRepoBuildRuleResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s DeleteRepoBuildRuleResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s DeleteRepoBuildRuleResponse) GoString() string {
+	return s.String()
+}
+
+func (s *DeleteRepoBuildRuleResponse) SetHeaders(v map[string]*string) *DeleteRepoBuildRuleResponse {
+	s.Headers = v
+	return s
+}
+
+type DeleteRepoWebhookResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s DeleteRepoWebhookResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s DeleteRepoWebhookResponse) GoString() string {
+	return s.String()
+}
+
+func (s *DeleteRepoWebhookResponse) SetHeaders(v map[string]*string) *DeleteRepoWebhookResponse {
+	s.Headers = v
+	return s
+}
+
+type GetAuthorizationTokenResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetAuthorizationTokenResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetAuthorizationTokenResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetAuthorizationTokenResponse) SetHeaders(v map[string]*string) *GetAuthorizationTokenResponse {
+	s.Headers = v
+	return s
+}
+
+type GetImageLayerResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetImageLayerResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetImageLayerResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetImageLayerResponse) SetHeaders(v map[string]*string) *GetImageLayerResponse {
+	s.Headers = v
+	return s
+}
+
+type GetImageManifestRequest struct {
+	SchemaVersion *int32 `json:"SchemaVersion,omitempty" xml:"SchemaVersion,omitempty"`
+}
+
+func (s GetImageManifestRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetImageManifestRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetImageManifestRequest) SetSchemaVersion(v int32) *GetImageManifestRequest {
+	s.SchemaVersion = &v
+	return s
+}
+
+type GetImageManifestResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetImageManifestResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetImageManifestResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetImageManifestResponse) SetHeaders(v map[string]*string) *GetImageManifestResponse {
+	s.Headers = v
+	return s
+}
+
+type GetNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetNamespaceResponse) SetHeaders(v map[string]*string) *GetNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+type GetNamespaceListRequest struct {
+	Authorize *string `json:"Authorize,omitempty" xml:"Authorize,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s GetNamespaceListRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetNamespaceListRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetNamespaceListRequest) SetAuthorize(v string) *GetNamespaceListRequest {
+	s.Authorize = &v
+	return s
+}
+
+func (s *GetNamespaceListRequest) SetStatus(v string) *GetNamespaceListRequest {
+	s.Status = &v
+	return s
+}
+
+type GetNamespaceListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetNamespaceListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetNamespaceListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetNamespaceListResponse) SetHeaders(v map[string]*string) *GetNamespaceListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRegionRequest struct {
+	Domain *string `json:"Domain,omitempty" xml:"Domain,omitempty"`
+}
+
+func (s GetRegionRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRegionRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRegionRequest) SetDomain(v string) *GetRegionRequest {
+	s.Domain = &v
+	return s
+}
+
+type GetRegionResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRegionResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRegionResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRegionResponse) SetHeaders(v map[string]*string) *GetRegionResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRegionListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRegionListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRegionListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRegionListResponse) SetHeaders(v map[string]*string) *GetRegionListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoResponse) SetHeaders(v map[string]*string) *GetRepoResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoBuildListRequest struct {
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+}
+
+func (s GetRepoBuildListRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildListRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildListRequest) SetPage(v int32) *GetRepoBuildListRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *GetRepoBuildListRequest) SetPageSize(v int32) *GetRepoBuildListRequest {
+	s.PageSize = &v
+	return s
+}
+
+type GetRepoBuildListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoBuildListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildListResponse) SetHeaders(v map[string]*string) *GetRepoBuildListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoBuildRuleListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoBuildRuleListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildRuleListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildRuleListResponse) SetHeaders(v map[string]*string) *GetRepoBuildRuleListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoBuildStatusResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoBuildStatusResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildStatusResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildStatusResponse) SetHeaders(v map[string]*string) *GetRepoBuildStatusResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoListRequest struct {
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s GetRepoListRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoListRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoListRequest) SetPage(v int32) *GetRepoListRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *GetRepoListRequest) SetPageSize(v int32) *GetRepoListRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *GetRepoListRequest) SetStatus(v string) *GetRepoListRequest {
+	s.Status = &v
+	return s
+}
+
+type GetRepoListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoListResponse) SetHeaders(v map[string]*string) *GetRepoListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoListByNamespaceRequest struct {
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s GetRepoListByNamespaceRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoListByNamespaceRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoListByNamespaceRequest) SetPage(v int32) *GetRepoListByNamespaceRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *GetRepoListByNamespaceRequest) SetPageSize(v int32) *GetRepoListByNamespaceRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *GetRepoListByNamespaceRequest) SetStatus(v string) *GetRepoListByNamespaceRequest {
+	s.Status = &v
+	return s
+}
+
+type GetRepoListByNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoListByNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoListByNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoListByNamespaceResponse) SetHeaders(v map[string]*string) *GetRepoListByNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoTagResponseBody struct {
+	Digest *string `json:"digest,omitempty" xml:"digest,omitempty"`
+	ImageCreate *int64 `json:"imageCreate,omitempty" xml:"imageCreate,omitempty"`
+	ImageId *string `json:"imageId,omitempty" xml:"imageId,omitempty"`
+	ImageSize *int64 `json:"imageSize,omitempty" xml:"imageSize,omitempty"`
+	ImageUpdate *int64 `json:"imageUpdate,omitempty" xml:"imageUpdate,omitempty"`
+	RequestId *string `json:"requestId,omitempty" xml:"requestId,omitempty"`
+	Status *string `json:"status,omitempty" xml:"status,omitempty"`
+	Tag *string `json:"tag,omitempty" xml:"tag,omitempty"`
+}
+
+func (s GetRepoTagResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagResponseBody) SetDigest(v string) *GetRepoTagResponseBody {
+	s.Digest = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageCreate(v int64) *GetRepoTagResponseBody {
+	s.ImageCreate = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageId(v string) *GetRepoTagResponseBody {
+	s.ImageId = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageSize(v int64) *GetRepoTagResponseBody {
+	s.ImageSize = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageUpdate(v int64) *GetRepoTagResponseBody {
+	s.ImageUpdate = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetRequestId(v string) *GetRepoTagResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetStatus(v string) *GetRepoTagResponseBody {
+	s.Status = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetTag(v string) *GetRepoTagResponseBody {
+	s.Tag = &v
+	return s
+}
+
+type GetRepoTagResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetRepoTagResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagResponse) SetHeaders(v map[string]*string) *GetRepoTagResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetRepoTagResponse) SetBody(v *GetRepoTagResponseBody) *GetRepoTagResponse {
+	s.Body = v
+	return s
+}
+
+type GetRepoTagScanListRequest struct {
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	Severity *string `json:"Severity,omitempty" xml:"Severity,omitempty"`
+}
+
+func (s GetRepoTagScanListRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagScanListRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagScanListRequest) SetPage(v int32) *GetRepoTagScanListRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *GetRepoTagScanListRequest) SetPageSize(v int32) *GetRepoTagScanListRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *GetRepoTagScanListRequest) SetSeverity(v string) *GetRepoTagScanListRequest {
+	s.Severity = &v
+	return s
+}
+
+type GetRepoTagScanListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoTagScanListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagScanListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagScanListResponse) SetHeaders(v map[string]*string) *GetRepoTagScanListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoTagScanStatusResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoTagScanStatusResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagScanStatusResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagScanStatusResponse) SetHeaders(v map[string]*string) *GetRepoTagScanStatusResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoTagScanSummaryResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoTagScanSummaryResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagScanSummaryResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagScanSummaryResponse) SetHeaders(v map[string]*string) *GetRepoTagScanSummaryResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoTagsRequest struct {
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+}
+
+func (s GetRepoTagsRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagsRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagsRequest) SetPage(v int32) *GetRepoTagsRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *GetRepoTagsRequest) SetPageSize(v int32) *GetRepoTagsRequest {
+	s.PageSize = &v
+	return s
+}
+
+type GetRepoTagsResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoTagsResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagsResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagsResponse) SetHeaders(v map[string]*string) *GetRepoTagsResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoWebhookResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoWebhookResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoWebhookResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoWebhookResponse) SetHeaders(v map[string]*string) *GetRepoWebhookResponse {
+	s.Headers = v
+	return s
+}
+
+type GetResourceQuotaResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetResourceQuotaResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetResourceQuotaResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetResourceQuotaResponse) SetHeaders(v map[string]*string) *GetResourceQuotaResponse {
+	s.Headers = v
+	return s
+}
+
+type StartImageScanResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s StartImageScanResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s StartImageScanResponse) GoString() string {
+	return s.String()
+}
+
+func (s *StartImageScanResponse) SetHeaders(v map[string]*string) *StartImageScanResponse {
+	s.Headers = v
+	return s
+}
+
+type StartRepoBuildByRuleResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s StartRepoBuildByRuleResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s StartRepoBuildByRuleResponse) GoString() string {
+	return s.String()
+}
+
+func (s *StartRepoBuildByRuleResponse) SetHeaders(v map[string]*string) *StartRepoBuildByRuleResponse {
+	s.Headers = v
+	return s
+}
+
+type UpdateNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s UpdateNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateNamespaceResponse) SetHeaders(v map[string]*string) *UpdateNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+type UpdateRepoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s UpdateRepoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoResponse) SetHeaders(v map[string]*string) *UpdateRepoResponse {
+	s.Headers = v
+	return s
+}
+
+type UpdateRepoBuildRuleResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s UpdateRepoBuildRuleResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoBuildRuleResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoBuildRuleResponse) SetHeaders(v map[string]*string) *UpdateRepoBuildRuleResponse {
+	s.Headers = v
+	return s
+}
+
+type UpdateRepoWebhookResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s UpdateRepoWebhookResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoWebhookResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoWebhookResponse) SetHeaders(v map[string]*string) *UpdateRepoWebhookResponse {
+	s.Headers = v
+	return s
+}
+
+type UpdateUserInfoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s UpdateUserInfoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateUserInfoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateUserInfoResponse) SetHeaders(v map[string]*string) *UpdateUserInfoResponse {
+	s.Headers = v
+	return s
+}
+
+type Client struct {
+	openapi.Client
+}
+
+func NewClient(config *openapi.Config) (*Client, error) {
+	client := new(Client)
+	err := client.Init(config)
+	return client, err
+}
+
+func (client *Client) Init(config *openapi.Config) (_err error) {
+	_err = client.Client.Init(config)
+	if _err != nil {
+		return _err
+	}
+	client.EndpointRule = tea.String("regional")
+	_err = client.CheckConfig(config)
+	if _err != nil {
+		return _err
+	}
+	client.Endpoint, _err = client.GetEndpoint(tea.String("cr"), client.RegionId, client.EndpointRule, client.Network, client.Suffix, client.EndpointMap, client.Endpoint)
+	if _err != nil {
+		return _err
+	}
+
+	return nil
+}
+
+func (client *Client) GetEndpoint(productId *string, regionId *string, endpointRule *string, network *string, suffix *string, endpointMap map[string]*string, endpoint *string) (_result *string, _err error) {
+	if !tea.BoolValue(util.Empty(endpoint)) {
+		_result = endpoint
+		return _result, _err
+	}
+
+	if !tea.BoolValue(util.IsUnset(endpointMap)) && !tea.BoolValue(util.Empty(endpointMap[tea.StringValue(regionId)])) {
+		_result = endpointMap[tea.StringValue(regionId)]
+		return _result, _err
+	}
+
+	_body, _err := endpointutil.GetEndpointRules(productId, regionId, endpointRule, network, suffix)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CancelRepoBuild(RepoNamespace *string, RepoName *string, BuildId *string) (_result *CancelRepoBuildResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CancelRepoBuildResponse{}
+	_body, _err := client.CancelRepoBuildWithOptions(RepoNamespace, RepoName, BuildId, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CancelRepoBuildWithOptions(RepoNamespace *string, RepoName *string, BuildId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *CancelRepoBuildResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	BuildId = openapiutil.GetEncodeParam(BuildId)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CancelRepoBuild"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/build/" + tea.StringValue(BuildId) + "/cancel"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CancelRepoBuildResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateNamespace() (_result *CreateNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CreateNamespaceResponse{}
+	_body, _err := client.CreateNamespaceWithOptions(headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateNamespaceWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *CreateNamespaceResponse, _err error) {
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateNamespace"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/namespace"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CreateNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepo() (_result *CreateRepoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CreateRepoResponse{}
+	_body, _err := client.CreateRepoWithOptions(headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *CreateRepoResponse, _err error) {
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepo"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CreateRepoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoBuildRule(RepoNamespace *string, RepoName *string) (_result *CreateRepoBuildRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CreateRepoBuildRuleResponse{}
+	_body, _err := client.CreateRepoBuildRuleWithOptions(RepoNamespace, RepoName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoBuildRuleWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *CreateRepoBuildRuleResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoBuildRule"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/rules"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CreateRepoBuildRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoWebhook(RepoNamespace *string, RepoName *string) (_result *CreateRepoWebhookResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CreateRepoWebhookResponse{}
+	_body, _err := client.CreateRepoWebhookWithOptions(RepoNamespace, RepoName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoWebhookWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *CreateRepoWebhookResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoWebhook"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/webhooks"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CreateRepoWebhookResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateUserInfo() (_result *CreateUserInfoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CreateUserInfoResponse{}
+	_body, _err := client.CreateUserInfoWithOptions(headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateUserInfoWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *CreateUserInfoResponse, _err error) {
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateUserInfo"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/users"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CreateUserInfoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteImage(RepoNamespace *string, RepoName *string, Tag *string) (_result *DeleteImageResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &DeleteImageResponse{}
+	_body, _err := client.DeleteImageWithOptions(RepoNamespace, RepoName, Tag, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteImageWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *DeleteImageResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	Tag = openapiutil.GetEncodeParam(Tag)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteImage"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag)),
+		Method: tea.String("DELETE"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &DeleteImageResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteNamespace(Namespace *string) (_result *DeleteNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &DeleteNamespaceResponse{}
+	_body, _err := client.DeleteNamespaceWithOptions(Namespace, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteNamespaceWithOptions(Namespace *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *DeleteNamespaceResponse, _err error) {
+	Namespace = openapiutil.GetEncodeParam(Namespace)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteNamespace"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/namespace/" + tea.StringValue(Namespace)),
+		Method: tea.String("DELETE"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &DeleteNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepo(RepoNamespace *string, RepoName *string) (_result *DeleteRepoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &DeleteRepoResponse{}
+	_body, _err := client.DeleteRepoWithOptions(RepoNamespace, RepoName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *DeleteRepoResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepo"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName)),
+		Method: tea.String("DELETE"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &DeleteRepoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoBuildRule(RepoNamespace *string, RepoName *string, BuildRuleId *string) (_result *DeleteRepoBuildRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &DeleteRepoBuildRuleResponse{}
+	_body, _err := client.DeleteRepoBuildRuleWithOptions(RepoNamespace, RepoName, BuildRuleId, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoBuildRuleWithOptions(RepoNamespace *string, RepoName *string, BuildRuleId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *DeleteRepoBuildRuleResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	BuildRuleId = openapiutil.GetEncodeParam(BuildRuleId)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoBuildRule"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/rules/" + tea.StringValue(BuildRuleId)),
+		Method: tea.String("DELETE"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &DeleteRepoBuildRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoWebhook(RepoNamespace *string, RepoName *string, WebhookId *string) (_result *DeleteRepoWebhookResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &DeleteRepoWebhookResponse{}
+	_body, _err := client.DeleteRepoWebhookWithOptions(RepoNamespace, RepoName, WebhookId, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoWebhookWithOptions(RepoNamespace *string, RepoName *string, WebhookId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *DeleteRepoWebhookResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	WebhookId = openapiutil.GetEncodeParam(WebhookId)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoWebhook"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/webhooks/" + tea.StringValue(WebhookId)),
+		Method: tea.String("DELETE"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &DeleteRepoWebhookResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetAuthorizationToken() (_result *GetAuthorizationTokenResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetAuthorizationTokenResponse{}
+	_body, _err := client.GetAuthorizationTokenWithOptions(headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetAuthorizationTokenWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetAuthorizationTokenResponse, _err error) {
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetAuthorizationToken"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/tokens"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetAuthorizationTokenResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetImageLayer(RepoNamespace *string, RepoName *string, Tag *string) (_result *GetImageLayerResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetImageLayerResponse{}
+	_body, _err := client.GetImageLayerWithOptions(RepoNamespace, RepoName, Tag, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetImageLayerWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetImageLayerResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	Tag = openapiutil.GetEncodeParam(Tag)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetImageLayer"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/layers"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetImageLayerResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetImageManifest(RepoNamespace *string, RepoName *string, Tag *string, request *GetImageManifestRequest) (_result *GetImageManifestResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetImageManifestResponse{}
+	_body, _err := client.GetImageManifestWithOptions(RepoNamespace, RepoName, Tag, request, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetImageManifestWithOptions(RepoNamespace *string, RepoName *string, Tag *string, request *GetImageManifestRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetImageManifestResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	Tag = openapiutil.GetEncodeParam(Tag)
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.SchemaVersion)) {
+		query["SchemaVersion"] = request.SchemaVersion
+	}
+
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetImageManifest"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/manifest"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetImageManifestResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetNamespace(Namespace *string) (_result *GetNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetNamespaceResponse{}
+	_body, _err := client.GetNamespaceWithOptions(Namespace, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetNamespaceWithOptions(Namespace *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetNamespaceResponse, _err error) {
+	Namespace = openapiutil.GetEncodeParam(Namespace)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetNamespace"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/namespace/" + tea.StringValue(Namespace)),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetNamespaceList(request *GetNamespaceListRequest) (_result *GetNamespaceListResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetNamespaceListResponse{}
+	_body, _err := client.GetNamespaceListWithOptions(request, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetNamespaceListWithOptions(request *GetNamespaceListRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetNamespaceListResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Authorize)) {
+		query["Authorize"] = request.Authorize
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Status)) {
+		query["Status"] = request.Status
+	}
+
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetNamespaceList"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/namespace"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetNamespaceListResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRegion(request *GetRegionRequest) (_result *GetRegionResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRegionResponse{}
+	_body, _err := client.GetRegionWithOptions(request, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRegionWithOptions(request *GetRegionRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRegionResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Domain)) {
+		query["Domain"] = request.Domain
+	}
+
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRegion"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/regions"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRegionResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRegionList() (_result *GetRegionListResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRegionListResponse{}
+	_body, _err := client.GetRegionListWithOptions(headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRegionListWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRegionListResponse, _err error) {
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRegionList"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/regions"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRegionListResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepo(RepoNamespace *string, RepoName *string) (_result *GetRepoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoResponse{}
+	_body, _err := client.GetRepoWithOptions(RepoNamespace, RepoName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepo"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName)),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoBuildList(RepoNamespace *string, RepoName *string, request *GetRepoBuildListRequest) (_result *GetRepoBuildListResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoBuildListResponse{}
+	_body, _err := client.GetRepoBuildListWithOptions(RepoNamespace, RepoName, request, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoBuildListWithOptions(RepoNamespace *string, RepoName *string, request *GetRepoBuildListRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoBuildListResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Page)) {
+		query["Page"] = request.Page
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.PageSize)) {
+		query["PageSize"] = request.PageSize
+	}
+
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoBuildList"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/build"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoBuildListResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoBuildRuleList(RepoNamespace *string, RepoName *string) (_result *GetRepoBuildRuleListResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoBuildRuleListResponse{}
+	_body, _err := client.GetRepoBuildRuleListWithOptions(RepoNamespace, RepoName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoBuildRuleListWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoBuildRuleListResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoBuildRuleList"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/rules"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoBuildRuleListResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoBuildStatus(RepoNamespace *string, RepoName *string, BuildId *string) (_result *GetRepoBuildStatusResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoBuildStatusResponse{}
+	_body, _err := client.GetRepoBuildStatusWithOptions(RepoNamespace, RepoName, BuildId, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoBuildStatusWithOptions(RepoNamespace *string, RepoName *string, BuildId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoBuildStatusResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	BuildId = openapiutil.GetEncodeParam(BuildId)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoBuildStatus"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/build/" + tea.StringValue(BuildId) + "/status"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoBuildStatusResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoList(request *GetRepoListRequest) (_result *GetRepoListResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoListResponse{}
+	_body, _err := client.GetRepoListWithOptions(request, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoListWithOptions(request *GetRepoListRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoListResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Page)) {
+		query["Page"] = request.Page
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.PageSize)) {
+		query["PageSize"] = request.PageSize
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Status)) {
+		query["Status"] = request.Status
+	}
+
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoList"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoListResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoListByNamespace(RepoNamespace *string, request *GetRepoListByNamespaceRequest) (_result *GetRepoListByNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoListByNamespaceResponse{}
+	_body, _err := client.GetRepoListByNamespaceWithOptions(RepoNamespace, request, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoListByNamespaceWithOptions(RepoNamespace *string, request *GetRepoListByNamespaceRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoListByNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Page)) {
+		query["Page"] = request.Page
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.PageSize)) {
+		query["PageSize"] = request.PageSize
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Status)) {
+		query["Status"] = request.Status
+	}
+
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoListByNamespace"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace)),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoListByNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoTag(RepoNamespace *string, RepoName *string, Tag *string) (_result *GetRepoTagResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoTagResponse{}
+	_body, _err := client.GetRepoTagWithOptions(RepoNamespace, RepoName, Tag, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoTagWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoTagResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	Tag = openapiutil.GetEncodeParam(Tag)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoTag"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag)),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetRepoTagResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoTagScanList(RepoNamespace *string, RepoName *string, Tag *string, request *GetRepoTagScanListRequest) (_result *GetRepoTagScanListResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoTagScanListResponse{}
+	_body, _err := client.GetRepoTagScanListWithOptions(RepoNamespace, RepoName, Tag, request, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoTagScanListWithOptions(RepoNamespace *string, RepoName *string, Tag *string, request *GetRepoTagScanListRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoTagScanListResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	Tag = openapiutil.GetEncodeParam(Tag)
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Page)) {
+		query["Page"] = request.Page
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.PageSize)) {
+		query["PageSize"] = request.PageSize
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Severity)) {
+		query["Severity"] = request.Severity
+	}
+
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoTagScanList"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/scanResult"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoTagScanListResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoTagScanStatus(RepoNamespace *string, RepoName *string, Tag *string) (_result *GetRepoTagScanStatusResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoTagScanStatusResponse{}
+	_body, _err := client.GetRepoTagScanStatusWithOptions(RepoNamespace, RepoName, Tag, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoTagScanStatusWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoTagScanStatusResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	Tag = openapiutil.GetEncodeParam(Tag)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoTagScanStatus"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/scanStatus"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoTagScanStatusResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoTagScanSummary(RepoNamespace *string, RepoName *string, Tag *string) (_result *GetRepoTagScanSummaryResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoTagScanSummaryResponse{}
+	_body, _err := client.GetRepoTagScanSummaryWithOptions(RepoNamespace, RepoName, Tag, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoTagScanSummaryWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoTagScanSummaryResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	Tag = openapiutil.GetEncodeParam(Tag)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoTagScanSummary"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/scanCount"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoTagScanSummaryResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoTags(RepoNamespace *string, RepoName *string, request *GetRepoTagsRequest) (_result *GetRepoTagsResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoTagsResponse{}
+	_body, _err := client.GetRepoTagsWithOptions(RepoNamespace, RepoName, request, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoTagsWithOptions(RepoNamespace *string, RepoName *string, request *GetRepoTagsRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoTagsResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Page)) {
+		query["Page"] = request.Page
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.PageSize)) {
+		query["PageSize"] = request.PageSize
+	}
+
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoTags"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoTagsResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoWebhook(RepoNamespace *string, RepoName *string) (_result *GetRepoWebhookResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetRepoWebhookResponse{}
+	_body, _err := client.GetRepoWebhookWithOptions(RepoNamespace, RepoName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoWebhookWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoWebhookResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoWebhook"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/webhooks"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetRepoWebhookResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetResourceQuota(ResourceName *string) (_result *GetResourceQuotaResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &GetResourceQuotaResponse{}
+	_body, _err := client.GetResourceQuotaWithOptions(ResourceName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetResourceQuotaWithOptions(ResourceName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetResourceQuotaResponse, _err error) {
+	ResourceName = openapiutil.GetEncodeParam(ResourceName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetResourceQuota"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/resource/" + tea.StringValue(ResourceName)),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &GetResourceQuotaResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) StartImageScan(RepoNamespace *string, RepoName *string, Tag *string) (_result *StartImageScanResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &StartImageScanResponse{}
+	_body, _err := client.StartImageScanWithOptions(RepoNamespace, RepoName, Tag, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) StartImageScanWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *StartImageScanResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	Tag = openapiutil.GetEncodeParam(Tag)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("StartImageScan"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/scan"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &StartImageScanResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) StartRepoBuildByRule(RepoNamespace *string, RepoName *string, BuildRuleId *string) (_result *StartRepoBuildByRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &StartRepoBuildByRuleResponse{}
+	_body, _err := client.StartRepoBuildByRuleWithOptions(RepoNamespace, RepoName, BuildRuleId, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) StartRepoBuildByRuleWithOptions(RepoNamespace *string, RepoName *string, BuildRuleId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *StartRepoBuildByRuleResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	BuildRuleId = openapiutil.GetEncodeParam(BuildRuleId)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("StartRepoBuildByRule"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/rules/" + tea.StringValue(BuildRuleId) + "/build"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &StartRepoBuildByRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) UpdateNamespace(Namespace *string) (_result *UpdateNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &UpdateNamespaceResponse{}
+	_body, _err := client.UpdateNamespaceWithOptions(Namespace, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) UpdateNamespaceWithOptions(Namespace *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *UpdateNamespaceResponse, _err error) {
+	Namespace = openapiutil.GetEncodeParam(Namespace)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("UpdateNamespace"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/namespace/" + tea.StringValue(Namespace)),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &UpdateNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) UpdateRepo(RepoNamespace *string, RepoName *string) (_result *UpdateRepoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &UpdateRepoResponse{}
+	_body, _err := client.UpdateRepoWithOptions(RepoNamespace, RepoName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) UpdateRepoWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *UpdateRepoResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("UpdateRepo"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName)),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &UpdateRepoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) UpdateRepoBuildRule(RepoNamespace *string, RepoName *string, BuildRuleId *string) (_result *UpdateRepoBuildRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &UpdateRepoBuildRuleResponse{}
+	_body, _err := client.UpdateRepoBuildRuleWithOptions(RepoNamespace, RepoName, BuildRuleId, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) UpdateRepoBuildRuleWithOptions(RepoNamespace *string, RepoName *string, BuildRuleId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *UpdateRepoBuildRuleResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	BuildRuleId = openapiutil.GetEncodeParam(BuildRuleId)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("UpdateRepoBuildRule"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/rules/" + tea.StringValue(BuildRuleId)),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &UpdateRepoBuildRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) UpdateRepoWebhook(RepoNamespace *string, RepoName *string, WebhookId *string) (_result *UpdateRepoWebhookResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &UpdateRepoWebhookResponse{}
+	_body, _err := client.UpdateRepoWebhookWithOptions(RepoNamespace, RepoName, WebhookId, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) UpdateRepoWebhookWithOptions(RepoNamespace *string, RepoName *string, WebhookId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *UpdateRepoWebhookResponse, _err error) {
+	RepoNamespace =
openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + WebhookId = openapiutil.GetEncodeParam(WebhookId) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("UpdateRepoWebhook"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/webhooks/" + tea.StringValue(WebhookId)), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &UpdateRepoWebhookResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateUserInfo() (_result *UpdateUserInfoResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &UpdateUserInfoResponse{} + _body, _err := client.UpdateUserInfoWithOptions(headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateUserInfoWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *UpdateUserInfoResponse, _err error) { + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("UpdateUserInfo"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/users"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &UpdateUserInfoResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} diff --git a/vendor/github.com/alibabacloud-go/cr-20181201/LICENSE b/vendor/github.com/alibabacloud-go/cr-20181201/LICENSE new file mode 100644 index 000000000..0c44dcefe --- /dev/null +++ b/vendor/github.com/alibabacloud-go/cr-20181201/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/alibabacloud-go/cr-20181201/client/client.go b/vendor/github.com/alibabacloud-go/cr-20181201/client/client.go new file mode 100644 index 000000000..abe198809 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/cr-20181201/client/client.go @@ -0,0 +1,16031 @@ +// This file is auto-generated, don't edit it. Thanks. 
+/** + * + */ +package client + +import ( + openapi "github.com/alibabacloud-go/darabonba-openapi/client" + endpointutil "github.com/alibabacloud-go/endpoint-util/service" + openapiutil "github.com/alibabacloud-go/openapi-util/service" + util "github.com/alibabacloud-go/tea-utils/service" + "github.com/alibabacloud-go/tea/tea" +) + +type CancelArtifactBuildTaskRequest struct { + BuildTaskId *string `json:"BuildTaskId,omitempty" xml:"BuildTaskId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s CancelArtifactBuildTaskRequest) String() string { + return tea.Prettify(s) +} + +func (s CancelArtifactBuildTaskRequest) GoString() string { + return s.String() +} + +func (s *CancelArtifactBuildTaskRequest) SetBuildTaskId(v string) *CancelArtifactBuildTaskRequest { + s.BuildTaskId = &v + return s +} + +func (s *CancelArtifactBuildTaskRequest) SetInstanceId(v string) *CancelArtifactBuildTaskRequest { + s.InstanceId = &v + return s +} + +type CancelArtifactBuildTaskResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CancelArtifactBuildTaskResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CancelArtifactBuildTaskResponseBody) GoString() string { + return s.String() +} + +func (s *CancelArtifactBuildTaskResponseBody) SetCode(v string) *CancelArtifactBuildTaskResponseBody { + s.Code = &v + return s +} + +func (s *CancelArtifactBuildTaskResponseBody) SetIsSuccess(v bool) *CancelArtifactBuildTaskResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CancelArtifactBuildTaskResponseBody) SetRequestId(v string) *CancelArtifactBuildTaskResponseBody { + s.RequestId = &v + return s +} + +type CancelArtifactBuildTaskResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CancelArtifactBuildTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CancelArtifactBuildTaskResponse) String() string { + return tea.Prettify(s) +} + +func (s CancelArtifactBuildTaskResponse) GoString() string { + return s.String() +} + +func (s *CancelArtifactBuildTaskResponse) SetHeaders(v map[string]*string) *CancelArtifactBuildTaskResponse { + s.Headers = v + return s +} + +func (s *CancelArtifactBuildTaskResponse) SetBody(v *CancelArtifactBuildTaskResponseBody) *CancelArtifactBuildTaskResponse { + s.Body = v + return s +} + +type CancelRepoBuildRecordRequest struct { + BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s CancelRepoBuildRecordRequest) String() string { + return tea.Prettify(s) +} + +func (s CancelRepoBuildRecordRequest) GoString() string { + return s.String() +} + +func (s *CancelRepoBuildRecordRequest) SetBuildRecordId(v string) *CancelRepoBuildRecordRequest { + s.BuildRecordId = &v + return s +} + +func (s *CancelRepoBuildRecordRequest) SetInstanceId(v string) *CancelRepoBuildRecordRequest { + s.InstanceId = &v + return s +} + +func (s *CancelRepoBuildRecordRequest) SetRepoId(v string) *CancelRepoBuildRecordRequest { + s.RepoId = &v + return s +} + +type CancelRepoBuildRecordResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess 
*bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CancelRepoBuildRecordResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CancelRepoBuildRecordResponseBody) GoString() string { + return s.String() +} + +func (s *CancelRepoBuildRecordResponseBody) SetCode(v string) *CancelRepoBuildRecordResponseBody { + s.Code = &v + return s +} + +func (s *CancelRepoBuildRecordResponseBody) SetIsSuccess(v bool) *CancelRepoBuildRecordResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CancelRepoBuildRecordResponseBody) SetRequestId(v string) *CancelRepoBuildRecordResponseBody { + s.RequestId = &v + return s +} + +type CancelRepoBuildRecordResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CancelRepoBuildRecordResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CancelRepoBuildRecordResponse) String() string { + return tea.Prettify(s) +} + +func (s CancelRepoBuildRecordResponse) GoString() string { + return s.String() +} + +func (s *CancelRepoBuildRecordResponse) SetHeaders(v map[string]*string) *CancelRepoBuildRecordResponse { + s.Headers = v + return s +} + +func (s *CancelRepoBuildRecordResponse) SetBody(v *CancelRepoBuildRecordResponseBody) *CancelRepoBuildRecordResponse { + s.Body = v + return s +} + +type CreateBuildRecordByRuleRequest struct { + BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s CreateBuildRecordByRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateBuildRecordByRuleRequest) GoString() string { + return s.String() +} + +func (s *CreateBuildRecordByRuleRequest) SetBuildRuleId(v string) *CreateBuildRecordByRuleRequest { + s.BuildRuleId = &v + return s +} + +func (s *CreateBuildRecordByRuleRequest) SetInstanceId(v string) *CreateBuildRecordByRuleRequest { + s.InstanceId = &v + return s +} + +func (s *CreateBuildRecordByRuleRequest) SetRepoId(v string) *CreateBuildRecordByRuleRequest { + s.RepoId = &v + return s +} + +type CreateBuildRecordByRuleResponseBody struct { + BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateBuildRecordByRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateBuildRecordByRuleResponseBody) GoString() string { + return s.String() +} + +func (s *CreateBuildRecordByRuleResponseBody) SetBuildRecordId(v string) *CreateBuildRecordByRuleResponseBody { + s.BuildRecordId = &v + return s +} + +func (s *CreateBuildRecordByRuleResponseBody) SetCode(v string) *CreateBuildRecordByRuleResponseBody { + s.Code = &v + return s +} + +func (s *CreateBuildRecordByRuleResponseBody) SetIsSuccess(v bool) *CreateBuildRecordByRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateBuildRecordByRuleResponseBody) SetRequestId(v string) *CreateBuildRecordByRuleResponseBody { + s.RequestId = &v + return s +} + +type CreateBuildRecordByRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body 
*CreateBuildRecordByRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateBuildRecordByRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateBuildRecordByRuleResponse) GoString() string { + return s.String() +} + +func (s *CreateBuildRecordByRuleResponse) SetHeaders(v map[string]*string) *CreateBuildRecordByRuleResponse { + s.Headers = v + return s +} + +func (s *CreateBuildRecordByRuleResponse) SetBody(v *CreateBuildRecordByRuleResponseBody) *CreateBuildRecordByRuleResponse { + s.Body = v + return s +} + +type CreateChainRequest struct { + ChainConfig *string `json:"ChainConfig,omitempty" xml:"ChainConfig,omitempty"` + Description *string `json:"Description,omitempty" xml:"Description,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Name *string `json:"Name,omitempty" xml:"Name,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s CreateChainRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateChainRequest) GoString() string { + return s.String() +} + +func (s *CreateChainRequest) SetChainConfig(v string) *CreateChainRequest { + s.ChainConfig = &v + return s +} + +func (s *CreateChainRequest) SetDescription(v string) *CreateChainRequest { + s.Description = &v + return s +} + +func (s *CreateChainRequest) SetInstanceId(v string) *CreateChainRequest { + s.InstanceId = &v + return s +} + +func (s *CreateChainRequest) SetName(v string) *CreateChainRequest { + s.Name = &v + return s +} + +func (s *CreateChainRequest) SetRepoName(v string) *CreateChainRequest { + s.RepoName = &v + return s +} + +func (s *CreateChainRequest) SetRepoNamespaceName(v string) *CreateChainRequest { + s.RepoNamespaceName = &v + return s +} + +type CreateChainResponseBody struct { + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateChainResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateChainResponseBody) GoString() string { + return s.String() +} + +func (s *CreateChainResponseBody) SetChainId(v string) *CreateChainResponseBody { + s.ChainId = &v + return s +} + +func (s *CreateChainResponseBody) SetCode(v string) *CreateChainResponseBody { + s.Code = &v + return s +} + +func (s *CreateChainResponseBody) SetIsSuccess(v bool) *CreateChainResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateChainResponseBody) SetRequestId(v string) *CreateChainResponseBody { + s.RequestId = &v + return s +} + +type CreateChainResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateChainResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateChainResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateChainResponse) GoString() string { + return s.String() +} + +func (s *CreateChainResponse) SetHeaders(v map[string]*string) *CreateChainResponse { + s.Headers = v + return s +} + +func (s *CreateChainResponse) SetBody(v *CreateChainResponseBody) *CreateChainResponse { + s.Body = v + return s +} + +type CreateChartNamespaceRequest struct { + AutoCreateRepo *bool 
`json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s CreateChartNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateChartNamespaceRequest) GoString() string { + return s.String() +} + +func (s *CreateChartNamespaceRequest) SetAutoCreateRepo(v bool) *CreateChartNamespaceRequest { + s.AutoCreateRepo = &v + return s +} + +func (s *CreateChartNamespaceRequest) SetDefaultRepoType(v string) *CreateChartNamespaceRequest { + s.DefaultRepoType = &v + return s +} + +func (s *CreateChartNamespaceRequest) SetInstanceId(v string) *CreateChartNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *CreateChartNamespaceRequest) SetNamespaceName(v string) *CreateChartNamespaceRequest { + s.NamespaceName = &v + return s +} + +func (s *CreateChartNamespaceRequest) SetResourceGroupId(v string) *CreateChartNamespaceRequest { + s.ResourceGroupId = &v + return s +} + +type CreateChartNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateChartNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateChartNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *CreateChartNamespaceResponseBody) SetCode(v string) *CreateChartNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *CreateChartNamespaceResponseBody) SetIsSuccess(v bool) *CreateChartNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateChartNamespaceResponseBody) SetRequestId(v string) *CreateChartNamespaceResponseBody { + s.RequestId = &v + return s +} + +type CreateChartNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateChartNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateChartNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateChartNamespaceResponse) GoString() string { + return s.String() +} + +func (s *CreateChartNamespaceResponse) SetHeaders(v map[string]*string) *CreateChartNamespaceResponse { + s.Headers = v + return s +} + +func (s *CreateChartNamespaceResponse) SetBody(v *CreateChartNamespaceResponseBody) *CreateChartNamespaceResponse { + s.Body = v + return s +} + +type CreateChartRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` +} + +func (s CreateChartRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateChartRepositoryRequest) GoString() string { + return s.String() +} + +func (s *CreateChartRepositoryRequest) SetInstanceId(v string) *CreateChartRepositoryRequest { + s.InstanceId = &v + return s +} + 
+func (s *CreateChartRepositoryRequest) SetRepoName(v string) *CreateChartRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *CreateChartRepositoryRequest) SetRepoNamespaceName(v string) *CreateChartRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +func (s *CreateChartRepositoryRequest) SetRepoType(v string) *CreateChartRepositoryRequest { + s.RepoType = &v + return s +} + +func (s *CreateChartRepositoryRequest) SetSummary(v string) *CreateChartRepositoryRequest { + s.Summary = &v + return s +} + +type CreateChartRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateChartRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateChartRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *CreateChartRepositoryResponseBody) SetCode(v string) *CreateChartRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *CreateChartRepositoryResponseBody) SetIsSuccess(v bool) *CreateChartRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateChartRepositoryResponseBody) SetRepoId(v string) *CreateChartRepositoryResponseBody { + s.RepoId = &v + return s +} + +func (s *CreateChartRepositoryResponseBody) SetRequestId(v string) *CreateChartRepositoryResponseBody { + s.RequestId = &v + return s +} + +type CreateChartRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateChartRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateChartRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateChartRepositoryResponse) GoString() string { + return s.String() +} + +func (s *CreateChartRepositoryResponse) SetHeaders(v map[string]*string) *CreateChartRepositoryResponse { + s.Headers = v + return s +} + +func (s *CreateChartRepositoryResponse) SetBody(v *CreateChartRepositoryResponseBody) *CreateChartRepositoryResponse { + s.Body = v + return s +} + +type CreateInstanceEndpointAclPolicyRequest struct { + Comment *string `json:"Comment,omitempty" xml:"Comment,omitempty"` + EndpointType *string `json:"EndpointType,omitempty" xml:"EndpointType,omitempty"` + Entry *string `json:"Entry,omitempty" xml:"Entry,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` +} + +func (s CreateInstanceEndpointAclPolicyRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceEndpointAclPolicyRequest) GoString() string { + return s.String() +} + +func (s *CreateInstanceEndpointAclPolicyRequest) SetComment(v string) *CreateInstanceEndpointAclPolicyRequest { + s.Comment = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyRequest) SetEndpointType(v string) *CreateInstanceEndpointAclPolicyRequest { + s.EndpointType = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyRequest) SetEntry(v string) *CreateInstanceEndpointAclPolicyRequest { + s.Entry = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyRequest) SetInstanceId(v string) *CreateInstanceEndpointAclPolicyRequest { + s.InstanceId = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyRequest) 
SetModuleName(v string) *CreateInstanceEndpointAclPolicyRequest { + s.ModuleName = &v + return s +} + +type CreateInstanceEndpointAclPolicyResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateInstanceEndpointAclPolicyResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceEndpointAclPolicyResponseBody) GoString() string { + return s.String() +} + +func (s *CreateInstanceEndpointAclPolicyResponseBody) SetCode(v string) *CreateInstanceEndpointAclPolicyResponseBody { + s.Code = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyResponseBody) SetIsSuccess(v bool) *CreateInstanceEndpointAclPolicyResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyResponseBody) SetRequestId(v string) *CreateInstanceEndpointAclPolicyResponseBody { + s.RequestId = &v + return s +} + +type CreateInstanceEndpointAclPolicyResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateInstanceEndpointAclPolicyResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateInstanceEndpointAclPolicyResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceEndpointAclPolicyResponse) GoString() string { + return s.String() +} + +func (s *CreateInstanceEndpointAclPolicyResponse) SetHeaders(v map[string]*string) *CreateInstanceEndpointAclPolicyResponse { + s.Headers = v + return s +} + +func (s *CreateInstanceEndpointAclPolicyResponse) SetBody(v *CreateInstanceEndpointAclPolicyResponseBody) *CreateInstanceEndpointAclPolicyResponse { + s.Body = v + return s +} + +type CreateInstanceVpcEndpointLinkedVpcRequest struct { + EnableCreateDNSRecordInPvzt *bool `json:"EnableCreateDNSRecordInPvzt,omitempty" xml:"EnableCreateDNSRecordInPvzt,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` + VpcId *string `json:"VpcId,omitempty" xml:"VpcId,omitempty"` + VswitchId *string `json:"VswitchId,omitempty" xml:"VswitchId,omitempty"` +} + +func (s CreateInstanceVpcEndpointLinkedVpcRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceVpcEndpointLinkedVpcRequest) GoString() string { + return s.String() +} + +func (s *CreateInstanceVpcEndpointLinkedVpcRequest) SetEnableCreateDNSRecordInPvzt(v bool) *CreateInstanceVpcEndpointLinkedVpcRequest { + s.EnableCreateDNSRecordInPvzt = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcRequest) SetInstanceId(v string) *CreateInstanceVpcEndpointLinkedVpcRequest { + s.InstanceId = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcRequest) SetModuleName(v string) *CreateInstanceVpcEndpointLinkedVpcRequest { + s.ModuleName = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcRequest) SetVpcId(v string) *CreateInstanceVpcEndpointLinkedVpcRequest { + s.VpcId = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcRequest) SetVswitchId(v string) *CreateInstanceVpcEndpointLinkedVpcRequest { + s.VswitchId = &v + return s +} + +type CreateInstanceVpcEndpointLinkedVpcResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string 
`json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateInstanceVpcEndpointLinkedVpcResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceVpcEndpointLinkedVpcResponseBody) GoString() string { + return s.String() +} + +func (s *CreateInstanceVpcEndpointLinkedVpcResponseBody) SetCode(v string) *CreateInstanceVpcEndpointLinkedVpcResponseBody { + s.Code = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcResponseBody) SetIsSuccess(v bool) *CreateInstanceVpcEndpointLinkedVpcResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcResponseBody) SetRequestId(v string) *CreateInstanceVpcEndpointLinkedVpcResponseBody { + s.RequestId = &v + return s +} + +type CreateInstanceVpcEndpointLinkedVpcResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateInstanceVpcEndpointLinkedVpcResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateInstanceVpcEndpointLinkedVpcResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceVpcEndpointLinkedVpcResponse) GoString() string { + return s.String() +} + +func (s *CreateInstanceVpcEndpointLinkedVpcResponse) SetHeaders(v map[string]*string) *CreateInstanceVpcEndpointLinkedVpcResponse { + s.Headers = v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcResponse) SetBody(v *CreateInstanceVpcEndpointLinkedVpcResponseBody) *CreateInstanceVpcEndpointLinkedVpcResponse { + s.Body = v + return s +} + +type CreateNamespaceRequest struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s CreateNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateNamespaceRequest) GoString() string { + return s.String() +} + +func (s *CreateNamespaceRequest) SetAutoCreateRepo(v bool) *CreateNamespaceRequest { + s.AutoCreateRepo = &v + return s +} + +func (s *CreateNamespaceRequest) SetDefaultRepoType(v string) *CreateNamespaceRequest { + s.DefaultRepoType = &v + return s +} + +func (s *CreateNamespaceRequest) SetInstanceId(v string) *CreateNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *CreateNamespaceRequest) SetNamespaceName(v string) *CreateNamespaceRequest { + s.NamespaceName = &v + return s +} + +func (s *CreateNamespaceRequest) SetResourceGroupId(v string) *CreateNamespaceRequest { + s.ResourceGroupId = &v + return s +} + +type CreateNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *CreateNamespaceResponseBody) SetCode(v string) *CreateNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *CreateNamespaceResponseBody) SetIsSuccess(v bool) *CreateNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s 
*CreateNamespaceResponseBody) SetRequestId(v string) *CreateNamespaceResponseBody { + s.RequestId = &v + return s +} + +type CreateNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateNamespaceResponse) GoString() string { + return s.String() +} + +func (s *CreateNamespaceResponse) SetHeaders(v map[string]*string) *CreateNamespaceResponse { + s.Headers = v + return s +} + +func (s *CreateNamespaceResponse) SetBody(v *CreateNamespaceResponseBody) *CreateNamespaceResponse { + s.Body = v + return s +} + +type CreateRepoBuildRuleRequest struct { + BuildArgs []*string `json:"BuildArgs,omitempty" xml:"BuildArgs,omitempty" type:"Repeated"` + DockerfileLocation *string `json:"DockerfileLocation,omitempty" xml:"DockerfileLocation,omitempty"` + DockerfileName *string `json:"DockerfileName,omitempty" xml:"DockerfileName,omitempty"` + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Platforms []*string `json:"Platforms,omitempty" xml:"Platforms,omitempty" type:"Repeated"` + PushName *string `json:"PushName,omitempty" xml:"PushName,omitempty"` + PushType *string `json:"PushType,omitempty" xml:"PushType,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s CreateRepoBuildRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoBuildRuleRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoBuildRuleRequest) SetBuildArgs(v []*string) *CreateRepoBuildRuleRequest { + s.BuildArgs = v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetDockerfileLocation(v string) *CreateRepoBuildRuleRequest { + s.DockerfileLocation = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetDockerfileName(v string) *CreateRepoBuildRuleRequest { + s.DockerfileName = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetImageTag(v string) *CreateRepoBuildRuleRequest { + s.ImageTag = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetInstanceId(v string) *CreateRepoBuildRuleRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetPlatforms(v []*string) *CreateRepoBuildRuleRequest { + s.Platforms = v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetPushName(v string) *CreateRepoBuildRuleRequest { + s.PushName = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetPushType(v string) *CreateRepoBuildRuleRequest { + s.PushType = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetRepoId(v string) *CreateRepoBuildRuleRequest { + s.RepoId = &v + return s +} + +type CreateRepoBuildRuleResponseBody struct { + BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateRepoBuildRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoBuildRuleResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoBuildRuleResponseBody) SetBuildRuleId(v string) *CreateRepoBuildRuleResponseBody { + s.BuildRuleId = &v + return s +} + +func (s 
*CreateRepoBuildRuleResponseBody) SetCode(v string) *CreateRepoBuildRuleResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoBuildRuleResponseBody) SetIsSuccess(v bool) *CreateRepoBuildRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoBuildRuleResponseBody) SetRequestId(v string) *CreateRepoBuildRuleResponseBody { + s.RequestId = &v + return s +} + +type CreateRepoBuildRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoBuildRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoBuildRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoBuildRuleResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoBuildRuleResponse) SetHeaders(v map[string]*string) *CreateRepoBuildRuleResponse { + s.Headers = v + return s +} + +func (s *CreateRepoBuildRuleResponse) SetBody(v *CreateRepoBuildRuleResponseBody) *CreateRepoBuildRuleResponse { + s.Body = v + return s +} + +type CreateRepoSourceCodeRepoRequest struct { + AutoBuild *bool `json:"AutoBuild,omitempty" xml:"AutoBuild,omitempty"` + CodeRepoName *string `json:"CodeRepoName,omitempty" xml:"CodeRepoName,omitempty"` + CodeRepoNamespaceName *string `json:"CodeRepoNamespaceName,omitempty" xml:"CodeRepoNamespaceName,omitempty"` + CodeRepoType *string `json:"CodeRepoType,omitempty" xml:"CodeRepoType,omitempty"` + DisableCacheBuild *bool `json:"DisableCacheBuild,omitempty" xml:"DisableCacheBuild,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + OverseaBuild *bool `json:"OverseaBuild,omitempty" xml:"OverseaBuild,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s CreateRepoSourceCodeRepoRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSourceCodeRepoRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoSourceCodeRepoRequest) SetAutoBuild(v bool) *CreateRepoSourceCodeRepoRequest { + s.AutoBuild = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetCodeRepoName(v string) *CreateRepoSourceCodeRepoRequest { + s.CodeRepoName = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetCodeRepoNamespaceName(v string) *CreateRepoSourceCodeRepoRequest { + s.CodeRepoNamespaceName = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetCodeRepoType(v string) *CreateRepoSourceCodeRepoRequest { + s.CodeRepoType = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetDisableCacheBuild(v bool) *CreateRepoSourceCodeRepoRequest { + s.DisableCacheBuild = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetInstanceId(v string) *CreateRepoSourceCodeRepoRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetOverseaBuild(v bool) *CreateRepoSourceCodeRepoRequest { + s.OverseaBuild = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetRepoId(v string) *CreateRepoSourceCodeRepoRequest { + s.RepoId = &v + return s +} + +type CreateRepoSourceCodeRepoResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateRepoSourceCodeRepoResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSourceCodeRepoResponseBody) GoString() string { + return 
s.String() +} + +func (s *CreateRepoSourceCodeRepoResponseBody) SetCode(v string) *CreateRepoSourceCodeRepoResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoSourceCodeRepoResponseBody) SetIsSuccess(v bool) *CreateRepoSourceCodeRepoResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoSourceCodeRepoResponseBody) SetRequestId(v string) *CreateRepoSourceCodeRepoResponseBody { + s.RequestId = &v + return s +} + +type CreateRepoSourceCodeRepoResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoSourceCodeRepoResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoSourceCodeRepoResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSourceCodeRepoResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoSourceCodeRepoResponse) SetHeaders(v map[string]*string) *CreateRepoSourceCodeRepoResponse { + s.Headers = v + return s +} + +func (s *CreateRepoSourceCodeRepoResponse) SetBody(v *CreateRepoSourceCodeRepoResponseBody) *CreateRepoSourceCodeRepoResponse { + s.Body = v + return s +} + +type CreateRepoSyncRuleRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + SyncRuleName *string `json:"SyncRuleName,omitempty" xml:"SyncRuleName,omitempty"` + SyncScope *string `json:"SyncScope,omitempty" xml:"SyncScope,omitempty"` + SyncTrigger *string `json:"SyncTrigger,omitempty" xml:"SyncTrigger,omitempty"` + TagFilter *string `json:"TagFilter,omitempty" xml:"TagFilter,omitempty"` + TargetInstanceId *string `json:"TargetInstanceId,omitempty" xml:"TargetInstanceId,omitempty"` + TargetNamespaceName *string `json:"TargetNamespaceName,omitempty" xml:"TargetNamespaceName,omitempty"` + TargetRegionId *string `json:"TargetRegionId,omitempty" xml:"TargetRegionId,omitempty"` + TargetRepoName *string `json:"TargetRepoName,omitempty" xml:"TargetRepoName,omitempty"` + TargetUserId *string `json:"TargetUserId,omitempty" xml:"TargetUserId,omitempty"` +} + +func (s CreateRepoSyncRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncRuleRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncRuleRequest) SetInstanceId(v string) *CreateRepoSyncRuleRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetNamespaceName(v string) *CreateRepoSyncRuleRequest { + s.NamespaceName = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetRepoName(v string) *CreateRepoSyncRuleRequest { + s.RepoName = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetSyncRuleName(v string) *CreateRepoSyncRuleRequest { + s.SyncRuleName = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetSyncScope(v string) *CreateRepoSyncRuleRequest { + s.SyncScope = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetSyncTrigger(v string) *CreateRepoSyncRuleRequest { + s.SyncTrigger = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTagFilter(v string) *CreateRepoSyncRuleRequest { + s.TagFilter = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTargetInstanceId(v string) *CreateRepoSyncRuleRequest { + s.TargetInstanceId = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTargetNamespaceName(v string) *CreateRepoSyncRuleRequest { + s.TargetNamespaceName = 
&v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTargetRegionId(v string) *CreateRepoSyncRuleRequest { + s.TargetRegionId = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTargetRepoName(v string) *CreateRepoSyncRuleRequest { + s.TargetRepoName = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTargetUserId(v string) *CreateRepoSyncRuleRequest { + s.TargetUserId = &v + return s +} + +type CreateRepoSyncRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"` +} + +func (s CreateRepoSyncRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncRuleResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncRuleResponseBody) SetCode(v string) *CreateRepoSyncRuleResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoSyncRuleResponseBody) SetIsSuccess(v bool) *CreateRepoSyncRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoSyncRuleResponseBody) SetRequestId(v string) *CreateRepoSyncRuleResponseBody { + s.RequestId = &v + return s +} + +func (s *CreateRepoSyncRuleResponseBody) SetSyncRuleId(v string) *CreateRepoSyncRuleResponseBody { + s.SyncRuleId = &v + return s +} + +type CreateRepoSyncRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoSyncRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoSyncRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncRuleResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncRuleResponse) SetHeaders(v map[string]*string) *CreateRepoSyncRuleResponse { + s.Headers = v + return s +} + +func (s *CreateRepoSyncRuleResponse) SetBody(v *CreateRepoSyncRuleResponseBody) *CreateRepoSyncRuleResponse { + s.Body = v + return s +} + +type CreateRepoSyncTaskRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Override *bool `json:"Override,omitempty" xml:"Override,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` + TargetInstanceId *string `json:"TargetInstanceId,omitempty" xml:"TargetInstanceId,omitempty"` + TargetNamespace *string `json:"TargetNamespace,omitempty" xml:"TargetNamespace,omitempty"` + TargetRegionId *string `json:"TargetRegionId,omitempty" xml:"TargetRegionId,omitempty"` + TargetRepoName *string `json:"TargetRepoName,omitempty" xml:"TargetRepoName,omitempty"` + TargetTag *string `json:"TargetTag,omitempty" xml:"TargetTag,omitempty"` + TargetUserId *string `json:"TargetUserId,omitempty" xml:"TargetUserId,omitempty"` +} + +func (s CreateRepoSyncTaskRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskRequest) SetInstanceId(v string) *CreateRepoSyncTaskRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetOverride(v bool) *CreateRepoSyncTaskRequest { + s.Override = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetRepoId(v string) *CreateRepoSyncTaskRequest { + s.RepoId = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) 
SetTag(v string) *CreateRepoSyncTaskRequest { + s.Tag = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetInstanceId(v string) *CreateRepoSyncTaskRequest { + s.TargetInstanceId = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetNamespace(v string) *CreateRepoSyncTaskRequest { + s.TargetNamespace = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetRegionId(v string) *CreateRepoSyncTaskRequest { + s.TargetRegionId = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetRepoName(v string) *CreateRepoSyncTaskRequest { + s.TargetRepoName = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetTag(v string) *CreateRepoSyncTaskRequest { + s.TargetTag = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetUserId(v string) *CreateRepoSyncTaskRequest { + s.TargetUserId = &v + return s +} + +type CreateRepoSyncTaskResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + SyncTaskId *string `json:"SyncTaskId,omitempty" xml:"SyncTaskId,omitempty"` +} + +func (s CreateRepoSyncTaskResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskResponseBody) SetCode(v string) *CreateRepoSyncTaskResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoSyncTaskResponseBody) SetIsSuccess(v bool) *CreateRepoSyncTaskResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoSyncTaskResponseBody) SetRequestId(v string) *CreateRepoSyncTaskResponseBody { + s.RequestId = &v + return s +} + +func (s *CreateRepoSyncTaskResponseBody) SetSyncTaskId(v string) *CreateRepoSyncTaskResponseBody { + s.SyncTaskId = &v + return s +} + +type CreateRepoSyncTaskResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoSyncTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoSyncTaskResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskResponse) SetHeaders(v map[string]*string) *CreateRepoSyncTaskResponse { + s.Headers = v + return s +} + +func (s *CreateRepoSyncTaskResponse) SetBody(v *CreateRepoSyncTaskResponseBody) *CreateRepoSyncTaskResponse { + s.Body = v + return s +} + +type CreateRepoSyncTaskByRuleRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s CreateRepoSyncTaskByRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskByRuleRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskByRuleRequest) SetInstanceId(v string) *CreateRepoSyncTaskByRuleRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleRequest) SetRepoId(v string) *CreateRepoSyncTaskByRuleRequest { + s.RepoId = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleRequest) SetSyncRuleId(v string) *CreateRepoSyncTaskByRuleRequest { + s.SyncRuleId = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleRequest) 
SetTag(v string) *CreateRepoSyncTaskByRuleRequest { + s.Tag = &v + return s +} + +type CreateRepoSyncTaskByRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + SyncTaskId *string `json:"SyncTaskId,omitempty" xml:"SyncTaskId,omitempty"` +} + +func (s CreateRepoSyncTaskByRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskByRuleResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskByRuleResponseBody) SetCode(v string) *CreateRepoSyncTaskByRuleResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleResponseBody) SetIsSuccess(v bool) *CreateRepoSyncTaskByRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleResponseBody) SetRequestId(v string) *CreateRepoSyncTaskByRuleResponseBody { + s.RequestId = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleResponseBody) SetSyncTaskId(v string) *CreateRepoSyncTaskByRuleResponseBody { + s.SyncTaskId = &v + return s +} + +type CreateRepoSyncTaskByRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoSyncTaskByRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoSyncTaskByRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskByRuleResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskByRuleResponse) SetHeaders(v map[string]*string) *CreateRepoSyncTaskByRuleResponse { + s.Headers = v + return s +} + +func (s *CreateRepoSyncTaskByRuleResponse) SetBody(v *CreateRepoSyncTaskByRuleResponseBody) *CreateRepoSyncTaskByRuleResponse { + s.Body = v + return s +} + +type CreateRepoTagRequest struct { + FromTag *string `json:"FromTag,omitempty" xml:"FromTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + ToTag *string `json:"ToTag,omitempty" xml:"ToTag,omitempty"` +} + +func (s CreateRepoTagRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoTagRequest) SetFromTag(v string) *CreateRepoTagRequest { + s.FromTag = &v + return s +} + +func (s *CreateRepoTagRequest) SetInstanceId(v string) *CreateRepoTagRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoTagRequest) SetNamespaceName(v string) *CreateRepoTagRequest { + s.NamespaceName = &v + return s +} + +func (s *CreateRepoTagRequest) SetRepoName(v string) *CreateRepoTagRequest { + s.RepoName = &v + return s +} + +func (s *CreateRepoTagRequest) SetToTag(v string) *CreateRepoTagRequest { + s.ToTag = &v + return s +} + +type CreateRepoTagResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateRepoTagResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoTagResponseBody) SetCode(v string) *CreateRepoTagResponseBody { 
+ s.Code = &v + return s +} + +func (s *CreateRepoTagResponseBody) SetIsSuccess(v bool) *CreateRepoTagResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoTagResponseBody) SetRequestId(v string) *CreateRepoTagResponseBody { + s.RequestId = &v + return s +} + +type CreateRepoTagResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoTagResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoTagResponse) SetHeaders(v map[string]*string) *CreateRepoTagResponse { + s.Headers = v + return s +} + +func (s *CreateRepoTagResponse) SetBody(v *CreateRepoTagResponseBody) *CreateRepoTagResponse { + s.Body = v + return s +} + +type CreateRepoTagScanTaskRequest struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + ScanService *string `json:"ScanService,omitempty" xml:"ScanService,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s CreateRepoTagScanTaskRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagScanTaskRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoTagScanTaskRequest) SetDigest(v string) *CreateRepoTagScanTaskRequest { + s.Digest = &v + return s +} + +func (s *CreateRepoTagScanTaskRequest) SetInstanceId(v string) *CreateRepoTagScanTaskRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoTagScanTaskRequest) SetRepoId(v string) *CreateRepoTagScanTaskRequest { + s.RepoId = &v + return s +} + +func (s *CreateRepoTagScanTaskRequest) SetScanService(v string) *CreateRepoTagScanTaskRequest { + s.ScanService = &v + return s +} + +func (s *CreateRepoTagScanTaskRequest) SetTag(v string) *CreateRepoTagScanTaskRequest { + s.Tag = &v + return s +} + +type CreateRepoTagScanTaskResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateRepoTagScanTaskResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagScanTaskResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoTagScanTaskResponseBody) SetCode(v string) *CreateRepoTagScanTaskResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoTagScanTaskResponseBody) SetIsSuccess(v bool) *CreateRepoTagScanTaskResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoTagScanTaskResponseBody) SetRequestId(v string) *CreateRepoTagScanTaskResponseBody { + s.RequestId = &v + return s +} + +type CreateRepoTagScanTaskResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoTagScanTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoTagScanTaskResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagScanTaskResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoTagScanTaskResponse) SetHeaders(v map[string]*string) *CreateRepoTagScanTaskResponse { + s.Headers = v + return s +} + +func (s 
*CreateRepoTagScanTaskResponse) SetBody(v *CreateRepoTagScanTaskResponseBody) *CreateRepoTagScanTaskResponse { + s.Body = v + return s +} + +type CreateRepoTriggerRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + TriggerName *string `json:"TriggerName,omitempty" xml:"TriggerName,omitempty"` + TriggerTag *string `json:"TriggerTag,omitempty" xml:"TriggerTag,omitempty"` + TriggerType *string `json:"TriggerType,omitempty" xml:"TriggerType,omitempty"` + TriggerUrl *string `json:"TriggerUrl,omitempty" xml:"TriggerUrl,omitempty"` +} + +func (s CreateRepoTriggerRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTriggerRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoTriggerRequest) SetInstanceId(v string) *CreateRepoTriggerRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoTriggerRequest) SetRepoId(v string) *CreateRepoTriggerRequest { + s.RepoId = &v + return s +} + +func (s *CreateRepoTriggerRequest) SetTriggerName(v string) *CreateRepoTriggerRequest { + s.TriggerName = &v + return s +} + +func (s *CreateRepoTriggerRequest) SetTriggerTag(v string) *CreateRepoTriggerRequest { + s.TriggerTag = &v + return s +} + +func (s *CreateRepoTriggerRequest) SetTriggerType(v string) *CreateRepoTriggerRequest { + s.TriggerType = &v + return s +} + +func (s *CreateRepoTriggerRequest) SetTriggerUrl(v string) *CreateRepoTriggerRequest { + s.TriggerUrl = &v + return s +} + +type CreateRepoTriggerResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TriggerId *string `json:"TriggerId,omitempty" xml:"TriggerId,omitempty"` +} + +func (s CreateRepoTriggerResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTriggerResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoTriggerResponseBody) SetCode(v string) *CreateRepoTriggerResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoTriggerResponseBody) SetIsSuccess(v bool) *CreateRepoTriggerResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoTriggerResponseBody) SetRequestId(v string) *CreateRepoTriggerResponseBody { + s.RequestId = &v + return s +} + +func (s *CreateRepoTriggerResponseBody) SetTriggerId(v string) *CreateRepoTriggerResponseBody { + s.TriggerId = &v + return s +} + +type CreateRepoTriggerResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoTriggerResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoTriggerResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTriggerResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoTriggerResponse) SetHeaders(v map[string]*string) *CreateRepoTriggerResponse { + s.Headers = v + return s +} + +func (s *CreateRepoTriggerResponse) SetBody(v *CreateRepoTriggerResponseBody) *CreateRepoTriggerResponse { + s.Body = v + return s +} + +type CreateRepositoryRequest struct { + Detail *string `json:"Detail,omitempty" xml:"Detail,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string 
`json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` + TagImmutability *bool `json:"TagImmutability,omitempty" xml:"TagImmutability,omitempty"` +} + +func (s CreateRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepositoryRequest) GoString() string { + return s.String() +} + +func (s *CreateRepositoryRequest) SetDetail(v string) *CreateRepositoryRequest { + s.Detail = &v + return s +} + +func (s *CreateRepositoryRequest) SetInstanceId(v string) *CreateRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepositoryRequest) SetRepoName(v string) *CreateRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *CreateRepositoryRequest) SetRepoNamespaceName(v string) *CreateRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +func (s *CreateRepositoryRequest) SetRepoType(v string) *CreateRepositoryRequest { + s.RepoType = &v + return s +} + +func (s *CreateRepositoryRequest) SetResourceGroupId(v string) *CreateRepositoryRequest { + s.ResourceGroupId = &v + return s +} + +func (s *CreateRepositoryRequest) SetSummary(v string) *CreateRepositoryRequest { + s.Summary = &v + return s +} + +func (s *CreateRepositoryRequest) SetTagImmutability(v bool) *CreateRepositoryRequest { + s.TagImmutability = &v + return s +} + +type CreateRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepositoryResponseBody) SetCode(v string) *CreateRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepositoryResponseBody) SetIsSuccess(v bool) *CreateRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepositoryResponseBody) SetRepoId(v string) *CreateRepositoryResponseBody { + s.RepoId = &v + return s +} + +func (s *CreateRepositoryResponseBody) SetRequestId(v string) *CreateRepositoryResponseBody { + s.RequestId = &v + return s +} + +type CreateRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepositoryResponse) GoString() string { + return s.String() +} + +func (s *CreateRepositoryResponse) SetHeaders(v map[string]*string) *CreateRepositoryResponse { + s.Headers = v + return s +} + +func (s *CreateRepositoryResponse) SetBody(v *CreateRepositoryResponseBody) *CreateRepositoryResponse { + s.Body = v + return s +} + +type DeleteChainRequest struct { + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s DeleteChainRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteChainRequest) GoString() string { + return s.String() +} + +func (s 
*DeleteChainRequest) SetChainId(v string) *DeleteChainRequest { + s.ChainId = &v + return s +} + +func (s *DeleteChainRequest) SetInstanceId(v string) *DeleteChainRequest { + s.InstanceId = &v + return s +} + +type DeleteChainResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteChainResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteChainResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteChainResponseBody) SetCode(v string) *DeleteChainResponseBody { + s.Code = &v + return s +} + +func (s *DeleteChainResponseBody) SetIsSuccess(v bool) *DeleteChainResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteChainResponseBody) SetRequestId(v string) *DeleteChainResponseBody { + s.RequestId = &v + return s +} + +type DeleteChainResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteChainResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteChainResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteChainResponse) GoString() string { + return s.String() +} + +func (s *DeleteChainResponse) SetHeaders(v map[string]*string) *DeleteChainResponse { + s.Headers = v + return s +} + +func (s *DeleteChainResponse) SetBody(v *DeleteChainResponseBody) *DeleteChainResponse { + s.Body = v + return s +} + +type DeleteChartNamespaceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` +} + +func (s DeleteChartNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartNamespaceRequest) GoString() string { + return s.String() +} + +func (s *DeleteChartNamespaceRequest) SetInstanceId(v string) *DeleteChartNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteChartNamespaceRequest) SetNamespaceName(v string) *DeleteChartNamespaceRequest { + s.NamespaceName = &v + return s +} + +type DeleteChartNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteChartNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteChartNamespaceResponseBody) SetCode(v string) *DeleteChartNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *DeleteChartNamespaceResponseBody) SetIsSuccess(v bool) *DeleteChartNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteChartNamespaceResponseBody) SetRequestId(v string) *DeleteChartNamespaceResponseBody { + s.RequestId = &v + return s +} + +type DeleteChartNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteChartNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteChartNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartNamespaceResponse) GoString() string { + return s.String() +} + +func (s *DeleteChartNamespaceResponse) SetHeaders(v 
map[string]*string) *DeleteChartNamespaceResponse { + s.Headers = v + return s +} + +func (s *DeleteChartNamespaceResponse) SetBody(v *DeleteChartNamespaceResponseBody) *DeleteChartNamespaceResponse { + s.Body = v + return s +} + +type DeleteChartReleaseRequest struct { + Chart *string `json:"Chart,omitempty" xml:"Chart,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Release *string `json:"Release,omitempty" xml:"Release,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s DeleteChartReleaseRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartReleaseRequest) GoString() string { + return s.String() +} + +func (s *DeleteChartReleaseRequest) SetChart(v string) *DeleteChartReleaseRequest { + s.Chart = &v + return s +} + +func (s *DeleteChartReleaseRequest) SetInstanceId(v string) *DeleteChartReleaseRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteChartReleaseRequest) SetRelease(v string) *DeleteChartReleaseRequest { + s.Release = &v + return s +} + +func (s *DeleteChartReleaseRequest) SetRepoName(v string) *DeleteChartReleaseRequest { + s.RepoName = &v + return s +} + +func (s *DeleteChartReleaseRequest) SetRepoNamespaceName(v string) *DeleteChartReleaseRequest { + s.RepoNamespaceName = &v + return s +} + +type DeleteChartReleaseResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteChartReleaseResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartReleaseResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteChartReleaseResponseBody) SetCode(v string) *DeleteChartReleaseResponseBody { + s.Code = &v + return s +} + +func (s *DeleteChartReleaseResponseBody) SetIsSuccess(v bool) *DeleteChartReleaseResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteChartReleaseResponseBody) SetRequestId(v string) *DeleteChartReleaseResponseBody { + s.RequestId = &v + return s +} + +type DeleteChartReleaseResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteChartReleaseResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteChartReleaseResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartReleaseResponse) GoString() string { + return s.String() +} + +func (s *DeleteChartReleaseResponse) SetHeaders(v map[string]*string) *DeleteChartReleaseResponse { + s.Headers = v + return s +} + +func (s *DeleteChartReleaseResponse) SetBody(v *DeleteChartReleaseResponseBody) *DeleteChartReleaseResponse { + s.Body = v + return s +} + +type DeleteChartRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s DeleteChartRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartRepositoryRequest) GoString() string { + return s.String() +} + +func (s *DeleteChartRepositoryRequest) SetInstanceId(v string) *DeleteChartRepositoryRequest { + s.InstanceId = &v + return s +} + 
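+// Illustrative sketch, not generated code: all request types in this file
+// share the same tea builder pattern - each setter stores the address of its
+// argument and returns the receiver, so calls chain fluently. The instance,
+// namespace, and repository names below are hypothetical placeholders:
+//
+//	req := (&DeleteChartRepositoryRequest{}).
+//		SetInstanceId("cri-xxxxxxxxxxxxxxxx").
+//		SetRepoNamespaceName("example-ns").
+//		SetRepoName("example-chart-repo")
+//
+// Because every field is a pointer tagged `omitempty`, unset fields are
+// omitted from the marshalled request instead of being sent as zero values.
+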
+func (s *DeleteChartRepositoryRequest) SetRepoName(v string) *DeleteChartRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *DeleteChartRepositoryRequest) SetRepoNamespaceName(v string) *DeleteChartRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +type DeleteChartRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteChartRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteChartRepositoryResponseBody) SetCode(v string) *DeleteChartRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *DeleteChartRepositoryResponseBody) SetIsSuccess(v bool) *DeleteChartRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteChartRepositoryResponseBody) SetRequestId(v string) *DeleteChartRepositoryResponseBody { + s.RequestId = &v + return s +} + +type DeleteChartRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteChartRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteChartRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartRepositoryResponse) GoString() string { + return s.String() +} + +func (s *DeleteChartRepositoryResponse) SetHeaders(v map[string]*string) *DeleteChartRepositoryResponse { + s.Headers = v + return s +} + +func (s *DeleteChartRepositoryResponse) SetBody(v *DeleteChartRepositoryResponseBody) *DeleteChartRepositoryResponse { + s.Body = v + return s +} + +type DeleteEventCenterRuleRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"` +} + +func (s DeleteEventCenterRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteEventCenterRuleRequest) GoString() string { + return s.String() +} + +func (s *DeleteEventCenterRuleRequest) SetInstanceId(v string) *DeleteEventCenterRuleRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteEventCenterRuleRequest) SetRuleId(v string) *DeleteEventCenterRuleRequest { + s.RuleId = &v + return s +} + +type DeleteEventCenterRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + // Id of the request + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteEventCenterRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteEventCenterRuleResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteEventCenterRuleResponseBody) SetCode(v string) *DeleteEventCenterRuleResponseBody { + s.Code = &v + return s +} + +func (s *DeleteEventCenterRuleResponseBody) SetRequestId(v string) *DeleteEventCenterRuleResponseBody { + s.RequestId = &v + return s +} + +type DeleteEventCenterRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteEventCenterRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteEventCenterRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteEventCenterRuleResponse) GoString() string { + return s.String() +} + +func (s 
*DeleteEventCenterRuleResponse) SetHeaders(v map[string]*string) *DeleteEventCenterRuleResponse { + s.Headers = v + return s +} + +func (s *DeleteEventCenterRuleResponse) SetBody(v *DeleteEventCenterRuleResponseBody) *DeleteEventCenterRuleResponse { + s.Body = v + return s +} + +type DeleteInstanceEndpointAclPolicyRequest struct { + EndpointType *string `json:"EndpointType,omitempty" xml:"EndpointType,omitempty"` + Entry *string `json:"Entry,omitempty" xml:"Entry,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` +} + +func (s DeleteInstanceEndpointAclPolicyRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceEndpointAclPolicyRequest) GoString() string { + return s.String() +} + +func (s *DeleteInstanceEndpointAclPolicyRequest) SetEndpointType(v string) *DeleteInstanceEndpointAclPolicyRequest { + s.EndpointType = &v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyRequest) SetEntry(v string) *DeleteInstanceEndpointAclPolicyRequest { + s.Entry = &v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyRequest) SetInstanceId(v string) *DeleteInstanceEndpointAclPolicyRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyRequest) SetModuleName(v string) *DeleteInstanceEndpointAclPolicyRequest { + s.ModuleName = &v + return s +} + +type DeleteInstanceEndpointAclPolicyResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteInstanceEndpointAclPolicyResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceEndpointAclPolicyResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteInstanceEndpointAclPolicyResponseBody) SetCode(v string) *DeleteInstanceEndpointAclPolicyResponseBody { + s.Code = &v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyResponseBody) SetIsSuccess(v bool) *DeleteInstanceEndpointAclPolicyResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyResponseBody) SetRequestId(v string) *DeleteInstanceEndpointAclPolicyResponseBody { + s.RequestId = &v + return s +} + +type DeleteInstanceEndpointAclPolicyResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteInstanceEndpointAclPolicyResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteInstanceEndpointAclPolicyResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceEndpointAclPolicyResponse) GoString() string { + return s.String() +} + +func (s *DeleteInstanceEndpointAclPolicyResponse) SetHeaders(v map[string]*string) *DeleteInstanceEndpointAclPolicyResponse { + s.Headers = v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyResponse) SetBody(v *DeleteInstanceEndpointAclPolicyResponseBody) *DeleteInstanceEndpointAclPolicyResponse { + s.Body = v + return s +} + +type DeleteInstanceVpcEndpointLinkedVpcRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` + VpcId *string `json:"VpcId,omitempty" xml:"VpcId,omitempty"` + VswitchId *string `json:"VswitchId,omitempty" xml:"VswitchId,omitempty"` +} + +func (s 
DeleteInstanceVpcEndpointLinkedVpcRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceVpcEndpointLinkedVpcRequest) GoString() string { + return s.String() +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcRequest) SetInstanceId(v string) *DeleteInstanceVpcEndpointLinkedVpcRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcRequest) SetModuleName(v string) *DeleteInstanceVpcEndpointLinkedVpcRequest { + s.ModuleName = &v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcRequest) SetVpcId(v string) *DeleteInstanceVpcEndpointLinkedVpcRequest { + s.VpcId = &v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcRequest) SetVswitchId(v string) *DeleteInstanceVpcEndpointLinkedVpcRequest { + s.VswitchId = &v + return s +} + +type DeleteInstanceVpcEndpointLinkedVpcResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteInstanceVpcEndpointLinkedVpcResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceVpcEndpointLinkedVpcResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcResponseBody) SetCode(v string) *DeleteInstanceVpcEndpointLinkedVpcResponseBody { + s.Code = &v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcResponseBody) SetIsSuccess(v bool) *DeleteInstanceVpcEndpointLinkedVpcResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcResponseBody) SetRequestId(v string) *DeleteInstanceVpcEndpointLinkedVpcResponseBody { + s.RequestId = &v + return s +} + +type DeleteInstanceVpcEndpointLinkedVpcResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteInstanceVpcEndpointLinkedVpcResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteInstanceVpcEndpointLinkedVpcResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceVpcEndpointLinkedVpcResponse) GoString() string { + return s.String() +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcResponse) SetHeaders(v map[string]*string) *DeleteInstanceVpcEndpointLinkedVpcResponse { + s.Headers = v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcResponse) SetBody(v *DeleteInstanceVpcEndpointLinkedVpcResponseBody) *DeleteInstanceVpcEndpointLinkedVpcResponse { + s.Body = v + return s +} + +type DeleteNamespaceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` +} + +func (s DeleteNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteNamespaceRequest) GoString() string { + return s.String() +} + +func (s *DeleteNamespaceRequest) SetInstanceId(v string) *DeleteNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteNamespaceRequest) SetNamespaceName(v string) *DeleteNamespaceRequest { + s.NamespaceName = &v + return s +} + +type DeleteNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteNamespaceResponseBody) String() string { + return tea.Prettify(s) +} 
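+
+// Illustrative sketch, not generated code: response bodies expose pointer
+// fields only, so callers should dereference defensively. Assuming the tea
+// helpers tea.BoolValue and tea.StringValue (which return the zero value for
+// a nil pointer), a caller might check the outcome like this:
+//
+//	if tea.BoolValue(body.IsSuccess) {
+//		log.Printf("namespace deleted, request id %s", tea.StringValue(body.RequestId))
+//	} else {
+//		log.Printf("delete failed, code %s", tea.StringValue(body.Code))
+//	}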
+ +func (s DeleteNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteNamespaceResponseBody) SetCode(v string) *DeleteNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *DeleteNamespaceResponseBody) SetIsSuccess(v bool) *DeleteNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteNamespaceResponseBody) SetRequestId(v string) *DeleteNamespaceResponseBody { + s.RequestId = &v + return s +} + +type DeleteNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteNamespaceResponse) GoString() string { + return s.String() +} + +func (s *DeleteNamespaceResponse) SetHeaders(v map[string]*string) *DeleteNamespaceResponse { + s.Headers = v + return s +} + +func (s *DeleteNamespaceResponse) SetBody(v *DeleteNamespaceResponseBody) *DeleteNamespaceResponse { + s.Body = v + return s +} + +type DeleteRepoBuildRuleRequest struct { + BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s DeleteRepoBuildRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoBuildRuleRequest) GoString() string { + return s.String() +} + +func (s *DeleteRepoBuildRuleRequest) SetBuildRuleId(v string) *DeleteRepoBuildRuleRequest { + s.BuildRuleId = &v + return s +} + +func (s *DeleteRepoBuildRuleRequest) SetInstanceId(v string) *DeleteRepoBuildRuleRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteRepoBuildRuleRequest) SetRepoId(v string) *DeleteRepoBuildRuleRequest { + s.RepoId = &v + return s +} + +type DeleteRepoBuildRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteRepoBuildRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoBuildRuleResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteRepoBuildRuleResponseBody) SetCode(v string) *DeleteRepoBuildRuleResponseBody { + s.Code = &v + return s +} + +func (s *DeleteRepoBuildRuleResponseBody) SetIsSuccess(v bool) *DeleteRepoBuildRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteRepoBuildRuleResponseBody) SetRequestId(v string) *DeleteRepoBuildRuleResponseBody { + s.RequestId = &v + return s +} + +type DeleteRepoBuildRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteRepoBuildRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteRepoBuildRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoBuildRuleResponse) GoString() string { + return s.String() +} + +func (s *DeleteRepoBuildRuleResponse) SetHeaders(v map[string]*string) *DeleteRepoBuildRuleResponse { + s.Headers = v + return s +} + +func (s *DeleteRepoBuildRuleResponse) SetBody(v *DeleteRepoBuildRuleResponseBody) *DeleteRepoBuildRuleResponse { + s.Body = v + return s +} + +type DeleteRepoSyncRuleRequest struct { + InstanceId *string `json:"InstanceId,omitempty" 
xml:"InstanceId,omitempty"` + SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"` +} + +func (s DeleteRepoSyncRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoSyncRuleRequest) GoString() string { + return s.String() +} + +func (s *DeleteRepoSyncRuleRequest) SetInstanceId(v string) *DeleteRepoSyncRuleRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteRepoSyncRuleRequest) SetSyncRuleId(v string) *DeleteRepoSyncRuleRequest { + s.SyncRuleId = &v + return s +} + +type DeleteRepoSyncRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteRepoSyncRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoSyncRuleResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteRepoSyncRuleResponseBody) SetCode(v string) *DeleteRepoSyncRuleResponseBody { + s.Code = &v + return s +} + +func (s *DeleteRepoSyncRuleResponseBody) SetIsSuccess(v bool) *DeleteRepoSyncRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteRepoSyncRuleResponseBody) SetRequestId(v string) *DeleteRepoSyncRuleResponseBody { + s.RequestId = &v + return s +} + +type DeleteRepoSyncRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteRepoSyncRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteRepoSyncRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoSyncRuleResponse) GoString() string { + return s.String() +} + +func (s *DeleteRepoSyncRuleResponse) SetHeaders(v map[string]*string) *DeleteRepoSyncRuleResponse { + s.Headers = v + return s +} + +func (s *DeleteRepoSyncRuleResponse) SetBody(v *DeleteRepoSyncRuleResponseBody) *DeleteRepoSyncRuleResponse { + s.Body = v + return s +} + +type DeleteRepoTagRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s DeleteRepoTagRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTagRequest) GoString() string { + return s.String() +} + +func (s *DeleteRepoTagRequest) SetInstanceId(v string) *DeleteRepoTagRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteRepoTagRequest) SetRepoId(v string) *DeleteRepoTagRequest { + s.RepoId = &v + return s +} + +func (s *DeleteRepoTagRequest) SetTag(v string) *DeleteRepoTagRequest { + s.Tag = &v + return s +} + +type DeleteRepoTagResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteRepoTagResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTagResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteRepoTagResponseBody) SetCode(v string) *DeleteRepoTagResponseBody { + s.Code = &v + return s +} + +func (s *DeleteRepoTagResponseBody) SetIsSuccess(v bool) *DeleteRepoTagResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteRepoTagResponseBody) SetRequestId(v string) *DeleteRepoTagResponseBody { + s.RequestId = &v + return s +} + +type 
DeleteRepoTagResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteRepoTagResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTagResponse) GoString() string { + return s.String() +} + +func (s *DeleteRepoTagResponse) SetHeaders(v map[string]*string) *DeleteRepoTagResponse { + s.Headers = v + return s +} + +func (s *DeleteRepoTagResponse) SetBody(v *DeleteRepoTagResponseBody) *DeleteRepoTagResponse { + s.Body = v + return s +} + +type DeleteRepoTriggerRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + TriggerId *string `json:"TriggerId,omitempty" xml:"TriggerId,omitempty"` +} + +func (s DeleteRepoTriggerRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTriggerRequest) GoString() string { + return s.String() +} + +func (s *DeleteRepoTriggerRequest) SetInstanceId(v string) *DeleteRepoTriggerRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteRepoTriggerRequest) SetRepoId(v string) *DeleteRepoTriggerRequest { + s.RepoId = &v + return s +} + +func (s *DeleteRepoTriggerRequest) SetTriggerId(v string) *DeleteRepoTriggerRequest { + s.TriggerId = &v + return s +} + +type DeleteRepoTriggerResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteRepoTriggerResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTriggerResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteRepoTriggerResponseBody) SetCode(v string) *DeleteRepoTriggerResponseBody { + s.Code = &v + return s +} + +func (s *DeleteRepoTriggerResponseBody) SetIsSuccess(v bool) *DeleteRepoTriggerResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteRepoTriggerResponseBody) SetRequestId(v string) *DeleteRepoTriggerResponseBody { + s.RequestId = &v + return s +} + +type DeleteRepoTriggerResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteRepoTriggerResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteRepoTriggerResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTriggerResponse) GoString() string { + return s.String() +} + +func (s *DeleteRepoTriggerResponse) SetHeaders(v map[string]*string) *DeleteRepoTriggerResponse { + s.Headers = v + return s +} + +func (s *DeleteRepoTriggerResponse) SetBody(v *DeleteRepoTriggerResponseBody) *DeleteRepoTriggerResponse { + s.Body = v + return s +} + +type DeleteRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s DeleteRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepositoryRequest) GoString() string { + return s.String() +} + +func (s *DeleteRepositoryRequest) SetInstanceId(v string) *DeleteRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteRepositoryRequest) SetRepoId(v string) *DeleteRepositoryRequest { + s.RepoId = &v + return s +} + +type DeleteRepositoryResponseBody struct { + Code *string 
`json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteRepositoryResponseBody) SetCode(v string) *DeleteRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *DeleteRepositoryResponseBody) SetIsSuccess(v bool) *DeleteRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteRepositoryResponseBody) SetRequestId(v string) *DeleteRepositoryResponseBody { + s.RequestId = &v + return s +} + +type DeleteRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepositoryResponse) GoString() string { + return s.String() +} + +func (s *DeleteRepositoryResponse) SetHeaders(v map[string]*string) *DeleteRepositoryResponse { + s.Headers = v + return s +} + +func (s *DeleteRepositoryResponse) SetBody(v *DeleteRepositoryResponseBody) *DeleteRepositoryResponse { + s.Body = v + return s +} + +type GetArtifactBuildTaskRequest struct { + BuildTaskId *string `json:"BuildTaskId,omitempty" xml:"BuildTaskId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s GetArtifactBuildTaskRequest) String() string { + return tea.Prettify(s) +} + +func (s GetArtifactBuildTaskRequest) GoString() string { + return s.String() +} + +func (s *GetArtifactBuildTaskRequest) SetBuildTaskId(v string) *GetArtifactBuildTaskRequest { + s.BuildTaskId = &v + return s +} + +func (s *GetArtifactBuildTaskRequest) SetInstanceId(v string) *GetArtifactBuildTaskRequest { + s.InstanceId = &v + return s +} + +type GetArtifactBuildTaskResponseBody struct { + ArtifactBuildType *string `json:"ArtifactBuildType,omitempty" xml:"ArtifactBuildType,omitempty"` + BuildTaskId *string `json:"BuildTaskId,omitempty" xml:"BuildTaskId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + EndTime *int32 `json:"EndTime,omitempty" xml:"EndTime,omitempty"` + Instructions []*string `json:"Instructions,omitempty" xml:"Instructions,omitempty" type:"Repeated"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + SourceArtifact *GetArtifactBuildTaskResponseBodySourceArtifact `json:"SourceArtifact,omitempty" xml:"SourceArtifact,omitempty" type:"Struct"` + StartTime *int32 `json:"StartTime,omitempty" xml:"StartTime,omitempty"` + TargetArtifact *GetArtifactBuildTaskResponseBodyTargetArtifact `json:"TargetArtifact,omitempty" xml:"TargetArtifact,omitempty" type:"Struct"` + TaskStatus *string `json:"TaskStatus,omitempty" xml:"TaskStatus,omitempty"` +} + +func (s GetArtifactBuildTaskResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetArtifactBuildTaskResponseBody) GoString() string { + return s.String() +} + +func (s *GetArtifactBuildTaskResponseBody) SetArtifactBuildType(v string) *GetArtifactBuildTaskResponseBody { + s.ArtifactBuildType = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetBuildTaskId(v string) *GetArtifactBuildTaskResponseBody { + s.BuildTaskId = 
&v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetCode(v string) *GetArtifactBuildTaskResponseBody { + s.Code = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetEndTime(v int32) *GetArtifactBuildTaskResponseBody { + s.EndTime = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetInstructions(v []*string) *GetArtifactBuildTaskResponseBody { + s.Instructions = v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetIsSuccess(v bool) *GetArtifactBuildTaskResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetRequestId(v string) *GetArtifactBuildTaskResponseBody { + s.RequestId = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetSourceArtifact(v *GetArtifactBuildTaskResponseBodySourceArtifact) *GetArtifactBuildTaskResponseBody { + s.SourceArtifact = v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetStartTime(v int32) *GetArtifactBuildTaskResponseBody { + s.StartTime = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetTargetArtifact(v *GetArtifactBuildTaskResponseBodyTargetArtifact) *GetArtifactBuildTaskResponseBody { + s.TargetArtifact = v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetTaskStatus(v string) *GetArtifactBuildTaskResponseBody { + s.TaskStatus = &v + return s +} + +type GetArtifactBuildTaskResponseBodySourceArtifact struct { + ArtifactType *string `json:"ArtifactType,omitempty" xml:"ArtifactType,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Version *string `json:"Version,omitempty" xml:"Version,omitempty"` +} + +func (s GetArtifactBuildTaskResponseBodySourceArtifact) String() string { + return tea.Prettify(s) +} + +func (s GetArtifactBuildTaskResponseBodySourceArtifact) GoString() string { + return s.String() +} + +func (s *GetArtifactBuildTaskResponseBodySourceArtifact) SetArtifactType(v string) *GetArtifactBuildTaskResponseBodySourceArtifact { + s.ArtifactType = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBodySourceArtifact) SetRepoId(v string) *GetArtifactBuildTaskResponseBodySourceArtifact { + s.RepoId = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBodySourceArtifact) SetVersion(v string) *GetArtifactBuildTaskResponseBodySourceArtifact { + s.Version = &v + return s +} + +type GetArtifactBuildTaskResponseBodyTargetArtifact struct { + ArtifactType *string `json:"ArtifactType,omitempty" xml:"ArtifactType,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Version *string `json:"Version,omitempty" xml:"Version,omitempty"` +} + +func (s GetArtifactBuildTaskResponseBodyTargetArtifact) String() string { + return tea.Prettify(s) +} + +func (s GetArtifactBuildTaskResponseBodyTargetArtifact) GoString() string { + return s.String() +} + +func (s *GetArtifactBuildTaskResponseBodyTargetArtifact) SetArtifactType(v string) *GetArtifactBuildTaskResponseBodyTargetArtifact { + s.ArtifactType = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBodyTargetArtifact) SetRepoId(v string) *GetArtifactBuildTaskResponseBodyTargetArtifact { + s.RepoId = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBodyTargetArtifact) SetVersion(v string) *GetArtifactBuildTaskResponseBodyTargetArtifact { + s.Version = &v + return s +} + +type GetArtifactBuildTaskResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetArtifactBuildTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" 
require:"true"` +} + +func (s GetArtifactBuildTaskResponse) String() string { + return tea.Prettify(s) +} + +func (s GetArtifactBuildTaskResponse) GoString() string { + return s.String() +} + +func (s *GetArtifactBuildTaskResponse) SetHeaders(v map[string]*string) *GetArtifactBuildTaskResponse { + s.Headers = v + return s +} + +func (s *GetArtifactBuildTaskResponse) SetBody(v *GetArtifactBuildTaskResponseBody) *GetArtifactBuildTaskResponse { + s.Body = v + return s +} + +type GetAuthorizationTokenRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s GetAuthorizationTokenRequest) String() string { + return tea.Prettify(s) +} + +func (s GetAuthorizationTokenRequest) GoString() string { + return s.String() +} + +func (s *GetAuthorizationTokenRequest) SetInstanceId(v string) *GetAuthorizationTokenRequest { + s.InstanceId = &v + return s +} + +type GetAuthorizationTokenResponseBody struct { + AuthorizationToken *string `json:"AuthorizationToken,omitempty" xml:"AuthorizationToken,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + ExpireTime *int64 `json:"ExpireTime,omitempty" xml:"ExpireTime,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TempUsername *string `json:"TempUsername,omitempty" xml:"TempUsername,omitempty"` +} + +func (s GetAuthorizationTokenResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetAuthorizationTokenResponseBody) GoString() string { + return s.String() +} + +func (s *GetAuthorizationTokenResponseBody) SetAuthorizationToken(v string) *GetAuthorizationTokenResponseBody { + s.AuthorizationToken = &v + return s +} + +func (s *GetAuthorizationTokenResponseBody) SetCode(v string) *GetAuthorizationTokenResponseBody { + s.Code = &v + return s +} + +func (s *GetAuthorizationTokenResponseBody) SetExpireTime(v int64) *GetAuthorizationTokenResponseBody { + s.ExpireTime = &v + return s +} + +func (s *GetAuthorizationTokenResponseBody) SetIsSuccess(v bool) *GetAuthorizationTokenResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetAuthorizationTokenResponseBody) SetRequestId(v string) *GetAuthorizationTokenResponseBody { + s.RequestId = &v + return s +} + +func (s *GetAuthorizationTokenResponseBody) SetTempUsername(v string) *GetAuthorizationTokenResponseBody { + s.TempUsername = &v + return s +} + +type GetAuthorizationTokenResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetAuthorizationTokenResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetAuthorizationTokenResponse) String() string { + return tea.Prettify(s) +} + +func (s GetAuthorizationTokenResponse) GoString() string { + return s.String() +} + +func (s *GetAuthorizationTokenResponse) SetHeaders(v map[string]*string) *GetAuthorizationTokenResponse { + s.Headers = v + return s +} + +func (s *GetAuthorizationTokenResponse) SetBody(v *GetAuthorizationTokenResponseBody) *GetAuthorizationTokenResponse { + s.Body = v + return s +} + +type GetChainRequest struct { + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s GetChainRequest) String() string { + return tea.Prettify(s) +} + +func (s GetChainRequest) GoString() string { + return s.String() +} + +func (s *GetChainRequest) SetChainId(v string) 
*GetChainRequest { + s.ChainId = &v + return s +} + +func (s *GetChainRequest) SetInstanceId(v string) *GetChainRequest { + s.InstanceId = &v + return s +} + +type GetChainResponseBody struct { + ChainConfig *GetChainResponseBodyChainConfig `json:"ChainConfig,omitempty" xml:"ChainConfig,omitempty" type:"Struct"` + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + Description *string `json:"Description,omitempty" xml:"Description,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + Name *string `json:"Name,omitempty" xml:"Name,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + ScopeId *string `json:"ScopeId,omitempty" xml:"ScopeId,omitempty"` + ScopeType *string `json:"ScopeType,omitempty" xml:"ScopeType,omitempty"` +} + +func (s GetChainResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBody) GoString() string { + return s.String() +} + +func (s *GetChainResponseBody) SetChainConfig(v *GetChainResponseBodyChainConfig) *GetChainResponseBody { + s.ChainConfig = v + return s +} + +func (s *GetChainResponseBody) SetChainId(v string) *GetChainResponseBody { + s.ChainId = &v + return s +} + +func (s *GetChainResponseBody) SetCode(v string) *GetChainResponseBody { + s.Code = &v + return s +} + +func (s *GetChainResponseBody) SetCreateTime(v int64) *GetChainResponseBody { + s.CreateTime = &v + return s +} + +func (s *GetChainResponseBody) SetDescription(v string) *GetChainResponseBody { + s.Description = &v + return s +} + +func (s *GetChainResponseBody) SetInstanceId(v string) *GetChainResponseBody { + s.InstanceId = &v + return s +} + +func (s *GetChainResponseBody) SetIsSuccess(v bool) *GetChainResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetChainResponseBody) SetModifiedTime(v int64) *GetChainResponseBody { + s.ModifiedTime = &v + return s +} + +func (s *GetChainResponseBody) SetName(v string) *GetChainResponseBody { + s.Name = &v + return s +} + +func (s *GetChainResponseBody) SetRequestId(v string) *GetChainResponseBody { + s.RequestId = &v + return s +} + +func (s *GetChainResponseBody) SetScopeId(v string) *GetChainResponseBody { + s.ScopeId = &v + return s +} + +func (s *GetChainResponseBody) SetScopeType(v string) *GetChainResponseBody { + s.ScopeType = &v + return s +} + +type GetChainResponseBodyChainConfig struct { + ChainConfigId *string `json:"ChainConfigId,omitempty" xml:"ChainConfigId,omitempty"` + IsActive *bool `json:"IsActive,omitempty" xml:"IsActive,omitempty"` + Nodes []*GetChainResponseBodyChainConfigNodes `json:"Nodes,omitempty" xml:"Nodes,omitempty" type:"Repeated"` + Routers []*GetChainResponseBodyChainConfigRouters `json:"Routers,omitempty" xml:"Routers,omitempty" type:"Repeated"` + Version *string `json:"Version,omitempty" xml:"Version,omitempty"` +} + +func (s GetChainResponseBodyChainConfig) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfig) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfig) SetChainConfigId(v string) *GetChainResponseBodyChainConfig { + s.ChainConfigId = &v + return s +} + +func (s *GetChainResponseBodyChainConfig) SetIsActive(v bool) 
*GetChainResponseBodyChainConfig { + s.IsActive = &v + return s +} + +func (s *GetChainResponseBodyChainConfig) SetNodes(v []*GetChainResponseBodyChainConfigNodes) *GetChainResponseBodyChainConfig { + s.Nodes = v + return s +} + +func (s *GetChainResponseBodyChainConfig) SetRouters(v []*GetChainResponseBodyChainConfigRouters) *GetChainResponseBodyChainConfig { + s.Routers = v + return s +} + +func (s *GetChainResponseBodyChainConfig) SetVersion(v string) *GetChainResponseBodyChainConfig { + s.Version = &v + return s +} + +type GetChainResponseBodyChainConfigNodes struct { + Enable *bool `json:"Enable,omitempty" xml:"Enable,omitempty"` + NodeConfig *GetChainResponseBodyChainConfigNodesNodeConfig `json:"NodeConfig,omitempty" xml:"NodeConfig,omitempty" type:"Struct"` + NodeName *string `json:"NodeName,omitempty" xml:"NodeName,omitempty"` +} + +func (s GetChainResponseBodyChainConfigNodes) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigNodes) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigNodes) SetEnable(v bool) *GetChainResponseBodyChainConfigNodes { + s.Enable = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodes) SetNodeConfig(v *GetChainResponseBodyChainConfigNodesNodeConfig) *GetChainResponseBodyChainConfigNodes { + s.NodeConfig = v + return s +} + +func (s *GetChainResponseBodyChainConfigNodes) SetNodeName(v string) *GetChainResponseBodyChainConfigNodes { + s.NodeName = &v + return s +} + +type GetChainResponseBodyChainConfigNodesNodeConfig struct { + DenyPolicy *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy `json:"DenyPolicy,omitempty" xml:"DenyPolicy,omitempty" type:"Struct"` + Retry *int32 `json:"Retry,omitempty" xml:"Retry,omitempty"` + ScanEngine *string `json:"ScanEngine,omitempty" xml:"ScanEngine,omitempty"` + Timeout *int64 `json:"Timeout,omitempty" xml:"Timeout,omitempty"` +} + +func (s GetChainResponseBodyChainConfigNodesNodeConfig) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigNodesNodeConfig) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfig) SetDenyPolicy(v *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) *GetChainResponseBodyChainConfigNodesNodeConfig { + s.DenyPolicy = v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfig) SetRetry(v int32) *GetChainResponseBodyChainConfigNodesNodeConfig { + s.Retry = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfig) SetScanEngine(v string) *GetChainResponseBodyChainConfigNodesNodeConfig { + s.ScanEngine = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfig) SetTimeout(v int64) *GetChainResponseBodyChainConfigNodesNodeConfig { + s.Timeout = &v + return s +} + +type GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy struct { + Action *string `json:"Action,omitempty" xml:"Action,omitempty"` + IssueCount *string `json:"IssueCount,omitempty" xml:"IssueCount,omitempty"` + IssueLevel *string `json:"IssueLevel,omitempty" xml:"IssueLevel,omitempty"` + Logic *string `json:"Logic,omitempty" xml:"Logic,omitempty"` +} + +func (s GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) SetAction(v string) 
*GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy { + s.Action = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) SetIssueCount(v string) *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy { + s.IssueCount = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) SetIssueLevel(v string) *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy { + s.IssueLevel = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) SetLogic(v string) *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy { + s.Logic = &v + return s +} + +type GetChainResponseBodyChainConfigRouters struct { + From *GetChainResponseBodyChainConfigRoutersFrom `json:"From,omitempty" xml:"From,omitempty" type:"Struct"` + To *GetChainResponseBodyChainConfigRoutersTo `json:"To,omitempty" xml:"To,omitempty" type:"Struct"` +} + +func (s GetChainResponseBodyChainConfigRouters) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigRouters) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigRouters) SetFrom(v *GetChainResponseBodyChainConfigRoutersFrom) *GetChainResponseBodyChainConfigRouters { + s.From = v + return s +} + +func (s *GetChainResponseBodyChainConfigRouters) SetTo(v *GetChainResponseBodyChainConfigRoutersTo) *GetChainResponseBodyChainConfigRouters { + s.To = v + return s +} + +type GetChainResponseBodyChainConfigRoutersFrom struct { + NodeName *string `json:"NodeName,omitempty" xml:"NodeName,omitempty"` +} + +func (s GetChainResponseBodyChainConfigRoutersFrom) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigRoutersFrom) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigRoutersFrom) SetNodeName(v string) *GetChainResponseBodyChainConfigRoutersFrom { + s.NodeName = &v + return s +} + +type GetChainResponseBodyChainConfigRoutersTo struct { + NodeName *string `json:"NodeName,omitempty" xml:"NodeName,omitempty"` +} + +func (s GetChainResponseBodyChainConfigRoutersTo) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigRoutersTo) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigRoutersTo) SetNodeName(v string) *GetChainResponseBodyChainConfigRoutersTo { + s.NodeName = &v + return s +} + +type GetChainResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetChainResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetChainResponse) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponse) GoString() string { + return s.String() +} + +func (s *GetChainResponse) SetHeaders(v map[string]*string) *GetChainResponse { + s.Headers = v + return s +} + +func (s *GetChainResponse) SetBody(v *GetChainResponseBody) *GetChainResponse { + s.Body = v + return s +} + +type GetChartNamespaceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` +} + +func (s GetChartNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s GetChartNamespaceRequest) GoString() string { + return s.String() +} + +func (s *GetChartNamespaceRequest) SetInstanceId(v string) *GetChartNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *GetChartNamespaceRequest) 
SetNamespaceName(v string) *GetChartNamespaceRequest { + s.NamespaceName = &v + return s +} + +type GetChartNamespaceResponseBody struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + NamespaceId *string `json:"NamespaceId,omitempty" xml:"NamespaceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s GetChartNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetChartNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *GetChartNamespaceResponseBody) SetAutoCreateRepo(v bool) *GetChartNamespaceResponseBody { + s.AutoCreateRepo = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetCode(v string) *GetChartNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetDefaultRepoType(v string) *GetChartNamespaceResponseBody { + s.DefaultRepoType = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetInstanceId(v string) *GetChartNamespaceResponseBody { + s.InstanceId = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetIsSuccess(v bool) *GetChartNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetNamespaceId(v string) *GetChartNamespaceResponseBody { + s.NamespaceId = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetNamespaceName(v string) *GetChartNamespaceResponseBody { + s.NamespaceName = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetNamespaceStatus(v string) *GetChartNamespaceResponseBody { + s.NamespaceStatus = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetRequestId(v string) *GetChartNamespaceResponseBody { + s.RequestId = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetResourceGroupId(v string) *GetChartNamespaceResponseBody { + s.ResourceGroupId = &v + return s +} + +type GetChartNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetChartNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetChartNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s GetChartNamespaceResponse) GoString() string { + return s.String() +} + +func (s *GetChartNamespaceResponse) SetHeaders(v map[string]*string) *GetChartNamespaceResponse { + s.Headers = v + return s +} + +func (s *GetChartNamespaceResponse) SetBody(v *GetChartNamespaceResponseBody) *GetChartNamespaceResponse { + s.Body = v + return s +} + +type GetChartRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s GetChartRepositoryRequest) String() string { + return tea.Prettify(s) +} + 
+func (s GetChartRepositoryRequest) GoString() string { + return s.String() +} + +func (s *GetChartRepositoryRequest) SetInstanceId(v string) *GetChartRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *GetChartRepositoryRequest) SetRepoName(v string) *GetChartRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *GetChartRepositoryRequest) SetRepoNamespaceName(v string) *GetChartRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +type GetChartRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` +} + +func (s GetChartRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetChartRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *GetChartRepositoryResponseBody) SetCode(v string) *GetChartRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetCreateTime(v int64) *GetChartRepositoryResponseBody { + s.CreateTime = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetInstanceId(v string) *GetChartRepositoryResponseBody { + s.InstanceId = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetIsSuccess(v bool) *GetChartRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetModifiedTime(v int64) *GetChartRepositoryResponseBody { + s.ModifiedTime = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRepoId(v string) *GetChartRepositoryResponseBody { + s.RepoId = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRepoName(v string) *GetChartRepositoryResponseBody { + s.RepoName = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRepoNamespaceName(v string) *GetChartRepositoryResponseBody { + s.RepoNamespaceName = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRepoStatus(v string) *GetChartRepositoryResponseBody { + s.RepoStatus = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRepoType(v string) *GetChartRepositoryResponseBody { + s.RepoType = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRequestId(v string) *GetChartRepositoryResponseBody { + s.RequestId = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetSummary(v string) *GetChartRepositoryResponseBody { + s.Summary = &v + return s +} + +type GetChartRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetChartRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetChartRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s 
GetChartRepositoryResponse) GoString() string { + return s.String() +} + +func (s *GetChartRepositoryResponse) SetHeaders(v map[string]*string) *GetChartRepositoryResponse { + s.Headers = v + return s +} + +func (s *GetChartRepositoryResponse) SetBody(v *GetChartRepositoryResponseBody) *GetChartRepositoryResponse { + s.Body = v + return s +} + +type GetInstanceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s GetInstanceRequest) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceRequest) GoString() string { + return s.String() +} + +func (s *GetInstanceRequest) SetInstanceId(v string) *GetInstanceRequest { + s.InstanceId = &v + return s +} + +type GetInstanceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + InstanceName *string `json:"InstanceName,omitempty" xml:"InstanceName,omitempty"` + InstanceSpecification *string `json:"InstanceSpecification,omitempty" xml:"InstanceSpecification,omitempty"` + InstanceStatus *string `json:"InstanceStatus,omitempty" xml:"InstanceStatus,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s GetInstanceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceResponseBody) GoString() string { + return s.String() +} + +func (s *GetInstanceResponseBody) SetCode(v string) *GetInstanceResponseBody { + s.Code = &v + return s +} + +func (s *GetInstanceResponseBody) SetCreateTime(v int64) *GetInstanceResponseBody { + s.CreateTime = &v + return s +} + +func (s *GetInstanceResponseBody) SetInstanceId(v string) *GetInstanceResponseBody { + s.InstanceId = &v + return s +} + +func (s *GetInstanceResponseBody) SetInstanceName(v string) *GetInstanceResponseBody { + s.InstanceName = &v + return s +} + +func (s *GetInstanceResponseBody) SetInstanceSpecification(v string) *GetInstanceResponseBody { + s.InstanceSpecification = &v + return s +} + +func (s *GetInstanceResponseBody) SetInstanceStatus(v string) *GetInstanceResponseBody { + s.InstanceStatus = &v + return s +} + +func (s *GetInstanceResponseBody) SetIsSuccess(v bool) *GetInstanceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetInstanceResponseBody) SetModifiedTime(v int64) *GetInstanceResponseBody { + s.ModifiedTime = &v + return s +} + +func (s *GetInstanceResponseBody) SetRequestId(v string) *GetInstanceResponseBody { + s.RequestId = &v + return s +} + +func (s *GetInstanceResponseBody) SetResourceGroupId(v string) *GetInstanceResponseBody { + s.ResourceGroupId = &v + return s +} + +type GetInstanceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetInstanceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetInstanceResponse) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceResponse) GoString() string { + return s.String() +} + +func (s *GetInstanceResponse) SetHeaders(v map[string]*string) *GetInstanceResponse { + s.Headers = v + return s +} + +func (s *GetInstanceResponse) SetBody(v 
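+
+// Note: the sketch below is illustrative only and is not part of the
+// generated SDK source. Every model type in this file follows the same Tea
+// codegen pattern: String() pretty-prints the struct via tea.Prettify,
+// GoString() delegates to String(), and each Set* method stores its argument
+// (taking the address of value arguments) and returns the receiver, so
+// setters can be chained when building a request. The instance ID used here
+// is a placeholder, not a real resource:
+//
+//	req := new(GetInstanceRequest).SetInstanceId("cri-example")
+//	fmt.Println(req.String()) // pretty-printed via tea.Prettify
+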
+type GetInstanceCountResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	Count *int32 `json:"Count,omitempty" xml:"Count,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s GetInstanceCountResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceCountResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceCountResponseBody) SetCode(v string) *GetInstanceCountResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetInstanceCountResponseBody) SetCount(v int32) *GetInstanceCountResponseBody {
+	s.Count = &v
+	return s
+}
+
+func (s *GetInstanceCountResponseBody) SetIsSuccess(v bool) *GetInstanceCountResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetInstanceCountResponseBody) SetRequestId(v string) *GetInstanceCountResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type GetInstanceCountResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetInstanceCountResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetInstanceCountResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceCountResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceCountResponse) SetHeaders(v map[string]*string) *GetInstanceCountResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetInstanceCountResponse) SetBody(v *GetInstanceCountResponseBody) *GetInstanceCountResponse {
+	s.Body = v
+	return s
+}
+
+type GetInstanceEndpointRequest struct {
+	EndpointType *string `json:"EndpointType,omitempty" xml:"EndpointType,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"`
+}
+
+func (s GetInstanceEndpointRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceEndpointRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceEndpointRequest) SetEndpointType(v string) *GetInstanceEndpointRequest {
+	s.EndpointType = &v
+	return s
+}
+
+func (s *GetInstanceEndpointRequest) SetInstanceId(v string) *GetInstanceEndpointRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetInstanceEndpointRequest) SetModuleName(v string) *GetInstanceEndpointRequest {
+	s.ModuleName = &v
+	return s
+}
+
+type GetInstanceEndpointResponseBody struct {
+	AclEnable *bool `json:"AclEnable,omitempty" xml:"AclEnable,omitempty"`
+	AclEntries []*GetInstanceEndpointResponseBodyAclEntries `json:"AclEntries,omitempty" xml:"AclEntries,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	Domains []*GetInstanceEndpointResponseBodyDomains `json:"Domains,omitempty" xml:"Domains,omitempty" type:"Repeated"`
+	Enable *bool `json:"Enable,omitempty" xml:"Enable,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s GetInstanceEndpointResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceEndpointResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceEndpointResponseBody) SetAclEnable(v bool) *GetInstanceEndpointResponseBody {
+	s.AclEnable = &v
+	return s
+}
+
+func (s *GetInstanceEndpointResponseBody) SetAclEntries(v []*GetInstanceEndpointResponseBodyAclEntries) *GetInstanceEndpointResponseBody {
+	s.AclEntries = v
+	return s
+}
+
+func (s *GetInstanceEndpointResponseBody) SetCode(v string) *GetInstanceEndpointResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetInstanceEndpointResponseBody) SetDomains(v []*GetInstanceEndpointResponseBodyDomains) *GetInstanceEndpointResponseBody {
+	s.Domains = v
+	return s
+}
+
+func (s *GetInstanceEndpointResponseBody) SetEnable(v bool) *GetInstanceEndpointResponseBody {
+	s.Enable = &v
+	return s
+}
+
+func (s *GetInstanceEndpointResponseBody) SetIsSuccess(v bool) *GetInstanceEndpointResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetInstanceEndpointResponseBody) SetRequestId(v string) *GetInstanceEndpointResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *GetInstanceEndpointResponseBody) SetStatus(v string) *GetInstanceEndpointResponseBody {
+	s.Status = &v
+	return s
+}
+
+type GetInstanceEndpointResponseBodyAclEntries struct {
+	Comment *string `json:"Comment,omitempty" xml:"Comment,omitempty"`
+	Entry *string `json:"Entry,omitempty" xml:"Entry,omitempty"`
+}
+
+func (s GetInstanceEndpointResponseBodyAclEntries) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceEndpointResponseBodyAclEntries) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceEndpointResponseBodyAclEntries) SetComment(v string) *GetInstanceEndpointResponseBodyAclEntries {
+	s.Comment = &v
+	return s
+}
+
+func (s *GetInstanceEndpointResponseBodyAclEntries) SetEntry(v string) *GetInstanceEndpointResponseBodyAclEntries {
+	s.Entry = &v
+	return s
+}
+
+type GetInstanceEndpointResponseBodyDomains struct {
+	Domain *string `json:"Domain,omitempty" xml:"Domain,omitempty"`
+	Type *string `json:"Type,omitempty" xml:"Type,omitempty"`
+}
+
+func (s GetInstanceEndpointResponseBodyDomains) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceEndpointResponseBodyDomains) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceEndpointResponseBodyDomains) SetDomain(v string) *GetInstanceEndpointResponseBodyDomains {
+	s.Domain = &v
+	return s
+}
+
+func (s *GetInstanceEndpointResponseBodyDomains) SetType(v string) *GetInstanceEndpointResponseBodyDomains {
+	s.Type = &v
+	return s
+}
+
+type GetInstanceEndpointResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetInstanceEndpointResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetInstanceEndpointResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceEndpointResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceEndpointResponse) SetHeaders(v map[string]*string) *GetInstanceEndpointResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetInstanceEndpointResponse) SetBody(v *GetInstanceEndpointResponseBody) *GetInstanceEndpointResponse {
+	s.Body = v
+	return s
+}
+
+type GetInstanceUsageRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+}
+
+func (s GetInstanceUsageRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceUsageRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceUsageRequest) SetInstanceId(v string) *GetInstanceUsageRequest {
+	s.InstanceId = &v
+	return s
+}
+
+type GetInstanceUsageResponseBody struct {
+	ChartNamespaceQuota *string `json:"ChartNamespaceQuota,omitempty" xml:"ChartNamespaceQuota,omitempty"`
+	ChartNamespaceUsage *string `json:"ChartNamespaceUsage,omitempty" xml:"ChartNamespaceUsage,omitempty"`
+	ChartRepoQuota *string `json:"ChartRepoQuota,omitempty" xml:"ChartRepoQuota,omitempty"`
+	ChartRepoUsage *string `json:"ChartRepoUsage,omitempty" xml:"ChartRepoUsage,omitempty"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	NamespaceQuota *string `json:"NamespaceQuota,omitempty" xml:"NamespaceQuota,omitempty"`
+	NamespaceUsage *string `json:"NamespaceUsage,omitempty" xml:"NamespaceUsage,omitempty"`
+	RepoQuota *string `json:"RepoQuota,omitempty" xml:"RepoQuota,omitempty"`
+	RepoUsage *string `json:"RepoUsage,omitempty" xml:"RepoUsage,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s GetInstanceUsageResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceUsageResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceUsageResponseBody) SetChartNamespaceQuota(v string) *GetInstanceUsageResponseBody {
+	s.ChartNamespaceQuota = &v
+	return s
+}
+
+func (s *GetInstanceUsageResponseBody) SetChartNamespaceUsage(v string) *GetInstanceUsageResponseBody {
+	s.ChartNamespaceUsage = &v
+	return s
+}
+
+func (s *GetInstanceUsageResponseBody) SetChartRepoQuota(v string) *GetInstanceUsageResponseBody {
+	s.ChartRepoQuota = &v
+	return s
+}
+
+func (s *GetInstanceUsageResponseBody) SetChartRepoUsage(v string) *GetInstanceUsageResponseBody {
+	s.ChartRepoUsage = &v
+	return s
+}
+
+func (s *GetInstanceUsageResponseBody) SetCode(v string) *GetInstanceUsageResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetInstanceUsageResponseBody) SetIsSuccess(v bool) *GetInstanceUsageResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetInstanceUsageResponseBody) SetNamespaceQuota(v string) *GetInstanceUsageResponseBody {
+	s.NamespaceQuota = &v
+	return s
+}
+
+func (s *GetInstanceUsageResponseBody) SetNamespaceUsage(v string) *GetInstanceUsageResponseBody {
+	s.NamespaceUsage = &v
+	return s
+}
+
+func (s *GetInstanceUsageResponseBody) SetRepoQuota(v string) *GetInstanceUsageResponseBody {
+	s.RepoQuota = &v
+	return s
+}
+
+func (s *GetInstanceUsageResponseBody) SetRepoUsage(v string) *GetInstanceUsageResponseBody {
+	s.RepoUsage = &v
+	return s
+}
+
+func (s *GetInstanceUsageResponseBody) SetRequestId(v string) *GetInstanceUsageResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type GetInstanceUsageResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetInstanceUsageResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetInstanceUsageResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceUsageResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceUsageResponse) SetHeaders(v map[string]*string) *GetInstanceUsageResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetInstanceUsageResponse) SetBody(v *GetInstanceUsageResponseBody) *GetInstanceUsageResponse {
+	s.Body = v
+	return s
+}
+
+type GetInstanceVpcEndpointRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"`
+}
+
+func (s GetInstanceVpcEndpointRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceVpcEndpointRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceVpcEndpointRequest) SetInstanceId(v string) *GetInstanceVpcEndpointRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointRequest) SetModuleName(v string) *GetInstanceVpcEndpointRequest {
+	s.ModuleName = &v
+	return s
+}
+
+type GetInstanceVpcEndpointResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	Domains []*string `json:"Domains,omitempty" xml:"Domains,omitempty" type:"Repeated"`
+	Enable *bool `json:"Enable,omitempty" xml:"Enable,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	LinkedVpcs []*GetInstanceVpcEndpointResponseBodyLinkedVpcs `json:"LinkedVpcs,omitempty" xml:"LinkedVpcs,omitempty" type:"Repeated"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s GetInstanceVpcEndpointResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceVpcEndpointResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceVpcEndpointResponseBody) SetCode(v string) *GetInstanceVpcEndpointResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointResponseBody) SetDomains(v []*string) *GetInstanceVpcEndpointResponseBody {
+	s.Domains = v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointResponseBody) SetEnable(v bool) *GetInstanceVpcEndpointResponseBody {
+	s.Enable = &v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointResponseBody) SetIsSuccess(v bool) *GetInstanceVpcEndpointResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointResponseBody) SetLinkedVpcs(v []*GetInstanceVpcEndpointResponseBodyLinkedVpcs) *GetInstanceVpcEndpointResponseBody {
+	s.LinkedVpcs = v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointResponseBody) SetRequestId(v string) *GetInstanceVpcEndpointResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type GetInstanceVpcEndpointResponseBodyLinkedVpcs struct {
+	DefaultAccess *bool `json:"DefaultAccess,omitempty" xml:"DefaultAccess,omitempty"`
+	Ip *string `json:"Ip,omitempty" xml:"Ip,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+	VpcId *string `json:"VpcId,omitempty" xml:"VpcId,omitempty"`
+	VswitchId *string `json:"VswitchId,omitempty" xml:"VswitchId,omitempty"`
+}
+
+func (s GetInstanceVpcEndpointResponseBodyLinkedVpcs) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceVpcEndpointResponseBodyLinkedVpcs) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceVpcEndpointResponseBodyLinkedVpcs) SetDefaultAccess(v bool) *GetInstanceVpcEndpointResponseBodyLinkedVpcs {
+	s.DefaultAccess = &v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointResponseBodyLinkedVpcs) SetIp(v string) *GetInstanceVpcEndpointResponseBodyLinkedVpcs {
+	s.Ip = &v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointResponseBodyLinkedVpcs) SetStatus(v string) *GetInstanceVpcEndpointResponseBodyLinkedVpcs {
+	s.Status = &v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointResponseBodyLinkedVpcs) SetVpcId(v string) *GetInstanceVpcEndpointResponseBodyLinkedVpcs {
+	s.VpcId = &v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointResponseBodyLinkedVpcs) SetVswitchId(v string) *GetInstanceVpcEndpointResponseBodyLinkedVpcs {
+	s.VswitchId = &v
+	return s
+}
+
+type GetInstanceVpcEndpointResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetInstanceVpcEndpointResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetInstanceVpcEndpointResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetInstanceVpcEndpointResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetInstanceVpcEndpointResponse) SetHeaders(v map[string]*string) *GetInstanceVpcEndpointResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetInstanceVpcEndpointResponse) SetBody(v *GetInstanceVpcEndpointResponseBody) *GetInstanceVpcEndpointResponse {
+	s.Body = v
+	return s
+}
+
+type GetNamespaceRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	NamespaceId *string `json:"NamespaceId,omitempty" xml:"NamespaceId,omitempty"`
+	NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"`
+}
+
+func (s GetNamespaceRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetNamespaceRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetNamespaceRequest) SetInstanceId(v string) *GetNamespaceRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetNamespaceRequest) SetNamespaceId(v string) *GetNamespaceRequest {
+	s.NamespaceId = &v
+	return s
+}
+
+func (s *GetNamespaceRequest) SetNamespaceName(v string) *GetNamespaceRequest {
+	s.NamespaceName = &v
+	return s
+}
+
+type GetNamespaceResponseBody struct {
+	AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	NamespaceId *string `json:"NamespaceId,omitempty" xml:"NamespaceId,omitempty"`
+	NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"`
+	NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"`
+	Tags []*GetNamespaceResponseBodyTags `json:"Tags,omitempty" xml:"Tags,omitempty" type:"Repeated"`
+}
+
+func (s GetNamespaceResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetNamespaceResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetNamespaceResponseBody) SetAutoCreateRepo(v bool) *GetNamespaceResponseBody {
+	s.AutoCreateRepo = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBody) SetCode(v string) *GetNamespaceResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBody) SetDefaultRepoType(v string) *GetNamespaceResponseBody {
+	s.DefaultRepoType = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBody) SetInstanceId(v string) *GetNamespaceResponseBody {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBody) SetIsSuccess(v bool) *GetNamespaceResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBody) SetNamespaceId(v string) *GetNamespaceResponseBody {
+	s.NamespaceId = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBody) SetNamespaceName(v string) *GetNamespaceResponseBody {
+	s.NamespaceName = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBody) SetNamespaceStatus(v string) *GetNamespaceResponseBody {
+	s.NamespaceStatus = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBody) SetRequestId(v string) *GetNamespaceResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBody) SetResourceGroupId(v string) *GetNamespaceResponseBody {
+	s.ResourceGroupId = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBody) SetTags(v []*GetNamespaceResponseBodyTags) *GetNamespaceResponseBody {
+	s.Tags = v
+	return s
+}
+
+type GetNamespaceResponseBodyTags struct {
+	TagKey *string `json:"TagKey,omitempty" xml:"TagKey,omitempty"`
+	TagValue *string `json:"TagValue,omitempty" xml:"TagValue,omitempty"`
+}
+
+func (s GetNamespaceResponseBodyTags) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetNamespaceResponseBodyTags) GoString() string {
+	return s.String()
+}
+
+func (s *GetNamespaceResponseBodyTags) SetTagKey(v string) *GetNamespaceResponseBodyTags {
+	s.TagKey = &v
+	return s
+}
+
+func (s *GetNamespaceResponseBodyTags) SetTagValue(v string) *GetNamespaceResponseBodyTags {
+	s.TagValue = &v
+	return s
+}
+
+type GetNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetNamespaceResponse) SetHeaders(v map[string]*string) *GetNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetNamespaceResponse) SetBody(v *GetNamespaceResponseBody) *GetNamespaceResponse {
+	s.Body = v
+	return s
+}
+
+type GetRepoBuildRecordRequest struct {
+	BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+}
+
+func (s GetRepoBuildRecordRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildRecordRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildRecordRequest) SetBuildRecordId(v string) *GetRepoBuildRecordRequest {
+	s.BuildRecordId = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordRequest) SetInstanceId(v string) *GetRepoBuildRecordRequest {
+	s.InstanceId = &v
+	return s
+}
+
+type GetRepoBuildRecordResponseBody struct {
+	BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	EndTime *int64 `json:"EndTime,omitempty" xml:"EndTime,omitempty"`
+	Image *GetRepoBuildRecordResponseBodyImage `json:"Image,omitempty" xml:"Image,omitempty" type:"Struct"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	StartTime *int64 `json:"StartTime,omitempty" xml:"StartTime,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s GetRepoBuildRecordResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildRecordResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildRecordResponseBody) SetBuildRecordId(v string) *GetRepoBuildRecordResponseBody {
+	s.BuildRecordId = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordResponseBody) SetCode(v string) *GetRepoBuildRecordResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordResponseBody) SetEndTime(v int64) *GetRepoBuildRecordResponseBody {
+	s.EndTime = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordResponseBody) SetImage(v *GetRepoBuildRecordResponseBodyImage) *GetRepoBuildRecordResponseBody {
+	s.Image = v
+	return s
+}
+
+func (s *GetRepoBuildRecordResponseBody) SetIsSuccess(v bool) *GetRepoBuildRecordResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordResponseBody) SetRequestId(v string) *GetRepoBuildRecordResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordResponseBody) SetStartTime(v int64) *GetRepoBuildRecordResponseBody {
+	s.StartTime = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordResponseBody) SetStatus(v string) *GetRepoBuildRecordResponseBody {
+	s.Status = &v
+	return s
+}
+
+type GetRepoBuildRecordResponseBodyImage struct {
+	ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+}
+
+func (s GetRepoBuildRecordResponseBodyImage) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildRecordResponseBodyImage) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildRecordResponseBodyImage) SetImageTag(v string) *GetRepoBuildRecordResponseBodyImage {
+	s.ImageTag = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordResponseBodyImage) SetRepoName(v string) *GetRepoBuildRecordResponseBodyImage {
+	s.RepoName = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordResponseBodyImage) SetRepoNamespaceName(v string) *GetRepoBuildRecordResponseBodyImage {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+type GetRepoBuildRecordResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetRepoBuildRecordResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetRepoBuildRecordResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildRecordResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildRecordResponse) SetHeaders(v map[string]*string) *GetRepoBuildRecordResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetRepoBuildRecordResponse) SetBody(v *GetRepoBuildRecordResponseBody) *GetRepoBuildRecordResponse {
+	s.Body = v
+	return s
+}
+
+type GetRepoBuildRecordStatusRequest struct {
+	BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+}
+
+func (s GetRepoBuildRecordStatusRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildRecordStatusRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildRecordStatusRequest) SetBuildRecordId(v string) *GetRepoBuildRecordStatusRequest {
+	s.BuildRecordId = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordStatusRequest) SetInstanceId(v string) *GetRepoBuildRecordStatusRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordStatusRequest) SetRepoId(v string) *GetRepoBuildRecordStatusRequest {
+	s.RepoId = &v
+	return s
+}
+
+type GetRepoBuildRecordStatusResponseBody struct {
+	BuildStatus *string `json:"BuildStatus,omitempty" xml:"BuildStatus,omitempty"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s GetRepoBuildRecordStatusResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildRecordStatusResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildRecordStatusResponseBody) SetBuildStatus(v string) *GetRepoBuildRecordStatusResponseBody {
+	s.BuildStatus = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordStatusResponseBody) SetCode(v string) *GetRepoBuildRecordStatusResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordStatusResponseBody) SetIsSuccess(v bool) *GetRepoBuildRecordStatusResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetRepoBuildRecordStatusResponseBody) SetRequestId(v string) *GetRepoBuildRecordStatusResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type GetRepoBuildRecordStatusResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetRepoBuildRecordStatusResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetRepoBuildRecordStatusResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildRecordStatusResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildRecordStatusResponse) SetHeaders(v map[string]*string) *GetRepoBuildRecordStatusResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetRepoBuildRecordStatusResponse) SetBody(v *GetRepoBuildRecordStatusResponseBody) *GetRepoBuildRecordStatusResponse {
+	s.Body = v
+	return s
+}
+
+type GetRepoSourceCodeRepoRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+}
+
+func (s GetRepoSourceCodeRepoRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoSourceCodeRepoRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoSourceCodeRepoRequest) SetInstanceId(v string) *GetRepoSourceCodeRepoRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoRequest) SetRepoId(v string) *GetRepoSourceCodeRepoRequest {
+	s.RepoId = &v
+	return s
+}
+
+type GetRepoSourceCodeRepoResponseBody struct {
+	AutoBuild *string `json:"AutoBuild,omitempty" xml:"AutoBuild,omitempty"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	CodeRepoDomain *string `json:"CodeRepoDomain,omitempty" xml:"CodeRepoDomain,omitempty"`
+	CodeRepoName *string `json:"CodeRepoName,omitempty" xml:"CodeRepoName,omitempty"`
+	CodeRepoNamespaceName *string `json:"CodeRepoNamespaceName,omitempty" xml:"CodeRepoNamespaceName,omitempty"`
+	CodeRepoType *string `json:"CodeRepoType,omitempty" xml:"CodeRepoType,omitempty"`
+	DisableCacheBuild *string `json:"DisableCacheBuild,omitempty" xml:"DisableCacheBuild,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	OverseaBuild *string `json:"OverseaBuild,omitempty" xml:"OverseaBuild,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s GetRepoSourceCodeRepoResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoSourceCodeRepoResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetAutoBuild(v string) *GetRepoSourceCodeRepoResponseBody {
+	s.AutoBuild = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetCode(v string) *GetRepoSourceCodeRepoResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetCodeRepoDomain(v string) *GetRepoSourceCodeRepoResponseBody {
+	s.CodeRepoDomain = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetCodeRepoName(v string) *GetRepoSourceCodeRepoResponseBody {
+	s.CodeRepoName = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetCodeRepoNamespaceName(v string) *GetRepoSourceCodeRepoResponseBody {
+	s.CodeRepoNamespaceName = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetCodeRepoType(v string) *GetRepoSourceCodeRepoResponseBody {
+	s.CodeRepoType = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetDisableCacheBuild(v string) *GetRepoSourceCodeRepoResponseBody {
+	s.DisableCacheBuild = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetIsSuccess(v bool) *GetRepoSourceCodeRepoResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetOverseaBuild(v string) *GetRepoSourceCodeRepoResponseBody {
+	s.OverseaBuild = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetRepoId(v string) *GetRepoSourceCodeRepoResponseBody {
+	s.RepoId = &v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponseBody) SetRequestId(v string) *GetRepoSourceCodeRepoResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type GetRepoSourceCodeRepoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetRepoSourceCodeRepoResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetRepoSourceCodeRepoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoSourceCodeRepoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoSourceCodeRepoResponse) SetHeaders(v map[string]*string) *GetRepoSourceCodeRepoResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetRepoSourceCodeRepoResponse) SetBody(v *GetRepoSourceCodeRepoResponseBody) *GetRepoSourceCodeRepoResponse {
+	s.Body = v
+	return s
+}
+
+type GetRepoSyncTaskRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	SyncTaskId *string `json:"SyncTaskId,omitempty" xml:"SyncTaskId,omitempty"`
+}
+
+func (s GetRepoSyncTaskRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoSyncTaskRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoSyncTaskRequest) SetInstanceId(v string) *GetRepoSyncTaskRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskRequest) SetSyncTaskId(v string) *GetRepoSyncTaskRequest {
+	s.SyncTaskId = &v
+	return s
+}
+
+type GetRepoSyncTaskResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	CrossUser *bool `json:"CrossUser,omitempty" xml:"CrossUser,omitempty"`
+	ImageFrom *GetRepoSyncTaskResponseBodyImageFrom `json:"ImageFrom,omitempty" xml:"ImageFrom,omitempty" type:"Struct"`
+	ImageTo *GetRepoSyncTaskResponseBodyImageTo `json:"ImageTo,omitempty" xml:"ImageTo,omitempty" type:"Struct"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	LayerTasks []*GetRepoSyncTaskResponseBodyLayerTasks `json:"LayerTasks,omitempty" xml:"LayerTasks,omitempty" type:"Repeated"`
+	Progress *int64 `json:"Progress,omitempty" xml:"Progress,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	SyncBatchTaskId *string `json:"SyncBatchTaskId,omitempty" xml:"SyncBatchTaskId,omitempty"`
+	SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"`
+	SyncTaskId *string `json:"SyncTaskId,omitempty" xml:"SyncTaskId,omitempty"`
+	SyncTransAccelerate *bool `json:"SyncTransAccelerate,omitempty" xml:"SyncTransAccelerate,omitempty"`
+	SyncedSize *int64 `json:"SyncedSize,omitempty" xml:"SyncedSize,omitempty"`
+	TaskStatus *string `json:"TaskStatus,omitempty" xml:"TaskStatus,omitempty"`
+	TaskTrigger *string `json:"TaskTrigger,omitempty" xml:"TaskTrigger,omitempty"`
+}
+
+func (s GetRepoSyncTaskResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoSyncTaskResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetCode(v string) *GetRepoSyncTaskResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetCrossUser(v bool) *GetRepoSyncTaskResponseBody {
+	s.CrossUser = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetImageFrom(v *GetRepoSyncTaskResponseBodyImageFrom) *GetRepoSyncTaskResponseBody {
+	s.ImageFrom = v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetImageTo(v *GetRepoSyncTaskResponseBodyImageTo) *GetRepoSyncTaskResponseBody {
+	s.ImageTo = v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetIsSuccess(v bool) *GetRepoSyncTaskResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetLayerTasks(v []*GetRepoSyncTaskResponseBodyLayerTasks) *GetRepoSyncTaskResponseBody {
+	s.LayerTasks = v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetProgress(v int64) *GetRepoSyncTaskResponseBody {
+	s.Progress = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetRequestId(v string) *GetRepoSyncTaskResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetSyncBatchTaskId(v string) *GetRepoSyncTaskResponseBody {
+	s.SyncBatchTaskId = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetSyncRuleId(v string) *GetRepoSyncTaskResponseBody {
+	s.SyncRuleId = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetSyncTaskId(v string) *GetRepoSyncTaskResponseBody {
+	s.SyncTaskId = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetSyncTransAccelerate(v bool) *GetRepoSyncTaskResponseBody {
+	s.SyncTransAccelerate = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetSyncedSize(v int64) *GetRepoSyncTaskResponseBody {
+	s.SyncedSize = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetTaskStatus(v string) *GetRepoSyncTaskResponseBody {
+	s.TaskStatus = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBody) SetTaskTrigger(v string) *GetRepoSyncTaskResponseBody {
+	s.TaskTrigger = &v
+	return s
+}
+
+type GetRepoSyncTaskResponseBodyImageFrom struct {
+	ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+}
+
+func (s GetRepoSyncTaskResponseBodyImageFrom) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoSyncTaskResponseBodyImageFrom) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoSyncTaskResponseBodyImageFrom) SetImageTag(v string) *GetRepoSyncTaskResponseBodyImageFrom {
+	s.ImageTag = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyImageFrom) SetInstanceId(v string) *GetRepoSyncTaskResponseBodyImageFrom {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyImageFrom) SetRegionId(v string) *GetRepoSyncTaskResponseBodyImageFrom {
+	s.RegionId = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyImageFrom) SetRepoName(v string) *GetRepoSyncTaskResponseBodyImageFrom {
+	s.RepoName = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyImageFrom) SetRepoNamespaceName(v string) *GetRepoSyncTaskResponseBodyImageFrom {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+type GetRepoSyncTaskResponseBodyImageTo struct {
+	ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+}
+
+func (s GetRepoSyncTaskResponseBodyImageTo) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoSyncTaskResponseBodyImageTo) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoSyncTaskResponseBodyImageTo) SetImageTag(v string) *GetRepoSyncTaskResponseBodyImageTo {
+	s.ImageTag = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyImageTo) SetInstanceId(v string) *GetRepoSyncTaskResponseBodyImageTo {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyImageTo) SetRegionId(v string) *GetRepoSyncTaskResponseBodyImageTo {
+	s.RegionId = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyImageTo) SetRepoName(v string) *GetRepoSyncTaskResponseBodyImageTo {
+	s.RepoName = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyImageTo) SetRepoNamespaceName(v string) *GetRepoSyncTaskResponseBodyImageTo {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+type GetRepoSyncTaskResponseBodyLayerTasks struct {
+	ArtifactDigest *string `json:"ArtifactDigest,omitempty" xml:"ArtifactDigest,omitempty"`
+	Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"`
+	Size *int64 `json:"Size,omitempty" xml:"Size,omitempty"`
+	SyncLayerTaskId *string `json:"SyncLayerTaskId,omitempty" xml:"SyncLayerTaskId,omitempty"`
+	SyncedSize *int64 `json:"SyncedSize,omitempty" xml:"SyncedSize,omitempty"`
+	TaskStatus *string `json:"TaskStatus,omitempty" xml:"TaskStatus,omitempty"`
+}
+
+func (s GetRepoSyncTaskResponseBodyLayerTasks) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoSyncTaskResponseBodyLayerTasks) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetArtifactDigest(v string) *GetRepoSyncTaskResponseBodyLayerTasks {
+	s.ArtifactDigest = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetDigest(v string) *GetRepoSyncTaskResponseBodyLayerTasks {
+	s.Digest = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetSize(v int64) *GetRepoSyncTaskResponseBodyLayerTasks {
+	s.Size = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetSyncLayerTaskId(v string) *GetRepoSyncTaskResponseBodyLayerTasks {
+	s.SyncLayerTaskId = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetSyncedSize(v int64) *GetRepoSyncTaskResponseBodyLayerTasks {
+	s.SyncedSize = &v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetTaskStatus(v string) *GetRepoSyncTaskResponseBodyLayerTasks {
+	s.TaskStatus = &v
+	return s
+}
+
+type GetRepoSyncTaskResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetRepoSyncTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetRepoSyncTaskResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoSyncTaskResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoSyncTaskResponse) SetHeaders(v map[string]*string) *GetRepoSyncTaskResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetRepoSyncTaskResponse) SetBody(v *GetRepoSyncTaskResponseBody) *GetRepoSyncTaskResponse {
+	s.Body = v
+	return s
+}
+
+type GetRepoTagRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"`
+}
+
+func (s GetRepoTagRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagRequest) SetInstanceId(v string) *GetRepoTagRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetRepoTagRequest) SetRepoId(v string) *GetRepoTagRequest {
+	s.RepoId = &v
+	return s
+}
+
+func (s *GetRepoTagRequest) SetTag(v string) *GetRepoTagRequest {
+	s.Tag = &v
+	return s
+}
+
+type GetRepoTagResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"`
+	ImageCreate *int64 `json:"ImageCreate,omitempty" xml:"ImageCreate,omitempty"`
+	ImageId *string `json:"ImageId,omitempty" xml:"ImageId,omitempty"`
+	ImageSize *int64 `json:"ImageSize,omitempty" xml:"ImageSize,omitempty"`
+	ImageUpdate *int64 `json:"ImageUpdate,omitempty" xml:"ImageUpdate,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+	Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"`
+}
+
+func (s GetRepoTagResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagResponseBody) SetCode(v string) *GetRepoTagResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetDigest(v string) *GetRepoTagResponseBody {
+	s.Digest = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageCreate(v int64) *GetRepoTagResponseBody {
+	s.ImageCreate = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageId(v string) *GetRepoTagResponseBody {
+	s.ImageId = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageSize(v int64) *GetRepoTagResponseBody {
+	s.ImageSize = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageUpdate(v int64) *GetRepoTagResponseBody {
+	s.ImageUpdate = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetIsSuccess(v bool) *GetRepoTagResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetRequestId(v string) *GetRepoTagResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetStatus(v string) *GetRepoTagResponseBody {
+	s.Status = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetTag(v string) *GetRepoTagResponseBody {
+	s.Tag = &v
+	return s
+}
+
+type GetRepoTagResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetRepoTagResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagResponse) SetHeaders(v map[string]*string) *GetRepoTagResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetRepoTagResponse) SetBody(v *GetRepoTagResponseBody) *GetRepoTagResponse {
+	s.Body = v
+	return s
+}
+
+type GetRepoTagLayersRequest struct {
+	Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"`
+}
+
+func (s GetRepoTagLayersRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagLayersRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagLayersRequest) SetDigest(v string) *GetRepoTagLayersRequest {
+	s.Digest = &v
+	return s
+}
+
+func (s *GetRepoTagLayersRequest) SetInstanceId(v string) *GetRepoTagLayersRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetRepoTagLayersRequest) SetRepoId(v string) *GetRepoTagLayersRequest {
+	s.RepoId = &v
+	return s
+}
+
+func (s *GetRepoTagLayersRequest) SetTag(v string) *GetRepoTagLayersRequest {
+	s.Tag = &v
+	return s
+}
+
+type GetRepoTagLayersResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	Layers []*GetRepoTagLayersResponseBodyLayers `json:"Layers,omitempty" xml:"Layers,omitempty" type:"Repeated"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s GetRepoTagLayersResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagLayersResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagLayersResponseBody) SetCode(v string) *GetRepoTagLayersResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetRepoTagLayersResponseBody) SetIsSuccess(v bool) *GetRepoTagLayersResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetRepoTagLayersResponseBody) SetLayers(v []*GetRepoTagLayersResponseBodyLayers) *GetRepoTagLayersResponseBody {
+	s.Layers = v
+	return s
+}
+
+func (s *GetRepoTagLayersResponseBody) SetRequestId(v string) *GetRepoTagLayersResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type GetRepoTagLayersResponseBodyLayers struct {
+	BlobDigest *string `json:"BlobDigest,omitempty" xml:"BlobDigest,omitempty"`
+	BlobSize *int64 `json:"BlobSize,omitempty" xml:"BlobSize,omitempty"`
+	LayerCMD *string `json:"LayerCMD,omitempty" xml:"LayerCMD,omitempty"`
+	LayerIndex *int32 `json:"LayerIndex,omitempty" xml:"LayerIndex,omitempty"`
+	LayerInstruction *string `json:"LayerInstruction,omitempty" xml:"LayerInstruction,omitempty"`
+}
+
+func (s GetRepoTagLayersResponseBodyLayers) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagLayersResponseBodyLayers) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagLayersResponseBodyLayers) SetBlobDigest(v string) *GetRepoTagLayersResponseBodyLayers {
+	s.BlobDigest = &v
+	return s
+}
+
+func (s *GetRepoTagLayersResponseBodyLayers) SetBlobSize(v int64) *GetRepoTagLayersResponseBodyLayers {
+	s.BlobSize = &v
+	return s
+}
+
+func (s *GetRepoTagLayersResponseBodyLayers) SetLayerCMD(v string) *GetRepoTagLayersResponseBodyLayers {
+	s.LayerCMD = &v
+	return s
+}
+
+func (s *GetRepoTagLayersResponseBodyLayers) SetLayerIndex(v int32) *GetRepoTagLayersResponseBodyLayers {
+	s.LayerIndex = &v
+	return s
+}
+
+func (s *GetRepoTagLayersResponseBodyLayers) SetLayerInstruction(v string) *GetRepoTagLayersResponseBodyLayers {
+	s.LayerInstruction = &v
+	return s
+}
+
+type GetRepoTagLayersResponse struct {
+	Headers map[string]*string
`json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoTagLayersResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoTagLayersResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagLayersResponse) GoString() string { + return s.String() +} + +func (s *GetRepoTagLayersResponse) SetHeaders(v map[string]*string) *GetRepoTagLayersResponse { + s.Headers = v + return s +} + +func (s *GetRepoTagLayersResponse) SetBody(v *GetRepoTagLayersResponseBody) *GetRepoTagLayersResponse { + s.Body = v + return s +} + +type GetRepoTagManifestRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + SchemaVersion *int32 `json:"SchemaVersion,omitempty" xml:"SchemaVersion,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s GetRepoTagManifestRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestRequest) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestRequest) SetInstanceId(v string) *GetRepoTagManifestRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepoTagManifestRequest) SetRepoId(v string) *GetRepoTagManifestRequest { + s.RepoId = &v + return s +} + +func (s *GetRepoTagManifestRequest) SetSchemaVersion(v int32) *GetRepoTagManifestRequest { + s.SchemaVersion = &v + return s +} + +func (s *GetRepoTagManifestRequest) SetTag(v string) *GetRepoTagManifestRequest { + s.Tag = &v + return s +} + +type GetRepoTagManifestResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + Manifest *GetRepoTagManifestResponseBodyManifest `json:"Manifest,omitempty" xml:"Manifest,omitempty" type:"Struct"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s GetRepoTagManifestResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBody) SetCode(v string) *GetRepoTagManifestResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoTagManifestResponseBody) SetIsSuccess(v bool) *GetRepoTagManifestResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoTagManifestResponseBody) SetManifest(v *GetRepoTagManifestResponseBodyManifest) *GetRepoTagManifestResponseBody { + s.Manifest = v + return s +} + +func (s *GetRepoTagManifestResponseBody) SetRequestId(v string) *GetRepoTagManifestResponseBody { + s.RequestId = &v + return s +} + +type GetRepoTagManifestResponseBodyManifest struct { + Architecture *string `json:"Architecture,omitempty" xml:"Architecture,omitempty"` + Config *GetRepoTagManifestResponseBodyManifestConfig `json:"Config,omitempty" xml:"Config,omitempty" type:"Struct"` + FsLayers []*GetRepoTagManifestResponseBodyManifestFsLayers `json:"FsLayers,omitempty" xml:"FsLayers,omitempty" type:"Repeated"` + History []*GetRepoTagManifestResponseBodyManifestHistory `json:"History,omitempty" xml:"History,omitempty" type:"Repeated"` + Layers []*GetRepoTagManifestResponseBodyManifestLayers `json:"Layers,omitempty" xml:"Layers,omitempty" type:"Repeated"` + MediaType *string `json:"MediaType,omitempty" xml:"MediaType,omitempty"` + Name *string `json:"Name,omitempty" xml:"Name,omitempty"` + SchemaVersion *int32 `json:"SchemaVersion,omitempty" xml:"SchemaVersion,omitempty"` + Signatures 
[]*GetRepoTagManifestResponseBodyManifestSignatures `json:"Signatures,omitempty" xml:"Signatures,omitempty" type:"Repeated"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifest) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetArchitecture(v string) *GetRepoTagManifestResponseBodyManifest { + s.Architecture = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetConfig(v *GetRepoTagManifestResponseBodyManifestConfig) *GetRepoTagManifestResponseBodyManifest { + s.Config = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetFsLayers(v []*GetRepoTagManifestResponseBodyManifestFsLayers) *GetRepoTagManifestResponseBodyManifest { + s.FsLayers = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetHistory(v []*GetRepoTagManifestResponseBodyManifestHistory) *GetRepoTagManifestResponseBodyManifest { + s.History = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetLayers(v []*GetRepoTagManifestResponseBodyManifestLayers) *GetRepoTagManifestResponseBodyManifest { + s.Layers = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetMediaType(v string) *GetRepoTagManifestResponseBodyManifest { + s.MediaType = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetName(v string) *GetRepoTagManifestResponseBodyManifest { + s.Name = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetSchemaVersion(v int32) *GetRepoTagManifestResponseBodyManifest { + s.SchemaVersion = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetSignatures(v []*GetRepoTagManifestResponseBodyManifestSignatures) *GetRepoTagManifestResponseBodyManifest { + s.Signatures = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetTag(v string) *GetRepoTagManifestResponseBodyManifest { + s.Tag = &v + return s +} + +type GetRepoTagManifestResponseBodyManifestConfig struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + MediaType *string `json:"MediaType,omitempty" xml:"MediaType,omitempty"` + Size *int64 `json:"Size,omitempty" xml:"Size,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifestConfig) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifestConfig) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifestConfig) SetDigest(v string) *GetRepoTagManifestResponseBodyManifestConfig { + s.Digest = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestConfig) SetMediaType(v string) *GetRepoTagManifestResponseBodyManifestConfig { + s.MediaType = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestConfig) SetSize(v int64) *GetRepoTagManifestResponseBodyManifestConfig { + s.Size = &v + return s +} + +type GetRepoTagManifestResponseBodyManifestFsLayers struct { + BlobSum *string `json:"BlobSum,omitempty" xml:"BlobSum,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifestFsLayers) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifestFsLayers) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifestFsLayers) SetBlobSum(v string) *GetRepoTagManifestResponseBodyManifestFsLayers { + s.BlobSum = &v + return s +} + +type GetRepoTagManifestResponseBodyManifestHistory 
struct { + V1Compatibility map[string]interface{} `json:"V1Compatibility,omitempty" xml:"V1Compatibility,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifestHistory) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifestHistory) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifestHistory) SetV1Compatibility(v map[string]interface{}) *GetRepoTagManifestResponseBodyManifestHistory { + s.V1Compatibility = v + return s +} + +type GetRepoTagManifestResponseBodyManifestLayers struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + MediaType *string `json:"MediaType,omitempty" xml:"MediaType,omitempty"` + Size *int64 `json:"Size,omitempty" xml:"Size,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifestLayers) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifestLayers) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifestLayers) SetDigest(v string) *GetRepoTagManifestResponseBodyManifestLayers { + s.Digest = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestLayers) SetMediaType(v string) *GetRepoTagManifestResponseBodyManifestLayers { + s.MediaType = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestLayers) SetSize(v int64) *GetRepoTagManifestResponseBodyManifestLayers { + s.Size = &v + return s +} + +type GetRepoTagManifestResponseBodyManifestSignatures struct { + Header map[string]interface{} `json:"Header,omitempty" xml:"Header,omitempty"` + Protected *string `json:"Protected,omitempty" xml:"Protected,omitempty"` + Signature *string `json:"Signature,omitempty" xml:"Signature,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifestSignatures) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifestSignatures) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifestSignatures) SetHeader(v map[string]interface{}) *GetRepoTagManifestResponseBodyManifestSignatures { + s.Header = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestSignatures) SetProtected(v string) *GetRepoTagManifestResponseBodyManifestSignatures { + s.Protected = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestSignatures) SetSignature(v string) *GetRepoTagManifestResponseBodyManifestSignatures { + s.Signature = &v + return s +} + +type GetRepoTagManifestResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoTagManifestResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoTagManifestResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponse) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponse) SetHeaders(v map[string]*string) *GetRepoTagManifestResponse { + s.Headers = v + return s +} + +func (s *GetRepoTagManifestResponse) SetBody(v *GetRepoTagManifestResponseBody) *GetRepoTagManifestResponse { + s.Body = v + return s +} + +type GetRepoTagScanStatusRequest struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + ScanTaskId *string `json:"ScanTaskId,omitempty" xml:"ScanTaskId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` 
+} + +func (s GetRepoTagScanStatusRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanStatusRequest) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanStatusRequest) SetDigest(v string) *GetRepoTagScanStatusRequest { + s.Digest = &v + return s +} + +func (s *GetRepoTagScanStatusRequest) SetInstanceId(v string) *GetRepoTagScanStatusRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepoTagScanStatusRequest) SetRepoId(v string) *GetRepoTagScanStatusRequest { + s.RepoId = &v + return s +} + +func (s *GetRepoTagScanStatusRequest) SetScanTaskId(v string) *GetRepoTagScanStatusRequest { + s.ScanTaskId = &v + return s +} + +func (s *GetRepoTagScanStatusRequest) SetTag(v string) *GetRepoTagScanStatusRequest { + s.Tag = &v + return s +} + +type GetRepoTagScanStatusResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + ScanService *string `json:"ScanService,omitempty" xml:"ScanService,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` +} + +func (s GetRepoTagScanStatusResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanStatusResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanStatusResponseBody) SetCode(v string) *GetRepoTagScanStatusResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoTagScanStatusResponseBody) SetIsSuccess(v bool) *GetRepoTagScanStatusResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoTagScanStatusResponseBody) SetRequestId(v string) *GetRepoTagScanStatusResponseBody { + s.RequestId = &v + return s +} + +func (s *GetRepoTagScanStatusResponseBody) SetScanService(v string) *GetRepoTagScanStatusResponseBody { + s.ScanService = &v + return s +} + +func (s *GetRepoTagScanStatusResponseBody) SetStatus(v string) *GetRepoTagScanStatusResponseBody { + s.Status = &v + return s +} + +type GetRepoTagScanStatusResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoTagScanStatusResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoTagScanStatusResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanStatusResponse) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanStatusResponse) SetHeaders(v map[string]*string) *GetRepoTagScanStatusResponse { + s.Headers = v + return s +} + +func (s *GetRepoTagScanStatusResponse) SetBody(v *GetRepoTagScanStatusResponseBody) *GetRepoTagScanStatusResponse { + s.Body = v + return s +} + +type GetRepoTagScanSummaryRequest struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + ScanTaskId *string `json:"ScanTaskId,omitempty" xml:"ScanTaskId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s GetRepoTagScanSummaryRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanSummaryRequest) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanSummaryRequest) SetDigest(v string) *GetRepoTagScanSummaryRequest { + s.Digest = &v + return s +} + +func (s *GetRepoTagScanSummaryRequest) SetInstanceId(v string) *GetRepoTagScanSummaryRequest { + 
s.InstanceId = &v + return s +} + +func (s *GetRepoTagScanSummaryRequest) SetRepoId(v string) *GetRepoTagScanSummaryRequest { + s.RepoId = &v + return s +} + +func (s *GetRepoTagScanSummaryRequest) SetScanTaskId(v string) *GetRepoTagScanSummaryRequest { + s.ScanTaskId = &v + return s +} + +func (s *GetRepoTagScanSummaryRequest) SetTag(v string) *GetRepoTagScanSummaryRequest { + s.Tag = &v + return s +} + +type GetRepoTagScanSummaryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + HighSeverity *int32 `json:"HighSeverity,omitempty" xml:"HighSeverity,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + LowSeverity *int32 `json:"LowSeverity,omitempty" xml:"LowSeverity,omitempty"` + MediumSeverity *int32 `json:"MediumSeverity,omitempty" xml:"MediumSeverity,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalSeverity *int32 `json:"TotalSeverity,omitempty" xml:"TotalSeverity,omitempty"` + UnknownSeverity *int32 `json:"UnknownSeverity,omitempty" xml:"UnknownSeverity,omitempty"` +} + +func (s GetRepoTagScanSummaryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanSummaryResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanSummaryResponseBody) SetCode(v string) *GetRepoTagScanSummaryResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetHighSeverity(v int32) *GetRepoTagScanSummaryResponseBody { + s.HighSeverity = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetIsSuccess(v bool) *GetRepoTagScanSummaryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetLowSeverity(v int32) *GetRepoTagScanSummaryResponseBody { + s.LowSeverity = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetMediumSeverity(v int32) *GetRepoTagScanSummaryResponseBody { + s.MediumSeverity = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetRequestId(v string) *GetRepoTagScanSummaryResponseBody { + s.RequestId = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetTotalSeverity(v int32) *GetRepoTagScanSummaryResponseBody { + s.TotalSeverity = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetUnknownSeverity(v int32) *GetRepoTagScanSummaryResponseBody { + s.UnknownSeverity = &v + return s +} + +type GetRepoTagScanSummaryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoTagScanSummaryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoTagScanSummaryResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanSummaryResponse) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanSummaryResponse) SetHeaders(v map[string]*string) *GetRepoTagScanSummaryResponse { + s.Headers = v + return s +} + +func (s *GetRepoTagScanSummaryResponse) SetBody(v *GetRepoTagScanSummaryResponseBody) *GetRepoTagScanSummaryResponse { + s.Body = v + return s +} + +type GetRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s GetRepositoryRequest) String() string { + return 
tea.Prettify(s) +} + +func (s GetRepositoryRequest) GoString() string { + return s.String() +} + +func (s *GetRepositoryRequest) SetInstanceId(v string) *GetRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepositoryRequest) SetRepoId(v string) *GetRepositoryRequest { + s.RepoId = &v + return s +} + +func (s *GetRepositoryRequest) SetRepoName(v string) *GetRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *GetRepositoryRequest) SetRepoNamespaceName(v string) *GetRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +type GetRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + Detail *string `json:"Detail,omitempty" xml:"Detail,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + RepoBuildType *string `json:"RepoBuildType,omitempty" xml:"RepoBuildType,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` + TagImmutability *bool `json:"TagImmutability,omitempty" xml:"TagImmutability,omitempty"` +} + +func (s GetRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepositoryResponseBody) SetCode(v string) *GetRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *GetRepositoryResponseBody) SetCreateTime(v int64) *GetRepositoryResponseBody { + s.CreateTime = &v + return s +} + +func (s *GetRepositoryResponseBody) SetDetail(v string) *GetRepositoryResponseBody { + s.Detail = &v + return s +} + +func (s *GetRepositoryResponseBody) SetInstanceId(v string) *GetRepositoryResponseBody { + s.InstanceId = &v + return s +} + +func (s *GetRepositoryResponseBody) SetIsSuccess(v bool) *GetRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepositoryResponseBody) SetModifiedTime(v int64) *GetRepositoryResponseBody { + s.ModifiedTime = &v + return s +} + +func (s *GetRepositoryResponseBody) SetRepoBuildType(v string) *GetRepositoryResponseBody { + s.RepoBuildType = &v + return s +} + +func (s *GetRepositoryResponseBody) SetRepoId(v string) *GetRepositoryResponseBody { + s.RepoId = &v + return s +} + +func (s *GetRepositoryResponseBody) SetRepoName(v string) *GetRepositoryResponseBody { + s.RepoName = &v + return s +} + +func (s *GetRepositoryResponseBody) SetRepoNamespaceName(v string) *GetRepositoryResponseBody { + s.RepoNamespaceName = &v + return s +} + +func (s *GetRepositoryResponseBody) SetRepoStatus(v string) *GetRepositoryResponseBody { + s.RepoStatus = &v + return s +} + +func (s *GetRepositoryResponseBody) SetRepoType(v string) *GetRepositoryResponseBody { + s.RepoType = &v + return s +} + +func (s *GetRepositoryResponseBody) 
SetRequestId(v string) *GetRepositoryResponseBody { + s.RequestId = &v + return s +} + +func (s *GetRepositoryResponseBody) SetResourceGroupId(v string) *GetRepositoryResponseBody { + s.ResourceGroupId = &v + return s +} + +func (s *GetRepositoryResponseBody) SetSummary(v string) *GetRepositoryResponseBody { + s.Summary = &v + return s +} + +func (s *GetRepositoryResponseBody) SetTagImmutability(v bool) *GetRepositoryResponseBody { + s.TagImmutability = &v + return s +} + +type GetRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepositoryResponse) GoString() string { + return s.String() +} + +func (s *GetRepositoryResponse) SetHeaders(v map[string]*string) *GetRepositoryResponse { + s.Headers = v + return s +} + +func (s *GetRepositoryResponse) SetBody(v *GetRepositoryResponseBody) *GetRepositoryResponse { + s.Body = v + return s +} + +type ListArtifactBuildTaskLogRequest struct { + BuildTaskId *string `json:"BuildTaskId,omitempty" xml:"BuildTaskId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` +} + +func (s ListArtifactBuildTaskLogRequest) String() string { + return tea.Prettify(s) +} + +func (s ListArtifactBuildTaskLogRequest) GoString() string { + return s.String() +} + +func (s *ListArtifactBuildTaskLogRequest) SetBuildTaskId(v string) *ListArtifactBuildTaskLogRequest { + s.BuildTaskId = &v + return s +} + +func (s *ListArtifactBuildTaskLogRequest) SetInstanceId(v string) *ListArtifactBuildTaskLogRequest { + s.InstanceId = &v + return s +} + +func (s *ListArtifactBuildTaskLogRequest) SetPage(v int32) *ListArtifactBuildTaskLogRequest { + s.Page = &v + return s +} + +func (s *ListArtifactBuildTaskLogRequest) SetPageSize(v int32) *ListArtifactBuildTaskLogRequest { + s.PageSize = &v + return s +} + +type ListArtifactBuildTaskLogResponseBody struct { + BuildTaskLogs []*ListArtifactBuildTaskLogResponseBodyBuildTaskLogs `json:"BuildTaskLogs,omitempty" xml:"BuildTaskLogs,omitempty" type:"Repeated"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListArtifactBuildTaskLogResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListArtifactBuildTaskLogResponseBody) GoString() string { + return s.String() +} + +func (s *ListArtifactBuildTaskLogResponseBody) SetBuildTaskLogs(v []*ListArtifactBuildTaskLogResponseBodyBuildTaskLogs) *ListArtifactBuildTaskLogResponseBody { + s.BuildTaskLogs = v + return s +} + +func (s *ListArtifactBuildTaskLogResponseBody) SetCode(v string) *ListArtifactBuildTaskLogResponseBody { + s.Code = &v + return s +} + +func (s *ListArtifactBuildTaskLogResponseBody) SetIsSuccess(v bool) *ListArtifactBuildTaskLogResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListArtifactBuildTaskLogResponseBody) SetRequestId(v string) *ListArtifactBuildTaskLogResponseBody { + s.RequestId = &v + return s +} + +func (s *ListArtifactBuildTaskLogResponseBody) SetTotalCount(v int32) 
*ListArtifactBuildTaskLogResponseBody { + s.TotalCount = &v + return s +} + +type ListArtifactBuildTaskLogResponseBodyBuildTaskLogs struct { + LineNumber *int32 `json:"LineNumber,omitempty" xml:"LineNumber,omitempty"` + Message *string `json:"Message,omitempty" xml:"Message,omitempty"` +} + +func (s ListArtifactBuildTaskLogResponseBodyBuildTaskLogs) String() string { + return tea.Prettify(s) +} + +func (s ListArtifactBuildTaskLogResponseBodyBuildTaskLogs) GoString() string { + return s.String() +} + +func (s *ListArtifactBuildTaskLogResponseBodyBuildTaskLogs) SetLineNumber(v int32) *ListArtifactBuildTaskLogResponseBodyBuildTaskLogs { + s.LineNumber = &v + return s +} + +func (s *ListArtifactBuildTaskLogResponseBodyBuildTaskLogs) SetMessage(v string) *ListArtifactBuildTaskLogResponseBodyBuildTaskLogs { + s.Message = &v + return s +} + +type ListArtifactBuildTaskLogResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListArtifactBuildTaskLogResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListArtifactBuildTaskLogResponse) String() string { + return tea.Prettify(s) +} + +func (s ListArtifactBuildTaskLogResponse) GoString() string { + return s.String() +} + +func (s *ListArtifactBuildTaskLogResponse) SetHeaders(v map[string]*string) *ListArtifactBuildTaskLogResponse { + s.Headers = v + return s +} + +func (s *ListArtifactBuildTaskLogResponse) SetBody(v *ListArtifactBuildTaskLogResponseBody) *ListArtifactBuildTaskLogResponse { + s.Body = v + return s +} + +type ListChainRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s ListChainRequest) String() string { + return tea.Prettify(s) +} + +func (s ListChainRequest) GoString() string { + return s.String() +} + +func (s *ListChainRequest) SetInstanceId(v string) *ListChainRequest { + s.InstanceId = &v + return s +} + +func (s *ListChainRequest) SetPageNo(v int32) *ListChainRequest { + s.PageNo = &v + return s +} + +func (s *ListChainRequest) SetPageSize(v int32) *ListChainRequest { + s.PageSize = &v + return s +} + +func (s *ListChainRequest) SetRepoName(v string) *ListChainRequest { + s.RepoName = &v + return s +} + +func (s *ListChainRequest) SetRepoNamespaceName(v string) *ListChainRequest { + s.RepoNamespaceName = &v + return s +} + +type ListChainResponseBody struct { + Chains []*ListChainResponseBodyChains `json:"Chains,omitempty" xml:"Chains,omitempty" type:"Repeated"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListChainResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListChainResponseBody) GoString() string { + return s.String() +} + +func (s *ListChainResponseBody) SetChains(v []*ListChainResponseBodyChains) *ListChainResponseBody { + s.Chains = v + return s +} + +func (s 
*ListChainResponseBody) SetCode(v string) *ListChainResponseBody { + s.Code = &v + return s +} + +func (s *ListChainResponseBody) SetIsSuccess(v bool) *ListChainResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListChainResponseBody) SetPageNo(v int32) *ListChainResponseBody { + s.PageNo = &v + return s +} + +func (s *ListChainResponseBody) SetPageSize(v int32) *ListChainResponseBody { + s.PageSize = &v + return s +} + +func (s *ListChainResponseBody) SetRequestId(v string) *ListChainResponseBody { + s.RequestId = &v + return s +} + +func (s *ListChainResponseBody) SetTotalCount(v int32) *ListChainResponseBody { + s.TotalCount = &v + return s +} + +type ListChainResponseBodyChains struct { + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + Description *string `json:"Description,omitempty" xml:"Description,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + Name *string `json:"Name,omitempty" xml:"Name,omitempty"` + ScopeId *string `json:"ScopeId,omitempty" xml:"ScopeId,omitempty"` + ScopeType *string `json:"ScopeType,omitempty" xml:"ScopeType,omitempty"` +} + +func (s ListChainResponseBodyChains) String() string { + return tea.Prettify(s) +} + +func (s ListChainResponseBodyChains) GoString() string { + return s.String() +} + +func (s *ListChainResponseBodyChains) SetChainId(v string) *ListChainResponseBodyChains { + s.ChainId = &v + return s +} + +func (s *ListChainResponseBodyChains) SetCreateTime(v int64) *ListChainResponseBodyChains { + s.CreateTime = &v + return s +} + +func (s *ListChainResponseBodyChains) SetDescription(v string) *ListChainResponseBodyChains { + s.Description = &v + return s +} + +func (s *ListChainResponseBodyChains) SetInstanceId(v string) *ListChainResponseBodyChains { + s.InstanceId = &v + return s +} + +func (s *ListChainResponseBodyChains) SetModifiedTime(v int64) *ListChainResponseBodyChains { + s.ModifiedTime = &v + return s +} + +func (s *ListChainResponseBodyChains) SetName(v string) *ListChainResponseBodyChains { + s.Name = &v + return s +} + +func (s *ListChainResponseBodyChains) SetScopeId(v string) *ListChainResponseBodyChains { + s.ScopeId = &v + return s +} + +func (s *ListChainResponseBodyChains) SetScopeType(v string) *ListChainResponseBodyChains { + s.ScopeType = &v + return s +} + +type ListChainResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListChainResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListChainResponse) String() string { + return tea.Prettify(s) +} + +func (s ListChainResponse) GoString() string { + return s.String() +} + +func (s *ListChainResponse) SetHeaders(v map[string]*string) *ListChainResponse { + s.Headers = v + return s +} + +func (s *ListChainResponse) SetBody(v *ListChainResponseBody) *ListChainResponse { + s.Body = v + return s +} + +type ListChainInstanceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s 
ListChainInstanceRequest) String() string { + return tea.Prettify(s) +} + +func (s ListChainInstanceRequest) GoString() string { + return s.String() +} + +func (s *ListChainInstanceRequest) SetInstanceId(v string) *ListChainInstanceRequest { + s.InstanceId = &v + return s +} + +func (s *ListChainInstanceRequest) SetPageNo(v int32) *ListChainInstanceRequest { + s.PageNo = &v + return s +} + +func (s *ListChainInstanceRequest) SetPageSize(v int32) *ListChainInstanceRequest { + s.PageSize = &v + return s +} + +func (s *ListChainInstanceRequest) SetRepoName(v string) *ListChainInstanceRequest { + s.RepoName = &v + return s +} + +func (s *ListChainInstanceRequest) SetRepoNamespaceName(v string) *ListChainInstanceRequest { + s.RepoNamespaceName = &v + return s +} + +type ListChainInstanceResponseBody struct { + ChainInstances []*ListChainInstanceResponseBodyChainInstances `json:"ChainInstances,omitempty" xml:"ChainInstances,omitempty" type:"Repeated"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListChainInstanceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListChainInstanceResponseBody) GoString() string { + return s.String() +} + +func (s *ListChainInstanceResponseBody) SetChainInstances(v []*ListChainInstanceResponseBodyChainInstances) *ListChainInstanceResponseBody { + s.ChainInstances = v + return s +} + +func (s *ListChainInstanceResponseBody) SetCode(v string) *ListChainInstanceResponseBody { + s.Code = &v + return s +} + +func (s *ListChainInstanceResponseBody) SetInstanceId(v string) *ListChainInstanceResponseBody { + s.InstanceId = &v + return s +} + +func (s *ListChainInstanceResponseBody) SetIsSuccess(v bool) *ListChainInstanceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListChainInstanceResponseBody) SetPageNo(v int32) *ListChainInstanceResponseBody { + s.PageNo = &v + return s +} + +func (s *ListChainInstanceResponseBody) SetPageSize(v int32) *ListChainInstanceResponseBody { + s.PageSize = &v + return s +} + +func (s *ListChainInstanceResponseBody) SetRequestId(v string) *ListChainInstanceResponseBody { + s.RequestId = &v + return s +} + +func (s *ListChainInstanceResponseBody) SetTotalCount(v int32) *ListChainInstanceResponseBody { + s.TotalCount = &v + return s +} + +type ListChainInstanceResponseBodyChainInstances struct { + Chain *ListChainInstanceResponseBodyChainInstancesChain `json:"Chain,omitempty" xml:"Chain,omitempty" type:"Struct"` + ChainInstanceId *string `json:"ChainInstanceId,omitempty" xml:"ChainInstanceId,omitempty"` + EndTime *int64 `json:"EndTime,omitempty" xml:"EndTime,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + Result *string `json:"Result,omitempty" xml:"Result,omitempty"` + StartTime *int64 `json:"StartTime,omitempty" xml:"StartTime,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` +} + +func (s ListChainInstanceResponseBodyChainInstances) String() string { + return tea.Prettify(s) +} + +func (s 
ListChainInstanceResponseBodyChainInstances) GoString() string { + return s.String() +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetChain(v *ListChainInstanceResponseBodyChainInstancesChain) *ListChainInstanceResponseBodyChainInstances { + s.Chain = v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetChainInstanceId(v string) *ListChainInstanceResponseBodyChainInstances { + s.ChainInstanceId = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetEndTime(v int64) *ListChainInstanceResponseBodyChainInstances { + s.EndTime = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetRepoName(v string) *ListChainInstanceResponseBodyChainInstances { + s.RepoName = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetRepoNamespaceName(v string) *ListChainInstanceResponseBodyChainInstances { + s.RepoNamespaceName = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetResult(v string) *ListChainInstanceResponseBodyChainInstances { + s.Result = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetStartTime(v int64) *ListChainInstanceResponseBodyChainInstances { + s.StartTime = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetStatus(v string) *ListChainInstanceResponseBodyChainInstances { + s.Status = &v + return s +} + +type ListChainInstanceResponseBodyChainInstancesChain struct { + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + ChainName *string `json:"ChainName,omitempty" xml:"ChainName,omitempty"` + Version *int64 `json:"Version,omitempty" xml:"Version,omitempty"` +} + +func (s ListChainInstanceResponseBodyChainInstancesChain) String() string { + return tea.Prettify(s) +} + +func (s ListChainInstanceResponseBodyChainInstancesChain) GoString() string { + return s.String() +} + +func (s *ListChainInstanceResponseBodyChainInstancesChain) SetChainId(v string) *ListChainInstanceResponseBodyChainInstancesChain { + s.ChainId = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstancesChain) SetChainName(v string) *ListChainInstanceResponseBodyChainInstancesChain { + s.ChainName = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstancesChain) SetVersion(v int64) *ListChainInstanceResponseBodyChainInstancesChain { + s.Version = &v + return s +} + +type ListChainInstanceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListChainInstanceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListChainInstanceResponse) String() string { + return tea.Prettify(s) +} + +func (s ListChainInstanceResponse) GoString() string { + return s.String() +} + +func (s *ListChainInstanceResponse) SetHeaders(v map[string]*string) *ListChainInstanceResponse { + s.Headers = v + return s +} + +func (s *ListChainInstanceResponse) SetBody(v *ListChainInstanceResponseBody) *ListChainInstanceResponse { + s.Body = v + return s +} + +type ListChartRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s ListChartRequest) String() string { + return 
tea.Prettify(s) +} + +func (s ListChartRequest) GoString() string { + return s.String() +} + +func (s *ListChartRequest) SetInstanceId(v string) *ListChartRequest { + s.InstanceId = &v + return s +} + +func (s *ListChartRequest) SetPageNo(v int32) *ListChartRequest { + s.PageNo = &v + return s +} + +func (s *ListChartRequest) SetPageSize(v int32) *ListChartRequest { + s.PageSize = &v + return s +} + +func (s *ListChartRequest) SetRepoName(v string) *ListChartRequest { + s.RepoName = &v + return s +} + +func (s *ListChartRequest) SetRepoNamespaceName(v string) *ListChartRequest { + s.RepoNamespaceName = &v + return s +} + +type ListChartResponseBody struct { + Charts []*ListChartResponseBodyCharts `json:"Charts,omitempty" xml:"Charts,omitempty" type:"Repeated"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListChartResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListChartResponseBody) GoString() string { + return s.String() +} + +func (s *ListChartResponseBody) SetCharts(v []*ListChartResponseBodyCharts) *ListChartResponseBody { + s.Charts = v + return s +} + +func (s *ListChartResponseBody) SetCode(v string) *ListChartResponseBody { + s.Code = &v + return s +} + +func (s *ListChartResponseBody) SetIsSuccess(v bool) *ListChartResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListChartResponseBody) SetPageNo(v int32) *ListChartResponseBody { + s.PageNo = &v + return s +} + +func (s *ListChartResponseBody) SetPageSize(v int32) *ListChartResponseBody { + s.PageSize = &v + return s +} + +func (s *ListChartResponseBody) SetRequestId(v string) *ListChartResponseBody { + s.RequestId = &v + return s +} + +func (s *ListChartResponseBody) SetTotalCount(v int32) *ListChartResponseBody { + s.TotalCount = &v + return s +} + +type ListChartResponseBodyCharts struct { + Chart *string `json:"Chart,omitempty" xml:"Chart,omitempty"` + CreateTime *string `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` +} + +func (s ListChartResponseBodyCharts) String() string { + return tea.Prettify(s) +} + +func (s ListChartResponseBodyCharts) GoString() string { + return s.String() +} + +func (s *ListChartResponseBodyCharts) SetChart(v string) *ListChartResponseBodyCharts { + s.Chart = &v + return s +} + +func (s *ListChartResponseBodyCharts) SetCreateTime(v string) *ListChartResponseBodyCharts { + s.CreateTime = &v + return s +} + +func (s *ListChartResponseBodyCharts) SetInstanceId(v string) *ListChartResponseBodyCharts { + s.InstanceId = &v + return s +} + +func (s *ListChartResponseBodyCharts) SetModifiedTime(v int64) *ListChartResponseBodyCharts { + s.ModifiedTime = &v + return s +} + +func (s *ListChartResponseBodyCharts) SetRepoId(v string) *ListChartResponseBodyCharts { + s.RepoId = &v + return s +} + +func (s *ListChartResponseBodyCharts) SetStatus(v string) *ListChartResponseBodyCharts { + s.Status = &v 
+ return s +} + +type ListChartResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListChartResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListChartResponse) String() string { + return tea.Prettify(s) +} + +func (s ListChartResponse) GoString() string { + return s.String() +} + +func (s *ListChartResponse) SetHeaders(v map[string]*string) *ListChartResponse { + s.Headers = v + return s +} + +func (s *ListChartResponse) SetBody(v *ListChartResponseBody) *ListChartResponse { + s.Body = v + return s +} + +type ListChartNamespaceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` +} + +func (s ListChartNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s ListChartNamespaceRequest) GoString() string { + return s.String() +} + +func (s *ListChartNamespaceRequest) SetInstanceId(v string) *ListChartNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *ListChartNamespaceRequest) SetNamespaceName(v string) *ListChartNamespaceRequest { + s.NamespaceName = &v + return s +} + +func (s *ListChartNamespaceRequest) SetNamespaceStatus(v string) *ListChartNamespaceRequest { + s.NamespaceStatus = &v + return s +} + +func (s *ListChartNamespaceRequest) SetPageNo(v int32) *ListChartNamespaceRequest { + s.PageNo = &v + return s +} + +func (s *ListChartNamespaceRequest) SetPageSize(v int32) *ListChartNamespaceRequest { + s.PageSize = &v + return s +} + +type ListChartNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + Namespaces []*ListChartNamespaceResponseBodyNamespaces `json:"Namespaces,omitempty" xml:"Namespaces,omitempty" type:"Repeated"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListChartNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListChartNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *ListChartNamespaceResponseBody) SetCode(v string) *ListChartNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *ListChartNamespaceResponseBody) SetIsSuccess(v bool) *ListChartNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListChartNamespaceResponseBody) SetNamespaces(v []*ListChartNamespaceResponseBodyNamespaces) *ListChartNamespaceResponseBody { + s.Namespaces = v + return s +} + +func (s *ListChartNamespaceResponseBody) SetPageNo(v int32) *ListChartNamespaceResponseBody { + s.PageNo = &v + return s +} + +func (s *ListChartNamespaceResponseBody) SetPageSize(v int32) *ListChartNamespaceResponseBody { + s.PageSize = &v + return s +} + +func (s *ListChartNamespaceResponseBody) SetRequestId(v string) *ListChartNamespaceResponseBody { + s.RequestId = &v + return s +} + +func (s *ListChartNamespaceResponseBody) SetTotalCount(v string) *ListChartNamespaceResponseBody { + 
s.TotalCount = &v + return s +} + +type ListChartNamespaceResponseBodyNamespaces struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceId *string `json:"NamespaceId,omitempty" xml:"NamespaceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s ListChartNamespaceResponseBodyNamespaces) String() string { + return tea.Prettify(s) +} + +func (s ListChartNamespaceResponseBodyNamespaces) GoString() string { + return s.String() +} + +func (s *ListChartNamespaceResponseBodyNamespaces) SetAutoCreateRepo(v bool) *ListChartNamespaceResponseBodyNamespaces { + s.AutoCreateRepo = &v + return s +} + +func (s *ListChartNamespaceResponseBodyNamespaces) SetDefaultRepoType(v string) *ListChartNamespaceResponseBodyNamespaces { + s.DefaultRepoType = &v + return s +} + +func (s *ListChartNamespaceResponseBodyNamespaces) SetInstanceId(v string) *ListChartNamespaceResponseBodyNamespaces { + s.InstanceId = &v + return s +} + +func (s *ListChartNamespaceResponseBodyNamespaces) SetNamespaceId(v string) *ListChartNamespaceResponseBodyNamespaces { + s.NamespaceId = &v + return s +} + +func (s *ListChartNamespaceResponseBodyNamespaces) SetNamespaceName(v string) *ListChartNamespaceResponseBodyNamespaces { + s.NamespaceName = &v + return s +} + +func (s *ListChartNamespaceResponseBodyNamespaces) SetNamespaceStatus(v string) *ListChartNamespaceResponseBodyNamespaces { + s.NamespaceStatus = &v + return s +} + +func (s *ListChartNamespaceResponseBodyNamespaces) SetResourceGroupId(v string) *ListChartNamespaceResponseBodyNamespaces { + s.ResourceGroupId = &v + return s +} + +type ListChartNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListChartNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListChartNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s ListChartNamespaceResponse) GoString() string { + return s.String() +} + +func (s *ListChartNamespaceResponse) SetHeaders(v map[string]*string) *ListChartNamespaceResponse { + s.Headers = v + return s +} + +func (s *ListChartNamespaceResponse) SetBody(v *ListChartNamespaceResponseBody) *ListChartNamespaceResponse { + s.Body = v + return s +} + +type ListChartReleaseRequest struct { + Chart *string `json:"Chart,omitempty" xml:"Chart,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s ListChartReleaseRequest) String() string { + return tea.Prettify(s) +} + +func (s ListChartReleaseRequest) GoString() string { + return s.String() +} + +func (s *ListChartReleaseRequest) SetChart(v string) *ListChartReleaseRequest { + s.Chart = &v + return s +} + +func (s *ListChartReleaseRequest) SetInstanceId(v string) 
*ListChartReleaseRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartReleaseRequest) SetPageNo(v int32) *ListChartReleaseRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChartReleaseRequest) SetPageSize(v int32) *ListChartReleaseRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChartReleaseRequest) SetRepoName(v string) *ListChartReleaseRequest {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListChartReleaseRequest) SetRepoNamespaceName(v string) *ListChartReleaseRequest {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+type ListChartReleaseResponseBody struct {
+	ChartReleases []*ListChartReleaseResponseBodyChartReleases `json:"ChartReleases,omitempty" xml:"ChartReleases,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListChartReleaseResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartReleaseResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartReleaseResponseBody) SetChartReleases(v []*ListChartReleaseResponseBodyChartReleases) *ListChartReleaseResponseBody {
+	s.ChartReleases = v
+	return s
+}
+
+func (s *ListChartReleaseResponseBody) SetCode(v string) *ListChartReleaseResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBody) SetIsSuccess(v bool) *ListChartReleaseResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBody) SetPageNo(v int32) *ListChartReleaseResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBody) SetPageSize(v int32) *ListChartReleaseResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBody) SetRequestId(v string) *ListChartReleaseResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBody) SetTotalCount(v string) *ListChartReleaseResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListChartReleaseResponseBodyChartReleases struct {
+	Chart *string `json:"Chart,omitempty" xml:"Chart,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"`
+	Release *string `json:"Release,omitempty" xml:"Release,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	Size *string `json:"Size,omitempty" xml:"Size,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s ListChartReleaseResponseBodyChartReleases) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartReleaseResponseBodyChartReleases) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetChart(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.Chart = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetInstanceId(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetModifiedTime(v int64) *ListChartReleaseResponseBodyChartReleases {
+	s.ModifiedTime = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetRelease(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.Release = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetRepoId(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.RepoId = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetSize(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.Size = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetStatus(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.Status = &v
+	return s
+}
+
+type ListChartReleaseResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListChartReleaseResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListChartReleaseResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartReleaseResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartReleaseResponse) SetHeaders(v map[string]*string) *ListChartReleaseResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListChartReleaseResponse) SetBody(v *ListChartReleaseResponseBody) *ListChartReleaseResponse {
+	s.Body = v
+	return s
+}
+
+type ListChartRepositoryRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+	RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"`
+}
+
+func (s ListChartRepositoryRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartRepositoryRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartRepositoryRequest) SetInstanceId(v string) *ListChartRepositoryRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartRepositoryRequest) SetPageNo(v int32) *ListChartRepositoryRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChartRepositoryRequest) SetPageSize(v int32) *ListChartRepositoryRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChartRepositoryRequest) SetRepoName(v string) *ListChartRepositoryRequest {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListChartRepositoryRequest) SetRepoNamespaceName(v string) *ListChartRepositoryRequest {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+func (s *ListChartRepositoryRequest) SetRepoStatus(v string) *ListChartRepositoryRequest {
+	s.RepoStatus = &v
+	return s
+}
+
+type ListChartRepositoryResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	Repositories []*ListChartRepositoryResponseBodyRepositories `json:"Repositories,omitempty" xml:"Repositories,omitempty" type:"Repeated"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListChartRepositoryResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartRepositoryResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartRepositoryResponseBody) SetCode(v string) *ListChartRepositoryResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetIsSuccess(v bool) *ListChartRepositoryResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetPageNo(v int32) *ListChartRepositoryResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetPageSize(v int32) *ListChartRepositoryResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetRepositories(v []*ListChartRepositoryResponseBodyRepositories) *ListChartRepositoryResponseBody {
+	s.Repositories = v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetRequestId(v string) *ListChartRepositoryResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetTotalCount(v string) *ListChartRepositoryResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListChartRepositoryResponseBodyRepositories struct {
+	CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+	RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"`
+	RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"`
+	Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"`
+}
+
+func (s ListChartRepositoryResponseBodyRepositories) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartRepositoryResponseBodyRepositories) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetCreateTime(v int64) *ListChartRepositoryResponseBodyRepositories {
+	s.CreateTime = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetInstanceId(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetModifiedTime(v int64) *ListChartRepositoryResponseBodyRepositories {
+	s.ModifiedTime = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetRepoId(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.RepoId = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetRepoName(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetRepoNamespaceName(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetRepoStatus(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.RepoStatus = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetRepoType(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.RepoType = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetSummary(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.Summary = &v
+	return s
+}
+
+type ListChartRepositoryResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListChartRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListChartRepositoryResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartRepositoryResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartRepositoryResponse) SetHeaders(v map[string]*string) *ListChartRepositoryResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListChartRepositoryResponse) SetBody(v *ListChartRepositoryResponseBody) *ListChartRepositoryResponse {
+	s.Body = v
+	return s
+}
+
+type ListEventCenterRecordRequest struct {
+	EventType *string `json:"EventType,omitempty" xml:"EventType,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"`
+}
+
+func (s ListEventCenterRecordRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRecordRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRecordRequest) SetEventType(v string) *ListEventCenterRecordRequest {
+	s.EventType = &v
+	return s
+}
+
+func (s *ListEventCenterRecordRequest) SetInstanceId(v string) *ListEventCenterRecordRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordRequest) SetPageNo(v int32) *ListEventCenterRecordRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListEventCenterRecordRequest) SetPageSize(v int32) *ListEventCenterRecordRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListEventCenterRecordRequest) SetRuleId(v string) *ListEventCenterRecordRequest {
+	s.RuleId = &v
+	return s
+}
+
+type ListEventCenterRecordResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	Records []*ListEventCenterRecordResponseBodyRecords `json:"Records,omitempty" xml:"Records,omitempty" type:"Repeated"`
+	// Id of the request
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListEventCenterRecordResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRecordResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRecordResponseBody) SetCode(v string) *ListEventCenterRecordResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetIsSuccess(v bool) *ListEventCenterRecordResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetPageNo(v int32) *ListEventCenterRecordResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetPageSize(v int32) *ListEventCenterRecordResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetRecords(v []*ListEventCenterRecordResponseBodyRecords) *ListEventCenterRecordResponseBody {
+	s.Records = v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetRequestId(v string) *ListEventCenterRecordResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetTotalCount(v int32) *ListEventCenterRecordResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListEventCenterRecordResponseBodyRecords struct {
+	CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	EventChannel *string `json:"EventChannel,omitempty" xml:"EventChannel,omitempty"`
+	EventNotifyId *string `json:"EventNotifyId,omitempty" xml:"EventNotifyId,omitempty"`
+	EventNotifyMethod *string `json:"EventNotifyMethod,omitempty" xml:"EventNotifyMethod,omitempty"`
+	EventType *string `json:"EventType,omitempty" xml:"EventType,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	Namespace *string `json:"Namespace,omitempty" xml:"Namespace,omitempty"`
+	RecordId *string `json:"RecordId,omitempty" xml:"RecordId,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"`
+	RuleName *string `json:"RuleName,omitempty" xml:"RuleName,omitempty"`
+	Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"`
+	UpdateTime *int64 `json:"UpdateTime,omitempty" xml:"UpdateTime,omitempty"`
+}
+
+func (s ListEventCenterRecordResponseBodyRecords) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRecordResponseBodyRecords) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetCreateTime(v int64) *ListEventCenterRecordResponseBodyRecords {
+	s.CreateTime = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetEventChannel(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.EventChannel = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetEventNotifyId(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.EventNotifyId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetEventNotifyMethod(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.EventNotifyMethod = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetEventType(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.EventType = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetInstanceId(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetNamespace(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.Namespace = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetRecordId(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.RecordId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetRepoName(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetRuleId(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.RuleId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetRuleName(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.RuleName = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetTag(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.Tag = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetUpdateTime(v int64) *ListEventCenterRecordResponseBodyRecords {
+	s.UpdateTime = &v
+	return s
+}
+
+type ListEventCenterRecordResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListEventCenterRecordResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListEventCenterRecordResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRecordResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRecordResponse) SetHeaders(v map[string]*string) *ListEventCenterRecordResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListEventCenterRecordResponse) SetBody(v *ListEventCenterRecordResponseBody) *ListEventCenterRecordResponse {
+	s.Body = v
+	return s
+}
+
+type ListEventCenterRuleNameRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+}
+
+func (s ListEventCenterRuleNameRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRuleNameRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRuleNameRequest) SetInstanceId(v string) *ListEventCenterRuleNameRequest {
+	s.InstanceId = &v
+	return s
+}
+
+type ListEventCenterRuleNameResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	// Id of the request
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	RuleNames []*ListEventCenterRuleNameResponseBodyRuleNames `json:"RuleNames,omitempty" xml:"RuleNames,omitempty" type:"Repeated"`
+}
+
+func (s ListEventCenterRuleNameResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRuleNameResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRuleNameResponseBody) SetCode(v string) *ListEventCenterRuleNameResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListEventCenterRuleNameResponseBody) SetIsSuccess(v bool) *ListEventCenterRuleNameResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListEventCenterRuleNameResponseBody) SetRequestId(v string) *ListEventCenterRuleNameResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListEventCenterRuleNameResponseBody) SetRuleNames(v []*ListEventCenterRuleNameResponseBodyRuleNames) *ListEventCenterRuleNameResponseBody {
+	s.RuleNames = v
+	return s
+}
+
+type ListEventCenterRuleNameResponseBodyRuleNames struct {
+	RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"`
+	RuleName *string `json:"RuleName,omitempty" xml:"RuleName,omitempty"`
+}
+
+func (s ListEventCenterRuleNameResponseBodyRuleNames) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRuleNameResponseBodyRuleNames) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRuleNameResponseBodyRuleNames) SetRuleId(v string) *ListEventCenterRuleNameResponseBodyRuleNames {
+	s.RuleId = &v
+	return s
+}
+
+func (s *ListEventCenterRuleNameResponseBodyRuleNames) SetRuleName(v string) *ListEventCenterRuleNameResponseBodyRuleNames {
+	s.RuleName = &v
+	return s
+}
+
+type ListEventCenterRuleNameResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListEventCenterRuleNameResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListEventCenterRuleNameResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRuleNameResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRuleNameResponse) SetHeaders(v map[string]*string) *ListEventCenterRuleNameResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListEventCenterRuleNameResponse) SetBody(v *ListEventCenterRuleNameResponseBody) *ListEventCenterRuleNameResponse {
+	s.Body = v
+	return s
+}
+
+type ListInstanceRequest struct {
+	InstanceName *string `json:"InstanceName,omitempty" xml:"InstanceName,omitempty"`
+	InstanceStatus *string `json:"InstanceStatus,omitempty" xml:"InstanceStatus,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"`
+}
+
+func (s ListInstanceRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceRequest) SetInstanceName(v string) *ListInstanceRequest {
+	s.InstanceName = &v
+	return s
+}
+
+func (s *ListInstanceRequest) SetInstanceStatus(v string) *ListInstanceRequest {
+	s.InstanceStatus = &v
+	return s
+}
+
+func (s *ListInstanceRequest) SetPageNo(v int32) *ListInstanceRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListInstanceRequest) SetPageSize(v int32) *ListInstanceRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListInstanceRequest) SetResourceGroupId(v string) *ListInstanceRequest {
+	s.ResourceGroupId = &v
+	return s
+}
+
+type ListInstanceResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	Instances []*ListInstanceResponseBodyInstances `json:"Instances,omitempty" xml:"Instances,omitempty" type:"Repeated"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListInstanceResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceResponseBody) SetCode(v string) *ListInstanceResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetInstances(v []*ListInstanceResponseBodyInstances) *ListInstanceResponseBody {
+	s.Instances = v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetIsSuccess(v bool) *ListInstanceResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetPageNo(v int32) *ListInstanceResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetPageSize(v int32) *ListInstanceResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetRequestId(v string) *ListInstanceResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetTotalCount(v int32) *ListInstanceResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListInstanceResponseBodyInstances struct {
+	CreateTime *string `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	InstanceName *string `json:"InstanceName,omitempty" xml:"InstanceName,omitempty"`
+	InstanceSpecification *string `json:"InstanceSpecification,omitempty" xml:"InstanceSpecification,omitempty"`
+	InstanceStatus *string `json:"InstanceStatus,omitempty" xml:"InstanceStatus,omitempty"`
+	ModifiedTime *string `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"`
+	RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"`
+	ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"`
+}
+
+func (s ListInstanceResponseBodyInstances) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceResponseBodyInstances) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceResponseBodyInstances) SetCreateTime(v string) *ListInstanceResponseBodyInstances {
+	s.CreateTime = &v
+	return s
+}
+
+func (s *ListInstanceResponseBodyInstances) SetInstanceId(v string) *ListInstanceResponseBodyInstances {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListInstanceResponseBodyInstances) SetInstanceName(v string) *ListInstanceResponseBodyInstances {
+	s.InstanceName = &v
+	return s
+}
+
+func (s *ListInstanceResponseBodyInstances) SetInstanceSpecification(v string) *ListInstanceResponseBodyInstances {
+	s.InstanceSpecification = &v
+	return s
+}
+
+func (s *ListInstanceResponseBodyInstances) SetInstanceStatus(v string) *ListInstanceResponseBodyInstances {
+	s.InstanceStatus = &v
+	return s
+}
+
+func (s *ListInstanceResponseBodyInstances) SetModifiedTime(v string) *ListInstanceResponseBodyInstances {
+	s.ModifiedTime = &v
+	return s
+}
+
+func (s *ListInstanceResponseBodyInstances) SetRegionId(v string) *ListInstanceResponseBodyInstances {
+	s.RegionId = &v
+	return s
+}
+
+func (s *ListInstanceResponseBodyInstances) SetResourceGroupId(v string) *ListInstanceResponseBodyInstances {
+	s.ResourceGroupId = &v
+	return s
+}
+
+type ListInstanceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListInstanceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListInstanceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceResponse) SetHeaders(v map[string]*string) *ListInstanceResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListInstanceResponse) SetBody(v *ListInstanceResponseBody) *ListInstanceResponse {
+	s.Body = v
+	return s
+}
+
+type ListInstanceEndpointRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"`
+}
+
+func (s ListInstanceEndpointRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceEndpointRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceEndpointRequest) SetInstanceId(v string) *ListInstanceEndpointRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListInstanceEndpointRequest) SetModuleName(v string) *ListInstanceEndpointRequest {
+	s.ModuleName = &v
+	return s
+}
+
+type ListInstanceEndpointResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	Endpoints []*ListInstanceEndpointResponseBodyEndpoints `json:"Endpoints,omitempty" xml:"Endpoints,omitempty" type:"Repeated"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s ListInstanceEndpointResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceEndpointResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceEndpointResponseBody) SetCode(v string) *ListInstanceEndpointResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListInstanceEndpointResponseBody) SetEndpoints(v []*ListInstanceEndpointResponseBodyEndpoints) *ListInstanceEndpointResponseBody {
+	s.Endpoints = v
+	return s
+}
+
+func (s *ListInstanceEndpointResponseBody) SetIsSuccess(v bool) *ListInstanceEndpointResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListInstanceEndpointResponseBody) SetRequestId(v string) *ListInstanceEndpointResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type ListInstanceEndpointResponseBodyEndpoints struct {
+	AclEnable *bool `json:"AclEnable,omitempty" xml:"AclEnable,omitempty"`
+	AclEntries []*ListInstanceEndpointResponseBodyEndpointsAclEntries `json:"AclEntries,omitempty" xml:"AclEntries,omitempty" type:"Repeated"`
+	Domains []*ListInstanceEndpointResponseBodyEndpointsDomains `json:"Domains,omitempty" xml:"Domains,omitempty" type:"Repeated"`
+	Enable *bool `json:"Enable,omitempty" xml:"Enable,omitempty"`
+	EndpointType *string `json:"EndpointType,omitempty" xml:"EndpointType,omitempty"`
+	LinkedVpcs []*ListInstanceEndpointResponseBodyEndpointsLinkedVpcs `json:"LinkedVpcs,omitempty" xml:"LinkedVpcs,omitempty" type:"Repeated"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s ListInstanceEndpointResponseBodyEndpoints) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceEndpointResponseBodyEndpoints) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpoints) SetAclEnable(v bool) *ListInstanceEndpointResponseBodyEndpoints {
+	s.AclEnable = &v
+	return s
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpoints) SetAclEntries(v []*ListInstanceEndpointResponseBodyEndpointsAclEntries) *ListInstanceEndpointResponseBodyEndpoints {
+	s.AclEntries = v
+	return s
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpoints) SetDomains(v []*ListInstanceEndpointResponseBodyEndpointsDomains) *ListInstanceEndpointResponseBodyEndpoints {
+	s.Domains = v
+	return s
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpoints) SetEnable(v bool) *ListInstanceEndpointResponseBodyEndpoints {
+	s.Enable = &v
+	return s
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpoints) SetEndpointType(v string) *ListInstanceEndpointResponseBodyEndpoints {
+	s.EndpointType = &v
+	return s
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpoints) SetLinkedVpcs(v []*ListInstanceEndpointResponseBodyEndpointsLinkedVpcs) *ListInstanceEndpointResponseBodyEndpoints {
+	s.LinkedVpcs = v
+	return s
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpoints) SetStatus(v string) *ListInstanceEndpointResponseBodyEndpoints {
+	s.Status = &v
+	return s
+}
+
+type ListInstanceEndpointResponseBodyEndpointsAclEntries struct {
+	Entry *string `json:"Entry,omitempty" xml:"Entry,omitempty"`
+}
+
+func (s ListInstanceEndpointResponseBodyEndpointsAclEntries) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceEndpointResponseBodyEndpointsAclEntries) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpointsAclEntries) SetEntry(v string) *ListInstanceEndpointResponseBodyEndpointsAclEntries {
+	s.Entry = &v
+	return s
+}
+
+type ListInstanceEndpointResponseBodyEndpointsDomains struct {
+	Domain *string `json:"Domain,omitempty" xml:"Domain,omitempty"`
+	Type *string `json:"Type,omitempty" xml:"Type,omitempty"`
+}
+
+func (s ListInstanceEndpointResponseBodyEndpointsDomains) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceEndpointResponseBodyEndpointsDomains) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpointsDomains) SetDomain(v string) *ListInstanceEndpointResponseBodyEndpointsDomains {
+	s.Domain = &v
+	return s
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpointsDomains) SetType(v string) *ListInstanceEndpointResponseBodyEndpointsDomains {
+	s.Type = &v
+	return s
+}
+
+type ListInstanceEndpointResponseBodyEndpointsLinkedVpcs struct {
+	VpcId *string `json:"VpcId,omitempty" xml:"VpcId,omitempty"`
+}
+
+func (s ListInstanceEndpointResponseBodyEndpointsLinkedVpcs) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceEndpointResponseBodyEndpointsLinkedVpcs) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceEndpointResponseBodyEndpointsLinkedVpcs) SetVpcId(v string) *ListInstanceEndpointResponseBodyEndpointsLinkedVpcs {
+	s.VpcId = &v
+	return s
+}
+
+type ListInstanceEndpointResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListInstanceEndpointResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListInstanceEndpointResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceEndpointResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceEndpointResponse) SetHeaders(v map[string]*string) *ListInstanceEndpointResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListInstanceEndpointResponse) SetBody(v *ListInstanceEndpointResponseBody) *ListInstanceEndpointResponse {
+	s.Body = v
+	return s
+}
+
+type ListInstanceRegionRequest struct {
+	Lang *string `json:"Lang,omitempty" xml:"Lang,omitempty"`
+}
+
+func (s ListInstanceRegionRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceRegionRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceRegionRequest) SetLang(v string) *ListInstanceRegionRequest {
+	s.Lang = &v
+	return s
+}
+
+type ListInstanceRegionResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	Regions []*ListInstanceRegionResponseBodyRegions `json:"Regions,omitempty" xml:"Regions,omitempty" type:"Repeated"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s ListInstanceRegionResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceRegionResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceRegionResponseBody) SetCode(v string) *ListInstanceRegionResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListInstanceRegionResponseBody) SetIsSuccess(v bool) *ListInstanceRegionResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListInstanceRegionResponseBody) SetRegions(v []*ListInstanceRegionResponseBodyRegions) *ListInstanceRegionResponseBody {
+	s.Regions = v
+	return s
+}
+
+func (s *ListInstanceRegionResponseBody) SetRequestId(v string) *ListInstanceRegionResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type ListInstanceRegionResponseBodyRegions struct {
+	LocalName *string `json:"LocalName,omitempty" xml:"LocalName,omitempty"`
+	RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"`
+}
+
+func (s ListInstanceRegionResponseBodyRegions) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceRegionResponseBodyRegions) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceRegionResponseBodyRegions) SetLocalName(v string) *ListInstanceRegionResponseBodyRegions {
+	s.LocalName = &v
+	return s
+}
+
+func (s *ListInstanceRegionResponseBodyRegions) SetRegionId(v string) *ListInstanceRegionResponseBodyRegions {
+	s.RegionId = &v
+	return s
+}
+
+type ListInstanceRegionResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListInstanceRegionResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListInstanceRegionResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceRegionResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceRegionResponse) SetHeaders(v map[string]*string) *ListInstanceRegionResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListInstanceRegionResponse) SetBody(v *ListInstanceRegionResponseBody) *ListInstanceRegionResponse {
+	s.Body = v
+	return s
+}
+
+type ListNamespaceRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"`
+	NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"`
+	Tag []*ListNamespaceRequestTag `json:"Tag,omitempty" xml:"Tag,omitempty" type:"Repeated"`
+}
+
+func (s ListNamespaceRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListNamespaceRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListNamespaceRequest) SetInstanceId(v string) *ListNamespaceRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListNamespaceRequest) SetNamespaceName(v string) *ListNamespaceRequest {
+	s.NamespaceName = &v
+	return s
+}
+
+func (s *ListNamespaceRequest) SetNamespaceStatus(v string) *ListNamespaceRequest {
+	s.NamespaceStatus = &v
+	return s
+}
+
+func (s *ListNamespaceRequest) SetPageNo(v int32) *ListNamespaceRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListNamespaceRequest) SetPageSize(v int32) *ListNamespaceRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListNamespaceRequest) SetResourceGroupId(v string) *ListNamespaceRequest {
+	s.ResourceGroupId = &v
+	return s
+}
+
+func (s *ListNamespaceRequest) SetTag(v []*ListNamespaceRequestTag) *ListNamespaceRequest {
+	s.Tag = v
+	return s
+}
+
+type ListNamespaceRequestTag struct {
+	Key *string `json:"Key,omitempty" xml:"Key,omitempty"`
+	Value *string `json:"Value,omitempty" xml:"Value,omitempty"`
+}
+
+func (s ListNamespaceRequestTag) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListNamespaceRequestTag) GoString() string {
+	return s.String()
+}
+
+func (s *ListNamespaceRequestTag) SetKey(v string) *ListNamespaceRequestTag {
+	s.Key = &v
+	return s
+}
+
+func (s *ListNamespaceRequestTag) SetValue(v string) *ListNamespaceRequestTag {
+	s.Value = &v
+	return s
+}
+
+type ListNamespaceResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	Namespaces []*ListNamespaceResponseBodyNamespaces `json:"Namespaces,omitempty" xml:"Namespaces,omitempty" type:"Repeated"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListNamespaceResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListNamespaceResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListNamespaceResponseBody) SetCode(v string) *ListNamespaceResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBody) SetIsSuccess(v bool) *ListNamespaceResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBody) SetNamespaces(v []*ListNamespaceResponseBodyNamespaces) *ListNamespaceResponseBody {
+	s.Namespaces = v
+	return s
+}
+
+func (s *ListNamespaceResponseBody) SetPageNo(v int32) *ListNamespaceResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBody) SetPageSize(v int32) *ListNamespaceResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBody) SetRequestId(v string) *ListNamespaceResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBody) SetTotalCount(v string) *ListNamespaceResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListNamespaceResponseBodyNamespaces struct {
+	AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"`
+	DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	NamespaceId *string `json:"NamespaceId,omitempty" xml:"NamespaceId,omitempty"`
+	NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"`
+	NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"`
+	ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"`
+	Tags []*ListNamespaceResponseBodyNamespacesTags `json:"Tags,omitempty" xml:"Tags,omitempty" type:"Repeated"`
+}
+
+func (s ListNamespaceResponseBodyNamespaces) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListNamespaceResponseBodyNamespaces) GoString() string {
+	return s.String()
+}
+
+func (s *ListNamespaceResponseBodyNamespaces) SetAutoCreateRepo(v bool) *ListNamespaceResponseBodyNamespaces {
+	s.AutoCreateRepo = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBodyNamespaces) SetDefaultRepoType(v string) *ListNamespaceResponseBodyNamespaces {
+	s.DefaultRepoType = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBodyNamespaces) SetInstanceId(v string) *ListNamespaceResponseBodyNamespaces {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBodyNamespaces) SetNamespaceId(v string) *ListNamespaceResponseBodyNamespaces {
+	s.NamespaceId = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBodyNamespaces) SetNamespaceName(v string) *ListNamespaceResponseBodyNamespaces {
+	s.NamespaceName = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBodyNamespaces) SetNamespaceStatus(v string) *ListNamespaceResponseBodyNamespaces {
+	s.NamespaceStatus = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBodyNamespaces) SetResourceGroupId(v string) *ListNamespaceResponseBodyNamespaces {
+	s.ResourceGroupId = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBodyNamespaces) SetTags(v []*ListNamespaceResponseBodyNamespacesTags) *ListNamespaceResponseBodyNamespaces {
+	s.Tags = v
+	return s
+}
+
+type ListNamespaceResponseBodyNamespacesTags struct {
+	TagKey *string `json:"TagKey,omitempty" xml:"TagKey,omitempty"`
+	TagValue *string `json:"TagValue,omitempty" xml:"TagValue,omitempty"`
+}
+
+func (s ListNamespaceResponseBodyNamespacesTags) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListNamespaceResponseBodyNamespacesTags) GoString() string {
+	return s.String()
+}
+
+func (s *ListNamespaceResponseBodyNamespacesTags) SetTagKey(v string) *ListNamespaceResponseBodyNamespacesTags {
+	s.TagKey = &v
+	return s
+}
+
+func (s *ListNamespaceResponseBodyNamespacesTags) SetTagValue(v string) *ListNamespaceResponseBodyNamespacesTags {
+	s.TagValue = &v
+	return s
+}
+
+type ListNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListNamespaceResponse) SetHeaders(v map[string]*string) *ListNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListNamespaceResponse) SetBody(v *ListNamespaceResponseBody) *ListNamespaceResponse {
+	s.Body = v
+	return s
+}
+
+type ListRepoBuildRecordRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+}
+
+func (s ListRepoBuildRecordRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordRequest) SetInstanceId(v string) *ListRepoBuildRecordRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordRequest) SetPageNo(v int32) *ListRepoBuildRecordRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordRequest) SetPageSize(v int32) *ListRepoBuildRecordRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordRequest) SetRepoId(v string) *ListRepoBuildRecordRequest {
+	s.RepoId = &v
+	return s
+}
+
+type ListRepoBuildRecordResponseBody struct {
+	BuildRecords []*ListRepoBuildRecordResponseBodyBuildRecords `json:"BuildRecords,omitempty" xml:"BuildRecords,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListRepoBuildRecordResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordResponseBody) SetBuildRecords(v []*ListRepoBuildRecordResponseBodyBuildRecords) *ListRepoBuildRecordResponseBody {
+	s.BuildRecords = v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBody) SetCode(v string) *ListRepoBuildRecordResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBody) SetIsSuccess(v bool) *ListRepoBuildRecordResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBody) SetPageNo(v int32) *ListRepoBuildRecordResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBody) SetPageSize(v int32) *ListRepoBuildRecordResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBody) SetRequestId(v string) *ListRepoBuildRecordResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBody) SetTotalCount(v string) *ListRepoBuildRecordResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListRepoBuildRecordResponseBodyBuildRecords struct {
+	BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"`
+	BuildStatus *string `json:"BuildStatus,omitempty" xml:"BuildStatus,omitempty"`
+	EndTime *string `json:"EndTime,omitempty" xml:"EndTime,omitempty"`
+	Image *ListRepoBuildRecordResponseBodyBuildRecordsImage `json:"Image,omitempty" xml:"Image,omitempty" type:"Struct"`
+	StartTime *string `json:"StartTime,omitempty" xml:"StartTime,omitempty"`
+}
+
+func (s ListRepoBuildRecordResponseBodyBuildRecords) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordResponseBodyBuildRecords) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordResponseBodyBuildRecords) SetBuildRecordId(v string) *ListRepoBuildRecordResponseBodyBuildRecords {
+	s.BuildRecordId = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBodyBuildRecords) SetBuildStatus(v string) *ListRepoBuildRecordResponseBodyBuildRecords {
+	s.BuildStatus = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBodyBuildRecords) SetEndTime(v string) *ListRepoBuildRecordResponseBodyBuildRecords {
+	s.EndTime = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBodyBuildRecords) SetImage(v *ListRepoBuildRecordResponseBodyBuildRecordsImage) *ListRepoBuildRecordResponseBodyBuildRecords {
+	s.Image = v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBodyBuildRecords) SetStartTime(v string) *ListRepoBuildRecordResponseBodyBuildRecords {
+	s.StartTime = &v
+	return s
+}
+
+type ListRepoBuildRecordResponseBodyBuildRecordsImage struct {
+	ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+}
+
+func (s ListRepoBuildRecordResponseBodyBuildRecordsImage) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordResponseBodyBuildRecordsImage) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordResponseBodyBuildRecordsImage) SetImageTag(v string) *ListRepoBuildRecordResponseBodyBuildRecordsImage {
+	s.ImageTag = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBodyBuildRecordsImage) SetRepoId(v string) *ListRepoBuildRecordResponseBodyBuildRecordsImage {
+	s.RepoId = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBodyBuildRecordsImage) SetRepoName(v string) *ListRepoBuildRecordResponseBodyBuildRecordsImage {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponseBodyBuildRecordsImage) SetRepoNamespaceName(v string) *ListRepoBuildRecordResponseBodyBuildRecordsImage {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+type ListRepoBuildRecordResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListRepoBuildRecordResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListRepoBuildRecordResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordResponse) SetHeaders(v map[string]*string) *ListRepoBuildRecordResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponse) SetBody(v *ListRepoBuildRecordResponseBody) *ListRepoBuildRecordResponse {
+	s.Body = v
+	return s
+}
+
+type ListRepoBuildRecordLogRequest struct {
+	BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	Offset *int32 `json:"Offset,omitempty" xml:"Offset,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+}
+
+func (s ListRepoBuildRecordLogRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordLogRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordLogRequest) SetBuildRecordId(v string) *ListRepoBuildRecordLogRequest {
+	s.BuildRecordId = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogRequest) SetInstanceId(v string) *ListRepoBuildRecordLogRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogRequest) SetOffset(v int32) *ListRepoBuildRecordLogRequest {
+	s.Offset = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogRequest) SetRepoId(v string) *ListRepoBuildRecordLogRequest {
+	s.RepoId = &v
+	return s
+}
+
+type ListRepoBuildRecordLogResponseBody struct {
+	BuildRecordLogs []*ListRepoBuildRecordLogResponseBodyBuildRecordLogs `json:"BuildRecordLogs,omitempty" xml:"BuildRecordLogs,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListRepoBuildRecordLogResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordLogResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetBuildRecordLogs(v []*ListRepoBuildRecordLogResponseBodyBuildRecordLogs) *ListRepoBuildRecordLogResponseBody {
+	s.BuildRecordLogs = v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetCode(v string) *ListRepoBuildRecordLogResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetIsSuccess(v bool) *ListRepoBuildRecordLogResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetPageNo(v int32) *ListRepoBuildRecordLogResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetPageSize(v int32) *ListRepoBuildRecordLogResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetRequestId(v string) *ListRepoBuildRecordLogResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetTotalCount(v string) *ListRepoBuildRecordLogResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListRepoBuildRecordLogResponseBodyBuildRecordLogs struct {
+	BuildStage *string `json:"BuildStage,omitempty" xml:"BuildStage,omitempty"`
+	LineNumber *int32 `json:"LineNumber,omitempty" xml:"LineNumber,omitempty"`
+	Message *string `json:"Message,omitempty" xml:"Message,omitempty"`
+}
+
+func (s ListRepoBuildRecordLogResponseBodyBuildRecordLogs) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordLogResponseBodyBuildRecordLogs) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordLogResponseBodyBuildRecordLogs) SetBuildStage(v string) *ListRepoBuildRecordLogResponseBodyBuildRecordLogs {
+	s.BuildStage = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBodyBuildRecordLogs) SetLineNumber(v int32) *ListRepoBuildRecordLogResponseBodyBuildRecordLogs {
+	s.LineNumber = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBodyBuildRecordLogs) SetMessage(v string) *ListRepoBuildRecordLogResponseBodyBuildRecordLogs {
+	s.Message = &v
+	return s
+}
+
+type ListRepoBuildRecordLogResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListRepoBuildRecordLogResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListRepoBuildRecordLogResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordLogResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordLogResponse) SetHeaders(v map[string]*string) *ListRepoBuildRecordLogResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponse) SetBody(v *ListRepoBuildRecordLogResponseBody) *ListRepoBuildRecordLogResponse {
+	s.Body = v
+	return s
+}
+
+type ListRepoBuildRuleRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+}
+
+func (s ListRepoBuildRuleRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRuleRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRuleRequest) SetInstanceId(v string) *ListRepoBuildRuleRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleRequest) SetPageNo(v int32) *ListRepoBuildRuleRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleRequest) SetPageSize(v int32) *ListRepoBuildRuleRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleRequest) SetRepoId(v string) *ListRepoBuildRuleRequest {
+	s.RepoId = &v
+	return s
+}
+
+type ListRepoBuildRuleResponseBody struct {
+	BuildRules []*ListRepoBuildRuleResponseBodyBuildRules `json:"BuildRules,omitempty" xml:"BuildRules,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListRepoBuildRuleResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRuleResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRuleResponseBody) SetBuildRules(v []*ListRepoBuildRuleResponseBodyBuildRules) *ListRepoBuildRuleResponseBody {
+	s.BuildRules = v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBody) SetCode(v string) *ListRepoBuildRuleResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBody) SetIsSuccess(v bool) *ListRepoBuildRuleResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBody) SetPageNo(v int32) *ListRepoBuildRuleResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBody) SetPageSize(v int32) *ListRepoBuildRuleResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBody) SetRequestId(v string) *ListRepoBuildRuleResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBody) SetTotalCount(v string) *ListRepoBuildRuleResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListRepoBuildRuleResponseBodyBuildRules struct {
+	BuildArgs []*string `json:"BuildArgs,omitempty" xml:"BuildArgs,omitempty" type:"Repeated"`
+	BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"`
+	DockerfileLocation *string `json:"DockerfileLocation,omitempty" xml:"DockerfileLocation,omitempty"`
+	DockerfileName *string `json:"DockerfileName,omitempty" xml:"DockerfileName,omitempty"`
+	ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"`
+	Platforms []*string `json:"Platforms,omitempty" xml:"Platforms,omitempty" type:"Repeated"`
+	PushName *string `json:"PushName,omitempty" xml:"PushName,omitempty"`
+	PushType *string `json:"PushType,omitempty" xml:"PushType,omitempty"`
+}
+
+func (s ListRepoBuildRuleResponseBodyBuildRules) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRuleResponseBodyBuildRules) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRuleResponseBodyBuildRules) SetBuildArgs(v []*string) *ListRepoBuildRuleResponseBodyBuildRules {
+	s.BuildArgs = v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBodyBuildRules) SetBuildRuleId(v string) *ListRepoBuildRuleResponseBodyBuildRules {
+	s.BuildRuleId = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBodyBuildRules) SetDockerfileLocation(v string) *ListRepoBuildRuleResponseBodyBuildRules {
+	s.DockerfileLocation = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBodyBuildRules) SetDockerfileName(v string) *ListRepoBuildRuleResponseBodyBuildRules {
+	s.DockerfileName = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBodyBuildRules) SetImageTag(v string) *ListRepoBuildRuleResponseBodyBuildRules {
+	s.ImageTag = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBodyBuildRules) SetPlatforms(v []*string) *ListRepoBuildRuleResponseBodyBuildRules {
+	s.Platforms = v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBodyBuildRules) SetPushName(v string) *ListRepoBuildRuleResponseBodyBuildRules {
+	s.PushName = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBodyBuildRules) SetPushType(v string) *ListRepoBuildRuleResponseBodyBuildRules {
+	s.PushType = &v
+	return s
+}
+
+type ListRepoBuildRuleResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListRepoBuildRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListRepoBuildRuleResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRuleResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRuleResponse) SetHeaders(v map[string]*string) *ListRepoBuildRuleResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponse) SetBody(v *ListRepoBuildRuleResponseBody) *ListRepoBuildRuleResponse {
+	s.Body = v
+	return s
+}
+
+type ListRepoSyncRuleRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	TargetInstanceId *string `json:"TargetInstanceId,omitempty" xml:"TargetInstanceId,omitempty"`
+	TargetRegionId *string `json:"TargetRegionId,omitempty" xml:"TargetRegionId,omitempty"`
+}
+
+func (s ListRepoSyncRuleRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoSyncRuleRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoSyncRuleRequest) SetInstanceId(v string) *ListRepoSyncRuleRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleRequest) SetNamespaceName(v string) *ListRepoSyncRuleRequest {
+	s.NamespaceName = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleRequest) SetPageNo(v int32) *ListRepoSyncRuleRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleRequest) SetPageSize(v int32) *ListRepoSyncRuleRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleRequest) SetRepoName(v string) *ListRepoSyncRuleRequest {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleRequest) SetTargetInstanceId(v string) *ListRepoSyncRuleRequest {
+	s.TargetInstanceId = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleRequest) SetTargetRegionId(v string) *ListRepoSyncRuleRequest {
+	s.TargetRegionId = &v
+	return s
+}
+
+type ListRepoSyncRuleResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	SyncRules []*ListRepoSyncRuleResponseBodySyncRules `json:"SyncRules,omitempty" xml:"SyncRules,omitempty" type:"Repeated"`
+	TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListRepoSyncRuleResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoSyncRuleResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoSyncRuleResponseBody) SetCode(v string) *ListRepoSyncRuleResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBody) SetIsSuccess(v bool) *ListRepoSyncRuleResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBody) SetPageNo(v int32) *ListRepoSyncRuleResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBody) SetPageSize(v int32) *ListRepoSyncRuleResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBody) SetRequestId(v string) *ListRepoSyncRuleResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBody) SetSyncRules(v []*ListRepoSyncRuleResponseBodySyncRules) *ListRepoSyncRuleResponseBody {
+	s.SyncRules = v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBody) SetTotalCount(v int32) *ListRepoSyncRuleResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListRepoSyncRuleResponseBodySyncRules struct {
+	CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	CrossUser *bool `json:"CrossUser,omitempty" xml:"CrossUser,omitempty"`
+	LocalInstanceId *string `json:"LocalInstanceId,omitempty" xml:"LocalInstanceId,omitempty"`
+	LocalNamespaceName *string `json:"LocalNamespaceName,omitempty" xml:"LocalNamespaceName,omitempty"`
+	LocalRegionId *string `json:"LocalRegionId,omitempty" xml:"LocalRegionId,omitempty"`
+	LocalRepoName *string `json:"LocalRepoName,omitempty" xml:"LocalRepoName,omitempty"`
+	ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"`
+	SyncDirection *string `json:"SyncDirection,omitempty" xml:"SyncDirection,omitempty"`
+	SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"`
+	SyncRuleName *string `json:"SyncRuleName,omitempty" xml:"SyncRuleName,omitempty"`
+	SyncScope *string `json:"SyncScope,omitempty" xml:"SyncScope,omitempty"`
+	SyncTrigger *string `json:"SyncTrigger,omitempty" xml:"SyncTrigger,omitempty"`
+	TagFilter *string `json:"TagFilter,omitempty" xml:"TagFilter,omitempty"`
+	TargetInstanceId *string `json:"TargetInstanceId,omitempty" xml:"TargetInstanceId,omitempty"`
+	TargetNamespaceName *string `json:"TargetNamespaceName,omitempty" xml:"TargetNamespaceName,omitempty"`
+	TargetRegionId *string `json:"TargetRegionId,omitempty" xml:"TargetRegionId,omitempty"`
+	TargetRepoName *string `json:"TargetRepoName,omitempty" xml:"TargetRepoName,omitempty"`
+}
+
+func (s ListRepoSyncRuleResponseBodySyncRules) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoSyncRuleResponseBodySyncRules) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetCreateTime(v int64) *ListRepoSyncRuleResponseBodySyncRules {
+	s.CreateTime = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetCrossUser(v bool) *ListRepoSyncRuleResponseBodySyncRules {
+	s.CrossUser = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetLocalInstanceId(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.LocalInstanceId = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetLocalNamespaceName(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.LocalNamespaceName = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetLocalRegionId(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.LocalRegionId = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetLocalRepoName(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.LocalRepoName = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetModifiedTime(v int64) *ListRepoSyncRuleResponseBodySyncRules {
+	s.ModifiedTime = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetSyncDirection(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.SyncDirection = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetSyncRuleId(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.SyncRuleId = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetSyncRuleName(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.SyncRuleName = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetSyncScope(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.SyncScope = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetSyncTrigger(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.SyncTrigger = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetTagFilter(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.TagFilter = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetTargetInstanceId(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.TargetInstanceId = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetTargetNamespaceName(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.TargetNamespaceName = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetTargetRegionId(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.TargetRegionId = &v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponseBodySyncRules) SetTargetRepoName(v string) *ListRepoSyncRuleResponseBodySyncRules {
+	s.TargetRepoName = &v
+	return s
+}
+
+type ListRepoSyncRuleResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListRepoSyncRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListRepoSyncRuleResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoSyncRuleResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoSyncRuleResponse) SetHeaders(v map[string]*string) *ListRepoSyncRuleResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListRepoSyncRuleResponse) SetBody(v *ListRepoSyncRuleResponseBody) *ListRepoSyncRuleResponse {
+	s.Body = v
+	return s
+}
+
+type ListRepoSyncTaskRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+	SyncRecordId *string `json:"SyncRecordId,omitempty" xml:"SyncRecordId,omitempty"`
+	Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"`
+}
+
+func (s ListRepoSyncTaskRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoSyncTaskRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoSyncTaskRequest) SetInstanceId(v string) *ListRepoSyncTaskRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskRequest) SetPageNo(v int32) *ListRepoSyncTaskRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskRequest) SetPageSize(v int32) *ListRepoSyncTaskRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskRequest) SetRepoName(v string) *ListRepoSyncTaskRequest {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskRequest) SetRepoNamespaceName(v string) *ListRepoSyncTaskRequest {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskRequest) SetSyncRecordId(v string) *ListRepoSyncTaskRequest {
+	s.SyncRecordId = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskRequest) SetTag(v string) *ListRepoSyncTaskRequest {
+	s.Tag = &v
+	return s
+}
+
+type ListRepoSyncTaskResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	SyncTasks []*ListRepoSyncTaskResponseBodySyncTasks `json:"SyncTasks,omitempty" xml:"SyncTasks,omitempty" type:"Repeated"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListRepoSyncTaskResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoSyncTaskResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetCode(v string) *ListRepoSyncTaskResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetIsSuccess(v bool) *ListRepoSyncTaskResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetPageNo(v int32) *ListRepoSyncTaskResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetPageSize(v int32) *ListRepoSyncTaskResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetRequestId(v string) *ListRepoSyncTaskResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetSyncTasks(v []*ListRepoSyncTaskResponseBodySyncTasks) *ListRepoSyncTaskResponseBody {
+	s.SyncTasks = v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetTotalCount(v string) *ListRepoSyncTaskResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListRepoSyncTaskResponseBodySyncTasks struct {
+	CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	CrossUser *bool `json:"CrossUser,omitempty" xml:"CrossUser,omitempty"`
+	CustomLink *bool `json:"CustomLink,omitempty" xml:"CustomLink,omitempty"`
+	ImageFrom *ListRepoSyncTaskResponseBodySyncTasksImageFrom `json:"ImageFrom,omitempty" xml:"ImageFrom,omitempty" type:"Struct"`
+	ImageTo *ListRepoSyncTaskResponseBodySyncTasksImageTo `json:"ImageTo,omitempty" xml:"ImageTo,omitempty" type:"Struct"`
+	ModifedTime *int64 `json:"ModifedTime,omitempty" xml:"ModifedTime,omitempty"`
+	SyncBatchTaskId *string `json:"SyncBatchTaskId,omitempty" xml:"SyncBatchTaskId,omitempty"`
+	SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"`
+	SyncTaskId *string `json:"SyncTaskId,omitempty" xml:"SyncTaskId,omitempty"`
+	SyncTransAccelerate *bool `json:"SyncTransAccelerate,omitempty" xml:"SyncTransAccelerate,omitempty"`
+	TaskStatus *string `json:"TaskStatus,omitempty" xml:"TaskStatus,omitempty"`
+	TaskTrigger *string `json:"TaskTrigger,omitempty" xml:"TaskTrigger,omitempty"`
+}
+
+func (s ListRepoSyncTaskResponseBodySyncTasks) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoSyncTaskResponseBodySyncTasks) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetCreateTime(v int64) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.CreateTime = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetCrossUser(v bool) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.CrossUser = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetCustomLink(v bool) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.CustomLink = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetImageFrom(v *ListRepoSyncTaskResponseBodySyncTasksImageFrom) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.ImageFrom = v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetImageTo(v *ListRepoSyncTaskResponseBodySyncTasksImageTo) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.ImageTo = v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetModifedTime(v int64) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.ModifedTime = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetSyncBatchTaskId(v string) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.SyncBatchTaskId = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetSyncRuleId(v string) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.SyncRuleId = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetSyncTaskId(v string) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.SyncTaskId = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetSyncTransAccelerate(v bool) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.SyncTransAccelerate = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetTaskStatus(v string) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.TaskStatus = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetTaskTrigger(v string) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.TaskTrigger = &v
+	return s
+}
+
+type ListRepoSyncTaskResponseBodySyncTasksImageFrom struct {
+	ImageTag *string
`json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s ListRepoSyncTaskResponseBodySyncTasksImageFrom) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncTaskResponseBodySyncTasksImageFrom) GoString() string { + return s.String() +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageFrom) SetImageTag(v string) *ListRepoSyncTaskResponseBodySyncTasksImageFrom { + s.ImageTag = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageFrom) SetInstanceId(v string) *ListRepoSyncTaskResponseBodySyncTasksImageFrom { + s.InstanceId = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageFrom) SetRegionId(v string) *ListRepoSyncTaskResponseBodySyncTasksImageFrom { + s.RegionId = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageFrom) SetRepoName(v string) *ListRepoSyncTaskResponseBodySyncTasksImageFrom { + s.RepoName = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageFrom) SetRepoNamespaceName(v string) *ListRepoSyncTaskResponseBodySyncTasksImageFrom { + s.RepoNamespaceName = &v + return s +} + +type ListRepoSyncTaskResponseBodySyncTasksImageTo struct { + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s ListRepoSyncTaskResponseBodySyncTasksImageTo) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncTaskResponseBodySyncTasksImageTo) GoString() string { + return s.String() +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageTo) SetImageTag(v string) *ListRepoSyncTaskResponseBodySyncTasksImageTo { + s.ImageTag = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageTo) SetInstanceId(v string) *ListRepoSyncTaskResponseBodySyncTasksImageTo { + s.InstanceId = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageTo) SetRegionId(v string) *ListRepoSyncTaskResponseBodySyncTasksImageTo { + s.RegionId = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageTo) SetRepoName(v string) *ListRepoSyncTaskResponseBodySyncTasksImageTo { + s.RepoName = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageTo) SetRepoNamespaceName(v string) *ListRepoSyncTaskResponseBodySyncTasksImageTo { + s.RepoNamespaceName = &v + return s +} + +type ListRepoSyncTaskResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepoSyncTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepoSyncTaskResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncTaskResponse) GoString() string { + return s.String() +} + +func (s *ListRepoSyncTaskResponse) SetHeaders(v map[string]*string) *ListRepoSyncTaskResponse { + s.Headers = v + return s +} + +func (s *ListRepoSyncTaskResponse) SetBody(v *ListRepoSyncTaskResponseBody) *ListRepoSyncTaskResponse { + s.Body 
= v + return s +} + +type ListRepoTagRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s ListRepoTagRequest) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagRequest) GoString() string { + return s.String() +} + +func (s *ListRepoTagRequest) SetInstanceId(v string) *ListRepoTagRequest { + s.InstanceId = &v + return s +} + +func (s *ListRepoTagRequest) SetPageNo(v int32) *ListRepoTagRequest { + s.PageNo = &v + return s +} + +func (s *ListRepoTagRequest) SetPageSize(v int32) *ListRepoTagRequest { + s.PageSize = &v + return s +} + +func (s *ListRepoTagRequest) SetRepoId(v string) *ListRepoTagRequest { + s.RepoId = &v + return s +} + +type ListRepoTagResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + Images []*ListRepoTagResponseBodyImages `json:"Images,omitempty" xml:"Images,omitempty" type:"Repeated"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListRepoTagResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagResponseBody) GoString() string { + return s.String() +} + +func (s *ListRepoTagResponseBody) SetCode(v string) *ListRepoTagResponseBody { + s.Code = &v + return s +} + +func (s *ListRepoTagResponseBody) SetImages(v []*ListRepoTagResponseBodyImages) *ListRepoTagResponseBody { + s.Images = v + return s +} + +func (s *ListRepoTagResponseBody) SetIsSuccess(v bool) *ListRepoTagResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListRepoTagResponseBody) SetPageNo(v int32) *ListRepoTagResponseBody { + s.PageNo = &v + return s +} + +func (s *ListRepoTagResponseBody) SetPageSize(v int32) *ListRepoTagResponseBody { + s.PageSize = &v + return s +} + +func (s *ListRepoTagResponseBody) SetRequestId(v string) *ListRepoTagResponseBody { + s.RequestId = &v + return s +} + +func (s *ListRepoTagResponseBody) SetTotalCount(v string) *ListRepoTagResponseBody { + s.TotalCount = &v + return s +} + +type ListRepoTagResponseBodyImages struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + ImageCreate *string `json:"ImageCreate,omitempty" xml:"ImageCreate,omitempty"` + ImageId *string `json:"ImageId,omitempty" xml:"ImageId,omitempty"` + ImageSize *int64 `json:"ImageSize,omitempty" xml:"ImageSize,omitempty"` + ImageUpdate *string `json:"ImageUpdate,omitempty" xml:"ImageUpdate,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s ListRepoTagResponseBodyImages) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagResponseBodyImages) GoString() string { + return s.String() +} + +func (s *ListRepoTagResponseBodyImages) SetDigest(v string) *ListRepoTagResponseBodyImages { + s.Digest = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetImageCreate(v string) *ListRepoTagResponseBodyImages { + s.ImageCreate = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetImageId(v string) 
*ListRepoTagResponseBodyImages { + s.ImageId = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetImageSize(v int64) *ListRepoTagResponseBodyImages { + s.ImageSize = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetImageUpdate(v string) *ListRepoTagResponseBodyImages { + s.ImageUpdate = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetStatus(v string) *ListRepoTagResponseBodyImages { + s.Status = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetTag(v string) *ListRepoTagResponseBodyImages { + s.Tag = &v + return s +} + +type ListRepoTagResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepoTagResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagResponse) GoString() string { + return s.String() +} + +func (s *ListRepoTagResponse) SetHeaders(v map[string]*string) *ListRepoTagResponse { + s.Headers = v + return s +} + +func (s *ListRepoTagResponse) SetBody(v *ListRepoTagResponseBody) *ListRepoTagResponse { + s.Body = v + return s +} + +type ListRepoTagScanResultRequest struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + FilterValue *string `json:"FilterValue,omitempty" xml:"FilterValue,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + ScanTaskId *string `json:"ScanTaskId,omitempty" xml:"ScanTaskId,omitempty"` + ScanType *string `json:"ScanType,omitempty" xml:"ScanType,omitempty"` + Severity *string `json:"Severity,omitempty" xml:"Severity,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` + VulQueryKey *string `json:"VulQueryKey,omitempty" xml:"VulQueryKey,omitempty"` +} + +func (s ListRepoTagScanResultRequest) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagScanResultRequest) GoString() string { + return s.String() +} + +func (s *ListRepoTagScanResultRequest) SetDigest(v string) *ListRepoTagScanResultRequest { + s.Digest = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetFilterValue(v string) *ListRepoTagScanResultRequest { + s.FilterValue = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetInstanceId(v string) *ListRepoTagScanResultRequest { + s.InstanceId = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetPageNo(v int32) *ListRepoTagScanResultRequest { + s.PageNo = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetPageSize(v int32) *ListRepoTagScanResultRequest { + s.PageSize = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetRepoId(v string) *ListRepoTagScanResultRequest { + s.RepoId = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetScanTaskId(v string) *ListRepoTagScanResultRequest { + s.ScanTaskId = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetScanType(v string) *ListRepoTagScanResultRequest { + s.ScanType = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetSeverity(v string) *ListRepoTagScanResultRequest { + s.Severity = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetTag(v string) *ListRepoTagScanResultRequest { + s.Tag = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetVulQueryKey(v string) 
*ListRepoTagScanResultRequest { + s.VulQueryKey = &v + return s +} + +type ListRepoTagScanResultResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` + Vulnerabilities []*ListRepoTagScanResultResponseBodyVulnerabilities `json:"Vulnerabilities,omitempty" xml:"Vulnerabilities,omitempty" type:"Repeated"` +} + +func (s ListRepoTagScanResultResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagScanResultResponseBody) GoString() string { + return s.String() +} + +func (s *ListRepoTagScanResultResponseBody) SetCode(v string) *ListRepoTagScanResultResponseBody { + s.Code = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetIsSuccess(v bool) *ListRepoTagScanResultResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetPageNo(v int32) *ListRepoTagScanResultResponseBody { + s.PageNo = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetPageSize(v int32) *ListRepoTagScanResultResponseBody { + s.PageSize = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetRequestId(v string) *ListRepoTagScanResultResponseBody { + s.RequestId = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetTotalCount(v int32) *ListRepoTagScanResultResponseBody { + s.TotalCount = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetVulnerabilities(v []*ListRepoTagScanResultResponseBodyVulnerabilities) *ListRepoTagScanResultResponseBody { + s.Vulnerabilities = v + return s +} + +type ListRepoTagScanResultResponseBodyVulnerabilities struct { + AddedBy *string `json:"AddedBy,omitempty" xml:"AddedBy,omitempty"` + AliasName *string `json:"AliasName,omitempty" xml:"AliasName,omitempty"` + CveLink *string `json:"CveLink,omitempty" xml:"CveLink,omitempty"` + CveLocation *string `json:"CveLocation,omitempty" xml:"CveLocation,omitempty"` + CveName *string `json:"CveName,omitempty" xml:"CveName,omitempty"` + Description *string `json:"Description,omitempty" xml:"Description,omitempty"` + Feature *string `json:"Feature,omitempty" xml:"Feature,omitempty"` + FixCmd *string `json:"FixCmd,omitempty" xml:"FixCmd,omitempty"` + ScanType *string `json:"ScanType,omitempty" xml:"ScanType,omitempty"` + Severity *string `json:"Severity,omitempty" xml:"Severity,omitempty"` + Version *string `json:"Version,omitempty" xml:"Version,omitempty"` + VersionFixed *string `json:"VersionFixed,omitempty" xml:"VersionFixed,omitempty"` + VersionFormat *string `json:"VersionFormat,omitempty" xml:"VersionFormat,omitempty"` +} + +func (s ListRepoTagScanResultResponseBodyVulnerabilities) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagScanResultResponseBodyVulnerabilities) GoString() string { + return s.String() +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetAddedBy(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.AddedBy = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetAliasName(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.AliasName = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetCveLink(v string) 
*ListRepoTagScanResultResponseBodyVulnerabilities { + s.CveLink = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetCveLocation(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.CveLocation = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetCveName(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.CveName = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetDescription(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.Description = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetFeature(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.Feature = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetFixCmd(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.FixCmd = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetScanType(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.ScanType = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetSeverity(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.Severity = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetVersion(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.Version = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetVersionFixed(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.VersionFixed = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetVersionFormat(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.VersionFormat = &v + return s +} + +type ListRepoTagScanResultResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepoTagScanResultResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepoTagScanResultResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagScanResultResponse) GoString() string { + return s.String() +} + +func (s *ListRepoTagScanResultResponse) SetHeaders(v map[string]*string) *ListRepoTagScanResultResponse { + s.Headers = v + return s +} + +func (s *ListRepoTagScanResultResponse) SetBody(v *ListRepoTagScanResultResponseBody) *ListRepoTagScanResultResponse { + s.Body = v + return s +} + +type ListRepoTriggerRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s ListRepoTriggerRequest) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTriggerRequest) GoString() string { + return s.String() +} + +func (s *ListRepoTriggerRequest) SetInstanceId(v string) *ListRepoTriggerRequest { + s.InstanceId = &v + return s +} + +func (s *ListRepoTriggerRequest) SetRepoId(v string) *ListRepoTriggerRequest { + s.RepoId = &v + return s +} + +type ListRepoTriggerResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + Triggers []*ListRepoTriggerResponseBodyTriggers `json:"Triggers,omitempty" xml:"Triggers,omitempty" type:"Repeated"` +} + +func (s ListRepoTriggerResponseBody) String() string { + return tea.Prettify(s) +} + +func (s 
ListRepoTriggerResponseBody) GoString() string { + return s.String() +} + +func (s *ListRepoTriggerResponseBody) SetCode(v string) *ListRepoTriggerResponseBody { + s.Code = &v + return s +} + +func (s *ListRepoTriggerResponseBody) SetIsSuccess(v bool) *ListRepoTriggerResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListRepoTriggerResponseBody) SetRequestId(v string) *ListRepoTriggerResponseBody { + s.RequestId = &v + return s +} + +func (s *ListRepoTriggerResponseBody) SetTriggers(v []*ListRepoTriggerResponseBodyTriggers) *ListRepoTriggerResponseBody { + s.Triggers = v + return s +} + +type ListRepoTriggerResponseBodyTriggers struct { + RepoEvent *string `json:"RepoEvent,omitempty" xml:"RepoEvent,omitempty"` + TriggerId *string `json:"TriggerId,omitempty" xml:"TriggerId,omitempty"` + TriggerName *string `json:"TriggerName,omitempty" xml:"TriggerName,omitempty"` + TriggerTag *string `json:"TriggerTag,omitempty" xml:"TriggerTag,omitempty"` + TriggerType *string `json:"TriggerType,omitempty" xml:"TriggerType,omitempty"` + TriggerUrl *string `json:"TriggerUrl,omitempty" xml:"TriggerUrl,omitempty"` +} + +func (s ListRepoTriggerResponseBodyTriggers) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTriggerResponseBodyTriggers) GoString() string { + return s.String() +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetRepoEvent(v string) *ListRepoTriggerResponseBodyTriggers { + s.RepoEvent = &v + return s +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetTriggerId(v string) *ListRepoTriggerResponseBodyTriggers { + s.TriggerId = &v + return s +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetTriggerName(v string) *ListRepoTriggerResponseBodyTriggers { + s.TriggerName = &v + return s +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetTriggerTag(v string) *ListRepoTriggerResponseBodyTriggers { + s.TriggerTag = &v + return s +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetTriggerType(v string) *ListRepoTriggerResponseBodyTriggers { + s.TriggerType = &v + return s +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetTriggerUrl(v string) *ListRepoTriggerResponseBodyTriggers { + s.TriggerUrl = &v + return s +} + +type ListRepoTriggerResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepoTriggerResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepoTriggerResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTriggerResponse) GoString() string { + return s.String() +} + +func (s *ListRepoTriggerResponse) SetHeaders(v map[string]*string) *ListRepoTriggerResponse { + s.Headers = v + return s +} + +func (s *ListRepoTriggerResponse) SetBody(v *ListRepoTriggerResponseBody) *ListRepoTriggerResponse { + s.Body = v + return s +} + +type ListRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s ListRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s ListRepositoryRequest) 
GoString() string { + return s.String() +} + +func (s *ListRepositoryRequest) SetInstanceId(v string) *ListRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *ListRepositoryRequest) SetPageNo(v int32) *ListRepositoryRequest { + s.PageNo = &v + return s +} + +func (s *ListRepositoryRequest) SetPageSize(v int32) *ListRepositoryRequest { + s.PageSize = &v + return s +} + +func (s *ListRepositoryRequest) SetRepoName(v string) *ListRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *ListRepositoryRequest) SetRepoNamespaceName(v string) *ListRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +func (s *ListRepositoryRequest) SetRepoStatus(v string) *ListRepositoryRequest { + s.RepoStatus = &v + return s +} + +func (s *ListRepositoryRequest) SetResourceGroupId(v string) *ListRepositoryRequest { + s.ResourceGroupId = &v + return s +} + +type ListRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + Repositories []*ListRepositoryResponseBodyRepositories `json:"Repositories,omitempty" xml:"Repositories,omitempty" type:"Repeated"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *ListRepositoryResponseBody) SetCode(v string) *ListRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *ListRepositoryResponseBody) SetIsSuccess(v bool) *ListRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListRepositoryResponseBody) SetPageNo(v int32) *ListRepositoryResponseBody { + s.PageNo = &v + return s +} + +func (s *ListRepositoryResponseBody) SetPageSize(v int32) *ListRepositoryResponseBody { + s.PageSize = &v + return s +} + +func (s *ListRepositoryResponseBody) SetRepositories(v []*ListRepositoryResponseBodyRepositories) *ListRepositoryResponseBody { + s.Repositories = v + return s +} + +func (s *ListRepositoryResponseBody) SetRequestId(v string) *ListRepositoryResponseBody { + s.RequestId = &v + return s +} + +func (s *ListRepositoryResponseBody) SetTotalCount(v string) *ListRepositoryResponseBody { + s.TotalCount = &v + return s +} + +type ListRepositoryResponseBodyRepositories struct { + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + RepoBuildType *string `json:"RepoBuildType,omitempty" xml:"RepoBuildType,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` + TagImmutability *bool `json:"TagImmutability,omitempty" 
xml:"TagImmutability,omitempty"` +} + +func (s ListRepositoryResponseBodyRepositories) String() string { + return tea.Prettify(s) +} + +func (s ListRepositoryResponseBodyRepositories) GoString() string { + return s.String() +} + +func (s *ListRepositoryResponseBodyRepositories) SetCreateTime(v int64) *ListRepositoryResponseBodyRepositories { + s.CreateTime = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetInstanceId(v string) *ListRepositoryResponseBodyRepositories { + s.InstanceId = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetModifiedTime(v int64) *ListRepositoryResponseBodyRepositories { + s.ModifiedTime = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoBuildType(v string) *ListRepositoryResponseBodyRepositories { + s.RepoBuildType = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoId(v string) *ListRepositoryResponseBodyRepositories { + s.RepoId = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoName(v string) *ListRepositoryResponseBodyRepositories { + s.RepoName = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoNamespaceName(v string) *ListRepositoryResponseBodyRepositories { + s.RepoNamespaceName = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoStatus(v string) *ListRepositoryResponseBodyRepositories { + s.RepoStatus = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoType(v string) *ListRepositoryResponseBodyRepositories { + s.RepoType = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetResourceGroupId(v string) *ListRepositoryResponseBodyRepositories { + s.ResourceGroupId = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetSummary(v string) *ListRepositoryResponseBodyRepositories { + s.Summary = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetTagImmutability(v bool) *ListRepositoryResponseBodyRepositories { + s.TagImmutability = &v + return s +} + +type ListRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepositoryResponse) GoString() string { + return s.String() +} + +func (s *ListRepositoryResponse) SetHeaders(v map[string]*string) *ListRepositoryResponse { + s.Headers = v + return s +} + +func (s *ListRepositoryResponse) SetBody(v *ListRepositoryResponseBody) *ListRepositoryResponse { + s.Body = v + return s +} + +type ResetLoginPasswordRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Password *string `json:"Password,omitempty" xml:"Password,omitempty"` +} + +func (s ResetLoginPasswordRequest) String() string { + return tea.Prettify(s) +} + +func (s ResetLoginPasswordRequest) GoString() string { + return s.String() +} + +func (s *ResetLoginPasswordRequest) SetInstanceId(v string) *ResetLoginPasswordRequest { + s.InstanceId = &v + return s +} + +func (s *ResetLoginPasswordRequest) SetPassword(v string) *ResetLoginPasswordRequest { + s.Password = &v + return s +} + +type ResetLoginPasswordResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" 
xml:"RequestId,omitempty"` +} + +func (s ResetLoginPasswordResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ResetLoginPasswordResponseBody) GoString() string { + return s.String() +} + +func (s *ResetLoginPasswordResponseBody) SetCode(v string) *ResetLoginPasswordResponseBody { + s.Code = &v + return s +} + +func (s *ResetLoginPasswordResponseBody) SetIsSuccess(v bool) *ResetLoginPasswordResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ResetLoginPasswordResponseBody) SetRequestId(v string) *ResetLoginPasswordResponseBody { + s.RequestId = &v + return s +} + +type ResetLoginPasswordResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ResetLoginPasswordResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ResetLoginPasswordResponse) String() string { + return tea.Prettify(s) +} + +func (s ResetLoginPasswordResponse) GoString() string { + return s.String() +} + +func (s *ResetLoginPasswordResponse) SetHeaders(v map[string]*string) *ResetLoginPasswordResponse { + s.Headers = v + return s +} + +func (s *ResetLoginPasswordResponse) SetBody(v *ResetLoginPasswordResponseBody) *ResetLoginPasswordResponse { + s.Body = v + return s +} + +type UpdateChainRequest struct { + ChainConfig *string `json:"ChainConfig,omitempty" xml:"ChainConfig,omitempty"` + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + Description *string `json:"Description,omitempty" xml:"Description,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Name *string `json:"Name,omitempty" xml:"Name,omitempty"` +} + +func (s UpdateChainRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateChainRequest) GoString() string { + return s.String() +} + +func (s *UpdateChainRequest) SetChainConfig(v string) *UpdateChainRequest { + s.ChainConfig = &v + return s +} + +func (s *UpdateChainRequest) SetChainId(v string) *UpdateChainRequest { + s.ChainId = &v + return s +} + +func (s *UpdateChainRequest) SetDescription(v string) *UpdateChainRequest { + s.Description = &v + return s +} + +func (s *UpdateChainRequest) SetInstanceId(v string) *UpdateChainRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateChainRequest) SetName(v string) *UpdateChainRequest { + s.Name = &v + return s +} + +type UpdateChainResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateChainResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateChainResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateChainResponseBody) SetCode(v string) *UpdateChainResponseBody { + s.Code = &v + return s +} + +func (s *UpdateChainResponseBody) SetIsSuccess(v bool) *UpdateChainResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateChainResponseBody) SetRequestId(v string) *UpdateChainResponseBody { + s.RequestId = &v + return s +} + +type UpdateChainResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateChainResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateChainResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateChainResponse) GoString() string { + return s.String() +} + +func (s *UpdateChainResponse) 
SetHeaders(v map[string]*string) *UpdateChainResponse { + s.Headers = v + return s +} + +func (s *UpdateChainResponse) SetBody(v *UpdateChainResponseBody) *UpdateChainResponse { + s.Body = v + return s +} + +type UpdateChartNamespaceRequest struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` +} + +func (s UpdateChartNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartNamespaceRequest) GoString() string { + return s.String() +} + +func (s *UpdateChartNamespaceRequest) SetAutoCreateRepo(v bool) *UpdateChartNamespaceRequest { + s.AutoCreateRepo = &v + return s +} + +func (s *UpdateChartNamespaceRequest) SetDefaultRepoType(v string) *UpdateChartNamespaceRequest { + s.DefaultRepoType = &v + return s +} + +func (s *UpdateChartNamespaceRequest) SetInstanceId(v string) *UpdateChartNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateChartNamespaceRequest) SetNamespaceName(v string) *UpdateChartNamespaceRequest { + s.NamespaceName = &v + return s +} + +type UpdateChartNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateChartNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateChartNamespaceResponseBody) SetCode(v string) *UpdateChartNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *UpdateChartNamespaceResponseBody) SetIsSuccess(v bool) *UpdateChartNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateChartNamespaceResponseBody) SetRequestId(v string) *UpdateChartNamespaceResponseBody { + s.RequestId = &v + return s +} + +type UpdateChartNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateChartNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateChartNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartNamespaceResponse) GoString() string { + return s.String() +} + +func (s *UpdateChartNamespaceResponse) SetHeaders(v map[string]*string) *UpdateChartNamespaceResponse { + s.Headers = v + return s +} + +func (s *UpdateChartNamespaceResponse) SetBody(v *UpdateChartNamespaceResponseBody) *UpdateChartNamespaceResponse { + s.Body = v + return s +} + +type UpdateChartRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` +} + +func (s UpdateChartRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartRepositoryRequest) GoString() string { + return s.String() +} + +func (s *UpdateChartRepositoryRequest) SetInstanceId(v string) 
*UpdateChartRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateChartRepositoryRequest) SetRepoName(v string) *UpdateChartRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *UpdateChartRepositoryRequest) SetRepoNamespaceName(v string) *UpdateChartRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +func (s *UpdateChartRepositoryRequest) SetRepoType(v string) *UpdateChartRepositoryRequest { + s.RepoType = &v + return s +} + +func (s *UpdateChartRepositoryRequest) SetSummary(v string) *UpdateChartRepositoryRequest { + s.Summary = &v + return s +} + +type UpdateChartRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateChartRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateChartRepositoryResponseBody) SetCode(v string) *UpdateChartRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *UpdateChartRepositoryResponseBody) SetIsSuccess(v bool) *UpdateChartRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateChartRepositoryResponseBody) SetRequestId(v string) *UpdateChartRepositoryResponseBody { + s.RequestId = &v + return s +} + +type UpdateChartRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateChartRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateChartRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartRepositoryResponse) GoString() string { + return s.String() +} + +func (s *UpdateChartRepositoryResponse) SetHeaders(v map[string]*string) *UpdateChartRepositoryResponse { + s.Headers = v + return s +} + +func (s *UpdateChartRepositoryResponse) SetBody(v *UpdateChartRepositoryResponseBody) *UpdateChartRepositoryResponse { + s.Body = v + return s +} + +type UpdateEventCenterRuleRequest struct { + EventChannel *string `json:"EventChannel,omitempty" xml:"EventChannel,omitempty"` + EventConfig *string `json:"EventConfig,omitempty" xml:"EventConfig,omitempty"` + EventScope *string `json:"EventScope,omitempty" xml:"EventScope,omitempty"` + EventType *string `json:"EventType,omitempty" xml:"EventType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Namespaces []*string `json:"Namespaces,omitempty" xml:"Namespaces,omitempty" type:"Repeated"` + RepoNames []*string `json:"RepoNames,omitempty" xml:"RepoNames,omitempty" type:"Repeated"` + RepoTagFilterPattern *string `json:"RepoTagFilterPattern,omitempty" xml:"RepoTagFilterPattern,omitempty"` + RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"` + RuleName *string `json:"RuleName,omitempty" xml:"RuleName,omitempty"` +} + +func (s UpdateEventCenterRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateEventCenterRuleRequest) GoString() string { + return s.String() +} + +func (s *UpdateEventCenterRuleRequest) SetEventChannel(v string) *UpdateEventCenterRuleRequest { + s.EventChannel = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetEventConfig(v string) *UpdateEventCenterRuleRequest { + s.EventConfig = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetEventScope(v string) 
*UpdateEventCenterRuleRequest { + s.EventScope = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetEventType(v string) *UpdateEventCenterRuleRequest { + s.EventType = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetInstanceId(v string) *UpdateEventCenterRuleRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetNamespaces(v []*string) *UpdateEventCenterRuleRequest { + s.Namespaces = v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetRepoNames(v []*string) *UpdateEventCenterRuleRequest { + s.RepoNames = v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetRepoTagFilterPattern(v string) *UpdateEventCenterRuleRequest { + s.RepoTagFilterPattern = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetRuleId(v string) *UpdateEventCenterRuleRequest { + s.RuleId = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetRuleName(v string) *UpdateEventCenterRuleRequest { + s.RuleName = &v + return s +} + +type UpdateEventCenterRuleShrinkRequest struct { + EventChannel *string `json:"EventChannel,omitempty" xml:"EventChannel,omitempty"` + EventConfig *string `json:"EventConfig,omitempty" xml:"EventConfig,omitempty"` + EventScope *string `json:"EventScope,omitempty" xml:"EventScope,omitempty"` + EventType *string `json:"EventType,omitempty" xml:"EventType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespacesShrink *string `json:"Namespaces,omitempty" xml:"Namespaces,omitempty"` + RepoNamesShrink *string `json:"RepoNames,omitempty" xml:"RepoNames,omitempty"` + RepoTagFilterPattern *string `json:"RepoTagFilterPattern,omitempty" xml:"RepoTagFilterPattern,omitempty"` + RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"` + RuleName *string `json:"RuleName,omitempty" xml:"RuleName,omitempty"` +} + +func (s UpdateEventCenterRuleShrinkRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateEventCenterRuleShrinkRequest) GoString() string { + return s.String() +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetEventChannel(v string) *UpdateEventCenterRuleShrinkRequest { + s.EventChannel = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetEventConfig(v string) *UpdateEventCenterRuleShrinkRequest { + s.EventConfig = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetEventScope(v string) *UpdateEventCenterRuleShrinkRequest { + s.EventScope = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetEventType(v string) *UpdateEventCenterRuleShrinkRequest { + s.EventType = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetInstanceId(v string) *UpdateEventCenterRuleShrinkRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetNamespacesShrink(v string) *UpdateEventCenterRuleShrinkRequest { + s.NamespacesShrink = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetRepoNamesShrink(v string) *UpdateEventCenterRuleShrinkRequest { + s.RepoNamesShrink = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetRepoTagFilterPattern(v string) *UpdateEventCenterRuleShrinkRequest { + s.RepoTagFilterPattern = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetRuleId(v string) *UpdateEventCenterRuleShrinkRequest { + s.RuleId = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetRuleName(v string) *UpdateEventCenterRuleShrinkRequest { + s.RuleName = &v + return s +} + +type 
UpdateEventCenterRuleResponseBody struct { + Code *int32 `json:"Code,omitempty" xml:"Code,omitempty"` + // Id of the request + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"` +} + +func (s UpdateEventCenterRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateEventCenterRuleResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateEventCenterRuleResponseBody) SetCode(v int32) *UpdateEventCenterRuleResponseBody { + s.Code = &v + return s +} + +func (s *UpdateEventCenterRuleResponseBody) SetRequestId(v string) *UpdateEventCenterRuleResponseBody { + s.RequestId = &v + return s +} + +func (s *UpdateEventCenterRuleResponseBody) SetRuleId(v string) *UpdateEventCenterRuleResponseBody { + s.RuleId = &v + return s +} + +type UpdateEventCenterRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateEventCenterRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateEventCenterRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateEventCenterRuleResponse) GoString() string { + return s.String() +} + +func (s *UpdateEventCenterRuleResponse) SetHeaders(v map[string]*string) *UpdateEventCenterRuleResponse { + s.Headers = v + return s +} + +func (s *UpdateEventCenterRuleResponse) SetBody(v *UpdateEventCenterRuleResponseBody) *UpdateEventCenterRuleResponse { + s.Body = v + return s +} + +type UpdateInstanceEndpointStatusRequest struct { + Enable *bool `json:"Enable,omitempty" xml:"Enable,omitempty"` + EndpointType *string `json:"EndpointType,omitempty" xml:"EndpointType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` +} + +func (s UpdateInstanceEndpointStatusRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateInstanceEndpointStatusRequest) GoString() string { + return s.String() +} + +func (s *UpdateInstanceEndpointStatusRequest) SetEnable(v bool) *UpdateInstanceEndpointStatusRequest { + s.Enable = &v + return s +} + +func (s *UpdateInstanceEndpointStatusRequest) SetEndpointType(v string) *UpdateInstanceEndpointStatusRequest { + s.EndpointType = &v + return s +} + +func (s *UpdateInstanceEndpointStatusRequest) SetInstanceId(v string) *UpdateInstanceEndpointStatusRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateInstanceEndpointStatusRequest) SetModuleName(v string) *UpdateInstanceEndpointStatusRequest { + s.ModuleName = &v + return s +} + +type UpdateInstanceEndpointStatusResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateInstanceEndpointStatusResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateInstanceEndpointStatusResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateInstanceEndpointStatusResponseBody) SetCode(v string) *UpdateInstanceEndpointStatusResponseBody { + s.Code = &v + return s +} + +func (s *UpdateInstanceEndpointStatusResponseBody) SetIsSuccess(v bool) *UpdateInstanceEndpointStatusResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateInstanceEndpointStatusResponseBody) SetRequestId(v string) 
*UpdateInstanceEndpointStatusResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type UpdateInstanceEndpointStatusResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *UpdateInstanceEndpointStatusResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s UpdateInstanceEndpointStatusResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateInstanceEndpointStatusResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateInstanceEndpointStatusResponse) SetHeaders(v map[string]*string) *UpdateInstanceEndpointStatusResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *UpdateInstanceEndpointStatusResponse) SetBody(v *UpdateInstanceEndpointStatusResponseBody) *UpdateInstanceEndpointStatusResponse {
+	s.Body = v
+	return s
+}
+
+type UpdateNamespaceRequest struct {
+	AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"`
+	DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"`
+}
+
+func (s UpdateNamespaceRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateNamespaceRequest) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateNamespaceRequest) SetAutoCreateRepo(v bool) *UpdateNamespaceRequest {
+	s.AutoCreateRepo = &v
+	return s
+}
+
+func (s *UpdateNamespaceRequest) SetDefaultRepoType(v string) *UpdateNamespaceRequest {
+	s.DefaultRepoType = &v
+	return s
+}
+
+func (s *UpdateNamespaceRequest) SetInstanceId(v string) *UpdateNamespaceRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *UpdateNamespaceRequest) SetNamespaceName(v string) *UpdateNamespaceRequest {
+	s.NamespaceName = &v
+	return s
+}
+
+type UpdateNamespaceResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s UpdateNamespaceResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateNamespaceResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateNamespaceResponseBody) SetCode(v string) *UpdateNamespaceResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *UpdateNamespaceResponseBody) SetIsSuccess(v bool) *UpdateNamespaceResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *UpdateNamespaceResponseBody) SetRequestId(v string) *UpdateNamespaceResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type UpdateNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *UpdateNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s UpdateNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateNamespaceResponse) SetHeaders(v map[string]*string) *UpdateNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *UpdateNamespaceResponse) SetBody(v *UpdateNamespaceResponseBody) *UpdateNamespaceResponse {
+	s.Body = v
+	return s
+}
+
+type UpdateRepoBuildRuleRequest struct {
+	BuildArgs []*string `json:"BuildArgs,omitempty" xml:"BuildArgs,omitempty" type:"Repeated"`
+	BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"`
+	DockerfileLocation *string `json:"DockerfileLocation,omitempty" xml:"DockerfileLocation,omitempty"`
+	DockerfileName *string `json:"DockerfileName,omitempty" xml:"DockerfileName,omitempty"`
+	ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	Platforms []*string `json:"Platforms,omitempty" xml:"Platforms,omitempty" type:"Repeated"`
+	PushName *string `json:"PushName,omitempty" xml:"PushName,omitempty"`
+	PushType *string `json:"PushType,omitempty" xml:"PushType,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+}
+
+func (s UpdateRepoBuildRuleRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoBuildRuleRequest) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoBuildRuleRequest) SetBuildArgs(v []*string) *UpdateRepoBuildRuleRequest {
+	s.BuildArgs = v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleRequest) SetBuildRuleId(v string) *UpdateRepoBuildRuleRequest {
+	s.BuildRuleId = &v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleRequest) SetDockerfileLocation(v string) *UpdateRepoBuildRuleRequest {
+	s.DockerfileLocation = &v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleRequest) SetDockerfileName(v string) *UpdateRepoBuildRuleRequest {
+	s.DockerfileName = &v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleRequest) SetImageTag(v string) *UpdateRepoBuildRuleRequest {
+	s.ImageTag = &v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleRequest) SetInstanceId(v string) *UpdateRepoBuildRuleRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleRequest) SetPlatforms(v []*string) *UpdateRepoBuildRuleRequest {
+	s.Platforms = v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleRequest) SetPushName(v string) *UpdateRepoBuildRuleRequest {
+	s.PushName = &v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleRequest) SetPushType(v string) *UpdateRepoBuildRuleRequest {
+	s.PushType = &v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleRequest) SetRepoId(v string) *UpdateRepoBuildRuleRequest {
+	s.RepoId = &v
+	return s
+}
+
+type UpdateRepoBuildRuleResponseBody struct {
+	BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s UpdateRepoBuildRuleResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoBuildRuleResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoBuildRuleResponseBody) SetBuildRuleId(v string) *UpdateRepoBuildRuleResponseBody {
+	s.BuildRuleId = &v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleResponseBody) SetCode(v string) *UpdateRepoBuildRuleResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleResponseBody) SetIsSuccess(v bool) *UpdateRepoBuildRuleResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleResponseBody) SetRequestId(v string) *UpdateRepoBuildRuleResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type UpdateRepoBuildRuleResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *UpdateRepoBuildRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s UpdateRepoBuildRuleResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoBuildRuleResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoBuildRuleResponse) SetHeaders(v map[string]*string) *UpdateRepoBuildRuleResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *UpdateRepoBuildRuleResponse) SetBody(v *UpdateRepoBuildRuleResponseBody) *UpdateRepoBuildRuleResponse {
+	s.Body = v
+	return s
+}
+
+type UpdateRepoSourceCodeRepoRequest struct {
+	AutoBuild *string `json:"AutoBuild,omitempty" xml:"AutoBuild,omitempty"`
+	CodeRepoId *string `json:"CodeRepoId,omitempty" xml:"CodeRepoId,omitempty"`
+	CodeRepoName *string `json:"CodeRepoName,omitempty" xml:"CodeRepoName,omitempty"`
+	CodeRepoNamespaceName *string `json:"CodeRepoNamespaceName,omitempty" xml:"CodeRepoNamespaceName,omitempty"`
+	CodeRepoType *string `json:"CodeRepoType,omitempty" xml:"CodeRepoType,omitempty"`
+	DisableCacheBuild *string `json:"DisableCacheBuild,omitempty" xml:"DisableCacheBuild,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	OverseaBuild *string `json:"OverseaBuild,omitempty" xml:"OverseaBuild,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+}
+
+func (s UpdateRepoSourceCodeRepoRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoSourceCodeRepoRequest) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoSourceCodeRepoRequest) SetAutoBuild(v string) *UpdateRepoSourceCodeRepoRequest {
+	s.AutoBuild = &v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoRequest) SetCodeRepoId(v string) *UpdateRepoSourceCodeRepoRequest {
+	s.CodeRepoId = &v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoRequest) SetCodeRepoName(v string) *UpdateRepoSourceCodeRepoRequest {
+	s.CodeRepoName = &v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoRequest) SetCodeRepoNamespaceName(v string) *UpdateRepoSourceCodeRepoRequest {
+	s.CodeRepoNamespaceName = &v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoRequest) SetCodeRepoType(v string) *UpdateRepoSourceCodeRepoRequest {
+	s.CodeRepoType = &v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoRequest) SetDisableCacheBuild(v string) *UpdateRepoSourceCodeRepoRequest {
+	s.DisableCacheBuild = &v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoRequest) SetInstanceId(v string) *UpdateRepoSourceCodeRepoRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoRequest) SetOverseaBuild(v string) *UpdateRepoSourceCodeRepoRequest {
+	s.OverseaBuild = &v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoRequest) SetRepoId(v string) *UpdateRepoSourceCodeRepoRequest {
+	s.RepoId = &v
+	return s
+}
+
+type UpdateRepoSourceCodeRepoResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s UpdateRepoSourceCodeRepoResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoSourceCodeRepoResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoSourceCodeRepoResponseBody) SetCode(v string) *UpdateRepoSourceCodeRepoResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoResponseBody) SetIsSuccess(v bool) *UpdateRepoSourceCodeRepoResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoResponseBody) SetRequestId(v string) *UpdateRepoSourceCodeRepoResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type UpdateRepoSourceCodeRepoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *UpdateRepoSourceCodeRepoResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s UpdateRepoSourceCodeRepoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoSourceCodeRepoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoSourceCodeRepoResponse) SetHeaders(v map[string]*string) *UpdateRepoSourceCodeRepoResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *UpdateRepoSourceCodeRepoResponse) SetBody(v *UpdateRepoSourceCodeRepoResponseBody) *UpdateRepoSourceCodeRepoResponse {
+	s.Body = v
+	return s
+}
+
+type UpdateRepoTriggerRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	TriggerId *string `json:"TriggerId,omitempty" xml:"TriggerId,omitempty"`
+	TriggerName *string `json:"TriggerName,omitempty" xml:"TriggerName,omitempty"`
+	TriggerTag *string `json:"TriggerTag,omitempty" xml:"TriggerTag,omitempty"`
+	TriggerType *string `json:"TriggerType,omitempty" xml:"TriggerType,omitempty"`
+	TriggerUrl *string `json:"TriggerUrl,omitempty" xml:"TriggerUrl,omitempty"`
+}
+
+func (s UpdateRepoTriggerRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoTriggerRequest) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoTriggerRequest) SetInstanceId(v string) *UpdateRepoTriggerRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *UpdateRepoTriggerRequest) SetRepoId(v string) *UpdateRepoTriggerRequest {
+	s.RepoId = &v
+	return s
+}
+
+func (s *UpdateRepoTriggerRequest) SetTriggerId(v string) *UpdateRepoTriggerRequest {
+	s.TriggerId = &v
+	return s
+}
+
+func (s *UpdateRepoTriggerRequest) SetTriggerName(v string) *UpdateRepoTriggerRequest {
+	s.TriggerName = &v
+	return s
+}
+
+func (s *UpdateRepoTriggerRequest) SetTriggerTag(v string) *UpdateRepoTriggerRequest {
+	s.TriggerTag = &v
+	return s
+}
+
+func (s *UpdateRepoTriggerRequest) SetTriggerType(v string) *UpdateRepoTriggerRequest {
+	s.TriggerType = &v
+	return s
+}
+
+func (s *UpdateRepoTriggerRequest) SetTriggerUrl(v string) *UpdateRepoTriggerRequest {
+	s.TriggerUrl = &v
+	return s
+}
+
+type UpdateRepoTriggerResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s UpdateRepoTriggerResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoTriggerResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoTriggerResponseBody) SetCode(v string) *UpdateRepoTriggerResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *UpdateRepoTriggerResponseBody) SetIsSuccess(v bool) *UpdateRepoTriggerResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *UpdateRepoTriggerResponseBody) SetRequestId(v string) *UpdateRepoTriggerResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type UpdateRepoTriggerResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *UpdateRepoTriggerResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s UpdateRepoTriggerResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoTriggerResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoTriggerResponse) SetHeaders(v map[string]*string) *UpdateRepoTriggerResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *UpdateRepoTriggerResponse) SetBody(v *UpdateRepoTriggerResponseBody) *UpdateRepoTriggerResponse {
+	s.Body = v
+	return s
+}
+
+type UpdateRepositoryRequest struct {
+	Detail *string `json:"Detail,omitempty" xml:"Detail,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"`
+	Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"`
+	TagImmutability *bool `json:"TagImmutability,omitempty" xml:"TagImmutability,omitempty"`
+}
+
+func (s UpdateRepositoryRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepositoryRequest) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepositoryRequest) SetDetail(v string) *UpdateRepositoryRequest {
+	s.Detail = &v
+	return s
+}
+
+func (s *UpdateRepositoryRequest) SetInstanceId(v string) *UpdateRepositoryRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *UpdateRepositoryRequest) SetRepoId(v string) *UpdateRepositoryRequest {
+	s.RepoId = &v
+	return s
+}
+
+func (s *UpdateRepositoryRequest) SetRepoType(v string) *UpdateRepositoryRequest {
+	s.RepoType = &v
+	return s
+}
+
+func (s *UpdateRepositoryRequest) SetSummary(v string) *UpdateRepositoryRequest {
+	s.Summary = &v
+	return s
+}
+
+func (s *UpdateRepositoryRequest) SetTagImmutability(v bool) *UpdateRepositoryRequest {
+	s.TagImmutability = &v
+	return s
+}
+
+type UpdateRepositoryResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+}
+
+func (s UpdateRepositoryResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepositoryResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepositoryResponseBody) SetCode(v string) *UpdateRepositoryResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *UpdateRepositoryResponseBody) SetIsSuccess(v bool) *UpdateRepositoryResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *UpdateRepositoryResponseBody) SetRequestId(v string) *UpdateRepositoryResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+type UpdateRepositoryResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *UpdateRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s UpdateRepositoryResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepositoryResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepositoryResponse) SetHeaders(v map[string]*string) *UpdateRepositoryResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *UpdateRepositoryResponse) SetBody(v *UpdateRepositoryResponseBody) *UpdateRepositoryResponse {
+	s.Body = v
+	return s
+}
+
+type Client struct {
+	openapi.Client
+}
+
+func NewClient(config *openapi.Config) (*Client, error) {
+	client := new(Client)
+	err := client.Init(config)
+	return client, err
+}
+
+func (client *Client) Init(config *openapi.Config) (_err error) {
+	_err = client.Client.Init(config)
+	if _err != nil {
+		return _err
+	}
+	client.EndpointRule = tea.String("regional")
+	_err = client.CheckConfig(config)
+	if _err != nil {
+		return _err
+	}
+	client.Endpoint, _err = client.GetEndpoint(tea.String("cr"), client.RegionId, client.EndpointRule, client.Network, client.Suffix, client.EndpointMap, client.Endpoint)
+	if _err != nil {
+		return _err
+	}
+
+	return nil
+}
+
+func (client *Client) GetEndpoint(productId *string, regionId *string, endpointRule *string, network *string, suffix *string, endpointMap map[string]*string, endpoint *string) (_result *string, _err error) {
+	if !tea.BoolValue(util.Empty(endpoint)) {
+		_result = endpoint
+		return _result, _err
+	}
+
+	if !tea.BoolValue(util.IsUnset(endpointMap)) && !tea.BoolValue(util.Empty(endpointMap[tea.StringValue(regionId)])) {
+		_result = endpointMap[tea.StringValue(regionId)]
+		return _result, _err
+	}
+
+	_body, _err := endpointutil.GetEndpointRules(productId, regionId, endpointRule, network, suffix)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CancelArtifactBuildTaskWithOptions(request *CancelArtifactBuildTaskRequest, runtime *util.RuntimeOptions) (_result *CancelArtifactBuildTaskResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.BuildTaskId)) {
+		query["BuildTaskId"] = request.BuildTaskId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CancelArtifactBuildTask"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CancelArtifactBuildTaskResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CancelArtifactBuildTask(request *CancelArtifactBuildTaskRequest) (_result *CancelArtifactBuildTaskResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CancelArtifactBuildTaskResponse{}
+	_body, _err := client.CancelArtifactBuildTaskWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CancelRepoBuildRecordWithOptions(request *CancelRepoBuildRecordRequest, runtime *util.RuntimeOptions) (_result *CancelRepoBuildRecordResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.BuildRecordId)) {
+		query["BuildRecordId"] = request.BuildRecordId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CancelRepoBuildRecord"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CancelRepoBuildRecordResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CancelRepoBuildRecord(request *CancelRepoBuildRecordRequest) (_result *CancelRepoBuildRecordResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CancelRepoBuildRecordResponse{}
+	_body, _err := client.CancelRepoBuildRecordWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateBuildRecordByRuleWithOptions(request *CreateBuildRecordByRuleRequest, runtime *util.RuntimeOptions) (_result *CreateBuildRecordByRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.BuildRuleId)) {
+		query["BuildRuleId"] = request.BuildRuleId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateBuildRecordByRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateBuildRecordByRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateBuildRecordByRule(request *CreateBuildRecordByRuleRequest) (_result *CreateBuildRecordByRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateBuildRecordByRuleResponse{}
+	_body, _err := client.CreateBuildRecordByRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateChainWithOptions(request *CreateChainRequest, runtime *util.RuntimeOptions) (_result *CreateChainResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.ChainConfig)) {
+		query["ChainConfig"] = request.ChainConfig
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Description)) {
+		query["Description"] = request.Description
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Name)) {
+		query["Name"] = request.Name
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateChain"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateChainResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateChain(request *CreateChainRequest) (_result *CreateChainResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateChainResponse{}
+	_body, _err := client.CreateChainWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateChartNamespaceWithOptions(request *CreateChartNamespaceRequest, runtime *util.RuntimeOptions) (_result *CreateChartNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.AutoCreateRepo)) {
+		query["AutoCreateRepo"] = request.AutoCreateRepo
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.DefaultRepoType)) {
+		query["DefaultRepoType"] = request.DefaultRepoType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) {
+		query["ResourceGroupId"] = request.ResourceGroupId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateChartNamespace"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateChartNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateChartNamespace(request *CreateChartNamespaceRequest) (_result *CreateChartNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateChartNamespaceResponse{}
+	_body, _err := client.CreateChartNamespaceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateChartRepositoryWithOptions(request *CreateChartRepositoryRequest, runtime *util.RuntimeOptions) (_result *CreateChartRepositoryResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoType)) {
+		query["RepoType"] = request.RepoType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Summary)) {
+		query["Summary"] = request.Summary
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateChartRepository"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateChartRepositoryResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateChartRepository(request *CreateChartRepositoryRequest) (_result *CreateChartRepositoryResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateChartRepositoryResponse{}
+	_body, _err := client.CreateChartRepositoryWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateInstanceEndpointAclPolicyWithOptions(request *CreateInstanceEndpointAclPolicyRequest, runtime *util.RuntimeOptions) (_result *CreateInstanceEndpointAclPolicyResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Comment)) {
+		query["Comment"] = request.Comment
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.EndpointType)) {
+		query["EndpointType"] = request.EndpointType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Entry)) {
+		query["Entry"] = request.Entry
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ModuleName)) {
+		query["ModuleName"] = request.ModuleName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateInstanceEndpointAclPolicy"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateInstanceEndpointAclPolicyResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateInstanceEndpointAclPolicy(request *CreateInstanceEndpointAclPolicyRequest) (_result *CreateInstanceEndpointAclPolicyResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateInstanceEndpointAclPolicyResponse{}
+	_body, _err := client.CreateInstanceEndpointAclPolicyWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateInstanceVpcEndpointLinkedVpcWithOptions(request *CreateInstanceVpcEndpointLinkedVpcRequest, runtime *util.RuntimeOptions) (_result *CreateInstanceVpcEndpointLinkedVpcResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.EnableCreateDNSRecordInPvzt)) {
+		query["EnableCreateDNSRecordInPvzt"] = request.EnableCreateDNSRecordInPvzt
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ModuleName)) {
+		query["ModuleName"] = request.ModuleName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.VpcId)) {
+		query["VpcId"] = request.VpcId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.VswitchId)) {
+		query["VswitchId"] = request.VswitchId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateInstanceVpcEndpointLinkedVpc"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateInstanceVpcEndpointLinkedVpcResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateInstanceVpcEndpointLinkedVpc(request *CreateInstanceVpcEndpointLinkedVpcRequest) (_result *CreateInstanceVpcEndpointLinkedVpcResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateInstanceVpcEndpointLinkedVpcResponse{}
+	_body, _err := client.CreateInstanceVpcEndpointLinkedVpcWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateNamespaceWithOptions(request *CreateNamespaceRequest, runtime *util.RuntimeOptions) (_result *CreateNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.AutoCreateRepo)) {
+		query["AutoCreateRepo"] = request.AutoCreateRepo
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.DefaultRepoType)) {
+		query["DefaultRepoType"] = request.DefaultRepoType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) {
+		query["ResourceGroupId"] = request.ResourceGroupId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateNamespace"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateNamespace(request *CreateNamespaceRequest) (_result *CreateNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateNamespaceResponse{}
+	_body, _err := client.CreateNamespaceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoBuildRuleWithOptions(request *CreateRepoBuildRuleRequest, runtime *util.RuntimeOptions) (_result *CreateRepoBuildRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.BuildArgs)) {
+		query["BuildArgs"] = request.BuildArgs
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.DockerfileLocation)) {
+		query["DockerfileLocation"] = request.DockerfileLocation
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.DockerfileName)) {
+		query["DockerfileName"] = request.DockerfileName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ImageTag)) {
+		query["ImageTag"] = request.ImageTag
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Platforms)) {
+		query["Platforms"] = request.Platforms
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.PushName)) {
+		query["PushName"] = request.PushName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.PushType)) {
+		query["PushType"] = request.PushType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoBuildRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoBuildRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoBuildRule(request *CreateRepoBuildRuleRequest) (_result *CreateRepoBuildRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoBuildRuleResponse{}
+	_body, _err := client.CreateRepoBuildRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSourceCodeRepoWithOptions(request *CreateRepoSourceCodeRepoRequest, runtime *util.RuntimeOptions) (_result *CreateRepoSourceCodeRepoResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.AutoBuild)) {
+		query["AutoBuild"] = request.AutoBuild
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.CodeRepoName)) {
+		query["CodeRepoName"] = request.CodeRepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.CodeRepoNamespaceName)) {
+		query["CodeRepoNamespaceName"] = request.CodeRepoNamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.CodeRepoType)) {
+		query["CodeRepoType"] = request.CodeRepoType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.DisableCacheBuild)) {
+		query["DisableCacheBuild"] = request.DisableCacheBuild
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.OverseaBuild)) {
+		query["OverseaBuild"] = request.OverseaBuild
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoSourceCodeRepo"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoSourceCodeRepoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSourceCodeRepo(request *CreateRepoSourceCodeRepoRequest) (_result *CreateRepoSourceCodeRepoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoSourceCodeRepoResponse{}
+	_body, _err := client.CreateRepoSourceCodeRepoWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncRuleWithOptions(request *CreateRepoSyncRuleRequest, runtime *util.RuntimeOptions) (_result *CreateRepoSyncRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncRuleName)) {
+		query["SyncRuleName"] = request.SyncRuleName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncScope)) {
+		query["SyncScope"] = request.SyncScope
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncTrigger)) {
+		query["SyncTrigger"] = request.SyncTrigger
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TagFilter)) {
+		query["TagFilter"] = request.TagFilter
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetInstanceId)) {
+		query["TargetInstanceId"] = request.TargetInstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetNamespaceName)) {
+		query["TargetNamespaceName"] = request.TargetNamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetRegionId)) {
+		query["TargetRegionId"] = request.TargetRegionId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetRepoName)) {
+		query["TargetRepoName"] = request.TargetRepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetUserId)) {
+		query["TargetUserId"] = request.TargetUserId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoSyncRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoSyncRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncRule(request *CreateRepoSyncRuleRequest) (_result *CreateRepoSyncRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoSyncRuleResponse{}
+	_body, _err := client.CreateRepoSyncRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncTaskWithOptions(request *CreateRepoSyncTaskRequest, runtime *util.RuntimeOptions) (_result *CreateRepoSyncTaskResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Override)) {
+		query["Override"] = request.Override
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Tag)) {
+		query["Tag"] = request.Tag
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetInstanceId)) {
+		query["TargetInstanceId"] = request.TargetInstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetNamespace)) {
+		query["TargetNamespace"] = request.TargetNamespace
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetRegionId)) {
+		query["TargetRegionId"] = request.TargetRegionId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetRepoName)) {
+		query["TargetRepoName"] = request.TargetRepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetTag)) {
+		query["TargetTag"] = request.TargetTag
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetUserId)) {
+		query["TargetUserId"] = request.TargetUserId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoSyncTask"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoSyncTaskResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncTask(request *CreateRepoSyncTaskRequest) (_result *CreateRepoSyncTaskResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoSyncTaskResponse{}
+	_body, _err := client.CreateRepoSyncTaskWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncTaskByRuleWithOptions(request *CreateRepoSyncTaskByRuleRequest, runtime *util.RuntimeOptions) (_result *CreateRepoSyncTaskByRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncRuleId)) {
+		query["SyncRuleId"] = request.SyncRuleId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Tag)) {
+		query["Tag"] = request.Tag
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoSyncTaskByRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoSyncTaskByRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncTaskByRule(request *CreateRepoSyncTaskByRuleRequest) (_result *CreateRepoSyncTaskByRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoSyncTaskByRuleResponse{}
+	_body, _err := client.CreateRepoSyncTaskByRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTagWithOptions(request *CreateRepoTagRequest, runtime *util.RuntimeOptions) (_result *CreateRepoTagResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.FromTag)) {
+		query["FromTag"] = request.FromTag
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ToTag)) {
+		query["ToTag"] = request.ToTag
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoTag"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoTagResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTag(request *CreateRepoTagRequest) (_result *CreateRepoTagResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoTagResponse{}
+	_body, _err := client.CreateRepoTagWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTagScanTaskWithOptions(request *CreateRepoTagScanTaskRequest, runtime *util.RuntimeOptions) (_result *CreateRepoTagScanTaskResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Digest)) {
+		query["Digest"] = request.Digest
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ScanService)) {
+		query["ScanService"] = request.ScanService
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Tag)) {
+		query["Tag"] = request.Tag
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoTagScanTask"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoTagScanTaskResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTagScanTask(request *CreateRepoTagScanTaskRequest) (_result *CreateRepoTagScanTaskResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoTagScanTaskResponse{}
+	_body, _err := client.CreateRepoTagScanTaskWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTriggerWithOptions(request *CreateRepoTriggerRequest, runtime *util.RuntimeOptions) (_result *CreateRepoTriggerResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TriggerName)) {
+		query["TriggerName"] = request.TriggerName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TriggerTag)) {
+		query["TriggerTag"] = request.TriggerTag
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TriggerType)) {
+		query["TriggerType"] = request.TriggerType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TriggerUrl)) {
+		query["TriggerUrl"] = request.TriggerUrl
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoTrigger"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoTriggerResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTrigger(request *CreateRepoTriggerRequest) (_result *CreateRepoTriggerResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoTriggerResponse{}
+	_body, _err := client.CreateRepoTriggerWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepositoryWithOptions(request *CreateRepositoryRequest, runtime *util.RuntimeOptions) (_result *CreateRepositoryResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Detail)) {
+		query["Detail"] = request.Detail
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoType)) {
+		query["RepoType"] = request.RepoType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) {
+		query["ResourceGroupId"] = request.ResourceGroupId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Summary)) {
+		query["Summary"] = request.Summary
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TagImmutability)) {
+		query["TagImmutability"] = request.TagImmutability
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepository"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepositoryResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepository(request *CreateRepositoryRequest) (_result *CreateRepositoryResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepositoryResponse{}
+	_body, _err := client.CreateRepositoryWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteChainWithOptions(request *DeleteChainRequest, runtime *util.RuntimeOptions) (_result *DeleteChainResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.ChainId)) {
+		query["ChainId"] = request.ChainId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteChain"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteChainResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteChain(request *DeleteChainRequest) (_result *DeleteChainResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteChainResponse{}
+	_body, _err := client.DeleteChainWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteChartNamespaceWithOptions(request *DeleteChartNamespaceRequest, runtime *util.RuntimeOptions) (_result *DeleteChartNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteChartNamespace"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteChartNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteChartNamespace(request *DeleteChartNamespaceRequest) (_result *DeleteChartNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteChartNamespaceResponse{}
+	_body, _err := client.DeleteChartNamespaceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteChartReleaseWithOptions(request *DeleteChartReleaseRequest, runtime *util.RuntimeOptions) (_result *DeleteChartReleaseResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Chart)) {
+		query["Chart"] = request.Chart
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Release)) {
+		query["Release"] = request.Release
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteChartRelease"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteChartReleaseResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteChartRelease(request *DeleteChartReleaseRequest) (_result *DeleteChartReleaseResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteChartReleaseResponse{}
+	_body, _err := client.DeleteChartReleaseWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteChartRepositoryWithOptions(request *DeleteChartRepositoryRequest, runtime *util.RuntimeOptions) (_result *DeleteChartRepositoryResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteChartRepository"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteChartRepositoryResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteChartRepository(request *DeleteChartRepositoryRequest) (_result *DeleteChartRepositoryResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteChartRepositoryResponse{}
+	_body, _err := client.DeleteChartRepositoryWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteEventCenterRuleWithOptions(request *DeleteEventCenterRuleRequest, runtime *util.RuntimeOptions) (_result *DeleteEventCenterRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RuleId)) {
+		query["RuleId"] = request.RuleId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteEventCenterRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteEventCenterRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteEventCenterRule(request *DeleteEventCenterRuleRequest) (_result *DeleteEventCenterRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteEventCenterRuleResponse{}
+	_body, _err := client.DeleteEventCenterRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteInstanceEndpointAclPolicyWithOptions(request *DeleteInstanceEndpointAclPolicyRequest, runtime *util.RuntimeOptions) (_result *DeleteInstanceEndpointAclPolicyResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.EndpointType)) {
+		query["EndpointType"] = request.EndpointType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Entry)) {
+		query["Entry"] = request.Entry
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ModuleName)) {
+		query["ModuleName"] = request.ModuleName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteInstanceEndpointAclPolicy"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteInstanceEndpointAclPolicyResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteInstanceEndpointAclPolicy(request *DeleteInstanceEndpointAclPolicyRequest) (_result *DeleteInstanceEndpointAclPolicyResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteInstanceEndpointAclPolicyResponse{}
+	_body, _err := client.DeleteInstanceEndpointAclPolicyWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteInstanceVpcEndpointLinkedVpcWithOptions(request *DeleteInstanceVpcEndpointLinkedVpcRequest, runtime *util.RuntimeOptions) (_result *DeleteInstanceVpcEndpointLinkedVpcResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ModuleName)) {
+		query["ModuleName"] = request.ModuleName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.VpcId)) {
+		query["VpcId"] = request.VpcId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.VswitchId)) {
+		query["VswitchId"] = request.VswitchId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteInstanceVpcEndpointLinkedVpc"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteInstanceVpcEndpointLinkedVpcResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteInstanceVpcEndpointLinkedVpc(request *DeleteInstanceVpcEndpointLinkedVpcRequest) (_result *DeleteInstanceVpcEndpointLinkedVpcResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteInstanceVpcEndpointLinkedVpcResponse{}
+	_body, _err := client.DeleteInstanceVpcEndpointLinkedVpcWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteNamespaceWithOptions(request *DeleteNamespaceRequest, runtime *util.RuntimeOptions) (_result *DeleteNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteNamespace"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteNamespace(request *DeleteNamespaceRequest) (_result *DeleteNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteNamespaceResponse{}
+	_body, _err := client.DeleteNamespaceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoBuildRuleWithOptions(request *DeleteRepoBuildRuleRequest, runtime *util.RuntimeOptions) (_result *DeleteRepoBuildRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.BuildRuleId)) {
+		query["BuildRuleId"] = request.BuildRuleId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoBuildRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteRepoBuildRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoBuildRule(request *DeleteRepoBuildRuleRequest) (_result *DeleteRepoBuildRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteRepoBuildRuleResponse{}
+	_body, _err := client.DeleteRepoBuildRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoSyncRuleWithOptions(request *DeleteRepoSyncRuleRequest, runtime *util.RuntimeOptions) (_result *DeleteRepoSyncRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncRuleId)) {
+		query["SyncRuleId"] = request.SyncRuleId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoSyncRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteRepoSyncRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoSyncRule(request *DeleteRepoSyncRuleRequest) (_result *DeleteRepoSyncRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteRepoSyncRuleResponse{}
+	_body, _err := client.DeleteRepoSyncRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoTagWithOptions(request *DeleteRepoTagRequest, runtime *util.RuntimeOptions) (_result *DeleteRepoTagResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Tag)) {
+		query["Tag"] = request.Tag
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoTag"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteRepoTagResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoTag(request *DeleteRepoTagRequest) (_result *DeleteRepoTagResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteRepoTagResponse{}
+	_body, _err := client.DeleteRepoTagWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoTriggerWithOptions(request *DeleteRepoTriggerRequest, runtime *util.RuntimeOptions) (_result *DeleteRepoTriggerResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TriggerId)) {
+		query["TriggerId"] = request.TriggerId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoTrigger"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteRepoTriggerResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoTrigger(request *DeleteRepoTriggerRequest) (_result *DeleteRepoTriggerResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteRepoTriggerResponse{}
+	_body, _err := client.DeleteRepoTriggerWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepositoryWithOptions(request *DeleteRepositoryRequest, runtime *util.RuntimeOptions) (_result *DeleteRepositoryResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepository"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteRepositoryResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepository(request *DeleteRepositoryRequest) (_result *DeleteRepositoryResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteRepositoryResponse{}
+	_body, _err := client.DeleteRepositoryWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetArtifactBuildTaskWithOptions(request *GetArtifactBuildTaskRequest, runtime *util.RuntimeOptions) (_result *GetArtifactBuildTaskResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := openapiutil.Query(util.ToMap(request))
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetArtifactBuildTask"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: 
tea.String("/"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetArtifactBuildTaskResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetArtifactBuildTask(request *GetArtifactBuildTaskRequest) (_result *GetArtifactBuildTaskResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetArtifactBuildTaskResponse{} + _body, _err := client.GetArtifactBuildTaskWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetAuthorizationTokenWithOptions(request *GetAuthorizationTokenRequest, runtime *util.RuntimeOptions) (_result *GetAuthorizationTokenResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetAuthorizationToken"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetAuthorizationTokenResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetAuthorizationToken(request *GetAuthorizationTokenRequest) (_result *GetAuthorizationTokenResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetAuthorizationTokenResponse{} + _body, _err := client.GetAuthorizationTokenWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetChainWithOptions(request *GetChainRequest, runtime *util.RuntimeOptions) (_result *GetChainResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.ChainId)) { + query["ChainId"] = request.ChainId + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetChain"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetChainResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetChain(request *GetChainRequest) (_result *GetChainResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetChainResponse{} + _body, _err := client.GetChainWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, 
_err +} + +func (client *Client) GetChartNamespaceWithOptions(request *GetChartNamespaceRequest, runtime *util.RuntimeOptions) (_result *GetChartNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetChartNamespace"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetChartNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetChartNamespace(request *GetChartNamespaceRequest) (_result *GetChartNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetChartNamespaceResponse{} + _body, _err := client.GetChartNamespaceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetChartRepositoryWithOptions(request *GetChartRepositoryRequest, runtime *util.RuntimeOptions) (_result *GetChartRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetChartRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetChartRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetChartRepository(request *GetChartRepositoryRequest) (_result *GetChartRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetChartRepositoryResponse{} + _body, _err := client.GetChartRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetInstanceWithOptions(request *GetInstanceRequest, runtime *util.RuntimeOptions) (_result *GetInstanceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + 
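+	// params holds the static metadata of this RPC operation: action name, API version, transport protocol, HTTP method, signing type (AK), and payload encoding.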
params := &openapi.Params{ + Action: tea.String("GetInstance"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetInstanceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetInstance(request *GetInstanceRequest) (_result *GetInstanceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetInstanceResponse{} + _body, _err := client.GetInstanceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetInstanceCountWithOptions(runtime *util.RuntimeOptions) (_result *GetInstanceCountResponse, _err error) { + req := &openapi.OpenApiRequest{} + params := &openapi.Params{ + Action: tea.String("GetInstanceCount"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetInstanceCountResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetInstanceCount() (_result *GetInstanceCountResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetInstanceCountResponse{} + _body, _err := client.GetInstanceCountWithOptions(runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetInstanceEndpointWithOptions(request *GetInstanceEndpointRequest, runtime *util.RuntimeOptions) (_result *GetInstanceEndpointResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.EndpointType)) { + query["EndpointType"] = request.EndpointType + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.ModuleName)) { + query["ModuleName"] = request.ModuleName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetInstanceEndpoint"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetInstanceEndpointResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetInstanceEndpoint(request *GetInstanceEndpointRequest) (_result *GetInstanceEndpointResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetInstanceEndpointResponse{} + _body, _err := client.GetInstanceEndpointWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) 
GetInstanceUsageWithOptions(request *GetInstanceUsageRequest, runtime *util.RuntimeOptions) (_result *GetInstanceUsageResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetInstanceUsage"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetInstanceUsageResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetInstanceUsage(request *GetInstanceUsageRequest) (_result *GetInstanceUsageResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetInstanceUsageResponse{} + _body, _err := client.GetInstanceUsageWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetInstanceVpcEndpointWithOptions(request *GetInstanceVpcEndpointRequest, runtime *util.RuntimeOptions) (_result *GetInstanceVpcEndpointResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.ModuleName)) { + query["ModuleName"] = request.ModuleName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetInstanceVpcEndpoint"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetInstanceVpcEndpointResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetInstanceVpcEndpoint(request *GetInstanceVpcEndpointRequest) (_result *GetInstanceVpcEndpointResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetInstanceVpcEndpointResponse{} + _body, _err := client.GetInstanceVpcEndpointWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetNamespaceWithOptions(request *GetNamespaceRequest, runtime *util.RuntimeOptions) (_result *GetNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceId)) { + query["NamespaceId"] = request.NamespaceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := 
&openapi.Params{ + Action: tea.String("GetNamespace"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetNamespace(request *GetNamespaceRequest) (_result *GetNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetNamespaceResponse{} + _body, _err := client.GetNamespaceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoBuildRecordWithOptions(request *GetRepoBuildRecordRequest, runtime *util.RuntimeOptions) (_result *GetRepoBuildRecordResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.BuildRecordId)) { + query["BuildRecordId"] = request.BuildRecordId + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoBuildRecord"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoBuildRecordResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoBuildRecord(request *GetRepoBuildRecordRequest) (_result *GetRepoBuildRecordResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoBuildRecordResponse{} + _body, _err := client.GetRepoBuildRecordWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoBuildRecordStatusWithOptions(request *GetRepoBuildRecordStatusRequest, runtime *util.RuntimeOptions) (_result *GetRepoBuildRecordStatusResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.BuildRecordId)) { + query["BuildRecordId"] = request.BuildRecordId + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoBuildRecordStatus"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoBuildRecordStatusResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = 
tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoBuildRecordStatus(request *GetRepoBuildRecordStatusRequest) (_result *GetRepoBuildRecordStatusResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoBuildRecordStatusResponse{} + _body, _err := client.GetRepoBuildRecordStatusWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoSourceCodeRepoWithOptions(request *GetRepoSourceCodeRepoRequest, runtime *util.RuntimeOptions) (_result *GetRepoSourceCodeRepoResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoSourceCodeRepo"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoSourceCodeRepoResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoSourceCodeRepo(request *GetRepoSourceCodeRepoRequest) (_result *GetRepoSourceCodeRepoResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoSourceCodeRepoResponse{} + _body, _err := client.GetRepoSourceCodeRepoWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoSyncTaskWithOptions(request *GetRepoSyncTaskRequest, runtime *util.RuntimeOptions) (_result *GetRepoSyncTaskResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.SyncTaskId)) { + query["SyncTaskId"] = request.SyncTaskId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoSyncTask"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoSyncTaskResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoSyncTask(request *GetRepoSyncTaskRequest) (_result *GetRepoSyncTaskResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoSyncTaskResponse{} + _body, _err := client.GetRepoSyncTaskWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagWithOptions(request *GetRepoTagRequest, runtime *util.RuntimeOptions) (_result 
*GetRepoTagResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := openapiutil.Query(util.ToMap(request)) + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoTag"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTag(request *GetRepoTagRequest) (_result *GetRepoTagResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoTagResponse{} + _body, _err := client.GetRepoTagWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagLayersWithOptions(request *GetRepoTagLayersRequest, runtime *util.RuntimeOptions) (_result *GetRepoTagLayersResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Digest)) { + query["Digest"] = request.Digest + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoTagLayers"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagLayersResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagLayers(request *GetRepoTagLayersRequest) (_result *GetRepoTagLayersResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoTagLayersResponse{} + _body, _err := client.GetRepoTagLayersWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagManifestWithOptions(request *GetRepoTagManifestRequest, runtime *util.RuntimeOptions) (_result *GetRepoTagManifestResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.SchemaVersion)) { + query["SchemaVersion"] = request.SchemaVersion + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: 
tea.String("GetRepoTagManifest"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagManifestResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagManifest(request *GetRepoTagManifestRequest) (_result *GetRepoTagManifestResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoTagManifestResponse{} + _body, _err := client.GetRepoTagManifestWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagScanStatusWithOptions(request *GetRepoTagScanStatusRequest, runtime *util.RuntimeOptions) (_result *GetRepoTagScanStatusResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Digest)) { + query["Digest"] = request.Digest + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.ScanTaskId)) { + query["ScanTaskId"] = request.ScanTaskId + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoTagScanStatus"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagScanStatusResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagScanStatus(request *GetRepoTagScanStatusRequest) (_result *GetRepoTagScanStatusResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoTagScanStatusResponse{} + _body, _err := client.GetRepoTagScanStatusWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagScanSummaryWithOptions(request *GetRepoTagScanSummaryRequest, runtime *util.RuntimeOptions) (_result *GetRepoTagScanSummaryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Digest)) { + query["Digest"] = request.Digest + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.ScanTaskId)) { + query["ScanTaskId"] = request.ScanTaskId + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: 
tea.String("GetRepoTagScanSummary"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagScanSummaryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagScanSummary(request *GetRepoTagScanSummaryRequest) (_result *GetRepoTagScanSummaryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoTagScanSummaryResponse{} + _body, _err := client.GetRepoTagScanSummaryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepositoryWithOptions(request *GetRepositoryRequest, runtime *util.RuntimeOptions) (_result *GetRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepository(request *GetRepositoryRequest) (_result *GetRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepositoryResponse{} + _body, _err := client.GetRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListArtifactBuildTaskLogWithOptions(request *ListArtifactBuildTaskLogRequest, runtime *util.RuntimeOptions) (_result *ListArtifactBuildTaskLogResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := openapiutil.Query(util.ToMap(request)) + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListArtifactBuildTaskLog"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListArtifactBuildTaskLogResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListArtifactBuildTaskLog(request 
*ListArtifactBuildTaskLogRequest) (_result *ListArtifactBuildTaskLogResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListArtifactBuildTaskLogResponse{} + _body, _err := client.ListArtifactBuildTaskLogWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChainWithOptions(request *ListChainRequest, runtime *util.RuntimeOptions) (_result *ListChainResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChain"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChainResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChain(request *ListChainRequest) (_result *ListChainResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChainResponse{} + _body, _err := client.ListChainWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChainInstanceWithOptions(request *ListChainInstanceRequest, runtime *util.RuntimeOptions) (_result *ListChainInstanceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChainInstance"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChainInstanceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChainInstance(request *ListChainInstanceRequest) 
(_result *ListChainInstanceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChainInstanceResponse{} + _body, _err := client.ListChainInstanceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChartWithOptions(request *ListChartRequest, runtime *util.RuntimeOptions) (_result *ListChartResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChart"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChartResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChart(request *ListChartRequest) (_result *ListChartResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChartResponse{} + _body, _err := client.ListChartWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChartNamespaceWithOptions(request *ListChartNamespaceRequest, runtime *util.RuntimeOptions) (_result *ListChartNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceStatus)) { + query["NamespaceStatus"] = request.NamespaceStatus + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChartNamespace"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChartNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChartNamespace(request *ListChartNamespaceRequest) (_result *ListChartNamespaceResponse, 
_err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChartNamespaceResponse{} + _body, _err := client.ListChartNamespaceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChartReleaseWithOptions(request *ListChartReleaseRequest, runtime *util.RuntimeOptions) (_result *ListChartReleaseResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Chart)) { + query["Chart"] = request.Chart + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChartRelease"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChartReleaseResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChartRelease(request *ListChartReleaseRequest) (_result *ListChartReleaseResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChartReleaseResponse{} + _body, _err := client.ListChartReleaseWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChartRepositoryWithOptions(request *ListChartRepositoryRequest, runtime *util.RuntimeOptions) (_result *ListChartRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.RepoStatus)) { + query["RepoStatus"] = request.RepoStatus + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChartRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChartRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + 
if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChartRepository(request *ListChartRepositoryRequest) (_result *ListChartRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChartRepositoryResponse{} + _body, _err := client.ListChartRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListEventCenterRecordWithOptions(request *ListEventCenterRecordRequest, runtime *util.RuntimeOptions) (_result *ListEventCenterRecordResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := openapiutil.Query(util.ToMap(request)) + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListEventCenterRecord"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListEventCenterRecordResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListEventCenterRecord(request *ListEventCenterRecordRequest) (_result *ListEventCenterRecordResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListEventCenterRecordResponse{} + _body, _err := client.ListEventCenterRecordWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListEventCenterRuleNameWithOptions(request *ListEventCenterRuleNameRequest, runtime *util.RuntimeOptions) (_result *ListEventCenterRuleNameResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := openapiutil.Query(util.ToMap(request)) + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListEventCenterRuleName"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListEventCenterRuleNameResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListEventCenterRuleName(request *ListEventCenterRuleNameRequest) (_result *ListEventCenterRuleNameResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListEventCenterRuleNameResponse{} + _body, _err := client.ListEventCenterRuleNameWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListInstanceWithOptions(request *ListInstanceRequest, runtime *util.RuntimeOptions) (_result *ListInstanceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceName)) { + query["InstanceName"] = request.InstanceName + } + + if 
!tea.BoolValue(util.IsUnset(request.InstanceStatus)) { + query["InstanceStatus"] = request.InstanceStatus + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) { + query["ResourceGroupId"] = request.ResourceGroupId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListInstance"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListInstanceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListInstance(request *ListInstanceRequest) (_result *ListInstanceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListInstanceResponse{} + _body, _err := client.ListInstanceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListInstanceEndpointWithOptions(request *ListInstanceEndpointRequest, runtime *util.RuntimeOptions) (_result *ListInstanceEndpointResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.ModuleName)) { + query["ModuleName"] = request.ModuleName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListInstanceEndpoint"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListInstanceEndpointResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListInstanceEndpoint(request *ListInstanceEndpointRequest) (_result *ListInstanceEndpointResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListInstanceEndpointResponse{} + _body, _err := client.ListInstanceEndpointWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListInstanceRegionWithOptions(request *ListInstanceRegionRequest, runtime *util.RuntimeOptions) (_result *ListInstanceRegionResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Lang)) { + query["Lang"] = request.Lang + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListInstanceRegion"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: 
tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListInstanceRegionResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListInstanceRegion(request *ListInstanceRegionRequest) (_result *ListInstanceRegionResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListInstanceRegionResponse{} + _body, _err := client.ListInstanceRegionWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListNamespaceWithOptions(request *ListNamespaceRequest, runtime *util.RuntimeOptions) (_result *ListNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceStatus)) { + query["NamespaceStatus"] = request.NamespaceStatus + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) { + query["ResourceGroupId"] = request.ResourceGroupId + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListNamespace"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListNamespace(request *ListNamespaceRequest) (_result *ListNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListNamespaceResponse{} + _body, _err := client.ListNamespaceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoBuildRecordWithOptions(request *ListRepoBuildRecordRequest, runtime *util.RuntimeOptions) (_result *ListRepoBuildRecordResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoBuildRecord"), + Version: tea.String("2018-12-01"), + Protocol: 
tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoBuildRecordResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoBuildRecord(request *ListRepoBuildRecordRequest) (_result *ListRepoBuildRecordResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoBuildRecordResponse{} + _body, _err := client.ListRepoBuildRecordWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoBuildRecordLogWithOptions(request *ListRepoBuildRecordLogRequest, runtime *util.RuntimeOptions) (_result *ListRepoBuildRecordLogResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.BuildRecordId)) { + query["BuildRecordId"] = request.BuildRecordId + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.Offset)) { + query["Offset"] = request.Offset + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoBuildRecordLog"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoBuildRecordLogResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoBuildRecordLog(request *ListRepoBuildRecordLogRequest) (_result *ListRepoBuildRecordLogResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoBuildRecordLogResponse{} + _body, _err := client.ListRepoBuildRecordLogWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoBuildRuleWithOptions(request *ListRepoBuildRuleRequest, runtime *util.RuntimeOptions) (_result *ListRepoBuildRuleResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoBuildRule"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: 
tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoBuildRuleResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoBuildRule(request *ListRepoBuildRuleRequest) (_result *ListRepoBuildRuleResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoBuildRuleResponse{} + _body, _err := client.ListRepoBuildRuleWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoSyncRuleWithOptions(request *ListRepoSyncRuleRequest, runtime *util.RuntimeOptions) (_result *ListRepoSyncRuleResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.TargetInstanceId)) { + query["TargetInstanceId"] = request.TargetInstanceId + } + + if !tea.BoolValue(util.IsUnset(request.TargetRegionId)) { + query["TargetRegionId"] = request.TargetRegionId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoSyncRule"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoSyncRuleResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoSyncRule(request *ListRepoSyncRuleRequest) (_result *ListRepoSyncRuleResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoSyncRuleResponse{} + _body, _err := client.ListRepoSyncRuleWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoSyncTaskWithOptions(request *ListRepoSyncTaskRequest, runtime *util.RuntimeOptions) (_result *ListRepoSyncTaskResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.SyncRecordId)) { + query["SyncRecordId"] = request.SyncRecordId + 
} + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoSyncTask"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoSyncTaskResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoSyncTask(request *ListRepoSyncTaskRequest) (_result *ListRepoSyncTaskResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoSyncTaskResponse{} + _body, _err := client.ListRepoSyncTaskWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoTagWithOptions(request *ListRepoTagRequest, runtime *util.RuntimeOptions) (_result *ListRepoTagResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoTag"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoTagResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoTag(request *ListRepoTagRequest) (_result *ListRepoTagResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoTagResponse{} + _body, _err := client.ListRepoTagWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoTagScanResultWithOptions(request *ListRepoTagScanResultRequest, runtime *util.RuntimeOptions) (_result *ListRepoTagScanResultResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Digest)) { + query["Digest"] = request.Digest + } + + if !tea.BoolValue(util.IsUnset(request.FilterValue)) { + query["FilterValue"] = request.FilterValue + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if 
!tea.BoolValue(util.IsUnset(request.ScanTaskId)) { + query["ScanTaskId"] = request.ScanTaskId + } + + if !tea.BoolValue(util.IsUnset(request.ScanType)) { + query["ScanType"] = request.ScanType + } + + if !tea.BoolValue(util.IsUnset(request.Severity)) { + query["Severity"] = request.Severity + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + if !tea.BoolValue(util.IsUnset(request.VulQueryKey)) { + query["VulQueryKey"] = request.VulQueryKey + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoTagScanResult"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoTagScanResultResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoTagScanResult(request *ListRepoTagScanResultRequest) (_result *ListRepoTagScanResultResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoTagScanResultResponse{} + _body, _err := client.ListRepoTagScanResultWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoTriggerWithOptions(request *ListRepoTriggerRequest, runtime *util.RuntimeOptions) (_result *ListRepoTriggerResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoTrigger"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoTriggerResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoTrigger(request *ListRepoTriggerRequest) (_result *ListRepoTriggerResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoTriggerResponse{} + _body, _err := client.ListRepoTriggerWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepositoryWithOptions(request *ListRepositoryRequest, runtime *util.RuntimeOptions) (_result *ListRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if 
!tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.RepoStatus)) { + query["RepoStatus"] = request.RepoStatus + } + + if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) { + query["ResourceGroupId"] = request.ResourceGroupId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepository(request *ListRepositoryRequest) (_result *ListRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepositoryResponse{} + _body, _err := client.ListRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ResetLoginPasswordWithOptions(request *ResetLoginPasswordRequest, runtime *util.RuntimeOptions) (_result *ResetLoginPasswordResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.Password)) { + query["Password"] = request.Password + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ResetLoginPassword"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ResetLoginPasswordResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ResetLoginPassword(request *ResetLoginPasswordRequest) (_result *ResetLoginPasswordResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ResetLoginPasswordResponse{} + _body, _err := client.ResetLoginPasswordWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateChainWithOptions(request *UpdateChainRequest, runtime *util.RuntimeOptions) (_result *UpdateChainResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.ChainConfig)) { + query["ChainConfig"] = request.ChainConfig + } + + if !tea.BoolValue(util.IsUnset(request.ChainId)) { + query["ChainId"] = request.ChainId + } + + if !tea.BoolValue(util.IsUnset(request.Description)) { + query["Description"] = request.Description + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = 
request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.Name)) { + query["Name"] = request.Name + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateChain"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateChainResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateChain(request *UpdateChainRequest) (_result *UpdateChainResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateChainResponse{} + _body, _err := client.UpdateChainWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateChartNamespaceWithOptions(request *UpdateChartNamespaceRequest, runtime *util.RuntimeOptions) (_result *UpdateChartNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.AutoCreateRepo)) { + query["AutoCreateRepo"] = request.AutoCreateRepo + } + + if !tea.BoolValue(util.IsUnset(request.DefaultRepoType)) { + query["DefaultRepoType"] = request.DefaultRepoType + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateChartNamespace"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateChartNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateChartNamespace(request *UpdateChartNamespaceRequest) (_result *UpdateChartNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateChartNamespaceResponse{} + _body, _err := client.UpdateChartNamespaceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateChartRepositoryWithOptions(request *UpdateChartRepositoryRequest, runtime *util.RuntimeOptions) (_result *UpdateChartRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.RepoType)) { + query["RepoType"] = request.RepoType + } + + if 
!tea.BoolValue(util.IsUnset(request.Summary)) { + query["Summary"] = request.Summary + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateChartRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateChartRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateChartRepository(request *UpdateChartRepositoryRequest) (_result *UpdateChartRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateChartRepositoryResponse{} + _body, _err := client.UpdateChartRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateEventCenterRuleWithOptions(tmpReq *UpdateEventCenterRuleRequest, runtime *util.RuntimeOptions) (_result *UpdateEventCenterRuleResponse, _err error) { + _err = util.ValidateModel(tmpReq) + if _err != nil { + return _result, _err + } + request := &UpdateEventCenterRuleShrinkRequest{} + openapiutil.Convert(tmpReq, request) + if !tea.BoolValue(util.IsUnset(tmpReq.Namespaces)) { + request.NamespacesShrink = openapiutil.ArrayToStringWithSpecifiedStyle(tmpReq.Namespaces, tea.String("Namespaces"), tea.String("json")) + } + + if !tea.BoolValue(util.IsUnset(tmpReq.RepoNames)) { + request.RepoNamesShrink = openapiutil.ArrayToStringWithSpecifiedStyle(tmpReq.RepoNames, tea.String("RepoNames"), tea.String("json")) + } + + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.EventChannel)) { + query["EventChannel"] = request.EventChannel + } + + if !tea.BoolValue(util.IsUnset(request.EventConfig)) { + query["EventConfig"] = request.EventConfig + } + + if !tea.BoolValue(util.IsUnset(request.EventScope)) { + query["EventScope"] = request.EventScope + } + + if !tea.BoolValue(util.IsUnset(request.EventType)) { + query["EventType"] = request.EventType + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespacesShrink)) { + query["Namespaces"] = request.NamespacesShrink + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamesShrink)) { + query["RepoNames"] = request.RepoNamesShrink + } + + if !tea.BoolValue(util.IsUnset(request.RepoTagFilterPattern)) { + query["RepoTagFilterPattern"] = request.RepoTagFilterPattern + } + + if !tea.BoolValue(util.IsUnset(request.RuleId)) { + query["RuleId"] = request.RuleId + } + + if !tea.BoolValue(util.IsUnset(request.RuleName)) { + query["RuleName"] = request.RuleName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateEventCenterRule"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateEventCenterRuleResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, 
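Note the variation in UpdateEventCenterRuleWithOptions just above: because RPC query parameters are flat strings, the slice-valued fields of the incoming tmpReq are first copied into a ShrinkRequest and JSON-serialized ("shrunk") into single string parameters. A small sketch of what that step produces, using the names from the function above (the []*string field type and the values are assumptions for illustration):

    // Illustrative only: the effect of the "shrink" step on an array field.
    tmpReq := &UpdateEventCenterRuleRequest{
        Namespaces: []*string{tea.String("ns-a"), tea.String("ns-b")}, // assumed []*string
    }
    request := &UpdateEventCenterRuleShrinkRequest{}
    openapiutil.Convert(tmpReq, request)
    request.NamespacesShrink = openapiutil.ArrayToStringWithSpecifiedStyle(
        tmpReq.Namespaces, tea.String("Namespaces"), tea.String("json"))
    // request.NamespacesShrink now holds the JSON text ["ns-a","ns-b"] and is
    // sent as the single query parameter "Namespaces".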
&_result) + return _result, _err +} + +func (client *Client) UpdateEventCenterRule(request *UpdateEventCenterRuleRequest) (_result *UpdateEventCenterRuleResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateEventCenterRuleResponse{} + _body, _err := client.UpdateEventCenterRuleWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateInstanceEndpointStatusWithOptions(request *UpdateInstanceEndpointStatusRequest, runtime *util.RuntimeOptions) (_result *UpdateInstanceEndpointStatusResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Enable)) { + query["Enable"] = request.Enable + } + + if !tea.BoolValue(util.IsUnset(request.EndpointType)) { + query["EndpointType"] = request.EndpointType + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.ModuleName)) { + query["ModuleName"] = request.ModuleName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateInstanceEndpointStatus"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateInstanceEndpointStatusResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateInstanceEndpointStatus(request *UpdateInstanceEndpointStatusRequest) (_result *UpdateInstanceEndpointStatusResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateInstanceEndpointStatusResponse{} + _body, _err := client.UpdateInstanceEndpointStatusWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateNamespaceWithOptions(request *UpdateNamespaceRequest, runtime *util.RuntimeOptions) (_result *UpdateNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.AutoCreateRepo)) { + query["AutoCreateRepo"] = request.AutoCreateRepo + } + + if !tea.BoolValue(util.IsUnset(request.DefaultRepoType)) { + query["DefaultRepoType"] = request.DefaultRepoType + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateNamespace"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, 
_err +} + +func (client *Client) UpdateNamespace(request *UpdateNamespaceRequest) (_result *UpdateNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateNamespaceResponse{} + _body, _err := client.UpdateNamespaceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateRepoBuildRuleWithOptions(request *UpdateRepoBuildRuleRequest, runtime *util.RuntimeOptions) (_result *UpdateRepoBuildRuleResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.BuildArgs)) { + query["BuildArgs"] = request.BuildArgs + } + + if !tea.BoolValue(util.IsUnset(request.BuildRuleId)) { + query["BuildRuleId"] = request.BuildRuleId + } + + if !tea.BoolValue(util.IsUnset(request.DockerfileLocation)) { + query["DockerfileLocation"] = request.DockerfileLocation + } + + if !tea.BoolValue(util.IsUnset(request.DockerfileName)) { + query["DockerfileName"] = request.DockerfileName + } + + if !tea.BoolValue(util.IsUnset(request.ImageTag)) { + query["ImageTag"] = request.ImageTag + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.Platforms)) { + query["Platforms"] = request.Platforms + } + + if !tea.BoolValue(util.IsUnset(request.PushName)) { + query["PushName"] = request.PushName + } + + if !tea.BoolValue(util.IsUnset(request.PushType)) { + query["PushType"] = request.PushType + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateRepoBuildRule"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateRepoBuildRuleResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepoBuildRule(request *UpdateRepoBuildRuleRequest) (_result *UpdateRepoBuildRuleResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateRepoBuildRuleResponse{} + _body, _err := client.UpdateRepoBuildRuleWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateRepoSourceCodeRepoWithOptions(request *UpdateRepoSourceCodeRepoRequest, runtime *util.RuntimeOptions) (_result *UpdateRepoSourceCodeRepoResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.AutoBuild)) { + query["AutoBuild"] = request.AutoBuild + } + + if !tea.BoolValue(util.IsUnset(request.CodeRepoId)) { + query["CodeRepoId"] = request.CodeRepoId + } + + if !tea.BoolValue(util.IsUnset(request.CodeRepoName)) { + query["CodeRepoName"] = request.CodeRepoName + } + + if !tea.BoolValue(util.IsUnset(request.CodeRepoNamespaceName)) { + query["CodeRepoNamespaceName"] = request.CodeRepoNamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.CodeRepoType)) { + query["CodeRepoType"] 
= request.CodeRepoType + } + + if !tea.BoolValue(util.IsUnset(request.DisableCacheBuild)) { + query["DisableCacheBuild"] = request.DisableCacheBuild + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.OverseaBuild)) { + query["OverseaBuild"] = request.OverseaBuild + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateRepoSourceCodeRepo"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateRepoSourceCodeRepoResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepoSourceCodeRepo(request *UpdateRepoSourceCodeRepoRequest) (_result *UpdateRepoSourceCodeRepoResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateRepoSourceCodeRepoResponse{} + _body, _err := client.UpdateRepoSourceCodeRepoWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateRepoTriggerWithOptions(request *UpdateRepoTriggerRequest, runtime *util.RuntimeOptions) (_result *UpdateRepoTriggerResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.TriggerId)) { + query["TriggerId"] = request.TriggerId + } + + if !tea.BoolValue(util.IsUnset(request.TriggerName)) { + query["TriggerName"] = request.TriggerName + } + + if !tea.BoolValue(util.IsUnset(request.TriggerTag)) { + query["TriggerTag"] = request.TriggerTag + } + + if !tea.BoolValue(util.IsUnset(request.TriggerType)) { + query["TriggerType"] = request.TriggerType + } + + if !tea.BoolValue(util.IsUnset(request.TriggerUrl)) { + query["TriggerUrl"] = request.TriggerUrl + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateRepoTrigger"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateRepoTriggerResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepoTrigger(request *UpdateRepoTriggerRequest) (_result *UpdateRepoTriggerResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateRepoTriggerResponse{} + _body, _err := client.UpdateRepoTriggerWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateRepositoryWithOptions(request 
*UpdateRepositoryRequest, runtime *util.RuntimeOptions) (_result *UpdateRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Detail)) { + query["Detail"] = request.Detail + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.RepoType)) { + query["RepoType"] = request.RepoType + } + + if !tea.BoolValue(util.IsUnset(request.Summary)) { + query["Summary"] = request.Summary + } + + if !tea.BoolValue(util.IsUnset(request.TagImmutability)) { + query["TagImmutability"] = request.TagImmutability + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepository(request *UpdateRepositoryRequest) (_result *UpdateRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateRepositoryResponse{} + _body, _err := client.UpdateRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} diff --git a/vendor/github.com/alibabacloud-go/darabonba-openapi/LICENSE b/vendor/github.com/alibabacloud-go/darabonba-openapi/LICENSE new file mode 100644 index 000000000..0c44dcefe --- /dev/null +++ b/vendor/github.com/alibabacloud-go/darabonba-openapi/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/alibabacloud-go/darabonba-openapi/client/client.go b/vendor/github.com/alibabacloud-go/darabonba-openapi/client/client.go new file mode 100644 index 000000000..6da3e1fdd --- /dev/null +++ b/vendor/github.com/alibabacloud-go/darabonba-openapi/client/client.go @@ -0,0 +1,1623 @@ +// This file is auto-generated, don't edit it. Thanks. 
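The vendored darabonba-openapi client that begins here is the generic transport layer beneath product SDKs such as the cr-20181201 client above: each generated product method only assembles an OpenApiRequest plus openapi.Params and delegates to CallApi, which routes RPC-style products through DoRPCRequest (shown further down). The retry and backoff knobs that DoRPCRequest reads come from util.RuntimeOptions; a hedged sketch of tuning them follows (the field names match the DoRPCRequest code below; the "fixed" policy name and the period unit are assumptions):

    // Illustrative: RuntimeOptions fields that DoRPCRequest folds into its
    // internal _runtime map (see its "retry" and "backoff" entries below).
    runtime := &util.RuntimeOptions{}
    runtime.Autoretry = tea.Bool(true)          // feeds the "retryable" flag
    runtime.MaxAttempts = tea.Int(5)            // default is 3 when unset
    runtime.BackoffPolicy = tea.String("fixed") // assumed policy name; default is "no"
    runtime.BackoffPeriod = tea.Int(2)          // assumed to be seconds between attempts
    resp, err := client.ListInstanceWithOptions(request, runtime)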
+/** + * This is for the OpenAPI SDK + */ +package client + +import ( + "io" + + spi "github.com/alibabacloud-go/alibabacloud-gateway-spi/client" + openapiutil "github.com/alibabacloud-go/openapi-util/service" + util "github.com/alibabacloud-go/tea-utils/service" + xml "github.com/alibabacloud-go/tea-xml/service" + "github.com/alibabacloud-go/tea/tea" + credential "github.com/aliyun/credentials-go/credentials" +) + +/** + * Model for initializing the client + */ +type Config struct { + // AccessKey ID + AccessKeyId *string `json:"accessKeyId,omitempty" xml:"accessKeyId,omitempty"` + // AccessKey secret + AccessKeySecret *string `json:"accessKeySecret,omitempty" xml:"accessKeySecret,omitempty"` + // security token + SecurityToken *string `json:"securityToken,omitempty" xml:"securityToken,omitempty"` + // http protocol + Protocol *string `json:"protocol,omitempty" xml:"protocol,omitempty"` + // http method + Method *string `json:"method,omitempty" xml:"method,omitempty"` + // region id + RegionId *string `json:"regionId,omitempty" xml:"regionId,omitempty"` + // read timeout + ReadTimeout *int `json:"readTimeout,omitempty" xml:"readTimeout,omitempty"` + // connect timeout + ConnectTimeout *int `json:"connectTimeout,omitempty" xml:"connectTimeout,omitempty"` + // http proxy + HttpProxy *string `json:"httpProxy,omitempty" xml:"httpProxy,omitempty"` + // https proxy + HttpsProxy *string `json:"httpsProxy,omitempty" xml:"httpsProxy,omitempty"` + // credential + Credential credential.Credential `json:"credential,omitempty" xml:"credential,omitempty"` + // endpoint + Endpoint *string `json:"endpoint,omitempty" xml:"endpoint,omitempty"` + // proxy white list + NoProxy *string `json:"noProxy,omitempty" xml:"noProxy,omitempty"` + // max idle conns + MaxIdleConns *int `json:"maxIdleConns,omitempty" xml:"maxIdleConns,omitempty"` + // network for endpoint + Network *string `json:"network,omitempty" xml:"network,omitempty"` + // user agent + UserAgent *string `json:"userAgent,omitempty" xml:"userAgent,omitempty"` + // suffix for endpoint + Suffix *string `json:"suffix,omitempty" xml:"suffix,omitempty"` + // socks5 proxy + Socks5Proxy *string `json:"socks5Proxy,omitempty" xml:"socks5Proxy,omitempty"` + // socks5 network + Socks5NetWork *string `json:"socks5NetWork,omitempty" xml:"socks5NetWork,omitempty"` + // endpoint type + EndpointType *string `json:"endpointType,omitempty" xml:"endpointType,omitempty"` + // OpenPlatform endpoint + OpenPlatformEndpoint *string `json:"openPlatformEndpoint,omitempty" xml:"openPlatformEndpoint,omitempty"` + // Deprecated + // credential type + Type *string `json:"type,omitempty" xml:"type,omitempty"` + // Signature Version + SignatureVersion *string `json:"signatureVersion,omitempty" xml:"signatureVersion,omitempty"` + // Signature Algorithm + SignatureAlgorithm *string `json:"signatureAlgorithm,omitempty" xml:"signatureAlgorithm,omitempty"` +} + +func (s Config) String() string { + return tea.Prettify(s) +} + +func (s Config) GoString() string { + return s.String() +} + +func (s *Config) SetAccessKeyId(v string) *Config { + s.AccessKeyId = &v + return s +} + +func (s *Config) SetAccessKeySecret(v string) *Config { + s.AccessKeySecret = &v + return s +} + +func (s *Config) SetSecurityToken(v string) *Config { + s.SecurityToken = &v + return s +} + +func (s *Config) SetProtocol(v string) *Config { + s.Protocol = &v + return s +} + +func (s *Config) SetMethod(v string) *Config { + s.Method = &v + return s +} + +func (s *Config) SetRegionId(v string) *Config { + s.RegionId = &v + return s
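Every setter on Config returns the receiver, so client configuration can also be written as a single fluent chain instead of a struct literal. A minimal sketch (all values are placeholders; the environment-variable names are the conventional ones for Alibaba Cloud SDKs, not taken from this diff):

    // Minimal sketch of fluent Config construction via the setters above.
    config := new(Config).
        SetAccessKeyId(os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_ID")).
        SetAccessKeySecret(os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET")).
        SetRegionId("cn-hangzhou").                // placeholder region
        SetEndpoint("cr.cn-hangzhou.aliyuncs.com") // placeholder endpoint
    client, err := NewClient(config) // NewClient is defined further below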
+} + +func (s *Config) SetReadTimeout(v int) *Config { + s.ReadTimeout = &v + return s +} + +func (s *Config) SetConnectTimeout(v int) *Config { + s.ConnectTimeout = &v + return s +} + +func (s *Config) SetHttpProxy(v string) *Config { + s.HttpProxy = &v + return s +} + +func (s *Config) SetHttpsProxy(v string) *Config { + s.HttpsProxy = &v + return s +} + +func (s *Config) SetCredential(v credential.Credential) *Config { + s.Credential = v + return s +} + +func (s *Config) SetEndpoint(v string) *Config { + s.Endpoint = &v + return s +} + +func (s *Config) SetNoProxy(v string) *Config { + s.NoProxy = &v + return s +} + +func (s *Config) SetMaxIdleConns(v int) *Config { + s.MaxIdleConns = &v + return s +} + +func (s *Config) SetNetwork(v string) *Config { + s.Network = &v + return s +} + +func (s *Config) SetUserAgent(v string) *Config { + s.UserAgent = &v + return s +} + +func (s *Config) SetSuffix(v string) *Config { + s.Suffix = &v + return s +} + +func (s *Config) SetSocks5Proxy(v string) *Config { + s.Socks5Proxy = &v + return s +} + +func (s *Config) SetSocks5NetWork(v string) *Config { + s.Socks5NetWork = &v + return s +} + +func (s *Config) SetEndpointType(v string) *Config { + s.EndpointType = &v + return s +} + +func (s *Config) SetOpenPlatformEndpoint(v string) *Config { + s.OpenPlatformEndpoint = &v + return s +} + +func (s *Config) SetType(v string) *Config { + s.Type = &v + return s +} + +func (s *Config) SetSignatureVersion(v string) *Config { + s.SignatureVersion = &v + return s +} + +func (s *Config) SetSignatureAlgorithm(v string) *Config { + s.SignatureAlgorithm = &v + return s +} + +type OpenApiRequest struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty"` + Query map[string]*string `json:"query,omitempty" xml:"query,omitempty"` + Body interface{} `json:"body,omitempty" xml:"body,omitempty"` + Stream io.Reader `json:"stream,omitempty" xml:"stream,omitempty"` + HostMap map[string]*string `json:"hostMap,omitempty" xml:"hostMap,omitempty"` + EndpointOverride *string `json:"endpointOverride,omitempty" xml:"endpointOverride,omitempty"` +} + +func (s OpenApiRequest) String() string { + return tea.Prettify(s) +} + +func (s OpenApiRequest) GoString() string { + return s.String() +} + +func (s *OpenApiRequest) SetHeaders(v map[string]*string) *OpenApiRequest { + s.Headers = v + return s +} + +func (s *OpenApiRequest) SetQuery(v map[string]*string) *OpenApiRequest { + s.Query = v + return s +} + +func (s *OpenApiRequest) SetBody(v interface{}) *OpenApiRequest { + s.Body = v + return s +} + +func (s *OpenApiRequest) SetStream(v io.Reader) *OpenApiRequest { + s.Stream = v + return s +} + +func (s *OpenApiRequest) SetHostMap(v map[string]*string) *OpenApiRequest { + s.HostMap = v + return s +} + +func (s *OpenApiRequest) SetEndpointOverride(v string) *OpenApiRequest { + s.EndpointOverride = &v + return s +} + +type Params struct { + Action *string `json:"action,omitempty" xml:"action,omitempty" require:"true"` + Version *string `json:"version,omitempty" xml:"version,omitempty" require:"true"` + Protocol *string `json:"protocol,omitempty" xml:"protocol,omitempty" require:"true"` + Pathname *string `json:"pathname,omitempty" xml:"pathname,omitempty" require:"true"` + Method *string `json:"method,omitempty" xml:"method,omitempty" require:"true"` + AuthType *string `json:"authType,omitempty" xml:"authType,omitempty" require:"true"` + BodyType *string `json:"bodyType,omitempty" xml:"bodyType,omitempty" require:"true"` + ReqBodyType *string 
`json:"reqBodyType,omitempty" xml:"reqBodyType,omitempty" require:"true"` + Style *string `json:"style,omitempty" xml:"style,omitempty"` +} + +func (s Params) String() string { + return tea.Prettify(s) +} + +func (s Params) GoString() string { + return s.String() +} + +func (s *Params) SetAction(v string) *Params { + s.Action = &v + return s +} + +func (s *Params) SetVersion(v string) *Params { + s.Version = &v + return s +} + +func (s *Params) SetProtocol(v string) *Params { + s.Protocol = &v + return s +} + +func (s *Params) SetPathname(v string) *Params { + s.Pathname = &v + return s +} + +func (s *Params) SetMethod(v string) *Params { + s.Method = &v + return s +} + +func (s *Params) SetAuthType(v string) *Params { + s.AuthType = &v + return s +} + +func (s *Params) SetBodyType(v string) *Params { + s.BodyType = &v + return s +} + +func (s *Params) SetReqBodyType(v string) *Params { + s.ReqBodyType = &v + return s +} + +func (s *Params) SetStyle(v string) *Params { + s.Style = &v + return s +} + +type Client struct { + Endpoint *string + RegionId *string + Protocol *string + Method *string + UserAgent *string + EndpointRule *string + EndpointMap map[string]*string + Suffix *string + ReadTimeout *int + ConnectTimeout *int + HttpProxy *string + HttpsProxy *string + Socks5Proxy *string + Socks5NetWork *string + NoProxy *string + Network *string + ProductId *string + MaxIdleConns *int + EndpointType *string + OpenPlatformEndpoint *string + Credential credential.Credential + SignatureVersion *string + SignatureAlgorithm *string + Headers map[string]*string + Spi spi.ClientInterface +} + +/** + * Init client with Config + * @param config config contains the necessary information to create a client + */ +func NewClient(config *Config) (*Client, error) { + client := new(Client) + err := client.Init(config) + return client, err +} + +func (client *Client) Init(config *Config) (_err error) { + if tea.BoolValue(util.IsUnset(tea.ToMap(config))) { + _err = tea.NewSDKError(map[string]interface{}{ + "code": "ParameterMissing", + "message": "'config' can not be unset", + }) + return _err + } + + if !tea.BoolValue(util.Empty(config.AccessKeyId)) && !tea.BoolValue(util.Empty(config.AccessKeySecret)) { + if !tea.BoolValue(util.Empty(config.SecurityToken)) { + config.Type = tea.String("sts") + } else { + config.Type = tea.String("access_key") + } + + credentialConfig := &credential.Config{ + AccessKeyId: config.AccessKeyId, + Type: config.Type, + AccessKeySecret: config.AccessKeySecret, + SecurityToken: config.SecurityToken, + } + client.Credential, _err = credential.NewCredential(credentialConfig) + if _err != nil { + return _err + } + + } else if !tea.BoolValue(util.IsUnset(config.Credential)) { + client.Credential = config.Credential + } + + client.Endpoint = config.Endpoint + client.EndpointType = config.EndpointType + client.Network = config.Network + client.Suffix = config.Suffix + client.Protocol = config.Protocol + client.Method = config.Method + client.RegionId = config.RegionId + client.UserAgent = config.UserAgent + client.ReadTimeout = config.ReadTimeout + client.ConnectTimeout = config.ConnectTimeout + client.HttpProxy = config.HttpProxy + client.HttpsProxy = config.HttpsProxy + client.NoProxy = config.NoProxy + client.Socks5Proxy = config.Socks5Proxy + client.Socks5NetWork = config.Socks5NetWork + client.MaxIdleConns = config.MaxIdleConns + client.SignatureVersion = config.SignatureVersion + client.SignatureAlgorithm = config.SignatureAlgorithm + return nil +} + +/** + * Encapsulate the 
request and invoke the network
+ * @param action api name
+ * @param version product version
+ * @param protocol http or https
+ * @param method e.g. GET
+ * @param authType authorization type e.g. AK
+ * @param bodyType response body type e.g. String
+ * @param request object of OpenApiRequest
+ * @param runtime which controls some details of the API call, such as retry times
+ * @return the response
+ */
+func (client *Client) DoRPCRequest(action *string, version *string, protocol *string, method *string, authType *string, bodyType *string, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) {
+ _err = tea.Validate(request)
+ if _err != nil {
+ return _result, _err
+ }
+ _err = tea.Validate(runtime)
+ if _err != nil {
+ return _result, _err
+ }
+ _runtime := map[string]interface{}{
+ "timeouted": "retry",
+ "readTimeout": tea.IntValue(util.DefaultNumber(runtime.ReadTimeout, client.ReadTimeout)),
+ "connectTimeout": tea.IntValue(util.DefaultNumber(runtime.ConnectTimeout, client.ConnectTimeout)),
+ "httpProxy": tea.StringValue(util.DefaultString(runtime.HttpProxy, client.HttpProxy)),
+ "httpsProxy": tea.StringValue(util.DefaultString(runtime.HttpsProxy, client.HttpsProxy)),
+ "noProxy": tea.StringValue(util.DefaultString(runtime.NoProxy, client.NoProxy)),
+ "socks5Proxy": tea.StringValue(util.DefaultString(runtime.Socks5Proxy, client.Socks5Proxy)),
+ "socks5NetWork": tea.StringValue(util.DefaultString(runtime.Socks5NetWork, client.Socks5NetWork)),
+ "maxIdleConns": tea.IntValue(util.DefaultNumber(runtime.MaxIdleConns, client.MaxIdleConns)),
+ "retry": map[string]interface{}{
+ "retryable": tea.BoolValue(runtime.Autoretry),
+ "maxAttempts": tea.IntValue(util.DefaultNumber(runtime.MaxAttempts, tea.Int(3))),
+ },
+ "backoff": map[string]interface{}{
+ "policy": tea.StringValue(util.DefaultString(runtime.BackoffPolicy, tea.String("no"))),
+ "period": tea.IntValue(util.DefaultNumber(runtime.BackoffPeriod, tea.Int(1))),
+ },
+ "ignoreSSL": tea.BoolValue(runtime.IgnoreSSL),
+ }
+
+ _resp := make(map[string]interface{})
+ for _retryTimes := 0; tea.BoolValue(tea.AllowRetry(_runtime["retry"], tea.Int(_retryTimes))); _retryTimes++ {
+ if _retryTimes > 0 {
+ _backoffTime := tea.GetBackoffTime(_runtime["backoff"], tea.Int(_retryTimes))
+ if tea.IntValue(_backoffTime) > 0 {
+ tea.Sleep(_backoffTime)
+ }
+ }
+
+ _resp, _err = func() (map[string]interface{}, error) {
+ request_ := tea.NewRequest()
+ request_.Protocol = util.DefaultString(client.Protocol, protocol)
+ request_.Method = method
+ request_.Pathname = tea.String("/")
+ request_.Query = tea.Merge(map[string]*string{
+ "Action": action,
+ "Format": tea.String("json"),
+ "Version": version,
+ "Timestamp": openapiutil.GetTimestamp(),
+ "SignatureNonce": util.GetNonce(),
+ }, request.Query)
+ headers, _err := client.GetRpcHeaders()
+ if _err != nil {
+ return _result, _err
+ }
+
+ if tea.BoolValue(util.IsUnset(headers)) {
+ // endpoint is set in the product client
+ request_.Headers = map[string]*string{
+ "host": client.Endpoint,
+ "x-acs-version": version,
+ "x-acs-action": action,
+ "user-agent": client.GetUserAgent(),
+ }
+ } else {
+ request_.Headers = tea.Merge(map[string]*string{
+ "host": client.Endpoint,
+ "x-acs-version": version,
+ "x-acs-action": action,
+ "user-agent": client.GetUserAgent(),
+ }, headers)
+ }
+
+ if !tea.BoolValue(util.IsUnset(request.Body)) {
+ m := util.AssertAsMap(request.Body)
+ tmp := util.AnyifyMapValue(openapiutil.Query(m))
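+ // the RPC body map is serialized as a URL-encoded form string (matching the content-type set below)
+ request_.Body = 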
tea.ToReader(util.ToFormString(tmp)) + request_.Headers["content-type"] = tea.String("application/x-www-form-urlencoded") + } + + if !tea.BoolValue(util.EqualString(authType, tea.String("Anonymous"))) { + accessKeyId, _err := client.GetAccessKeyId() + if _err != nil { + return _result, _err + } + + accessKeySecret, _err := client.GetAccessKeySecret() + if _err != nil { + return _result, _err + } + + securityToken, _err := client.GetSecurityToken() + if _err != nil { + return _result, _err + } + + if !tea.BoolValue(util.Empty(securityToken)) { + request_.Query["SecurityToken"] = securityToken + } + + request_.Query["SignatureMethod"] = tea.String("HMAC-SHA1") + request_.Query["SignatureVersion"] = tea.String("1.0") + request_.Query["AccessKeyId"] = accessKeyId + var t map[string]interface{} + if !tea.BoolValue(util.IsUnset(request.Body)) { + t = util.AssertAsMap(request.Body) + } + + signedParam := tea.Merge(request_.Query, + openapiutil.Query(t)) + request_.Query["Signature"] = openapiutil.GetRPCSignature(signedParam, request_.Method, accessKeySecret) + } + + response_, _err := tea.DoRequest(request_, _runtime) + if _err != nil { + return _result, _err + } + if tea.BoolValue(util.Is4xx(response_.StatusCode)) || tea.BoolValue(util.Is5xx(response_.StatusCode)) { + _res, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + err := util.AssertAsMap(_res) + requestId := DefaultAny(err["RequestId"], err["requestId"]) + _err = tea.NewSDKError(map[string]interface{}{ + "code": tea.ToString(DefaultAny(err["Code"], err["code"])), + "message": "code: " + tea.ToString(tea.IntValue(response_.StatusCode)) + ", " + tea.ToString(DefaultAny(err["Message"], err["message"])) + " request id: " + tea.ToString(requestId), + "data": err, + }) + return _result, _err + } + + if tea.BoolValue(util.EqualString(bodyType, tea.String("binary"))) { + resp := map[string]interface{}{ + "body": response_.Body, + "headers": response_.Headers, + } + _result = resp + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("byte"))) { + byt, _err := util.ReadAsBytes(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": byt, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("string"))) { + str, _err := util.ReadAsString(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": tea.StringValue(str), + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("json"))) { + obj, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + res := util.AssertAsMap(obj) + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": res, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("array"))) { + arr, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": arr, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else { + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]map[string]*string{ + 
"headers": response_.Headers, + }, &_result) + return _result, _err + } + + }() + if !tea.BoolValue(tea.Retryable(_err)) { + break + } + } + + return _resp, _err +} + +/** + * Encapsulate the request and invoke the network + * @param action api name + * @param version product version + * @param protocol http or https + * @param method e.g. GET + * @param authType authorization type e.g. AK + * @param pathname pathname of every api + * @param bodyType response body type e.g. String + * @param request object of OpenApiRequest + * @param runtime which controls some details of call api, such as retry times + * @return the response + */ +func (client *Client) DoROARequest(action *string, version *string, protocol *string, method *string, authType *string, pathname *string, bodyType *string, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) { + _err = tea.Validate(request) + if _err != nil { + return _result, _err + } + _err = tea.Validate(runtime) + if _err != nil { + return _result, _err + } + _runtime := map[string]interface{}{ + "timeouted": "retry", + "readTimeout": tea.IntValue(util.DefaultNumber(runtime.ReadTimeout, client.ReadTimeout)), + "connectTimeout": tea.IntValue(util.DefaultNumber(runtime.ConnectTimeout, client.ConnectTimeout)), + "httpProxy": tea.StringValue(util.DefaultString(runtime.HttpProxy, client.HttpProxy)), + "httpsProxy": tea.StringValue(util.DefaultString(runtime.HttpsProxy, client.HttpsProxy)), + "noProxy": tea.StringValue(util.DefaultString(runtime.NoProxy, client.NoProxy)), + "socks5Proxy": tea.StringValue(util.DefaultString(runtime.Socks5Proxy, client.Socks5Proxy)), + "socks5NetWork": tea.StringValue(util.DefaultString(runtime.Socks5NetWork, client.Socks5NetWork)), + "maxIdleConns": tea.IntValue(util.DefaultNumber(runtime.MaxIdleConns, client.MaxIdleConns)), + "retry": map[string]interface{}{ + "retryable": tea.BoolValue(runtime.Autoretry), + "maxAttempts": tea.IntValue(util.DefaultNumber(runtime.MaxAttempts, tea.Int(3))), + }, + "backoff": map[string]interface{}{ + "policy": tea.StringValue(util.DefaultString(runtime.BackoffPolicy, tea.String("no"))), + "period": tea.IntValue(util.DefaultNumber(runtime.BackoffPeriod, tea.Int(1))), + }, + "ignoreSSL": tea.BoolValue(runtime.IgnoreSSL), + } + + _resp := make(map[string]interface{}) + for _retryTimes := 0; tea.BoolValue(tea.AllowRetry(_runtime["retry"], tea.Int(_retryTimes))); _retryTimes++ { + if _retryTimes > 0 { + _backoffTime := tea.GetBackoffTime(_runtime["backoff"], tea.Int(_retryTimes)) + if tea.IntValue(_backoffTime) > 0 { + tea.Sleep(_backoffTime) + } + } + + _resp, _err = func() (map[string]interface{}, error) { + request_ := tea.NewRequest() + request_.Protocol = util.DefaultString(client.Protocol, protocol) + request_.Method = method + request_.Pathname = pathname + request_.Headers = tea.Merge(map[string]*string{ + "date": util.GetDateUTCString(), + "host": client.Endpoint, + "accept": tea.String("application/json"), + "x-acs-signature-nonce": util.GetNonce(), + "x-acs-signature-method": tea.String("HMAC-SHA1"), + "x-acs-signature-version": tea.String("1.0"), + "x-acs-version": version, + "x-acs-action": action, + "user-agent": util.GetUserAgent(client.UserAgent), + }, request.Headers) + if !tea.BoolValue(util.IsUnset(request.Body)) { + request_.Body = tea.ToReader(util.ToJSONString(request.Body)) + request_.Headers["content-type"] = tea.String("application/json; charset=utf-8") + } + + if !tea.BoolValue(util.IsUnset(request.Query)) { + request_.Query = 
request.Query + } + + if !tea.BoolValue(util.EqualString(authType, tea.String("Anonymous"))) { + accessKeyId, _err := client.GetAccessKeyId() + if _err != nil { + return _result, _err + } + + accessKeySecret, _err := client.GetAccessKeySecret() + if _err != nil { + return _result, _err + } + + securityToken, _err := client.GetSecurityToken() + if _err != nil { + return _result, _err + } + + if !tea.BoolValue(util.Empty(securityToken)) { + request_.Headers["x-acs-accesskey-id"] = accessKeyId + request_.Headers["x-acs-security-token"] = securityToken + } + + stringToSign := openapiutil.GetStringToSign(request_) + request_.Headers["authorization"] = tea.String("acs " + tea.StringValue(accessKeyId) + ":" + tea.StringValue(openapiutil.GetROASignature(stringToSign, accessKeySecret))) + } + + response_, _err := tea.DoRequest(request_, _runtime) + if _err != nil { + return _result, _err + } + if tea.BoolValue(util.EqualNumber(response_.StatusCode, tea.Int(204))) { + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]map[string]*string{ + "headers": response_.Headers, + }, &_result) + return _result, _err + } + + if tea.BoolValue(util.Is4xx(response_.StatusCode)) || tea.BoolValue(util.Is5xx(response_.StatusCode)) { + _res, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + err := util.AssertAsMap(_res) + requestId := DefaultAny(err["RequestId"], err["requestId"]) + requestId = DefaultAny(requestId, err["requestid"]) + _err = tea.NewSDKError(map[string]interface{}{ + "code": tea.ToString(DefaultAny(err["Code"], err["code"])), + "message": "code: " + tea.ToString(tea.IntValue(response_.StatusCode)) + ", " + tea.ToString(DefaultAny(err["Message"], err["message"])) + " request id: " + tea.ToString(requestId), + "data": err, + }) + return _result, _err + } + + if tea.BoolValue(util.EqualString(bodyType, tea.String("binary"))) { + resp := map[string]interface{}{ + "body": response_.Body, + "headers": response_.Headers, + } + _result = resp + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("byte"))) { + byt, _err := util.ReadAsBytes(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": byt, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("string"))) { + str, _err := util.ReadAsString(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": tea.StringValue(str), + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("json"))) { + obj, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + res := util.AssertAsMap(obj) + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": res, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("array"))) { + arr, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": arr, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else { + _result = make(map[string]interface{}) + _err = 
tea.Convert(map[string]map[string]*string{
+ "headers": response_.Headers,
+ }, &_result)
+ return _result, _err
+ }
+
+ }()
+ if !tea.BoolValue(tea.Retryable(_err)) {
+ break
+ }
+ }
+
+ return _resp, _err
+}
+
+/**
+ * Encapsulate the request and invoke the network with form body
+ * @param action api name
+ * @param version product version
+ * @param protocol http or https
+ * @param method e.g. GET
+ * @param authType authorization type e.g. AK
+ * @param pathname pathname of every api
+ * @param bodyType response body type e.g. String
+ * @param request object of OpenApiRequest
+ * @param runtime which controls some details of the API call, such as retry times
+ * @return the response
+ */
+func (client *Client) DoROARequestWithForm(action *string, version *string, protocol *string, method *string, authType *string, pathname *string, bodyType *string, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) {
+ _err = tea.Validate(request)
+ if _err != nil {
+ return _result, _err
+ }
+ _err = tea.Validate(runtime)
+ if _err != nil {
+ return _result, _err
+ }
+ _runtime := map[string]interface{}{
+ "timeouted": "retry",
+ "readTimeout": tea.IntValue(util.DefaultNumber(runtime.ReadTimeout, client.ReadTimeout)),
+ "connectTimeout": tea.IntValue(util.DefaultNumber(runtime.ConnectTimeout, client.ConnectTimeout)),
+ "httpProxy": tea.StringValue(util.DefaultString(runtime.HttpProxy, client.HttpProxy)),
+ "httpsProxy": tea.StringValue(util.DefaultString(runtime.HttpsProxy, client.HttpsProxy)),
+ "noProxy": tea.StringValue(util.DefaultString(runtime.NoProxy, client.NoProxy)),
+ "socks5Proxy": tea.StringValue(util.DefaultString(runtime.Socks5Proxy, client.Socks5Proxy)),
+ "socks5NetWork": tea.StringValue(util.DefaultString(runtime.Socks5NetWork, client.Socks5NetWork)),
+ "maxIdleConns": tea.IntValue(util.DefaultNumber(runtime.MaxIdleConns, client.MaxIdleConns)),
+ "retry": map[string]interface{}{
+ "retryable": tea.BoolValue(runtime.Autoretry),
+ "maxAttempts": tea.IntValue(util.DefaultNumber(runtime.MaxAttempts, tea.Int(3))),
+ },
+ "backoff": map[string]interface{}{
+ "policy": tea.StringValue(util.DefaultString(runtime.BackoffPolicy, tea.String("no"))),
+ "period": tea.IntValue(util.DefaultNumber(runtime.BackoffPeriod, tea.Int(1))),
+ },
+ "ignoreSSL": tea.BoolValue(runtime.IgnoreSSL),
+ }
+
+ _resp := make(map[string]interface{})
+ for _retryTimes := 0; tea.BoolValue(tea.AllowRetry(_runtime["retry"], tea.Int(_retryTimes))); _retryTimes++ {
+ if _retryTimes > 0 {
+ _backoffTime := tea.GetBackoffTime(_runtime["backoff"], tea.Int(_retryTimes))
+ if tea.IntValue(_backoffTime) > 0 {
+ tea.Sleep(_backoffTime)
+ }
+ }
+
+ _resp, _err = func() (map[string]interface{}, error) {
+ request_ := tea.NewRequest()
+ request_.Protocol = util.DefaultString(client.Protocol, protocol)
+ request_.Method = method
+ request_.Pathname = pathname
+ request_.Headers = tea.Merge(map[string]*string{
+ "date": util.GetDateUTCString(),
+ "host": client.Endpoint,
+ "accept": tea.String("application/json"),
+ "x-acs-signature-nonce": util.GetNonce(),
+ "x-acs-signature-method": tea.String("HMAC-SHA1"),
+ "x-acs-signature-version": tea.String("1.0"),
+ "x-acs-version": version,
+ "x-acs-action": action,
+ "user-agent": util.GetUserAgent(client.UserAgent),
+ }, request.Headers)
+ if !tea.BoolValue(util.IsUnset(request.Body)) {
+ m := util.AssertAsMap(request.Body)
+ request_.Body = tea.ToReader(openapiutil.ToForm(m))
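+ // unlike DoROARequest, the body is posted as a URL-encoded form instead of JSON
+ request_.Headers["content-type"] = 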
tea.String("application/x-www-form-urlencoded") + } + + if !tea.BoolValue(util.IsUnset(request.Query)) { + request_.Query = request.Query + } + + if !tea.BoolValue(util.EqualString(authType, tea.String("Anonymous"))) { + accessKeyId, _err := client.GetAccessKeyId() + if _err != nil { + return _result, _err + } + + accessKeySecret, _err := client.GetAccessKeySecret() + if _err != nil { + return _result, _err + } + + securityToken, _err := client.GetSecurityToken() + if _err != nil { + return _result, _err + } + + if !tea.BoolValue(util.Empty(securityToken)) { + request_.Headers["x-acs-accesskey-id"] = accessKeyId + request_.Headers["x-acs-security-token"] = securityToken + } + + stringToSign := openapiutil.GetStringToSign(request_) + request_.Headers["authorization"] = tea.String("acs " + tea.StringValue(accessKeyId) + ":" + tea.StringValue(openapiutil.GetROASignature(stringToSign, accessKeySecret))) + } + + response_, _err := tea.DoRequest(request_, _runtime) + if _err != nil { + return _result, _err + } + if tea.BoolValue(util.EqualNumber(response_.StatusCode, tea.Int(204))) { + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]map[string]*string{ + "headers": response_.Headers, + }, &_result) + return _result, _err + } + + if tea.BoolValue(util.Is4xx(response_.StatusCode)) || tea.BoolValue(util.Is5xx(response_.StatusCode)) { + _res, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + err := util.AssertAsMap(_res) + _err = tea.NewSDKError(map[string]interface{}{ + "code": tea.ToString(DefaultAny(err["Code"], err["code"])), + "message": "code: " + tea.ToString(tea.IntValue(response_.StatusCode)) + ", " + tea.ToString(DefaultAny(err["Message"], err["message"])) + " request id: " + tea.ToString(DefaultAny(err["RequestId"], err["requestId"])), + "data": err, + }) + return _result, _err + } + + if tea.BoolValue(util.EqualString(bodyType, tea.String("binary"))) { + resp := map[string]interface{}{ + "body": response_.Body, + "headers": response_.Headers, + } + _result = resp + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("byte"))) { + byt, _err := util.ReadAsBytes(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": byt, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("string"))) { + str, _err := util.ReadAsString(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": tea.StringValue(str), + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("json"))) { + obj, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + res := util.AssertAsMap(obj) + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": res, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("array"))) { + arr, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": arr, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else { + _result = 
make(map[string]interface{})
+ _err = tea.Convert(map[string]map[string]*string{
+ "headers": response_.Headers,
+ }, &_result)
+ return _result, _err
+ }
+
+ }()
+ if !tea.BoolValue(tea.Retryable(_err)) {
+ break
+ }
+ }
+
+ return _resp, _err
+}
+
+/**
+ * Encapsulate the request and invoke the network
+ * @param action api name
+ * @param version product version
+ * @param protocol http or https
+ * @param method e.g. GET
+ * @param authType authorization type e.g. AK
+ * @param bodyType response body type e.g. String
+ * @param request object of OpenApiRequest
+ * @param runtime which controls some details of the API call, such as retry times
+ * @return the response
+ */
+func (client *Client) DoRequest(params *Params, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) {
+ _err = tea.Validate(params)
+ if _err != nil {
+ return _result, _err
+ }
+ _err = tea.Validate(request)
+ if _err != nil {
+ return _result, _err
+ }
+ _err = tea.Validate(runtime)
+ if _err != nil {
+ return _result, _err
+ }
+ _runtime := map[string]interface{}{
+ "timeouted": "retry",
+ "readTimeout": tea.IntValue(util.DefaultNumber(runtime.ReadTimeout, client.ReadTimeout)),
+ "connectTimeout": tea.IntValue(util.DefaultNumber(runtime.ConnectTimeout, client.ConnectTimeout)),
+ "httpProxy": tea.StringValue(util.DefaultString(runtime.HttpProxy, client.HttpProxy)),
+ "httpsProxy": tea.StringValue(util.DefaultString(runtime.HttpsProxy, client.HttpsProxy)),
+ "noProxy": tea.StringValue(util.DefaultString(runtime.NoProxy, client.NoProxy)),
+ "socks5Proxy": tea.StringValue(util.DefaultString(runtime.Socks5Proxy, client.Socks5Proxy)),
+ "socks5NetWork": tea.StringValue(util.DefaultString(runtime.Socks5NetWork, client.Socks5NetWork)),
+ "maxIdleConns": tea.IntValue(util.DefaultNumber(runtime.MaxIdleConns, client.MaxIdleConns)),
+ "retry": map[string]interface{}{
+ "retryable": tea.BoolValue(runtime.Autoretry),
+ "maxAttempts": tea.IntValue(util.DefaultNumber(runtime.MaxAttempts, tea.Int(3))),
+ },
+ "backoff": map[string]interface{}{
+ "policy": tea.StringValue(util.DefaultString(runtime.BackoffPolicy, tea.String("no"))),
+ "period": tea.IntValue(util.DefaultNumber(runtime.BackoffPeriod, tea.Int(1))),
+ },
+ "ignoreSSL": tea.BoolValue(runtime.IgnoreSSL),
+ }
+
+ _resp := make(map[string]interface{})
+ for _retryTimes := 0; tea.BoolValue(tea.AllowRetry(_runtime["retry"], tea.Int(_retryTimes))); _retryTimes++ {
+ if _retryTimes > 0 {
+ _backoffTime := tea.GetBackoffTime(_runtime["backoff"], tea.Int(_retryTimes))
+ if tea.IntValue(_backoffTime) > 0 {
+ tea.Sleep(_backoffTime)
+ }
+ }
+
+ _resp, _err = func() (map[string]interface{}, error) {
+ request_ := tea.NewRequest()
+ request_.Protocol = util.DefaultString(client.Protocol, params.Protocol)
+ request_.Method = params.Method
+ request_.Pathname = params.Pathname
+ request_.Query = request.Query
+ // endpoint is set in the product client
+ request_.Headers = tea.Merge(map[string]*string{
+ "host": client.Endpoint,
+ "x-acs-version": params.Version,
+ "x-acs-action": params.Action,
+ "user-agent": client.GetUserAgent(),
+ "x-acs-date": openapiutil.GetTimestamp(),
+ "x-acs-signature-nonce": util.GetNonce(),
+ "accept": tea.String("application/json"),
+ }, request.Headers)
+ if tea.BoolValue(util.EqualString(params.Style, tea.String("RPC"))) {
+ headers, _err := client.GetRpcHeaders()
+ if _err != nil {
+ return _result, _err
+ }
+
+ if !tea.BoolValue(util.IsUnset(headers)) {
+ request_.Headers = tea.Merge(request_.Headers,
+ 
headers) + } + + } + + signatureAlgorithm := util.DefaultString(client.SignatureAlgorithm, tea.String("ACS3-HMAC-SHA256")) + hashedRequestPayload := openapiutil.HexEncode(openapiutil.Hash(util.ToBytes(tea.String("")), signatureAlgorithm)) + if !tea.BoolValue(util.IsUnset(request.Stream)) { + tmp, _err := util.ReadAsBytes(request.Stream) + if _err != nil { + return _result, _err + } + + hashedRequestPayload = openapiutil.HexEncode(openapiutil.Hash(tmp, signatureAlgorithm)) + request_.Body = tea.ToReader(tmp) + request_.Headers["content-type"] = tea.String("application/octet-stream") + } else { + if !tea.BoolValue(util.IsUnset(request.Body)) { + if tea.BoolValue(util.EqualString(params.ReqBodyType, tea.String("json"))) { + jsonObj := util.ToJSONString(request.Body) + hashedRequestPayload = openapiutil.HexEncode(openapiutil.Hash(util.ToBytes(jsonObj), signatureAlgorithm)) + request_.Body = tea.ToReader(jsonObj) + request_.Headers["content-type"] = tea.String("application/json; charset=utf-8") + } else { + m := util.AssertAsMap(request.Body) + formObj := openapiutil.ToForm(m) + hashedRequestPayload = openapiutil.HexEncode(openapiutil.Hash(util.ToBytes(formObj), signatureAlgorithm)) + request_.Body = tea.ToReader(formObj) + request_.Headers["content-type"] = tea.String("application/x-www-form-urlencoded") + } + + } + + } + + request_.Headers["x-acs-content-sha256"] = hashedRequestPayload + if !tea.BoolValue(util.EqualString(params.AuthType, tea.String("Anonymous"))) { + authType, _err := client.GetType() + if _err != nil { + return _result, _err + } + + if tea.BoolValue(util.EqualString(authType, tea.String("bearer"))) { + bearerToken, _err := client.GetBearerToken() + if _err != nil { + return _result, _err + } + + request_.Headers["x-acs-bearer-token"] = bearerToken + } else { + accessKeyId, _err := client.GetAccessKeyId() + if _err != nil { + return _result, _err + } + + accessKeySecret, _err := client.GetAccessKeySecret() + if _err != nil { + return _result, _err + } + + securityToken, _err := client.GetSecurityToken() + if _err != nil { + return _result, _err + } + + if !tea.BoolValue(util.Empty(securityToken)) { + request_.Headers["x-acs-accesskey-id"] = accessKeyId + request_.Headers["x-acs-security-token"] = securityToken + } + + request_.Headers["Authorization"] = openapiutil.GetAuthorization(request_, signatureAlgorithm, hashedRequestPayload, accessKeyId, accessKeySecret) + } + + } + + response_, _err := tea.DoRequest(request_, _runtime) + if _err != nil { + return _result, _err + } + if tea.BoolValue(util.Is4xx(response_.StatusCode)) || tea.BoolValue(util.Is5xx(response_.StatusCode)) { + err := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(response_.Headers["content-type"])) && tea.BoolValue(util.EqualString(response_.Headers["content-type"], tea.String("text/xml;charset=utf-8"))) { + _str, _err := util.ReadAsString(response_.Body) + if _err != nil { + return _result, _err + } + + respMap := xml.ParseXml(_str, nil) + err = util.AssertAsMap(respMap["Error"]) + } else { + _res, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + err = util.AssertAsMap(_res) + } + + err["statusCode"] = response_.StatusCode + _err = tea.NewSDKError(map[string]interface{}{ + "code": tea.ToString(DefaultAny(err["Code"], err["code"])), + "message": "code: " + tea.ToString(tea.IntValue(response_.StatusCode)) + ", " + tea.ToString(DefaultAny(err["Message"], err["message"])) + " request id: " + tea.ToString(DefaultAny(err["RequestId"], err["requestId"])), + 
"data": err, + }) + return _result, _err + } + + if tea.BoolValue(util.EqualString(params.BodyType, tea.String("binary"))) { + resp := map[string]interface{}{ + "body": response_.Body, + "headers": response_.Headers, + } + _result = resp + return _result, _err + } else if tea.BoolValue(util.EqualString(params.BodyType, tea.String("byte"))) { + byt, _err := util.ReadAsBytes(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": byt, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(params.BodyType, tea.String("string"))) { + str, _err := util.ReadAsString(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": tea.StringValue(str), + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(params.BodyType, tea.String("json"))) { + obj, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + res := util.AssertAsMap(obj) + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": res, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(params.BodyType, tea.String("array"))) { + arr, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": arr, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else { + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]map[string]*string{ + "headers": response_.Headers, + }, &_result) + return _result, _err + } + + }() + if !tea.BoolValue(tea.Retryable(_err)) { + break + } + } + + return _resp, _err +} + +/** + * Encapsulate the request and invoke the network + * @param action api name + * @param version product version + * @param protocol http or https + * @param method e.g. GET + * @param authType authorization type e.g. AK + * @param bodyType response body type e.g. 
String
+ * @param request object of OpenApiRequest
+ * @param runtime which controls some details of the API call, such as retry times
+ * @return the response
+ */
+func (client *Client) Execute(params *Params, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) {
+ _err = tea.Validate(params)
+ if _err != nil {
+ return _result, _err
+ }
+ _err = tea.Validate(request)
+ if _err != nil {
+ return _result, _err
+ }
+ _err = tea.Validate(runtime)
+ if _err != nil {
+ return _result, _err
+ }
+ _runtime := map[string]interface{}{
+ "timeouted": "retry",
+ "readTimeout": tea.IntValue(util.DefaultNumber(runtime.ReadTimeout, client.ReadTimeout)),
+ "connectTimeout": tea.IntValue(util.DefaultNumber(runtime.ConnectTimeout, client.ConnectTimeout)),
+ "httpProxy": tea.StringValue(util.DefaultString(runtime.HttpProxy, client.HttpProxy)),
+ "httpsProxy": tea.StringValue(util.DefaultString(runtime.HttpsProxy, client.HttpsProxy)),
+ "noProxy": tea.StringValue(util.DefaultString(runtime.NoProxy, client.NoProxy)),
+ "socks5Proxy": tea.StringValue(util.DefaultString(runtime.Socks5Proxy, client.Socks5Proxy)),
+ "socks5NetWork": tea.StringValue(util.DefaultString(runtime.Socks5NetWork, client.Socks5NetWork)),
+ "maxIdleConns": tea.IntValue(util.DefaultNumber(runtime.MaxIdleConns, client.MaxIdleConns)),
+ "retry": map[string]interface{}{
+ "retryable": tea.BoolValue(runtime.Autoretry),
+ "maxAttempts": tea.IntValue(util.DefaultNumber(runtime.MaxAttempts, tea.Int(3))),
+ },
+ "backoff": map[string]interface{}{
+ "policy": tea.StringValue(util.DefaultString(runtime.BackoffPolicy, tea.String("no"))),
+ "period": tea.IntValue(util.DefaultNumber(runtime.BackoffPeriod, tea.Int(1))),
+ },
+ "ignoreSSL": tea.BoolValue(runtime.IgnoreSSL),
+ }
+
+ _resp := make(map[string]interface{})
+ for _retryTimes := 0; tea.BoolValue(tea.AllowRetry(_runtime["retry"], tea.Int(_retryTimes))); _retryTimes++ {
+ if _retryTimes > 0 {
+ _backoffTime := tea.GetBackoffTime(_runtime["backoff"], tea.Int(_retryTimes))
+ if tea.IntValue(_backoffTime) > 0 {
+ tea.Sleep(_backoffTime)
+ }
+ }
+
+ _resp, _err = func() (map[string]interface{}, error) {
+ request_ := tea.NewRequest()
+ // spi = new Gateway(); Gateway implements SPI and is instantiated in the product SDK
+ headers, _err := client.GetRpcHeaders()
+ if _err != nil {
+ return _result, _err
+ }
+
+ requestContext := &spi.InterceptorContextRequest{
+ Headers: tea.Merge(request.Headers,
+ headers),
+ Query: request.Query,
+ Body: request.Body,
+ Stream: request.Stream,
+ HostMap: request.HostMap,
+ Pathname: params.Pathname,
+ ProductId: client.ProductId,
+ Action: params.Action,
+ Version: params.Version,
+ Protocol: util.DefaultString(client.Protocol, params.Protocol),
+ Method: util.DefaultString(client.Method, params.Method),
+ AuthType: params.AuthType,
+ BodyType: params.BodyType,
+ ReqBodyType: params.ReqBodyType,
+ Style: params.Style,
+ Credential: client.Credential,
+ SignatureVersion: client.SignatureVersion,
+ SignatureAlgorithm: client.SignatureAlgorithm,
+ UserAgent: client.GetUserAgent(),
+ }
+ configurationContext := &spi.InterceptorContextConfiguration{
+ RegionId: client.RegionId,
+ Endpoint: util.DefaultString(request.EndpointOverride, client.Endpoint),
+ EndpointRule: client.EndpointRule,
+ EndpointMap: client.EndpointMap,
+ EndpointType: client.EndpointType,
+ Network: client.Network,
+ Suffix: client.Suffix,
+ }
+ interceptorContext := &spi.InterceptorContext{
+ Request: requestContext,
+ Configuration: configurationContext,
+ }
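+ // attributeMap carries shared state across the three SPI interceptor hooks invoked below
+ attributeMap 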
:= &spi.AttributeMap{} + // 1. spi.modifyConfiguration(context: SPI.InterceptorContext, attributeMap: SPI.AttributeMap); + _err = client.Spi.ModifyConfiguration(interceptorContext, attributeMap) + if _err != nil { + return _result, _err + } + // 2. spi.modifyRequest(context: SPI.InterceptorContext, attributeMap: SPI.AttributeMap); + _err = client.Spi.ModifyRequest(interceptorContext, attributeMap) + if _err != nil { + return _result, _err + } + request_.Protocol = interceptorContext.Request.Protocol + request_.Method = interceptorContext.Request.Method + request_.Pathname = interceptorContext.Request.Pathname + request_.Query = interceptorContext.Request.Query + request_.Body = interceptorContext.Request.Stream + request_.Headers = interceptorContext.Request.Headers + response_, _err := tea.DoRequest(request_, _runtime) + if _err != nil { + return _result, _err + } + responseContext := &spi.InterceptorContextResponse{ + StatusCode: response_.StatusCode, + Headers: response_.Headers, + Body: response_.Body, + } + interceptorContext.Response = responseContext + // 3. spi.modifyResponse(context: SPI.InterceptorContext, attributeMap: SPI.AttributeMap); + _err = client.Spi.ModifyResponse(interceptorContext, attributeMap) + if _err != nil { + return _result, _err + } + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "headers": interceptorContext.Response.Headers, + "body": interceptorContext.Response.DeserializedBody, + }, &_result) + return _result, _err + }() + if !tea.BoolValue(tea.Retryable(_err)) { + break + } + } + + return _resp, _err +} + +func (client *Client) CallApi(params *Params, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) { + if tea.BoolValue(util.IsUnset(tea.ToMap(params))) { + _err = tea.NewSDKError(map[string]interface{}{ + "code": "ParameterMissing", + "message": "'params' can not be unset", + }) + return _result, _err + } + + if tea.BoolValue(util.IsUnset(client.SignatureAlgorithm)) || !tea.BoolValue(util.EqualString(client.SignatureAlgorithm, tea.String("v2"))) { + _result = make(map[string]interface{}) + _body, _err := client.DoRequest(params, request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err + } else if tea.BoolValue(util.EqualString(params.Style, tea.String("ROA"))) && tea.BoolValue(util.EqualString(params.ReqBodyType, tea.String("json"))) { + _result = make(map[string]interface{}) + _body, _err := client.DoROARequest(params.Action, params.Version, params.Protocol, params.Method, params.AuthType, params.Pathname, params.BodyType, request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err + } else if tea.BoolValue(util.EqualString(params.Style, tea.String("ROA"))) { + _result = make(map[string]interface{}) + _body, _err := client.DoROARequestWithForm(params.Action, params.Version, params.Protocol, params.Method, params.AuthType, params.Pathname, params.BodyType, request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err + } else { + _result = make(map[string]interface{}) + _body, _err := client.DoRPCRequest(params.Action, params.Version, params.Protocol, params.Method, params.AuthType, params.BodyType, request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err + } + +} + +/** + * Get user agent + * @return user agent + */ +func (client *Client) GetUserAgent() (_result *string) { + userAgent 
:= util.GetUserAgent(client.UserAgent)
+ _result = userAgent
+ return _result
+}
+
+/**
+ * Get accesskey id by using credential
+ * @return accesskey id
+ */
+func (client *Client) GetAccessKeyId() (_result *string, _err error) {
+ if tea.BoolValue(util.IsUnset(client.Credential)) {
+ _result = tea.String("")
+ return _result, _err
+ }
+
+ accessKeyId, _err := client.Credential.GetAccessKeyId()
+ if _err != nil {
+ return _result, _err
+ }
+
+ _result = accessKeyId
+ return _result, _err
+}
+
+/**
+ * Get accesskey secret by using credential
+ * @return accesskey secret
+ */
+func (client *Client) GetAccessKeySecret() (_result *string, _err error) {
+ if tea.BoolValue(util.IsUnset(client.Credential)) {
+ _result = tea.String("")
+ return _result, _err
+ }
+
+ secret, _err := client.Credential.GetAccessKeySecret()
+ if _err != nil {
+ return _result, _err
+ }
+
+ _result = secret
+ return _result, _err
+}
+
+/**
+ * Get security token by using credential
+ * @return security token
+ */
+func (client *Client) GetSecurityToken() (_result *string, _err error) {
+ if tea.BoolValue(util.IsUnset(client.Credential)) {
+ _result = tea.String("")
+ return _result, _err
+ }
+
+ token, _err := client.Credential.GetSecurityToken()
+ if _err != nil {
+ return _result, _err
+ }
+
+ _result = token
+ return _result, _err
+}
+
+/**
+ * Get bearer token by credential
+ * @return bearer token
+ */
+func (client *Client) GetBearerToken() (_result *string, _err error) {
+ if tea.BoolValue(util.IsUnset(client.Credential)) {
+ _result = tea.String("")
+ return _result, _err
+ }
+
+ token := client.Credential.GetBearerToken()
+ _result = token
+ return _result, _err
+}
+
+/**
+ * Get credential type by credential
+ * @return credential type e.g. access_key
+ */
+func (client *Client) GetType() (_result *string, _err error) {
+ if tea.BoolValue(util.IsUnset(client.Credential)) {
+ _result = tea.String("")
+ return _result, _err
+ }
+
+ authType := client.Credential.GetType()
+ _result = authType
+ return _result, _err
+}
+
+/**
+ * Return inputValue if it is set, otherwise return defaultValue
+ * @param inputValue users input value
+ * @param defaultValue default value
+ * @return the final result
+ */
+func DefaultAny(inputValue interface{}, defaultValue interface{}) (_result interface{}) {
+ if tea.BoolValue(util.IsUnset(inputValue)) {
+ _result = defaultValue
+ return _result
+ }
+
+ _result = inputValue
+ return _result
+}
+
+/**
+ * If both the endpointRule and config.endpoint are empty, return an error
+ * @param config config contains the necessary information to create a client
+ */
+func (client *Client) CheckConfig(config *Config) (_err error) {
+ if tea.BoolValue(util.Empty(client.EndpointRule)) && tea.BoolValue(util.Empty(config.Endpoint)) {
+ _err = tea.NewSDKError(map[string]interface{}{
+ "code": "ParameterMissing",
+ "message": "'config.endpoint' can not be empty",
+ })
+ return _err
+ }
+
+ return _err
+}
+
+/**
+ * Set RPC headers for debug
+ * @param headers headers for debug; these headers can be used only once.
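+ *
+ * A minimal usage sketch (hypothetical header name and value); the headers set here
+ * apply to exactly one subsequent call, since GetRpcHeaders clears them after reading:
+ *
+ *   _ = client.SetRpcHeaders(map[string]*string{"x-debug-trace": tea.String("on")})
+ *   resp, err := client.CallApi(params, request, runtime)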
+ */ +func (client *Client) SetRpcHeaders(headers map[string]*string) (_err error) { + client.Headers = headers + return _err +} + +/** + * get RPC header for debug + */ +func (client *Client) GetRpcHeaders() (_result map[string]*string, _err error) { + headers := client.Headers + client.Headers = nil + _result = headers + return _result, _err +} diff --git a/vendor/github.com/alibabacloud-go/debug/LICENSE b/vendor/github.com/alibabacloud-go/debug/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/debug/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/alibabacloud-go/debug/debug/assert.go b/vendor/github.com/alibabacloud-go/debug/debug/assert.go new file mode 100644 index 000000000..6fca15a63 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/debug/debug/assert.go @@ -0,0 +1,12 @@ +package debug + +import ( + "reflect" + "testing" +) + +func assertEqual(t *testing.T, a, b interface{}) { + if !reflect.DeepEqual(a, b) { + t.Errorf("%v != %v", a, b) + } +} diff --git a/vendor/github.com/alibabacloud-go/debug/debug/debug.go b/vendor/github.com/alibabacloud-go/debug/debug/debug.go new file mode 100644 index 000000000..c977cb8c3 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/debug/debug/debug.go @@ -0,0 +1,36 @@ +package debug + +import ( + "fmt" + "os" + "strings" +) + +type Debug func(format string, v ...interface{}) + +var hookGetEnv = func() string { + return os.Getenv("DEBUG") +} + +var hookPrint = func(input string) { + fmt.Println(input) +} + +func Init(flag string) Debug { + enable := false + + env := hookGetEnv() + parts := strings.Split(env, ",") + for _, part := range parts { + if part == flag { + enable = true + break + } + } + + return func(format string, v ...interface{}) { + if enable { + hookPrint(fmt.Sprintf(format, v...)) + } + } +} diff --git a/vendor/github.com/alibabacloud-go/endpoint-util/LICENSE b/vendor/github.com/alibabacloud-go/endpoint-util/LICENSE new file mode 100644 index 000000000..0c44dcefe --- /dev/null +++ b/vendor/github.com/alibabacloud-go/endpoint-util/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/alibabacloud-go/endpoint-util/service/service.go b/vendor/github.com/alibabacloud-go/endpoint-util/service/service.go
new file mode 100644
index 000000000..85e5fda6e
--- /dev/null
+++ b/vendor/github.com/alibabacloud-go/endpoint-util/service/service.go
@@ -0,0 +1,41 @@
+// This file is auto-generated, don't edit it. Thanks.
+/**
+ * Get endpoint
+ * @return string
+ */
+package service
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/alibabacloud-go/tea/tea"
+)
+
+func GetEndpointRules(product, regionId, endpointType, network, suffix *string) (_result *string, _err error) {
+	if tea.StringValue(endpointType) == "regional" {
+		if tea.StringValue(regionId) == "" {
+			_err = fmt.Errorf("RegionId is empty, please set a valid RegionId")
+			return tea.String(""), _err
+		}
+		_result = tea.String(strings.Replace("<product><suffix><network>.<region_id>.aliyuncs.com",
+			"<region_id>", tea.StringValue(regionId), 1))
+	} else {
+		_result = tea.String("<product><suffix><network>.aliyuncs.com")
+	}
+	_result = tea.String(strings.Replace(tea.StringValue(_result),
+		"<product>", strings.ToLower(tea.StringValue(product)), 1))
+	if tea.StringValue(network) == "" || tea.StringValue(network) == "public" {
+		_result = tea.String(strings.Replace(tea.StringValue(_result), "<network>", "", 1))
+	} else {
+		_result = tea.String(strings.Replace(tea.StringValue(_result),
+			"<network>", "-"+tea.StringValue(network), 1))
+	}
+	if tea.StringValue(suffix) == "" {
+		_result = tea.String(strings.Replace(tea.StringValue(_result), "<suffix>", "", 1))
+	} else {
+		_result = tea.String(strings.Replace(tea.StringValue(_result),
+			"<suffix>", "-"+tea.StringValue(suffix), 1))
+	}
+	return _result, nil
+}
diff --git a/vendor/github.com/alibabacloud-go/openapi-util/LICENSE b/vendor/github.com/alibabacloud-go/openapi-util/LICENSE
new file mode 100644
index 000000000..0c44dcefe
--- /dev/null
+++ b/vendor/github.com/alibabacloud-go/openapi-util/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/alibabacloud-go/openapi-util/service/service.go b/vendor/github.com/alibabacloud-go/openapi-util/service/service.go new file mode 100644 index 000000000..245eeccb0 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/openapi-util/service/service.go @@ -0,0 +1,635 @@ +// This file is auto-generated, don't edit it. Thanks. 
+/** + * This is for OpenApi Util + */ +package service + +import ( + "bytes" + "crypto" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "hash" + "io" + "net/http" + "net/textproto" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + util "github.com/alibabacloud-go/tea-utils/service" + "github.com/alibabacloud-go/tea/tea" + "github.com/tjfoc/gmsm/sm3" +) + +const ( + PEM_BEGIN = "-----BEGIN RSA PRIVATE KEY-----\n" + PEM_END = "\n-----END RSA PRIVATE KEY-----" +) + +type Sorter struct { + Keys []string + Vals []string +} + +func newSorter(m map[string]string) *Sorter { + hs := &Sorter{ + Keys: make([]string, 0, len(m)), + Vals: make([]string, 0, len(m)), + } + + for k, v := range m { + hs.Keys = append(hs.Keys, k) + hs.Vals = append(hs.Vals, v) + } + return hs +} + +// Sort is an additional function for function SignHeader. +func (hs *Sorter) Sort() { + sort.Sort(hs) +} + +// Len is an additional function for function SignHeader. +func (hs *Sorter) Len() int { + return len(hs.Vals) +} + +// Less is an additional function for function SignHeader. +func (hs *Sorter) Less(i, j int) bool { + return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0 +} + +// Swap is an additional function for function SignHeader. +func (hs *Sorter) Swap(i, j int) { + hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i] + hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i] +} + +/** + * Convert all params of body other than type of readable into content + * @param body source Model + * @param content target Model + * @return void + */ +func Convert(body interface{}, content interface{}) { + res := make(map[string]interface{}) + val := reflect.ValueOf(body).Elem() + dataType := val.Type() + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + name, _ := field.Tag.Lookup("json") + name = strings.Split(name, ",omitempty")[0] + _, ok := val.Field(i).Interface().(io.Reader) + if !ok { + res[name] = val.Field(i).Interface() + } + } + byt, _ := json.Marshal(res) + json.Unmarshal(byt, content) +} + +/** + * Get the string to be signed according to request + * @param request which contains signed messages + * @return the signed string + */ +func GetStringToSign(request *tea.Request) (_result *string) { + return tea.String(getStringToSign(request)) +} + +func getStringToSign(request *tea.Request) string { + resource := tea.StringValue(request.Pathname) + queryParams := request.Query + // sort QueryParams by key + var queryKeys []string + for key := range queryParams { + queryKeys = append(queryKeys, key) + } + sort.Strings(queryKeys) + tmp := "" + for i := 0; i < len(queryKeys); i++ { + queryKey := queryKeys[i] + v := tea.StringValue(queryParams[queryKey]) + if v != "" { + tmp = tmp + "&" + queryKey + "=" + v + } else { + tmp = tmp + "&" + queryKey + } + } + if tmp != "" { + tmp = strings.TrimLeft(tmp, "&") + resource = resource + "?" 
+ tmp + } + return getSignedStr(request, resource) +} + +func getSignedStr(req *tea.Request, canonicalizedResource string) string { + temp := make(map[string]string) + + for k, v := range req.Headers { + if strings.HasPrefix(strings.ToLower(k), "x-acs-") { + temp[strings.ToLower(k)] = tea.StringValue(v) + } + } + hs := newSorter(temp) + + // Sort the temp by the ascending order + hs.Sort() + + // Get the canonicalizedOSSHeaders + canonicalizedOSSHeaders := "" + for i := range hs.Keys { + canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n" + } + + // Give other parameters values + // when sign URL, date is expires + date := tea.StringValue(req.Headers["date"]) + accept := tea.StringValue(req.Headers["accept"]) + contentType := tea.StringValue(req.Headers["content-type"]) + contentMd5 := tea.StringValue(req.Headers["content-md5"]) + + signStr := tea.StringValue(req.Method) + "\n" + accept + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource + return signStr +} + +/** + * Get signature according to stringToSign, secret + * @param stringToSign the signed string + * @param secret accesskey secret + * @return the signature + */ +func GetROASignature(stringToSign *string, secret *string) (_result *string) { + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(tea.StringValue(secret))) + io.WriteString(h, tea.StringValue(stringToSign)) + signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) + return tea.String(signedStr) +} + +func GetEndpoint(endpoint *string, server *bool, endpointType *string) *string { + if tea.StringValue(endpointType) == "internal" { + strs := strings.Split(tea.StringValue(endpoint), ".") + strs[0] += "-internal" + endpoint = tea.String(strings.Join(strs, ".")) + } + if tea.BoolValue(server) && tea.StringValue(endpointType) == "accelerate" { + return tea.String("oss-accelerate.aliyuncs.com") + } + + return endpoint +} + +func HexEncode(raw []byte) *string { + return tea.String(hex.EncodeToString(raw)) +} + +func Hash(raw []byte, signatureAlgorithm *string) []byte { + signType := tea.StringValue(signatureAlgorithm) + if signType == "ACS3-HMAC-SHA256" || signType == "ACS3-RSA-SHA256" { + h := sha256.New() + h.Write(raw) + return h.Sum(nil) + } else if signType == "ACS3-HMAC-SM3" { + h := sm3.New() + h.Write(raw) + return h.Sum(nil) + } + return nil +} + +func GetEncodePath(path *string) *string { + uri := tea.StringValue(path) + strs := strings.Split(uri, "/") + for i, v := range strs { + strs[i] = url.QueryEscape(v) + } + uri = strings.Join(strs, "/") + uri = strings.Replace(uri, "+", "%20", -1) + uri = strings.Replace(uri, "*", "%2A", -1) + uri = strings.Replace(uri, "%7E", "~", -1) + return tea.String(uri) +} + +func GetEncodeParam(param *string) *string { + uri := tea.StringValue(param) + uri = url.QueryEscape(uri) + uri = strings.Replace(uri, "+", "%20", -1) + uri = strings.Replace(uri, "*", "%2A", -1) + uri = strings.Replace(uri, "%7E", "~", -1) + return tea.String(uri) +} + +func GetAuthorization(request *tea.Request, signatureAlgorithm, payload, acesskey, secret *string) *string { + canonicalURI := tea.StringValue(request.Pathname) + if canonicalURI == "" { + canonicalURI = "/" + } + + canonicalURI = strings.Replace(canonicalURI, "+", "%20", -1) + canonicalURI = strings.Replace(canonicalURI, "*", "%2A", -1) + canonicalURI = strings.Replace(canonicalURI, "%7E", "~", -1) + + method := tea.StringValue(request.Method) + canonicalQueryString := getCanonicalQueryString(request.Query) + 
canonicalheaders, signedHeaders := getCanonicalHeaders(request.Headers) + + canonicalRequest := method + "\n" + canonicalURI + "\n" + canonicalQueryString + "\n" + canonicalheaders + "\n" + + strings.Join(signedHeaders, ";") + "\n" + tea.StringValue(payload) + signType := tea.StringValue(signatureAlgorithm) + StringToSign := signType + "\n" + tea.StringValue(HexEncode(Hash([]byte(canonicalRequest), signatureAlgorithm))) + signature := tea.StringValue(HexEncode(SignatureMethod(tea.StringValue(secret), StringToSign, signType))) + auth := signType + " Credential=" + tea.StringValue(acesskey) + ",SignedHeaders=" + + strings.Join(signedHeaders, ";") + ",Signature=" + signature + return tea.String(auth) +} + +func SignatureMethod(secret, source, signatureAlgorithm string) []byte { + if signatureAlgorithm == "ACS3-HMAC-SHA256" { + h := hmac.New(sha256.New, []byte(secret)) + h.Write([]byte(source)) + return h.Sum(nil) + } else if signatureAlgorithm == "ACS3-HMAC-SM3" { + h := hmac.New(sm3.New, []byte(secret)) + h.Write([]byte(source)) + return h.Sum(nil) + } else if signatureAlgorithm == "ACS3-RSA-SHA256" { + return rsaSign(source, secret) + } + return nil +} + +func rsaSign(content, secret string) []byte { + h := crypto.SHA256.New() + h.Write([]byte(content)) + hashed := h.Sum(nil) + priv, err := parsePrivateKey(secret) + if err != nil { + return nil + } + sign, err := rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, hashed) + if err != nil { + return nil + } + return sign +} + +func parsePrivateKey(privateKey string) (*rsa.PrivateKey, error) { + privateKey = formatPrivateKey(privateKey) + block, _ := pem.Decode([]byte(privateKey)) + if block == nil { + return nil, errors.New("PrivateKey is invalid") + } + priKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + switch priKey.(type) { + case *rsa.PrivateKey: + return priKey.(*rsa.PrivateKey), nil + default: + return nil, nil + } +} + +func formatPrivateKey(privateKey string) string { + if !strings.HasPrefix(privateKey, PEM_BEGIN) { + privateKey = PEM_BEGIN + privateKey + } + + if !strings.HasSuffix(privateKey, PEM_END) { + privateKey += PEM_END + } + return privateKey +} + +func getCanonicalHeaders(headers map[string]*string) (string, []string) { + tmp := make(map[string]string) + tmpHeader := http.Header{} + for k, v := range headers { + if strings.HasPrefix(strings.ToLower(k), "x-acs-") || strings.ToLower(k) == "host" || + strings.ToLower(k) == "content-type" { + tmp[strings.ToLower(k)] = strings.TrimSpace(tea.StringValue(v)) + tmpHeader.Add(strings.ToLower(k), strings.TrimSpace(tea.StringValue(v))) + } + } + hs := newSorter(tmp) + + // Sort the temp by the ascending order + hs.Sort() + canonicalheaders := "" + for _, key := range hs.Keys { + vals := tmpHeader[textproto.CanonicalMIMEHeaderKey(key)] + sort.Strings(vals) + canonicalheaders += key + ":" + strings.Join(vals, ",") + "\n" + } + + return canonicalheaders, hs.Keys +} + +func getCanonicalQueryString(query map[string]*string) string { + canonicalQueryString := "" + if tea.BoolValue(util.IsUnset(query)) { + return canonicalQueryString + } + tmp := make(map[string]string) + for k, v := range query { + tmp[k] = tea.StringValue(v) + } + + hs := newSorter(tmp) + + // Sort the temp by the ascending order + hs.Sort() + for i := range hs.Keys { + if hs.Vals[i] != "" { + canonicalQueryString += "&" + hs.Keys[i] + "=" + url.QueryEscape(hs.Vals[i]) + } else { + canonicalQueryString += "&" + hs.Keys[i] + "=" + } + } + canonicalQueryString = 
strings.Replace(canonicalQueryString, "+", "%20", -1) + canonicalQueryString = strings.Replace(canonicalQueryString, "*", "%2A", -1) + canonicalQueryString = strings.Replace(canonicalQueryString, "%7E", "~", -1) + + if canonicalQueryString != "" { + canonicalQueryString = strings.TrimLeft(canonicalQueryString, "&") + } + return canonicalQueryString +} + +/** + * Parse filter into a form string + * @param filter object + * @return the string + */ +func ToForm(filter map[string]interface{}) (_result *string) { + tmp := make(map[string]interface{}) + byt, _ := json.Marshal(filter) + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + _ = d.Decode(&tmp) + + result := make(map[string]*string) + for key, value := range tmp { + filterValue := reflect.ValueOf(value) + flatRepeatedList(filterValue, result, key) + } + + m := util.AnyifyMapValue(result) + return util.ToFormString(m) +} + +func flatRepeatedList(dataValue reflect.Value, result map[string]*string, prefix string) { + if !dataValue.IsValid() { + return + } + + dataType := dataValue.Type() + if dataType.Kind().String() == "slice" { + handleRepeatedParams(dataValue, result, prefix) + } else if dataType.Kind().String() == "map" { + handleMap(dataValue, result, prefix) + } else { + result[prefix] = tea.String(fmt.Sprintf("%v", dataValue.Interface())) + } +} + +func handleRepeatedParams(repeatedFieldValue reflect.Value, result map[string]*string, prefix string) { + if repeatedFieldValue.IsValid() && !repeatedFieldValue.IsNil() { + for m := 0; m < repeatedFieldValue.Len(); m++ { + elementValue := repeatedFieldValue.Index(m) + key := prefix + "." + strconv.Itoa(m+1) + fieldValue := reflect.ValueOf(elementValue.Interface()) + if fieldValue.Kind().String() == "map" { + handleMap(fieldValue, result, key) + } else { + result[key] = tea.String(fmt.Sprintf("%v", fieldValue.Interface())) + } + } + } +} + +func handleMap(valueField reflect.Value, result map[string]*string, prefix string) { + if valueField.IsValid() && valueField.String() != "" { + valueFieldType := valueField.Type() + if valueFieldType.Kind().String() == "map" { + var byt []byte + byt, _ = json.Marshal(valueField.Interface()) + cache := make(map[string]interface{}) + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + _ = d.Decode(&cache) + for key, value := range cache { + pre := "" + if prefix != "" { + pre = prefix + "." + key + } else { + pre = key + } + fieldValue := reflect.ValueOf(value) + flatRepeatedList(fieldValue, result, pre) + } + } + } +} + +/** + * Get timestamp + * @return the timestamp string + */ +func GetTimestamp() (_result *string) { + gmt := time.FixedZone("GMT", 0) + return tea.String(time.Now().In(gmt).Format("2006-01-02T15:04:05Z")) +} + +/** + * Parse filter into a object which's type is map[string]string + * @param filter query param + * @return the object + */ +func Query(filter interface{}) (_result map[string]*string) { + tmp := make(map[string]interface{}) + byt, _ := json.Marshal(filter) + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + _ = d.Decode(&tmp) + + result := make(map[string]*string) + for key, value := range tmp { + filterValue := reflect.ValueOf(value) + flatRepeatedList(filterValue, result, key) + } + + return result +} + +/** + * Get signature according to signedParams, method and secret + * @param signedParams params which need to be signed + * @param method http method e.g. 
GET + * @param secret AccessKeySecret + * @return the signature + */ +func GetRPCSignature(signedParams map[string]*string, method *string, secret *string) (_result *string) { + stringToSign := buildRpcStringToSign(signedParams, tea.StringValue(method)) + signature := sign(stringToSign, tea.StringValue(secret), "&") + return tea.String(signature) +} + +/** + * Parse array into a string with specified style + * @param array the array + * @param prefix the prefix string + * @style specified style e.g. repeatList + * @return the string + */ +func ArrayToStringWithSpecifiedStyle(array interface{}, prefix *string, style *string) (_result *string) { + if tea.BoolValue(util.IsUnset(array)) { + return tea.String("") + } + + sty := tea.StringValue(style) + if sty == "repeatList" { + tmp := map[string]interface{}{ + tea.StringValue(prefix): array, + } + return flatRepeatList(tmp) + } else if sty == "simple" || sty == "spaceDelimited" || sty == "pipeDelimited" { + return flatArray(array, sty) + } else if sty == "json" { + return util.ToJSONString(array) + } + return tea.String("") +} + +func ParseToMap(in interface{}) map[string]interface{} { + if tea.BoolValue(util.IsUnset(in)) { + return nil + } + + tmp := make(map[string]interface{}) + byt, _ := json.Marshal(in) + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + err := d.Decode(&tmp) + if err != nil { + return nil + } + return tmp +} + +func flatRepeatList(filter map[string]interface{}) (_result *string) { + tmp := make(map[string]interface{}) + byt, _ := json.Marshal(filter) + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + _ = d.Decode(&tmp) + + result := make(map[string]*string) + for key, value := range tmp { + filterValue := reflect.ValueOf(value) + flatRepeatedList(filterValue, result, key) + } + + res := make(map[string]string) + for k, v := range result { + res[k] = tea.StringValue(v) + } + hs := newSorter(res) + + hs.Sort() + + // Get the canonicalizedOSSHeaders + t := "" + for i := range hs.Keys { + if i == len(hs.Keys)-1 { + t += hs.Keys[i] + "=" + hs.Vals[i] + } else { + t += hs.Keys[i] + "=" + hs.Vals[i] + "&&" + } + } + return tea.String(t) +} + +func flatArray(array interface{}, sty string) *string { + t := reflect.ValueOf(array) + strs := make([]string, 0) + for i := 0; i < t.Len(); i++ { + tmp := t.Index(i) + if tmp.Kind() == reflect.Ptr || tmp.Kind() == reflect.Interface { + tmp = tmp.Elem() + } + + if tmp.Kind() == reflect.Ptr { + tmp = tmp.Elem() + } + if tmp.Kind() == reflect.String { + strs = append(strs, tmp.String()) + } else { + inter := tmp.Interface() + byt, _ := json.Marshal(inter) + strs = append(strs, string(byt)) + } + } + str := "" + if sty == "simple" { + str = strings.Join(strs, ",") + } else if sty == "spaceDelimited" { + str = strings.Join(strs, " ") + } else if sty == "pipeDelimited" { + str = strings.Join(strs, "|") + } + return tea.String(str) +} + +func buildRpcStringToSign(signedParam map[string]*string, method string) (stringToSign string) { + signParams := make(map[string]string) + for key, value := range signedParam { + signParams[key] = tea.StringValue(value) + } + + stringToSign = getUrlFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = strings.Replace(stringToSign, "%7E", "~", -1) + stringToSign = url.QueryEscape(stringToSign) + stringToSign = method + "&%2F&" + stringToSign + return +} + +func getUrlFormedMap(source map[string]string) (urlEncoded string) { + 
urlEncoder := url.Values{} + for key, value := range source { + urlEncoder.Add(key, value) + } + urlEncoded = urlEncoder.Encode() + return +} + +func sign(stringToSign, accessKeySecret, secretSuffix string) string { + secret := accessKeySecret + secretSuffix + signedBytes := shaHmac1(stringToSign, secret) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +func shaHmac1(source, secret string) []byte { + key := []byte(secret) + hmac := hmac.New(sha1.New, key) + hmac.Write([]byte(source)) + return hmac.Sum(nil) +} diff --git a/vendor/github.com/alibabacloud-go/tea-utils/LICENSE b/vendor/github.com/alibabacloud-go/tea-utils/LICENSE new file mode 100644 index 000000000..0c44dcefe --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea-utils/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/alibabacloud-go/tea-utils/service/service.go b/vendor/github.com/alibabacloud-go/tea-utils/service/service.go new file mode 100644 index 000000000..2b6935723 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea-utils/service/service.go @@ -0,0 +1,462 @@ +package service + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/alibabacloud-go/tea/tea" +) + +var defaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Core/%s TeaDSL/1", runtime.GOOS, runtime.GOARCH, strings.Trim(runtime.Version(), "go"), "0.01") + +type RuntimeOptions struct { + Autoretry *bool `json:"autoretry" xml:"autoretry"` + IgnoreSSL *bool `json:"ignoreSSL" xml:"ignoreSSL"` + MaxAttempts *int `json:"maxAttempts" xml:"maxAttempts"` + BackoffPolicy *string `json:"backoffPolicy" xml:"backoffPolicy"` + BackoffPeriod *int `json:"backoffPeriod" xml:"backoffPeriod"` + ReadTimeout *int `json:"readTimeout" xml:"readTimeout"` + ConnectTimeout *int `json:"connectTimeout" xml:"connectTimeout"` + LocalAddr *string `json:"localAddr" xml:"localAddr"` + HttpProxy *string `json:"httpProxy" xml:"httpProxy"` + HttpsProxy *string `json:"httpsProxy" xml:"httpsProxy"` + NoProxy *string `json:"noProxy" xml:"noProxy"` + MaxIdleConns *int `json:"maxIdleConns" xml:"maxIdleConns"` + Socks5Proxy *string `json:"socks5Proxy" xml:"socks5Proxy"` + Socks5NetWork *string `json:"socks5NetWork" xml:"socks5NetWork"` +} + +func (s RuntimeOptions) String() string { + return tea.Prettify(s) +} + +func (s RuntimeOptions) GoString() string { + return s.String() +} + +func (s *RuntimeOptions) SetAutoretry(v bool) *RuntimeOptions { + s.Autoretry = &v + return s +} + +func (s *RuntimeOptions) SetIgnoreSSL(v bool) *RuntimeOptions { + s.IgnoreSSL = &v + return s +} + +func (s *RuntimeOptions) SetMaxAttempts(v int) *RuntimeOptions { + s.MaxAttempts = &v + return s +} + +func (s *RuntimeOptions) SetBackoffPolicy(v string) *RuntimeOptions { + s.BackoffPolicy = &v + return s +} + +func (s *RuntimeOptions) SetBackoffPeriod(v int) *RuntimeOptions { + s.BackoffPeriod = &v + return s +} + +func (s *RuntimeOptions) SetReadTimeout(v int) *RuntimeOptions { + s.ReadTimeout = &v + return s +} + +func (s *RuntimeOptions) SetConnectTimeout(v int) 
*RuntimeOptions { + s.ConnectTimeout = &v + return s +} + +func (s *RuntimeOptions) SetHttpProxy(v string) *RuntimeOptions { + s.HttpProxy = &v + return s +} + +func (s *RuntimeOptions) SetHttpsProxy(v string) *RuntimeOptions { + s.HttpsProxy = &v + return s +} + +func (s *RuntimeOptions) SetNoProxy(v string) *RuntimeOptions { + s.NoProxy = &v + return s +} + +func (s *RuntimeOptions) SetMaxIdleConns(v int) *RuntimeOptions { + s.MaxIdleConns = &v + return s +} + +func (s *RuntimeOptions) SetLocalAddr(v string) *RuntimeOptions { + s.LocalAddr = &v + return s +} + +func (s *RuntimeOptions) SetSocks5Proxy(v string) *RuntimeOptions { + s.Socks5Proxy = &v + return s +} + +func (s *RuntimeOptions) SetSocks5NetWork(v string) *RuntimeOptions { + s.Socks5NetWork = &v + return s +} + +func ReadAsString(body io.Reader) (*string, error) { + byt, err := ioutil.ReadAll(body) + if err != nil { + return tea.String(""), err + } + r, ok := body.(io.ReadCloser) + if ok { + r.Close() + } + return tea.String(string(byt)), nil +} + +func StringifyMapValue(a map[string]interface{}) map[string]*string { + res := make(map[string]*string) + for key, value := range a { + if value != nil { + switch value.(type) { + case string: + res[key] = tea.String(value.(string)) + default: + byt, _ := json.Marshal(value) + res[key] = tea.String(string(byt)) + } + } + } + return res +} + +func AnyifyMapValue(a map[string]*string) map[string]interface{} { + res := make(map[string]interface{}) + for key, value := range a { + res[key] = tea.StringValue(value) + } + return res +} + +func ReadAsBytes(body io.Reader) ([]byte, error) { + byt, err := ioutil.ReadAll(body) + if err != nil { + return nil, err + } + r, ok := body.(io.ReadCloser) + if ok { + r.Close() + } + return byt, nil +} + +func DefaultString(reaStr, defaultStr *string) *string { + if reaStr == nil { + return defaultStr + } + return reaStr +} + +func ToJSONString(a interface{}) *string { + switch v := a.(type) { + case *string: + return v + case string: + return tea.String(v) + case []byte: + return tea.String(string(v)) + case io.Reader: + byt, err := ioutil.ReadAll(v) + if err != nil { + return nil + } + return tea.String(string(byt)) + } + byt, err := json.Marshal(a) + if err != nil { + return nil + } + return tea.String(string(byt)) +} + +func DefaultNumber(reaNum, defaultNum *int) *int { + if reaNum == nil { + return defaultNum + } + return reaNum +} + +func ReadAsJSON(body io.Reader) (result interface{}, err error) { + byt, err := ioutil.ReadAll(body) + if err != nil { + return + } + if string(byt) == "" { + return + } + r, ok := body.(io.ReadCloser) + if ok { + r.Close() + } + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + err = d.Decode(&result) + return +} + +func GetNonce() *string { + return tea.String(getUUID()) +} + +func Empty(val *string) *bool { + return tea.Bool(val == nil || tea.StringValue(val) == "") +} + +func ValidateModel(a interface{}) error { + if a == nil { + return nil + } + err := tea.Validate(a) + return err +} + +func EqualString(val1, val2 *string) *bool { + return tea.Bool(tea.StringValue(val1) == tea.StringValue(val2)) +} + +func EqualNumber(val1, val2 *int) *bool { + return tea.Bool(tea.IntValue(val1) == tea.IntValue(val2)) +} + +func IsUnset(val interface{}) *bool { + if val == nil { + return tea.Bool(true) + } + + v := reflect.ValueOf(val) + if v.Kind() == reflect.Ptr || v.Kind() == reflect.Slice || v.Kind() == reflect.Map { + return tea.Bool(v.IsNil()) + } + + valType := reflect.TypeOf(val) + valZero := 
reflect.Zero(valType) + return tea.Bool(valZero == v) +} + +func ToBytes(a *string) []byte { + return []byte(tea.StringValue(a)) +} + +func AssertAsMap(a interface{}) map[string]interface{} { + r := reflect.ValueOf(a) + if r.Kind().String() != "map" { + panic(fmt.Sprintf("%v is not a map[string]interface{}", a)) + } + + res := make(map[string]interface{}) + tmp := r.MapKeys() + for _, key := range tmp { + res[key.String()] = r.MapIndex(key).Interface() + } + + return res +} + +func AssertAsNumber(a interface{}) *int { + res := 0 + switch a.(type) { + case int: + tmp := a.(int) + res = tmp + case *int: + tmp := a.(*int) + res = tea.IntValue(tmp) + default: + panic(fmt.Sprintf("%v is not a int", a)) + } + + return tea.Int(res) +} + +func AssertAsBoolean(a interface{}) *bool { + res := false + switch a.(type) { + case bool: + tmp := a.(bool) + res = tmp + case *bool: + tmp := a.(*bool) + res = tea.BoolValue(tmp) + default: + panic(fmt.Sprintf("%v is not a bool", a)) + } + + return tea.Bool(res) +} + +func AssertAsString(a interface{}) *string { + res := "" + switch a.(type) { + case string: + tmp := a.(string) + res = tmp + case *string: + tmp := a.(*string) + res = tea.StringValue(tmp) + default: + panic(fmt.Sprintf("%v is not a string", a)) + } + + return tea.String(res) +} + +func AssertAsBytes(a interface{}) []byte { + res, ok := a.([]byte) + if !ok { + panic(fmt.Sprintf("%v is not []byte", a)) + } + return res +} + +func AssertAsReadable(a interface{}) io.Reader { + res, ok := a.(io.Reader) + if !ok { + panic(fmt.Sprintf("%v is not reader", a)) + } + return res +} + +func AssertAsArray(a interface{}) []interface{} { + r := reflect.ValueOf(a) + if r.Kind().String() != "array" && r.Kind().String() != "slice" { + panic(fmt.Sprintf("%v is not a [x]interface{}", a)) + } + aLen := r.Len() + res := make([]interface{}, 0) + for i := 0; i < aLen; i++ { + res = append(res, r.Index(i).Interface()) + } + return res +} + +func ParseJSON(a *string) interface{} { + mapTmp := make(map[string]interface{}) + d := json.NewDecoder(bytes.NewReader([]byte(tea.StringValue(a)))) + d.UseNumber() + err := d.Decode(&mapTmp) + if err == nil { + return mapTmp + } + + sliceTmp := make([]interface{}, 0) + d = json.NewDecoder(bytes.NewReader([]byte(tea.StringValue(a)))) + d.UseNumber() + err = d.Decode(&sliceTmp) + if err == nil { + return sliceTmp + } + + if num, err := strconv.Atoi(tea.StringValue(a)); err == nil { + return num + } + + if ok, err := strconv.ParseBool(tea.StringValue(a)); err == nil { + return ok + } + + if floa64tVal, err := strconv.ParseFloat(tea.StringValue(a), 64); err == nil { + return floa64tVal + } + return nil +} + +func ToString(a []byte) *string { + return tea.String(string(a)) +} + +func ToMap(in interface{}) map[string]interface{} { + if in == nil { + return nil + } + res := tea.ToMap(in) + return res +} + +func ToFormString(a map[string]interface{}) *string { + if a == nil { + return tea.String("") + } + res := "" + urlEncoder := url.Values{} + for key, value := range a { + v := fmt.Sprintf("%v", value) + urlEncoder.Add(key, v) + } + res = urlEncoder.Encode() + return tea.String(res) +} + +func GetDateUTCString() *string { + return tea.String(time.Now().UTC().Format(http.TimeFormat)) +} + +func GetUserAgent(userAgent *string) *string { + if userAgent != nil && tea.StringValue(userAgent) != "" { + return tea.String(defaultUserAgent + " " + tea.StringValue(userAgent)) + } + return tea.String(defaultUserAgent) +} + +func Is2xx(code *int) *bool { + tmp := tea.IntValue(code) + return 
tea.Bool(tmp >= 200 && tmp < 300) +} + +func Is3xx(code *int) *bool { + tmp := tea.IntValue(code) + return tea.Bool(tmp >= 300 && tmp < 400) +} + +func Is4xx(code *int) *bool { + tmp := tea.IntValue(code) + return tea.Bool(tmp >= 400 && tmp < 500) +} + +func Is5xx(code *int) *bool { + tmp := tea.IntValue(code) + return tea.Bool(tmp >= 500 && tmp < 600) +} + +func Sleep(millisecond *int) error { + ms := tea.IntValue(millisecond) + time.Sleep(time.Duration(ms) * time.Millisecond) + return nil +} + +func ToArray(in interface{}) []map[string]interface{} { + if tea.BoolValue(IsUnset(in)) { + return nil + } + + tmp := make([]map[string]interface{}, 0) + byt, _ := json.Marshal(in) + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + err := d.Decode(&tmp) + if err != nil { + return nil + } + return tmp +} diff --git a/vendor/github.com/alibabacloud-go/tea-utils/service/util.go b/vendor/github.com/alibabacloud-go/tea-utils/service/util.go new file mode 100644 index 000000000..a73cb5600 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea-utils/service/util.go @@ -0,0 +1,52 @@ +package service + +import ( + "crypto/md5" + "crypto/rand" + "encoding/hex" + "hash" + rand2 "math/rand" +) + +type UUID [16]byte + +const numBytes = "1234567890" + +func getUUID() (uuidHex string) { + uuid := newUUID() + uuidHex = hex.EncodeToString(uuid[:]) + return +} + +func randStringBytes(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = numBytes[rand2.Intn(len(numBytes))] + } + return string(b) +} + +func newUUID() UUID { + ns := UUID{} + safeRandom(ns[:]) + u := newFromHash(md5.New(), ns, randStringBytes(16)) + u[6] = (u[6] & 0x0f) | (byte(2) << 4) + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + + return u +} + +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +func safeRandom(dest []byte) { + if _, err := rand.Read(dest); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/alibabacloud-go/tea-xml/service/service.go b/vendor/github.com/alibabacloud-go/tea-xml/service/service.go new file mode 100644 index 000000000..33139c74b --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea-xml/service/service.go @@ -0,0 +1,105 @@ +package service + +import ( + "bytes" + "encoding/xml" + "fmt" + "reflect" + "strings" + + "github.com/alibabacloud-go/tea/tea" + v2 "github.com/clbanning/mxj/v2" +) + +func ToXML(obj map[string]interface{}) *string { + return tea.String(mapToXML(obj)) +} + +func ParseXml(val *string, result interface{}) map[string]interface{} { + resp := make(map[string]interface{}) + + start := getStartElement([]byte(tea.StringValue(val))) + if result == nil { + vm, err := v2.NewMapXml([]byte(tea.StringValue(val))) + if err != nil { + return nil + } + return vm + } + out, err := xmlUnmarshal([]byte(tea.StringValue(val)), result) + if err != nil { + return resp + } + resp[start] = out + return resp +} + +func mapToXML(val map[string]interface{}) string { + res := "" + for key, value := range val { + switch value.(type) { + case []interface{}: + for _, v := range value.([]interface{}) { + switch v.(type) { + case map[string]interface{}: + res += `<` + key + `>` + res += mapToXML(v.(map[string]interface{})) + res += `</` + key + `>` + default: + if fmt.Sprintf("%v", v) != `` { + res += `<` + key + `>` + res += fmt.Sprintf("%v", v) + res += `</` + key + `>` + } + } + } + case map[string]interface{}: + res += `<` + key + `>` + res += mapToXML(value.(map[string]interface{})) + res += `</` + key + `>` + default: + if
fmt.Sprintf("%v", value) != `` { + res += `<` + key + `>` + res += fmt.Sprintf("%v", value) + res += `</` + key + `>` + } + } + } + return res +} + +func getStartElement(body []byte) string { + d := xml.NewDecoder(bytes.NewReader(body)) + for { + tok, err := d.Token() + if err != nil { + return "" + } + if t, ok := tok.(xml.StartElement); ok { + return t.Name.Local + } + } +} + +func xmlUnmarshal(body []byte, result interface{}) (interface{}, error) { + start := getStartElement(body) + dataValue := reflect.ValueOf(result).Elem() + dataType := dataValue.Type() + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + name, containsNameTag := field.Tag.Lookup("xml") + name = strings.Replace(name, ",omitempty", "", -1) + if containsNameTag { + if name == start { + realType := dataValue.Field(i).Type() + realValue := reflect.New(realType).Interface() + err := xml.Unmarshal(body, realValue) + if err != nil { + return nil, err + } + return realValue, nil + } + } + } + return nil, nil +} diff --git a/vendor/github.com/alibabacloud-go/tea/LICENSE b/vendor/github.com/alibabacloud-go/tea/LICENSE new file mode 100644 index 000000000..0c44dcefe --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/alibabacloud-go/tea/tea/json_parser.go b/vendor/github.com/alibabacloud-go/tea/tea/json_parser.go new file mode 100644 index 000000000..b3f202243 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/tea/json_parser.go @@ -0,0 +1,333 @@ +package tea + +import ( + "encoding/json" + "io" + "math" + "reflect" + "strconv" + "strings" + "unsafe" + + jsoniter "github.com/json-iterator/go" + "github.com/modern-go/reflect2" +) + +const maxUint = ^uint(0) +const maxInt = int(maxUint >> 1) +const minInt = -maxInt - 1 + +var jsonParser jsoniter.API + +func init() { + jsonParser = jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + }.Froze() + + jsonParser.RegisterExtension(newBetterFuzzyExtension()) +} + +func newBetterFuzzyExtension() jsoniter.DecoderExtension { + return jsoniter.DecoderExtension{ + reflect2.DefaultTypeOfKind(reflect.String): &nullableFuzzyStringDecoder{}, + reflect2.DefaultTypeOfKind(reflect.Bool): &fuzzyBoolDecoder{}, + reflect2.DefaultTypeOfKind(reflect.Float32): &nullableFuzzyFloat32Decoder{}, + reflect2.DefaultTypeOfKind(reflect.Float64): &nullableFuzzyFloat64Decoder{}, + reflect2.DefaultTypeOfKind(reflect.Int): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(maxInt) || val < float64(minInt) { + iter.ReportError("fuzzy decode int", "exceed range") + return + } + *((*int)(ptr)) = int(val) + } else { + *((*int)(ptr)) = iter.ReadInt() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Uint): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(maxUint) || val < 0 { + iter.ReportError("fuzzy decode uint", "exceed range") + return + } + *((*uint)(ptr)) = uint(val) + } else { + *((*uint)(ptr)) = iter.ReadUint() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Int8): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt8) || val < float64(math.MinInt8) { + iter.ReportError("fuzzy decode int8", "exceed range") + return + } + *((*int8)(ptr)) = int8(val) + } else { + *((*int8)(ptr)) = iter.ReadInt8() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Uint8): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint8) || val < 0 { + iter.ReportError("fuzzy decode uint8", "exceed range") + return + } + *((*uint8)(ptr)) = uint8(val) + } else { + *((*uint8)(ptr)) = iter.ReadUint8() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Int16): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt16) || val < float64(math.MinInt16) { + iter.ReportError("fuzzy decode int16", "exceed range") + return + } + *((*int16)(ptr)) = int16(val) + } else { + *((*int16)(ptr)) = iter.ReadInt16() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Uint16): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint16) || val < 0 { + iter.ReportError("fuzzy decode uint16", "exceed range") + return + } + *((*uint16)(ptr)) = uint16(val) + } else { + *((*uint16)(ptr)) = 
iter.ReadUint16() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Int32): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt32) || val < float64(math.MinInt32) { + iter.ReportError("fuzzy decode int32", "exceed range") + return + } + *((*int32)(ptr)) = int32(val) + } else { + *((*int32)(ptr)) = iter.ReadInt32() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Uint32): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint32) || val < 0 { + iter.ReportError("fuzzy decode uint32", "exceed range") + return + } + *((*uint32)(ptr)) = uint32(val) + } else { + *((*uint32)(ptr)) = iter.ReadUint32() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Int64): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt64) || val < float64(math.MinInt64) { + iter.ReportError("fuzzy decode int64", "exceed range") + return + } + *((*int64)(ptr)) = int64(val) + } else { + *((*int64)(ptr)) = iter.ReadInt64() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Uint64): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint64) || val < 0 { + iter.ReportError("fuzzy decode uint64", "exceed range") + return + } + *((*uint64)(ptr)) = uint64(val) + } else { + *((*uint64)(ptr)) = iter.ReadUint64() + } + }}, + } +} + +type nullableFuzzyStringDecoder struct { +} + +func (decoder *nullableFuzzyStringDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + switch valueType { + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + *((*string)(ptr)) = string(number) + case jsoniter.StringValue: + *((*string)(ptr)) = iter.ReadString() + case jsoniter.BoolValue: + *((*string)(ptr)) = strconv.FormatBool(iter.ReadBool()) + case jsoniter.NilValue: + iter.ReadNil() + *((*string)(ptr)) = "" + default: + iter.ReportError("fuzzyStringDecoder", "not number or string or bool") + } +} + +type fuzzyBoolDecoder struct { +} + +func (decoder *fuzzyBoolDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + switch valueType { + case jsoniter.BoolValue: + *((*bool)(ptr)) = iter.ReadBool() + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + num, err := number.Int64() + if err != nil { + iter.ReportError("fuzzyBoolDecoder", "get value from json.number failed") + } + if num == 0 { + *((*bool)(ptr)) = false + } else { + *((*bool)(ptr)) = true + } + case jsoniter.StringValue: + strValue := strings.ToLower(iter.ReadString()) + if strValue == "true" { + *((*bool)(ptr)) = true + } else if strValue == "false" || strValue == "" { + *((*bool)(ptr)) = false + } else { + iter.ReportError("fuzzyBoolDecoder", "unsupported bool value: "+strValue) + } + case jsoniter.NilValue: + iter.ReadNil() + *((*bool)(ptr)) = false + default: + iter.ReportError("fuzzyBoolDecoder", "not number or string or nil") + } +} + +type nullableFuzzyIntegerDecoder struct { + fun func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) +} + +func (decoder *nullableFuzzyIntegerDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch 
valueType { + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + str = string(number) + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + str = "0" + } + case jsoniter.BoolValue: + if iter.ReadBool() { + str = "1" + } else { + str = "0" + } + case jsoniter.NilValue: + iter.ReadNil() + str = "0" + default: + iter.ReportError("fuzzyIntegerDecoder", "not number or string") + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + isFloat := strings.IndexByte(str, '.') != -1 + decoder.fun(isFloat, ptr, newIter) + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } +} + +type nullableFuzzyFloat32Decoder struct { +} + +func (decoder *nullableFuzzyFloat32Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + *((*float32)(ptr)) = iter.ReadFloat32() + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + *((*float32)(ptr)) = 0 + return + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + *((*float32)(ptr)) = newIter.ReadFloat32() + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } + case jsoniter.BoolValue: + // support bool to float32 + if iter.ReadBool() { + *((*float32)(ptr)) = 1 + } else { + *((*float32)(ptr)) = 0 + } + case jsoniter.NilValue: + iter.ReadNil() + *((*float32)(ptr)) = 0 + default: + iter.ReportError("nullableFuzzyFloat32Decoder", "not number or string") + } +} + +type nullableFuzzyFloat64Decoder struct { +} + +func (decoder *nullableFuzzyFloat64Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + *((*float64)(ptr)) = iter.ReadFloat64() + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + *((*float64)(ptr)) = 0 + return + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + *((*float64)(ptr)) = newIter.ReadFloat64() + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } + case jsoniter.BoolValue: + // support bool to float64 + if iter.ReadBool() { + *((*float64)(ptr)) = 1 + } else { + *((*float64)(ptr)) = 0 + } + case jsoniter.NilValue: + // support empty string + iter.ReadNil() + *((*float64)(ptr)) = 0 + default: + iter.ReportError("nullableFuzzyFloat64Decoder", "not number or string") + } +} diff --git a/vendor/github.com/alibabacloud-go/tea/tea/tea.go b/vendor/github.com/alibabacloud-go/tea/tea/tea.go new file mode 100644 index 000000000..4fbddd3cb --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/tea/tea.go @@ -0,0 +1,1140 @@ +package tea + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/alibabacloud-go/debug/debug" + "github.com/alibabacloud-go/tea/utils" + + "golang.org/x/net/proxy" +) + +var debugLog = debug.Init("tea") + +var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) { + return fn +} + +var basicTypes = []string{ + "int", "int16", "int64", "int32", 
"float32", "float64", "string", "bool", "uint64", "uint32", "uint16", +} + +// Verify whether the parameters meet the requirements +var validateParams = []string{"require", "pattern", "maxLength", "minLength", "maximum", "minimum", "maxItems", "minItems"} + +// CastError is used for cast type fails +type CastError struct { + Message *string +} + +// Request is used wrap http request +type Request struct { + Protocol *string + Port *int + Method *string + Pathname *string + Domain *string + Headers map[string]*string + Query map[string]*string + Body io.Reader +} + +// Response is use d wrap http response +type Response struct { + Body io.ReadCloser + StatusCode *int + StatusMessage *string + Headers map[string]*string +} + +// SDKError struct is used save error code and message +type SDKError struct { + Code *string + StatusCode *int + Message *string + Data *string + Stack *string + errMsg *string +} + +// RuntimeObject is used for converting http configuration +type RuntimeObject struct { + IgnoreSSL *bool `json:"ignoreSSL" xml:"ignoreSSL"` + ReadTimeout *int `json:"readTimeout" xml:"readTimeout"` + ConnectTimeout *int `json:"connectTimeout" xml:"connectTimeout"` + LocalAddr *string `json:"localAddr" xml:"localAddr"` + HttpProxy *string `json:"httpProxy" xml:"httpProxy"` + HttpsProxy *string `json:"httpsProxy" xml:"httpsProxy"` + NoProxy *string `json:"noProxy" xml:"noProxy"` + MaxIdleConns *int `json:"maxIdleConns" xml:"maxIdleConns"` + Key *string `json:"key" xml:"key"` + Cert *string `json:"cert" xml:"cert"` + CA *string `json:"ca" xml:"ca"` + Socks5Proxy *string `json:"socks5Proxy" xml:"socks5Proxy"` + Socks5NetWork *string `json:"socks5NetWork" xml:"socks5NetWork"` + Listener utils.ProgressListener `json:"listener" xml:"listener"` + Tracker *utils.ReaderTracker `json:"tracker" xml:"tracker"` + Logger *utils.Logger `json:"logger" xml:"logger"` +} + +type teaClient struct { + sync.Mutex + httpClient *http.Client + ifInit bool +} + +var clientPool = &sync.Map{} + +func (r *RuntimeObject) getClientTag(domain string) string { + return strconv.FormatBool(BoolValue(r.IgnoreSSL)) + strconv.Itoa(IntValue(r.ReadTimeout)) + + strconv.Itoa(IntValue(r.ConnectTimeout)) + StringValue(r.LocalAddr) + StringValue(r.HttpProxy) + + StringValue(r.HttpsProxy) + StringValue(r.NoProxy) + StringValue(r.Socks5Proxy) + StringValue(r.Socks5NetWork) + domain +} + +// NewRuntimeObject is used for shortly create runtime object +func NewRuntimeObject(runtime map[string]interface{}) *RuntimeObject { + if runtime == nil { + return &RuntimeObject{} + } + + runtimeObject := &RuntimeObject{ + IgnoreSSL: TransInterfaceToBool(runtime["ignoreSSL"]), + ReadTimeout: TransInterfaceToInt(runtime["readTimeout"]), + ConnectTimeout: TransInterfaceToInt(runtime["connectTimeout"]), + LocalAddr: TransInterfaceToString(runtime["localAddr"]), + HttpProxy: TransInterfaceToString(runtime["httpProxy"]), + HttpsProxy: TransInterfaceToString(runtime["httpsProxy"]), + NoProxy: TransInterfaceToString(runtime["noProxy"]), + MaxIdleConns: TransInterfaceToInt(runtime["maxIdleConns"]), + Socks5Proxy: TransInterfaceToString(runtime["socks5Proxy"]), + Socks5NetWork: TransInterfaceToString(runtime["socks5NetWork"]), + Key: TransInterfaceToString(runtime["key"]), + Cert: TransInterfaceToString(runtime["cert"]), + CA: TransInterfaceToString(runtime["ca"]), + } + if runtime["listener"] != nil { + runtimeObject.Listener = runtime["listener"].(utils.ProgressListener) + } + if runtime["tracker"] != nil { + runtimeObject.Tracker = 
runtime["tracker"].(*utils.ReaderTracker) + } + if runtime["logger"] != nil { + runtimeObject.Logger = runtime["logger"].(*utils.Logger) + } + return runtimeObject +} + +// NewCastError is used for cast type fails +func NewCastError(message *string) (err error) { + return &CastError{ + Message: message, + } +} + +// NewRequest is used shortly create Request +func NewRequest() (req *Request) { + return &Request{ + Headers: map[string]*string{}, + Query: map[string]*string{}, + } +} + +// NewResponse is create response with http response +func NewResponse(httpResponse *http.Response) (res *Response) { + res = &Response{} + res.Body = httpResponse.Body + res.Headers = make(map[string]*string) + res.StatusCode = Int(httpResponse.StatusCode) + res.StatusMessage = String(httpResponse.Status) + return +} + +// NewSDKError is used for shortly create SDKError object +func NewSDKError(obj map[string]interface{}) *SDKError { + err := &SDKError{} + if val, ok := obj["code"].(int); ok { + err.Code = String(strconv.Itoa(val)) + } else if val, ok := obj["code"].(string); ok { + err.Code = String(val) + } + + if obj["message"] != nil { + err.Message = String(obj["message"].(string)) + } + if data := obj["data"]; data != nil { + r := reflect.ValueOf(data) + if r.Kind().String() == "map" { + res := make(map[string]interface{}) + tmp := r.MapKeys() + for _, key := range tmp { + res[key.String()] = r.MapIndex(key).Interface() + } + if statusCode := res["statusCode"]; statusCode != nil { + if code, ok := statusCode.(int); ok { + err.StatusCode = Int(code) + } else if tmp, ok := statusCode.(string); ok { + code, err_ := strconv.Atoi(tmp) + if err_ == nil { + err.StatusCode = Int(code) + } + } + } + } + byt, _ := json.Marshal(data) + err.Data = String(string(byt)) + } + + if statusCode, ok := obj["statusCode"].(int); ok { + err.StatusCode = Int(statusCode) + } else if status, ok := obj["statusCode"].(string); ok { + statusCode, err_ := strconv.Atoi(status) + if err_ == nil { + err.StatusCode = Int(statusCode) + } + } + + return err +} + +// Set ErrMsg by msg +func (err *SDKError) SetErrMsg(msg string) { + err.errMsg = String(msg) +} + +func (err *SDKError) Error() string { + if err.errMsg == nil { + str := fmt.Sprintf("SDKError:\n StatusCode: %d\n Code: %s\n Message: %s\n Data: %s\n", + IntValue(err.StatusCode), StringValue(err.Code), StringValue(err.Message), StringValue(err.Data)) + err.SetErrMsg(str) + } + return StringValue(err.errMsg) +} + +// Return message of CastError +func (err *CastError) Error() string { + return StringValue(err.Message) +} + +// Convert is use convert map[string]interface object to struct +func Convert(in interface{}, out interface{}) error { + byt, _ := json.Marshal(in) + decoder := jsonParser.NewDecoder(bytes.NewReader(byt)) + decoder.UseNumber() + err := decoder.Decode(&out) + return err +} + +// Convert is use convert map[string]interface object to struct +func Recover(in interface{}) error { + if in == nil { + return nil + } + return errors.New(fmt.Sprint(in)) +} + +// ReadBody is used read response body +func (response *Response) ReadBody() (body []byte, err error) { + defer response.Body.Close() + var buffer [512]byte + result := bytes.NewBuffer(nil) + + for { + n, err := response.Body.Read(buffer[0:]) + result.Write(buffer[0:n]) + if err != nil && err == io.EOF { + break + } else if err != nil { + return nil, err + } + } + return result.Bytes(), nil +} + +func getTeaClient(tag string) *teaClient { + client, ok := clientPool.Load(tag) + if client == nil && !ok { + client = 
&teaClient{ + httpClient: &http.Client{}, + ifInit: false, + } + clientPool.Store(tag, client) + } + return client.(*teaClient) +} + +// DoRequest is used send request to server +func DoRequest(request *Request, requestRuntime map[string]interface{}) (response *Response, err error) { + runtimeObject := NewRuntimeObject(requestRuntime) + fieldMap := make(map[string]string) + utils.InitLogMsg(fieldMap) + defer func() { + if runtimeObject.Logger != nil { + runtimeObject.Logger.PrintLog(fieldMap, err) + } + }() + if request.Method == nil { + request.Method = String("GET") + } + + if request.Protocol == nil { + request.Protocol = String("http") + } else { + request.Protocol = String(strings.ToLower(StringValue(request.Protocol))) + } + + requestURL := "" + request.Domain = request.Headers["host"] + requestURL = fmt.Sprintf("%s://%s%s", StringValue(request.Protocol), StringValue(request.Domain), StringValue(request.Pathname)) + queryParams := request.Query + // sort QueryParams by key + q := url.Values{} + for key, value := range queryParams { + q.Add(key, StringValue(value)) + } + querystring := q.Encode() + if len(querystring) > 0 { + if strings.Contains(requestURL, "?") { + requestURL = fmt.Sprintf("%s&%s", requestURL, querystring) + } else { + requestURL = fmt.Sprintf("%s?%s", requestURL, querystring) + } + } + debugLog("> %s %s", StringValue(request.Method), requestURL) + + httpRequest, err := http.NewRequest(StringValue(request.Method), requestURL, request.Body) + if err != nil { + return + } + httpRequest.Host = StringValue(request.Domain) + + client := getTeaClient(runtimeObject.getClientTag(StringValue(request.Domain))) + client.Lock() + if !client.ifInit { + trans, err := getHttpTransport(request, runtimeObject) + if err != nil { + return nil, err + } + client.httpClient.Timeout = time.Duration(IntValue(runtimeObject.ReadTimeout)) * time.Millisecond + client.httpClient.Transport = trans + client.ifInit = true + } + client.Unlock() + for key, value := range request.Headers { + if value == nil || key == "content-length" { + continue + } else if key == "host" { + httpRequest.Header["Host"] = []string{*value} + delete(httpRequest.Header, "host") + } else if key == "user-agent" { + httpRequest.Header["User-Agent"] = []string{*value} + delete(httpRequest.Header, "user-agent") + } else { + httpRequest.Header[key] = []string{*value} + } + debugLog("> %s: %s", key, StringValue(value)) + } + contentlength, _ := strconv.Atoi(StringValue(request.Headers["content-length"])) + event := utils.NewProgressEvent(utils.TransferStartedEvent, 0, int64(contentlength), 0) + utils.PublishProgress(runtimeObject.Listener, event) + + putMsgToMap(fieldMap, httpRequest) + startTime := time.Now() + fieldMap["{start_time}"] = startTime.Format("2006-01-02 15:04:05") + res, err := hookDo(client.httpClient.Do)(httpRequest) + fieldMap["{cost}"] = time.Since(startTime).String() + completedBytes := int64(0) + if runtimeObject.Tracker != nil { + completedBytes = runtimeObject.Tracker.CompletedBytes + } + if err != nil { + event = utils.NewProgressEvent(utils.TransferFailedEvent, completedBytes, int64(contentlength), 0) + utils.PublishProgress(runtimeObject.Listener, event) + return + } + + event = utils.NewProgressEvent(utils.TransferCompletedEvent, completedBytes, int64(contentlength), 0) + utils.PublishProgress(runtimeObject.Listener, event) + + response = NewResponse(res) + fieldMap["{code}"] = strconv.Itoa(res.StatusCode) + fieldMap["{res_headers}"] = transToString(res.Header) + debugLog("< HTTP/1.1 %s", res.Status) + 
for key, value := range res.Header { + debugLog("< %s: %s", key, strings.Join(value, "")) + if len(value) != 0 { + response.Headers[strings.ToLower(key)] = String(value[0]) + } + } + return +} + +func getHttpTransport(req *Request, runtime *RuntimeObject) (*http.Transport, error) { + trans := new(http.Transport) + httpProxy, err := getHttpProxy(StringValue(req.Protocol), StringValue(req.Domain), runtime) + if err != nil { + return nil, err + } + if strings.ToLower(*req.Protocol) == "https" && + runtime.Key != nil && runtime.Cert != nil { + cert, err := tls.X509KeyPair([]byte(StringValue(runtime.Cert)), []byte(StringValue(runtime.Key))) + if err != nil { + return nil, err + } + + trans.TLSClientConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + InsecureSkipVerify: BoolValue(runtime.IgnoreSSL), + } + if runtime.CA != nil { + clientCertPool := x509.NewCertPool() + ok := clientCertPool.AppendCertsFromPEM([]byte(StringValue(runtime.CA))) + if !ok { + return nil, errors.New("Failed to parse root certificate") + } + trans.TLSClientConfig.RootCAs = clientCertPool + } + } else { + trans.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: BoolValue(runtime.IgnoreSSL), + } + } + if httpProxy != nil { + trans.Proxy = http.ProxyURL(httpProxy) + if httpProxy.User != nil { + password, _ := httpProxy.User.Password() + auth := httpProxy.User.Username() + ":" + password + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Headers["Proxy-Authorization"] = String(basic) + } + } + if runtime.Socks5Proxy != nil && StringValue(runtime.Socks5Proxy) != "" { + socks5Proxy, err := getSocks5Proxy(runtime) + if err != nil { + return nil, err + } + if socks5Proxy != nil { + var auth *proxy.Auth + if socks5Proxy.User != nil { + password, _ := socks5Proxy.User.Password() + auth = &proxy.Auth{ + User: socks5Proxy.User.Username(), + Password: password, + } + } + dialer, err := proxy.SOCKS5(strings.ToLower(StringValue(runtime.Socks5NetWork)), socks5Proxy.String(), auth, + &net.Dialer{ + Timeout: time.Duration(IntValue(runtime.ConnectTimeout)) * time.Millisecond, + DualStack: true, + LocalAddr: getLocalAddr(StringValue(runtime.LocalAddr)), + }) + if err != nil { + return nil, err + } + trans.Dial = dialer.Dial + } + } else { + trans.DialContext = setDialContext(runtime) + } + return trans, nil +} + +func transToString(object interface{}) string { + byt, _ := json.Marshal(object) + return string(byt) +} + +func putMsgToMap(fieldMap map[string]string, request *http.Request) { + fieldMap["{host}"] = request.Host + fieldMap["{method}"] = request.Method + fieldMap["{uri}"] = request.URL.RequestURI() + fieldMap["{pid}"] = strconv.Itoa(os.Getpid()) + fieldMap["{version}"] = strings.Split(request.Proto, "/")[1] + hostname, _ := os.Hostname() + fieldMap["{hostname}"] = hostname + fieldMap["{req_headers}"] = transToString(request.Header) + fieldMap["{target}"] = request.URL.Path + request.URL.RawQuery +} + +func getNoProxy(protocol string, runtime *RuntimeObject) []string { + var urls []string + if runtime.NoProxy != nil && StringValue(runtime.NoProxy) != "" { + urls = strings.Split(StringValue(runtime.NoProxy), ",") + } else if rawurl := os.Getenv("NO_PROXY"); rawurl != "" { + urls = strings.Split(rawurl, ",") + } else if rawurl := os.Getenv("no_proxy"); rawurl != "" { + urls = strings.Split(rawurl, ",") + } + + return urls +} + +func ToReader(obj interface{}) io.Reader { + switch obj.(type) { + case *string: + tmp := obj.(*string) + return strings.NewReader(StringValue(tmp)) + case []byte: + 
return strings.NewReader(string(obj.([]byte))) + case io.Reader: + return obj.(io.Reader) + default: + panic("Invalid Body. Please set a valid Body.") + } +} + +func ToString(val interface{}) string { + return fmt.Sprintf("%v", val) +} + +func getHttpProxy(protocol, host string, runtime *RuntimeObject) (proxy *url.URL, err error) { + urls := getNoProxy(protocol, runtime) + for _, url := range urls { + if url == host { + return nil, nil + } + } + if protocol == "https" { + if runtime.HttpsProxy != nil && StringValue(runtime.HttpsProxy) != "" { + proxy, err = url.Parse(StringValue(runtime.HttpsProxy)) + } else if rawurl := os.Getenv("HTTPS_PROXY"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } else if rawurl := os.Getenv("https_proxy"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } + } else { + if runtime.HttpProxy != nil && StringValue(runtime.HttpProxy) != "" { + proxy, err = url.Parse(StringValue(runtime.HttpProxy)) + } else if rawurl := os.Getenv("HTTP_PROXY"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } else if rawurl := os.Getenv("http_proxy"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } + } + + return proxy, err +} + +func getSocks5Proxy(runtime *RuntimeObject) (proxy *url.URL, err error) { + if runtime.Socks5Proxy != nil && StringValue(runtime.Socks5Proxy) != "" { + proxy, err = url.Parse(StringValue(runtime.Socks5Proxy)) + } + return proxy, err +} + +func getLocalAddr(localAddr string) (addr *net.TCPAddr) { + if localAddr != "" { + addr = &net.TCPAddr{ + IP: []byte(localAddr), + } + } + return addr +} + +func setDialContext(runtime *RuntimeObject) func(cxt context.Context, net, addr string) (c net.Conn, err error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + if runtime.LocalAddr != nil && StringValue(runtime.LocalAddr) != "" { + netAddr := &net.TCPAddr{ + IP: []byte(StringValue(runtime.LocalAddr)), + } + return (&net.Dialer{ + Timeout: time.Duration(IntValue(runtime.ConnectTimeout)) * time.Second, + DualStack: true, + LocalAddr: netAddr, + }).DialContext(ctx, network, address) + } + return (&net.Dialer{ + Timeout: time.Duration(IntValue(runtime.ConnectTimeout)) * time.Second, + DualStack: true, + }).DialContext(ctx, network, address) + } +} + +func ToObject(obj interface{}) map[string]interface{} { + result := make(map[string]interface{}) + byt, _ := json.Marshal(obj) + err := json.Unmarshal(byt, &result) + if err != nil { + return nil + } + return result +} + +func AllowRetry(retry interface{}, retryTimes *int) *bool { + if IntValue(retryTimes) == 0 { + return Bool(true) + } + retryMap, ok := retry.(map[string]interface{}) + if !ok { + return Bool(false) + } + retryable, ok := retryMap["retryable"].(bool) + if !ok || !retryable { + return Bool(false) + } + + maxAttempts, ok := retryMap["maxAttempts"].(int) + if !ok || maxAttempts < IntValue(retryTimes) { + return Bool(false) + } + return Bool(true) +} + +func Merge(args ...interface{}) map[string]*string { + finalArg := make(map[string]*string) + for _, obj := range args { + switch obj.(type) { + case map[string]*string: + arg := obj.(map[string]*string) + for key, value := range arg { + if value != nil { + finalArg[key] = value + } + } + default: + byt, _ := json.Marshal(obj) + arg := make(map[string]string) + err := json.Unmarshal(byt, &arg) + if err != nil { + return finalArg + } + for key, value := range arg { + if value != "" { + finalArg[key] = String(value) + } + } + } + } + + return finalArg +} + +func isNil(a interface{}) bool { + defer func() { + recover() 
+ }() + vi := reflect.ValueOf(a) + return vi.IsNil() +} + +func ToMap(args ...interface{}) map[string]interface{} { + isNotNil := false + finalArg := make(map[string]interface{}) + for _, obj := range args { + if obj == nil { + continue + } + + if isNil(obj) { + continue + } + isNotNil = true + + switch obj.(type) { + case map[string]*string: + arg := obj.(map[string]*string) + for key, value := range arg { + if value != nil { + finalArg[key] = StringValue(value) + } + } + case map[string]interface{}: + arg := obj.(map[string]interface{}) + for key, value := range arg { + if value != nil { + finalArg[key] = value + } + } + case *string: + str := obj.(*string) + arg := make(map[string]interface{}) + err := json.Unmarshal([]byte(StringValue(str)), &arg) + if err == nil { + for key, value := range arg { + if value != nil { + finalArg[key] = value + } + } + } + tmp := make(map[string]string) + err = json.Unmarshal([]byte(StringValue(str)), &tmp) + if err == nil { + for key, value := range arg { + if value != "" { + finalArg[key] = value + } + } + } + case []byte: + byt := obj.([]byte) + arg := make(map[string]interface{}) + err := json.Unmarshal(byt, &arg) + if err == nil { + for key, value := range arg { + if value != nil { + finalArg[key] = value + } + } + break + } + default: + val := reflect.ValueOf(obj) + res := structToMap(val) + for key, value := range res { + if value != nil { + finalArg[key] = value + } + } + } + } + + if !isNotNil { + return nil + } + return finalArg +} + +func structToMap(dataValue reflect.Value) map[string]interface{} { + out := make(map[string]interface{}) + if !dataValue.IsValid() { + return out + } + if dataValue.Kind().String() == "ptr" { + if dataValue.IsNil() { + return out + } + dataValue = dataValue.Elem() + } + if !dataValue.IsValid() { + return out + } + dataType := dataValue.Type() + if dataType.Kind().String() != "struct" { + return out + } + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + name, containsNameTag := field.Tag.Lookup("json") + if !containsNameTag { + name = field.Name + } else { + strs := strings.Split(name, ",") + name = strs[0] + } + fieldValue := dataValue.FieldByName(field.Name) + if !fieldValue.IsValid() || fieldValue.IsNil() { + continue + } + if field.Type.String() == "io.Reader" || field.Type.String() == "io.Writer" { + continue + } else if field.Type.Kind().String() == "struct" { + out[name] = structToMap(fieldValue) + } else if field.Type.Kind().String() == "ptr" && + field.Type.Elem().Kind().String() == "struct" { + if fieldValue.Elem().IsValid() { + out[name] = structToMap(fieldValue) + } + } else if field.Type.Kind().String() == "ptr" { + if fieldValue.IsValid() && !fieldValue.IsNil() { + out[name] = fieldValue.Elem().Interface() + } + } else if field.Type.Kind().String() == "slice" { + tmp := make([]interface{}, 0) + num := fieldValue.Len() + for i := 0; i < num; i++ { + value := fieldValue.Index(i) + if !value.IsValid() { + continue + } + if value.Type().Kind().String() == "ptr" && + value.Type().Elem().Kind().String() == "struct" { + if value.IsValid() && !value.IsNil() { + tmp = append(tmp, structToMap(value)) + } + } else if value.Type().Kind().String() == "struct" { + tmp = append(tmp, structToMap(value)) + } else if value.Type().Kind().String() == "ptr" { + if value.IsValid() && !value.IsNil() { + tmp = append(tmp, value.Elem().Interface()) + } + } else { + tmp = append(tmp, value.Interface()) + } + } + if len(tmp) > 0 { + out[name] = tmp + } + } else { + out[name] = fieldValue.Interface() + } 
+ + } + return out +} + +func Retryable(err error) *bool { + if err == nil { + return Bool(false) + } + if realErr, ok := err.(*SDKError); ok { + if realErr.StatusCode == nil { + return Bool(false) + } + code := IntValue(realErr.StatusCode) + return Bool(code >= http.StatusInternalServerError) + } + return Bool(true) +} + +func GetBackoffTime(backoff interface{}, retrytimes *int) *int { + backoffMap, ok := backoff.(map[string]interface{}) + if !ok { + return Int(0) + } + policy, ok := backoffMap["policy"].(string) + if !ok || policy == "no" { + return Int(0) + } + + period, ok := backoffMap["period"].(int) + if !ok || period == 0 { + return Int(0) + } + + maxTime := math.Pow(2.0, float64(IntValue(retrytimes))) + return Int(rand.Intn(int(maxTime-1)) * period) +} + +func Sleep(backoffTime *int) { + sleeptime := time.Duration(IntValue(backoffTime)) * time.Second + time.Sleep(sleeptime) +} + +func Validate(params interface{}) error { + if params == nil { + return nil + } + requestValue := reflect.ValueOf(params) + if requestValue.IsNil() { + return nil + } + err := validate(requestValue.Elem()) + return err +} + +// Verify whether the parameters meet the requirements +func validate(dataValue reflect.Value) error { + if strings.HasPrefix(dataValue.Type().String(), "*") { // Determines whether the input is a structure object or a pointer object + if dataValue.IsNil() { + return nil + } + dataValue = dataValue.Elem() + } + dataType := dataValue.Type() + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + valueField := dataValue.Field(i) + for _, value := range validateParams { + err := validateParam(field, valueField, value) + if err != nil { + return err + } + } + } + return nil +} + +func validateParam(field reflect.StructField, valueField reflect.Value, tagName string) error { + tag, containsTag := field.Tag.Lookup(tagName) // Take out the checked regular expression + if containsTag && tagName == "require" { + err := checkRequire(field, valueField) + if err != nil { + return err + } + } + if strings.HasPrefix(field.Type.String(), "[]") { // Verify the parameters of the array type + err := validateSlice(field, valueField, containsTag, tag, tagName) + if err != nil { + return err + } + } else if valueField.Kind() == reflect.Ptr { // Determines whether it is a pointer object + err := validatePtr(field, valueField, containsTag, tag, tagName) + if err != nil { + return err + } + } + return nil +} + +func validateSlice(field reflect.StructField, valueField reflect.Value, containsregexpTag bool, tag, tagName string) error { + if valueField.IsValid() && !valueField.IsNil() { // Determines whether the parameter has a value + if containsregexpTag { + if tagName == "maxItems" { + err := checkMaxItems(field, valueField, tag) + if err != nil { + return err + } + } + + if tagName == "minItems" { + err := checkMinItems(field, valueField, tag) + if err != nil { + return err + } + } + } + + for m := 0; m < valueField.Len(); m++ { + elementValue := valueField.Index(m) + if elementValue.Type().Kind() == reflect.Ptr { // Determines whether the child elements of an array are of a basic type + err := validatePtr(field, elementValue, containsregexpTag, tag, tagName) + if err != nil { + return err + } + } + } + } + return nil +} + +func validatePtr(field reflect.StructField, elementValue reflect.Value, containsregexpTag bool, tag, tagName string) error { + if elementValue.IsNil() { + return nil + } + if isFilterType(elementValue.Elem().Type().String(), basicTypes) { + if containsregexpTag { + if 
tagName == "pattern" { + err := checkPattern(field, elementValue.Elem(), tag) + if err != nil { + return err + } + } + + if tagName == "maxLength" { + err := checkMaxLength(field, elementValue.Elem(), tag) + if err != nil { + return err + } + } + + if tagName == "minLength" { + err := checkMinLength(field, elementValue.Elem(), tag) + if err != nil { + return err + } + } + + if tagName == "maximum" { + err := checkMaximum(field, elementValue.Elem(), tag) + if err != nil { + return err + } + } + + if tagName == "minimum" { + err := checkMinimum(field, elementValue.Elem(), tag) + if err != nil { + return err + } + } + } + } else { + err := validate(elementValue) + if err != nil { + return err + } + } + return nil +} + +func checkRequire(field reflect.StructField, valueField reflect.Value) error { + name, _ := field.Tag.Lookup("json") + strs := strings.Split(name, ",") + name = strs[0] + if !valueField.IsNil() && valueField.IsValid() { + return nil + } + return errors.New(name + " should be setted") +} + +func checkPattern(field reflect.StructField, valueField reflect.Value, tag string) error { + if valueField.IsValid() && valueField.String() != "" { + value := valueField.String() + r, _ := regexp.Compile("^" + tag + "$") + if match := r.MatchString(value); !match { // Determines whether the parameter value satisfies the regular expression or not, and throws an error + return errors.New(value + " is not matched " + tag) + } + } + return nil +} + +func checkMaxItems(field reflect.StructField, valueField reflect.Value, tag string) error { + if valueField.IsValid() && valueField.String() != "" { + maxItems, err := strconv.Atoi(tag) + if err != nil { + return err + } + length := valueField.Len() + if maxItems < length { + errMsg := fmt.Sprintf("The length of %s is %d which is more than %d", field.Name, length, maxItems) + return errors.New(errMsg) + } + } + return nil +} + +func checkMinItems(field reflect.StructField, valueField reflect.Value, tag string) error { + if valueField.IsValid() { + minItems, err := strconv.Atoi(tag) + if err != nil { + return err + } + length := valueField.Len() + if minItems > length { + errMsg := fmt.Sprintf("The length of %s is %d which is less than %d", field.Name, length, minItems) + return errors.New(errMsg) + } + } + return nil +} + +func checkMaxLength(field reflect.StructField, valueField reflect.Value, tag string) error { + if valueField.IsValid() && valueField.String() != "" { + maxLength, err := strconv.Atoi(tag) + if err != nil { + return err + } + length := valueField.Len() + if valueField.Kind().String() == "string" { + length = strings.Count(valueField.String(), "") - 1 + } + if maxLength < length { + errMsg := fmt.Sprintf("The length of %s is %d which is more than %d", field.Name, length, maxLength) + return errors.New(errMsg) + } + } + return nil +} + +func checkMinLength(field reflect.StructField, valueField reflect.Value, tag string) error { + if valueField.IsValid() { + minLength, err := strconv.Atoi(tag) + if err != nil { + return err + } + length := valueField.Len() + if valueField.Kind().String() == "string" { + length = strings.Count(valueField.String(), "") - 1 + } + if minLength > length { + errMsg := fmt.Sprintf("The length of %s is %d which is less than %d", field.Name, length, minLength) + return errors.New(errMsg) + } + } + return nil +} + +func checkMaximum(field reflect.StructField, valueField reflect.Value, tag string) error { + if valueField.IsValid() && valueField.String() != "" { + maximum, err := strconv.ParseFloat(tag, 64) + if 
err != nil { + return err + } + byt, _ := json.Marshal(valueField.Interface()) + num, err := strconv.ParseFloat(string(byt), 64) + if err != nil { + return err + } + if maximum < num { + errMsg := fmt.Sprintf("The size of %s is %f which is greater than %f", field.Name, num, maximum) + return errors.New(errMsg) + } + } + return nil +} + +func checkMinimum(field reflect.StructField, valueField reflect.Value, tag string) error { + if valueField.IsValid() && valueField.String() != "" { + minimum, err := strconv.ParseFloat(tag, 64) + if err != nil { + return err + } + + byt, _ := json.Marshal(valueField.Interface()) + num, err := strconv.ParseFloat(string(byt), 64) + if err != nil { + return err + } + if minimum > num { + errMsg := fmt.Sprintf("The size of %s is %f which is less than %f", field.Name, num, minimum) + return errors.New(errMsg) + } + } + return nil +} + +// Determines whether realType is in filterTypes +func isFilterType(realType string, filterTypes []string) bool { + for _, value := range filterTypes { + if value == realType { + return true + } + } + return false +} + +func TransInterfaceToBool(val interface{}) *bool { + if val == nil { + return nil + } + + return Bool(val.(bool)) +} + +func TransInterfaceToInt(val interface{}) *int { + if val == nil { + return nil + } + + return Int(val.(int)) +} + +func TransInterfaceToString(val interface{}) *string { + if val == nil { + return nil + } + + return String(val.(string)) +} + +func Prettify(i interface{}) string { + resp, _ := json.MarshalIndent(i, "", " ") + return string(resp) +} + +func ToInt(a *int32) *int { + return Int(int(Int32Value(a))) +} + +func ToInt32(a *int) *int32 { + return Int32(int32(IntValue(a))) +} diff --git a/vendor/github.com/alibabacloud-go/tea/tea/trans.go b/vendor/github.com/alibabacloud-go/tea/tea/trans.go new file mode 100644 index 000000000..ded1642fa --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/tea/trans.go @@ -0,0 +1,491 @@ +package tea + +func String(a string) *string { + return &a +} + +func StringValue(a *string) string { + if a == nil { + return "" + } + return *a +} + +func Int(a int) *int { + return &a +} + +func IntValue(a *int) int { + if a == nil { + return 0 + } + return *a +} + +func Int8(a int8) *int8 { + return &a +} + +func Int8Value(a *int8) int8 { + if a == nil { + return 0 + } + return *a +} + +func Int16(a int16) *int16 { + return &a +} + +func Int16Value(a *int16) int16 { + if a == nil { + return 0 + } + return *a +} + +func Int32(a int32) *int32 { + return &a +} + +func Int32Value(a *int32) int32 { + if a == nil { + return 0 + } + return *a +} + +func Int64(a int64) *int64 { + return &a +} + +func Int64Value(a *int64) int64 { + if a == nil { + return 0 + } + return *a +} + +func Bool(a bool) *bool { + return &a +} + +func BoolValue(a *bool) bool { + if a == nil { + return false + } + return *a +} + +func Uint(a uint) *uint { + return &a +} + +func UintValue(a *uint) uint { + if a == nil { + return 0 + } + return *a +} + +func Uint8(a uint8) *uint8 { + return &a +} + +func Uint8Value(a *uint8) uint8 { + if a == nil { + return 0 + } + return *a +} + +func Uint16(a uint16) *uint16 { + return &a +} + +func Uint16Value(a *uint16) uint16 { + if a == nil { + return 0 + } + return *a +} + +func Uint32(a uint32) *uint32 { + return &a +} + +func Uint32Value(a *uint32) uint32 { + if a == nil { + return 0 + } + return *a +} + +func Uint64(a uint64) *uint64 { + return &a +} + +func Uint64Value(a *uint64) uint64 { + if a == nil { + return 0 + } + return *a +} + +func Float32(a 
float32) *float32 { + return &a +} + +func Float32Value(a *float32) float32 { + if a == nil { + return 0 + } + return *a +} + +func Float64(a float64) *float64 { + return &a +} + +func Float64Value(a *float64) float64 { + if a == nil { + return 0 + } + return *a +} + +func IntSlice(a []int) []*int { + if a == nil { + return nil + } + res := make([]*int, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func IntValueSlice(a []*int) []int { + if a == nil { + return nil + } + res := make([]int, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Int8Slice(a []int8) []*int8 { + if a == nil { + return nil + } + res := make([]*int8, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Int8ValueSlice(a []*int8) []int8 { + if a == nil { + return nil + } + res := make([]int8, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Int16Slice(a []int16) []*int16 { + if a == nil { + return nil + } + res := make([]*int16, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Int16ValueSlice(a []*int16) []int16 { + if a == nil { + return nil + } + res := make([]int16, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Int32Slice(a []int32) []*int32 { + if a == nil { + return nil + } + res := make([]*int32, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Int32ValueSlice(a []*int32) []int32 { + if a == nil { + return nil + } + res := make([]int32, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Int64Slice(a []int64) []*int64 { + if a == nil { + return nil + } + res := make([]*int64, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Int64ValueSlice(a []*int64) []int64 { + if a == nil { + return nil + } + res := make([]int64, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func UintSlice(a []uint) []*uint { + if a == nil { + return nil + } + res := make([]*uint, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func UintValueSlice(a []*uint) []uint { + if a == nil { + return nil + } + res := make([]uint, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Uint8Slice(a []uint8) []*uint8 { + if a == nil { + return nil + } + res := make([]*uint8, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Uint8ValueSlice(a []*uint8) []uint8 { + if a == nil { + return nil + } + res := make([]uint8, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Uint16Slice(a []uint16) []*uint16 { + if a == nil { + return nil + } + res := make([]*uint16, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Uint16ValueSlice(a []*uint16) []uint16 { + if a == nil { + return nil + } + res := make([]uint16, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Uint32Slice(a []uint32) []*uint32 { + if a == nil { + return nil + } + res := make([]*uint32, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Uint32ValueSlice(a []*uint32) []uint32 { + if a == nil { + return nil + } + res := make([]uint32, len(a)) + 
for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Uint64Slice(a []uint64) []*uint64 { + if a == nil { + return nil + } + res := make([]*uint64, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Uint64ValueSlice(a []*uint64) []uint64 { + if a == nil { + return nil + } + res := make([]uint64, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Float32Slice(a []float32) []*float32 { + if a == nil { + return nil + } + res := make([]*float32, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Float32ValueSlice(a []*float32) []float32 { + if a == nil { + return nil + } + res := make([]float32, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Float64Slice(a []float64) []*float64 { + if a == nil { + return nil + } + res := make([]*float64, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Float64ValueSlice(a []*float64) []float64 { + if a == nil { + return nil + } + res := make([]float64, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func StringSlice(a []string) []*string { + if a == nil { + return nil + } + res := make([]*string, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func StringSliceValue(a []*string) []string { + if a == nil { + return nil + } + res := make([]string, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func BoolSlice(a []bool) []*bool { + if a == nil { + return nil + } + res := make([]*bool, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func BoolSliceValue(a []*bool) []bool { + if a == nil { + return nil + } + res := make([]bool, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} diff --git a/vendor/github.com/alibabacloud-go/tea/utils/assert.go b/vendor/github.com/alibabacloud-go/tea/utils/assert.go new file mode 100644 index 000000000..7ae677501 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/utils/assert.go @@ -0,0 +1,64 @@ +package utils + +import ( + "reflect" + "strings" + "testing" +) + +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + isNilableKind := containsKind( + []reflect.Kind{ + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice}, + kind) + + if isNilableKind && value.IsNil() { + return true + } + + return false +} + +func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { + for i := 0; i < len(kinds); i++ { + if kind == kinds[i] { + return true + } + } + + return false +} + +func AssertEqual(t *testing.T, a, b interface{}) { + if !reflect.DeepEqual(a, b) { + t.Errorf("%v != %v", a, b) + } +} + +func AssertNil(t *testing.T, object interface{}) { + if !isNil(object) { + t.Errorf("%v is not nil", object) + } +} + +func AssertNotNil(t *testing.T, object interface{}) { + if isNil(object) { + t.Errorf("%v is nil", object) + } +} + +func AssertContains(t *testing.T, contains string, msgAndArgs ...string) { + for _, value := range msgAndArgs { + if ok := strings.Contains(contains, value); !ok { + t.Errorf("%s does not contain %s", contains, value) + } + } +} diff --git a/vendor/github.com/alibabacloud-go/tea/utils/logger.go 
b/vendor/github.com/alibabacloud-go/tea/utils/logger.go new file mode 100644 index 000000000..051366887 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/utils/logger.go @@ -0,0 +1,109 @@ +package utils + +import ( + "io" + "log" + "strings" + "time" +) + +type Logger struct { + *log.Logger + formatTemplate string + isOpen bool + lastLogMsg string +} + +var defaultLoggerTemplate = `{time} {channel}: "{method} {uri} HTTP/{version}" {code} {cost} {hostname}` +var loggerParam = []string{"{time}", "{start_time}", "{ts}", "{channel}", "{pid}", "{host}", "{method}", "{uri}", "{version}", "{target}", "{hostname}", "{code}", "{error}", "{req_headers}", "{res_body}", "{res_headers}", "{cost}"} +var logChannel string + +func InitLogMsg(fieldMap map[string]string) { + for _, value := range loggerParam { + fieldMap[value] = "" + } +} + +func (logger *Logger) SetFormatTemplate(template string) { + logger.formatTemplate = template + +} + +func (logger *Logger) GetFormatTemplate() string { + return logger.formatTemplate + +} + +func NewLogger(level string, channel string, out io.Writer, template string) *Logger { + if level == "" { + level = "info" + } + + logChannel = "AlibabaCloud" + if channel != "" { + logChannel = channel + } + log := log.New(out, "["+strings.ToUpper(level)+"]", log.Lshortfile) + if template == "" { + template = defaultLoggerTemplate + } + + return &Logger{ + Logger: log, + formatTemplate: template, + isOpen: true, + } +} + +func (logger *Logger) OpenLogger() { + logger.isOpen = true +} + +func (logger *Logger) CloseLogger() { + logger.isOpen = false +} + +func (logger *Logger) SetIsopen(isopen bool) { + logger.isOpen = isopen +} + +func (logger *Logger) GetIsopen() bool { + return logger.isOpen +} + +func (logger *Logger) SetLastLogMsg(lastLogMsg string) { + logger.lastLogMsg = lastLogMsg +} + +func (logger *Logger) GetLastLogMsg() string { + return logger.lastLogMsg +} + +func SetLogChannel(channel string) { + logChannel = channel +} + +func (logger *Logger) PrintLog(fieldMap map[string]string, err error) { + if err != nil { + fieldMap["{error}"] = err.Error() + } + fieldMap["{time}"] = time.Now().Format("2006-01-02 15:04:05") + fieldMap["{ts}"] = getTimeInFormatISO8601() + fieldMap["{channel}"] = logChannel + if logger != nil { + logMsg := logger.formatTemplate + for key, value := range fieldMap { + logMsg = strings.Replace(logMsg, key, value, -1) + } + logger.lastLogMsg = logMsg + if logger.isOpen { + logger.Output(2, logMsg) + } + } +} + +func getTimeInFormatISO8601() (timeStr string) { + gmt := time.FixedZone("GMT", 0) + + return time.Now().In(gmt).Format("2006-01-02T15:04:05Z") +} diff --git a/vendor/github.com/alibabacloud-go/tea/utils/progress.go b/vendor/github.com/alibabacloud-go/tea/utils/progress.go new file mode 100644 index 000000000..2f5364aea --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/utils/progress.go @@ -0,0 +1,60 @@ +package utils + +// ProgressEventType defines transfer progress event type +type ProgressEventType int + +const ( + // TransferStartedEvent transfer started, set TotalBytes + TransferStartedEvent ProgressEventType = 1 + iota + // TransferDataEvent transfer data, set ConsumedBytes and TotalBytes + TransferDataEvent + // TransferCompletedEvent transfer completed + TransferCompletedEvent + // TransferFailedEvent transfer encounters an error + TransferFailedEvent +) + +// ProgressEvent defines progress event +type ProgressEvent struct { + ConsumedBytes int64 + TotalBytes int64 + RwBytes int64 + EventType ProgressEventType
+} + +// ProgressListener listens progress change +type ProgressListener interface { + ProgressChanged(event *ProgressEvent) +} + +// -------------------- Private -------------------- + +func NewProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent { + return &ProgressEvent{ + ConsumedBytes: consumed, + TotalBytes: total, + RwBytes: rwBytes, + EventType: eventType} +} + +// publishProgress +func PublishProgress(listener ProgressListener, event *ProgressEvent) { + if listener != nil && event != nil { + listener.ProgressChanged(event) + } +} + +func GetProgressListener(obj interface{}) ProgressListener { + if obj == nil { + return nil + } + listener, ok := obj.(ProgressListener) + if !ok { + return nil + } + return listener +} + +type ReaderTracker struct { + CompletedBytes int64 +} diff --git a/vendor/github.com/aliyun/credentials-go/LICENSE b/vendor/github.com/aliyun/credentials-go/LICENSE new file mode 100644 index 000000000..0c44dcefe --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
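The tea package vendored above is built around typed pointer helpers: for every scalar type T there is a constructor T(v) *T and a nil-safe accessor TValue(p *T) T that falls back to the zero value, which is how the SDK structs distinguish "unset" from a genuine zero. The following is a minimal illustrative sketch of that pattern, not part of the vendored diff; it uses only functions defined in trans.go above.

package main

import (
	"fmt"

	"github.com/alibabacloud-go/tea/tea"
)

func main() {
	// Constructors lift plain values into pointers, so a nil field can mean "not set".
	name := tea.String("default")

	// Value accessors are nil-safe and fall back to the type's zero value.
	var missing *string
	fmt.Println(tea.StringValue(name))    // prints "default"
	fmt.Println(tea.StringValue(missing)) // prints ""

	// Slice helpers convert element-wise; nil slices stay nil.
	ids := tea.IntSlice([]int{1, 2, 3}) // []*int
	fmt.Println(tea.IntValueSlice(ids)) // prints [1 2 3]
}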
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go new file mode 100644 index 000000000..7bcaa9740 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go @@ -0,0 +1,41 @@ +package credentials + +import "github.com/alibabacloud-go/tea/tea" + +// AccessKeyCredential is a kind of credential +type AccessKeyCredential struct { + AccessKeyId string + AccessKeySecret string +} + +func newAccessKeyCredential(accessKeyId, accessKeySecret string) *AccessKeyCredential { + return &AccessKeyCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + } +} + +// GetAccessKeyId returns AccessKeyCredential's AccessKeyId +func (a *AccessKeyCredential) GetAccessKeyId() (*string, error) { + return tea.String(a.AccessKeyId), nil +} + +// GetAccessKeySecret returns AccessKeyCredential's AccessKeySecret +func (a *AccessKeyCredential) GetAccessKeySecret() (*string, error) { + return tea.String(a.AccessKeySecret), nil +} + +// GetSecurityToken is useless for AccessKeyCredential +func (a *AccessKeyCredential) GetSecurityToken() (*string, error) { + return tea.String(""), nil +} + +// GetBearerToken is useless for AccessKeyCredential +func (a *AccessKeyCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType returns AccessKeyCredential's type +func (a *AccessKeyCredential) GetType() *string { + return tea.String("access_key") +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go new file mode 100644 index 000000000..cca291621 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go @@ -0,0 +1,40 @@ +package credentials + +import "github.com/alibabacloud-go/tea/tea" + +// BearerTokenCredential is a kind of credential +type BearerTokenCredential struct { + BearerToken string +} + +// newBearerTokenCredential returns a BearerTokenCredential object +func newBearerTokenCredential(token string) *BearerTokenCredential { + return &BearerTokenCredential{ + BearerToken: token, + } +} + +// GetAccessKeyId is useless for BearerTokenCredential +func (b *BearerTokenCredential) GetAccessKeyId() (*string, error) { + return tea.String(""), nil +} + +// GetAccessKeySecret is useless for BearerTokenCredential +func (b *BearerTokenCredential) GetAccessKeySecret() (*string, error) { + return tea.String(""), nil +} + +// GetSecurityToken is useless for BearerTokenCredential +func (b *BearerTokenCredential) GetSecurityToken() (*string, error) { + return tea.String(""), nil +} + +// GetBearerToken returns BearerTokenCredential's BearerToken +func (b *BearerTokenCredential) GetBearerToken() *string { + return tea.String(b.BearerToken) +} + +// GetType returns BearerTokenCredential's type +func (b *BearerTokenCredential) GetType() *string { + return tea.String("bearer") +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/credential.go b/vendor/github.com/aliyun/credentials-go/credentials/credential.go new file mode 100644 index 000000000..62e647541 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/credential.go @@ -0,0 +1,401 @@ +package credentials + +import ( + "bufio" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/alibabacloud-go/debug/debug" + "github.com/alibabacloud-go/tea/tea" +
"github.com/aliyun/credentials-go/credentials/request" + "github.com/aliyun/credentials-go/credentials/response" + "github.com/aliyun/credentials-go/credentials/utils" +) + +var debuglog = debug.Init("credential") + +var hookParse = func(err error) error { + return err +} + +// Credential is an interface for getting actual credential +type Credential interface { + GetAccessKeyId() (*string, error) + GetAccessKeySecret() (*string, error) + GetSecurityToken() (*string, error) + GetBearerToken() *string + GetType() *string +} + +// Config is important when call NewCredential +type Config struct { + Type *string `json:"type"` + AccessKeyId *string `json:"access_key_id"` + AccessKeySecret *string `json:"access_key_secret"` + OIDCProviderArn *string `json:"oidc_provider_arn"` + OIDCTokenFilePath *string `json:"oidc_token"` + RoleArn *string `json:"role_arn"` + RoleSessionName *string `json:"role_session_name"` + PublicKeyId *string `json:"public_key_id"` + RoleName *string `json:"role_name"` + SessionExpiration *int `json:"session_expiration"` + PrivateKeyFile *string `json:"private_key_file"` + BearerToken *string `json:"bearer_token"` + SecurityToken *string `json:"security_token"` + RoleSessionExpiration *int `json:"role_session_expiratioon"` + Policy *string `json:"policy"` + Host *string `json:"host"` + Timeout *int `json:"timeout"` + ConnectTimeout *int `json:"connect_timeout"` + Proxy *string `json:"proxy"` + InAdvanceScale *float64 `json:"inAdvanceScale"` + Url *string `json:"url"` +} + +func (s Config) String() string { + return tea.Prettify(s) +} + +func (s Config) GoString() string { + return s.String() +} + +func (s *Config) SetAccessKeyId(v string) *Config { + s.AccessKeyId = &v + return s +} + +func (s *Config) SetAccessKeySecret(v string) *Config { + s.AccessKeySecret = &v + return s +} + +func (s *Config) SetSecurityToken(v string) *Config { + s.SecurityToken = &v + return s +} + +func (s *Config) SetRoleArn(v string) *Config { + s.RoleArn = &v + return s +} + +func (s *Config) SetRoleSessionName(v string) *Config { + s.RoleSessionName = &v + return s +} + +func (s *Config) SetPublicKeyId(v string) *Config { + s.PublicKeyId = &v + return s +} + +func (s *Config) SetRoleName(v string) *Config { + s.RoleName = &v + return s +} + +func (s *Config) SetSessionExpiration(v int) *Config { + s.SessionExpiration = &v + return s +} + +func (s *Config) SetPrivateKeyFile(v string) *Config { + s.PrivateKeyFile = &v + return s +} + +func (s *Config) SetBearerToken(v string) *Config { + s.BearerToken = &v + return s +} + +func (s *Config) SetRoleSessionExpiration(v int) *Config { + s.RoleSessionExpiration = &v + return s +} + +func (s *Config) SetPolicy(v string) *Config { + s.Policy = &v + return s +} + +func (s *Config) SetHost(v string) *Config { + s.Host = &v + return s +} + +func (s *Config) SetTimeout(v int) *Config { + s.Timeout = &v + return s +} + +func (s *Config) SetConnectTimeout(v int) *Config { + s.ConnectTimeout = &v + return s +} + +func (s *Config) SetProxy(v string) *Config { + s.Proxy = &v + return s +} + +func (s *Config) SetType(v string) *Config { + s.Type = &v + return s +} + +func (s *Config) SetOIDCTokenFilePath(v string) *Config { + s.OIDCTokenFilePath = &v + return s +} + +func (s *Config) SetOIDCProviderArn(v string) *Config { + s.OIDCProviderArn = &v + return s +} + +func (s *Config) SetURLCredential(v string) *Config { + if v == "" { + v = os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI") + } + s.Url = &v + return s +} + +// NewCredential return a credential according to 
the type in config. +// if config is nil, the function will use default provider chain to get credential. +// please see README.md for detail. +func NewCredential(config *Config) (credential Credential, err error) { + if config == nil { + config, err = defaultChain.resolve() + if err != nil { + return + } + return NewCredential(config) + } + switch tea.StringValue(config.Type) { + case "credentials_uri": + credential = newURLCredential(tea.StringValue(config.Url)) + case "oidc_role_arn": + err = checkoutAssumeRamoidc(config) + if err != nil { + return + } + runtime := &utils.Runtime{ + Host: tea.StringValue(config.Host), + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + } + credential = newOIDCRoleArnCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.RoleArn), tea.StringValue(config.OIDCProviderArn), tea.StringValue(config.OIDCTokenFilePath), tea.StringValue(config.RoleSessionName), tea.StringValue(config.Policy), tea.IntValue(config.RoleSessionExpiration), runtime) + case "access_key": + err = checkAccessKey(config) + if err != nil { + return + } + credential = newAccessKeyCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret)) + case "sts": + err = checkSTS(config) + if err != nil { + return + } + credential = newStsTokenCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.SecurityToken)) + case "ecs_ram_role": + checkEcsRAMRole(config) + runtime := &utils.Runtime{ + Host: tea.StringValue(config.Host), + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + } + credential = newEcsRAMRoleCredential(tea.StringValue(config.RoleName), tea.Float64Value(config.InAdvanceScale), runtime) + case "ram_role_arn": + err = checkRAMRoleArn(config) + if err != nil { + return + } + runtime := &utils.Runtime{ + Host: tea.StringValue(config.Host), + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + } + credential = newRAMRoleArnCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.RoleArn), tea.StringValue(config.RoleSessionName), tea.StringValue(config.Policy), tea.IntValue(config.RoleSessionExpiration), runtime) + case "rsa_key_pair": + err = checkRSAKeyPair(config) + if err != nil { + return + } + file, err1 := os.Open(tea.StringValue(config.PrivateKeyFile)) + if err1 != nil { + err = fmt.Errorf("InvalidPath: Can not open PrivateKeyFile, err is %s", err1.Error()) + return + } + defer file.Close() + var privateKey string + scan := bufio.NewScanner(file) + for scan.Scan() { + if strings.HasPrefix(scan.Text(), "----") { + continue + } + privateKey += scan.Text() + "\n" + } + runtime := &utils.Runtime{ + Host: tea.StringValue(config.Host), + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + } + credential = newRsaKeyPairCredential(privateKey, tea.StringValue(config.PublicKeyId), tea.IntValue(config.SessionExpiration), runtime) + case "bearer": + if tea.StringValue(config.BearerToken) == "" { + err = errors.New("BearerToken cannot be empty") + return + } + credential = newBearerTokenCredential(tea.StringValue(config.BearerToken)) + default: + err = 
errors.New("Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair") + return + } + return credential, nil +} + +func checkRSAKeyPair(config *Config) (err error) { + if tea.StringValue(config.PrivateKeyFile) == "" { + err = errors.New("PrivateKeyFile cannot be empty") + return + } + if tea.StringValue(config.PublicKeyId) == "" { + err = errors.New("PublicKeyId cannot be empty") + return + } + return +} + +func checkoutAssumeRamoidc(config *Config) (err error) { + if tea.StringValue(config.RoleArn) == "" { + err = errors.New("RoleArn cannot be empty") + return + } + if tea.StringValue(config.OIDCProviderArn) == "" { + err = errors.New("OIDCProviderArn cannot be empty") + return + } + return +} + +func checkRAMRoleArn(config *Config) (err error) { + if tea.StringValue(config.AccessKeySecret) == "" { + err = errors.New("AccessKeySecret cannot be empty") + return + } + if tea.StringValue(config.RoleArn) == "" { + err = errors.New("RoleArn cannot be empty") + return + } + if tea.StringValue(config.RoleSessionName) == "" { + err = errors.New("RoleSessionName cannot be empty") + return + } + if tea.StringValue(config.AccessKeyId) == "" { + err = errors.New("AccessKeyId cannot be empty") + return + } + return +} + +func checkEcsRAMRole(config *Config) (err error) { + return +} + +func checkSTS(config *Config) (err error) { + if tea.StringValue(config.AccessKeyId) == "" { + err = errors.New("AccessKeyId cannot be empty") + return + } + if tea.StringValue(config.AccessKeySecret) == "" { + err = errors.New("AccessKeySecret cannot be empty") + return + } + if tea.StringValue(config.SecurityToken) == "" { + err = errors.New("SecurityToken cannot be empty") + return + } + return +} + +func checkAccessKey(config *Config) (err error) { + if tea.StringValue(config.AccessKeyId) == "" { + err = errors.New("AccessKeyId cannot be empty") + return + } + if tea.StringValue(config.AccessKeySecret) == "" { + err = errors.New("AccessKeySecret cannot be empty") + return + } + return +} + +func doAction(request *request.CommonRequest, runtime *utils.Runtime) (content []byte, err error) { + var urlEncoded string + if request.BodyParams != nil { + urlEncoded = utils.GetURLFormedMap(request.BodyParams) + } + httpRequest, err := http.NewRequest(request.Method, request.URL, strings.NewReader(urlEncoded)) + if err != nil { + return + } + httpRequest.Proto = "HTTP/1.1" + httpRequest.Host = request.Domain + debuglog("> %s %s %s", httpRequest.Method, httpRequest.URL.RequestURI(), httpRequest.Proto) + debuglog("> Host: %s", httpRequest.Host) + for key, value := range request.Headers { + if value != "" { + debuglog("> %s: %s", key, value) + httpRequest.Header[key] = []string{value} + } + } + debuglog(">") + httpClient := &http.Client{} + httpClient.Timeout = time.Duration(runtime.ReadTimeout) * time.Second + proxy := &url.URL{} + if runtime.Proxy != "" { + proxy, err = url.Parse(runtime.Proxy) + if err != nil { + return + } + } + trans := &http.Transport{} + if proxy != nil && runtime.Proxy != "" { + trans.Proxy = http.ProxyURL(proxy) + } + trans.DialContext = utils.Timeout(time.Duration(runtime.ConnectTimeout) * time.Second) + httpClient.Transport = trans + httpResponse, err := hookDo(httpClient.Do)(httpRequest) + if err != nil { + return + } + debuglog("< %s %s", httpResponse.Proto, httpResponse.Status) + for key, value := range httpResponse.Header { + debuglog("< %s: %v", key, strings.Join(value, "")) + } + debuglog("<") + + resp := &response.CommonResponse{} + err = 
hookParse(resp.ParseFromHTTPResponse(httpResponse)) + if err != nil { + return + } + debuglog("%s", resp.GetHTTPContentString()) + if resp.GetHTTPStatus() != http.StatusOK { + err = fmt.Errorf("httpStatus: %d, message = %s", resp.GetHTTPStatus(), resp.GetHTTPContentString()) + return + } + return resp.GetHTTPContentBytes(), nil +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/credential_updater.go b/vendor/github.com/aliyun/credentials-go/credentials/credential_updater.go new file mode 100644 index 000000000..8d4433b7e --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/credential_updater.go @@ -0,0 +1,25 @@ +package credentials + +import ( + "net/http" + "time" +) + +const defaultInAdvanceScale = 0.95 + +var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) { + return fn +} + +type credentialUpdater struct { + credentialExpiration int + lastUpdateTimestamp int64 + inAdvanceScale float64 +} + +func (updater *credentialUpdater) needUpdateCredential() (result bool) { + if updater.inAdvanceScale == 0 { + updater.inAdvanceScale = defaultInAdvanceScale + } + return time.Now().Unix()-updater.lastUpdateTimestamp >= int64(float64(updater.credentialExpiration)*updater.inAdvanceScale) +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go b/vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go new file mode 100644 index 000000000..5e7ddf4de --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go @@ -0,0 +1,149 @@ +package credentials + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/request" + "github.com/aliyun/credentials-go/credentials/utils" +) + +var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + +// EcsRAMRoleCredential is a kind of credential +type EcsRAMRoleCredential struct { + *credentialUpdater + RoleName string + sessionCredential *sessionCredential + runtime *utils.Runtime +} + +type ecsRAMRoleResponse struct { + Code string `json:"Code" xml:"Code"` + AccessKeyId string `json:"AccessKeyId" xml:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken" xml:"SecurityToken"` + Expiration string `json:"Expiration" xml:"Expiration"` +} + +func newEcsRAMRoleCredential(roleName string, inAdvanceScale float64, runtime *utils.Runtime) *EcsRAMRoleCredential { + credentialUpdater := new(credentialUpdater) + if inAdvanceScale < 1 && inAdvanceScale > 0 { + credentialUpdater.inAdvanceScale = inAdvanceScale + } + return &EcsRAMRoleCredential{ + RoleName: roleName, + credentialUpdater: credentialUpdater, + runtime: runtime, + } +} + +// GetAccessKeyId returns EcsRAMRoleCredential's AccessKeyId +// if AccessKeyId does not exist or is out of date, the function will update it. +func (e *EcsRAMRoleCredential) GetAccessKeyId() (*string, error) { + if e.sessionCredential == nil || e.needUpdateCredential() { + err := e.updateCredential() + if err != nil { + if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { + return &e.sessionCredential.AccessKeyId, nil + } + return tea.String(""), err + } + } + return tea.String(e.sessionCredential.AccessKeyId), nil +} + +// GetAccessKeySecret returns EcsRAMRoleCredential's AccessKeySecret +// if AccessKeySecret does not exist or is out of date, the function will update it.
+func (e *EcsRAMRoleCredential) GetAccessKeySecret() (*string, error) { + if e.sessionCredential == nil || e.needUpdateCredential() { + err := e.updateCredential() + if err != nil { + if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { + return &e.sessionCredential.AccessKeySecret, nil + } + return tea.String(""), err + } + } + return tea.String(e.sessionCredential.AccessKeySecret), nil +} + +// GetSecurityToken returns EcsRAMRoleCredential's SecurityToken +// if SecurityToken does not exist or is out of date, the function will update it. +func (e *EcsRAMRoleCredential) GetSecurityToken() (*string, error) { + if e.sessionCredential == nil || e.needUpdateCredential() { + err := e.updateCredential() + if err != nil { + if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { + return &e.sessionCredential.SecurityToken, nil + } + return tea.String(""), err + } + } + return tea.String(e.sessionCredential.SecurityToken), nil +} + +// GetBearerToken is useless for EcsRAMRoleCredential +func (e *EcsRAMRoleCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType returns EcsRAMRoleCredential's type +func (e *EcsRAMRoleCredential) GetType() *string { + return tea.String("ecs_ram_role") +} + +func getRoleName() (string, error) { + runtime := utils.NewRuntime(1, 1, "", "") + request := request.NewCommonRequest() + request.URL = securityCredURL + request.Method = "GET" + content, err := doAction(request, runtime) + if err != nil { + return "", err + } + return string(content), nil +} + +func (e *EcsRAMRoleCredential) updateCredential() (err error) { + if e.runtime == nil { + e.runtime = new(utils.Runtime) + } + request := request.NewCommonRequest() + if e.RoleName == "" { + e.RoleName, err = getRoleName() + if err != nil { + return fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + } + } + request.URL = securityCredURL + e.RoleName + request.Method = "GET" + content, err := doAction(request, e.runtime) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + } + var resp *ecsRAMRoleResponse + err = json.Unmarshal(content, &resp) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err: Json Unmarshal fail: %s", err.Error()) + } + if resp.Code != "Success" { + return fmt.Errorf("refresh Ecs sts token err: Code is not Success") + } + if resp.AccessKeyId == "" || resp.AccessKeySecret == "" || resp.SecurityToken == "" || resp.Expiration == "" { + return fmt.Errorf("refresh Ecs sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", resp.AccessKeyId, resp.AccessKeySecret, resp.SecurityToken, resp.Expiration) + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", resp.Expiration) + e.lastUpdateTimestamp = time.Now().Unix() + e.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + e.sessionCredential = &sessionCredential{ + AccessKeyId: resp.AccessKeyId, + AccessKeySecret: resp.AccessKeySecret, + SecurityToken: resp.SecurityToken, + } + + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/env_provider.go b/vendor/github.com/aliyun/credentials-go/credentials/env_provider.go new file mode 100644 index 000000000..89df42f8c --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/env_provider.go @@ -0,0 +1,47 @@ +package credentials + +import ( + "errors" + "os" + + "github.com/alibabacloud-go/tea/tea" +) + +type envProvider struct{} + +var providerEnv = new(envProvider) + +const ( + //
EnvVarAccessKeyId is a name of ALIBABA_CLOUD_ACCESS_KEY_Id + EnvVarAccessKeyId = "ALIBABA_CLOUD_ACCESS_KEY_Id" + EnvVarAccessKeyIdNew = "ALIBABA_CLOUD_ACCESS_KEY_ID" + // EnvVarAccessKeySecret is a name of ALIBABA_CLOUD_ACCESS_KEY_SECRET + EnvVarAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET" +) + +func newEnvProvider() Provider { + return &envProvider{} +} + +func (p *envProvider) resolve() (*Config, error) { + accessKeyId, ok1 := os.LookupEnv(EnvVarAccessKeyIdNew) + if !ok1 || accessKeyId == "" { + accessKeyId, ok1 = os.LookupEnv(EnvVarAccessKeyId) + } + accessKeySecret, ok2 := os.LookupEnv(EnvVarAccessKeySecret) + if !ok1 || !ok2 { + return nil, nil + } + if accessKeyId == "" { + return nil, errors.New(EnvVarAccessKeyIdNew + " or " + EnvVarAccessKeyId + " cannot be empty") + } + if accessKeySecret == "" { + return nil, errors.New(EnvVarAccessKeySecret + " cannot be empty") + } + config := &Config{ + Type: tea.String("access_key"), + AccessKeyId: tea.String(accessKeyId), + AccessKeySecret: tea.String(accessKeySecret), + } + return config, nil +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go b/vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go new file mode 100644 index 000000000..7e2ea07bb --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go @@ -0,0 +1,28 @@ +package credentials + +import ( + "os" + + "github.com/alibabacloud-go/tea/tea" +) + +type instanceCredentialsProvider struct{} + +var providerInstance = new(instanceCredentialsProvider) + +func newInstanceCredentialsProvider() Provider { + return &instanceCredentialsProvider{} +} + +func (p *instanceCredentialsProvider) resolve() (*Config, error) { + roleName, ok := os.LookupEnv(ENVEcsMetadata) + if !ok { + return nil, nil + } + + config := &Config{ + Type: tea.String("ecs_ram_role"), + RoleName: tea.String(roleName), + } + return config, nil +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go new file mode 100644 index 000000000..76192cbee --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go @@ -0,0 +1,178 @@ +package credentials + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "time" + + "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/request" + "github.com/aliyun/credentials-go/credentials/utils" +) + +const defaultOIDCDurationSeconds = 3600 + +// OIDCCredential is a kind of credentials +type OIDCCredential struct { + *credentialUpdater + AccessKeyId string + AccessKeySecret string + RoleArn string + OIDCProviderArn string + OIDCTokenFilePath string + Policy string + RoleSessionName string + RoleSessionExpiration int + sessionCredential *sessionCredential + runtime *utils.Runtime +} + +type OIDCResponse struct { + Credentials *credentialsInResponse `json:"Credentials" xml:"Credentials"` +} + +type OIDCcredentialsInResponse struct { + AccessKeyId string `json:"AccessKeyId" xml:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken" xml:"SecurityToken"` + Expiration string `json:"Expiration" xml:"Expiration"` +} + +func newOIDCRoleArnCredential(accessKeyId, accessKeySecret, roleArn, OIDCProviderArn, OIDCTokenFilePath, RoleSessionName, policy string, RoleSessionExpiration int, runtime *utils.Runtime) *OIDCCredential { + return &OIDCCredential{ + AccessKeyId: 
accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + OIDCProviderArn: OIDCProviderArn, + OIDCTokenFilePath: OIDCTokenFilePath, + RoleSessionName: RoleSessionName, + Policy: policy, + RoleSessionExpiration: RoleSessionExpiration, + credentialUpdater: new(credentialUpdater), + runtime: runtime, + } +} + +// GetAccessKeyId returns OIDCCredential's AccessKeyId +// if AccessKeyId does not exist or is out of date, the function will update it. +func (r *OIDCCredential) GetAccessKeyId() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.AccessKeyId), nil +} + +// GetAccessKeySecret returns OIDCCredential's AccessKeySecret +// if AccessKeySecret does not exist or is out of date, the function will update it. +func (r *OIDCCredential) GetAccessKeySecret() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.AccessKeySecret), nil +} + +// GetSecurityToken returns OIDCCredential's SecurityToken +// if SecurityToken does not exist or is out of date, the function will update it. +func (r *OIDCCredential) GetSecurityToken() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.SecurityToken), nil +} + +// GetBearerToken is useless for OIDCCredential +func (r *OIDCCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType returns OIDCCredential's type +func (r *OIDCCredential) GetType() *string { + return tea.String("oidc_role_arn") +} + +func (r *OIDCCredential) GetOIDCToken(OIDCTokenFilePath string) *string { + tokenPath := OIDCTokenFilePath + _, err := os.Stat(tokenPath) + if os.IsNotExist(err) { + tokenPath = os.Getenv("ALIBABA_CLOUD_OIDC_TOKEN_FILE") + if tokenPath == "" { + return nil + } + } + byt, err := ioutil.ReadFile(tokenPath) + if err != nil { + return nil + } + return tea.String(string(byt)) +} + +func (r *OIDCCredential) updateCredential() (err error) { + if r.runtime == nil { + r.runtime = new(utils.Runtime) + } + request := request.NewCommonRequest() + request.Domain = "sts.aliyuncs.com" + request.Scheme = "HTTPS" + request.Method = "POST" + request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601() + request.QueryParams["Action"] = "AssumeRoleWithOIDC" + request.QueryParams["Format"] = "JSON" + request.BodyParams["RoleArn"] = r.RoleArn + request.BodyParams["OIDCProviderArn"] = r.OIDCProviderArn + token := r.GetOIDCToken(r.OIDCTokenFilePath) + request.BodyParams["OIDCToken"] = tea.StringValue(token) + if r.Policy != "" { + request.QueryParams["Policy"] = r.Policy + } + request.QueryParams["RoleSessionName"] = r.RoleSessionName + request.QueryParams["Version"] = "2015-04-01" + request.QueryParams["SignatureNonce"] = utils.GetUUID() + if r.AccessKeyId != "" && r.AccessKeySecret != "" { + signature := utils.ShaHmac1(request.BuildStringToSign(), r.AccessKeySecret+"&") + request.QueryParams["Signature"] = signature + request.QueryParams["AccessKeyId"] = r.AccessKeyId + request.QueryParams["AccessKeySecret"] = r.AccessKeySecret + } + request.Headers["Host"] = request.Domain + request.Headers["Accept-Encoding"] = "identity" + request.Headers["content-type"] = "application/x-www-form-urlencoded" + request.URL =
request.BuildURL() + content, err := doAction(request, r.runtime) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err: %s", err.Error()) + } + var resp *OIDCResponse + err = json.Unmarshal(content, &resp) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err: Json.Unmarshal fail: %s", err.Error()) + } + if resp == nil || resp.Credentials == nil { + return fmt.Errorf("refresh RoleArn sts token err: Credentials is empty") + } + respCredentials := resp.Credentials + if respCredentials.AccessKeyId == "" || respCredentials.AccessKeySecret == "" || respCredentials.SecurityToken == "" || respCredentials.Expiration == "" { + return fmt.Errorf("refresh RoleArn sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", respCredentials.AccessKeyId, respCredentials.AccessKeySecret, respCredentials.SecurityToken, respCredentials.Expiration) + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", respCredentials.Expiration) + r.lastUpdateTimestamp = time.Now().Unix() + r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + r.sessionCredential = &sessionCredential{ + AccessKeyId: respCredentials.AccessKeyId, + AccessKeySecret: respCredentials.AccessKeySecret, + SecurityToken: respCredentials.SecurityToken, + } + + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/oidc_token b/vendor/github.com/aliyun/credentials-go/credentials/oidc_token new file mode 100644 index 000000000..653e068df --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/oidc_token @@ -0,0 +1 @@ +test_long_oidc_token_eyJhbGciOiJSUzI1NiIsImtpZCI6ImFQaXlpNEVGSU8wWnlGcFh1V0psQUNWbklZVlJsUkNmM2tlSzNMUlhWT1UifQ.eyJhdWQiOlsic3RzLmFsaXl1bmNzLmNvbSJdLCJleHAiOjE2NDUxMTk3ODAsImlhdCI6MTY0NTA4Mzc4MCwiaXNzIjoiaHR0cHM6Ly9vaWRjLWFjay1jbi1oYW5nemhvdS5vc3MtY24taGFuZ3pob3UtaW50ZXJuYWwuYWxpeXVuY3MuY29tL2NmMWQ4ZGIwMjM0ZDk0YzEyOGFiZDM3MTc4NWJjOWQxNSIsImt1YmVybmV0ZXMuaW8iOnsibmFtZXNwYWNlIjoidGVzdC1ycnNhIiwicG9kIjp7Im5hbWUiOiJydW4tYXMtcm9vdCIsInVpZCI6ImIzMGI0MGY2LWNiZTAtNGY0Yy1hZGYyLWM1OGQ4ZmExZTAxMCJ9LCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoidXNlcjEiLCJ1aWQiOiJiZTEyMzdjYS01MTY4LTQyMzYtYWUyMC00NDM1YjhmMGI4YzAifX0sIm5iZiI6MTY0NTA4Mzc4MCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OnRlc3QtcnJzYTp1c2VyMSJ9.XGP-wgLj-iMiAHjLe0lZLh7y48Qsj9HzsEbNh706WwerBoxnssdsyGFb9lzd2FyM8CssbAOCstr7OuAMWNdJmDZgpiOGGSbQ-KXXmbfnIS4ix-V3pQF6LVBFr7xJlj20J6YY89um3rv_04t0iCGxKWs2ZMUyU1FbZpIPRep24LVKbUz1saiiVGgDBTIZdHA13Z-jUvYAnsxK_Kj5tc1K-IuQQU0IwSKJh5OShMcdPugMV5LwTL3ogCikfB7yljq5vclBhCeF2lXLIibvwF711TOhuJ5lMlh-a2KkIgwBHhANg_U9k4Mt_VadctfUGc4hxlSbBD0w9o9mDGKwgGmW5Q \ No newline at end of file diff --git a/vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go b/vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go new file mode 100644 index 000000000..d6292b035 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go @@ -0,0 +1,350 @@ +package credentials + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" + + "github.com/alibabacloud-go/tea/tea" + ini "gopkg.in/ini.v1" +) + +type profileProvider struct { + Profile string +} + +var providerProfile = newProfileProvider() + +var hookOS = func(goos string) string { + return goos +} + +var hookState = func(info os.FileInfo, err error) (os.FileInfo, error) { + return info, err +} + +// NewProfileProvider receives zero or more parameters; +// when length of name is 0, the value of field Profile will be "default", +// and when there are multiple
inputs, the function will take the +// first one and discard the other values. +func newProfileProvider(name ...string) Provider { + p := new(profileProvider) + if len(name) == 0 { + p.Profile = "default" + } else { + p.Profile = name[0] + } + return p +} + +// resolve implements the Provider interface +// when credential type is rsa_key_pair, the content of the private_key file +// must be able to be parsed directly into the required string +// that the NewRsaKeyPairCredential function needs +func (p *profileProvider) resolve() (*Config, error) { + path, ok := os.LookupEnv(ENVCredentialFile) + if !ok { + var err error + if path, err = checkDefaultPath(); err != nil { // assign to the outer path; ":=" here would shadow it and leave path empty + return nil, err + } + if path == "" { + return nil, nil + } + } else if path == "" { + return nil, errors.New(ENVCredentialFile + " cannot be empty") + } + + value, section, err := getType(path, p.Profile) + if err != nil { + return nil, err + } + switch value.String() { + case "access_key": + config, err := getAccessKey(section) + if err != nil { + return nil, err + } + return config, nil + case "sts": + config, err := getSTS(section) + if err != nil { + return nil, err + } + return config, nil + case "bearer": + config, err := getBearerToken(section) + if err != nil { + return nil, err + } + return config, nil + case "ecs_ram_role": + config, err := getEcsRAMRole(section) + if err != nil { + return nil, err + } + return config, nil + case "ram_role_arn": + config, err := getRAMRoleArn(section) + if err != nil { + return nil, err + } + return config, nil + case "rsa_key_pair": + config, err := getRSAKeyPair(section) + if err != nil { + return nil, err + } + return config, nil + default: + return nil, errors.New("Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair") + } +} + +func getRSAKeyPair(section *ini.Section) (*Config, error) { + publicKeyId, err := section.GetKey("public_key_id") + if err != nil { + return nil, errors.New("Missing required public_key_id option in profile for rsa_key_pair") + } + if publicKeyId.String() == "" { + return nil, errors.New("public_key_id cannot be empty") + } + privateKeyFile, err := section.GetKey("private_key_file") + if err != nil { + return nil, errors.New("Missing required private_key_file option in profile for rsa_key_pair") + } + if privateKeyFile.String() == "" { + return nil, errors.New("private_key_file cannot be empty") + } + sessionExpiration, _ := section.GetKey("session_expiration") + expiration := 0 + if sessionExpiration != nil { + expiration, err = sessionExpiration.Int() + if err != nil { + return nil, errors.New("session_expiration must be an int") + } + } + config := &Config{ + Type: tea.String("rsa_key_pair"), + PublicKeyId: tea.String(publicKeyId.String()), + PrivateKeyFile: tea.String(privateKeyFile.String()), + SessionExpiration: tea.Int(expiration), + } + err = setRuntimeToConfig(config, section) + if err != nil { + return nil, err + } + return config, nil +} + +func getRAMRoleArn(section *ini.Section) (*Config, error) { + accessKeyId, err := section.GetKey("access_key_id") + if err != nil { + return nil, errors.New("Missing required access_key_id option in profile for ram_role_arn") + } + if accessKeyId.String() == "" { + return nil, errors.New("access_key_id cannot be empty") + } + accessKeySecret, err := section.GetKey("access_key_secret") + if err != nil { + return nil, errors.New("Missing required access_key_secret option in profile for ram_role_arn") + } + if accessKeySecret.String() == "" { + return nil, errors.New("access_key_secret cannot
be empty") + } + roleArn, err := section.GetKey("role_arn") + if err != nil { + return nil, errors.New("Missing required role_arn option in profile for ram_role_arn") + } + if roleArn.String() == "" { + return nil, errors.New("role_arn cannot be empty") + } + roleSessionName, err := section.GetKey("role_session_name") + if err != nil { + return nil, errors.New("Missing required role_session_name option in profile for ram_role_arn") + } + if roleSessionName.String() == "" { + return nil, errors.New("role_session_name cannot be empty") + } + roleSessionExpiration, _ := section.GetKey("role_session_expiration") + expiration := 0 + if roleSessionExpiration != nil { + expiration, err = roleSessionExpiration.Int() + if err != nil { + return nil, errors.New("role_session_expiration must be an int") + } + } + config := &Config{ + Type: tea.String("ram_role_arn"), + AccessKeyId: tea.String(accessKeyId.String()), + AccessKeySecret: tea.String(accessKeySecret.String()), + RoleArn: tea.String(roleArn.String()), + RoleSessionName: tea.String(roleSessionName.String()), + RoleSessionExpiration: tea.Int(expiration), + } + err = setRuntimeToConfig(config, section) + if err != nil { + return nil, err + } + return config, nil +} + +func getEcsRAMRole(section *ini.Section) (*Config, error) { + roleName, _ := section.GetKey("role_name") + config := &Config{ + Type: tea.String("ecs_ram_role"), + } + if roleName != nil { + config.RoleName = tea.String(roleName.String()) + } + err := setRuntimeToConfig(config, section) + if err != nil { + return nil, err + } + return config, nil +} + +func getBearerToken(section *ini.Section) (*Config, error) { + bearerToken, err := section.GetKey("bearer_token") + if err != nil { + return nil, errors.New("Missing required bearer_token option in profile for bearer") + } + if bearerToken.String() == "" { + return nil, errors.New("bearer_token cannot be empty") + } + config := &Config{ + Type: tea.String("bearer"), + BearerToken: tea.String(bearerToken.String()), + } + return config, nil +} + +func getSTS(section *ini.Section) (*Config, error) { + accesskeyid, err := section.GetKey("access_key_id") + if err != nil { + return nil, errors.New("Missing required access_key_id option in profile for sts") + } + if accesskeyid.String() == "" { + return nil, errors.New("access_key_id cannot be empty") + } + accessKeySecret, err := section.GetKey("access_key_secret") + if err != nil { + return nil, errors.New("Missing required access_key_secret option in profile for sts") + } + if accessKeySecret.String() == "" { + return nil, errors.New("access_key_secret cannot be empty") + } + securityToken, err := section.GetKey("security_token") + if err != nil { + return nil, errors.New("Missing required security_token option in profile for sts") + } + if securityToken.String() == "" { + return nil, errors.New("security_token cannot be empty") + } + config := &Config{ + Type: tea.String("sts"), + AccessKeyId: tea.String(accesskeyid.String()), + AccessKeySecret: tea.String(accessKeySecret.String()), + SecurityToken: tea.String(securityToken.String()), + } + return config, nil +} + +func getAccessKey(section *ini.Section) (*Config, error) { + accesskeyid, err := section.GetKey("access_key_id") + if err != nil { + return nil, errors.New("Missing required access_key_id option in profile for access_key") + } + if accesskeyid.String() == "" { + return nil, errors.New("access_key_id cannot be empty") + } + accessKeySecret, err := section.GetKey("access_key_secret") + if err != nil { + return nil, 
errors.New("Missing required access_key_secret option in profile for access_key") + } + if accessKeySecret.String() == "" { + return nil, errors.New("access_key_secret cannot be empty") + } + config := &Config{ + Type: tea.String("access_key"), + AccessKeyId: tea.String(accesskeyid.String()), + AccessKeySecret: tea.String(accessKeySecret.String()), + } + return config, nil +} + +func getType(path, profile string) (*ini.Key, *ini.Section, error) { + ini, err := ini.Load(path) + if err != nil { + return nil, nil, errors.New("ERROR: Can not open file " + err.Error()) + } + + section, err := ini.GetSection(profile) + if err != nil { + return nil, nil, errors.New("ERROR: Can not load section " + err.Error()) + } + + value, err := section.GetKey("type") + if err != nil { + return nil, nil, errors.New("Missing required type option " + err.Error()) + } + return value, section, nil +} + +func getHomePath() string { + if hookOS(runtime.GOOS) == "windows" { + path, ok := os.LookupEnv("USERPROFILE") + if !ok { + return "" + } + return path + } + path, ok := os.LookupEnv("HOME") + if !ok { + return "" + } + return path +} + +func checkDefaultPath() (path string, err error) { + path = getHomePath() + if path == "" { + return "", errors.New("The default credential file path is invalid") + } + path = strings.Replace("~/.alibabacloud/credentials", "~", path, 1) + _, err = hookState(os.Stat(path)) + if err != nil { + return "", nil + } + return path, nil +} + +func setRuntimeToConfig(config *Config, section *ini.Section) error { + rawTimeout, _ := section.GetKey("timeout") + rawConnectTimeout, _ := section.GetKey("connect_timeout") + rawProxy, _ := section.GetKey("proxy") + rawHost, _ := section.GetKey("host") + if rawProxy != nil { + config.Proxy = tea.String(rawProxy.String()) + } + if rawConnectTimeout != nil { + connectTimeout, err := rawConnectTimeout.Int() + if err != nil { + return fmt.Errorf("Please set connect_timeout with an int value") + } + config.ConnectTimeout = tea.Int(connectTimeout) + } + if rawTimeout != nil { + timeout, err := rawTimeout.Int() + if err != nil { + return fmt.Errorf("Please set timeout with an int value") + } + config.Timeout = tea.Int(timeout) + } + if rawHost != nil { + config.Host = tea.String(rawHost.String()) + } + return nil +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/provider.go b/vendor/github.com/aliyun/credentials-go/credentials/provider.go new file mode 100644 index 000000000..f9d4ae574 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/provider.go @@ -0,0 +1,13 @@ +package credentials + +//Environmental virables that may be used by the provider +const ( + ENVCredentialFile = "ALIBABA_CLOUD_CREDENTIALS_FILE" + ENVEcsMetadata = "ALIBABA_CLOUD_ECS_METADATA" + PATHCredentialFile = "~/.alibabacloud/credentials" +) + +// Provider will be implemented When you want to customize the provider. 
+// Provider is the interface to implement when you want to customize the credential provider. +type Provider interface { + resolve() (*Config, error) +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go b/vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go new file mode 100644 index 000000000..2764a701c --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go @@ -0,0 +1,32 @@ +package credentials + +import ( + "errors" +) + +type providerChain struct { + Providers []Provider +} + +var defaultproviders = []Provider{providerEnv, providerProfile, providerInstance} +var defaultChain = newProviderChain(defaultproviders) + +func newProviderChain(providers []Provider) Provider { + return &providerChain{ + Providers: providers, + } +} + +func (p *providerChain) resolve() (*Config, error) { + for _, provider := range p.Providers { + config, err := provider.resolve() + if err != nil { + return nil, err + } else if config == nil { + continue + } + return config, nil + } + return nil, errors.New("No credential found") + +}
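As a usage sketch of the chain above (it must live inside package credentials because Provider, resolve, and newProviderChain are unexported; staticProvider and resolveWithFallback are hypothetical names, while providerEnv, providerProfile, and providerInstance are the package's default providers referenced in defaultproviders):

package credentials

// staticProvider is a hypothetical custom Provider, for illustration only.
type staticProvider struct{ cfg *Config }

// resolve returns the stored config; (nil, nil) tells the chain "nothing found, try the next provider".
func (s *staticProvider) resolve() (*Config, error) { return s.cfg, nil }

// resolveWithFallback consults a custom provider first, then the default order:
// environment variables, the shared credentials file profile, the ECS instance role.
func resolveWithFallback() (*Config, error) {
	chain := newProviderChain([]Provider{
		&staticProvider{}, // cfg is nil here, so the chain falls through to the defaults
		providerEnv,
		providerProfile,
		providerInstance,
	})
	return chain.resolve()
}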
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/request/common_request.go b/vendor/github.com/aliyun/credentials-go/credentials/request/common_request.go new file mode 100644 index 000000000..47a92a766 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/request/common_request.go @@ -0,0 +1,63 @@ +package request + +import ( + "fmt" + "net/url" + "strings" + "time" + + "github.com/aliyun/credentials-go/credentials/utils" +) + +// CommonRequest is for requesting credentials +type CommonRequest struct { + Scheme string + Method string + Domain string + RegionId string + URL string + ReadTimeout time.Duration + ConnectTimeout time.Duration + isInsecure *bool + BodyParams map[string]string + userAgent map[string]string + QueryParams map[string]string + Headers map[string]string + + queries string +} + +// NewCommonRequest returns a CommonRequest +func NewCommonRequest() *CommonRequest { + return &CommonRequest{ + BodyParams: make(map[string]string), + QueryParams: make(map[string]string), + Headers: make(map[string]string), + } +} + +// BuildURL returns the request URL built from the scheme, domain, and URL-encoded query parameters +func (request *CommonRequest) BuildURL() string { + url := fmt.Sprintf("%s://%s", strings.ToLower(request.Scheme), request.Domain) + request.queries = "/?" + utils.GetURLFormedMap(request.QueryParams) + return url + request.queries +} + +// BuildStringToSign returns the canonical string to sign for the RPC-style signature +func (request *CommonRequest) BuildStringToSign() (stringToSign string) { + signParams := make(map[string]string) + for key, value := range request.QueryParams { + signParams[key] = value + } + + for key, value := range request.BodyParams { + signParams[key] = value + } + stringToSign = utils.GetURLFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = strings.Replace(stringToSign, "%7E", "~", -1) + stringToSign = url.QueryEscape(stringToSign) + stringToSign = request.Method + "&%2F&" + stringToSign + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/response/common_response.go b/vendor/github.com/aliyun/credentials-go/credentials/response/common_response.go new file mode 100644 index 000000000..ef489c11d --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/response/common_response.go @@ -0,0 +1,53 @@ +package response + +import ( + "io" + "io/ioutil" + "net/http" +) + +var hookReadAll = func(fn func(r io.Reader) (b []byte, err error)) func(r io.Reader) (b []byte, err error) { + return fn +} + +// CommonResponse stores the status, headers, and body of an HTTP response +type CommonResponse struct { + httpStatus int + httpHeaders map[string][]string + httpContentString string + httpContentBytes []byte +} + +// ParseFromHTTPResponse populates the CommonResponse from an http.Response, returning any error from reading the body +func (resp *CommonResponse) ParseFromHTTPResponse(httpResponse *http.Response) (err error) { + defer httpResponse.Body.Close() + body, err := hookReadAll(ioutil.ReadAll)(httpResponse.Body) + if err != nil { + return + } + resp.httpStatus = httpResponse.StatusCode + resp.httpHeaders = httpResponse.Header + resp.httpContentBytes = body + resp.httpContentString = string(body) + return +} + +// GetHTTPStatus returns the HTTP status code +func (resp *CommonResponse) GetHTTPStatus() int { + return resp.httpStatus +} + +// GetHTTPHeaders returns the HTTP response headers +func (resp *CommonResponse) GetHTTPHeaders() map[string][]string { + return resp.httpHeaders +} + +// GetHTTPContentString returns the body content as a string +func (resp *CommonResponse) GetHTTPContentString() string { + return resp.httpContentString +} + +// GetHTTPContentBytes returns the body content as []byte +func (resp *CommonResponse) GetHTTPContentBytes() []byte { + return resp.httpContentBytes +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go new file mode 100644 index 000000000..3e4310eca --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go @@ -0,0 +1,148 @@ +package credentials + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "time" + + "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/request" + "github.com/aliyun/credentials-go/credentials/utils" +) + +// RsaKeyPairCredential is a kind of credential +type RsaKeyPairCredential struct { + *credentialUpdater + PrivateKey string + PublicKeyId string + SessionExpiration int + sessionCredential *sessionCredential + runtime *utils.Runtime +} + +type rsaKeyPairResponse struct { + SessionAccessKey *sessionAccessKey `json:"SessionAccessKey" xml:"SessionAccessKey"` +} + +type sessionAccessKey struct { + SessionAccessKeyId string 
`json:"SessionAccessKeyId" xml:"SessionAccessKeyId"` + SessionAccessKeySecret string `json:"SessionAccessKeySecret" xml:"SessionAccessKeySecret"` + Expiration string `json:"Expiration" xml:"Expiration"` +} + +func newRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration int, runtime *utils.Runtime) *RsaKeyPairCredential { + return &RsaKeyPairCredential{ + PrivateKey: privateKey, + PublicKeyId: publicKeyId, + SessionExpiration: sessionExpiration, + credentialUpdater: new(credentialUpdater), + runtime: runtime, + } +} + +// GetAccessKeyId reutrns RsaKeyPairCredential's AccessKeyId +// if AccessKeyId is not exist or out of date, the function will update it. +func (r *RsaKeyPairCredential) GetAccessKeyId() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.AccessKeyId), nil +} + +// GetAccessSecret reutrns RsaKeyPairCredential's AccessKeySecret +// if AccessKeySecret is not exist or out of date, the function will update it. +func (r *RsaKeyPairCredential) GetAccessKeySecret() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.AccessKeySecret), nil +} + +// GetSecurityToken is useless RsaKeyPairCredential +func (r *RsaKeyPairCredential) GetSecurityToken() (*string, error) { + return tea.String(""), nil +} + +// GetBearerToken is useless for RsaKeyPairCredential +func (r *RsaKeyPairCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType reutrns RsaKeyPairCredential's type +func (r *RsaKeyPairCredential) GetType() *string { + return tea.String("rsa_key_pair") +} + +func (r *RsaKeyPairCredential) updateCredential() (err error) { + if r.runtime == nil { + r.runtime = new(utils.Runtime) + } + request := request.NewCommonRequest() + request.Domain = "sts.aliyuncs.com" + if r.runtime.Host != "" { + request.Domain = r.runtime.Host + } + request.Scheme = "HTTPS" + request.Method = "GET" + request.QueryParams["AccessKeyId"] = r.PublicKeyId + request.QueryParams["Action"] = "GenerateSessionAccessKey" + request.QueryParams["Format"] = "JSON" + if r.SessionExpiration > 0 { + if r.SessionExpiration >= 900 && r.SessionExpiration <= 3600 { + request.QueryParams["DurationSeconds"] = strconv.Itoa(r.SessionExpiration) + } else { + err = errors.New("[InvalidParam]:Key Pair session duration should be in the range of 15min - 1Hr") + return + } + } else { + request.QueryParams["DurationSeconds"] = strconv.Itoa(defaultDurationSeconds) + } + request.QueryParams["SignatureMethod"] = "SHA256withRSA" + request.QueryParams["SignatureType"] = "PRIVATEKEY" + request.QueryParams["SignatureVersion"] = "1.0" + request.QueryParams["Version"] = "2015-04-01" + request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601() + request.QueryParams["SignatureNonce"] = utils.GetUUID() + signature := utils.Sha256WithRsa(request.BuildStringToSign(), r.PrivateKey) + request.QueryParams["Signature"] = signature + request.Headers["Host"] = request.Domain + request.Headers["Accept-Encoding"] = "identity" + request.URL = request.BuildURL() + content, err := doAction(request, r.runtime) + if err != nil { + return fmt.Errorf("refresh KeyPair err: %s", err.Error()) + } + var resp *rsaKeyPairResponse + err = json.Unmarshal(content, &resp) + if err != nil { + return fmt.Errorf("refresh KeyPair err: 
Json Unmarshal fail: %s", err.Error()) + } + if resp == nil || resp.SessionAccessKey == nil { + return fmt.Errorf("refresh KeyPair err: SessionAccessKey is empty") + } + sessionAccessKey := resp.SessionAccessKey + if sessionAccessKey.SessionAccessKeyId == "" || sessionAccessKey.SessionAccessKeySecret == "" || sessionAccessKey.Expiration == "" { + return fmt.Errorf("refresh KeyPair err: SessionAccessKeyId: %v, SessionAccessKeySecret: %v, Expiration: %v", sessionAccessKey.SessionAccessKeyId, sessionAccessKey.SessionAccessKeySecret, sessionAccessKey.Expiration) + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", sessionAccessKey.Expiration) + if err != nil { + return fmt.Errorf("refresh KeyPair err: invalid Expiration: %s", err.Error()) + } + r.lastUpdateTimestamp = time.Now().Unix() + r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + r.sessionCredential = &sessionCredential{ + AccessKeyId: sessionAccessKey.SessionAccessKeyId, + AccessKeySecret: sessionAccessKey.SessionAccessKeySecret, + } + + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/session_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/session_credential.go new file mode 100644 index 000000000..dd48dc929 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/session_credential.go @@ -0,0 +1,7 @@ +package credentials + +type sessionCredential struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go new file mode 100644 index 000000000..ba07dab49 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go @@ -0,0 +1,43 @@ +package credentials + +import "github.com/alibabacloud-go/tea/tea" + +// StsTokenCredential is a kind of credential +type StsTokenCredential struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string +} + +func newStsTokenCredential(accessKeyId, accessKeySecret, securityToken string) *StsTokenCredential { + return &StsTokenCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + SecurityToken: securityToken, + } +} + +// GetAccessKeyId returns StsTokenCredential's AccessKeyId +func (s *StsTokenCredential) GetAccessKeyId() (*string, error) { + return tea.String(s.AccessKeyId), nil +} + +// GetAccessKeySecret returns StsTokenCredential's AccessKeySecret +func (s *StsTokenCredential) GetAccessKeySecret() (*string, error) { + return tea.String(s.AccessKeySecret), nil +} + +// GetSecurityToken returns StsTokenCredential's SecurityToken +func (s *StsTokenCredential) GetSecurityToken() (*string, error) { + return tea.String(s.SecurityToken), nil +} + +// GetBearerToken is useless for StsTokenCredential +func (s *StsTokenCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType returns StsTokenCredential's type +func (s *StsTokenCredential) GetType() *string { + return tea.String("sts") +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go new file mode 100644 index 000000000..f31ba1e32 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go @@ -0,0 +1,166 @@ +package credentials + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "time" + + "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/request" + "github.com/aliyun/credentials-go/credentials/utils" +) 
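To make the signing flow in this file concrete, here is a small, hedged sketch of how the pieces from request/common_request.go and the utils package compose into a signed STS query; the secret value and the parameter set are illustrative, not a complete AssumeRole request:

package main

import (
	"fmt"

	"github.com/aliyun/credentials-go/credentials/request"
	"github.com/aliyun/credentials-go/credentials/utils"
)

func main() {
	req := request.NewCommonRequest()
	req.Scheme = "HTTPS"
	req.Method = "GET"
	req.Domain = "sts.aliyuncs.com"
	req.QueryParams["Action"] = "AssumeRole"
	req.QueryParams["Version"] = "2015-04-01"
	req.QueryParams["SignatureNonce"] = utils.GetUUID()
	req.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601()
	// Canonicalize the query and body params into "GET&%2F&<escaped params>".
	stringToSign := req.BuildStringToSign()
	// RPC-style signature: HMAC-SHA1 keyed with the access key secret plus a trailing "&".
	req.QueryParams["Signature"] = utils.ShaHmac1(stringToSign, "mySecret"+"&") // "mySecret" is a placeholder
	req.URL = req.BuildURL() // scheme://domain/?<encoded query, including the signature>
	fmt.Println(req.URL)
}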
+ +const defaultDurationSeconds = 3600 + +// RAMRoleArnCredential is a kind of credential +type RAMRoleArnCredential struct { + *credentialUpdater + AccessKeyId string + AccessKeySecret string + RoleArn string + RoleSessionName string + RoleSessionExpiration int + Policy string + sessionCredential *sessionCredential + runtime *utils.Runtime +} + +type ramRoleArnResponse struct { + Credentials *credentialsInResponse `json:"Credentials" xml:"Credentials"` +} + +type credentialsInResponse struct { + AccessKeyId string `json:"AccessKeyId" xml:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken" xml:"SecurityToken"` + Expiration string `json:"Expiration" xml:"Expiration"` +} + +func newRAMRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int, runtime *utils.Runtime) *RAMRoleArnCredential { + return &RAMRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + Policy: policy, + credentialUpdater: new(credentialUpdater), + runtime: runtime, + } +} + +// GetAccessKeyId returns RAMRoleArnCredential's AccessKeyId +// if the AccessKeyId does not exist or is out of date, the function will update it. +func (r *RAMRoleArnCredential) GetAccessKeyId() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.AccessKeyId), nil +} + +// GetAccessKeySecret returns RAMRoleArnCredential's AccessKeySecret +// if the AccessKeySecret does not exist or is out of date, the function will update it. +func (r *RAMRoleArnCredential) GetAccessKeySecret() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.AccessKeySecret), nil +} +
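For reference, a sketch of the STS AssumeRole response body that ramRoleArnResponse above unmarshals; every value is an invented placeholder, but the field names and the Expiration layout ("2006-01-02T15:04:05Z") match the structs and the time.Parse call in this file:

{
  "Credentials": {
    "AccessKeyId": "STS.placeholder",
    "AccessKeySecret": "placeholder-secret",
    "SecurityToken": "placeholder-token",
    "Expiration": "2023-06-13T12:00:00Z"
  }
}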
+// GetSecurityToken returns RAMRoleArnCredential's SecurityToken +// if the SecurityToken does not exist or is out of date, the function will update it. +func (r *RAMRoleArnCredential) GetSecurityToken() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.SecurityToken), nil +} + +// GetBearerToken is useless for RAMRoleArnCredential +func (r *RAMRoleArnCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType returns RAMRoleArnCredential's type +func (r *RAMRoleArnCredential) GetType() *string { + return tea.String("ram_role_arn") +} + +func (r *RAMRoleArnCredential) updateCredential() (err error) { + if r.runtime == nil { + r.runtime = new(utils.Runtime) + } + request := request.NewCommonRequest() + request.Domain = "sts.aliyuncs.com" + request.Scheme = "HTTPS" + request.Method = "GET" + request.QueryParams["AccessKeyId"] = r.AccessKeyId + request.QueryParams["Action"] = "AssumeRole" + request.QueryParams["Format"] = "JSON" + if r.RoleSessionExpiration > 0 { + if r.RoleSessionExpiration >= 900 && r.RoleSessionExpiration <= 3600 { + request.QueryParams["DurationSeconds"] = strconv.Itoa(r.RoleSessionExpiration) + } else { + err = errors.New("[InvalidParam]:Assume Role session duration should be in the range of 15min - 1Hr") + return + } + } else { + request.QueryParams["DurationSeconds"] = strconv.Itoa(defaultDurationSeconds) + } + request.QueryParams["RoleArn"] = r.RoleArn + if r.Policy != "" { + request.QueryParams["Policy"] = r.Policy + } + request.QueryParams["RoleSessionName"] = r.RoleSessionName + request.QueryParams["SignatureMethod"] = "HMAC-SHA1" + request.QueryParams["SignatureVersion"] = "1.0" + request.QueryParams["Version"] = "2015-04-01" + request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601() + request.QueryParams["SignatureNonce"] = utils.GetUUID() + signature := utils.ShaHmac1(request.BuildStringToSign(), r.AccessKeySecret+"&") + request.QueryParams["Signature"] = signature + request.Headers["Host"] = request.Domain + request.Headers["Accept-Encoding"] = "identity" + request.URL = request.BuildURL() + content, err := doAction(request, r.runtime) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err: %s", err.Error()) + } + var resp *ramRoleArnResponse + err = json.Unmarshal(content, &resp) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err: Json.Unmarshal fail: %s", err.Error()) + } + if resp == nil || resp.Credentials == nil { + return fmt.Errorf("refresh RoleArn sts token err: Credentials is empty") + } + respCredentials := resp.Credentials + if respCredentials.AccessKeyId == "" || respCredentials.AccessKeySecret == "" || respCredentials.SecurityToken == "" || respCredentials.Expiration == "" { + return fmt.Errorf("refresh RoleArn sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", respCredentials.AccessKeyId, respCredentials.AccessKeySecret, respCredentials.SecurityToken, respCredentials.Expiration) + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", respCredentials.Expiration) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err: invalid Expiration: %s", err.Error()) + } + r.lastUpdateTimestamp = time.Now().Unix() + r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + r.sessionCredential = &sessionCredential{ + AccessKeyId: respCredentials.AccessKeyId, + AccessKeySecret: respCredentials.AccessKeySecret, + SecurityToken: respCredentials.SecurityToken, + } + + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/uri_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/uri_credential.go new file 
mode 100644 index 000000000..e8f303fc7 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/uri_credential.go @@ -0,0 +1,131 @@ +package credentials + +import ( + "encoding/json" + "fmt" + "os" + "time" + + "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/request" + "github.com/aliyun/credentials-go/credentials/utils" +) + +// URLCredential is a kind of credential +type URLCredential struct { + URL string + *credentialUpdater + *sessionCredential + runtime *utils.Runtime +} + +type URLResponse struct { + AccessKeyId string `json:"AccessKeyId" xml:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken" xml:"SecurityToken"` + Expiration string `json:"Expiration" xml:"Expiration"` +} + +func newURLCredential(URL string) *URLCredential { + credentialUpdater := new(credentialUpdater) + if URL == "" { + URL = os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI") + } + return &URLCredential{ + URL: URL, + credentialUpdater: credentialUpdater, + } +} + +// GetAccessKeyId returns URLCredential's AccessKeyId +// if the AccessKeyId does not exist or is out of date, the function will update it. +func (e *URLCredential) GetAccessKeyId() (*string, error) { + if e.sessionCredential == nil || e.needUpdateCredential() { + err := e.updateCredential() + if err != nil { + if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { + return &e.sessionCredential.AccessKeyId, nil + } + return tea.String(""), err + } + } + return tea.String(e.sessionCredential.AccessKeyId), nil +} + +// GetAccessKeySecret returns URLCredential's AccessKeySecret +// if the AccessKeySecret does not exist or is out of date, the function will update it. +func (e *URLCredential) GetAccessKeySecret() (*string, error) { + if e.sessionCredential == nil || e.needUpdateCredential() { + err := e.updateCredential() + if err != nil { + if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { + return &e.sessionCredential.AccessKeySecret, nil + } + return tea.String(""), err + } + } + return tea.String(e.sessionCredential.AccessKeySecret), nil +} +
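A sketch of how this URL credential is fed, assuming a hypothetical local endpoint. The environment variable comes from newURLCredential above; the endpoint must answer with a flat JSON document matching URLResponse (note: no "Credentials" wrapper, unlike the AssumeRole payload), and all values here are placeholders:

export ALIBABA_CLOUD_CREDENTIALS_URI=http://127.0.0.1:8080/creds

{
  "AccessKeyId": "STS.placeholder",
  "AccessKeySecret": "placeholder-secret",
  "SecurityToken": "placeholder-token",
  "Expiration": "2023-06-13T12:00:00Z"
}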
+// GetSecurityToken returns URLCredential's SecurityToken +// if the SecurityToken does not exist or is out of date, the function will update it. +func (e *URLCredential) GetSecurityToken() (*string, error) { + if e.sessionCredential == nil || e.needUpdateCredential() { + err := e.updateCredential() + if err != nil { + if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { + return &e.sessionCredential.SecurityToken, nil + } + return tea.String(""), err + } + } + return tea.String(e.sessionCredential.SecurityToken), nil +} + +// GetBearerToken is useless for URLCredential +func (e *URLCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType returns URLCredential's type +func (e *URLCredential) GetType() *string { + return tea.String("credential_uri") +} + +func (e *URLCredential) updateCredential() (err error) { + if e.runtime == nil { + e.runtime = new(utils.Runtime) + } + request := request.NewCommonRequest() + request.URL = e.URL + request.Method = "GET" + content, err := doAction(request, e.runtime) + if err != nil { + return fmt.Errorf("refresh URL sts token err: %s", err.Error()) + } + var resp *URLResponse + err = json.Unmarshal(content, &resp) + if err != nil { + return fmt.Errorf("refresh URL sts token err: Json Unmarshal fail: %s", err.Error()) + } + if resp == nil { + return fmt.Errorf("refresh URL sts token err: response is empty") + } + if resp.AccessKeyId == "" || resp.AccessKeySecret == "" || resp.SecurityToken == "" || resp.Expiration == "" { + return fmt.Errorf("refresh URL sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", resp.AccessKeyId, resp.AccessKeySecret, resp.SecurityToken, resp.Expiration) + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", resp.Expiration) + if err != nil { + return fmt.Errorf("refresh URL sts token err: invalid Expiration: %s", err.Error()) + } + e.lastUpdateTimestamp = time.Now().Unix() + e.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + e.sessionCredential = &sessionCredential{ + AccessKeyId: resp.AccessKeyId, + AccessKeySecret: resp.AccessKeySecret, + SecurityToken: resp.SecurityToken, + } + + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go b/vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go new file mode 100644 index 000000000..d4a27c9cd --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go @@ -0,0 +1,35 @@ +package utils + +import ( + "context" + "net" + "time" +) + +// Runtime is for setting timeout, proxy and host +type Runtime struct { + ReadTimeout int + ConnectTimeout int + Proxy string + Host string +} + +// NewRuntime returns a Runtime +func NewRuntime(readTimeout, connectTimeout int, proxy string, host string) *Runtime { + return &Runtime{ + ReadTimeout: readTimeout, + ConnectTimeout: connectTimeout, + Proxy: proxy, + Host: host, + } +} + +// Timeout returns a DialContext function that bounds connection setup with the given timeout +func Timeout(connectTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{ + Timeout: connectTimeout, + DualStack: true, + }).DialContext(ctx, network, address) + } +}
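A short, hedged sketch of how utils.NewRuntime and utils.Timeout above can be wired into a standard http.Client; the transport construction is illustrative and not part of this package:

package main

import (
	"net/http"
	"time"

	"github.com/aliyun/credentials-go/credentials/utils"
)

func main() {
	rt := utils.NewRuntime(10, 5, "", "") // read/connect timeouts in seconds; no proxy or host override
	client := &http.Client{
		Transport: &http.Transport{
			// Timeout returns a DialContext that bounds connection setup.
			DialContext: utils.Timeout(time.Duration(rt.ConnectTimeout) * time.Second),
		},
		// Bound the overall request, which covers reading the response.
		Timeout: time.Duration(rt.ReadTimeout) * time.Second,
	}
	_ = client
}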
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +var hookRead = func(fn func(p []byte) (n int, err error)) func(p []byte) (n int, err error) { + return fn +} + +var hookRSA = func(fn func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)) func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) { + return fn +} + +// GetUUID returns a uuid +func GetUUID() (uuidHex string) { + uuid := newUUID() + uuidHex = hex.EncodeToString(uuid[:]) + return +} + +// RandStringBytes returns a rand string +func RandStringBytes(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[rand2.Intn(len(letterBytes))] + } + return string(b) +} + +// ShaHmac1 return a string which has been hashed +func ShaHmac1(source, secret string) string { + key := []byte(secret) + hmac := hmac.New(sha1.New, key) + hmac.Write([]byte(source)) + signedBytes := hmac.Sum(nil) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +// Sha256WithRsa return a string which has been hashed with Rsa +func Sha256WithRsa(source, secret string) string { + decodeString, err := base64.StdEncoding.DecodeString(secret) + if err != nil { + panic(err) + } + private, err := x509.ParsePKCS8PrivateKey(decodeString) + if err != nil { + panic(err) + } + + h := crypto.Hash.New(crypto.SHA256) + h.Write([]byte(source)) + hashed := h.Sum(nil) + signature, err := hookRSA(rsa.SignPKCS1v15)(rand.Reader, private.(*rsa.PrivateKey), + crypto.SHA256, hashed) + if err != nil { + panic(err) + } + + return base64.StdEncoding.EncodeToString(signature) +} + +// GetMD5Base64 returns a string which has been base64 +func GetMD5Base64(bytes []byte) (base64Value string) { + md5Ctx := md5.New() + md5Ctx.Write(bytes) + md5Value := md5Ctx.Sum(nil) + base64Value = base64.StdEncoding.EncodeToString(md5Value) + return +} + +// GetTimeInFormatISO8601 returns a time string +func GetTimeInFormatISO8601() (timeStr string) { + gmt := time.FixedZone("GMT", 0) + + return time.Now().In(gmt).Format("2006-01-02T15:04:05Z") +} + +// GetURLFormedMap returns a url encoded string +func GetURLFormedMap(source map[string]string) (urlEncoded string) { + urlEncoder := url.Values{} + for key, value := range source { + urlEncoder.Add(key, value) + } + urlEncoded = urlEncoder.Encode() + return +} + +func newUUID() uuid { + ns := uuid{} + safeRandom(ns[:]) + u := newFromHash(md5.New(), ns, RandStringBytes(16)) + u[6] = (u[6] & 0x0f) | (byte(2) << 4) + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + + return u +} + +func newFromHash(h hash.Hash, ns uuid, name string) uuid { + u := uuid{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +func safeRandom(dest []byte) { + if _, err := hookRead(rand.Read)(dest); err != nil { + panic(err) + } +} + +func (u uuid) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.gitignore b/vendor/github.com/aws/aws-sdk-go-v2/.gitignore new file mode 100644 index 000000000..5f8b8c94f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/.gitignore @@ -0,0 +1,14 @@ +dist +/doc +/doc-staging +.yardoc +Gemfile.lock +/internal/awstesting/integration/smoke/**/importmarker__.go 
+/internal/awstesting/integration/smoke/_test/ +/vendor +/private/model/cli/gen-api/gen-api +.gradle/ +build/ +.idea/ +bin/ +.vscode/ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml b/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml new file mode 100644 index 000000000..8792d0ca6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml @@ -0,0 +1,27 @@ +[run] +concurrency = 4 +timeout = "1m" +issues-exit-code = 0 +modules-download-mode = "readonly" +allow-parallel-runners = true +skip-dirs = ["internal/repotools"] +skip-dirs-use-default = true +skip-files = ["service/transcribestreaming/eventstream_test.go"] +[output] +format = "github-actions" + +[linters-settings.cyclop] +skip-tests = false + +[linters-settings.errcheck] +check-blank = true + +[linters] +disable-all = true +enable = ["errcheck"] +fast = false + +[issues] +exclude-use-default = false + +# Refer config definitions at https://golangci-lint.run/usage/configuration/#config-file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml b/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml new file mode 100644 index 000000000..4b498a7a2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml @@ -0,0 +1,31 @@ +language: go +sudo: true +dist: bionic + +branches: + only: + - main + +os: + - linux + - osx + # Travis doesn't work with windows and Go tip + #- windows + +go: + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi + - (cd /tmp/; go get golang.org/x/lint/golint) + +env: + - EACHMODULE_CONCURRENCY=4 + +script: + - make ci-test-no-generate; + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md new file mode 100644 index 000000000..c3512dd7a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md @@ -0,0 +1,10199 @@ +# Release (2023-06-13) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.27.0](service/cloudtrail/CHANGELOG.md#v1270-2023-06-13) + * **Feature**: This feature allows users to view dashboards for CloudTrail Lake event data stores. +* `github.com/aws/aws-sdk-go-v2/service/codegurusecurity`: [v1.0.0](service/codegurusecurity/CHANGELOG.md#v100-2023-06-13) + * **Release**: New AWS service client module + * **Feature**: Initial release of Amazon CodeGuru Security APIs +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.14.0](service/drs/CHANGELOG.md#v1140-2023-06-13) + * **Feature**: Added APIs to support network replication and recovery using AWS Elastic Disaster Recovery. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.100.0](service/ec2/CHANGELOG.md#v11000-2023-06-13) + * **Feature**: This release introduces a new feature, EC2 Instance Connect Endpoint, that enables you to connect to a resource over TCP, without requiring the resource to have a public IPv4 address. +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.23.5](service/imagebuilder/CHANGELOG.md#v1235-2023-06-13) + * **Documentation**: Change the Image Builder ImagePipeline dateNextRun field to more accurately describe the data. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.27.0](service/lightsail/CHANGELOG.md#v1270-2023-06-13) + * **Feature**: This release adds pagination for the Get Certificates API operation. 
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.34.0](service/s3/CHANGELOG.md#v1340-2023-06-13) + * **Feature**: Integrate double encryption feature to SDKs. + * **Bug Fix**: Fix HeadObject to return types.NotFound when an object does not exist. Fixes [2084](https://github.com/aws/aws-sdk-go-v2/issues/2084) +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.33.0](service/securityhub/CHANGELOG.md#v1330-2023-06-13) + * **Feature**: Add support for Security Hub Automation Rules +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.3.0](service/simspaceweaver/CHANGELOG.md#v130-2023-06-13) + * **Feature**: This release fixes using aws-us-gov ARNs in API calls and adds documentation for snapshot APIs. +* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.0.0](service/verifiedpermissions/CHANGELOG.md#v100-2023-06-13) + * **Release**: New AWS service client module + * **Feature**: GA release of Amazon Verified Permissions. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.35.0](service/wafv2/CHANGELOG.md#v1350-2023-06-13) + * **Feature**: You can now detect and block fraudulent account creation attempts with the new AWS WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.21.0](service/wellarchitected/CHANGELOG.md#v1210-2023-06-13) + * **Feature**: AWS Well-Architected now supports Profiles that help customers prioritize which questions to focus on first by providing a list of prioritized questions that are better aligned with their business goals and outcomes. + +# Release (2023-06-12) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.11.0](service/amplifyuibuilder/CHANGELOG.md#v1110-2023-06-12) + * **Feature**: AWS Amplify UIBuilder is launching Codegen UI, a new feature that enables you to generate your amplify uibuilder components and forms. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.8](service/dynamodb/CHANGELOG.md#v1198-2023-06-12) + * **Documentation**: Documentation updates for DynamoDB +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.12](service/dynamodbstreams/CHANGELOG.md#v11412-2023-06-12) + * **Documentation**: Documentation updates for DynamoDB Streams +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.29.0](service/fsx/CHANGELOG.md#v1290-2023-06-12) + * **Feature**: Amazon FSx for NetApp ONTAP now supports joining a storage virtual machine (SVM) to Active Directory after the SVM has been created. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.18.0](service/opensearch/CHANGELOG.md#v1180-2023-06-12) + * **Feature**: This release adds support for SkipUnavailable connection property for cross cluster search +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.29.0](service/rekognition/CHANGELOG.md#v1290-2023-06-12) + * **Feature**: This release adds support for improved accuracy with user vector in Amazon Rekognition Face Search. Adds new APIs: AssociateFaces, CreateUser, DeleteUser, DisassociateFaces, ListUsers, SearchUsers, SearchUsersByImage. Also adds new face metadata that can be stored: user vector. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.84.0](service/sagemaker/CHANGELOG.md#v1840-2023-06-12) + * **Feature**: Sagemaker Neo now supports compilation for inferentia2 (ML_INF2) and Trainium1 (ML_TRN1) as available targets. 
With these devices, you can run your workloads at highest performance with lowest cost. inferentia2 (ML_INF2) is available in CMH and Trainium1 (ML_TRN1) is available in IAD currently + +# Release (2023-06-09) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.13](service/acmpca/CHANGELOG.md#v12113-2023-06-09) + * **Documentation**: Document-only update to refresh CLI documentation for AWS Private CA. No change to the service. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.58.0](service/connect/CHANGELOG.md#v1580-2023-06-09) + * **Feature**: This release adds search APIs for Prompts, Quick Connects and Hours of Operations, which can be used to search for those resources within a Connect Instance. + +# Release (2023-06-08) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.30.0](service/athena/CHANGELOG.md#v1300-2023-06-08) + * **Feature**: You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning. +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.16.0](service/comprehendmedical/CHANGELOG.md#v1160-2023-06-08) + * **Feature**: This release supports a new set of entities and traits. +* `github.com/aws/aws-sdk-go-v2/service/paymentcryptography`: [v1.0.0](service/paymentcryptography/CHANGELOG.md#v100-2023-06-08) + * **Release**: New AWS service client module + * **Feature**: Initial release of AWS Payment Cryptography Control Plane service for creating and managing cryptographic keys used during card payment processing. +* `github.com/aws/aws-sdk-go-v2/service/paymentcryptographydata`: [v1.0.0](service/paymentcryptographydata/CHANGELOG.md#v100-2023-06-08) + * **Release**: New AWS service client module + * **Feature**: Initial release of AWS Payment Cryptography DataPlane Plane service for performing cryptographic operations typically used during card payment processing. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.19.0](service/servicecatalog/CHANGELOG.md#v1190-2023-06-08) + * **Feature**: New parameter added in ServiceCatalog DescribeProvisioningArtifact api - IncludeProvisioningArtifactParameters. This parameter can be used to return information about the parameters used to provision the product +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.17.0](service/timestreamwrite/CHANGELOG.md#v1170-2023-06-08) + * **Feature**: This release adds the capability for customers to define how their data should be partitioned, optimizing for certain access patterns. This definition will take place as a part of the table creation. + +# Release (2023-06-07) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.29.0](service/cloudformation/CHANGELOG.md#v1290-2023-06-07) + * **Feature**: AWS CloudFormation StackSets is updating the deployment experience for all stackset operations to skip suspended AWS accounts during deployments. StackSets will skip target AWS accounts that are suspended and set the Detailed Status of the corresponding stack instances as SKIPPED_SUSPENDED_ACCOUNT +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.21.0](service/cloudwatchlogs/CHANGELOG.md#v1210-2023-06-07) + * **Feature**: This change adds support for account level data protection policies using 3 new APIs, PutAccountPolicy, DeleteAccountPolicy and DescribeAccountPolicy. 
DescribeLogGroup API has been modified to indicate if account level policy is applied to the LogGroup via "inheritedProperties" list in the response. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.25.0](service/customerprofiles/CHANGELOG.md#v1250-2023-06-07) + * **Feature**: This release introduces event stream related APIs. +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.15](service/directconnect/CHANGELOG.md#v11815-2023-06-07) + * **Documentation**: This update corrects the jumbo frames mtu values from 9100 to 8500 for transit virtual interfaces. +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.19.0](service/emrcontainers/CHANGELOG.md#v1190-2023-06-07) + * **Feature**: EMR on EKS adds support for log rotation of Spark container logs with EMR-6.11.0 onwards, to the StartJobRun API. +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.19.0](service/iotdeviceadvisor/CHANGELOG.md#v1190-2023-06-07) + * **Feature**: AWS IoT Core Device Advisor now supports new Qualification Suite test case list. With this update, customers can more easily create new qualification test suite with an empty rootGroup input. + +# Release (2023-06-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.57.1](service/connect/CHANGELOG.md#v1571-2023-06-06) + * **Documentation**: GetMetricDataV2 API is now available in AWS GovCloud(US) region. +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.26.0](service/emr/CHANGELOG.md#v1260-2023-06-06) + * **Feature**: This release provides customers the ability to specify an allocation strategy amongst PRICE_CAPACITY_OPTIMIZED, CAPACITY_OPTIMIZED, LOWEST_PRICE, DIVERSIFIED for Spot instances in Instance Fleet clusters. This enables customers to choose an allocation strategy best suited for their workload. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.20.0](service/iam/CHANGELOG.md#v1200-2023-06-06) + * **Feature**: This release updates the AccountAlias regex pattern with the same length restrictions enforced by the length constraint. +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.14.0](service/inspector2/CHANGELOG.md#v1140-2023-06-06) + * **Feature**: Adds new response properties and request parameters for 'last scanned at' on the ListCoverage operation. This feature allows you to search and view the date on which your resources were last scanned by Inspector. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.38.0](service/iot/CHANGELOG.md#v1380-2023-06-06) + * **Feature**: Adding IoT Device Management Software Package Catalog APIs to register, store, and report system software packages, along with their versions and metadata in a centralized location. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.29.0](service/lexmodelsv2/CHANGELOG.md#v1290-2023-06-06) + * **Feature**: This release adds support for Lex Developers to create test sets and to execute those test-sets against their bots. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.37.0](service/quicksight/CHANGELOG.md#v1370-2023-06-06) + * **Feature**: QuickSight support for pivot table field collapse state, radar chart range scale and multiple scope options in conditional formatting. +* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.15.0](service/signer/CHANGELOG.md#v1150-2023-06-06) + * **Feature**: AWS Signer is launching Container Image Signing, a new feature that enables you to sign and verify container images. 
This feature enables you to validate that only container images you approve are used in your enterprise. +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.23.0](service/sqs/CHANGELOG.md#v1230-2023-06-06) + * **Feature**: Amazon SQS adds three new APIs - StartMessageMoveTask, CancelMessageMoveTask, and ListMessageMoveTasks to automate redriving messages from dead-letter queues to source queues or a custom destination. + +# Release (2023-06-05) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.28.0](service/cloudformation/CHANGELOG.md#v1280-2023-06-05) + * **Feature**: AWS CloudFormation StackSets provides customers with three new APIs to activate, deactivate, and describe AWS Organizations trusted access which is needed to get started with service-managed StackSets. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.99.0](service/ec2/CHANGELOG.md#v1990-2023-06-05) + * **Feature**: Making InstanceTagAttribute as the required parameter for the DeregisterInstanceEventNotificationAttributes and RegisterInstanceEventNotificationAttributes APIs. +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.10.0](service/finspace/CHANGELOG.md#v1100-2023-06-05) + * **Feature**: Releasing new Managed kdb Insights APIs +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.25.0](service/frauddetector/CHANGELOG.md#v1250-2023-06-05) + * **Feature**: Added new variable types, new DateTime data type, and new rules engine functions for interacting and working with DateTime data types. +* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.3.0](service/keyspaces/CHANGELOG.md#v130-2023-06-05) + * **Feature**: This release adds support for MRR GA launch, and includes multiregion support in create-keyspace, get-keyspace, and list-keyspace. +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.22.0](service/kms/CHANGELOG.md#v1220-2023-06-05) + * **Feature**: This release includes feature to import customer's asymmetric (RSA and ECC) and HMAC keys into KMS. It also includes feature to allow customers to specify number of days to schedule a KMS key deletion as a policy condition key. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.35.0](service/lambda/CHANGELOG.md#v1350-2023-06-05) + * **Feature**: Add Ruby 3.2 (ruby3.2) Runtime support to AWS Lambda. +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.16.0](service/mwaa/CHANGELOG.md#v1160-2023-06-05) + * **Feature**: This release adds ROLLING_BACK and CREATING_SNAPSHOT environment statuses for Amazon MWAA environments. + +# Release (2023-06-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.29.0](service/athena/CHANGELOG.md#v1290-2023-06-02) + * **Feature**: This release introduces the DeleteCapacityReservation API and the ability to manage capacity reservations using CloudFormation +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.26.0](service/cloudtrail/CHANGELOG.md#v1260-2023-06-02) + * **Feature**: This feature allows users to start and stop event ingestion on a CloudTrail Lake event data store. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.83.0](service/sagemaker/CHANGELOG.md#v1830-2023-06-02) + * **Feature**: This release adds Selective Execution feature that allows SageMaker Pipelines users to run selected steps in a pipeline. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.34.0](service/wafv2/CHANGELOG.md#v1340-2023-06-02) + * **Feature**: Added APIs to describe managed products. 
The APIs retrieve information about rule groups that are managed by AWS and by AWS Marketplace sellers. + +# Release (2023-06-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.11](service/alexaforbusiness/CHANGELOG.md#v11511-2023-06-01) + * **Documentation**: Alexa for Business has been deprecated and is no longer supported. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.30.0](service/appflow/CHANGELOG.md#v1300-2023-06-01) + * **Feature**: Added ability to select DataTransferApiType for DescribeConnector and CreateFlow requests when using Async supported connectors. Added supportedDataTransferType to DescribeConnector/DescribeConnectors/ListConnector response. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.24.0](service/customerprofiles/CHANGELOG.md#v1240-2023-06-01) + * **Feature**: This release introduces calculated attribute related APIs. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.22.0](service/ivs/CHANGELOG.md#v1220-2023-06-01) + * **Feature**: API Update for IVS Advanced Channel type +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.82.1](service/sagemaker/CHANGELOG.md#v1821-2023-06-01) + * **Documentation**: Amazon Sagemaker Autopilot adds support for Parquet file input to NLP text classification jobs. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.33.1](service/wafv2/CHANGELOG.md#v1331-2023-06-01) + * **Documentation**: Corrected the information for the header order FieldToMatch setting + +# Release (2023-05-31) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.33.0](service/configservice/CHANGELOG.md#v1330-2023-05-31) + * **Feature**: Resource Types Exclusion feature launch by AWS Config +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.24.0](service/frauddetector/CHANGELOG.md#v1240-2023-05-31) + * **Feature**: This release enables publishing event predictions from Amazon Fraud Detector (AFD) to Amazon EventBridge. For example, after getting predictions from AFD, Amazon EventBridge rules can be configured to trigger notification through an SNS topic, send a message with SES, or trigger Lambda workflows. +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.16.0](service/healthlake/CHANGELOG.md#v1160-2023-05-31) + * **Feature**: This release adds a new request parameter to the CreateFHIRDatastore API operation. IdentityProviderConfiguration specifies how you want to authenticate incoming requests to your Healthlake Data Store. +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.5.0](service/m2/CHANGELOG.md#v150-2023-05-31) + * **Feature**: Adds an optional create-only 'roleArn' property to Application resources. Enables PS and PO data set org types. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.45.0](service/rds/CHANGELOG.md#v1450-2023-05-31) + * **Feature**: This release adds support for changing the engine for Oracle using the ModifyDbInstance API +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.5](service/servicecatalog/CHANGELOG.md#v1185-2023-05-31) + * **Documentation**: Documentation updates for ServiceCatalog. +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.10.0](service/workspacesweb/CHANGELOG.md#v1100-2023-05-31) + * **Feature**: WorkSpaces Web now allows you to control which IP addresses your WorkSpaces Web portal may be accessed from. 
+ +# Release (2023-05-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.6.0](service/chimesdkvoice/CHANGELOG.md#v160-2023-05-30) + * **Feature**: Added optional CallLeg field to StartSpeakerSearchTask API request +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.50.0](service/glue/CHANGELOG.md#v1500-2023-05-30) + * **Feature**: Added Runtime parameter to allow selection of Ray Runtime +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.18.3](service/groundstation/CHANGELOG.md#v1183-2023-05-30) + * **Documentation**: Updating description of GetMinuteUsage to be clearer. +* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.4.0](service/iotfleetwise/CHANGELOG.md#v140-2023-05-30) + * **Feature**: Campaigns now support selecting Timestream or S3 as the data destination, Signal catalogs now support "Deprecation" keyword released in VSS v2.1 and "Comment" keyword released in VSS v3.0 +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.23.0](service/location/CHANGELOG.md#v1230-2023-05-30) + * **Feature**: This release adds API support for political views for the maps service APIs: CreateMap, UpdateMap, DescribeMap. +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.13.0](service/memorydb/CHANGELOG.md#v1130-2023-05-30) + * **Feature**: Amazon MemoryDB for Redis now supports AWS Identity and Access Management authentication access to Redis clusters starting with redis-engine version 7.0 +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.24.0](service/personalize/CHANGELOG.md#v1240-2023-05-30) + * **Feature**: This release provides support for the exclusion of certain columns for training when creating a solution and creating or updating a recommender with Amazon Personalize. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.26.0](service/polly/CHANGELOG.md#v1260-2023-05-30) + * **Feature**: Amazon Polly adds 2 new voices - Sofie (da-DK) and Niamh (en-IE) +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.32.0](service/securityhub/CHANGELOG.md#v1320-2023-05-30) + * **Feature**: Added new resource detail objects to ASFF, including resources for AwsGuardDutyDetector, AwsAmazonMqBroker, AwsEventSchemasRegistry, AwsAppSyncGraphQlApi and AwsStepFunctionStateMachine. +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.4.0](service/securitylake/CHANGELOG.md#v140-2023-05-30) + * **Feature**: Log sources are now versioned. AWS log sources and custom sources will now come with a version identifier that enables producers to vend multiple schema versions to subscribers. Security Lake API have been refactored to more closely align with AWS API conventions. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.33.0](service/wafv2/CHANGELOG.md#v1330-2023-05-30) + * **Feature**: This SDK release provides customers the ability to use Header Order as a field to match. + +# Release (2023-05-26) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.57.0](service/connect/CHANGELOG.md#v1570-2023-05-26) + * **Feature**: Documentation update for a new Initiation Method value in DescribeContact API +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.28.0](service/iotwireless/CHANGELOG.md#v1280-2023-05-26) + * **Feature**: Add Multicast Group support in Network Analyzer Configuration. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.82.0](service/sagemaker/CHANGELOG.md#v1820-2023-05-26) + * **Feature**: Added ml.p4d and ml.inf1 as supported instance type families for SageMaker Notebook Instances. 
+
+# Release (2023-05-25)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.21.0](service/applicationautoscaling/CHANGELOG.md#v1210-2023-05-25)
+  * **Feature**: With this release, ElastiCache customers will be able to use predefined metricType "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage" for their ElastiCache instances.
+* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.15.0](service/codepipeline/CHANGELOG.md#v1150-2023-05-25)
+  * **Feature**: Add PollingDisabledAt time information in PipelineMetadata object of GetPipeline API.
+* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.19.0](service/gamelift/CHANGELOG.md#v1190-2023-05-25)
+  * **Feature**: GameLift FleetIQ users can now filter game server claim requests to exclude servers on instances that are draining.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.49.0](service/glue/CHANGELOG.md#v1490-2023-05-25)
+  * **Feature**: Added ability to create data quality rulesets for shared, cross-account Glue Data Catalog tables. Added support for dataset comparison rules through a new parameter called AdditionalDataSources. Enhanced the data quality results with a map containing profiled metric values.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.10.0](service/migrationhubrefactorspaces/CHANGELOG.md#v1100-2023-05-25)
+  * **Feature**: This SDK update allows for path parameter syntax to be passed to the CreateRoute API. Path parameter syntax requires parameters to be enclosed in {} characters. This update also includes a new AppendSourcePath field which lets users forward the source path to the Service URL endpoint.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.81.0](service/sagemaker/CHANGELOG.md#v1810-2023-05-25)
+  * **Feature**: Amazon SageMaker Automatic Model Tuning now supports enabling Autotune for tuning jobs, which can choose tuning job configurations.
+
+# Release (2023-05-24)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.21.0](service/appsync/CHANGELOG.md#v1210-2023-05-24)
+  * **Feature**: This release introduces AppSync Merged APIs, which provide the ability to compose multiple source APIs into a single federated/merged API.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.56.0](service/connect/CHANGELOG.md#v1560-2023-05-24)
+  * **Feature**: Amazon Connect Evaluation Capabilities: validation improvements
+* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.16.0](service/costandusagereportservice/CHANGELOG.md#v1160-2023-05-24)
+  * **Feature**: Add support for split cost allocation data on a report.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.80.0](service/sagemaker/CHANGELOG.md#v1800-2023-05-24)
+  * **Feature**: SageMaker now provides an instantaneous deployment recommendation through the DescribeModel API
+
+# Release (2023-05-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.24.0](service/fms/CHANGELOG.md#v1240-2023-05-23)
+  * **Feature**: Fixes issue that could cause calls to GetAdminScope and ListAdminAccountsForOrganization to return a 500 Internal Server error.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.79.0](service/sagemaker/CHANGELOG.md#v1790-2023-05-23)
+  * **Feature**: Added ModelNameEquals, ModelPackageVersionArnEquals in request and ModelName, SamplePayloadUrl, ModelPackageVersionArn in response of ListInferenceRecommendationsJobs API.
Added Invocation timestamps in response of DescribeInferenceRecommendationsJob API & ListInferenceRecommendationsJobSteps API.
+* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.18.0](service/translate/CHANGELOG.md#v1180-2023-05-23)
+  * **Feature**: Added support for calling TranslateDocument API.
+
+# Release (2023-05-22)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.22.0](service/backup/CHANGELOG.md#v1220-2023-05-22)
+  * **Feature**: Added support for tags on restore.
+* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.19.2](service/pinpoint/CHANGELOG.md#v1192-2023-05-22)
+  * **Documentation**: Amazon Pinpoint is deprecating the tags parameter in the UpdateSegment, UpdateCampaign, UpdateEmailTemplate, UpdateSmsTemplate, UpdatePushTemplate, UpdateInAppTemplate and UpdateVoiceTemplate APIs. Amazon Pinpoint will end support for the tags parameter by May 22, 2023.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.36.0](service/quicksight/CHANGELOG.md#v1360-2023-05-22)
+  * **Feature**: Add support for Asset Bundle, Geospatial Heatmaps.
+
+# Release (2023-05-19)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.21.0](service/backup/CHANGELOG.md#v1210-2023-05-19)
+  * **Feature**: Add ResourceArn, ResourceType, and BackupVaultName to ListRecoveryPointsByLegalHold API response.
+* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.4.0](service/connectcases/CHANGELOG.md#v140-2023-05-19)
+  * **Feature**: This release adds the ability to create fields with type Url through the CreateField API. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagev2`: [v1.0.0](service/mediapackagev2/CHANGELOG.md#v100-2023-05-19)
+  * **Release**: New AWS service client module
+  * **Feature**: Adds support for the MediaPackage Live v2 API
+* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.18.0](service/sesv2/CHANGELOG.md#v1180-2023-05-19)
+  * **Feature**: This release allows customers to update the scaling mode property of dedicated IP pools with the PutDedicatedIpPoolScalingAttributes call.
+
+# Release (2023-05-18)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.28.0](service/athena/CHANGELOG.md#v1280-2023-05-18)
+  * **Feature**: Removing SparkProperties from EngineConfiguration object for StartSession API call
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.25.0](service/cloudtrail/CHANGELOG.md#v1250-2023-05-18)
+  * **Feature**: Add ConflictException to PutEventSelectors, add (Channel/EDS)ARNInvalidException to Tag APIs. These exceptions provide customers with more specific error messages instead of internal errors.
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.24.0](service/computeoptimizer/CHANGELOG.md#v1240-2023-05-18)
+  * **Feature**: In this launch, we add support for showing integration status with external metric providers such as Instana and Datadog in the GetEC2InstanceRecommendations and ExportEC2InstanceRecommendations APIs
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.55.0](service/connect/CHANGELOG.md#v1550-2023-05-18)
+  * **Feature**: You can programmatically create and manage prompts using APIs, for example, to extract prompts stored within Amazon Connect and add them to your Amazon S3 bucket. AWS CloudTrail, AWS CloudFormation and tagging are supported.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.98.0](service/ec2/CHANGELOG.md#v1980-2023-05-18)
+  * **Feature**: Add support for i4g.large, i4g.xlarge, i4g.2xlarge, i4g.4xlarge, i4g.8xlarge and i4g.16xlarge instances powered by AWS Graviton2 processors that deliver up to 15% better compute performance than our other storage-optimized instances.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.27.1](service/ecs/CHANGELOG.md#v1271-2023-05-18)
+  * **Documentation**: Documentation only release to address various tickets.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.37.0](service/mediaconvert/CHANGELOG.md#v1370-2023-05-18)
+  * **Feature**: This release introduces a new MXF Profile for XDCAM which is strictly compliant with the SMPTE RDD 9 standard and improved handling of output name modifiers.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.44.1](service/rds/CHANGELOG.md#v1441-2023-05-18)
+  * **Documentation**: RDS documentation update for the EngineVersion parameter of ModifyDBSnapshot
+* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.3.0](service/sagemakergeospatial/CHANGELOG.md#v130-2023-05-18)
+  * **Feature**: This release makes ExecutionRoleArn a required field in the StartEarthObservationJob API.
+
+# Release (2023-05-16)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.19.0](service/detective/CHANGELOG.md#v1190-2023-05-16)
+  * **Feature**: Added and updated API operations in Detective to support the integration of ASFF Security Hub findings.
+* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.14](service/directconnect/CHANGELOG.md#v11814-2023-05-16)
+  * **Documentation**: This release includes an update to the MTU value for CreateTransitVirtualInterface from 9001 to 8500.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.48.0](service/glue/CHANGELOG.md#v1480-2023-05-16)
+  * **Feature**: Add Support for Tags for Custom Entity Types
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.8](service/secretsmanager/CHANGELOG.md#v1198-2023-05-16)
+  * **Documentation**: Documentation updates for Secrets Manager
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.32.0](service/wafv2/CHANGELOG.md#v1320-2023-05-16)
+  * **Feature**: You can now rate limit web requests based on aggregation keys other than IP addresses, and you can aggregate using combinations of keys. You can also rate limit all requests that match a scope-down statement, without further aggregation.
+
+# Release (2023-05-15)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.27.0](service/athena/CHANGELOG.md#v1270-2023-05-15)
+  * **Feature**: You can now define custom spark properties at the start of the session for use cases like cluster encryption, table formats, and general Spark tuning.
+* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.3.0](service/codecatalyst/CHANGELOG.md#v130-2023-05-15)
+  * **Feature**: With this release, users can list the active sessions connected to their Dev Environment on AWS CodeCatalyst
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.28.0](service/rekognition/CHANGELOG.md#v1280-2023-05-15)
+  * **Feature**: This release adds a new EyeDirection attribute in Amazon Rekognition DetectFaces and IndexFaces APIs which predicts the yaw and pitch angles of a person's eye gaze direction for each face detected in the image.
+* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.2.0](service/rolesanywhere/CHANGELOG.md#v120-2023-05-15)
+  * **Feature**: Adds support for custom notification settings in a trust anchor. Introduces PutNotificationSettings and ResetNotificationSettings APIs. Updates DurationSeconds max value to 3600.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.29.0](service/transfer/CHANGELOG.md#v1290-2023-05-15)
+  * **Feature**: This release introduces the ability to require both password and SSH key when users authenticate to your Transfer Family servers that use the SFTP protocol.
+
+# Release (2023-05-11)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.54.2](service/connect/CHANGELOG.md#v1542-2023-05-11)
+  * **Documentation**: This release updates the GetMetricDataV2 API to support metric data for up to the last 35 days
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.27.0](service/elasticache/CHANGELOG.md#v1270-2023-05-11)
+  * **Feature**: Added support to modify the cluster mode configuration for existing ElastiCache ReplicationGroups. Customers can now modify the configuration from cluster mode disabled to cluster mode enabled.
+* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.19.0](service/elasticsearchservice/CHANGELOG.md#v1190-2023-05-11)
+  * **Feature**: This release fixes a DescribePackages API error with a null filter value parameter.
+* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.17.0](service/health/CHANGELOG.md#v1170-2023-05-11)
+  * **Feature**: Add support for regional endpoints
+* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.2.0](service/ivsrealtime/CHANGELOG.md#v120-2023-05-11)
+  * **Feature**: Add methods for inspecting and debugging stages: ListStageSessions, GetStageSession, ListParticipants, GetParticipant, and ListParticipantEvents.
+* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.4.0](service/omics/CHANGELOG.md#v140-2023-05-11)
+  * **Feature**: This release provides support for Ready2Run and GPU workflows, an improved read set filter, the direct upload of read sets into Omics Storage, and annotation parsing for analytics stores.
+* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.15.0](service/support/CHANGELOG.md#v1150-2023-05-11)
+  * **Feature**: This release adds 2 new Support APIs, DescribeCreateCaseOptions and DescribeSupportedLanguages. You can use these new APIs to get available support languages.
+
+# Release (2023-05-10)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.25.0](service/emr/CHANGELOG.md#v1250-2023-05-10)
+  * **Feature**: EMR Studio now supports programmatically executing notebooks on an EMR on EKS cluster. In addition, notebooks can now be executed by specifying their location in S3.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.44.0](service/rds/CHANGELOG.md#v1440-2023-05-10)
+  * **Feature**: Amazon Relational Database Service (RDS) updates for the new Aurora I/O-Optimized storage type for Amazon Aurora DB clusters (see the sketch below)
+* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.15.0](service/swf/CHANGELOG.md#v1150-2023-05-10)
+  * **Feature**: This release adds a new API parameter to exclude old history events from decision tasks.
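+The Aurora I/O-Optimized update above surfaces as a storage type on the existing cluster APIs. A minimal sketch, assuming the documented storage type string `aurora-iopt1`; the cluster identifier is a placeholder:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/rds"
+)
+
+func main() {
+	ctx := context.Background()
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := rds.NewFromConfig(cfg)
+
+	// Switch an existing Aurora cluster to the I/O-Optimized storage type.
+	_, err = client.ModifyDBCluster(ctx, &rds.ModifyDBClusterInput{
+		DBClusterIdentifier: aws.String("my-aurora-cluster"), // placeholder identifier
+		StorageType:         aws.String("aurora-iopt1"),      // assumption: the I/O-Optimized storage type string
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```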
+
+# Release (2023-05-09)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.20.0](service/applicationautoscaling/CHANGELOG.md#v1200-2023-05-09)
+  * **Feature**: With this release, Amazon SageMaker Serverless Inference customers can use Application Auto Scaling to auto scale the provisioned concurrency of their serverless endpoints.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.47.0](service/glue/CHANGELOG.md#v1470-2023-05-09)
+  * **Feature**: This release adds AmazonRedshift Source and Target nodes in addition to DynamicTransform OutputSchemas
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.78.0](service/sagemaker/CHANGELOG.md#v1780-2023-05-09)
+  * **Feature**: This release includes support for (1) Provisioned Concurrency for Amazon SageMaker Serverless Inference and (2) UpdateEndpointWeightsAndCapacities API for Serverless endpoints.
+
+# Release (2023-05-08)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.46.0](service/glue/CHANGELOG.md#v1460-2023-05-08)
+  * **Feature**: Support large worker types G.4x and G.8x for Glue Spark
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.23.0](service/guardduty/CHANGELOG.md#v1230-2023-05-08)
+  * **Feature**: Add AccessDeniedException 403 Error message code to support 3 Tagging related APIs
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.29.0](service/iotsitewise/CHANGELOG.md#v1290-2023-05-08)
+  * **Feature**: Provide support for 20,000 max results for GetAssetPropertyValueHistory/BatchGetAssetPropertyValueHistory and 15 minute aggregate resolution for GetAssetPropertyAggregates/BatchGetAssetPropertyAggregates
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.19.0](service/sts/CHANGELOG.md#v1190-2023-05-08)
+  * **Feature**: Documentation updates for AWS Security Token Service.
+
+# Release (2023-05-05)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.97.0](service/ec2/CHANGELOG.md#v1970-2023-05-05)
+  * **Feature**: This release adds support for the inf2 and trn1n instances. inf2 instances are purpose-built for deep learning inference, while trn1n instances are powered by AWS Trainium accelerators and build on the capabilities of Trainium-powered trn1 instances. (See the sketch below.)
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.13.0](service/inspector2/CHANGELOG.md#v1130-2023-05-05)
+  * **Feature**: Amazon Inspector now allows customers to search its vulnerability intelligence database if any of the Inspector scanning types are activated.
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.23.0](service/mediatailor/CHANGELOG.md#v1230-2023-05-05)
+  * **Feature**: This release adds support for AFTER_LIVE_EDGE mode configuration for avail suppression, and adds a fill-policy setting that sets the avail suppression to PARTIAL_AVAIL or FULL_AVAIL_ONLY when AFTER_LIVE_EDGE is enabled.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.22.0](service/sqs/CHANGELOG.md#v1220-2023-05-05)
+  * **Feature**: Revert previous SQS protocol change.
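+As far as the SDK is concerned, the new EC2 instance families above are plain instance-type strings. A sketch launching an inf2 instance; the AMI ID is a placeholder, and the instance type is cast from a string rather than assuming a generated constant name:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
+)
+
+func main() {
+	ctx := context.Background()
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := ec2.NewFromConfig(cfg)
+
+	out, err := client.RunInstances(ctx, &ec2.RunInstancesInput{
+		ImageId:      aws.String("ami-0123456789abcdef0"), // placeholder AMI ID
+		InstanceType: types.InstanceType("inf2.xlarge"),   // new Inferentia2 family; string cast avoids assuming a constant
+		MinCount:     aws.Int32(1),
+		MaxCount:     aws.Int32(1),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("launched %d instance(s)", len(out.Instances))
+}
+```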
+
+# Release (2023-05-04)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.26.0](service/cloudwatch/CHANGELOG.md#v1260-2023-05-04)
+  * **Feature**: Adds support for filtering by metric names in CloudWatch Metric Streams.
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.32.0](service/configservice/CHANGELOG.md#v1320-2023-05-04)
+  * **Feature**: Updated ResourceType enum with new resource types onboarded by AWS Config in April 2023.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.54.1](service/connect/CHANGELOG.md#v1541-2023-05-04)
+  * **Documentation**: Remove unused InvalidParameterException from CreateParticipant API
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.27.0](service/ecs/CHANGELOG.md#v1270-2023-05-04)
+  * **Feature**: Documentation update for new error type NamespaceNotFoundException for CreateCluster and UpdateCluster
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.28.0](service/networkfirewall/CHANGELOG.md#v1280-2023-05-04)
+  * **Feature**: This release adds support for the Suricata REJECT option in midstream exception configurations.
+* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.17.0](service/opensearch/CHANGELOG.md#v1170-2023-05-04)
+  * **Feature**: DescribeDomainNodes: a new API that provides configuration information for nodes that are part of the domain
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.35.0](service/quicksight/CHANGELOG.md#v1350-2023-05-04)
+  * **Feature**: Add support for Topic, Dataset parameters and VPC
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.27.0](service/rekognition/CHANGELOG.md#v1270-2023-05-04)
+  * **Feature**: This release adds a new attribute FaceOccluded. Additionally, you can now select attributes individually (e.g. ["DEFAULT", "FACE_OCCLUDED", "AGE_RANGE"] instead of ["ALL"]), which can reduce response time.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.33.1](service/s3/CHANGELOG.md#v1331-2023-05-04)
+  * **Documentation**: Documentation updates for Amazon S3
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.77.0](service/sagemaker/CHANGELOG.md#v1770-2023-05-04)
+  * **Feature**: We added support for the ml.inf2 and ml.trn1 family of instances on Amazon SageMaker for deploying machine learning (ML) models for Real-time and Asynchronous inference. You can use these instances to achieve high performance at a low cost for generative artificial intelligence (AI) models.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.31.0](service/securityhub/CHANGELOG.md#v1310-2023-05-04)
+  * **Feature**: Add support for Finding History.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.21.0](service/sqs/CHANGELOG.md#v1210-2023-05-04)
+  * **Feature**: This release enables customers to call SQS using the AWS JSON-1.0 protocol.
+
+# Release (2023-05-03)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.20.0](service/appsync/CHANGELOG.md#v1200-2023-05-03)
+  * **Feature**: Private API support for AWS AppSync. With Private APIs, you can now create GraphQL APIs that can only be accessed from your Amazon Virtual Private Cloud ("VPC").
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.96.0](service/ec2/CHANGELOG.md#v1960-2023-05-03)
+  * **Feature**: Adds an SDK paginator for GetNetworkInsightsAccessScopeAnalysisFindings
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.12.0](service/inspector2/CHANGELOG.md#v1120-2023-05-03)
+  * **Feature**: This feature provides deep inspection for Linux-based instances
+* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.12.0](service/iottwinmaker/CHANGELOG.md#v1120-2023-05-03)
+  * **Feature**: This release adds a field for the GetScene API to return error code and message from dependency services.
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.27.0](service/networkfirewall/CHANGELOG.md#v1270-2023-05-03)
+  * **Feature**: AWS Network Firewall now supports policy level HOME_NET variable overrides.
+* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.16.0](service/opensearch/CHANGELOG.md#v1160-2023-05-03)
+  * **Feature**: Amazon OpenSearch Service adds the option to deploy a domain across multiple Availability Zones, with each AZ containing a complete copy of data and with nodes in one AZ acting as a standby. This option provides 99.99% availability and consistent performance in the event of infrastructure failure.
+* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.20.0](service/wellarchitected/CHANGELOG.md#v1200-2023-05-03)
+  * **Feature**: This release deepens integration with AWS Service Catalog AppRegistry to improve workload resource discovery.
+
+# Release (2023-05-02)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.29.0](service/appflow/CHANGELOG.md#v1290-2023-05-02)
+  * **Feature**: This release adds a new API to cancel flow executions.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.54.0](service/connect/CHANGELOG.md#v1540-2023-05-02)
+  * **Feature**: Amazon Connect Service Rules API update: Added OnContactEvaluationSubmit event source to support user configuring evaluation form rules.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.26.3](service/ecs/CHANGELOG.md#v1263-2023-05-02)
+  * **Documentation**: Documentation only update to address Amazon ECS tickets.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.40.0](service/kendra/CHANGELOG.md#v1400-2023-05-02)
+  * **Feature**: AWS Kendra now supports configuring document fields/attributes via the GetQuerySuggestions API. You can now base query suggestions on the contents of document fields.
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.11.0](service/resiliencehub/CHANGELOG.md#v1110-2023-05-02)
+  * **Feature**: This release will improve resource level transparency in applications by discovering previously hidden resources.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.76.0](service/sagemaker/CHANGELOG.md#v1760-2023-05-02)
+  * **Feature**: Amazon Sagemaker Autopilot supports training models with sample weights and additional objective metrics.
+
+# Release (2023-05-01)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.23.0](service/computeoptimizer/CHANGELOG.md#v1230-2023-05-01)
+  * **Feature**: Support for tag filtering within Compute Optimizer: the ability to filter recommendation results by tag and tag key-value pairs, and the ability to filter by inferred workload type.
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.21.0](service/kms/CHANGELOG.md#v1210-2023-05-01)
+  * **Feature**: This release makes the NitroEnclave request parameter Recipient and the response field for CiphertextForRecipient available in AWS SDKs. It also adds the regex pattern for CloudHsmClusterId validation.
+
+# Release (2023-04-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.28.0](service/appflow/CHANGELOG.md#v1280-2023-04-28)
+  * **Feature**: Adds JWT support for Salesforce credentials.
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.26.0](service/athena/CHANGELOG.md#v1260-2023-04-28)
+  * **Feature**: You can now use capacity reservations on Amazon Athena to run SQL queries on fully-managed compute capacity.
+* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.12](service/directconnect/CHANGELOG.md#v11812-2023-04-28)
+  * **Documentation**: This release corrects the jumbo frames MTU from 9100 to 8500.
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.20.0](service/efs/CHANGELOG.md#v1200-2023-04-28)
+  * **Feature**: This release adds the PAUSED and PAUSING states as returned values in the DescribeReplicationConfigurations response.
+* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.13.0](service/grafana/CHANGELOG.md#v1130-2023-04-28)
+  * **Feature**: This release adds support for the grafanaVersion parameter in CreateWorkspace.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.37.0](service/iot/CHANGELOG.md#v1370-2023-04-28)
+  * **Feature**: This release allows AWS IoT Core users to specify a TLS security policy when creating and updating AWS IoT Domain Configurations.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.26.0](service/rekognition/CHANGELOG.md#v1260-2023-04-28)
+  * **Feature**: Added support for aggregating moderation labels by video segment timestamps for Stored Video Content Moderation APIs and added additional information about the job to all Stored Video Get API responses.
+* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.2.0](service/simspaceweaver/CHANGELOG.md#v120-2023-04-28)
+  * **Feature**: Added a new CreateSnapshot API. For the StartSimulation API, SchemaS3Location is now optional and a new SnapshotS3Location parameter was added. For the DescribeSimulation API, added the SNAPSHOT_IN_PROGRESS simulation state, deprecated SchemaError, and added new fields: StartError and SnapshotS3Location.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.31.0](service/wafv2/CHANGELOG.md#v1310-2023-04-28)
+  * **Feature**: You can now associate a web ACL with a Verified Access instance.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.11](service/workspaces/CHANGELOG.md#v12811-2023-04-28)
+  * **Documentation**: Added Windows 11 to support Microsoft_Office_2019
+
+# Release (2023-04-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.95.0](service/ec2/CHANGELOG.md#v1950-2023-04-27)
+  * **Feature**: This release adds support for AMD SEV-SNP on EC2 instances.
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.18.0](service/emrcontainers/CHANGELOG.md#v1180-2023-04-27)
+  * **Feature**: This release adds GetManagedEndpointSessionCredentials, a new API that allows customers to generate an auth token to connect to a managed endpoint, enabling features such as self-hosted Jupyter notebooks for EMR on EKS.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.22.0](service/guardduty/CHANGELOG.md#v1220-2023-04-27)
+  * **Feature**: Added API support to initiate on-demand malware scans on specific resources. (See the sketch below.)
+* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.18.0](service/iotdeviceadvisor/CHANGELOG.md#v1180-2023-04-27)
+  * **Feature**: AWS IoT Core Device Advisor now supports MQTT over WebSocket. With this update, customers can run all three test suites of AWS IoT Core Device Advisor - qualification, custom, and long duration tests - using Signature Version 4 for MQTT over WebSocket.
+* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.20.0](service/kafka/CHANGELOG.md#v1200-2023-04-27)
+  * **Feature**: Amazon MSK has added new APIs that allow multi-VPC private connectivity and cluster policy support for Amazon MSK clusters, simplifying connectivity and access between your Apache Kafka clients hosted in different VPCs and AWS accounts and your Amazon MSK clusters.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.34.0](service/lambda/CHANGELOG.md#v1340-2023-04-27)
+  * **Feature**: Add Java 17 (java17) support to AWS Lambda
+* `github.com/aws/aws-sdk-go-v2/service/osis`: [v1.0.1](service/osis/CHANGELOG.md#v101-2023-04-27)
+  * **Documentation**: Documentation updates for OpenSearch Ingestion
+* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.10](service/qldb/CHANGELOG.md#v11510-2023-04-27)
+  * **Documentation**: Documentation updates for Amazon QLDB
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.75.0](service/sagemaker/CHANGELOG.md#v1750-2023-04-27)
+  * **Feature**: Added ml.p4d.24xlarge and ml.p4de.24xlarge as supported instances for SageMaker Studio
+
+# Release (2023-04-26)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/osis`: [v1.0.0](service/osis/CHANGELOG.md#v100-2023-04-26)
+  * **Release**: New AWS service client module
+  * **Feature**: Initial release for OpenSearch Ingestion
+
+# Release (2023-04-25)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.15.0](service/chimesdkmessaging/CHANGELOG.md#v1150-2023-04-25)
+  * **Feature**: Remove a non-actionable field from UpdateChannelReadMarker and DeleteChannelRequest. Add precise exceptions to DeleteChannel and DeleteStreamingConfigurations error cases.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.53.0](service/connect/CHANGELOG.md#v1530-2023-04-25)
+  * **Feature**: Amazon Connect, Contact Lens Evaluation API release including ability to manage forms and to submit contact evaluations.
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.24.0](service/datasync/CHANGELOG.md#v1240-2023-04-25)
+  * **Feature**: This release adds 13 new APIs to support AWS DataSync Discovery GA.
+* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.17.0](service/directoryservice/CHANGELOG.md#v1170-2023-04-25)
+  * **Feature**: New field added in AWS Managed Microsoft AD DescribeSettings response and regex pattern update for UpdateSettings value. Added length validation to RemoteDomainName.
+* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.19.0](service/pinpoint/CHANGELOG.md#v1190-2023-04-25)
+  * **Feature**: Adds support for journey runs and querying journey execution metrics based on journey runs. Adds execution metrics to campaign activities. Updates docs for Advanced Quiet Time.
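+A sketch of the on-demand malware scan from the GuardDuty entry above, under the assumption that the operation is exposed as StartMalwareScan taking the target resource's ARN and returning a scan identifier; the ARN is a placeholder:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/guardduty"
+)
+
+func main() {
+	ctx := context.Background()
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := guardduty.NewFromConfig(cfg)
+
+	// Kick off an on-demand malware scan of a single EC2 instance.
+	out, err := client.StartMalwareScan(ctx, &guardduty.StartMalwareScanInput{
+		// Placeholder ARN; the scan targets the resource it identifies.
+		ResourceArn: aws.String("arn:aws:ec2:eu-west-1:111122223333:instance/i-0123456789abcdef0"),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Println("scan started:", aws.ToString(out.ScanId))
+}
+```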
+
+# Release (2023-04-24)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.18.0
+  * **Feature**: add recursion detection middleware to all SDK requests to avoid recursive invocation in Lambda
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.27.0](service/appflow/CHANGELOG.md#v1270-2023-04-24)
+  * **Feature**: Increased the max length for RefreshToken and AuthCode from 2048 to 4096.
+* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.2.5](service/codecatalyst/CHANGELOG.md#v125-2023-04-24)
+  * **Documentation**: Documentation updates for Amazon CodeCatalyst.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.94.0](service/ec2/CHANGELOG.md#v1940-2023-04-24)
+  * **Feature**: API changes to AWS Verified Access related to identity providers' information.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.36.0](service/mediaconvert/CHANGELOG.md#v1360-2023-04-24)
+  * **Feature**: This release introduces a noise reduction pre-filter, linear interpolation deinterlace mode, video pass-through, updated default job settings, and expanded LC-AAC Stereo audio bitrate ranges.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.25.0](service/rekognition/CHANGELOG.md#v1250-2023-04-24)
+  * **Feature**: Added new status result to Liveness session status.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.28.0](service/route53/CHANGELOG.md#v1280-2023-04-24)
+  * **Feature**: added a paginator for ListResourceRecordSets
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.33.0](service/s3/CHANGELOG.md#v1330-2023-04-24)
+  * **Feature**: added custom paginators for ListMultipartUploads and ListObjectVersions
+
+# Release (2023-04-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.52.0](service/connect/CHANGELOG.md#v1520-2023-04-21)
+  * **Feature**: This release adds a new API CreateParticipant. For Amazon Connect Chat, you can use this new API to customize chat flow experiences.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.26.1](service/ecs/CHANGELOG.md#v1261-2023-04-21)
+  * **Documentation**: Documentation update to address various Amazon ECS tickets.
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.23.0](service/fms/CHANGELOG.md#v1230-2023-04-21)
+  * **Feature**: AWS Firewall Manager adds support for multiple administrators. You can now delegate more than one administrator per organization.
+
+# Release (2023-04-20)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.23.0](service/chime/CHANGELOG.md#v1230-2023-04-20)
+  * **Feature**: Adds support for Hindi and Thai languages and additional Amazon Transcribe parameters to the StartMeetingTranscription API.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.4.0](service/chimesdkmediapipelines/CHANGELOG.md#v140-2023-04-20)
+  * **Feature**: This release adds support for specifying the recording file format in an S3 recording sink configuration.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.15.0](service/chimesdkmeetings/CHANGELOG.md#v1150-2023-04-20)
+  * **Feature**: Adds support for Hindi and Thai languages and additional Amazon Transcribe parameters to the StartMeetingTranscription API.
+* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.18.0](service/gamelift/CHANGELOG.md#v1180-2023-04-20)
+  * **Feature**: Amazon GameLift supports creating Builds for the Windows 2016 operating system.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.21.0](service/guardduty/CHANGELOG.md#v1210-2023-04-20)
+  * **Feature**: This release adds support for the new Lambda Protection feature.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.36.0](service/iot/CHANGELOG.md#v1360-2023-04-20)
+  * **Feature**: Support additional OTA states in GetOTAUpdate API
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.74.0](service/sagemaker/CHANGELOG.md#v1740-2023-04-20)
+  * **Feature**: Amazon SageMaker Canvas adds ModelRegisterSettings support for CanvasAppSettings.
+* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.19.0](service/snowball/CHANGELOG.md#v1190-2023-04-20)
+  * **Feature**: Adds support for Amazon S3 compatible storage. AWS Snow Family customers can now use Amazon S3 compatible storage on Snowball Edge devices. Also adds support for V3_5S. This is a refreshed AWS Snowball Edge Storage Optimized device type with 210TB SSD (customer usable).
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.30.0](service/wafv2/CHANGELOG.md#v1300-2023-04-20)
+  * **Feature**: You can now create encrypted API keys to use in a client application integration of the JavaScript CAPTCHA API. You can also retrieve a list of your API keys and the JavaScript application integration URL.
+
+# Release (2023-04-19)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.24.0](service/comprehend/CHANGELOG.md#v1240-2023-04-19)
+  * **Feature**: This release supports native document models for custom classification, in addition to plain-text models. You train native document models using documents (PDF, Word, images) in their native format.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.26.0](service/ecs/CHANGELOG.md#v1260-2023-04-19)
+  * **Feature**: This release supports the Account Setting "TagResourceAuthorization" that allows for enhanced Tagging security controls.
+* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.18.0](service/ram/CHANGELOG.md#v1180-2023-04-19)
+  * **Feature**: This release adds support for customer managed permissions. Customer managed permissions enable customers to author and manage tailored permissions for resources shared using RAM.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.43.1](service/rds/CHANGELOG.md#v1431-2023-04-19)
+  * **Documentation**: Adds support for the ImageId parameter of CreateCustomDBEngineVersion to RDS Custom for Oracle
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.32.0](service/s3/CHANGELOG.md#v1320-2023-04-19)
+  * **Feature**: Provides support for the "Snow" storage class.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.4](service/secretsmanager/CHANGELOG.md#v1194-2023-04-19)
+  * **Documentation**: Documentation updates for Secrets Manager
+
+# Release (2023-04-17)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.26.0](service/appflow/CHANGELOG.md#v1260-2023-04-17)
+  * **Feature**: This release adds a Client Token parameter to the following AppFlow APIs: Create/Update Connector Profile, Create/Update Flow, Start Flow, Register Connector, Update Connector Registration. The Client Token parameter allows idempotent operations for these APIs.
+* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.13.0](service/drs/CHANGELOG.md#v1130-2023-04-17)
+  * **Feature**: Changed existing APIs and added new APIs to support using an account-level launch configuration template with AWS Elastic Disaster Recovery.
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.5](service/dynamodb/CHANGELOG.md#v1195-2023-04-17)
+  * **Documentation**: Documentation updates for DynamoDB API
+* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.7.0](service/emrserverless/CHANGELOG.md#v170-2023-04-17)
+  * **Feature**: The GetJobRun API has been updated to include the job's billed resource utilization. This utilization shows the aggregate vCPU, memory and storage that AWS has billed for the job run. The billed resources include a 1-minute minimum usage for workers, plus additional storage over 20 GB per worker.
+* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.2.0](service/internetmonitor/CHANGELOG.md#v120-2023-04-17)
+  * **Feature**: This release includes a new configurable value, TrafficPercentageToMonitor, which allows users to adjust the amount of traffic monitored by percentage
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.27.0](service/iotwireless/CHANGELOG.md#v1270-2023-04-17)
+  * **Feature**: Supports the new LoRaWAN roaming feature, allows configuring MaxEirp for LoRaWAN gateways, and allows configuring PingSlotPeriod for LoRaWAN multicast groups
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.33.0](service/lambda/CHANGELOG.md#v1330-2023-04-17)
+  * **Feature**: Add Python 3.10 (python3.10) support to AWS Lambda (see the sketch below)
+
+# Release (2023-04-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.25.1](service/ecs/CHANGELOG.md#v1251-2023-04-14)
+  * **Documentation**: This release supports ephemeral storage for AWS Fargate Windows containers.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.32.0](service/lambda/CHANGELOG.md#v1320-2023-04-14)
+  * **Feature**: This release adds SnapStart related exceptions to the InvokeWithResponseStream API. IAM access related documentation is also added for this API.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.8](service/migrationhubrefactorspaces/CHANGELOG.md#v198-2023-04-14)
+  * **Documentation**: Doc-only update for Refactor Spaces environments without the network bridge feature.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.43.0](service/rds/CHANGELOG.md#v1430-2023-04-14)
+  * **Feature**: This release adds support for modifying the engine mode of database clusters.
+
+# Release (2023-04-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.5.0](service/chimesdkvoice/CHANGELOG.md#v150-2023-04-13)
+  * **Feature**: This release adds tagging support for Voice Connectors and SIP Media Applications
+* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.19.0](service/mediaconnect/CHANGELOG.md#v1190-2023-04-13)
+  * **Feature**: Gateway is a new feature of AWS Elemental MediaConnect. Gateway allows the deployment of on-premises resources for the purpose of transporting live video to and from the AWS Cloud.
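+New Lambda runtimes, like python3.10 above, surface in the SDK only as runtime identifiers. A sketch moving an existing function to the new runtime; the function name is a placeholder, and the identifier is cast from a string rather than assuming a generated constant name:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/lambda"
+	"github.com/aws/aws-sdk-go-v2/service/lambda/types"
+)
+
+func main() {
+	ctx := context.Background()
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := lambda.NewFromConfig(cfg)
+
+	// Switch an existing function to the newly supported runtime.
+	_, err = client.UpdateFunctionConfiguration(ctx, &lambda.UpdateFunctionConfigurationInput{
+		FunctionName: aws.String("my-function"),   // placeholder function name
+		Runtime:      types.Runtime("python3.10"), // new runtime identifier from this release
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```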
+
+# Release (2023-04-12)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.18.0](service/groundstation/CHANGELOG.md#v1180-2023-04-12)
+  * **Feature**: AWS Ground Station Wideband DigIF GA Release
+* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.15.5](service/managedblockchain/CHANGELOG.md#v1155-2023-04-12)
+  * **Documentation**: Removal of the Ropsten network. The Ethereum Foundation ceased support of Ropsten on December 31st, 2022.
+
+# Release (2023-04-11)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.16.0](service/ecrpublic/CHANGELOG.md#v1160-2023-04-11)
+  * **Feature**: This release will allow using a registry alias as the registryId in the BatchDeleteImage request.
+* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.6.0](service/emrserverless/CHANGELOG.md#v160-2023-04-11)
+  * **Feature**: This release extends the GetJobRun API to return the job run timeout (executionTimeoutMinutes) specified during the StartJobRun call (or the default timeout of 720 minutes if none was specified).
+* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.19.0](service/eventbridge/CHANGELOG.md#v1190-2023-04-11)
+  * **Feature**: EventBridge PutTarget support for multiple SQL arguments on RedshiftDataParameters
+* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.15.0](service/iotdataplane/CHANGELOG.md#v1150-2023-04-11)
+  * **Feature**: This release adds support for MQTT5 user properties when calling the AWS IoT GetRetainedMessage API
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.29.0](service/wafv2/CHANGELOG.md#v1290-2023-04-11)
+  * **Feature**: For web ACLs that protect CloudFront distributions, the default request body inspection size is now 16 KB, and you can use the new association configuration to increase the inspection size further, up to 64 KB. Sizes over 16 KB can incur additional costs.
+
+# Release (2023-04-10)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.51.0](service/connect/CHANGELOG.md#v1510-2023-04-10)
+  * **Feature**: This release adds the ability to configure an agent's routing profile to receive contacts from multiple channels at the same time by extending the UpdateRoutingProfileConcurrency, CreateRoutingProfile and DescribeRoutingProfile APIs.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.25.0](service/ecs/CHANGELOG.md#v1250-2023-04-10)
+  * **Feature**: This release adds support for enabling FIPS compliance on Amazon ECS Fargate tasks
+* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.16.0](service/marketplacecatalog/CHANGELOG.md#v1160-2023-04-10)
+  * **Feature**: Added three new APIs to support resource sharing: GetResourcePolicy, PutResourcePolicy, and DeleteResourcePolicy. Added new OwnershipType field to ListEntities request to let users filter on entities that are shared with them. Increased max page size of ListEntities response from 20 to 50 results.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.35.0](service/mediaconvert/CHANGELOG.md#v1350-2023-04-10)
+  * **Feature**: AWS Elemental MediaConvert SDK now supports conversion of 608 paint-on captions to pop-on captions for SCC sources.
+* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.3.0](service/omics/CHANGELOG.md#v130-2023-04-10)
+  * **Feature**: Remove unexpected API changes.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.24.0](service/rekognition/CHANGELOG.md#v1240-2023-04-10)
+  * **Feature**: This release adds support for Face Liveness APIs in Amazon Rekognition. Updates UpdateStreamProcessor to return ResourceInUseException Exception. Minor updates to API documentation.
+
+# Release (2023-04-07)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.15.0](service/dlm/CHANGELOG.md#v1150-2023-04-07)
+  * **Announcement**: This release includes breaking changes for the timestamp trait on the data lifecycle management client.
+  * **Feature**: Updated timestamp format for GetLifecyclePolicy API
+  * **Bug Fix**: Correct timestamp type for data lifecycle manager.
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.21.0](service/docdb/CHANGELOG.md#v1210-2023-04-07)
+  * **Feature**: This release adds a new parameter 'DBClusterParameterGroupName' to the 'RestoreDBClusterFromSnapshot' API to associate the name of the DB cluster parameter group while performing a restore.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.8](service/fsx/CHANGELOG.md#v1288-2023-04-07)
+  * **Documentation**: Amazon FSx for Lustre now supports creating data repository associations on Persistent_1 and Scratch_2 file systems.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.31.0](service/lambda/CHANGELOG.md#v1310-2023-04-07)
+  * **Feature**: This release adds a new Lambda InvokeWithResponseStream API to support streaming Lambda function responses. The release also adds a new InvokeMode parameter to Function Url APIs to control whether the response will be streamed or buffered.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.34.0](service/quicksight/CHANGELOG.md#v1340-2023-04-07)
+  * **Feature**: This release has two changes: adding the OR condition to tag-based RLS rules in CreateDataSet and UpdateDataSet; adding RefreshSchedule and Incremental RefreshProperties operations for users to programmatically configure SPICE dataset ingestions.
+* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.19.3](service/redshiftdata/CHANGELOG.md#v1193-2023-04-07)
+  * **Documentation**: Update documentation of API descriptions as needed in support of temporary credentials with IAM identity.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.1](service/servicecatalog/CHANGELOG.md#v1181-2023-04-07)
+  * **Documentation**: Updates description for property
+
+# Release (2023-04-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.27.0](service/cloudformation/CHANGELOG.md#v1270-2023-04-06)
+  * **Feature**: Including UPDATE_COMPLETE as a failed status for DeleteStack waiter.
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.22.0](service/greengrassv2/CHANGELOG.md#v1220-2023-04-06)
+  * **Feature**: Add support for SUCCEEDED value in coreDeviceExecutionStatus field. Documentation updates for Greengrass V2.
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.21.0](service/proton/CHANGELOG.md#v1210-2023-04-06)
+  * **Feature**: This release adds support for the AWS Proton service sync feature. Service sync enables managing an AWS Proton service (creating and updating instances) and all of its corresponding service instances from a Git repository.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.42.1](service/rds/CHANGELOG.md#v1421-2023-04-06)
+  * **Documentation**: Adds and updates the SDK examples
+
+# Release (2023-04-05)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.31.0](service/configservice/CHANGELOG.md#v1310-2023-04-05)
+  * **Feature**: This release adds resourceType enums for types released in March 2023.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.24.3](service/ecs/CHANGELOG.md#v1243-2023-04-05)
+  * **Documentation**: This is a documentation-only update to add information about Amazon Elastic Inference (EI).
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.7](service/identitystore/CHANGELOG.md#v1167-2023-04-05)
+  * **Documentation**: Documentation updates for Identity Store CLI command reference.
+* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.1.0](service/ivsrealtime/CHANGELOG.md#v110-2023-04-05)
+  * **Feature**: Fix ParticipantToken ExpirationTime format
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.26.0](service/networkfirewall/CHANGELOG.md#v1260-2023-04-05)
+  * **Feature**: AWS Network Firewall now supports IPv6-only subnets.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.18.0](service/servicecatalog/CHANGELOG.md#v1180-2023-04-05)
+  * **Feature**: removed incorrect product type value
+* `github.com/aws/aws-sdk-go-v2/service/vpclattice`: [v1.0.1](service/vpclattice/CHANGELOG.md#v101-2023-04-05)
+  * **Documentation**: This release removes the entities in the API doc model package for auth policies.
+
+# Release (2023-04-04)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.10.0](service/amplifyuibuilder/CHANGELOG.md#v1100-2023-04-04)
+  * **Feature**: Support StorageField and custom displays for data-bound options in form builder. Support non-string operands for predicates in collections. Support choosing client to get token from.
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.28.1](service/autoscaling/CHANGELOG.md#v1281-2023-04-04)
+  * **Documentation**: Documentation updates for Amazon EC2 Auto Scaling
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.93.0](service/ec2/CHANGELOG.md#v1930-2023-04-04)
+  * **Feature**: C6in, M6in, M6idn, R6in and R6idn bare metal instances are powered by 3rd Generation Intel Xeon Scalable processors and offer up to 200 Gbps of network bandwidth.
+* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.13.0](service/elasticinference/CHANGELOG.md#v1130-2023-04-04)
+  * **Feature**: Updated public documentation for the Describe and Tagging APIs.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.73.0](service/sagemaker/CHANGELOG.md#v1730-2023-04-04)
+  * **Feature**: Amazon SageMaker Asynchronous Inference now allows customers to receive failure model responses in S3 and receive success/failure model responses in SNS notifications.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.19.0](service/sagemakerruntime/CHANGELOG.md#v1190-2023-04-04)
+  * **Feature**: Amazon SageMaker Asynchronous Inference now provides customers a FailureLocation as a response parameter in the InvokeEndpointAsync API to capture the model failure responses. (See the sketch below.)
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.28.0](service/wafv2/CHANGELOG.md#v1280-2023-04-04)
+  * **Feature**: This release rolls back the association config feature for web ACLs that protect CloudFront distributions.
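+A sketch of the FailureLocation addition from the sagemakerruntime entry above, assuming the new field appears on the InvokeEndpointAsync output alongside the existing OutputLocation; the endpoint name and S3 URIs are placeholders:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/sagemakerruntime"
+)
+
+func main() {
+	ctx := context.Background()
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := sagemakerruntime.NewFromConfig(cfg)
+
+	out, err := client.InvokeEndpointAsync(ctx, &sagemakerruntime.InvokeEndpointAsyncInput{
+		EndpointName:  aws.String("my-async-endpoint"),                  // placeholder
+		InputLocation: aws.String("s3://my-bucket/inputs/payload.json"), // placeholder
+		ContentType:   aws.String("application/json"),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Success responses land at OutputLocation; per this release, failure
+	// responses are written to the new FailureLocation.
+	log.Println("output:", aws.ToString(out.OutputLocation))
+	log.Println("failures:", aws.ToString(out.FailureLocation))
+}
+```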
+
+# Release (2023-04-03)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.45.0](service/glue/CHANGELOG.md#v1450-2023-04-03)
+  * **Feature**: Add support for database-level federation
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.21.0](service/lakeformation/CHANGELOG.md#v1210-2023-04-03)
+  * **Feature**: Add support for database-level federation
+* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.18.0](service/licensemanager/CHANGELOG.md#v1180-2023-04-03)
+  * **Feature**: This release adds grant override options to the CreateGrantVersion API. These options can be used to specify grant replacement behavior during grant activation.
+* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.15.0](service/mwaa/CHANGELOG.md#v1150-2023-04-03)
+  * **Feature**: This Amazon MWAA release adds the ability to customize the Apache Airflow environment by launching a shell script at startup. This shell script is hosted in your environment's Amazon S3 bucket. Amazon MWAA runs the script before installing requirements and initializing the Apache Airflow process.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.17.0](service/servicecatalog/CHANGELOG.md#v1170-2023-04-03)
+  * **Feature**: This release introduces Service Catalog support for Terraform open source. It enables (1) the notify* APIs in Service Catalog, which the Terraform engine uses to report the result of the provisioning engine execution, and (2) a new TERRAFORM_OPEN_SOURCE product type in the CreateProduct API.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.27.0](service/wafv2/CHANGELOG.md#v1270-2023-04-03)
+  * **Feature**: For web ACLs that protect CloudFront distributions, the default request body inspection size is now 16 KB, and you can use the new association configuration to increase the inspection size further, up to 64 KB. Sizes over 16 KB can incur additional costs.
+
+# Release (2023-03-31)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.92.1](service/ec2/CHANGELOG.md#v1921-2023-03-31)
+  * **Documentation**: Documentation updates for EC2 On Demand Capacity Reservations
+* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.1.0](service/internetmonitor/CHANGELOG.md#v110-2023-03-31)
+  * **Feature**: This release adds a new feature for Amazon CloudWatch Internet Monitor that enables customers to deliver internet measurements to Amazon S3 buckets as well as CloudWatch Logs.
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.10.1](service/resiliencehub/CHANGELOG.md#v1101-2023-03-31)
+  * **Documentation**: Adding EKS related documentation for appTemplateBody
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.31.1](service/s3/CHANGELOG.md#v1311-2023-03-31)
+  * **Documentation**: Documentation updates for Amazon S3
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.14.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1140-2023-03-31)
+  * **Feature**: In this release, you can now choose between soft delete and hard delete when calling the DeleteRecord API, so you have more flexibility when it comes to managing online store data. (See the sketch below.)
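+The soft/hard delete choice above would surface as a deletion-mode parameter on DeleteRecord. A sketch with placeholder feature group and record identifiers, under the assumption that the parameter is named DeletionMode with values "SoftDelete" and "HardDelete":
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	fs "github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime"
+	"github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime/types"
+)
+
+func main() {
+	ctx := context.Background()
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := fs.NewFromConfig(cfg)
+
+	// Soft-delete a record from the online store.
+	_, err = client.DeleteRecord(ctx, &fs.DeleteRecordInput{
+		FeatureGroupName:              aws.String("my-feature-group"), // placeholder
+		RecordIdentifierValueAsString: aws.String("customer-42"),      // placeholder
+		EventTime:                     aws.String(time.Now().UTC().Format(time.RFC3339)),
+		DeletionMode:                  types.DeletionMode("SoftDelete"), // assumption: "SoftDelete" or "HardDelete"
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```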
+
+# Release (2023-03-30)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.25.0](service/athena/CHANGELOG.md#v1250-2023-03-30)
+  * **Feature**: Make DefaultExecutorDpuSize and CoordinatorDpuSize fields optional in StartSession
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.28.0](service/autoscaling/CHANGELOG.md#v1280-2023-03-30)
+  * **Feature**: Amazon EC2 Auto Scaling now supports Elastic Load Balancing traffic sources with the AttachTrafficSources, DetachTrafficSources, and DescribeTrafficSources APIs. This release also introduces a new activity status, "WaitingForConnectionDraining", for VPC Lattice to the DescribeScalingActivities API.
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.23.0](service/batch/CHANGELOG.md#v1230-2023-03-30)
+  * **Feature**: This feature allows Batch on EKS to support configuration of Pod Labels through Metadata for Batch on EKS Jobs.
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.22.0](service/computeoptimizer/CHANGELOG.md#v1220-2023-03-30)
+  * **Feature**: This release adds support for HDD EBS volume types and io2 Block Express. We are also adding support for 61 new instance types and instances that have non-consecutive runtimes.
+* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.12.0](service/drs/CHANGELOG.md#v1120-2023-03-30)
+  * **Feature**: Adding a field to the replication configuration APIs to support the auto replicate new disks feature. We also deprecated RetryDataReplication.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.92.0](service/ec2/CHANGELOG.md#v1920-2023-03-30)
+  * **Feature**: This release adds support for Tunnel Endpoint Lifecycle control, a new feature that provides Site-to-Site VPN customers with better visibility and control of their VPN tunnel maintenance updates.
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.24.0](service/emr/CHANGELOG.md#v1240-2023-03-30)
+  * **Feature**: Updated DescribeCluster and ListClusters API responses to include ErrorDetail that specifies error code, programmatically accessible error data, and an error message. ErrorDetail provides the underlying reason for cluster failure and recommends actions to simplify troubleshooting of EMR clusters.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.44.0](service/glue/CHANGELOG.md#v1440-2023-03-30)
+  * **Feature**: This release adds support for AWS Glue Data Quality, which helps you evaluate and monitor the quality of your data and includes the API for creating, deleting, or updating data quality rulesets, runs and evaluations.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.20.0](service/guardduty/CHANGELOG.md#v1200-2023-03-30)
+  * **Feature**: Added EKS Runtime Monitoring feature support to the existing detector and finding APIs, and introduced new Coverage APIs
+* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.23.0](service/imagebuilder/CHANGELOG.md#v1230-2023-03-30)
+  * **Feature**: Adds support for new image workflow details and image vulnerability detection.
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.21.0](service/ivs/CHANGELOG.md#v1210-2023-03-30)
+  * **Feature**: Amazon Interactive Video Service (IVS) now offers customers the ability to configure IVS channels to allow insecure RTMP ingest.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.39.0](service/kendra/CHANGELOG.md#v1390-2023-03-30)
+  * **Feature**: AWS Kendra now supports featured results for a query.
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.25.0](service/networkfirewall/CHANGELOG.md#v1250-2023-03-30)
+  * **Feature**: AWS Network Firewall added TLS inspection configurations to allow TLS traffic inspection.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.2.0](service/sagemakergeospatial/CHANGELOG.md#v120-2023-03-30)
+  * **Feature**: Amazon SageMaker geospatial capabilities now support server-side encryption with a customer managed KMS key, and SageMaker notebooks with a SageMaker geospatial image in an Amazon SageMaker Domain in VPC-only mode.
+* `github.com/aws/aws-sdk-go-v2/service/vpclattice`: [v1.0.0](service/vpclattice/CHANGELOG.md#v100-2023-03-30)
+  * **Release**: New AWS service client module
+  * **Feature**: General Availability (GA) release of Amazon VPC Lattice
+* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.19.0](service/wellarchitected/CHANGELOG.md#v1190-2023-03-30)
+  * **Feature**: AWS Well-Architected SDK now supports getting consolidated report metrics and generating a consolidated report PDF.
+
+# Release (2023-03-29)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.2.0](service/opensearchserverless/CHANGELOG.md#v120-2023-03-29)
+  * **Feature**: This release includes two new exception types, "ServiceQuotaExceededException" and "OcuLimitExceededException".
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.42.0](service/rds/CHANGELOG.md#v1420-2023-03-29)
+  * **Feature**: Add support for creating a read replica DB instance from a Multi-AZ DB cluster.
+
+# Release (2023-03-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.15.0](service/ssmcontacts/CHANGELOG.md#v1150-2023-03-28)
+  * **Feature**: This release adds 12 new APIs as part of the Oncall Schedule feature release and adds support for a new contact type, ONCALL_SCHEDULE. See the public documentation for AWS ssm-contacts for more information.
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.21.0](service/ssmincidents/CHANGELOG.md#v1210-2023-03-28)
+  * **Feature**: Increased maximum length of "TriggerDetails.rawData" to 10K characters and "IncidentSummary" to 8K characters.
+
+# Release (2023-03-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.24.0](service/athena/CHANGELOG.md#v1240-2023-03-27)
+  * **Feature**: Enforces a minimal level of encryption for the workgroup for query and calculation results that are written to Amazon S3. When enabled, workgroup users can set encryption only to the minimum level set by the administrator or higher when they submit queries.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.4.0](service/chimesdkvoice/CHANGELOG.md#v140-2023-03-27)
+  * **Feature**: Documentation updates for Amazon Chime SDK Voice.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.50.0](service/connect/CHANGELOG.md#v1500-2023-03-27)
+  * **Feature**: This release introduces support for RelatedContactId in the StartChatContact API. Interactive message and interactive message response have been added to the list of supported message content types for this API as well.
+* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.7](service/connectparticipant/CHANGELOG.md#v1157-2023-03-27)
+  * **Documentation**: This release provides an update to the SendMessage API to handle interactive message response content-types.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.26.0](service/iotwireless/CHANGELOG.md#v1260-2023-03-27)
+  * **Feature**: Introducing new APIs that enable Sidewalk devices to communicate with AWS IoT Core through Sidewalk gateways. This will empower AWS customers to connect Sidewalk devices with other AWS IoT Services, creating possibilities for seamless integration and advanced device management.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.31.0](service/medialive/CHANGELOG.md#v1310-2023-03-27)
+  * **Feature**: AWS Elemental MediaLive now supports ID3 tag insertion for audio-only HLS output groups. AWS Elemental Link devices now support tagging.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.72.1](service/sagemaker/CHANGELOG.md#v1721-2023-03-27)
+  * **Documentation**: Fixed some improperly rendered links in SDK documentation.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.30.0](service/securityhub/CHANGELOG.md#v1300-2023-03-27)
+  * **Feature**: Added new resource detail objects to ASFF, including resources for AwsEksCluster, AWSS3Bucket, AwsEc2RouteTable and AwsEC2Instance.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.17.0](service/servicecatalogappregistry/CHANGELOG.md#v1170-2023-03-27)
+  * **Feature**: In this release, we started supporting ARNs in applicationSpecifier and attributeGroupSpecifier. The GetAttributeGroup, ListAttributeGroups and ListAttributeGroupsForApplication APIs will now include a CreatedBy field in the response.
+* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.13.0](service/voiceid/CHANGELOG.md#v1130-2023-03-27)
+  * **Feature**: Amazon Connect Voice ID now supports multiple fraudster watchlists. Every domain has a default watchlist where all existing fraudsters are placed by default. Custom watchlists may now be created, managed, and evaluated against for known fraudster detection.
+
+# Release (2023-03-24)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.7](service/cloudwatch/CHANGELOG.md#v1257-2023-03-24)
+  * **Documentation**: Doc-only update to correct the alarm actions list
+* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.23.0](service/comprehend/CHANGELOG.md#v1230-2023-03-24)
+  * **Feature**: This release adds a new field (FlywheelArn) to the EntitiesDetectionJobProperties object. The FlywheelArn field is returned in the DescribeEntitiesDetectionJob and ListEntitiesDetectionJobs responses when the EntitiesDetection job is started with a FlywheelArn instead of an EntityRecognizerArn.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.41.0](service/rds/CHANGELOG.md#v1410-2023-03-24)
+  * **Feature**: Added the error code CreateCustomDBEngineVersionFault, returned when creating a custom engine version for Custom engines fails.
+
+# Release (2023-03-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.22.0](service/batch/CHANGELOG.md#v1220-2023-03-23)
+  * **Feature**: This feature allows Batch to support configuration of ephemeral storage size for jobs running on FARGATE
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.11.0](service/chimesdkidentity/CHANGELOG.md#v1110-2023-03-23)
+  * **Feature**: AppInstanceBots can be used to add a bot powered by Amazon Lex to chat channels. ExpirationSettings provides automatic resource deletion for AppInstanceUsers.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.3.0](service/chimesdkmediapipelines/CHANGELOG.md#v130-2023-03-23)
+  * **Feature**: This release adds Amazon Chime SDK call analytics. Call analytics include voice analytics, which provides speaker search and voice tone analysis. These capabilities can be used with Amazon Transcribe and Transcribe Call Analytics to generate machine-learning-powered insights from real-time audio.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.14.0](service/chimesdkmessaging/CHANGELOG.md#v1140-2023-03-23)
+  * **Feature**: ExpirationSettings provides automatic resource deletion for Channels.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.3.0](service/chimesdkvoice/CHANGELOG.md#v130-2023-03-23)
+  * **Feature**: This release adds Amazon Chime SDK call analytics. Call analytics include voice analytics, which provides speaker search and voice tone analysis. These capabilities can be used with Amazon Transcribe and Transcribe Call Analytics to generate machine-learning-powered insights from real-time audio.
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.18.0](service/codeartifact/CHANGELOG.md#v1180-2023-03-23)
+  * **Feature**: Repository CreationTime is added to the CreateRepository and ListRepositories API responses.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.19.0](service/guardduty/CHANGELOG.md#v1190-2023-03-23)
+  * **Feature**: Adds the AutoEnableOrganizationMembers attribute to the DescribeOrganizationConfiguration and UpdateOrganizationConfiguration APIs.
+* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.0.0](service/ivsrealtime/CHANGELOG.md#v100-2023-03-23)
+  * **Release**: New AWS service client module
+  * **Feature**: Initial release of the Amazon Interactive Video Service RealTime API.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.34.0](service/mediaconvert/CHANGELOG.md#v1340-2023-03-23)
+  * **Feature**: AWS Elemental MediaConvert SDK now supports passthrough of ID3v2 tags for audio inputs to audio-only HLS outputs.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.72.0](service/sagemaker/CHANGELOG.md#v1720-2023-03-23)
+  * **Feature**: Amazon SageMaker Autopilot adds two new APIs, CreateAutoMLJobV2 and DescribeAutoMLJobV2. Amazon SageMaker Notebook Instances now supports the ml.geospatial.interactive instance type.
+* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.21.0](service/servicediscovery/CHANGELOG.md#v1210-2023-03-23)
+  * **Feature**: Reverted the RequestLimitExceeded throttling exception for AWS Cloud Map APIs, introduced in SDK version 1.12.424 (2023-03-09), back to the previous exception specified in the ErrorCode.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.21.0](service/textract/CHANGELOG.md#v1210-2023-03-23)
+  * **Feature**: The AnalyzeDocument - Tables feature adds support for new elements in the API: table titles, footers, section titles, summary cells/tables, and table type.
+
+# Release (2023-03-22)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.8](service/iam/CHANGELOG.md#v1198-2023-03-22)
+  * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM).
+* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.11.0](service/iottwinmaker/CHANGELOG.md#v1110-2023-03-22)
+  * **Feature**: This release adds support for adding metadata when creating a new scene or updating an existing scene.
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.8](service/networkmanager/CHANGELOG.md#v1178-2023-03-22)
+  * **Documentation**: This release includes an update to create-transit-gateway-route-table-attachment, showing example usage for TransitGatewayRouteTableArn.
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.10.0](service/resiliencehub/CHANGELOG.md#v1100-2023-03-22)
+  * **Feature**: This release provides customers with the ability to import resources from within an EKS cluster and assess the resiliency of EKS cluster workloads.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.36.0](service/ssm/CHANGELOG.md#v1360-2023-03-22)
+  * **Feature**: This Patch Manager release supports creating, updating, and deleting Patch Baselines for AmazonLinux2023 and AlmaLinux.
+
+# Release (2023-03-21)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.13.0](service/chimesdkmessaging/CHANGELOG.md#v1130-2023-03-21)
+  * **Feature**: Amazon Chime SDK messaging customers can now manage streaming configuration for messaging data for archival and analysis.
+* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.1.0](service/cleanrooms/CHANGELOG.md#v110-2023-03-21)
+  * **Feature**: GA release of AWS Clean Rooms, with added tagging functionality
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.91.0](service/ec2/CHANGELOG.md#v1910-2023-03-21)
+  * **Feature**: This release adds support for AWS Network Firewall, AWS PrivateLink, and Gateway Load Balancers to Amazon VPC Reachability Analyzer, and it makes the path destination optional as long as a destination address in the filter at source is provided.
+* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.14.0](service/internal/s3shared/CHANGELOG.md#v1140-2023-03-21)
+  * **Feature**: Port the v1 SDK 100-continue HTTP header customization for S3 PutObject/UploadPart requests and enable user configuration
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.28.0](service/iotsitewise/CHANGELOG.md#v1280-2023-03-21)
+  * **Feature**: Provide support for tagging of data streams and enabling tag-based authorization for property aliases
+* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.18.0](service/mgn/CHANGELOG.md#v1180-2023-03-21)
+  * **Feature**: This release introduces the import and export feature and an expansion of the post-launch actions
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.31.0](service/s3/CHANGELOG.md#v1310-2023-03-21)
+  * **Feature**: Port the v1 SDK 100-continue HTTP header customization for S3 PutObject/UploadPart requests and enable user configuration
+
+# Release (2023-03-20)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.19.0](service/applicationautoscaling/CHANGELOG.md#v1190-2023-03-20)
+  * **Feature**: With this release customers can now tag their Application Auto Scaling registered targets with key-value pairs and manage IAM permissions for all the tagged resources centrally.
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.20.0](service/neptune/CHANGELOG.md#v1200-2023-03-20)
+  * **Feature**: This release makes the following changes: db-cluster-identifier is now a required parameter of create-db-instance, and describe-db-cluster will now return PendingModifiedValues and GlobalClusterIdentifier fields in the response.
+* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.16.0](service/s3outposts/CHANGELOG.md#v1160-2023-03-20)
+  * **Feature**: S3 On Outposts added support for endpoint status, and a failed endpoint reason, if any
+* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.14.0](service/workdocs/CHANGELOG.md#v1140-2023-03-20)
+  * **Feature**: This release adds a new API, SearchResources, which enables users to search through the metadata and content of folders, documents, document versions and comments in a WorkDocs site.
+
+# Release (2023-03-17)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.6.0](service/billingconductor/CHANGELOG.md#v160-2023-03-17)
+  * **Feature**: This release adds a new filter to the ListAccountAssociations API and a new filter to the ListBillingGroups API.
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.30.0](service/configservice/CHANGELOG.md#v1300-2023-03-17)
+  * **Feature**: This release adds resourceType enums for types released from October 2022 through February 2023.
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.25.0](service/databasemigrationservice/CHANGELOG.md#v1250-2023-03-17)
+  * **Feature**: S3 setting to create an AWS Glue Data Catalog. Oracle setting to control conversion of timestamp columns. Support for Kafka SASL Plain authentication. Setting to map booleans from PostgreSQL to Redshift. SQL Server settings to force lob lookup on inline LOBs and to control access to database logs.
+
+# Release (2023-03-16)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.18](config/CHANGELOG.md#v11818-2023-03-16)
+  * **Bug Fix**: Allow RoleARN to be set as a functional option on STS WebIdentityRoleOptions (see the sketch after this section). Fixes aws/aws-sdk-go-v2#2015.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.18.0](service/guardduty/CHANGELOG.md#v1180-2023-03-16)
+  * **Feature**: Updated 9 APIs for feature enablement to reflect the expansion of GuardDuty to features. Added new APIs and updated existing APIs to support RDS Protection GA.
+* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.7](service/resourceexplorer2/CHANGELOG.md#v127-2023-03-16)
+  * **Documentation**: Documentation updates for APIs.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.7](service/sagemakerruntime/CHANGELOG.md#v1187-2023-03-16)
+  * **Documentation**: Documentation updates for SageMaker Runtime
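+
+A minimal Go sketch of the `config` fix above, setting RoleARN through the functional option (the role ARN is hypothetical):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+)
+
+func main() {
+	// Override the role assumed for web-identity credentials without
+	// relying solely on the AWS_ROLE_ARN environment variable.
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithWebIdentityRoleCredentialOptions(func(o *stscreds.WebIdentityRoleOptions) {
+			o.RoleARN = "arn:aws:iam::123456789012:role/example-role" // hypothetical
+		}),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = cfg
+}
+```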
+
+# Release (2023-03-15)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.9.0](service/migrationhubstrategy/CHANGELOG.md#v190-2023-03-15)
+  * **Feature**: This release adds the binary analysis that analyzes IIS application DLLs on Windows and Java applications on Linux to provide an anti-pattern report without configuring access to the source code.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.31.0](service/s3control/CHANGELOG.md#v1310-2023-03-15)
+  * **Feature**: Added support for S3 Object Lambda aliases.
+* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.3.0](service/securitylake/CHANGELOG.md#v130-2023-03-15)
+  * **Feature**: Make the Create/Get/ListSubscribers APIs return the resource share ARN and name so they can be used to validate the RAM resource share to accept. GetDatalake can be used to track the status of UpdateDatalake and DeleteDatalake requests.
+
+# Release (2023-03-14)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.13.0](feature/ec2/imds/CHANGELOG.md#v1130-2023-03-14)
+  * **Feature**: Add a flag to disable the IMDSv1 fallback (see the sketch after this section)
+* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.18.0](service/applicationautoscaling/CHANGELOG.md#v1180-2023-03-14)
+  * **Feature**: Application Auto Scaling customers can now use mathematical functions to customize the metric used with Target Tracking policies within the policy configuration itself, saving the cost and effort of publishing the customizations as a separate metric.
+* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.19.0](service/dataexchange/CHANGELOG.md#v1190-2023-03-14)
+  * **Feature**: This release enables data providers to license direct access to S3 objects encrypted with Customer Managed Keys (CMK) in AWS KMS through AWS Data Exchange. Subscribers can use these keys to decrypt, then use the encrypted S3 objects shared with them, without creating or managing copies.
+* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.7](service/directconnect/CHANGELOG.md#v1187-2023-03-14)
+  * **Documentation**: describe-direct-connect-gateway-associations includes a new status, updating, indicating that the association is currently in the process of updating.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.90.0](service/ec2/CHANGELOG.md#v1900-2023-03-14)
+  * **Feature**: This release adds a new DnsOptions key (PrivateDnsOnlyForInboundResolverEndpoint) to the CreateVpcEndpoint and ModifyVpcEndpoint APIs.
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.6](service/iam/CHANGELOG.md#v1196-2023-03-14)
+  * **Documentation**: Documentation-only updates to correct customer-reported issues
+* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.2.0](service/keyspaces/CHANGELOG.md#v120-2023-03-14)
+  * **Feature**: Adding support for client-side timestamps
+* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.6](service/support/CHANGELOG.md#v1146-2023-03-14)
+  * **Announcement**: Model regenerated with support for null string values to properly implement the `support` service operations `DescribeTrustedAdvisorCheckRefreshStatuses` and `DescribeTrustedAdvisorCheckSummaries`
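+
+A minimal Go sketch of disabling the IMDSv1 fallback from the `feature/ec2/imds` entry above, assuming the new flag is the `EnableFallback` ternary on the client options:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+)
+
+func main() {
+	// With fallback disabled, the client fails instead of retrying without
+	// a token when IMDSv2 token retrieval is rejected.
+	client := imds.New(imds.Options{
+		EnableFallback: aws.FalseTernary, // assumed option name for the new flag
+	})
+
+	out, err := client.GetRegion(context.TODO(), &imds.GetRegionInput{})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Println("region:", out.Region)
+}
+```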
+
+# Release (2023-03-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.15.0](service/appintegrations/CHANGELOG.md#v1150-2023-03-13)
+  * **Feature**: Adds FileConfiguration to Amazon AppIntegrations CreateDataIntegration, supporting scheduled downloading of third-party files into Amazon Connect from sources such as Microsoft SharePoint.
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.20.2](service/lakeformation/CHANGELOG.md#v1202-2023-03-13)
+  * **Documentation**: This release updates the documentation regarding Get/Update DataCellsFilter
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.30.0](service/s3control/CHANGELOG.md#v1300-2023-03-13)
+  * **Feature**: Added support for cross-account Multi-Region Access Points. Added support for S3 Replication for S3 on Outposts.
+* `github.com/aws/aws-sdk-go-v2/service/tnb`: [v1.1.0](service/tnb/CHANGELOG.md#v110-2023-03-13)
+  * **Feature**: This release adds tagging support to the following Network Instance APIs: Instantiate, Update, Terminate.
+* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.13.0](service/wisdom/CHANGELOG.md#v1130-2023-03-13)
+  * **Feature**: This release extends the Wisdom CreateKnowledgeBase API to support the SharePoint connector type by removing the @required trait for objectField
+
+# Release (2023-03-10)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.4.0](service/ivschat/CHANGELOG.md#v140-2023-03-10)
+  * **Feature**: This release adds a new exception returned when calling AWS IVS chat UpdateLoggingConfiguration. UpdateLoggingConfiguration can now return ConflictException when invalid updates are made in sequence to Logging Configurations.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.19.0](service/secretsmanager/CHANGELOG.md#v1190-2023-03-10)
+  * **Feature**: The type definitions of SecretString and SecretBinary now have a minimum length of 1 in the model to match the exception thrown when you pass in empty values.
+
+# Release (2023-03-09)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.17.0](service/codeartifact/CHANGELOG.md#v1170-2023-03-09)
+  * **Feature**: This release introduces the generic package format, a mechanism for storing arbitrary binary assets. It also adds a new API, PublishPackageVersion, to allow for publishing generic packages.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.49.0](service/connect/CHANGELOG.md#v1490-2023-03-09)
+  * **Feature**: This release adds a new API, GetMetricDataV2, which returns metric data for Amazon Connect.
+* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.11.0](service/evidently/CHANGELOG.md#v1110-2023-03-09)
+  * **Feature**: Updated entity override documentation
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.5](service/networkmanager/CHANGELOG.md#v1175-2023-03-09)
+  * **Documentation**: This update provides example usage for TransitGatewayRouteTableArn.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.33.0](service/quicksight/CHANGELOG.md#v1330-2023-03-09)
+  * **Feature**: This release has two changes: it adds a state persistence feature for embedded dashboards and consoles in the GenerateEmbedUrlForRegisteredUser API, and it adds properties for hidden collapsed row dimensions in PivotTableOptions.
+* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.19.0](service/redshiftdata/CHANGELOG.md#v1190-2023-03-09)
+  * **Feature**: Added support for Redshift Serverless workgroup-arn wherever the WorkgroupName parameter is available.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.71.0](service/sagemaker/CHANGELOG.md#v1710-2023-03-09)
+  * **Feature**: Amazon SageMaker Inference now allows SSM access to a customer's model container by setting the "EnableSSMAccess" parameter for a ProductionVariant in the CreateEndpointConfig API (see the sketch after this section).
+* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.20.0](service/servicediscovery/CHANGELOG.md#v1200-2023-03-09)
+  * **Feature**: Updated all AWS Cloud Map APIs to provide a consistent throttling exception (RequestLimitExceeded)
+* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.17.0](service/sesv2/CHANGELOG.md#v1170-2023-03-09)
+  * **Feature**: This release introduces a new recommendation in Virtual Deliverability Manager Advisor, which detects missing or misconfigured Brand Indicator for Message Identification (BIMI) DNS records for customer sending identities.
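+
+A minimal Go sketch of the "EnableSSMAccess" parameter from the SageMaker entry above (the endpoint config name, model name, and instance type are hypothetical placeholders, and the exact field names are assumptions based on this release):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/sagemaker"
+	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := sagemaker.NewFromConfig(cfg)
+
+	// Enable SSM access to the model container of a production variant.
+	_, err = client.CreateEndpointConfig(context.TODO(), &sagemaker.CreateEndpointConfigInput{
+		EndpointConfigName: aws.String("my-endpoint-config"), // hypothetical
+		ProductionVariants: []types.ProductionVariant{{
+			VariantName:          aws.String("primary"),
+			ModelName:            aws.String("my-model"), // hypothetical
+			InstanceType:         types.ProductionVariantInstanceTypeMlM5Large,
+			InitialInstanceCount: aws.Int32(1),
+			EnableSSMAccess:      aws.Bool(true), // the parameter introduced in v1.71.0
+		}},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```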
+
+# Release (2023-03-08)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.23.0](service/athena/CHANGELOG.md#v1230-2023-03-08)
+  * **Feature**: A new field, SubstatementType, is added to the GetQueryExecution API, so customers have an error-free way to detect the query type and interpret the result.
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.19.0](service/dynamodb/CHANGELOG.md#v1190-2023-03-08)
+  * **Feature**: Adds deletion protection support to DynamoDB tables. Tables with deletion protection enabled cannot be deleted. Deletion protection is disabled by default, can be enabled via the CreateTable or UpdateTable APIs (see the sketch after this section), and is visible in TableDescription. This setting is not replicated for Global Tables.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.89.0](service/ec2/CHANGELOG.md#v1890-2023-03-08)
+  * **Feature**: Introducing Amazon EC2 C7g, M7g and R7g instances, powered by the latest generation AWS Graviton3 processors, delivering up to 25% better performance over Graviton2-based instances.
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.20.0](service/lakeformation/CHANGELOG.md#v1200-2023-03-08)
+  * **Feature**: This release adds support for two new APIs, "GetDataCellsFilter" and "UpdateDataCellsFilter", and also updates the corresponding documentation.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.21.0](service/mediapackage/CHANGELOG.md#v1210-2023-03-08)
+  * **Feature**: This release provides the date and time live resources were created.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.22.0](service/mediapackagevod/CHANGELOG.md#v1220-2023-03-08)
+  * **Feature**: This release provides the date and time VOD resources were created.
+* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.17.0](service/route53resolver/CHANGELOG.md#v1170-2023-03-08)
+  * **Feature**: Add dual-stack and IPv6 support for Route 53 Resolver Endpoints; add IPv6 target IPs in Route 53 Resolver Forwarding Rules
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.70.0](service/sagemaker/CHANGELOG.md#v1700-2023-03-08)
+  * **Feature**: A user identity is needed to specify the SageMaker user who performs each action on an entity. However, there is currently no unified concept of user identity across the SageMaker service that could be used today.
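+
+A minimal Go sketch of the DynamoDB deletion protection entry above, enabling protection at table creation (the table name and key schema are hypothetical):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := dynamodb.NewFromConfig(cfg)
+
+	// Create a table that cannot be deleted until protection is turned off
+	// again via UpdateTable.
+	_, err = client.CreateTable(context.TODO(), &dynamodb.CreateTableInput{
+		TableName:   aws.String("example-table"), // hypothetical
+		BillingMode: types.BillingModePayPerRequest,
+		AttributeDefinitions: []types.AttributeDefinition{
+			{AttributeName: aws.String("pk"), AttributeType: types.ScalarAttributeTypeS},
+		},
+		KeySchema: []types.KeySchemaElement{
+			{AttributeName: aws.String("pk"), KeyType: types.KeyTypeHash},
+		},
+		DeletionProtectionEnabled: aws.Bool(true),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```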
+
+# Release (2023-03-07)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.24.0](service/databasemigrationservice/CHANGELOG.md#v1240-2023-03-07)
+  * **Feature**: This release adds DMS Fleet Advisor Target Recommendation APIs and exposes functionality for DMS Fleet Advisor. It adds functionality to start Target Recommendation calculation.
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.22.1](service/location/CHANGELOG.md#v1221-2023-03-07)
+  * **Documentation**: Documentation update for the release of 3 additional map styles for use with Open Data Maps: Open Data Standard Dark, Open Data Visualization Light & Open Data Visualization Dark.
+
+# Release (2023-03-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.10.0](service/account/CHANGELOG.md#v1100-2023-03-06)
+  * **Feature**: AWS Account alternate contact email addresses can now have a length of 254 characters and contain the character "|".
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.6](service/ivs/CHANGELOG.md#v1206-2023-03-06)
+  * **Documentation**: Updated text descriptions in DeleteChannel, Stream, and StreamSummary.
+
+# Release (2023-03-03)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.6](service/dynamodb/CHANGELOG.md#v1186-2023-03-03)
+  * **Documentation**: Documentation updates for DynamoDB.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.88.0](service/ec2/CHANGELOG.md#v1880-2023-03-03)
+  * **Feature**: This release adds support for a new boot mode for EC2 instances called 'UEFI Preferred'.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.27.1](service/macie2/CHANGELOG.md#v1271-2023-03-03)
+  * **Documentation**: Documentation updates for Amazon Macie
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.33.0](service/mediaconvert/CHANGELOG.md#v1330-2023-03-03)
+  * **Feature**: The AWS Elemental MediaConvert SDK has improved handling for different input and output color space combinations.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.30.0](service/medialive/CHANGELOG.md#v1300-2023-03-03)
+  * **Feature**: AWS Elemental MediaLive adds support for Nielsen watermark timezones.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.26.0](service/transcribe/CHANGELOG.md#v1260-2023-03-03)
+  * **Feature**: Amazon Transcribe now supports role access for these API operations: CreateVocabulary, UpdateVocabulary, CreateVocabularyFilter, and UpdateVocabularyFilter.
+
+# Release (2023-03-02)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.35.0](service/iot/CHANGELOG.md#v1350-2023-03-02)
+  * **Feature**: A recurring maintenance window is an optional configuration used for rolling out the job document to all devices in the target group, observing a predetermined start time, duration, and frequency at which the maintenance window occurs.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.8.0](service/migrationhubstrategy/CHANGELOG.md#v180-2023-03-02)
+  * **Feature**: This release updates the File Import API to allow importing servers already discovered by customers with reduced prerequisites.
+* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.19.0](service/organizations/CHANGELOG.md#v1190-2023-03-02)
+  * **Feature**: This release introduces a new reason code, ACCOUNT_CREATION_NOT_COMPLETE, to ConstraintViolationException in the CreateOrganization API.
+* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.17.0](service/pi/CHANGELOG.md#v1170-2023-03-02)
+  * **Feature**: This release adds a new field, PeriodAlignment, to allow customers to specify whether the returned timestamp of time periods is the start or end time.
+* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.2.0](service/pipes/CHANGELOG.md#v120-2023-03-02)
+  * **Feature**: This release fixes some input parameter ranges and patterns.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.69.0](service/sagemaker/CHANGELOG.md#v1690-2023-03-02)
+  * **Feature**: Add a new field, "EndpointMetrics", to the SageMaker Inference Recommender "ListInferenceRecommendationsJobSteps" API response.
+
+# Release (2023-03-01)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.2.0](service/codecatalyst/CHANGELOG.md#v120-2023-03-01)
+  * **Feature**: Published the Dev Environments StopDevEnvironmentSession API
+* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.19.0](service/pricing/CHANGELOG.md#v1190-2023-03-01)
+  * **Feature**: This release adds 2 new APIs: ListPriceLists, which returns a list of applicable price lists, and GetPriceListFileUrl, which outputs a URL for retrieving the price list file generated by ListPriceLists
+* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.15.0](service/s3outposts/CHANGELOG.md#v1150-2023-03-01)
+  * **Feature**: S3 on Outposts introduces a new API, ListOutpostsWithS3; with this API you can list all your Outposts with S3 capacity.
+
+# Release (2023-02-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.22.0](service/comprehend/CHANGELOG.md#v1220-2023-02-28)
+  * **Feature**: Amazon Comprehend now supports flywheels to help you train and manage new model versions for custom models.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.87.0](service/ec2/CHANGELOG.md#v1870-2023-02-28)
+  * **Feature**: This release allows IMDS support to be set to v2-only on an existing AMI, so that all future instances launched from that AMI will use IMDSv2 by default.
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.6](service/kms/CHANGELOG.md#v1206-2023-02-28)
+  * **Documentation**: AWS KMS is deprecating the RSAES_PKCS1_V1_5 wrapping algorithm option in the GetParametersForImport API that is used in the AWS KMS Import Key Material feature. AWS KMS will end support for this wrapping algorithm by October 1, 2023.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.26.0](service/lightsail/CHANGELOG.md#v1260-2023-02-28)
+  * **Feature**: This release adds Lightsail for Research feature support, such as GUI session access, cost estimates, stop instance on idle, and disk auto mount.
+* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.15.0](service/managedblockchain/CHANGELOG.md#v1150-2023-02-28)
+  * **Feature**: This release adds support for tagging to the accessor resource in Amazon Managed Blockchain
+* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.2.0](service/omics/CHANGELOG.md#v120-2023-02-28)
+  * **Feature**: Minor model changes to accommodate the batch imports feature
+
+# Release (2023-02-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.23.0](service/devopsguru/CHANGELOG.md#v1230-2023-02-27)
+  * **Feature**: This release adds the description field on ListAnomaliesForInsight and DescribeAnomaly API responses for proactive anomalies.
+* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.11.0](service/drs/CHANGELOG.md#v1110-2023-02-27)
+  * **Feature**: New fields were added to reflect availability zone data in source server and recovery instance description command responses, as well as source server launch status.
+* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.0.0](service/internetmonitor/CHANGELOG.md#v100-2023-02-27)
+  * **Release**: New AWS service client module
+  * **Feature**: CloudWatch Internet Monitor is a new service within CloudWatch that will help application developers and network engineers continuously monitor internet performance metrics such as availability and performance between their AWS-hosted applications and the end users of these applications
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.30.0](service/lambda/CHANGELOG.md#v1300-2023-02-27)
+  * **Feature**: This release adds the ability to create ESMs with Document DB change streams as an event source. For more information see https://docs.aws.amazon.com/lambda/latest/dg/with-documentdb.html.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.32.0](service/mediaconvert/CHANGELOG.md#v1320-2023-02-27)
+  * **Feature**: The AWS Elemental MediaConvert SDK has added support for HDR10 to SDR tone mapping, and animated GIF video input sources.
+* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.16.0](service/timestreamwrite/CHANGELOG.md#v1160-2023-02-27)
+  * **Feature**: This release adds the ability to ingest batched historical data or migrate data in bulk from S3 into Timestream using CSV files.
+
+# Release (2023-02-24)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.48.0](service/connect/CHANGELOG.md#v1480-2023-02-24)
+  * **Feature**: The StartTaskContact API now supports linked task creation with a new optional RelatedContactId parameter
+* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.3.0](service/connectcases/CHANGELOG.md#v130-2023-02-24)
+  * **Feature**: This release adds the ability to delete domains through the DeleteDomain API. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.5](service/redshift/CHANGELOG.md#v1275-2023-02-24)
+  * **Documentation**: Documentation updates for the Redshift API, bringing it in line with IAM best practices.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.29.0](service/securityhub/CHANGELOG.md#v1290-2023-02-24)
+  * **Feature**: New Security Hub APIs and updates to existing APIs that help you consolidate control findings and enable and disable controls across all supported standards
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.5](service/servicecatalog/CHANGELOG.md#v1165-2023-02-24)
+  * **Documentation**: Documentation updates for Service Catalog
+
+# Release (2023-02-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.25.0](service/appflow/CHANGELOG.md#v1250-2023-02-23)
+  * **Feature**: This release enables customers to choose whether to use Private Link for metadata and authorization calls when using a private Salesforce connection
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.24.0](service/ecs/CHANGELOG.md#v1240-2023-02-23)
+  * **Feature**: This release supports deleting Amazon ECS task definitions that are in the INACTIVE state (see the sketch after this release section).
+* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.12.3](service/grafana/CHANGELOG.md#v1123-2023-02-23)
+  * **Documentation**: Doc-only update. Updated information on attached role policies for customer-provided roles
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.6](service/guardduty/CHANGELOG.md#v1176-2023-02-23)
+  * **Documentation**: Updated API and data type descriptions for CreateFilter, UpdateFilter, and TriggerDetails.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.25.0](service/iotwireless/CHANGELOG.md#v1250-2023-02-23)
+  * **Feature**: In this release, we add additional capabilities for FUOTA, allowing users to configure the fragment size, the sending interval, and the redundancy ratio of FUOTA tasks
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.22.0](service/location/CHANGELOG.md#v1220-2023-02-23)
+  * **Feature**: This release adds support for using the Maps APIs with an API Key in addition to AWS Cognito. This includes support for adding, listing, updating and deleting API Keys.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.27.0](service/macie2/CHANGELOG.md#v1270-2023-02-23)
+  * **Feature**: This release adds support for a new finding type, Policy:IAMUser/S3BucketSharedWithCloudFront, and S3 bucket metadata that indicates if a bucket is shared with an Amazon CloudFront OAI or OAC.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.26.0](service/wafv2/CHANGELOG.md#v1260-2023-02-23)
+  * **Feature**: You can now associate an AWS WAF v2 web ACL with an AWS App Runner service.
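+
+A minimal Go sketch of the ECS task definition deletion noted in the 2023-02-23 section above (the family:revision is hypothetical, and the task definition must already be INACTIVE, i.e. deregistered):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/ecs"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := ecs.NewFromConfig(cfg)
+
+	// Permanently delete an INACTIVE task definition revision.
+	out, err := client.DeleteTaskDefinitions(context.TODO(), &ecs.DeleteTaskDefinitionsInput{
+		TaskDefinitions: []string{"example-family:3"}, // hypothetical family:revision
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Per-revision failures are reported alongside successful deletions.
+	for _, f := range out.Failures {
+		log.Printf("failed: %s (%s)", aws.ToString(f.Arn), aws.ToString(f.Reason))
+	}
+}
+```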
+
+# Release (2023-02-22)
+
+## General Highlights
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.2.0](service/chimesdkvoice/CHANGELOG.md#v120-2023-02-22)
+  * **Feature**: This release introduces support for Voice Connector media metrics in the Amazon Chime SDK Voice namespace
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.26.0](service/cloudfront/CHANGELOG.md#v1260-2023-02-22)
+  * **Feature**: CloudFront now supports block lists in origin request policies so that you can forward all headers, cookies, or query strings from viewer requests to the origin *except* for those specified in the block list.
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.23.0](service/datasync/CHANGELOG.md#v1230-2023-02-22)
+  * **Feature**: AWS DataSync has relaxed the minimum length constraint of AccessKey for Object Storage locations to 1.
+* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.15.0](service/opensearch/CHANGELOG.md#v1150-2023-02-22)
+  * **Feature**: This release lets customers configure Off-peak window and software update related properties for a new or existing domain. It enhances the capabilities of the StartServiceSoftwareUpdate API, adds 2 new APIs, ListScheduledActions and UpdateScheduledAction, and allows Auto-Tune to make use of the Off-peak window.
+* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.10.0](service/rum/CHANGELOG.md#v1100-2023-02-22)
+  * **Feature**: CloudWatch RUM now supports CloudWatch Custom Metrics
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.5](service/ssm/CHANGELOG.md#v1355-2023-02-22)
+  * **Documentation**: Document-only update for Feb 2023
+
+# Release (2023-02-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.32.0](service/quicksight/CHANGELOG.md#v1320-2023-02-21)
+  * **Feature**: S3 data sources now accept a custom IAM role.
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.9.0](service/resiliencehub/CHANGELOG.md#v190-2023-02-21)
+  * **Feature**: In this release we improved Resilience Hub application creation and maintenance by introducing new resource and app component CRUD APIs, improving visibility and maintenance of application input sources, and adding support for additional information attributes to be provided by customers.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.4](service/securityhub/CHANGELOG.md#v1284-2023-02-21)
+  * **Documentation**: Documentation updates for AWS Security Hub
+* `github.com/aws/aws-sdk-go-v2/service/tnb`: [v1.0.0](service/tnb/CHANGELOG.md#v100-2023-02-21)
+  * **Release**: New AWS service client module
+  * **Feature**: This is the initial SDK release for AWS Telco Network Builder (TNB). AWS Telco Network Builder is a network automation service that helps you deploy and manage telecom networks.
+
+# Release (2023-02-20)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.17.5
+  * **Bug Fix**: Fix an int overflow bug on 32-bit architectures
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.24.0](service/auditmanager/CHANGELOG.md#v1240-2023-02-20)
+  * **Feature**: This release introduces a ServiceQuotaExceededException to the UpdateAssessmentFrameworkShare API operation.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.47.0](service/connect/CHANGELOG.md#v1470-2023-02-20)
+  * **Feature**: Reasons for failed diff has been approved by SDK Reviewer
+
+# Release (2023-02-17)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.17.0](service/apprunner/CHANGELOG.md#v1170-2023-02-17)
+  * **Feature**: This release supports removing the MaxSize limit for AutoScalingConfiguration.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.43.0](service/glue/CHANGELOG.md#v1430-2023-02-17)
+  * **Feature**: Release of the Delta Lake data lake format for the Glue Studio Service
+
+# Release (2023-02-16)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.23.0](service/emr/CHANGELOG.md#v1230-2023-02-16)
+  * **Feature**: This release provides customers the ability to define a timeout period for procuring capacity during a resize operation for Instance Fleet clusters. Customers can specify this timeout using the ResizeSpecifications parameter supported by the RunJobFlow, ModifyInstanceFleet and AddInstanceFleet APIs.
+* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.12.0](service/grafana/CHANGELOG.md#v1120-2023-02-16)
+  * **Feature**: With this release Amazon Managed Grafana now supports inbound Network Access Control that helps you restrict user access to your Grafana workspaces
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.3](service/ivs/CHANGELOG.md#v1203-2023-02-16)
+  * **Documentation**: Doc-only update. Updated text descriptions in DeleteChannel, Stream, and StreamSummary.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.25.1](service/wafv2/CHANGELOG.md#v1251-2023-02-16)
+  * **Documentation**: Added a notice for account takeover prevention (ATP). The interface incorrectly lets you configure ATP response inspection in regional web ACLs in Region US East (N. Virginia) without returning an error. ATP response inspection is only available in web ACLs that protect CloudFront distributions.
+
+# Release (2023-02-15)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.19.3](service/accessanalyzer/CHANGELOG.md#v1193-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.9.1](service/account/CHANGELOG.md#v191-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.17.3](service/acm/CHANGELOG.md#v1173-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.2](service/acmpca/CHANGELOG.md#v1212-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.2](service/alexaforbusiness/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.16.2](service/amp/CHANGELOG.md#v1162-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.13.2](service/amplify/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.14.2](service/amplifybackend/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.9.2](service/amplifyuibuilder/CHANGELOG.md#v192-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.16.3](service/apigateway/CHANGELOG.md#v1163-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.11.2](service/apigatewaymanagementapi/CHANGELOG.md#v1112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.13.3](service/apigatewayv2/CHANGELOG.md#v1133-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.17.1](service/appconfig/CHANGELOG.md#v1171-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.6.1](service/appconfigdata/CHANGELOG.md#v161-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.24.2](service/appflow/CHANGELOG.md#v1242-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.14.2](service/appintegrations/CHANGELOG.md#v1142-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.17.3](service/applicationautoscaling/CHANGELOG.md#v1173-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.10.2](service/applicationcostprofiler/CHANGELOG.md#v1102-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.15.2](service/applicationdiscoveryservice/CHANGELOG.md#v1152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.17.3](service/applicationinsights/CHANGELOG.md#v1173-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.17.2](service/appmesh/CHANGELOG.md#v1172-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.16.2](service/apprunner/CHANGELOG.md#v1162-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.20.2](service/appstream/CHANGELOG.md#v1202-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.19.2](service/appsync/CHANGELOG.md#v1192-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.1.3](service/arczonalshift/CHANGELOG.md#v113-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.22.2](service/athena/CHANGELOG.md#v1222-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.23.2](service/auditmanager/CHANGELOG.md#v1232-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.13.2](service/autoscalingplans/CHANGELOG.md#v1132-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.20.1](service/backup/CHANGELOG.md#v1201-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.9.2](service/backupgateway/CHANGELOG.md#v192-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.1.2](service/backupstorage/CHANGELOG.md#v112-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.21.3](service/batch/CHANGELOG.md#v1213-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.5.2](service/billingconductor/CHANGELOG.md#v152-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.17.2](service/braket/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.14.2](service/budgets/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.22.2](service/chime/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.10.2](service/chimesdkidentity/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.2.2](service/chimesdkmediapipelines/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.3](service/chimesdkmeetings/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.12.2](service/chimesdkmessaging/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.1.2](service/chimesdkvoice/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.0.2](service/cleanrooms/CHANGELOG.md#v102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.17.2](service/cloud9/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.11.3](service/cloudcontrol/CHANGELOG.md#v1113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.13.2](service/clouddirectory/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.13.2](service/cloudhsm/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.14.2](service/cloudhsmv2/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.12.2](service/cloudsearchdomain/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.24.0](service/cloudtrail/CHANGELOG.md#v1240-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: This release adds an InsufficientEncryptionPolicyException type to the StartImport endpoint + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/cloudtraildata`: [v1.0.2](service/cloudtraildata/CHANGELOG.md#v102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.15.3](service/cloudwatchevents/CHANGELOG.md#v1153-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.20.3](service/cloudwatchlogs/CHANGELOG.md#v1203-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.16.2](service/codeartifact/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.20.3](service/codebuild/CHANGELOG.md#v1203-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.1.2](service/codecatalyst/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.14.2](service/codecommit/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.16.3](service/codedeploy/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.13.2](service/codeguruprofiler/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.17.2](service/codegurureviewer/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.14.2](service/codepipeline/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.13.2](service/codestar/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.14.2](service/codestarconnections/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.14.2](service/codestarnotifications/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.15.2](service/cognitoidentity/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.22.2](service/cognitoidentityprovider/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.12.2](service/cognitosync/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.21.2](service/comprehend/CHANGELOG.md#v1212-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.15.2](service/comprehendmedical/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.21.1](service/computeoptimizer/CHANGELOG.md#v1211-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.29.3](service/configservice/CHANGELOG.md#v1293-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.46.1](service/connect/CHANGELOG.md#v1461-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.2.3](service/connectcampaigns/CHANGELOG.md#v123-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.2.3](service/connectcases/CHANGELOG.md#v123-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.13.2](service/connectcontactlens/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.2](service/connectparticipant/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.1.2](service/controltower/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.15.2](service/costandusagereportservice/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.25.2](service/costexplorer/CHANGELOG.md#v1252-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.23.1](service/customerprofiles/CHANGELOG.md#v1231-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.23.3](service/databasemigrationservice/CHANGELOG.md#v1233-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.21.3](service/databrew/CHANGELOG.md#v1213-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.18.2](service/dataexchange/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.14.2](service/datapipeline/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.22.1](service/datasync/CHANGELOG.md#v1221-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.12.2](service/dax/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.18.2](service/detective/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.15.2](service/devicefarm/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.22.2](service/devopsguru/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.3](service/directconnect/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.16.3](service/directoryservice/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.14.4](service/dlm/CHANGELOG.md#v1144-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.1.2](service/docdbelastic/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.10.2](service/drs/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.3](service/dynamodb/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.3](service/dynamodbstreams/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.16.4](service/ebs/CHANGELOG.md#v1164-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.15.2](service/ec2instanceconnect/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.18.3](service/ecr/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.15.2](service/ecrpublic/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.23.3](service/ecs/CHANGELOG.md#v1233-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.4](service/efs/CHANGELOG.md#v1194-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. + * **Documentation**: Documentation update for EFS to support IAM best practices. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.27.3](service/eks/CHANGELOG.md#v1273-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.12.2](service/elasticinference/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.18.3](service/elasticsearchservice/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.14.2](service/elastictranscoder/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.22.3](service/emr/CHANGELOG.md#v1223-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.17.1](service/emrcontainers/CHANGELOG.md#v1171-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.5.2](service/emrserverless/CHANGELOG.md#v152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.18.3](service/eventbridge/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.10.2](service/evidently/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.9.2](service/finspace/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.14.2](service/finspacedata/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.16.3](service/firehose/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.14.2](service/fis/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.22.3](service/fms/CHANGELOG.md#v1223-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.25.2](service/forecast/CHANGELOG.md#v1252-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.13.2](service/forecastquery/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.23.0](service/frauddetector/CHANGELOG.md#v1230-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: This release introduces Lists feature which allows customers to reference a set of values in Fraud Detector's rules. With Lists, customers can dynamically manage these attributes in real time. Lists can be created/deleted and its contents can be modified using the Fraud Detector API. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.3](service/fsx/CHANGELOG.md#v1283-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.17.2](service/gamelift/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.2.2](service/gamesparks/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.14.3](service/glacier/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.16.2](service/globalaccelerator/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.42.0](service/glue/CHANGELOG.md#v1420-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: Fix DirectJDBCSource not showing up in CLI code gen + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.11.2](service/grafana/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.15.3](service/greengrass/CHANGELOG.md#v1153-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.21.3](service/greengrassv2/CHANGELOG.md#v1213-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.17.2](service/groundstation/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.3](service/guardduty/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.16.2](service/health/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.15.2](service/healthlake/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.13.2](service/honeycode/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.2](service/identitystore/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.22.2](service/imagebuilder/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.13.2](service/inspector/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.11.3](service/inspector2/CHANGELOG.md#v1113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.34.2](service/iot/CHANGELOG.md#v1342-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.11.2](service/iot1clickdevicesservice/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.12.2](service/iot1clickprojects/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.14.2](service/iotanalytics/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.14.2](service/iotdataplane/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.17.2](service/iotdeviceadvisor/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.15.2](service/iotevents/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.13.2](service/ioteventsdata/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.13.2](service/iotfleethub/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.3.2](service/iotfleetwise/CHANGELOG.md#v132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.12.2](service/iotjobsdataplane/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.1.2](service/iotroborunner/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.15.2](service/iotsecuretunneling/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.27.2](service/iotsitewise/CHANGELOG.md#v1272-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.14.2](service/iotthingsgraph/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.10.2](service/iottwinmaker/CHANGELOG.md#v1102-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.24.2](service/iotwireless/CHANGELOG.md#v1242-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.2](service/ivs/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.3.2](service/ivschat/CHANGELOG.md#v132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.19.2](service/kafka/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.9.2](service/kafkaconnect/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.38.3](service/kendra/CHANGELOG.md#v1383-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kendraranking`: [v1.0.4](service/kendraranking/CHANGELOG.md#v104-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.1.2](service/keyspaces/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.17.4](service/kinesis/CHANGELOG.md#v1174-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.14.2](service/kinesisanalytics/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.16.2](service/kinesisanalyticsv2/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.15.3](service/kinesisvideo/CHANGELOG.md#v1153-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.14.3](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.11.3](service/kinesisvideomedia/CHANGELOG.md#v1113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.11.3](service/kinesisvideosignaling/CHANGELOG.md#v1113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.2.3](service/kinesisvideowebrtcstorage/CHANGELOG.md#v123-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.3](service/kms/CHANGELOG.md#v1203-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.19.2](service/lakeformation/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.29.2](service/lambda/CHANGELOG.md#v1292-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.17.2](service/lexmodelbuildingservice/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.28.1](service/lexmodelsv2/CHANGELOG.md#v1281-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.13.2](service/lexruntimeservice/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.17.1](service/lexruntimev2/CHANGELOG.md#v1171-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.17.2](service/licensemanager/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.1.2](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.2.2](service/licensemanagerusersubscriptions/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.3](service/lightsail/CHANGELOG.md#v1253-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.21.2](service/location/CHANGELOG.md#v1212-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.17.2](service/lookoutequipment/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.19.2](service/lookoutmetrics/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.15.2](service/lookoutvision/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.4.2](service/m2/CHANGELOG.md#v142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.15.2](service/machinelearning/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.15.2](service/macie/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.26.2](service/macie2/CHANGELOG.md#v1262-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.14.2](service/managedblockchain/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.15.2](service/marketplacecatalog/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.12.2](service/marketplacecommerceanalytics/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.12.2](service/marketplaceentitlementservice/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.14.3](service/marketplacemetering/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.18.2](service/mediaconnect/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.31.1](service/mediaconvert/CHANGELOG.md#v1311-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.29.2](service/medialive/CHANGELOG.md#v1292-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.20.2](service/mediapackage/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.21.2](service/mediapackagevod/CHANGELOG.md#v1212-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.13.2](service/mediastore/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.13.2](service/mediastoredata/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.22.2](service/mediatailor/CHANGELOG.md#v1222-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.12.2](service/memorydb/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.17.2](service/mgn/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.13.2](service/migrationhub/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.13.2](service/migrationhubconfig/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.1.2](service/migrationhuborchestrator/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.1](service/migrationhubrefactorspaces/CHANGELOG.md#v191-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.7.2](service/migrationhubstrategy/CHANGELOG.md#v172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.12.2](service/mobile/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.14.2](service/mq/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.14.2](service/mturk/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.14.2](service/mwaa/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.24.2](service/networkfirewall/CHANGELOG.md#v1242-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.2](service/networkmanager/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.16.2](service/nimble/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.1.3](service/oam/CHANGELOG.md#v113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.1.2](service/omics/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.14.2](service/opensearch/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.1.3](service/opensearchserverless/CHANGELOG.md#v113-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.14.2](service/opsworks/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.15.2](service/opsworkscm/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.18.2](service/organizations/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.27.2](service/outposts/CHANGELOG.md#v1272-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.11.2](service/panorama/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.23.2](service/personalize/CHANGELOG.md#v1232-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.13.2](service/personalizeevents/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.13.2](service/personalizeruntime/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.16.3](service/pi/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.18.2](service/pinpoint/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.12.2](service/pinpointemail/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.11.2](service/pinpointsmsvoice/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.1.2](service/pinpointsmsvoicev2/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.1.2](service/pipes/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.25.1](service/polly/CHANGELOG.md#v1251-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.18.2](service/pricing/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.2.0](service/privatenetworks/CHANGELOG.md#v120-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: This release introduces a new StartNetworkResourceUpdate API, which enables return/replacement of hardware from a NetworkSite. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.20.1](service/proton/CHANGELOG.md#v1201-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.2](service/qldb/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.14.2](service/qldbsession/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.31.2](service/quicksight/CHANGELOG.md#v1312-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.17.3](service/ram/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.8.3](service/rbin/CHANGELOG.md#v183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.3](service/rds/CHANGELOG.md#v1403-2023-02-15) + * **Documentation**: Database Activity Stream support for RDS for SQL Server. +* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.13.2](service/rdsdata/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.18.2](service/redshiftdata/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.3](service/redshiftserverless/CHANGELOG.md#v143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.23.2](service/rekognition/CHANGELOG.md#v1232-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.8.2](service/resiliencehub/CHANGELOG.md#v182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.3](service/resourceexplorer2/CHANGELOG.md#v123-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.14.3](service/resourcegroups/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.14.3](service/resourcegroupstaggingapi/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.18.2](service/robomaker/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.1.2](service/rolesanywhere/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.14.2](service/route53domains/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.11.2](service/route53recoverycluster/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.11.2](service/route53recoverycontrolconfig/CHANGELOG.md#v1112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.9.2](service/route53recoveryreadiness/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.16.3](service/route53resolver/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.9.2](service/rum/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.14.2](service/s3outposts/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.68.1](service/sagemaker/CHANGELOG.md#v1681-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.15.2](service/sagemakera2iruntime/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.13.2](service/sagemakeredge/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.13.2](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.1.2](service/sagemakergeospatial/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakermetrics`: [v1.0.5](service/sagemakermetrics/CHANGELOG.md#v105-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.3](service/sagemakerruntime/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.12.2](service/savingsplans/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.1.2](service/scheduler/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.15.2](service/schemas/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.4](service/secretsmanager/CHANGELOG.md#v1184-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.2](service/securityhub/CHANGELOG.md#v1282-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.2.2](service/securitylake/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.12.2](service/serverlessapplicationrepository/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.2](service/servicecatalog/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.16.3](service/servicecatalogappregistry/CHANGELOG.md#v1163-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.19.2](service/servicediscovery/CHANGELOG.md#v1192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.14.3](service/servicequotas/CHANGELOG.md#v1143-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.16.2](service/sesv2/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.17.3](service/sfn/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.18.2](service/shield/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.14.2](service/signer/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.1.2](service/simspaceweaver/CHANGELOG.md#v112-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.13.2](service/sms/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.18.1](service/snowball/CHANGELOG.md#v1181-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.9.2](service/snowdevicemanagement/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.3](service/ssm/CHANGELOG.md#v1353-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.14.2](service/ssmcontacts/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. 
+ * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.20.2](service/ssmincidents/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.2.2](service/ssmsap/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.12.2](service/sso/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.16.2](service/ssoadmin/CHANGELOG.md#v1162-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.14.2](service/ssooidc/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.18.3](service/storagegateway/CHANGELOG.md#v1183-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.2](service/support/CHANGELOG.md#v1142-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.2.2](service/supportapp/CHANGELOG.md#v122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.14.4](service/swf/CHANGELOG.md#v1144-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. 
This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.17.3](service/synthetics/CHANGELOG.md#v1173-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.20.2](service/textract/CHANGELOG.md#v1202-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.15.2](service/timestreamquery/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.15.2](service/timestreamwrite/CHANGELOG.md#v1152-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.25.2](service/transcribe/CHANGELOG.md#v1252-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.9.2](service/transcribestreaming/CHANGELOG.md#v192-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.3](service/transfer/CHANGELOG.md#v1283-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.17.2](service/translate/CHANGELOG.md#v1172-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.12.2](service/voiceid/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.12.2](service/waf/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.13.3](service/wafregional/CHANGELOG.md#v1133-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.25.0](service/wafv2/CHANGELOG.md#v1250-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Feature**: For protected CloudFront distributions, you can now use the AWS WAF Fraud Control account takeover prevention (ATP) managed rule group to block new login attempts from clients that have recently submitted too many failed login attempts. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.18.2](service/wellarchitected/CHANGELOG.md#v1182-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.12.2](service/wisdom/CHANGELOG.md#v1122-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.3](service/workdocs/CHANGELOG.md#v1133-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. +* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.13.2](service/worklink/CHANGELOG.md#v1132-2023-02-15) + * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. + * **Bug Fix**: Correct error type parsing for restJson services. 
+* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.18.2](service/workmail/CHANGELOG.md#v1182-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.12.2](service/workmailmessageflow/CHANGELOG.md#v1122-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.3](service/workspaces/CHANGELOG.md#v1283-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.9.2](service/workspacesweb/CHANGELOG.md#v192-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.16.3](service/xray/CHANGELOG.md#v1163-2023-02-15)
+  * **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+  * **Bug Fix**: Correct error type parsing for restJson services.
+
+# Release (2023-02-14)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.17.0](service/appconfig/CHANGELOG.md#v1170-2023-02-14)
+  * **Feature**: AWS AppConfig now offers the option to set a version label on hosted configuration versions. Version labels allow you to identify specific hosted configuration versions based on an alternate versioning scheme that you define.
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.22.0](service/datasync/CHANGELOG.md#v1220-2023-02-14)
+  * **Feature**: With this launch, we are giving customers the ability to use older SMB protocol versions, enabling them to use DataSync to copy data to and from their legacy storage arrays.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.86.0](service/ec2/CHANGELOG.md#v1860-2023-02-14)
+  * **Feature**: With this release customers can turn host maintenance on or off when allocating or modifying a supported dedicated host. Host maintenance is turned on by default for supported hosts.
+
+# Release (2023-02-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.9.0](service/account/CHANGELOG.md#v190-2023-02-13)
+  * **Feature**: This release of the Account Management API enables customers to view and manage whether AWS Opt-In Regions are enabled or disabled for their Account. For more information, see https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html
+* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.6.0](service/appconfigdata/CHANGELOG.md#v160-2023-02-13)
+  * **Feature**: AWS AppConfig now offers the option to set a version label on hosted configuration versions. If a labeled hosted configuration version is deployed, its version label is available in the GetLatestConfiguration response.
+* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.18.0](service/snowball/CHANGELOG.md#v1180-2023-02-13)
+  * **Feature**: Adds support for EKS Anywhere on Snowball. AWS Snow Family customers can now install the EKS Anywhere service on Snowball Edge Compute Optimized devices.
+
+# Release (2023-02-10)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.27.0](service/autoscaling/CHANGELOG.md#v1270-2023-02-10)
+  * **Feature**: You can now either terminate/replace, ignore, or wait for EC2 Auto Scaling instances on standby or protected from scale in. You can also roll back changes from a failed instance refresh.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.46.0](service/connect/CHANGELOG.md#v1460-2023-02-10)
+  * **Feature**: This update provides the Wisdom session ARN for contacts enabled for Wisdom in the chat channel.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.85.0](service/ec2/CHANGELOG.md#v1850-2023-02-10)
+  * **Feature**: Adds support for waiters that automatically poll for an imported snapshot until it reaches the completed state.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.25.0](service/polly/CHANGELOG.md#v1250-2023-02-10)
+  * **Feature**: Amazon Polly adds two new neural Japanese voices - Kazuha, Tomoko
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.68.0](service/sagemaker/CHANGELOG.md#v1680-2023-02-10)
+  * **Feature**: Amazon SageMaker Autopilot adds support for selecting algorithms in the CreateAutoMLJob API.
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.2](service/sns/CHANGELOG.md#v1202-2023-02-10)
+  * **Documentation**: This release adds support for SNS X-Ray active tracing as well as other updates.
+
+# Release (2023-02-09)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.2](service/chimesdkmeetings/CHANGELOG.md#v1142-2023-02-09)
+  * **Documentation**: Documentation updates for Chime Meetings SDK
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.17.0](service/emrcontainers/CHANGELOG.md#v1170-2023-02-09)
+  * **Feature**: EMR on EKS allows configuring retry policies for job runs through the StartJobRun API. With a retry policy, a job's driver pod is restarted automatically if it fails or is deleted. The job's status can be seen in the DescribeJobRun and ListJobRun APIs and monitored using CloudWatch events (see the sketch below).
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.28.0](service/lexmodelsv2/CHANGELOG.md#v1280-2023-02-09)
+  * **Feature**: AWS Lex now supports Network of Bots.
+* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.17.0](service/lexruntimev2/CHANGELOG.md#v1170-2023-02-09)
+  * **Feature**: AWS Lex now supports Network of Bots.
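A minimal sketch of the EMR on EKS retry-policy entry above. The cluster ID, role ARN, release label, and entry point are placeholders, and the `RetryPolicyConfiguration` field names follow the shape this release describes, so they are worth verifying against the generated types:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/emrcontainers"
	"github.com/aws/aws-sdk-go-v2/service/emrcontainers/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := emrcontainers.NewFromConfig(cfg)

	out, err := client.StartJobRun(context.TODO(), &emrcontainers.StartJobRunInput{
		VirtualClusterId: aws.String("vc-0123456789abcdef0"),                   // placeholder
		ExecutionRoleArn: aws.String("arn:aws:iam::111122223333:role/emr-job"), // placeholder
		ReleaseLabel:     aws.String("emr-6.9.0-latest"),
		JobDriver: &types.JobDriver{
			SparkSubmitJobDriver: &types.SparkSubmitJobDriver{
				EntryPoint: aws.String("s3://amzn-s3-demo-bucket/app.py"), // placeholder
			},
		},
		// New in v1.17.0: restart the driver pod automatically on failure.
		RetryPolicyConfiguration: &types.RetryPolicyConfiguration{
			MaxAttempts: aws.Int32(3),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("job run id:", aws.ToString(out.Id))
}
```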
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.2](service/lightsail/CHANGELOG.md#v1252-2023-02-09)
+  * **Documentation**: Documentation updates for Lightsail
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.9.0](service/migrationhubrefactorspaces/CHANGELOG.md#v190-2023-02-09)
+  * **Feature**: This release adds support for creating environments with a network fabric type of NONE.
+* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.2](service/workdocs/CHANGELOG.md#v1132-2023-02-09)
+  * **Documentation**: Doc only update for the WorkDocs APIs.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.2](service/workspaces/CHANGELOG.md#v1282-2023-02-09)
+  * **Documentation**: Removed Windows Server 2016 BYOL and made changes based on IAM campaign.
+
+# Release (2023-02-08)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.20.0](service/backup/CHANGELOG.md#v1200-2023-02-08)
+  * **Feature**: This release adds one attribute (resource name) to the output model of 9 existing AWS Backup APIs so that customers see the resource name in the output. No input is required from customers.
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.25.0](service/cloudfront/CHANGELOG.md#v1250-2023-02-08)
+  * **Feature**: CloudFront Origin Access Control extends support to AWS Elemental MediaStore origins.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.41.0](service/glue/CHANGELOG.md#v1410-2023-02-08)
+  * **Feature**: DirectJDBCSource + Glue 4.0 streaming options
+
+# Release (2023-02-07)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.2](service/transfer/CHANGELOG.md#v1282-2023-02-07)
+  * **Documentation**: Updated the documentation for the ImportCertificate API call, and added examples.
+
+# Release (2023-02-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.21.0](service/computeoptimizer/CHANGELOG.md#v1210-2023-02-06)
+  * **Feature**: AWS Compute Optimizer can now infer if Kafka is running on an instance.
+* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.23.0](service/customerprofiles/CHANGELOG.md#v1230-2023-02-06)
+  * **Feature**: This release deprecates the PartyType and Gender enum data types from the Profile model and replaces them with new PartyTypeString and GenderString attributes, which accept any string of length up to 255 (see the sketch after this section).
+* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.22.0](service/frauddetector/CHANGELOG.md#v1220-2023-02-06)
+  * **Feature**: Amazon Fraud Detector - This release introduces Cold Start Model Training, which optimizes training for small datasets and adds intelligent methods for treating unlabeled data. You can now train Online Fraud Insights or Transaction Fraud Insights models with minimal historical data.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.31.0](service/mediaconvert/CHANGELOG.md#v1310-2023-02-06)
+  * **Feature**: The AWS Elemental MediaConvert SDK has added improved scene change detection capabilities and a bandwidth reduction filter, along with video quality enhancements, to the AVC encoder.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.27.0](service/outposts/CHANGELOG.md#v1270-2023-02-06)
+  * **Feature**: Adds OrderType to the Order structure. Adds PreviousOrderId and PreviousLineItemId to the LineItem structure. Adds the new line item status REPLACED. Increases the maximum length of the pagination token.
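A minimal sketch of the Customer Profiles change above, under the assumption that CreateProfile exposes the new free-form PartyTypeString field alongside the deprecated enum; the domain name and values are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/customerprofiles"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := customerprofiles.NewFromConfig(cfg)

	out, err := client.CreateProfile(context.TODO(), &customerprofiles.CreateProfileInput{
		DomainName: aws.String("example-domain"), // placeholder
		FirstName:  aws.String("Jorge"),          // placeholder
		// Free-form replacement for the deprecated PartyType enum;
		// per the entry above, any string up to 255 characters.
		PartyTypeString: aws.String("INDIVIDUAL"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("profile id:", aws.ToString(out.ProfileId))
}
```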
+
+# Release (2023-02-03)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.26.2](service/autoscaling/CHANGELOG.md#v1262-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.26.2](service/cloudformation/CHANGELOG.md#v1262-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.14.1](service/cloudsearch/CHANGELOG.md#v1141-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.2](service/cloudwatch/CHANGELOG.md#v1252-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.20.2](service/docdb/CHANGELOG.md#v1202-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.84.1](service/ec2/CHANGELOG.md#v1841-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.26.2](service/elasticache/CHANGELOG.md#v1262-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.15.1](service/elasticbeanstalk/CHANGELOG.md#v1151-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.15.2](service/elasticloadbalancing/CHANGELOG.md#v1152-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.3](service/elasticloadbalancingv2/CHANGELOG.md#v1193-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.2](service/iam/CHANGELOG.md#v1192-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.19.2](service/neptune/CHANGELOG.md#v1192-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.20.0](service/proton/CHANGELOG.md#v1200-2023-02-03)
+  * **Feature**: Add new GetResourcesSummary API
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.2](service/rds/CHANGELOG.md#v1402-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.2](service/redshift/CHANGELOG.md#v1272-2023-02-03)
+  * **Documentation**: Corrects descriptions of the parameters for the API operations RestoreFromClusterSnapshot, RestoreTableFromClusterSnapshot, and CreateCluster.
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.15.1](service/ses/CHANGELOG.md#v1151-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.1](service/sns/CHANGELOG.md#v1201-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.20.2](service/sqs/CHANGELOG.md#v1202-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.3](service/sts/CHANGELOG.md#v1183-2023-02-03)
+  * **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+
+# Release (2023-02-02)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.16.0](service/appconfig/CHANGELOG.md#v1160-2023-02-02)
+  * **Feature**: AWS AppConfig introduces KMS customer-managed key (CMK) encryption of configuration data, along with AWS Secrets Manager as a new configuration data source. S3 objects using SSE-KMS encryption and SSM Parameter Store SecureStrings are also now supported.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.84.0](service/ec2/CHANGELOG.md#v1840-2023-02-02)
+  * **Feature**: Documentation updates for EC2.
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.2](service/elasticloadbalancingv2/CHANGELOG.md#v1192-2023-02-02)
+  * **Documentation**: The GWLB Flex Health Check project updates the default values of healthy-threshold-count from 3 to 5 and unhealthy-threshold-count from 3 to 2.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.31.0](service/quicksight/CHANGELOG.md#v1310-2023-02-02)
+  * **Feature**: QuickSight support for Radar Chart and Dashboard Publish Options
+
+# Release (2023-02-01)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.22.0](service/devopsguru/CHANGELOG.md#v1220-2023-02-01)
+  * **Feature**: This release adds filter support to the ListAnomalyForInsight API.
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.25.0](service/forecast/CHANGELOG.md#v1250-2023-02-01)
+  * **Feature**: This release enables customers to select INCREMENTAL as ImportModel in Forecast's CreateDatasetImportJob API. Verified latest SDK containing required attribute, following https://w.amazon.com/bin/view/AWS-Seer/Launch/Trebuchet/
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.1](service/iam/CHANGELOG.md#v1191-2023-02-01)
+  * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM).
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.22.0](service/mediatailor/CHANGELOG.md#v1220-2023-02-01)
+  * **Feature**: The AWS Elemental MediaTailor SDK for Channel Assembly has added support for program updates, and the ability to clip the end of VOD sources in programs.
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.20.0](service/sns/CHANGELOG.md#v1200-2023-02-01)
+  * **Feature**: Additional attributes added for set-topic-attributes.
+
+# Release (2023-01-31)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.19.0](service/appsync/CHANGELOG.md#v1190-2023-01-31)
+  * **Feature**: This release introduces the feature to support EventBridge as an AppSync data source.
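A minimal sketch of registering EventBridge as an AppSync data source, per the entry above. The field name EventBridgeConfig and the enum value follow this release's naming pattern and should be verified against the generated types; the API ID, role ARN, and bus ARN are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/appsync"
	"github.com/aws/aws-sdk-go-v2/service/appsync/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := appsync.NewFromConfig(cfg)

	out, err := client.CreateDataSource(context.TODO(), &appsync.CreateDataSourceInput{
		ApiId:          aws.String("abcdefghijklmnopqrstuvwxyz"),               // placeholder
		Name:           aws.String("eventbridge"),                              // placeholder
		Type:           types.DataSourceTypeAmazonEventbridge,
		ServiceRoleArn: aws.String("arn:aws:iam::111122223333:role/appsync-eb"), // placeholder
		EventBridgeConfig: &types.EventBridgeDataSourceConfig{
			EventBusArn: aws.String("arn:aws:events:us-east-1:111122223333:event-bus/default"), // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.DataSource != nil {
		fmt.Println("created data source:", aws.ToString(out.DataSource.Name))
	}
}
```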
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.23.0](service/cloudtrail/CHANGELOG.md#v1230-2023-01-31)
+  * **Feature**: Add new "Channel" APIs to enable users to manage channels used for CloudTrail Lake integrations, and "Resource Policy" APIs to enable users to manage the resource-based permissions policy attached to a channel.
+* `github.com/aws/aws-sdk-go-v2/service/cloudtraildata`: [v1.0.0](service/cloudtraildata/CHANGELOG.md#v100-2023-01-31)
+  * **Release**: New AWS service client module
+  * **Feature**: Add CloudTrail Data Service to enable users to ingest activity events from non-AWS sources into CloudTrail Lake.
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.16.0](service/codeartifact/CHANGELOG.md#v1160-2023-01-31)
+  * **Feature**: This release introduces a new DeletePackage API, which enables deletion of a package and all of its versions from a repository (see the sketch after this section).
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.83.0](service/ec2/CHANGELOG.md#v1830-2023-01-31)
+  * **Feature**: This launch allows customers to associate up to 8 IP addresses with their NAT Gateways, increasing the limit on concurrent connections to a single destination by eight times, from 55K to 440K.
+* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.17.0](service/groundstation/CHANGELOG.md#v1170-2023-01-31)
+  * **Feature**: DigIF Expansion changes to the Customer APIs.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.34.0](service/iot/CHANGELOG.md#v1340-2023-01-31)
+  * **Feature**: Added support for IoT Rules Engine CloudWatch Logs action batch mode.
+* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.14.0](service/opensearch/CHANGELOG.md#v1140-2023-01-31)
+  * **Feature**: Amazon OpenSearch Service adds the option for a VPC endpoint connection between two domains when the local domain uses OpenSearch version 1.3 or 2.3. You can now use remote reindex to copy indices from one VPC domain to another without a reverse proxy.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.24.0](service/polly/CHANGELOG.md#v1240-2023-01-31)
+  * **Feature**: Amazon Polly adds two new neural American English voices - Ruth, Stephen
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.67.0](service/sagemaker/CHANGELOG.md#v1670-2023-01-31)
+  * **Feature**: Amazon SageMaker Automatic Model Tuning now supports more completion criteria for Hyperparameter Optimization.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.28.0](service/securityhub/CHANGELOG.md#v1280-2023-01-31)
+  * **Feature**: New fields have been added to the AWS Security Finding Format. Compliance.SecurityControlId is a unique identifier for a security control across standards. Compliance.AssociatedStandards contains all enabled standards in which a security control is enabled.
+
+# Release (2023-01-30)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.26.0](service/cloudformation/CHANGELOG.md#v1260-2023-01-30)
+  * **Feature**: This feature provides a method of obtaining the regions in which a stack set has stack instances deployed.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.82.0](service/ec2/CHANGELOG.md#v1820-2023-01-30)
+  * **Feature**: We add Prefix Lists as a new route destination option for LocalGatewayRoutes, allowing customers to create routes to Prefix Lists. Prefix List routes allow customers to group individual CIDR routes with the same target into a single route.
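A minimal sketch of the CodeArtifact DeletePackage API from the 2023-01-31 section above; the domain, repository, and package names are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := codeartifact.NewFromConfig(cfg)

	// Deletes the package and all of its versions from the repository.
	out, err := client.DeletePackage(context.TODO(), &codeartifact.DeletePackageInput{
		Domain:     aws.String("example-domain"),  // placeholder
		Repository: aws.String("example-repo"),    // placeholder
		Format:     types.PackageFormatNpm,
		Package:    aws.String("example-package"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.DeletedPackage != nil {
		fmt.Println("deleted:", aws.ToString(out.DeletedPackage.Package))
	}
}
```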
+
+# Release (2023-01-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.20.0](service/appstream/CHANGELOG.md#v1200-2023-01-27)
+  * **Feature**: Fixes the issue where AppStream waiters hang for fleet_started and fleet_stopped.
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.21.0](service/mediatailor/CHANGELOG.md#v1210-2023-01-27)
+  * **Feature**: This release introduces the As Run logging type, along with API and documentation updates.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.26.0](service/outposts/CHANGELOG.md#v1260-2023-01-27)
+  * **Feature**: Adds support for payment term in GetOrder and CreateOrder responses.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.66.0](service/sagemaker/CHANGELOG.md#v1660-2023-01-27)
+  * **Feature**: This release supports running SageMaker Training jobs with container images that are in a private Docker registry.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.18.0](service/sagemakerruntime/CHANGELOG.md#v1180-2023-01-27)
+  * **Feature**: Amazon SageMaker Runtime, which supports asynchronous invocation via InvokeEndpointAsync, can now invoke endpoints with custom timeout values. Asynchronous invocations support longer processing times.
+
+# Release (2023-01-26)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.18.0](service/eventbridge/CHANGELOG.md#v1180-2023-01-26)
+  * **Feature**: Minor comments for Redshift Serverless workgroup target support.
+
+# Release (2023-01-25)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.81.0](service/ec2/CHANGELOG.md#v1810-2023-01-25)
+  * **Feature**: This release adds new functionality that allows customers to provision IPv6 CIDR blocks through Amazon VPC IP Address Manager (IPAM) as well as allowing customers to utilize IPAM Resource Discovery APIs.
+* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.4.0](service/m2/CHANGELOG.md#v140-2023-01-25)
+  * **Feature**: Adds returnCode and batchJobIdentifier to the GetBatchJobExecution response, so users can view the batch job execution result and unique identifier from the engine. Also removes unused headers from the REST APIs.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.23.0](service/polly/CHANGELOG.md#v1230-2023-01-25)
+  * **Feature**: Add 5 new neural voices - Sergio (es-ES), Andres (es-MX), Remi (fr-FR), Adriano (it-IT) and Thiago (pt-BR).
+* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.1](service/redshiftserverless/CHANGELOG.md#v141-2023-01-25)
+  * **Documentation**: Added query monitoring rules as possible parameters for create and update workgroup operations.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.65.0](service/sagemaker/CHANGELOG.md#v1650-2023-01-25)
+  * **Feature**: SageMaker Inference Recommender now decouples from Model Registry and can accept a model name to invoke inference recommendation jobs; Inference Recommender now provides CPU/Memory utilization metrics data in the recommendation output.
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.2](service/sts/CHANGELOG.md#v1182-2023-01-25)
+  * **Documentation**: Doc-only change to update wording in a key topic.
+
+# Release (2023-01-24)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.27.0](service/route53/CHANGELOG.md#v1270-2023-01-24)
+  * **Feature**: Amazon Route 53 now supports the Asia Pacific (Melbourne) Region (ap-southeast-4) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region.
+* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.2.0](service/ssmsap/CHANGELOG.md#v120-2023-01-24)
+  * **Feature**: This release provides updates to documentation and support for listing operations performed by AWS Systems Manager for SAP.
+
+# Release (2023-01-23)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.29.0](service/lambda/CHANGELOG.md#v1290-2023-01-23)
+  * **Feature**: Release Lambda RuntimeManagementConfig, enabling customers to better manage runtime updates to their Lambda functions. This release adds two new APIs, GetRuntimeManagementConfig and PutRuntimeManagementConfig, as well as support on the existing Create/Get/Update function APIs (see the sketch after this section).
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.64.0](service/sagemaker/CHANGELOG.md#v1640-2023-01-23)
+  * **Feature**: Amazon SageMaker Inference now supports P4de instance types.
+
+# Release (2023-01-20)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.80.0](service/ec2/CHANGELOG.md#v1800-2023-01-20)
+  * **Feature**: C6in, M6in, M6idn, R6in and R6idn instances are powered by 3rd Generation Intel Xeon Scalable processors (code named Ice Lake) with an all-core turbo frequency of 3.5 GHz.
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.20.0](service/ivs/CHANGELOG.md#v1200-2023-01-20)
+  * **Feature**: API and doc update: updates to the arns field in BatchGetStreamKey, plus updates to operations and structures.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.30.0](service/quicksight/CHANGELOG.md#v1300-2023-01-20)
+  * **Feature**: This release adds support for data bars in QuickSight tables and increases the pivot table field well limit.
+
+# Release (2023-01-19)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.24.0](service/appflow/CHANGELOG.md#v1240-2023-01-19)
+  * **Feature**: Adding support for Salesforce Pardot connector in Amazon AppFlow.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.20.0](service/cloudwatchlogs/CHANGELOG.md#v1200-2023-01-19)
+  * **Feature**: Bug fix - Removed the regex pattern validation from CoralModel to avoid a potential security issue.
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.15.0](service/codeartifact/CHANGELOG.md#v1150-2023-01-19)
+  * **Feature**: Documentation updates for CodeArtifact
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.45.0](service/connect/CHANGELOG.md#v1450-2023-01-19)
+  * **Feature**: Amazon Connect Chat introduces Persistent Chat, allowing customers to resume previous conversations with context and transcripts carried over from previous chats, eliminating the need to repeat themselves and allowing agents to provide personalized service with access to the entire conversation history.
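A minimal sketch of the two Lambda runtime-management APIs from the 2023-01-23 entry above; the function name is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
	"github.com/aws/aws-sdk-go-v2/service/lambda/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := lambda.NewFromConfig(cfg)

	// Apply runtime updates only when the function itself is updated,
	// instead of automatic rollout.
	_, err = client.PutRuntimeManagementConfig(context.TODO(), &lambda.PutRuntimeManagementConfigInput{
		FunctionName:    aws.String("my-function"), // placeholder
		UpdateRuntimeOn: types.UpdateRuntimeOnFunctionUpdate,
	})
	if err != nil {
		log.Fatal(err)
	}

	out, err := client.GetRuntimeManagementConfig(context.TODO(), &lambda.GetRuntimeManagementConfigInput{
		FunctionName: aws.String("my-function"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("update mode:", out.UpdateRuntimeOn)
}
```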
+* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.15.0](service/connectparticipant/CHANGELOG.md#v1150-2023-01-19)
+  * **Feature**: This release updates Amazon Connect Participant's GetTranscript API to provide transcripts of past chats on a persistent chat session.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.79.0](service/ec2/CHANGELOG.md#v1790-2023-01-19)
+  * **Feature**: Adds SSM Parameter Resource Aliasing support to EC2 Launch Templates. Launch Templates can now store parameter aliases in place of AMI Resource IDs. CreateLaunchTemplateVersion and DescribeLaunchTemplateVersions now support a convenience flag, ResolveAlias, to return the resolved parameter value (see the sketch after this section).
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.40.0](service/glue/CHANGELOG.md#v1400-2023-01-19)
+  * **Feature**: Release Glue Studio Hudi Data Lake Format for SDK/CLI
+* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.16.0](service/groundstation/CHANGELOG.md#v1160-2023-01-19)
+  * **Feature**: Add configurable prepass and postpass times for DataflowEndpointGroup. Add a waiter to allow customers to wait for a contact that was reserved through ReserveContact.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.29.0](service/medialive/CHANGELOG.md#v1290-2023-01-19)
+  * **Feature**: AWS Elemental MediaLive adds support for SCTE 35 preRollMilliSeconds.
+* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.13.0](service/opensearch/CHANGELOG.md#v1130-2023-01-19)
+  * **Feature**: This release adds the enhanced dry run option, which checks for validation errors that might occur when deploying configuration changes and provides a summary of these errors, if any. The feature also indicates whether a blue/green deployment will be required to apply a change.
+* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.11.0](service/panorama/CHANGELOG.md#v1110-2023-01-19)
+  * **Feature**: Added AllowMajorVersionUpdate option to OTAJobConfig to make appliance software major version updates opt-in.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.63.0](service/sagemaker/CHANGELOG.md#v1630-2023-01-19)
+  * **Feature**: HyperParameterTuningJobs now allow passing environment variables into the corresponding TrainingJobs.
+
+# Release (2023-01-18)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.25.0](service/cloudwatch/CHANGELOG.md#v1250-2023-01-18)
+  * **Feature**: Enable cross-account streams in CloudWatch Metric Streams via Observability Access Manager.
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.1](service/efs/CHANGELOG.md#v1191-2023-01-18)
+  * **Documentation**: Documentation updates for the EFS access points limit increase.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.24.2](service/wafv2/CHANGELOG.md#v1242-2023-01-18)
+  * **Documentation**: Improved the visibility of the guidance for updating AWS WAF resources, such as web ACLs and rule groups.
+
+# Release (2023-01-17)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.5.0](service/billingconductor/CHANGELOG.md#v150-2023-01-17)
+  * **Feature**: This release adds support for SKU Scope for pricing plans.
+* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.22.0](service/imagebuilder/CHANGELOG.md#v1220-2023-01-17)
+  * **Feature**: Add support for AWS Marketplace product IDs as input during CreateImageRecipe for the parent-image parameter. Add support for listing third-party components.
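A minimal sketch of the ResolveAlias flag from the EC2 launch-template entry in the 2023-01-19 section above; the launch template ID is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	// ResolveAlias asks EC2 to substitute the resolved AMI ID for any
	// SSM parameter alias stored in the template's ImageId field.
	out, err := client.DescribeLaunchTemplateVersions(context.TODO(), &ec2.DescribeLaunchTemplateVersionsInput{
		LaunchTemplateId: aws.String("lt-0123456789abcdef0"), // placeholder
		Versions:         []string{"$Latest"},
		ResolveAlias:     aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range out.LaunchTemplateVersions {
		if v.LaunchTemplateData != nil {
			fmt.Println("resolved image id:", aws.ToString(v.LaunchTemplateData.ImageId))
		}
	}
}
```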
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.24.0](service/networkfirewall/CHANGELOG.md#v1240-2023-01-17)
+  * **Feature**: Network Firewall now allows creation of dual stack endpoints, enabling inspection of IPv6 traffic.
+
+# Release (2023-01-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.44.0](service/connect/CHANGELOG.md#v1440-2023-01-13)
+  * **Feature**: This release updates the UpdateContactFlowContent, UpdateContactFlowMetadata, UpdateContactFlowName, and DeleteContactFlow APIs to return empty responses.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.78.0](service/ec2/CHANGELOG.md#v1780-2023-01-13)
+  * **Feature**: Documentation updates for EC2.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.25.0](service/outposts/CHANGELOG.md#v1250-2023-01-13)
+  * **Feature**: This release adds POWER_30_KVA as an option for PowerDrawKva. PowerDrawKva is part of the RackPhysicalProperties structure in the CreateSite request.
+* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.14.0](service/resourcegroups/CHANGELOG.md#v1140-2023-01-13)
+  * **Feature**: AWS Resource Groups customers can now turn on Group Lifecycle Events in their AWS account. When you turn this on, Resource Groups monitors your groups for changes to group state or membership. Those changes are sent to Amazon EventBridge as events that you can respond to using rules you create.
+
+# Release (2023-01-12)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.0.0](service/cleanrooms/CHANGELOG.md#v100-2023-01-12)
+  * **Release**: New AWS service client module
+  * **Feature**: Initial release of AWS Clean Rooms
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.19.0](service/cloudwatchlogs/CHANGELOG.md#v1190-2023-01-12)
+  * **Feature**: Bug fix: logGroupName is no longer a required field in the GetLogEvents, FilterLogEvents, GetLogGroupFields, and DescribeLogStreams APIs, as logGroupIdentifier can be provided instead.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.28.0](service/lambda/CHANGELOG.md#v1280-2023-01-12)
+  * **Feature**: Add support for the MaximumConcurrency parameter for SQS event sources. Customers can now limit the maximum concurrent invocations for their SQS Event Source Mapping (see the sketch after this section).
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.30.0](service/mediaconvert/CHANGELOG.md#v1300-2023-01-12)
+  * **Feature**: The AWS Elemental MediaConvert SDK has added support for compact DASH manifest generation, audio normalization using TruePeak measurements, and the ability to clip the sample range in the color corrector.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.1](service/secretsmanager/CHANGELOG.md#v1181-2023-01-12)
+  * **Documentation**: Update documentation for new ListSecrets and DescribeSecret parameters.
+
+# Release (2023-01-11)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.38.0](service/kendra/CHANGELOG.md#v1380-2023-01-11)
+  * **Feature**: This release adds support for new document types - RTF, XML, XSLT, MS_EXCEL, CSV, JSON, MD
+
+# Release (2023-01-10)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.21.0](service/location/CHANGELOG.md#v1210-2023-01-10)
+  * **Feature**: This release adds support for two new route travel modes, Bicycle and Motorcycle, which can be used with the Grab data source.
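A minimal sketch of the Lambda SQS MaximumConcurrency parameter from the 2023-01-12 section above; the function name and queue ARN are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
	"github.com/aws/aws-sdk-go-v2/service/lambda/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := lambda.NewFromConfig(cfg)

	out, err := client.CreateEventSourceMapping(context.TODO(), &lambda.CreateEventSourceMappingInput{
		FunctionName:   aws.String("my-function"),                                  // placeholder
		EventSourceArn: aws.String("arn:aws:sqs:us-east-1:111122223333:my-queue"), // placeholder
		// New in v1.28.0: cap concurrent invocations driven by this queue.
		ScalingConfig: &types.ScalingConfig{
			MaximumConcurrency: aws.Int32(5),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mapping uuid:", aws.ToString(out.UUID))
}
```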
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.40.0](service/rds/CHANGELOG.md#v1400-2023-01-10)
+  * **Feature**: This release adds support for configuring allocated storage on the CreateDBInstanceReadReplica, RestoreDBInstanceFromDBSnapshot, and RestoreDBInstanceToPointInTime APIs.
+
+# Release (2023-01-09)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.15.0](service/ecrpublic/CHANGELOG.md#v1150-2023-01-09)
+  * **Feature**: This release for Amazon ECR Public makes several changes to bring the SDK into sync with the API.
+* `github.com/aws/aws-sdk-go-v2/service/kendraranking`: [v1.0.0](service/kendraranking/CHANGELOG.md#v100-2023-01-09)
+  * **Release**: New AWS service client module
+  * **Feature**: Introducing Amazon Kendra Intelligent Ranking, a new set of Kendra APIs that leverages Kendra semantic ranking capabilities to improve the quality of search results from other search services (e.g., OpenSearch, Elasticsearch, Solr).
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.23.0](service/networkfirewall/CHANGELOG.md#v1230-2023-01-09)
+  * **Feature**: Network Firewall now supports the Suricata rule action reject, in addition to the actions pass, drop, and alert.
+* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.9.0](service/workspacesweb/CHANGELOG.md#v190-2023-01-09)
+  * **Feature**: This release adds support for a new portal authentication type: AWS IAM Identity Center (successor to AWS Single Sign-On).
+
+# Release (2023-01-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.21.0](service/acmpca/CHANGELOG.md#v1210-2023-01-06)
+  * **Feature**: Added revocation parameter validation: bucket names must match S3 bucket naming rules and CNAMEs must conform to RFC 2396 restrictions on the use of special characters in URIs.
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.23.0](service/auditmanager/CHANGELOG.md#v1230-2023-01-06)
+  * **Feature**: This release introduces a new data retention option in your Audit Manager settings. You can now use the DeregistrationPolicy parameter to specify if you want to delete your data when you deregister Audit Manager.
+
+# Release (2023-01-05)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.19.0](service/accessanalyzer/CHANGELOG.md#v1190-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.8.0](service/account/CHANGELOG.md#v180-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.17.0](service/acm/CHANGELOG.md#v1170-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.20.0](service/acmpca/CHANGELOG.md#v1200-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.15.0](service/alexaforbusiness/CHANGELOG.md#v1150-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.16.0](service/amp/CHANGELOG.md#v1160-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
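Every module in this 2023-01-05 release picks up the same generated change from aws/smithy-go#401: each error struct gains an ErrorCodeOverride field, and the struct's ErrorCode() method reports it when the wire response carried a code that differs from the modeled one. A minimal sketch of what a caller observes; the DynamoDB client and table name are only illustrative:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	_, err = client.DescribeTable(context.TODO(), &dynamodb.DescribeTableInput{
		TableName: aws.String("does-not-exist"), // placeholder
	})
	var rnf *types.ResourceNotFoundException
	if errors.As(err, &rnf) {
		// ErrorCode() returns the modeled code unless the service sent a
		// different one on the wire, in which case the new
		// ErrorCodeOverride value takes precedence.
		fmt.Println(rnf.ErrorCode(), rnf.ErrorMessage())
	}
}
```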
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.13.0](service/amplify/CHANGELOG.md#v1130-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.14.0](service/amplifybackend/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+  * **Feature**: Updated GetBackendAPIModels response to include ModelIntrospectionSchema JSON string
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.9.0](service/amplifyuibuilder/CHANGELOG.md#v190-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.16.0](service/apigateway/CHANGELOG.md#v1160-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.11.0](service/apigatewaymanagementapi/CHANGELOG.md#v1110-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.13.0](service/apigatewayv2/CHANGELOG.md#v1130-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.15.0](service/appconfig/CHANGELOG.md#v1150-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.5.0](service/appconfigdata/CHANGELOG.md#v150-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.23.0](service/appflow/CHANGELOG.md#v1230-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.14.0](service/appintegrations/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.17.0](service/applicationautoscaling/CHANGELOG.md#v1170-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.10.0](service/applicationcostprofiler/CHANGELOG.md#v1100-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.15.0](service/applicationdiscoveryservice/CHANGELOG.md#v1150-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.17.0](service/applicationinsights/CHANGELOG.md#v1170-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.17.0](service/appmesh/CHANGELOG.md#v1170-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.16.0](service/apprunner/CHANGELOG.md#v1160-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+  * **Feature**: This release adds support for securely referencing secrets and configuration data that are stored in Secrets Manager and SSM Parameter Store by adding them as environment secrets in your App Runner service.
+* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.19.0](service/appstream/CHANGELOG.md#v1190-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.18.0](service/appsync/CHANGELOG.md#v1180-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.1.0](service/arczonalshift/CHANGELOG.md#v110-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.22.0](service/athena/CHANGELOG.md#v1220-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.22.0](service/auditmanager/CHANGELOG.md#v1220-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.26.0](service/autoscaling/CHANGELOG.md#v1260-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.13.0](service/autoscalingplans/CHANGELOG.md#v1130-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.19.0](service/backup/CHANGELOG.md#v1190-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.9.0](service/backupgateway/CHANGELOG.md#v190-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.1.0](service/backupstorage/CHANGELOG.md#v110-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.21.0](service/batch/CHANGELOG.md#v1210-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.4.0](service/billingconductor/CHANGELOG.md#v140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.17.0](service/braket/CHANGELOG.md#v1170-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.14.0](service/budgets/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.22.0](service/chime/CHANGELOG.md#v1220-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.10.0](service/chimesdkidentity/CHANGELOG.md#v1100-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.2.0](service/chimesdkmediapipelines/CHANGELOG.md#v120-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.14.0](service/chimesdkmeetings/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.12.0](service/chimesdkmessaging/CHANGELOG.md#v1120-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.1.0](service/chimesdkvoice/CHANGELOG.md#v110-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.17.0](service/cloud9/CHANGELOG.md#v1170-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.11.0](service/cloudcontrol/CHANGELOG.md#v1110-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.13.0](service/clouddirectory/CHANGELOG.md#v1130-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.25.0](service/cloudformation/CHANGELOG.md#v1250-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.24.0](service/cloudfront/CHANGELOG.md#v1240-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.13.0](service/cloudhsm/CHANGELOG.md#v1130-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.14.0](service/cloudhsmv2/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.14.0](service/cloudsearch/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.12.0](service/cloudsearchdomain/CHANGELOG.md#v1120-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.22.0](service/cloudtrail/CHANGELOG.md#v1220-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.24.0](service/cloudwatch/CHANGELOG.md#v1240-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.15.0](service/cloudwatchevents/CHANGELOG.md#v1150-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.18.0](service/cloudwatchlogs/CHANGELOG.md#v1180-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.14.0](service/codeartifact/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.20.0](service/codebuild/CHANGELOG.md#v1200-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.1.0](service/codecatalyst/CHANGELOG.md#v110-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.14.0](service/codecommit/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.16.0](service/codedeploy/CHANGELOG.md#v1160-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.13.0](service/codeguruprofiler/CHANGELOG.md#v1130-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.17.0](service/codegurureviewer/CHANGELOG.md#v1170-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.14.0](service/codepipeline/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.13.0](service/codestar/CHANGELOG.md#v1130-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.14.0](service/codestarconnections/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.14.0](service/codestarnotifications/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.15.0](service/cognitoidentity/CHANGELOG.md#v1150-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.22.0](service/cognitoidentityprovider/CHANGELOG.md#v1220-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.12.0](service/cognitosync/CHANGELOG.md#v1120-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.21.0](service/comprehend/CHANGELOG.md#v1210-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.15.0](service/comprehendmedical/CHANGELOG.md#v1150-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.20.0](service/computeoptimizer/CHANGELOG.md#v1200-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.29.0](service/configservice/CHANGELOG.md#v1290-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.43.0](service/connect/CHANGELOG.md#v1430-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+  * **Feature**: Documentation update for a new Initiation Method value in DescribeContact API
+* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.2.0](service/connectcampaigns/CHANGELOG.md#v120-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.2.0](service/connectcases/CHANGELOG.md#v120-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.13.0](service/connectcontactlens/CHANGELOG.md#v1130-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.14.0](service/connectparticipant/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.1.0](service/controltower/CHANGELOG.md#v110-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.15.0](service/costandusagereportservice/CHANGELOG.md#v1150-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.25.0](service/costexplorer/CHANGELOG.md#v1250-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.22.0](service/customerprofiles/CHANGELOG.md#v1220-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.23.0](service/databasemigrationservice/CHANGELOG.md#v1230-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.21.0](service/databrew/CHANGELOG.md#v1210-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.18.0](service/dataexchange/CHANGELOG.md#v1180-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.14.0](service/datapipeline/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.21.0](service/datasync/CHANGELOG.md#v1210-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.12.0](service/dax/CHANGELOG.md#v1120-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.18.0](service/detective/CHANGELOG.md#v1180-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.15.0](service/devicefarm/CHANGELOG.md#v1150-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.21.0](service/devopsguru/CHANGELOG.md#v1210-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.18.0](service/directconnect/CHANGELOG.md#v1180-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.16.0](service/directoryservice/CHANGELOG.md#v1160-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.14.0](service/dlm/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.20.0](service/docdb/CHANGELOG.md#v1200-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.1.0](service/docdbelastic/CHANGELOG.md#v110-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.10.0](service/drs/CHANGELOG.md#v1100-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.18.0](service/dynamodb/CHANGELOG.md#v1180-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.14.0](service/dynamodbstreams/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.16.0](service/ebs/CHANGELOG.md#v1160-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.15.0](service/ec2instanceconnect/CHANGELOG.md#v1150-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.18.0](service/ecr/CHANGELOG.md#v1180-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.14.0](service/ecrpublic/CHANGELOG.md#v1140-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.23.0](service/ecs/CHANGELOG.md#v1230-2023-01-05)
+  * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.19.0](service/efs/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.27.0](service/eks/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.26.0](service/elasticache/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.15.0](service/elasticbeanstalk/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.12.0](service/elasticinference/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.15.0](service/elasticloadbalancing/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.19.0](service/elasticloadbalancingv2/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.18.0](service/elasticsearchservice/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.14.0](service/elastictranscoder/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.22.0](service/emr/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.16.0](service/emrcontainers/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.5.0](service/emrserverless/CHANGELOG.md#v150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: Adds support for customized images. You can now provide runtime images when creating or updating EMR Serverless Applications. +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.17.0](service/eventbridge/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.10.0](service/evidently/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.9.0](service/finspace/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.14.0](service/finspacedata/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.16.0](service/firehose/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.14.0](service/fis/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.22.0](service/fms/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.24.0](service/forecast/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.13.0](service/forecastquery/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.21.0](service/frauddetector/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.28.0](service/fsx/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.17.0](service/gamelift/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.2.0](service/gamesparks/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.14.0](service/glacier/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.16.0](service/globalaccelerator/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.39.0](service/glue/CHANGELOG.md#v1390-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.11.0](service/grafana/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.15.0](service/greengrass/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.21.0](service/greengrassv2/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.15.0](service/groundstation/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.17.0](service/guardduty/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.16.0](service/health/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.15.0](service/healthlake/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.13.0](service/honeycode/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.19.0](service/iam/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.16.0](service/identitystore/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.21.0](service/imagebuilder/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.13.0](service/inspector/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.11.0](service/inspector2/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.33.0](service/iot/CHANGELOG.md#v1330-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.11.0](service/iot1clickdevicesservice/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.12.0](service/iot1clickprojects/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.14.0](service/iotanalytics/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.14.0](service/iotdataplane/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.17.0](service/iotdeviceadvisor/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.15.0](service/iotevents/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.13.0](service/ioteventsdata/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.13.0](service/iotfleethub/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.3.0](service/iotfleetwise/CHANGELOG.md#v130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.12.0](service/iotjobsdataplane/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.1.0](service/iotroborunner/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.15.0](service/iotsecuretunneling/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.27.0](service/iotsitewise/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.14.0](service/iotthingsgraph/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.10.0](service/iottwinmaker/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.24.0](service/iotwireless/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.19.0](service/ivs/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.3.0](service/ivschat/CHANGELOG.md#v130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.19.0](service/kafka/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.9.0](service/kafkaconnect/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.37.0](service/kendra/CHANGELOG.md#v1370-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.1.0](service/keyspaces/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.17.0](service/kinesis/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.14.0](service/kinesisanalytics/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.16.0](service/kinesisanalyticsv2/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.15.0](service/kinesisvideo/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.14.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.11.0](service/kinesisvideomedia/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.11.0](service/kinesisvideosignaling/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.2.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.20.0](service/kms/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.19.0](service/lakeformation/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.27.0](service/lambda/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.17.0](service/lexmodelbuildingservice/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.27.0](service/lexmodelsv2/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.13.0](service/lexruntimeservice/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.16.0](service/lexruntimev2/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.17.0](service/licensemanager/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.1.0](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.2.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.25.0](service/lightsail/CHANGELOG.md#v1250-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Documentation**: Documentation updates for Amazon Lightsail. 
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.20.0](service/location/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.17.0](service/lookoutequipment/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.19.0](service/lookoutmetrics/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.15.0](service/lookoutvision/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.3.0](service/m2/CHANGELOG.md#v130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.15.0](service/machinelearning/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.15.0](service/macie/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.26.0](service/macie2/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.14.0](service/managedblockchain/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.15.0](service/marketplacecatalog/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.12.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.12.0](service/marketplaceentitlementservice/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.14.0](service/marketplacemetering/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.18.0](service/mediaconnect/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.29.0](service/mediaconvert/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.28.0](service/medialive/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.20.0](service/mediapackage/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.21.0](service/mediapackagevod/CHANGELOG.md#v1210-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.13.0](service/mediastore/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.13.0](service/mediastoredata/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.20.0](service/mediatailor/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.12.0](service/memorydb/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.17.0](service/mgn/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.13.0](service/migrationhub/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.13.0](service/migrationhubconfig/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.1.0](service/migrationhuborchestrator/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.8.0](service/migrationhubrefactorspaces/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.7.0](service/migrationhubstrategy/CHANGELOG.md#v170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.12.0](service/mobile/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.14.0](service/mq/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.14.0](service/mturk/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.14.0](service/mwaa/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Documentation**: MWAA supports Apache Airflow version 2.4.3. 
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.19.0](service/neptune/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.22.0](service/networkfirewall/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.17.0](service/networkmanager/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.16.0](service/nimble/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.1.0](service/oam/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.1.0](service/omics/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.12.0](service/opensearch/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.1.0](service/opensearchserverless/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.14.0](service/opsworks/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.15.0](service/opsworkscm/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.18.0](service/organizations/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.24.0](service/outposts/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.10.0](service/panorama/CHANGELOG.md#v1100-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.23.0](service/personalize/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.13.0](service/personalizeevents/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.13.0](service/personalizeruntime/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.16.0](service/pi/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.18.0](service/pinpoint/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.12.0](service/pinpointemail/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.11.0](service/pinpointsmsvoice/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.1.0](service/pinpointsmsvoicev2/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.1.0](service/pipes/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.22.0](service/polly/CHANGELOG.md#v1220-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.18.0](service/pricing/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.1.0](service/privatenetworks/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.19.0](service/proton/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.15.0](service/qldb/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.14.0](service/qldbsession/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.29.0](service/quicksight/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.17.0](service/ram/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.8.0](service/rbin/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.39.0](service/rds/CHANGELOG.md#v1390-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + * **Feature**: This release adds support for specifying which certificate authority (CA) to use for a DB instance's server certificate during DB instance creation, as well as other CA enhancements. +* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.13.0](service/rdsdata/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.27.0](service/redshift/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.18.0](service/redshiftdata/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.4.0](service/redshiftserverless/CHANGELOG.md#v140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.23.0](service/rekognition/CHANGELOG.md#v1230-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.8.0](service/resiliencehub/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.2.0](service/resourceexplorer2/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.13.0](service/resourcegroups/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.14.0](service/resourcegroupstaggingapi/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.18.0](service/robomaker/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.1.0](service/rolesanywhere/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.26.0](service/route53/CHANGELOG.md#v1260-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.14.0](service/route53domains/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.11.0](service/route53recoverycluster/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.11.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.9.0](service/route53recoveryreadiness/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.16.0](service/route53resolver/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.9.0](service/rum/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.30.0](service/s3/CHANGELOG.md#v1300-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.29.0](service/s3control/CHANGELOG.md#v1290-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.14.0](service/s3outposts/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.62.0](service/sagemaker/CHANGELOG.md#v1620-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.15.0](service/sagemakera2iruntime/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.13.0](service/sagemakeredge/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.13.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.1.0](service/sagemakergeospatial/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.17.0](service/sagemakerruntime/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.12.0](service/savingsplans/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.1.0](service/scheduler/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.15.0](service/schemas/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.18.0](service/secretsmanager/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.27.0](service/securityhub/CHANGELOG.md#v1270-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.2.0](service/securitylake/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.12.0](service/serverlessapplicationrepository/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.16.0](service/servicecatalog/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.16.0](service/servicecatalogappregistry/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.19.0](service/servicediscovery/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.14.0](service/servicequotas/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.15.0](service/ses/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.16.0](service/sesv2/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.17.0](service/sfn/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.18.0](service/shield/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.14.0](service/signer/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.1.0](service/simspaceweaver/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.13.0](service/sms/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.17.0](service/snowball/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.9.0](service/snowdevicemanagement/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.19.0](service/sns/CHANGELOG.md#v1190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.20.0](service/sqs/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.35.0](service/ssm/CHANGELOG.md#v1350-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.14.0](service/ssmcontacts/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.20.0](service/ssmincidents/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.1.0](service/ssmsap/CHANGELOG.md#v110-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.12.0](service/sso/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.16.0](service/ssoadmin/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.14.0](service/ssooidc/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.18.0](service/storagegateway/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.18.0](service/sts/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.14.0](service/support/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.2.0](service/supportapp/CHANGELOG.md#v120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.14.0](service/swf/CHANGELOG.md#v1140-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.17.0](service/synthetics/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.20.0](service/textract/CHANGELOG.md#v1200-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.15.0](service/timestreamquery/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.15.0](service/timestreamwrite/CHANGELOG.md#v1150-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.25.0](service/transcribe/CHANGELOG.md#v1250-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.9.0](service/transcribestreaming/CHANGELOG.md#v190-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.28.0](service/transfer/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
+* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.17.0](service/translate/CHANGELOG.md#v1170-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.12.0](service/voiceid/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.12.0](service/waf/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.13.0](service/wafregional/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.24.0](service/wafv2/CHANGELOG.md#v1240-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.18.0](service/wellarchitected/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.12.0](service/wisdom/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.13.0](service/workdocs/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.13.0](service/worklink/CHANGELOG.md#v1130-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.18.0](service/workmail/CHANGELOG.md#v1180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.12.0](service/workmailmessageflow/CHANGELOG.md#v1120-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.28.0](service/workspaces/CHANGELOG.md#v1280-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.8.0](service/workspacesweb/CHANGELOG.md#v180-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.16.0](service/xray/CHANGELOG.md#v1160-2023-01-05) + * **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# Release (2023-01-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.16.0](service/applicationautoscaling/CHANGELOG.md#v1160-2023-01-04) + * **Feature**: Customers can now use the existing DescribeScalingActivities API to also see the detailed and machine-readable reasons for Application Auto Scaling not scaling their resources and, if needed, take the necessary corrective actions. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.4](service/cloudwatchlogs/CHANGELOG.md#v1174-2023-01-04) + * **Documentation**: Update to remove sequenceToken as a required field in PutLogEvents calls. 
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.34.0](service/ssm/CHANGELOG.md#v1340-2023-01-04)
+  * **Feature**: Adding support for QuickSetup Document Type in Systems Manager
+
+# Release (2023-01-03)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.1.0](service/securitylake/CHANGELOG.md#v110-2023-01-03)
+  * **Feature**: Allow the CreateSubscriber API to take string input for a more descriptive SubscriberDescription field. Make the sourceTypes field required at the model level for UpdateSubscriberRequest, as it is required for every API call on the backend. Allow ListSubscribers to take any string as the nextToken param.
+
+# Release (2022-12-30)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.23.0](service/cloudfront/CHANGELOG.md#v1230-2022-12-30)
+  * **Feature**: Extend response headers policy to support removing headers from viewer responses
+* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.2.1](service/iotfleetwise/CHANGELOG.md#v121-2022-12-30)
+  * **Documentation**: Update documentation - correct the epoch constant used as the default value for the expiryTime field in the CreateCampaign request.
+
+# Release (2022-12-29)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.28](service/apigateway/CHANGELOG.md#v11528-2022-12-29)
+  * **Documentation**: Documentation updates for Amazon API Gateway
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.21.0](service/emr/CHANGELOG.md#v1210-2022-12-29)
+  * **Feature**: Added GetClusterSessionCredentials API to allow Amazon SageMaker Studio to connect to EMR on EC2 clusters with runtime roles and AWS Lake Formation-based access control for Apache Spark, Apache Hive, and Presto queries.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.17.0](service/secretsmanager/CHANGELOG.md#v1170-2022-12-29)
+  * **Feature**: Added owning service filter, include planned deletion flag, and next rotation date response parameter in ListSecrets.
+* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.11.0](service/wisdom/CHANGELOG.md#v1110-2022-12-29)
+  * **Feature**: This release extends Wisdom CreateContent and StartContentUpload APIs to support PDF and Microsoft Word docx document uploading.
+
+# Release (2022-12-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.25.0](service/elasticache/CHANGELOG.md#v1250-2022-12-28)
+  * **Feature**: This release allows you to modify the encryption-in-transit setting for existing Redis clusters. You can now change the TLS configuration of your Redis clusters without the need to re-build or re-provision the clusters or impact application availability.
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.21.0](service/networkfirewall/CHANGELOG.md#v1210-2022-12-28)
+  * **Feature**: AWS Network Firewall now provides status messages for firewalls to help you troubleshoot when your endpoint fails.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.38.0](service/rds/CHANGELOG.md#v1380-2022-12-28)
+  * **Feature**: This release adds support for Custom Engine Version (CEV) on RDS Custom SQL Server.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.10.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1100-2022-12-28)
+  * **Feature**: Added support for Python paginators in the route53-recovery-control-config List* APIs.
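+
+Nearly every module bump in the 2023-01-05 release above carries the same change: generated error structs gained an `ErrorCodeOverride` field (aws/smithy-go#401), so an error code reported on the wire can take precedence over the modeled one. Application code does not usually read the field directly; it keeps inspecting failures through the `smithy.APIError` interface, whose `ErrorCode()` reflects the override when present. A minimal sketch of that pattern, assuming DynamoDB as the example service and a hypothetical table name:
+
+```go
+package main
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "log"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    "github.com/aws/aws-sdk-go-v2/config"
+    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+    "github.com/aws/smithy-go"
+)
+
+func main() {
+    cfg, err := config.LoadDefaultConfig(context.TODO())
+    if err != nil {
+        log.Fatal(err)
+    }
+    client := dynamodb.NewFromConfig(cfg)
+
+    // "no-such-table" is a hypothetical name used here to provoke a service error.
+    _, err = client.DescribeTable(context.TODO(), &dynamodb.DescribeTableInput{
+        TableName: aws.String("no-such-table"),
+    })
+
+    var apiErr smithy.APIError
+    if errors.As(err, &apiErr) {
+        // ErrorCode() on generated error types prefers the ErrorCodeOverride
+        // value populated from the wire response when it is set.
+        fmt.Printf("code=%s message=%s\n", apiErr.ErrorCode(), apiErr.ErrorMessage())
+    }
+}
+```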
+
+# Release (2022-12-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.11.0](service/memorydb/CHANGELOG.md#v1110-2022-12-27)
+  * **Feature**: This release adds support for MemoryDB Reserved nodes which provides a significant discount compared to on-demand node pricing. Reserved nodes are not physical nodes, but rather a billing discount applied to the use of on-demand nodes in your account.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.27.0](service/transfer/CHANGELOG.md#v1270-2022-12-27)
+  * **Feature**: Add additional operations to throw ThrottlingExceptions
+
+# Release (2022-12-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.42.0](service/connect/CHANGELOG.md#v1420-2022-12-23)
+  * **Feature**: Support for Routing Profile filter, SortCriteria, and grouping by Routing Profiles for GetCurrentMetricData API. Support for RoutingProfiles, UserHierarchyGroups, and Agents as filters, NextStatus and AgentStatusName for GetCurrentUserData. Adds ApproximateTotalCount to both APIs.
+* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.13.0](service/connectparticipant/CHANGELOG.md#v1130-2022-12-23)
+  * **Feature**: Amazon Connect Chat introduces the Message Receipts feature. This feature allows agents and customers to receive message delivered and read receipts after they send a chat message.
+* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.17.0](service/detective/CHANGELOG.md#v1170-2022-12-23)
+  * **Feature**: This release adds a missing AccessDeniedException type to several endpoints.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.27.0](service/fsx/CHANGELOG.md#v1270-2022-12-23)
+  * **Feature**: Fix a bug where a recent release might break certain existing SDKs.
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.10.0](service/inspector2/CHANGELOG.md#v1100-2022-12-23)
+  * **Feature**: Amazon Inspector adds support for scanning NodeJS 18.x and Go 1.x AWS Lambda function runtimes.
+
+# Release (2022-12-22)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.19.0](service/computeoptimizer/CHANGELOG.md#v1190-2022-12-22)
+  * **Feature**: This release enables AWS Compute Optimizer to analyze and generate optimization recommendations for ECS services running on Fargate.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.41.0](service/connect/CHANGELOG.md#v1410-2022-12-22)
+  * **Feature**: Amazon Connect Chat introduces the Idle Participant/Autodisconnect feature, which allows users to set timeouts relating to the activity of chat participants, using the new UpdateParticipantRoleConfig API.
+* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.16.0](service/iotdeviceadvisor/CHANGELOG.md#v1160-2022-12-22)
+  * **Feature**: This release adds the following new features: 1) Documentation updates for IoT Device Advisor APIs. 2) Updated required request parameters for IoT Device Advisor APIs. 3) Added a new service feature: the ability to provide the test endpoint when a customer executes the StartSuiteRun API.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.1.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v110-2022-12-22)
+  * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.37.0](service/rds/CHANGELOG.md#v1370-2022-12-22)
+  * **Feature**: Add support for managing master user password in AWS Secrets Manager for the DBInstance and DBCluster.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.11](service/secretsmanager/CHANGELOG.md#v11611-2022-12-22)
+  * **Documentation**: Documentation updates for Secrets Manager
+
+# Release (2022-12-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.0.0](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v100-2022-12-21)
+  * **Release**: New AWS service client module
+  * **Feature**: AWS License Manager now offers cross-region, cross-account tracking of commercial Linux subscriptions on AWS. This includes subscriptions purchased as part of EC2 subscription-included AMIs, on the AWS Marketplace, or brought to AWS via the Red Hat Cloud Access Program.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.25.0](service/macie2/CHANGELOG.md#v1250-2022-12-21)
+  * **Feature**: This release adds support for analyzing Amazon S3 objects that use the S3 Glacier Instant Retrieval (Glacier_IR) storage class.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.61.0](service/sagemaker/CHANGELOG.md#v1610-2022-12-21)
+  * **Feature**: This release enables adding RStudio Workbench support to an existing Amazon SageMaker Studio domain. It allows setting your RStudio on SageMaker environment configuration parameters and also updating the RStudioConnectUrl and RStudioPackageManagerUrl parameters for existing domains.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.33.4](service/ssm/CHANGELOG.md#v1334-2022-12-21)
+  * **Documentation**: Doc-only updates for December 2022.
+* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.13.22](service/support/CHANGELOG.md#v11322-2022-12-21)
+  * **Documentation**: Documentation updates for the AWS Support API
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.26.0](service/transfer/CHANGELOG.md#v1260-2022-12-21)
+  * **Feature**: This release adds support for Decrypt as a workflow step type.
+
+# Release (2022-12-20)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.20.0](service/batch/CHANGELOG.md#v1200-2022-12-20)
+  * **Feature**: Adds isCancelled and isTerminated to DescribeJobs response.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.77.0](service/ec2/CHANGELOG.md#v1770-2022-12-20)
+  * **Feature**: Adds support for pagination in the EC2 DescribeImages API (see the paginator sketch after the 2022-12-19 notes below).
+* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.16.0](service/lookoutequipment/CHANGELOG.md#v1160-2022-12-20)
+  * **Feature**: This release adds support for listing inference schedulers by status.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.27.0](service/medialive/CHANGELOG.md#v1270-2022-12-20)
+  * **Feature**: This release adds support for two new features to AWS Elemental MediaLive. First, you can now burn-in timecodes to your MediaLive outputs. Second, we now support the ability to decode Dolby E audio when it comes in on an input.
+* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.15.0](service/nimble/CHANGELOG.md#v1150-2022-12-20)
+  * **Feature**: Amazon Nimble Studio now supports configuring session storage volumes and persistence, as well as backup and restore sessions through launch profiles.
+* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.1.0](service/resourceexplorer2/CHANGELOG.md#v110-2022-12-20)
+  * **Feature**: Documentation updates for AWS Resource Explorer.
+* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.13.0](service/route53domains/CHANGELOG.md#v1130-2022-12-20)
+  * **Feature**: Use Route 53 domain APIs to change owner, create/delete DS record, modify IPS tag, resend authorization. New: AssociateDelegationSignerToDomain, DisassociateDelegationSignerFromDomain, PushDomain, ResendOperationAuthorization. Updated: UpdateDomainContact, ListOperations, CheckDomainTransferability.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.60.0](service/sagemaker/CHANGELOG.md#v1600-2022-12-20)
+  * **Feature**: Amazon SageMaker Autopilot adds support for new objective metrics in CreateAutoMLJob API.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.24.0](service/transcribe/CHANGELOG.md#v1240-2022-12-20)
+  * **Feature**: Enable our batch transcription jobs for Swedish and Vietnamese.
+
+# Release (2022-12-19)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.21.0](service/athena/CHANGELOG.md#v1210-2022-12-19)
+  * **Feature**: Add missing InvalidRequestException in GetCalculationExecutionCode and StopCalculationExecution APIs. Correct required parameters (Payload and Type) in UpdateNotebook API. Change Notebook size limit from 15 MB to 10 MB.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.22.0](service/ecs/CHANGELOG.md#v1220-2022-12-19)
+  * **Feature**: This release adds support for alarm-based rollbacks in ECS, a new feature that allows customers to add automated safeguards for Amazon ECS service rolling updates.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.14.0](service/kinesisvideo/CHANGELOG.md#v1140-2022-12-19)
+  * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.0.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v100-2022-12-19)
+  * **Release**: New AWS service client module
+  * **Feature**: Amazon Kinesis Video Streams offers capabilities to stream video and audio in real-time via WebRTC to the cloud for storage, playback, and analytical processing. Customers can use our enhanced WebRTC SDK and cloud APIs to enable real-time streaming, as well as media ingestion to the cloud.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.36.0](service/rds/CHANGELOG.md#v1360-2022-12-19)
+  * **Feature**: Add support for --enable-customer-owned-ip to RDS create-db-instance-read-replica API for RDS on Outposts.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.59.0](service/sagemaker/CHANGELOG.md#v1590-2022-12-19)
+  * **Feature**: AWS SageMaker - SageMaker Images now supports Aliases as secondary identifiers for ImageVersions. SageMaker Images now supports additional metadata for ImageVersions for better image management.
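+
+As an illustration of the DescribeImages pagination added in the `ec2` v1.77.0 entry above, here is a minimal Go sketch that walks the paginated results. It assumes the SDK's usual generated paginator (`NewDescribeImagesPaginator`); names should be verified against the released module.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+)
+
+func main() {
+	ctx := context.TODO()
+
+	// Load region and credentials from the shared AWS configuration.
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := ec2.NewFromConfig(cfg)
+
+	// The paginator issues DescribeImages calls until no further
+	// pagination token is returned.
+	p := ec2.NewDescribeImagesPaginator(client, &ec2.DescribeImagesInput{
+		Owners: []string{"self"}, // only AMIs owned by this account
+	})
+	for p.HasMorePages() {
+		page, err := p.NextPage(ctx)
+		if err != nil {
+			log.Fatal(err)
+		}
+		for _, img := range page.Images {
+			fmt.Println(*img.ImageId)
+		}
+	}
+}
+```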
+
+# Release (2022-12-16)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.22.0](service/appflow/CHANGELOG.md#v1220-2022-12-16)
+  * **Feature**: This release updates the ListConnectorEntities API action so that it returns paginated responses that customers can retrieve with next tokens.
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.22.2](service/cloudfront/CHANGELOG.md#v1222-2022-12-16)
+  * **Documentation**: Updated documentation for CloudFront
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.20.0](service/datasync/CHANGELOG.md#v1200-2022-12-16)
+  * **Feature**: AWS DataSync now supports the use of tags with task executions. With this new feature, you can apply tags each time you execute a task, giving you greater control and management over your task executions.
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.18.3](service/efs/CHANGELOG.md#v1183-2022-12-16)
+  * **Documentation**: General documentation updates for EFS.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.16.6](service/guardduty/CHANGELOG.md#v1166-2022-12-16)
+  * **Documentation**: This release provides the valid characters for the Description and Name fields.
+* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.2.0](service/iotfleetwise/CHANGELOG.md#v120-2022-12-16)
+  * **Feature**: Updated error handling for empty resource names in "UpdateSignalCatalog" and "GetModelManifest" operations.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.58.0](service/sagemaker/CHANGELOG.md#v1580-2022-12-16)
+  * **Feature**: AWS SageMaker - Features: This release adds support for a random seed, an integer value used to initialize a pseudo-random number generator. Setting a random seed allows the hyperparameter tuning search strategies to produce more consistent configurations for the same tuning job.
+
+# Release (2022-12-15)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.17.3
+  * **Bug Fix**: Unify logic between shared config and credentials files in finding the home directory
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.5](config/CHANGELOG.md#v1185-2022-12-15)
+  * **Bug Fix**: Unify logic between shared config and credentials files in finding the home directory
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.13.5](credentials/CHANGELOG.md#v1135-2022-12-15)
+  * **Bug Fix**: Unify logic between shared config and credentials files in finding the home directory
+* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.8.0](service/backupgateway/CHANGELOG.md#v180-2022-12-15)
+  * **Feature**: This release adds support for VMware vSphere tags, enabling customers to protect VMware virtual machines using tag-based policies for AWS tags mapped from vSphere tags. This release also adds support for customer-accessible gateway-hypervisor interaction logs and an upload bandwidth rate limit schedule.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.40.0](service/connect/CHANGELOG.md#v1400-2022-12-15)
+  * **Feature**: Added support for "English - New Zealand" and "English - South African" to be used with Amazon Connect Custom Vocabulary APIs.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.21.0](service/ecs/CHANGELOG.md#v1210-2022-12-15)
+  * **Feature**: This release adds support for container port ranges in ECS, a new capability that allows customers to provide container port ranges to simplify use cases where multiple ports are in use in a container.
This release updates TaskDefinition mutation APIs and the Task description APIs.
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.26.0](service/eks/CHANGELOG.md#v1260-2022-12-15)
+  * **Feature**: Add support for Windows managed node groups.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.38.0](service/glue/CHANGELOG.md#v1380-2022-12-15)
+  * **Feature**: This release adds support for AWS Glue Crawler with native DeltaLake tables, allowing Crawlers to classify Delta Lake format tables and catalog them for query engines to query against.
+* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.16.0](service/kinesis/CHANGELOG.md#v1160-2022-12-15)
+  * **Feature**: Added StreamARN parameter for Kinesis Data Streams APIs. Added a new opaque pagination token for ListStreams. SDKs will auto-generate the Account Endpoint when accessing Kinesis Data Streams.
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.19.5](service/location/CHANGELOG.md#v1195-2022-12-15)
+  * **Documentation**: This release adds support for a new style, "VectorOpenDataStandardLight" which can be used with the new data source, "Open Data Maps (Preview)".
+* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.2.0](service/m2/CHANGELOG.md#v120-2022-12-15)
+  * **Feature**: Adds an optional create-only `KmsKeyId` property to Environment and Application resources.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.57.0](service/sagemaker/CHANGELOG.md#v1570-2022-12-15)
+  * **Feature**: SageMaker Inference Recommender now allows customers to load test their models on various instance types using a private VPC.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.26.0](service/securityhub/CHANGELOG.md#v1260-2022-12-15)
+  * **Feature**: Added new resource details objects to ASFF, including resources for AwsEc2LaunchTemplate, AwsSageMakerNotebookInstance, AwsWafv2WebAcl and AwsWafv2RuleGroup.
+* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.16.0](service/translate/CHANGELOG.md#v1160-2022-12-15)
+  * **Feature**: Raised the input byte size limit of the Text field in the TranslateText API to 10000 bytes.
+
+# Release (2022-12-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.23.0](service/cloudwatch/CHANGELOG.md#v1230-2022-12-14)
+  * **Feature**: Adding support for Metrics Insights Alarms
+* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.24.0](service/costexplorer/CHANGELOG.md#v1240-2022-12-14)
+  * **Feature**: This release supports percentage-based thresholds on Cost Anomaly Detection alert subscriptions.
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.16.0](service/networkmanager/CHANGELOG.md#v1160-2022-12-14)
+  * **Feature**: Appliance Mode support for AWS Cloud WAN.
+* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.17.0](service/redshiftdata/CHANGELOG.md#v1170-2022-12-14)
+  * **Feature**: This release adds a new --client-token field to ExecuteStatement and BatchExecuteStatement operations. Customers can now run queries with the additional client token parameter to ensure idempotency.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakermetrics`: [v1.0.1](service/sagemakermetrics/CHANGELOG.md#v101-2022-12-14)
+  * **Documentation**: Update SageMaker Metrics documentation.
+
+# Release (2022-12-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.21.0](service/cloudtrail/CHANGELOG.md#v1210-2022-12-13)
+  * **Feature**: Merging mainline branch for service model into mainline release branch. There are no new APIs.
+* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.11.21](service/marketplaceentitlementservice/CHANGELOG.md#v11121-2022-12-13)
+  * **Bug Fix**: Fixing a shape type in the marketplaceentitlementservice client
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.35.0](service/rds/CHANGELOG.md#v1350-2022-12-13)
+  * **Feature**: This deployment adds the ClientPasswordAuthType field to the Auth structure of the DBProxy.
+
+# Release (2022-12-12)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.21.0](service/customerprofiles/CHANGELOG.md#v1210-2022-12-12)
+  * **Feature**: This release allows custom strings in PartyType and Gender through 2 new attributes in the CreateProfile and UpdateProfile APIs: PartyTypeString and GenderString.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.76.0](service/ec2/CHANGELOG.md#v1760-2022-12-12)
+  * **Feature**: This release updates DescribeFpgaImages to show supported instance types of AFIs in its response.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.13.0](service/kinesisvideo/CHANGELOG.md#v1130-2022-12-12)
+  * **Feature**: This release adds support for the public preview of Kinesis Video Stream at Edge, enabling customers to provide configuration for the Kinesis Video Stream EdgeAgent running on an on-premises IoT device. Customers can now locally record from cameras and stream videos to the cloud on a configured schedule.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.14.13](service/lookoutvision/CHANGELOG.md#v11413-2022-12-12)
+  * **Documentation**: This documentation update adds kms:GenerateDataKey as a required permission to StartModelPackagingJob.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.7.0](service/migrationhubrefactorspaces/CHANGELOG.md#v170-2022-12-12)
+  * **Feature**: This release adds support for Lambda alias service endpoints. Lambda alias ARNs can now be passed into CreateService.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.34.0](service/rds/CHANGELOG.md#v1340-2022-12-12)
+  * **Feature**: Update the RDS API model to support copying option groups during the CopyDBSnapshot operation.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.22.0](service/rekognition/CHANGELOG.md#v1220-2022-12-12)
+  * **Feature**: Adds support for "aliases" and "categories", inclusion and exclusion filters for labels and label categories, and aggregating labels by video segment timestamps for Stored Video Label Detection APIs.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakermetrics`: [v1.0.0](service/sagemakermetrics/CHANGELOG.md#v100-2022-12-12)
+  * **Release**: New AWS service client module
+  * **Feature**: This release introduces support for SageMaker Metrics APIs.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.3](service/wafv2/CHANGELOG.md#v1233-2022-12-12)
+  * **Documentation**: Documents the naming requirement for logging destinations that you use with web ACLs.
+
+# Release (2022-12-09)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.2](service/cloudwatchlogs/CHANGELOG.md#v1172-2022-12-09)
+  * **Documentation**: Doc-only update for CloudWatch Logs, clarifying Tagging Permissions
+* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.1.0](service/iotfleetwise/CHANGELOG.md#v110-2022-12-09)
+  * **Feature**: Deprecated the assignedValue property for actuators and attributes. Added a message to invalid nodes and invalid decoder manifest exceptions.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.26.0](service/medialive/CHANGELOG.md#v1260-2022-12-09)
+  * **Feature**: Link devices now support buffer size (latency) configuration. A higher latency value means a longer delay in transmitting from the device to MediaLive, but improved resiliency. A lower latency value means a shorter delay, but less resiliency.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.20.0](service/mediapackagevod/CHANGELOG.md#v1200-2022-12-09)
+  * **Feature**: This release provides the approximate number of assets in a packaging group.
+
+# Release (2022-12-08)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.25.0](service/autoscaling/CHANGELOG.md#v1250-2022-12-08)
+  * **Feature**: Adds support for metric math for target tracking scaling policies, saving you the cost and effort of publishing a custom metric to CloudWatch. Also adds support for VPC Lattice by adding the Attach/Detach/DescribeTrafficSources APIs and a new health check type to the CreateAutoScalingGroup API.
+* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.9.0](service/iottwinmaker/CHANGELOG.md#v190-2022-12-08)
+  * **Feature**: This release adds the following new features: 1) New APIs for managing a continuous sync of assets and asset models from AWS IoT SiteWise. 2) Support user friendly names for component types (ComponentTypeName) and properties (DisplayName).
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.6.0](service/migrationhubstrategy/CHANGELOG.md#v160-2022-12-08)
+  * **Feature**: This release adds known application filtering, server selection for assessments, support for potential recommendations, and indications for configuration and assessment status. For more information, see the AWS Migration Hub documentation at https://docs.aws.amazon.com/migrationhub/index.html
+
+# Release (2022-12-07)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.22.0](service/cloudfront/CHANGELOG.md#v1220-2022-12-07)
+  * **Feature**: Introducing UpdateDistributionWithStagingConfig, which can be used to promote the staging configuration to production.
+* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.23.0](service/costexplorer/CHANGELOG.md#v1230-2022-12-07)
+  * **Feature**: This release adds the LinkedAccountName field to the GetAnomalies API response under RootCause
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.25.0](service/eks/CHANGELOG.md#v1250-2022-12-07)
+  * **Feature**: Adds support for EKS add-ons configurationValues fields and the DescribeAddonConfiguration function
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.19.2](service/kms/CHANGELOG.md#v1192-2022-12-07)
+  * **Documentation**: Updated examples and exceptions for External Key Store (XKS).
+
+# Release (2022-12-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.3.0](service/billingconductor/CHANGELOG.md#v130-2022-12-06)
+  * **Feature**: This release adds the Tiering Pricing Rule feature.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.39.0](service/connect/CHANGELOG.md#v1390-2022-12-06)
+  * **Feature**: This release provides APIs that enable you to programmatically manage rules for Contact Lens conversational analytics and third party applications.
For more information, see https://docs.aws.amazon.com/connect/latest/APIReference/rules-api.html +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.33.0](service/rds/CHANGELOG.md#v1330-2022-12-06) + * **Feature**: This release adds the BlueGreenDeploymentNotFoundFault to the AddTagsToResource, ListTagsForResource, and RemoveTagsFromResource operations. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.12.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1120-2022-12-06) + * **Feature**: For online + offline Feature Groups, added ability to target PutRecord and DeleteRecord actions to only online store, or only offline store. If target store parameter is not specified, actions will apply to both stores. + +# Release (2022-12-05) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.22.0](service/costexplorer/CHANGELOG.md#v1220-2022-12-05) + * **Feature**: This release introduces two new APIs that offer a 1-click experience to refresh Savings Plans recommendations. The two APIs are StartSavingsPlansPurchaseRecommendationGeneration and ListSavingsPlansPurchaseRecommendationGeneration. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.75.0](service/ec2/CHANGELOG.md#v1750-2022-12-05) + * **Feature**: Documentation updates for EC2. +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.2.0](service/ivschat/CHANGELOG.md#v120-2022-12-05) + * **Feature**: Adds PendingVerification error type to messaging APIs to block the resource usage for accounts identified as being fraudulent. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.32.0](service/rds/CHANGELOG.md#v1320-2022-12-05) + * **Feature**: This release adds the InvalidDBInstanceStateFault to the RestoreDBClusterFromSnapshot operation. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.23.0](service/transcribe/CHANGELOG.md#v1230-2022-12-05) + * **Feature**: Amazon Transcribe now supports creating custom language models in the following languages: Japanese (ja-JP) and German (de-DE). + +# Release (2022-12-02) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.17.0](service/appsync/CHANGELOG.md#v1170-2022-12-02) + * **Feature**: Fixes the URI for the evaluatecode endpoint to include the /v1 prefix (ie. "/v1/dataplane-evaluatecode"). +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.20.1](service/ecs/CHANGELOG.md#v1201-2022-12-02) + * **Documentation**: Documentation updates for Amazon ECS +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.21.0](service/fms/CHANGELOG.md#v1210-2022-12-02) + * **Feature**: AWS Firewall Manager now supports Fortigate Cloud Native Firewall as a Service as a third-party policy type. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.28.0](service/mediaconvert/CHANGELOG.md#v1280-2022-12-02) + * **Feature**: The AWS Elemental MediaConvert SDK has added support for configurable ID3 eMSG box attributes and the ability to signal them with InbandEventStream tags in DASH and CMAF outputs. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.25.0](service/medialive/CHANGELOG.md#v1250-2022-12-02) + * **Feature**: Updates to Event Signaling and Management (ESAM) API and documentation. 
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.21.0](service/polly/CHANGELOG.md#v1210-2022-12-02) + * **Feature**: Add language code for Finnish (fi-FI) +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.18.0](service/proton/CHANGELOG.md#v1180-2022-12-02) + * **Feature**: CreateEnvironmentAccountConnection RoleArn input is now optional +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.3.0](service/redshiftserverless/CHANGELOG.md#v130-2022-12-02) + * **Feature**: Add Table Level Restore operations for Amazon Redshift Serverless. Add multi-port support for Amazon Redshift Serverless endpoints. Add Tagging support to Snapshots and Recovery Points in Amazon Redshift Serverless. +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.18.7](service/sns/CHANGELOG.md#v1187-2022-12-02) + * **Documentation**: This release adds the message payload-filtering feature to the SNS Subscribe, SetSubscriptionAttributes, and GetSubscriptionAttributes API actions + +# Release (2022-12-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.0.0](service/codecatalyst/CHANGELOG.md#v100-2022-12-01) + * **Release**: New AWS service client module + * **Feature**: This release adds operations that support customers using the AWS Toolkits and Amazon CodeCatalyst, a unified software development service that helps developers develop, deploy, and maintain applications in the cloud. For more information, see the documentation. +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.20.0](service/comprehend/CHANGELOG.md#v1200-2022-12-01) + * **Feature**: Comprehend now supports semi-structured documents (such as PDF files or image files) as inputs for custom analysis using the synchronous APIs (ClassifyDocument and DetectEntities). +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.16.0](service/gamelift/CHANGELOG.md#v1160-2022-12-01) + * **Feature**: GameLift introduces a new feature, GameLift Anywhere. GameLift Anywhere allows you to integrate your own compute resources with GameLift. You can also use GameLift Anywhere to iteratively test your game servers without uploading the build to GameLift for every iteration. +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.0.0](service/pipes/CHANGELOG.md#v100-2022-12-01) + * **Release**: New AWS service client module + * **Feature**: AWS introduces new Amazon EventBridge Pipes which allow you to connect sources (SQS, Kinesis, DDB, Kafka, MQ) to Targets (14+ EventBridge Targets) without any code, with filtering, batching, input transformation, and an optional Enrichment stage (Lambda, StepFunctions, ApiGateway, ApiDestinations) +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.16.0](service/sfn/CHANGELOG.md#v1160-2022-12-01) + * **Feature**: This release adds support for the AWS Step Functions Map state in Distributed mode. The changes include a new MapRun resource and several new and modified APIs. + +# Release (2022-11-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.18.0](service/accessanalyzer/CHANGELOG.md#v1180-2022-11-30) + * **Feature**: This release adds support for S3 cross account access points. IAM Access Analyzer will now produce public or cross account findings when it detects bucket delegation to external account access points. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.20.0](service/athena/CHANGELOG.md#v1200-2022-11-30) + * **Feature**: This release includes support for using Apache Spark in Amazon Athena. 
+* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.17.0](service/dataexchange/CHANGELOG.md#v1170-2022-11-30) + * **Feature**: This release enables data providers to license direct access to data in their Amazon S3 buckets or AWS Lake Formation data lakes through AWS Data Exchange. Subscribers get read-only access to the data and can use it in downstream AWS services, like Amazon Athena, without creating or managing copies. +* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.0.0](service/docdbelastic/CHANGELOG.md#v100-2022-11-30) + * **Release**: New AWS service client module + * **Feature**: Launched Amazon DocumentDB Elastic Clusters. You can now use the SDK to create, list, update and delete Amazon DocumentDB Elastic Cluster resources +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.37.0](service/glue/CHANGELOG.md#v1370-2022-11-30) + * **Feature**: This release adds support for AWS Glue Data Quality, which helps you evaluate and monitor the quality of your data and includes the API for creating, deleting, or updating data quality rulesets, runs and evaluations. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.28.0](service/s3control/CHANGELOG.md#v1280-2022-11-30) + * **Feature**: Amazon S3 now supports cross-account access points. S3 bucket owners can now allow trusted AWS accounts to create access points associated with their bucket. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.56.0](service/sagemaker/CHANGELOG.md#v1560-2022-11-30) + * **Feature**: Added Models as part of the Search API. Added Model shadow deployments in realtime inference, and shadow testing in managed inference. Added support for shared spaces, geospatial APIs, Model Cards, AutoMLJobStep in pipelines, Git repositories on user profiles and domains, Model sharing in Jumpstart. +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.0.0](service/sagemakergeospatial/CHANGELOG.md#v100-2022-11-30) + * **Release**: New AWS service client module + * **Feature**: This release provides Amazon SageMaker geospatial APIs to build, train, deploy and visualize geospatial models. + +# Release (2022-11-29.2) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.74.0](service/ec2/CHANGELOG.md#v1740-2022-11-292) + * **Feature**: This release adds support for AWS Verified Access and the Hpc6id Amazon EC2 compute optimized instance type, which features 3rd generation Intel Xeon Scalable processors. +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.15.0](service/firehose/CHANGELOG.md#v1150-2022-11-292) + * **Feature**: Allow support for the Serverless offering for Amazon OpenSearch Service as a Kinesis Data Firehose delivery destination. +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.19.0](service/kms/CHANGELOG.md#v1190-2022-11-292) + * **Feature**: AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control. +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.0.0](service/omics/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: Amazon Omics is a new, purpose-built service that can be used by healthcare and life science organizations to store, query, and analyze omics data. The insights from that data can be used to accelerate scientific discoveries and improve healthcare. 
+* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.0.0](service/opensearchserverless/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: Publish SDK for Amazon OpenSearch Serverless +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.0.0](service/securitylake/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: Amazon Security Lake automatically centralizes security data from cloud, on-premises, and custom sources into a purpose-built data lake stored in your account. Security Lake makes it easier to analyze security data, so you can improve the protection of your workloads, applications, and data +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.0.0](service/simspaceweaver/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: AWS SimSpace Weaver is a new service that helps customers build spatial simulations at new levels of scale - resulting in virtual worlds with millions of dynamic entities. See the AWS SimSpace Weaver developer guide for more details on how to get started. https://docs.aws.amazon.com/simspaceweaver + +# Release (2022-11-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.0.0](service/arczonalshift/CHANGELOG.md#v100-2022-11-29) + * **Release**: New AWS service client module + * **Feature**: Amazon Route 53 Application Recovery Controller Zonal Shift is a new service that makes it easy to shift traffic away from an Availability Zone in a Region. See the developer guide for more information: https://docs.aws.amazon.com/r53recovery/latest/dg/what-is-route53-recovery.html +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.18.0](service/computeoptimizer/CHANGELOG.md#v1180-2022-11-29) + * **Feature**: Adds support for a new recommendation preference that makes it possible for customers to optimize their EC2 recommendations by utilizing an external metrics ingestion service to provide metrics. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.28.0](service/configservice/CHANGELOG.md#v1280-2022-11-29) + * **Feature**: With this release, you can use AWS Config to evaluate your resources for compliance with Config rules before they are created or updated. Using Config rules in proactive mode enables you to test and build compliant resource templates or check resource configurations at the time they are provisioned. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.73.0](service/ec2/CHANGELOG.md#v1730-2022-11-29) + * **Feature**: Introduces ENA Express, which uses AWS SRD and dynamic routing to increase throughput and minimize latency, adds support for trust relationships between Reachability Analyzer and AWS Organizations to enable cross-account analysis, and adds support for Infrastructure Performance metric subscriptions. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.24.0](service/eks/CHANGELOG.md#v1240-2022-11-29) + * **Feature**: Adds support for additional EKS add-ons metadata and filtering fields +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.26.0](service/fsx/CHANGELOG.md#v1260-2022-11-29) + * **Feature**: This release adds support for 4GB/s / 160K PIOPS FSx for ONTAP file systems and 10GB/s / 350K PIOPS FSx for OpenZFS file systems (Single_AZ_2). For FSx for ONTAP, this also adds support for DP volumes, snapshot policy, copy tags to backups, and Multi-AZ route table updates. 
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.36.0](service/glue/CHANGELOG.md#v1360-2022-11-29)
+  * **Feature**: This release allows Custom Visual Transforms (Dynamic Transforms) to be created via the AWS Glue CLI/SDK.
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.9.0](service/inspector2/CHANGELOG.md#v190-2022-11-29)
+  * **Feature**: This release adds support for Inspector to scan AWS Lambda.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.26.0](service/lambda/CHANGELOG.md#v1260-2022-11-29)
+  * **Feature**: Adds support for Lambda SnapStart, which helps improve the startup performance of functions. Customers can now manage SnapStart-based functions via the CreateFunction and UpdateFunctionConfiguration APIs
+* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.1.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v110-2022-11-29)
+  * **Feature**: AWS now offers fully-compliant, Amazon-provided licenses for Microsoft Office Professional Plus 2021 Amazon Machine Images (AMIs) on Amazon EC2. These AMIs are now available on the Amazon EC2 console and on AWS Marketplace to launch instances on-demand without any long-term licensing commitments.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.24.0](service/macie2/CHANGELOG.md#v1240-2022-11-29)
+  * **Feature**: Added support for configuring Macie to continually sample objects from S3 buckets and inspect them for sensitive data. Results appear in statistics, findings, and other data that Macie provides.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.28.0](service/quicksight/CHANGELOG.md#v1280-2022-11-29)
+  * **Feature**: This release adds new Describe APIs and updates Create and Update APIs to support the data model for Dashboards, Analyses, and Templates.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.27.0](service/s3control/CHANGELOG.md#v1270-2022-11-29)
+  * **Feature**: Added two new APIs to support Amazon S3 Multi-Region Access Point failover controls: GetMultiRegionAccessPointRoutes and SubmitMultiRegionAccessPointRoutes. The failover control APIs are supported in the following Regions: us-east-1, us-west-2, eu-west-1, ap-southeast-2, and ap-northeast-1.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.25.0](service/securityhub/CHANGELOG.md#v1250-2022-11-29)
+  * **Feature**: Adding StandardsManagedBy field to DescribeStandards API response
+
+# Release (2022-11-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.18.0](service/backup/CHANGELOG.md#v1180-2022-11-28)
+  * **Feature**: AWS Backup introduces support for legal hold and application stack backups. AWS Backup Audit Manager introduces support for cross-Region, cross-account reports.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.22.0](service/cloudwatch/CHANGELOG.md#v1220-2022-11-28)
+  * **Feature**: Adds cross-account support to the GetMetricData API. Adds cross-account support to the ListMetrics API through the usage of the IncludeLinkedAccounts flag and the new OwningAccounts field.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.0](service/cloudwatchlogs/CHANGELOG.md#v1170-2022-11-28)
+  * **Feature**: Updates to support CloudWatch Logs data protection and CloudWatch cross-account observability
+* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.9.0](service/drs/CHANGELOG.md#v190-2022-11-28)
+  * **Feature**: Non-breaking changes to existing APIs, and additional APIs added to support in-AWS failback using AWS Elastic Disaster Recovery.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.20.0](service/ecs/CHANGELOG.md#v1200-2022-11-28)
+  * **Feature**: This release adds support for ECS Service Connect, a new capability that simplifies writing and operating resilient distributed applications. This release updates the TaskDefinition, Cluster, Service mutation APIs with Service Connect constructs and also adds a new ListServicesByNamespace API.
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.18.0](service/efs/CHANGELOG.md#v1180-2022-11-28)
+  * **Feature**: This release adds elastic as a new ThroughputMode value for EFS file systems and adds AFTER_1_DAY as a value for TransitionToIARules.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.32.0](service/iot/CHANGELOG.md#v1320-2022-11-28)
+  * **Feature**: Job scheduling enables the scheduled rollout of a Job with start and end times and a customizable end behavior when end time is reached. This is available for continuous and snapshot jobs. Added support for MQTT5 properties to AWS IoT TopicRule Republish Action.
+* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.13.0](service/iotdataplane/CHANGELOG.md#v1130-2022-11-28)
+  * **Feature**: This release adds support for MQTT5 properties to AWS IoT HTTP Publish API.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.23.0](service/iotwireless/CHANGELOG.md#v1230-2022-11-28)
+  * **Feature**: This release includes a new feature for customers to calculate the position of their devices by adding three new APIs: UpdateResourcePosition, GetResourcePosition, and GetPositionEstimate.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.36.0](service/kendra/CHANGELOG.md#v1360-2022-11-28)
+  * **Feature**: Amazon Kendra now supports preview of table information from HTML tables in the search results. The most relevant cells, with their corresponding rows and columns, are displayed as a preview in the search result. The most relevant table cell or cells are also highlighted in the table preview.
+* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.16.0](service/mgn/CHANGELOG.md#v1160-2022-11-28)
+  * **Feature**: This release adds support for Application and Wave management. We also now support custom post-launch actions.
+* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.0.0](service/oam/CHANGELOG.md#v100-2022-11-28)
+  * **Release**: New AWS service client module
+  * **Feature**: Amazon CloudWatch Observability Access Manager is a new service that allows configuration of the CloudWatch cross-account observability feature.
+* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.17.0](service/organizations/CHANGELOG.md#v1170-2022-11-28)
+  * **Feature**: This release introduces delegated administrator for AWS Organizations, a new feature to help you delegate the management of your Organizations policies, enabling you to govern your AWS organization in a decentralized way. You can now allow member accounts to manage Organizations policies.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.31.0](service/rds/CHANGELOG.md#v1310-2022-11-28)
+  * **Feature**: This release enables a new Aurora and RDS feature called Blue/Green Deployments that makes updates to databases safer, simpler, and faster.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.19.0](service/textract/CHANGELOG.md#v1190-2022-11-28)
+  * **Feature**: This release adds support for classifying and splitting lending documents by type, and extracting information by using the Analyze Lending APIs. This release also includes support for summarized information of the processed lending document package, in addition to per document results.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.22.0](service/transcribe/CHANGELOG.md#v1220-2022-11-28)
+  * **Feature**: This release adds support for 'inputType' for post-call and real-time (streaming) Call Analytics within Amazon Transcribe.
+* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.8.0](service/transcribestreaming/CHANGELOG.md#v180-2022-11-28)
+  * **Feature**: This release adds support for real-time (streaming) and post-call Call Analytics within Amazon Transcribe.
+
+# Release (2022-11-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.10.0](service/grafana/CHANGELOG.md#v1100-2022-11-23)
+  * **Feature**: This release includes support for configuring a Grafana workspace to connect to a datasource within a VPC as well as new APIs for configuring Grafana settings.
+* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.7.0](service/rbin/CHANGELOG.md#v170-2022-11-23)
+  * **Feature**: This release adds support for Rule Lock for Recycle Bin, which allows you to lock retention rules so that they can no longer be modified or deleted.
+
+# Release (2022-11-22)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.21.0](service/appflow/CHANGELOG.md#v1210-2022-11-22)
+  * **Feature**: Adding support for Amazon AppFlow to transfer the data to Amazon Redshift databases through Amazon Redshift Data API service. This feature will support the Redshift destination connector on both public and private accessible Amazon Redshift Clusters and Amazon Redshift Serverless.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.15.0](service/kinesisanalyticsv2/CHANGELOG.md#v1150-2022-11-22)
+  * **Feature**: Support for Apache Flink 1.15 in Kinesis Data Analytics.
+
+# Release (2022-11-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.25.0](service/route53/CHANGELOG.md#v1250-2022-11-21)
+  * **Feature**: Amazon Route 53 now supports the Asia Pacific (Hyderabad) Region (ap-south-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region.
+
+# Release (2022-11-18.2)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.0.1](service/ssmsap/CHANGELOG.md#v101-2022-11-182)
+  * **Bug Fix**: Removes the old model file for SSM SAP and uses the new model file to regenerate the client
+
+# Release (2022-11-18)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.20.0](service/appflow/CHANGELOG.md#v1200-2022-11-18)
+  * **Feature**: AppFlow provides a new API called UpdateConnectorRegistration to update a custom connector that customers have previously registered.
With this API, customers no longer need to unregister and then register a connector to make an update.
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.21.0](service/auditmanager/CHANGELOG.md#v1210-2022-11-18)
+  * **Feature**: This release introduces a new feature for Audit Manager: Evidence finder. You can now use evidence finder to quickly query your evidence, and add the matching evidence results to an assessment report.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.0.0](service/chimesdkvoice/CHANGELOG.md#v100-2022-11-18)
+  * **Release**: New AWS service client module
+  * **Feature**: Amazon Chime Voice Connector, Voice Connector Group and PSTN Audio Service APIs are now available in the Amazon Chime SDK Voice namespace. See https://docs.aws.amazon.com/chime-sdk/latest/dg/sdk-available-regions.html for more details.
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.21.0](service/cloudfront/CHANGELOG.md#v1210-2022-11-18)
+  * **Feature**: CloudFront API support for staging distributions and associated traffic management policies.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.38.0](service/connect/CHANGELOG.md#v1380-2022-11-18)
+  * **Feature**: Added AllowedAccessControlTags and TagRestrictedResource for Tag-Based Access Control on the Amazon Connect Webpage
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.17.6](service/dynamodb/CHANGELOG.md#v1176-2022-11-18)
+  * **Documentation**: Minor fixes for DynamoDB documentation.
+* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.13.25](service/dynamodbstreams/CHANGELOG.md#v11325-2022-11-18)
+  * **Documentation**: Minor fixes for DynamoDB documentation.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.72.0](service/ec2/CHANGELOG.md#v1720-2022-11-18)
+  * **Feature**: This release adds support for copying an Amazon Machine Image's tags when copying an AMI.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.35.0](service/glue/CHANGELOG.md#v1350-2022-11-18)
+  * **Feature**: AWS Glue Crawler - Adding support for table- and column-level comments with database-level datatypes for JDBC-based crawlers.
+* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.0.0](service/iotroborunner/CHANGELOG.md#v100-2022-11-18)
+  * **Release**: New AWS service client module
+  * **Feature**: AWS IoT RoboRunner is a new service that makes it easy to build applications that help multi-vendor robots work together seamlessly. See the IoT RoboRunner developer guide for more details on getting started. https://docs.aws.amazon.com/iotroborunner/latest/dev/iotroborunner-welcome.html
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.27.0](service/quicksight/CHANGELOG.md#v1270-2022-11-18)
+  * **Feature**: This release adds the following: 1) Asset management for centralized assets governance 2) QuickSight Q now supports public embedding 3) New Termination protection flag to mitigate accidental deletes 4) Athena data sources now accept a custom IAM role 5) QuickSight supports connectivity to Databricks
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.55.0](service/sagemaker/CHANGELOG.md#v1550-2022-11-18)
+  * **Feature**: Added DisableProfiler flag as a new field in ProfilerConfig
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.15.0](service/servicecatalog/CHANGELOG.md#v1150-2022-11-18)
+  * **Feature**: This release 1. adds support for Principal Name Sharing with Service Catalog portfolio sharing. 2.
Introduces repo sourced products which are created and managed with existing SC APIs. These products are synced to external repos and auto create new product versions based on changes in the repo. +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.15.0](service/sfn/CHANGELOG.md#v1150-2022-11-18) + * **Feature**: This release adds support for using Step Functions service integrations to invoke any cross-account AWS resource, even if that service doesn't support resource-based policies or cross-account calls. See https://docs.aws.amazon.com/step-functions/latest/dg/concepts-access-cross-acct-resources.html +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.25.0](service/transfer/CHANGELOG.md#v1250-2022-11-18) + * **Feature**: Adds a NONE encryption algorithm type to AS2 connectors, providing support for skipping encryption of the AS2 message body when a HTTPS URL is also specified. + +# Release (2022-11-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.12.0](service/amplify/CHANGELOG.md#v1120-2022-11-17) + * **Feature**: Adds a new value (WEB_COMPUTE) to the Platform enum that allows customers to create Amplify Apps with Server-Side Rendering support. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.19.0](service/appflow/CHANGELOG.md#v1190-2022-11-17) + * **Feature**: AppFlow simplifies the preparation and cataloging of SaaS data into the AWS Glue Data Catalog where your data can be discovered and accessed by AWS analytics and ML services. AppFlow now also supports data field partitioning and file size optimization to improve query performance and reduce cost. +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.16.0](service/appsync/CHANGELOG.md#v1160-2022-11-17) + * **Feature**: This release introduces the APPSYNC_JS runtime, and adds support for JavaScript in AppSync functions and AppSync pipeline resolvers. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.22.0](service/databasemigrationservice/CHANGELOG.md#v1220-2022-11-17) + * **Feature**: Adds support for Internet Protocol Version 6 (IPv6) on DMS Replication Instances +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.71.0](service/ec2/CHANGELOG.md#v1710-2022-11-17) + * **Feature**: This release adds a new optional parameter "privateIpAddress" for the CreateNatGateway API. PrivateIPAddress will allow customers to select a custom Private IPv4 address instead of having it be auto-assigned. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.25](service/elasticloadbalancingv2/CHANGELOG.md#v11825-2022-11-17) + * **Documentation**: Provides new target group attributes to turn on/off cross zone load balancing and configure target group health for Network Load Balancers and Application Load Balancers. Provides improvements to health check configuration for Network Load Balancers. +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.4.0](service/emrserverless/CHANGELOG.md#v140-2022-11-17) + * **Feature**: Adds support for AWS Graviton2 based applications. You can now select CPU architecture when creating new applications or updating existing ones. +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.1.0](service/ivschat/CHANGELOG.md#v110-2022-11-17) + * **Feature**: Adds LoggingConfiguration APIs for IVS Chat - a feature that allows customers to store and record sent messages in a chat room to S3 buckets, CloudWatch logs, or Kinesis firehose. 
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.25.0](service/lambda/CHANGELOG.md#v1250-2022-11-17) + * **Feature**: Add Node 18 (nodejs18.x) support to AWS Lambda. +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.22.0](service/personalize/CHANGELOG.md#v1220-2022-11-17) + * **Feature**: This release provides support for creation and use of metric attributions in AWS Personalize +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.20.0](service/polly/CHANGELOG.md#v1200-2022-11-17) + * **Feature**: Add two new neural voices - Ola (pl-PL) and Hala (ar-AE). +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.8.0](service/rum/CHANGELOG.md#v180-2022-11-17) + * **Feature**: CloudWatch RUM now supports custom events. To use custom events, create an app monitor or update an app monitor with CustomEvent Status as ENABLED. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.26.0](service/s3control/CHANGELOG.md#v1260-2022-11-17) + * **Feature**: Added 34 new S3 Storage Lens metrics to support additional customer use cases. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.7](service/secretsmanager/CHANGELOG.md#v1167-2022-11-17) + * **Documentation**: Documentation updates for Secrets Manager. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.24.0](service/securityhub/CHANGELOG.md#v1240-2022-11-17) + * **Feature**: Added SourceLayerArn and SourceLayerHash field for security findings. Updated AwsLambdaFunction Resource detail +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.15.0](service/servicecatalogappregistry/CHANGELOG.md#v1150-2022-11-17) + * **Feature**: This release adds support for tagged resource associations, which allows you to associate a group of resources with a defined resource tag key and value to the application. +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.17.4](service/sts/CHANGELOG.md#v1174-2022-11-17) + * **Documentation**: Documentation updates for AWS Security Token Service. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.18.0](service/textract/CHANGELOG.md#v1180-2022-11-17) + * **Feature**: This release adds support for specifying and extracting information from documents using the Signatures feature within Analyze Document API +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.27.0](service/workspaces/CHANGELOG.md#v1270-2022-11-17) + * **Feature**: The release introduces CreateStandbyWorkspaces, an API that allows you to create standby WorkSpaces associated with a primary WorkSpace in another Region. DescribeWorkspaces now includes related WorkSpaces properties. DescribeWorkspaceBundles and CreateWorkspaceBundle now return more bundle details. + +# Release (2022-11-16) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.19.1](service/batch/CHANGELOG.md#v1191-2022-11-16) + * **Documentation**: Documentation updates related to Batch on EKS +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.2.0](service/billingconductor/CHANGELOG.md#v120-2022-11-16) + * **Feature**: This release adds a new feature BillingEntity pricing rule. 
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.24.0](service/cloudformation/CHANGELOG.md#v1240-2022-11-16)
+  * **Feature**: Added UnsupportedTarget HandlerErrorCode for use with CFN Resource Hooks
+* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.14.0](service/comprehendmedical/CHANGELOG.md#v1140-2022-11-16)
+  * **Feature**: This release supports a new set of entities and traits. It also adds a new category (BEHAVIORAL_ENVIRONMENTAL_SOCIAL).
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.37.0](service/connect/CHANGELOG.md#v1370-2022-11-16)
+  * **Feature**: This release adds a new MonitorContact API for initiating monitoring of ongoing Voice and Chat contacts.
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.23.0](service/eks/CHANGELOG.md#v1230-2022-11-16)
+  * **Feature**: Adds support for customer-provided placement groups for Kubernetes control plane instances when creating local EKS clusters on Outposts
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.24.0](service/elasticache/CHANGELOG.md#v1240-2022-11-16)
+  * **Feature**: ElastiCache for Redis now supports AWS Identity and Access Management authentication access to Redis clusters, starting with Redis engine version 7.0
+* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.8.0](service/iottwinmaker/CHANGELOG.md#v180-2022-11-16)
+  * **Feature**: This release adds the following: 1) ExecuteQuery API allows users to query their AWS IoT TwinMaker Knowledge Graph 2) Pricing plan APIs allow users to configure and manage their pricing mode 3) Support for property groups and tabular property values in existing AWS IoT TwinMaker APIs.
+* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.12.0](service/personalizeevents/CHANGELOG.md#v1120-2022-11-16)
+  * **Feature**: This release provides support for creation and use of metric attributions in AWS Personalize
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.17.0](service/proton/CHANGELOG.md#v1170-2022-11-16)
+  * **Feature**: Add support for sorting and filtering in ListServiceInstances
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.30.0](service/rds/CHANGELOG.md#v1300-2022-11-16)
+  * **Feature**: This release adds support for container databases (CDBs) to Amazon RDS Custom for Oracle. A CDB contains one PDB at creation. You can add more PDBs using Oracle SQL. You can also customize your database installation by setting the Oracle base, Oracle home, and the OS user name and group.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.33.0](service/ssm/CHANGELOG.md#v1330-2022-11-16)
+  * **Feature**: This release adds support for cross account access in CreateOpsItem, UpdateOpsItem and GetOpsItem. It introduces new APIs to setup resource policies for SSM resources: PutResourcePolicy, GetResourcePolicies and DeleteResourcePolicy.
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.19.0](service/ssmincidents/CHANGELOG.md#v1190-2022-11-16)
+  * **Feature**: Add support for PagerDuty integrations on ResponsePlan, IncidentRecord, and RelatedItem APIs
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.24.0](service/transfer/CHANGELOG.md#v1240-2022-11-16)
+  * **Feature**: Allow additional operations to throw ThrottlingException
+* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.15.0](service/xray/CHANGELOG.md#v1150-2022-11-16)
+  * **Feature**: This release adds new APIs - PutResourcePolicy, DeleteResourcePolicy, ListResourcePolicies - for supporting resource-based policies for AWS X-Ray (a usage sketch follows these notes).
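+
+As an illustration of the X-Ray resource-based policy APIs added in the `xray` v1.15.0 entry above, a minimal Go sketch. The `PolicyName`/`PolicyDocument` input fields and the policy document itself are assumptions based on the SDK's usual conventions, not taken from the release:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/xray"
+)
+
+func main() {
+	ctx := context.TODO()
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := xray.NewFromConfig(cfg)
+
+	// Hypothetical policy document; the principal and action shown here
+	// are placeholders, not prescribed by the release notes.
+	policy := `{
+	  "Version": "2012-10-17",
+	  "Statement": [{
+	    "Effect": "Allow",
+	    "Principal": {"Service": "example.amazonaws.com"},
+	    "Action": "xray:PutTraceSegments",
+	    "Resource": "*"
+	  }]
+	}`
+
+	_, err = client.PutResourcePolicy(ctx, &xray.PutResourcePolicyInput{
+		PolicyName:     aws.String("example-policy"),
+		PolicyDocument: aws.String(policy),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```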
+
+# Release (2022-11-15)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.36.0](service/connect/CHANGELOG.md#v1360-2022-11-15)
+  * **Feature**: This release updates the APIs: UpdateInstanceAttribute, DescribeInstanceAttribute, and ListInstanceAttributes. You can use it to programmatically enable/disable enhanced contact monitoring using attribute type ENHANCED_CONTACT_MONITORING on the specified Amazon Connect instance.
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.20.0](service/greengrassv2/CHANGELOG.md#v1200-2022-11-15)
+  * **Feature**: Adds a new parent target ARN parameter to the CreateDeployment, GetDeployment, and ListDeployments APIs for the new subdeployments feature.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.24.0](service/route53/CHANGELOG.md#v1240-2022-11-15)
+  * **Feature**: Amazon Route 53 now supports the Europe (Spain) Region (eu-south-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region.
+* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.0.0](service/ssmsap/CHANGELOG.md#v100-2022-11-15)
+  * **Release**: New AWS service client module
+  * **Feature**: AWS Systems Manager for SAP provides simplified operations and management of SAP applications such as SAP HANA. With this release, SAP customers and partners can automate and simplify their SAP system administration tasks such as backup/restore of SAP HANA.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.26.0](service/workspaces/CHANGELOG.md#v1260-2022-11-15)
+  * **Feature**: This release introduces ModifyCertificateBasedAuthProperties, a new API that allows control of certificate-based auth properties associated with a WorkSpaces directory. The DescribeWorkspaceDirectories API will now additionally return certificate-based auth properties in its responses.
+
+# Release (2022-11-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.20.0](service/customerprofiles/CHANGELOG.md#v1200-2022-11-14)
+  * **Feature**: This release enhances the SearchProfiles API by providing functionality to search for profiles using multiple keys and logical operators.
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.18.0](service/lakeformation/CHANGELOG.md#v1180-2022-11-14)
+  * **Feature**: This release adds a new parameter "Parameters" in the DataLakeSettings.
+* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.13.3](service/managedblockchain/CHANGELOG.md#v1133-2022-11-14)
+  * **Documentation**: Updating the API docs data type: NetworkEthereumAttributes, and the operations DeleteNode, and CreateNode to also include the supported Goerli network.
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.16.0](service/proton/CHANGELOG.md#v1160-2022-11-14)
+  * **Feature**: Add support for CodeBuild Provisioning
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.29.0](service/rds/CHANGELOG.md#v1290-2022-11-14)
+  * **Feature**: This release adds support for restoring an RDS Multi-AZ DB cluster snapshot to a Single-AZ deployment or a Multi-AZ DB instance deployment.
+* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.12.0](service/workdocs/CHANGELOG.md#v1120-2022-11-14)
+  * **Feature**: Added 2 new document-related operations, DeleteDocumentVersion and RestoreDocumentVersions.
+* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.14.0](service/xray/CHANGELOG.md#v1140-2022-11-14)
+  * **Feature**: This release enhances the GetServiceGraph API to support a new type of edge that represents links between SQS and Lambda in event-driven applications.
+
+# Release (2022-11-11)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.0](config/CHANGELOG.md#v1180-2022-11-11)
+  * **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+  * **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider (see the sketch after this release section)
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.13.0](credentials/CHANGELOG.md#v1130-2022-11-11)
+  * **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+  * **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.34.1](service/glue/CHANGELOG.md#v1341-2022-11-11)
+  * **Documentation**: Added links related to enabling job bookmarks.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.31.0](service/iot/CHANGELOG.md#v1310-2022-11-11)
+  * **Feature**: This release adds a new API, ListRelatedResourcesForAuditFinding, and a new member type, IssuerCertificates, for AWS IoT Device Defender audits.
+* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.16.0](service/licensemanager/CHANGELOG.md#v1160-2022-11-11)
+  * **Feature**: AWS License Manager now supports onboarded Management Accounts or Delegated Admins to view granted licenses aggregated from all accounts in the organization.
+* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.14.0](service/marketplacecatalog/CHANGELOG.md#v1140-2022-11-11)
+  * **Feature**: Added three new APIs to support tagging and tag-based authorization: TagResource, UntagResource, and ListTagsForResource. Added optional parameters to the StartChangeSet API to support tagging a resource while making a request to create it.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.21.0](service/rekognition/CHANGELOG.md#v1210-2022-11-11)
+  * **Feature**: Adds support for the ImageProperties feature to detect dominant colors and image brightness, sharpness, and contrast; inclusion and exclusion filters for labels and label categories; and new fields in the API response, "aliases" and "categories"
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.23.8](service/securityhub/CHANGELOG.md#v1238-2022-11-11)
+  * **Documentation**: Documentation updates for Security Hub
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.18.0](service/ssmincidents/CHANGELOG.md#v1180-2022-11-11)
+  * **Feature**: RelatedItems now have an ID field which can be used for referencing them elsewhere. Introduces event references in the TimelineEvent API and increases the maximum length of "eventData" to 12K characters.
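+
+A minimal sketch of how the token refresh shows up in practice: loading a shared-config profile backed by IAM Identity Center (SSO) and making one call. The profile name is a placeholder; everything else uses standard `config` and `sts` APIs.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+func main() {
+	// Load an IAM Identity Center (SSO) profile from shared config. The
+	// profile name is a placeholder. As of config v1.18.0 / credentials
+	// v1.13.0, the resolved provider refreshes the SSO access token via the
+	// SSOTokenProvider, so long-running processes keep working after the
+	// cached token would otherwise have expired.
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithSharedConfigProfile("my-sso-profile"),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	out, err := sts.NewFromConfig(cfg).GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("authenticated as %s", *out.Arn)
+}
+```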
+
+# Release (2022-11-10)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.24.1](service/autoscaling/CHANGELOG.md#v1241-2022-11-10)
+  * **Documentation**: This release adds a new price capacity optimized allocation strategy for Spot Instances to help customers optimize provisioning of Spot Instances via EC2 Auto Scaling, EC2 Fleet, and Spot Fleet. It allocates Spot Instances based on both spare capacity availability and Spot Instance price.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.70.0](service/ec2/CHANGELOG.md#v1700-2022-11-10)
+  * **Feature**: This release adds a new price capacity optimized allocation strategy for Spot Instances to help customers optimize provisioning of Spot Instances via EC2 Auto Scaling, EC2 Fleet, and Spot Fleet. It allocates Spot Instances based on both spare capacity availability and Spot Instance price.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.19.0](service/ecs/CHANGELOG.md#v1190-2022-11-10)
+  * **Feature**: This release adds support for task scale-in protection with the UpdateTaskProtection and GetTaskProtection APIs. The UpdateTaskProtection API can be used to protect a service-managed task from being terminated by scale-in events, and the GetTaskProtection API to get the scale-in protection status of a task.
+* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.17.0](service/elasticsearchservice/CHANGELOG.md#v1170-2022-11-10)
+  * **Feature**: Amazon OpenSearch Service now offers managed VPC endpoints to connect to your Amazon OpenSearch Service VPC-enabled domain in a Virtual Private Cloud (VPC). This feature allows you to privately access your OpenSearch Service domain without using public IPs or requiring traffic to traverse the Internet.
+* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.0.1](service/resourceexplorer2/CHANGELOG.md#v101-2022-11-10)
+  * **Documentation**: Text only updates to some Resource Explorer descriptions.
+* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.0.0](service/scheduler/CHANGELOG.md#v100-2022-11-10)
+  * **Release**: New AWS service client module
+  * **Feature**: AWS introduces the new Amazon EventBridge Scheduler. EventBridge Scheduler is a serverless scheduler that allows you to create, run, and manage tasks from one central, managed service.
+
+# Release (2022-11-09)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.35.0](service/connect/CHANGELOG.md#v1350-2022-11-09)
+  * **Feature**: This release adds new fields SignInUrl, UserArn, and UserId to the GetFederationToken response payload.
+* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.1.0](service/connectcases/CHANGELOG.md#v110-2022-11-09)
+  * **Feature**: This release adds the ability to disable templates through the UpdateTemplate API. Disabling templates prevents customers from creating cases using the template. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.69.0](service/ec2/CHANGELOG.md#v1690-2022-11-09)
+  * **Feature**: Amazon EC2 Trn1 instances, powered by AWS Trainium chips, are purpose-built for high-performance deep learning training. u-24tb1.112xlarge and u-18tb1.112xlarge High Memory instances are purpose-built to run large in-memory databases.
+* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.14.0](service/groundstation/CHANGELOG.md#v1140-2022-11-09)
+  * **Feature**: This release adds the preview of customer-provided ephemeris support for AWS Ground Station, allowing space vehicle owners to provide their own position and trajectory information for a satellite.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.19.0](service/mediapackagevod/CHANGELOG.md#v1190-2022-11-09)
+  * **Feature**: This release adds "IncludeIframeOnlyStream" for Dash endpoints.
+* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.7.0](service/transcribestreaming/CHANGELOG.md#v170-2022-11-09)
+  * **Feature**: This release adds support for the hi-IN and th-TH languages
+
+# Release (2022-11-08)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.16.0](service/acm/CHANGELOG.md#v1160-2022-11-08)
+  * **Feature**: Support added for requesting elliptic curve certificate key algorithm types P-256 (EC_prime256v1) and P-384 (EC_secp384r1).
+* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.1.0](service/billingconductor/CHANGELOG.md#v110-2022-11-08)
+  * **Feature**: This release adds the Recurring Custom Line Item feature along with a new API ListCustomLineItemVersions.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.68.0](service/ec2/CHANGELOG.md#v1680-2022-11-08)
+  * **Feature**: This release enables sharing of EC2 Placement Groups across accounts and within AWS Organizations using Resource Access Manager
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.20.0](service/fms/CHANGELOG.md#v1200-2022-11-08)
+  * **Feature**: AWS Firewall Manager now supports importing existing AWS Network Firewall firewalls into Firewall Manager policies.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.24.0](service/lightsail/CHANGELOG.md#v1240-2022-11-08)
+  * **Feature**: This release adds support for Amazon Lightsail to automate the delegation of domains registered through Amazon Route 53 to Lightsail DNS management and to automate record creation for DNS validation of Lightsail SSL/TLS certificates.
+* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.11.0](service/opensearch/CHANGELOG.md#v1110-2022-11-08)
+  * **Feature**: Amazon OpenSearch Service now offers managed VPC endpoints to connect to your Amazon OpenSearch Service VPC-enabled domain in a Virtual Private Cloud (VPC). This feature allows you to privately access your OpenSearch Service domain without using public IPs or requiring traffic to traverse the Internet.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.19.0](service/polly/CHANGELOG.md#v1190-2022-11-08)
+  * **Feature**: Amazon Polly adds new voices: Elin (sv-SE), Ida (nb-NO), Laura (nl-NL) and Suvi (fi-FI). They are available as neural voices only.
+* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.0.0](service/resourceexplorer2/CHANGELOG.md#v100-2022-11-08)
+  * **Release**: New AWS service client module
+  * **Feature**: This is the initial SDK release for AWS Resource Explorer. AWS Resource Explorer lets your users search for and discover your AWS resources across the AWS Regions in your account.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.23.0](service/route53/CHANGELOG.md#v1230-2022-11-08)
+  * **Feature**: Amazon Route 53 now supports the Europe (Zurich) Region (eu-central-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region.
+
+# Release (2022-11-07)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.19.0](service/athena/CHANGELOG.md#v1190-2022-11-07)
+  * **Feature**: Adds support for using Query Result Reuse
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.24.0](service/autoscaling/CHANGELOG.md#v1240-2022-11-07)
+  * **Feature**: This release adds support for two new attributes for attribute-based instance type selection - NetworkBandwidthGbps and AllowedInstanceTypes.
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.20.0](service/cloudtrail/CHANGELOG.md#v1200-2022-11-07)
+  * **Feature**: This release includes support for configuring a delegated administrator to manage an AWS Organizations organization's CloudTrail trails and event data stores, and for AWS Key Management Service encryption of CloudTrail Lake event data stores.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.67.0](service/ec2/CHANGELOG.md#v1670-2022-11-07)
+  * **Feature**: This release adds support for two new attributes for attribute-based instance type selection - NetworkBandwidthGbps and AllowedInstanceTypes.
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.23.0](service/elasticache/CHANGELOG.md#v1230-2022-11-07)
+  * **Feature**: Added support for IPv6 and dual stack for Memcached and Redis clusters. Customers can now launch new Redis and Memcached clusters with IPv6 and dual stack networking support.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.26.0](service/lexmodelsv2/CHANGELOG.md#v1260-2022-11-07)
+  * **Feature**: Amazon Lex now supports new APIs for viewing and editing Custom Vocabulary in bots.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.27.0](service/mediaconvert/CHANGELOG.md#v1270-2022-11-07)
+  * **Feature**: The AWS Elemental MediaConvert SDK has added support for setting the SDR reference white point for HDR conversions and conversion of HDR10 to DolbyVision without mastering metadata.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.32.0](service/ssm/CHANGELOG.md#v1320-2022-11-07)
+  * **Feature**: This release includes support for applying a CloudWatch alarm to multi-account, multi-region Systems Manager Automation
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.1](service/wafv2/CHANGELOG.md#v1231-2022-11-07)
+  * **Documentation**: The geo match statement now adds labels for country and region. You can match requests at the region level by combining a geo match statement with label match statements.
+* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.17.0](service/wellarchitected/CHANGELOG.md#v1170-2022-11-07)
+  * **Feature**: This release adds support for integrations with AWS Trusted Advisor and AWS Service Catalog AppRegistry to improve workload discovery and speed up your workload reviews.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.25.0](service/workspaces/CHANGELOG.md#v1250-2022-11-07)
+  * **Feature**: This release adds the protocols attribute to the workspaces properties data type. This enables customers to migrate workspaces from PC over IP (PCoIP) to WorkSpaces Streaming Protocol (WSP) using the create and modify workspaces public APIs.
+
+# Release (2022-11-04)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.16.1](service/cloudwatchlogs/CHANGELOG.md#v1161-2022-11-04)
+  * **Documentation**: Doc-only update for bug fixes and support of export to buckets encrypted with SSE-KMS
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.66.0](service/ec2/CHANGELOG.md#v1660-2022-11-04)
+  * **Feature**: This release adds API support for the recipient of an AMI account share to remove shared AMI launch permissions.
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.15.0](service/emrcontainers/CHANGELOG.md#v1150-2022-11-04)
+  * **Feature**: Adding support for Job templates. Job templates allow you to create and store templates to configure Spark application parameters. This helps you ensure consistent settings across applications by reusing and enforcing configuration overrides in data pipelines.
+* `github.com/aws/aws-sdk-go-v2/service/internal/eventstreamtesting`: [v1.0.37](service/internal/eventstreamtesting/CHANGELOG.md#v1037-2022-11-04)
+  * **Dependency Update**: Update the golang.org/x/net dependency to 0.1.0
+
+# Release (2022-11-03)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.10.0](service/memorydb/CHANGELOG.md#v1100-2022-11-03)
+  * **Feature**: Adding support for r6gd instances for MemoryDB Redis with data tiering. In a cluster with data tiering enabled, when available memory capacity is exhausted, the least recently used data is automatically tiered to solid state drives for cost-effective capacity scaling with minimal performance impact.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.54.0](service/sagemaker/CHANGELOG.md#v1540-2022-11-03)
+  * **Feature**: Amazon SageMaker now supports running training jobs on ml.trn1 instance types.
+
+# Release (2022-11-02)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.26.0](service/iotsitewise/CHANGELOG.md#v1260-2022-11-02)
+  * **Feature**: This release adds the ListAssetModelProperties and ListAssetProperties APIs. You can list all properties that belong to a single asset model or asset using these two new APIs.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.25.0](service/s3control/CHANGELOG.md#v1250-2022-11-02)
+  * **Feature**: S3 on Outposts launches support for Lifecycle configuration for Outposts buckets. With S3 Lifecycle configuration, you can manage objects so they are stored cost-effectively. You can manage objects using size-based rules and specify how many noncurrent versions the bucket will retain.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.53.0](service/sagemaker/CHANGELOG.md#v1530-2022-11-02)
+  * **Feature**: This release updates the Framework model regex for ModelPackage to support new Framework versions for xgboost and sklearn.
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.17.0](service/ssmincidents/CHANGELOG.md#v1170-2022-11-02)
+  * **Feature**: Adds support for tagging replication-set on creation.
+
+# Release (2022-11-01)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.28.0](service/rds/CHANGELOG.md#v1280-2022-11-01)
+  * **Feature**: Relational Database Service - This release adds support for configuring Storage Throughput on RDS database instances.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.17.0](service/textract/CHANGELOG.md#v1170-2022-11-01)
+  * **Feature**: Add OCR results in AnalyzeIDResponse as blocks
+
+# Release (2022-10-31)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.15.0](service/apprunner/CHANGELOG.md#v1150-2022-10-31)
+  * **Feature**: This release adds support for private App Runner services. Services may now be configured to be made private and only accessible from a VPC. The changes include a new VpcIngressConnection resource and several new and modified APIs.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.16.0](service/cloudwatchlogs/CHANGELOG.md#v1160-2022-10-31)
+  * **Feature**: SDK release to support tagging for destinations and log groups with TagResource. Also supports tag on create with PutDestination.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.34.0](service/connect/CHANGELOG.md#v1340-2022-10-31)
+  * **Feature**: Amazon Connect now supports a new API, DismissUserContact, to dismiss or remove terminated contacts in the Agent CCP
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.65.0](service/ec2/CHANGELOG.md#v1650-2022-10-31)
+  * **Feature**: Elastic IP transfer is a new Amazon VPC feature that allows you to transfer your Elastic IP addresses from one AWS Account to another.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.30.0](service/iot/CHANGELOG.md#v1300-2022-10-31)
+  * **Feature**: This release adds the Amazon Location action to IoT Rules Engine.
+* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.15.0](service/sesv2/CHANGELOG.md#v1150-2022-10-31)
+  * **Feature**: This release includes support for interacting with the Virtual Deliverability Manager, allowing you to opt in/out of the feature and to retrieve recommendations and metric data.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.16.0](service/textract/CHANGELOG.md#v1160-2022-10-31)
+  * **Feature**: This release introduces additional support for 30+ normalized fields such as vendor address and currency. It also includes OCR output in the response and accuracy improvements for the already supported fields in the previous version
+
+# Release (2022-10-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.14.0](service/apprunner/CHANGELOG.md#v1140-2022-10-28)
+  * **Feature**: AWS App Runner adds .NET 6, Go 1, PHP 8.1 and Ruby 3.1 runtimes.
+* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.18.0](service/appstream/CHANGELOG.md#v1180-2022-10-28)
+  * **Feature**: This release includes CertificateBasedAuthProperties in CreateDirectoryConfig and UpdateDirectoryConfig.
+* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.16.20](service/cloud9/CHANGELOG.md#v11620-2022-10-28)
+  * **Documentation**: Update to the documentation section of the Cloud9 API Reference guide.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.23.0](service/cloudformation/CHANGELOG.md#v1230-2022-10-28)
+  * **Feature**: This release adds more fields to improve visibility of AWS CloudFormation StackSets information in the following APIs: ListStackInstances, DescribeStackInstance, ListStackSetOperationResults, ListStackSetOperations, DescribeStackSetOperation.
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.19.0](service/mediatailor/CHANGELOG.md#v1190-2022-10-28)
+  * **Feature**: This release introduces support for SCTE-35 segmentation descriptor messages which can be sent within time signal messages.
+
+# Release (2022-10-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.64.0](service/ec2/CHANGELOG.md#v1640-2022-10-27)
+  * **Feature**: This feature supports replacing the instance root volume using an updated AMI without requiring customers to stop their instance.
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.19.0](service/fms/CHANGELOG.md#v1190-2022-10-27)
+  * **Feature**: Add support for the NetworkFirewall Managed Rule Group Override flag in the GetViolationDetails API
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.34.0](service/glue/CHANGELOG.md#v1340-2022-10-27)
+  * **Feature**: Added support for custom datatypes when using a custom CSV classifier.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.13](service/redshift/CHANGELOG.md#v12613-2022-10-27)
+  * **Documentation**: This release clarifies use for the ElasticIp parameter of the CreateCluster and RestoreFromClusterSnapshot APIs.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.52.0](service/sagemaker/CHANGELOG.md#v1520-2022-10-27)
+  * **Feature**: This change allows customers to provide a custom entrypoint script for the docker container to be run while executing training jobs, and provide custom arguments to the entrypoint script.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.0](service/wafv2/CHANGELOG.md#v1230-2022-10-27)
+  * **Feature**: This release adds the following: Challenge rule action, to silently verify client browsers; rule group rule action override to any valid rule action, not just Count; token sharing between protected applications for challenge/CAPTCHA token; targeted rules option for Bot Control managed rule group.
+
+# Release (2022-10-26)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.23](service/iam/CHANGELOG.md#v11823-2022-10-26)
+  * **Documentation**: Doc only update that corrects instances of CLI not using an entity.
+* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.18.0](service/kafka/CHANGELOG.md#v1180-2022-10-26)
+  * **Feature**: This release adds support for Tiered Storage. UpdateStorage allows you to control the Storage Mode for supported storage tiers.
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.18.0](service/neptune/CHANGELOG.md#v1180-2022-10-26)
+  * **Feature**: Added a new cluster-level attribute to set the capacity range for Neptune Serverless instances.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.51.0](service/sagemaker/CHANGELOG.md#v1510-2022-10-26)
+  * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying Grid Search strategy for tuning jobs, which evaluates all hyperparameter combinations exhaustively based on the categorical hyperparameters provided.
+
+# Release (2022-10-25)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.17.0](service/accessanalyzer/CHANGELOG.md#v1170-2022-10-25)
+  * **Feature**: This release adds support for six new resource types in IAM Access Analyzer to help you easily identify public and cross-account access to your AWS resources. Updated service API, documentation, and paginators.
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.19.3](service/location/CHANGELOG.md#v1193-2022-10-25)
+  * **Documentation**: Added new map styles with satellite imagery for map resources using HERE as a data provider.
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.18.0](service/mediatailor/CHANGELOG.md#v1180-2022-10-25)
+  * **Feature**: This release is a documentation update
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.27.0](service/rds/CHANGELOG.md#v1270-2022-10-25)
+  * **Feature**: Relational Database Service - This release adds support for exporting DB cluster data to Amazon S3.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.24.0](service/workspaces/CHANGELOG.md#v1240-2022-10-25)
+  * **Feature**: This release adds new enums for supporting Workspaces Core features, including creating Manual running mode workspaces, importing regular Workspaces Core images and importing g4dn Workspaces Core images.
+
+# Release (2022-10-24)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.12.19](feature/ec2/imds/CHANGELOG.md#v11219-2022-10-24)
+  * **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled.
+* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.19.0](service/acmpca/CHANGELOG.md#v1190-2022-10-24)
+  * **Feature**: AWS Private Certificate Authority (AWS Private CA) now offers usage modes, which are combinations of features to address specific use cases.
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.19.0](service/batch/CHANGELOG.md#v1190-2022-10-24)
+  * **Feature**: This release adds support for AWS Batch on Amazon EKS.
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.19.0](service/datasync/CHANGELOG.md#v1190-2022-10-24)
+  * **Feature**: Added support for self-signed certificates when using object storage locations; added BytesCompressed to the TaskExecution response.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.50.0](service/sagemaker/CHANGELOG.md#v1500-2022-10-24)
+  * **Feature**: SageMaker Inference Recommender now supports a new API ListInferenceRecommendationJobSteps to return the details of all the benchmarks created for an inference recommendation job.
+
+# Release (2022-10-21)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.17.0
+  * **Feature**: Adds `aws.IsCredentialsProvider` for inspecting `CredentialProvider` types when needing to determine if the underlying implementation type matches a target type. This resolves an issue where `CredentialsCache` could mask `AnonymousCredentials` providers, breaking downstream detection logic (see the sketch after this section).
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.21.0](service/cognitoidentityprovider/CHANGELOG.md#v1210-2022-10-21)
+  * **Feature**: This release adds a new "DeletionProtection" field to the UserPool in Cognito. Application admins can configure this value with either ACTIVE or INACTIVE value. Setting this field to ACTIVE will prevent a user pool from accidental deletion.
+* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.16.16](service/eventbridge/CHANGELOG.md#v11616-2022-10-21)
+  * **Bug Fix**: The SDK client has been updated to utilize the `aws.IsCredentialsProvider` function for determining if `aws.AnonymousCredentials` has been configured for the `CredentialProvider`.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.29.0](service/s3/CHANGELOG.md#v1290-2022-10-21)
+  * **Feature**: S3 on Outposts launches support for automatic bucket-style alias. You can use the automatic access point alias instead of an access point ARN for any object-level operation in an Outposts bucket.
+  * **Bug Fix**: The SDK client has been updated to utilize the `aws.IsCredentialsProvider` function for determining if `aws.AnonymousCredentials` has been configured for the `CredentialProvider`.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.49.0](service/sagemaker/CHANGELOG.md#v1490-2022-10-21)
+  * **Feature**: The CreateInferenceRecommenderJob API now supports passing endpoint details directly, which will help customers identify the max invocation and max latency they can achieve for their model and the associated endpoint, along with getting recommendations on other instances.
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.17.0](service/sts/CHANGELOG.md#v1170-2022-10-21)
+  * **Feature**: Add presign functionality for the sts:AssumeRole operation
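+
+A minimal sketch of the `aws.IsCredentialsProvider` check described above, assuming the documented pattern of passing a typed nil pointer as the comparison target:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+func main() {
+	// Wrapping AnonymousCredentials in a CredentialsCache previously masked
+	// the underlying type from downstream detection logic. With v1.17.0,
+	// aws.IsCredentialsProvider unwraps the cache and compares the
+	// implementation type against a target type.
+	var provider aws.CredentialsProvider = aws.NewCredentialsCache(aws.AnonymousCredentials{})
+
+	anonymous := aws.IsCredentialsProvider(provider, (*aws.AnonymousCredentials)(nil))
+	fmt.Println("anonymous credentials configured:", anonymous)
+}
+```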
+
+# Release (2022-10-20)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.20.0](service/devopsguru/CHANGELOG.md#v1200-2022-10-20)
+  * **Feature**: This release adds information about the resources DevOps Guru is analyzing.
+* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.15.0](service/globalaccelerator/CHANGELOG.md#v1150-2022-10-20)
+  * **Feature**: Global Accelerator now supports AddEndpoints and RemoveEndpoints operations for standard endpoint groups.
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.7.0](service/resiliencehub/CHANGELOG.md#v170-2022-10-20)
+  * **Feature**: In this release, we are introducing support for regional optimization for AWS Resilience Hub applications. It also includes a few documentation updates to improve clarity.
+* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.7.0](service/rum/CHANGELOG.md#v170-2022-10-20)
+  * **Feature**: CloudWatch RUM now supports Extended CloudWatch Metrics with Additional Dimensions
+
+# Release (2022-10-19)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.11.6](service/chimesdkmessaging/CHANGELOG.md#v1116-2022-10-19)
+  * **Documentation**: Documentation updates for Chime Messaging SDK
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.19.0](service/cloudtrail/CHANGELOG.md#v1190-2022-10-19)
+  * **Feature**: This release includes support for exporting CloudTrail Lake query results to an Amazon S3 bucket.
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.27.0](service/configservice/CHANGELOG.md#v1270-2022-10-19)
+  * **Feature**: This release adds resourceType enums for AppConfig, AppSync, DataSync, EC2, EKS, Glue, GuardDuty, SageMaker, ServiceDiscovery, SES, Route53 types.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.33.0](service/connect/CHANGELOG.md#v1330-2022-10-19)
+  * **Feature**: This release adds API support for managing phone numbers that can be used across multiple AWS regions through telephony traffic distribution.
+* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.13.0](service/managedblockchain/CHANGELOG.md#v1130-2022-10-19)
+  * **Feature**: Adding new Accessor APIs for Amazon Managed Blockchain
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.28.0](service/s3/CHANGELOG.md#v1280-2022-10-19)
+  * **Feature**: Updates internal logic for constructing API endpoints. We have added rule-based endpoints and internal model parameters.
+* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.1.0](service/supportapp/CHANGELOG.md#v110-2022-10-19)
+  * **Feature**: This release adds the RegisterSlackWorkspaceForOrganization API. You can use the API to register a Slack workspace for an AWS account that is part of an organization.
+* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.7.0](service/workspacesweb/CHANGELOG.md#v170-2022-10-19)
+  * **Feature**: WorkSpaces Web now supports user access logging for recording session start, stop, and URL navigation.
+
+# Release (2022-10-18)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.20.10](service/frauddetector/CHANGELOG.md#v12010-2022-10-18)
+  * **Documentation**: Documentation Updates for Amazon Fraud Detector
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.48.0](service/sagemaker/CHANGELOG.md#v1480-2022-10-18)
+  * **Feature**: This change allows customers to enable data capturing while running a batch transform job, and configure a monitoring schedule to monitor the captured data.
+* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.18.0](service/servicediscovery/CHANGELOG.md#v1180-2022-10-18)
+  * **Feature**: Updated the ListNamespaces API to support the NAME and HTTP_NAME filters, and the BEGINS_WITH filter condition.
+* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.14.0](service/sesv2/CHANGELOG.md#v1140-2022-10-18)
+  * **Feature**: This release allows subscribers to enable Dedicated IPs (managed) to send email via a fully managed dedicated IP experience. It also adds identities' VerificationStatus in the response of GetEmailIdentity and ListEmailIdentities APIs, and ImportJobs counts in the response of ListImportJobs API.
+
+# Release (2022-10-17)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.14.0](service/greengrass/CHANGELOG.md#v1140-2022-10-17)
+  * **Feature**: This change allows customers to specify FunctionRuntimeOverride in FunctionDefinitionVersion. This configuration can be used if the runtime on the device is different from the AWS Lambda runtime specified for that function.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.47.0](service/sagemaker/CHANGELOG.md#v1470-2022-10-17)
+  * **Feature**: This release adds support for C7g, C6g, C6gd, C6gn, M6g, M6gd, R6g, and R6gn Graviton instance types in Amazon SageMaker Inference.
+
+# Release (2022-10-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.26.0](service/mediaconvert/CHANGELOG.md#v1260-2022-10-14)
+  * **Feature**: MediaConvert now supports specifying the minimum percentage of the HRD buffer available at the end of each encoded video segment.
+
+# Release (2022-10-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.8.0](service/amplifyuibuilder/CHANGELOG.md#v180-2022-10-13)
+  * **Feature**: We are releasing the ability for fields to be configured as arrays.
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.18.0](service/appflow/CHANGELOG.md#v1180-2022-10-13)
+  * **Feature**: With this update, you can choose which Salesforce API is used by Amazon AppFlow to transfer data to or from your Salesforce account. You can choose the Salesforce REST API or Bulk API 2.0. You can also choose for Amazon AppFlow to pick the API automatically.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.32.0](service/connect/CHANGELOG.md#v1320-2022-10-13)
+  * **Feature**: This release adds support for a secondary email and a mobile number for Amazon Connect instance users.
+* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.15.0](service/directoryservice/CHANGELOG.md#v1150-2022-10-13)
+  * **Feature**: This release adds support for describing and updating AWS Managed Microsoft AD setup.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.24](service/ecs/CHANGELOG.md#v11824-2022-10-13)
+  * **Documentation**: Documentation update to address tickets.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.16.0](service/guardduty/CHANGELOG.md#v1160-2022-10-13)
+  * **Feature**: Add UnprocessedDataSources to CreateDetectorResponse, which specifies the data sources that couldn't be enabled during the CreateDetector request. In addition, documentation has been updated.
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.20](service/iam/CHANGELOG.md#v11820-2022-10-13)
+  * **Documentation**: Documentation updates for the AWS Identity and Access Management API Reference.
+* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.0.1](service/iotfleetwise/CHANGELOG.md#v101-2022-10-13)
+  * **Documentation**: Documentation update for AWS IoT FleetWise
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.24.0](service/medialive/CHANGELOG.md#v1240-2022-10-13)
+  * **Feature**: AWS Elemental MediaLive now supports forwarding SCTE-35 messages through the Event Signaling and Management (ESAM) API, and can read those SCTE-35 messages from an inactive source.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.18.0](service/mediapackagevod/CHANGELOG.md#v1180-2022-10-13)
+  * **Feature**: This release adds SPEKE v2 support for MediaPackage VOD. SPEKE v2 is an upgrade to the existing SPEKE API to support multiple encryption keys, based on an encryption contract selected by the customer.
+* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.9.0](service/panorama/CHANGELOG.md#v190-2022-10-13)
+  * **Feature**: Pause and resume camera stream processing with SignalApplicationInstanceNodeInstances. Reboot an appliance with CreateJobForDevices. More application state information in DescribeApplicationInstance response.
+* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.16](service/rdsdata/CHANGELOG.md#v11216-2022-10-13)
+  * **Documentation**: Doc update to reflect no support for the schema parameter on the BatchExecuteStatement API
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.31.0](service/ssm/CHANGELOG.md#v1310-2022-10-13)
+  * **Feature**: Support of AmazonLinux2022 by Patch Manager
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.16.0](service/ssmincidents/CHANGELOG.md#v1160-2022-10-13)
+  * **Feature**: Update RelatedItem enum to support Tasks
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.23.0](service/transfer/CHANGELOG.md#v1230-2022-10-13)
+  * **Feature**: This release adds an option for customers to configure workflows that are triggered when files are only partially received from a client due to premature session disconnect.
+* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.15.1](service/translate/CHANGELOG.md#v1151-2022-10-13)
+  * **Documentation**: This release enables customers to specify multiple target languages in asynchronous batch translation requests.
+* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.10.0](service/wisdom/CHANGELOG.md#v1100-2022-10-13)
+  * **Feature**: This release updates the GetRecommendations API to include a trigger event list for classifying and grouping recommendations.
+
+# Release (2022-10-07)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.16.15](service/codegurureviewer/CHANGELOG.md#v11615-2022-10-07)
+  * **Documentation**: Documentation update to replace broken link.
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.20](service/elasticloadbalancingv2/CHANGELOG.md#v11820-2022-10-07)
+  * **Documentation**: Gateway Load Balancer adds a new feature (target_failover) for customers to rebalance existing flows to a healthy target after the original target is marked unhealthy or deregistered. This allows graceful patching/upgrades of target appliances during maintenance windows, and helps reduce unhealthy target failover time.
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.19.0](service/greengrassv2/CHANGELOG.md#v1190-2022-10-07)
+  * **Feature**: This release adds error status details for deployments and components that failed on a device and adds features to improve visibility into component installation.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.26.0](service/quicksight/CHANGELOG.md#v1260-2022-10-07)
+  * **Feature**: Amazon QuickSight now supports SecretsManager Secret ARN in place of CredentialPair for DataSource creation and update. This release also has some minor documentation updates and removes CountryCode as a required parameter in GeoSpatialColumnGroup
+
+# Release (2022-10-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.6.15](service/resiliencehub/CHANGELOG.md#v1615-2022-10-06)
+  * **Documentation**: Documentation change for AWS Resilience Hub. Doc-only update to fix documentation layout
+
+# Release (2022-10-05)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.33.0](service/glue/CHANGELOG.md#v1330-2022-10-05)
+  * **Feature**: This SDK release adds support to sync Glue jobs with a source control provider. Additionally, a new parameter called SourceControlDetails has been added to the Job model.
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.20.0](service/networkfirewall/CHANGELOG.md#v1200-2022-10-05)
+  * **Feature**: StreamExceptionPolicy configures how AWS Network Firewall processes traffic when a network connection breaks midstream
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.23.0](service/outposts/CHANGELOG.md#v1230-2022-10-05)
+  * **Feature**: This release adds the Asset state information to the ListAssets response. The ListAssets request supports filtering on Asset state.
+
+# Release (2022-10-04)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.31.0](service/connect/CHANGELOG.md#v1310-2022-10-04)
+  * **Feature**: Updated the CreateIntegrationAssociation API to support the CASES_DOMAIN IntegrationType.
+* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.0.0](service/connectcases/CHANGELOG.md#v100-2022-10-04)
+  * **Release**: New AWS service client module
+  * **Feature**: This release adds APIs for Amazon Connect Cases. Cases allows your agents to quickly track and manage customer issues that require multiple interactions, follow-up tasks, and teams in your contact center. For more information, see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.63.0](service/ec2/CHANGELOG.md#v1630-2022-10-04)
+  * **Feature**: Added EnableNetworkAddressUsageMetrics flag for ModifyVpcAttribute, DescribeVpcAttribute APIs.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.23](service/ecs/CHANGELOG.md#v11823-2022-10-04)
+  * **Documentation**: Documentation updates to address various Amazon ECS tickets.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.24.0](service/s3control/CHANGELOG.md#v1240-2022-10-04)
+  * **Feature**: S3 Object Lambda adds support to allow customers to intercept HeadObject and ListObjects requests and introduce their own compute. These requests were previously proxied to S3.
+* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.17.0](service/workmail/CHANGELOG.md#v1170-2022-10-04)
+  * **Feature**: This release adds support for impersonation roles in Amazon WorkMail.
+
+# Release (2022-10-03)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.16.0](service/accessanalyzer/CHANGELOG.md#v1160-2022-10-03)
+  * **Feature**: AWS IAM Access Analyzer policy validation introduces new checks for role trust policies. As customers author a policy, IAM Access Analyzer policy validation evaluates the policy for any issues to make it easier for customers to author secure policies.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.62.0](service/ec2/CHANGELOG.md#v1620-2022-10-03)
+  * **Feature**: Adding an imdsSupport attribute to EC2 AMIs
+* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.16.0](service/snowball/CHANGELOG.md#v1160-2022-10-03)
+  * **Feature**: Adds support for V3_5C. This is a refreshed AWS Snowball Edge Compute Optimized device type with 28TB SSD, 104 vCPU and 416GB memory (customer usable).
+
+# Release (2022-09-30)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.15.0](service/codedeploy/CHANGELOG.md#v1150-2022-09-30)
+  * **Feature**: This release allows you to override the alarm configurations when creating a deployment.
+* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.19.0](service/devopsguru/CHANGELOG.md#v1190-2022-09-30)
+  * **Feature**: This release adds a filter feature to the AddNotificationChannel API, enabling customers to configure SNS notification messages by Severity or MessageTypes
+* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.13.0](service/dlm/CHANGELOG.md#v1130-2022-09-30)
+  * **Feature**: This release adds support for archival of single-volume snapshots created by Amazon Data Lifecycle Manager policies
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.46.0](service/sagemaker/CHANGELOG.md#v1460-2022-09-30)
+  * **Feature**: A new parameter called ExplainerConfig is added to the CreateEndpointConfig API to enable the SageMaker Clarify online explainability feature.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.16.0](service/sagemakerruntime/CHANGELOG.md#v1160-2022-09-30)
+  * **Feature**: A new parameter called EnableExplanations is added to the InvokeEndpoint API to enable on-demand SageMaker Clarify online explainability requests.
+* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.13.6](service/ssooidc/CHANGELOG.md#v1136-2022-09-30)
+  * **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference.
+
+# Release (2022-09-29)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.15.0](service/acm/CHANGELOG.md#v1150-2022-09-29)
+  * **Feature**: This update returns additional certificate details such as certificate SANs and allows sorting in the ListCertificates API.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.61.0](service/ec2/CHANGELOG.md#v1610-2022-09-29)
+  * **Feature**: u-3tb1 instances are powered by Intel Xeon Platinum 8176M (Skylake) processors and are purpose-built to run large in-memory databases.
+* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.3.0](service/emrserverless/CHANGELOG.md#v130-2022-09-29)
+  * **Feature**: This release adds API support to debug Amazon EMR Serverless jobs in real-time with live application UIs
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.25.0](service/fsx/CHANGELOG.md#v1250-2022-09-29)
+  * **Feature**: This release adds support for Amazon File Cache.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.0.0](service/migrationhuborchestrator/CHANGELOG.md#v100-2022-09-29)
+  * **Release**: New AWS service client module
+  * **Feature**: Introducing AWS MigrationHubOrchestrator. This is the first public release of AWS MigrationHubOrchestrator.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.18.0](service/polly/CHANGELOG.md#v1180-2022-09-29)
+  * **Feature**: Added support for the new Cantonese voice - Hiujin. Hiujin is available as a Neural voice only.
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.15.0](service/proton/CHANGELOG.md#v1150-2022-09-29)
+  * **Feature**: This release adds an option to delete pipeline provisioning repositories using the UpdateAccountSettings API
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.45.0](service/sagemaker/CHANGELOG.md#v1450-2022-09-29)
+  * **Feature**: SageMaker Training Managed Warm Pools let you retain provisioned infrastructure to reduce latency for repetitive training workloads.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.2](service/secretsmanager/CHANGELOG.md#v1162-2022-09-29)
+  * **Documentation**: Documentation updates for Secrets Manager
+* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.15.0](service/translate/CHANGELOG.md#v1150-2022-09-29)
+  * **Feature**: This release enables customers to access control rights on Translate resources like Parallel Data and Custom Terminology using Tag Based Authorization.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.23.0](service/workspaces/CHANGELOG.md#v1230-2022-09-29)
+  * **Feature**: This release includes a diagnostic log uploading feature. If it is enabled, the log files of the WorkSpaces Windows client will be sent to Amazon WorkSpaces automatically for troubleshooting. You can use the modifyClientProperty API to enable/disable this feature.
+
+# Release (2022-09-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.21.0](service/costexplorer/CHANGELOG.md#v1210-2022-09-27)
+  * **Feature**: This release is to support retroactive Cost Categories. The new field will enable you to retroactively apply new and existing cost category rules to previous months.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.35.0](service/kendra/CHANGELOG.md#v1350-2022-09-27)
+  * **Feature**: Amazon Kendra now provides a data source connector for Dropbox. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-dropbox.html
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.19.0](service/location/CHANGELOG.md#v1190-2022-09-27)
+  * **Feature**: This release adds place IDs, which are unique identifiers of places, along with a new GetPlace operation, which can be used with place IDs to find a place again later. UnitNumber and UnitType are also added as new properties of places.
+
+# Release (2022-09-26)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.10.0](feature/dynamodb/attributevalue/CHANGELOG.md#v1100-2022-09-26)
+  * **Feature**: Adds a String method to UnixTime, so that when structs with this field get logged it prints a human-readable time.
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.10.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v1100-2022-09-26)
+  * **Feature**: Adds a String method to UnixTime, so that when structs with this field get logged it prints a human-readable time.
+* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.14.0](service/costandusagereportservice/CHANGELOG.md#v1140-2022-09-26)
+  * **Feature**: This release adds two new supported regions (me-central-1/eu-south-2) for OSG.
+* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.0.0](service/iotfleetwise/CHANGELOG.md#v100-2022-09-26)
+  * **Release**: New AWS service client module
+  * **Feature**: General availability (GA) for AWS IoT Fleetwise. It adds AWS IoT Fleetwise to the AWS SDK. For more information, see https://docs.aws.amazon.com/iot-fleetwise/latest/APIReference/Welcome.html.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.30.0](service/ssm/CHANGELOG.md#v1300-2022-09-26)
+  * **Feature**: This release includes support for applying a CloudWatch alarm to Systems Manager capabilities like Automation, Run Command, State Manager, and Maintenance Windows.
+
+# Release (2022-09-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.13.0](service/apprunner/CHANGELOG.md#v1130-2022-09-23)
+  * **Feature**: AWS App Runner adds a Node.js 16 runtime.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.60.0](service/ec2/CHANGELOG.md#v1600-2022-09-23)
+  * **Feature**: Letting external AWS customers provide ImageId as a Launch Template override in FleetLaunchTemplateOverridesRequest
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.25.0](service/lexmodelsv2/CHANGELOG.md#v1250-2022-09-23)
+  * **Feature**: This release introduces an additional optional parameter, promptAttemptsSpecification, to PromptSpecification, which enables users to configure the interrupt setting and audio, DTMF, and text input configuration for the initial and retry prompts played by the bot
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.23.0](service/lightsail/CHANGELOG.md#v1230-2022-09-23)
+  * **Feature**: This release adds Instance Metadata Service (IMDS) support for Lightsail instances.
+* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.14.0](service/nimble/CHANGELOG.md#v1140-2022-09-23)
+  * **Feature**: Amazon Nimble Studio adds support for on-demand Amazon Elastic Compute Cloud (EC2) G3 and G5 instances, allowing customers to utilize additional GPU instance types for their creative projects.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.29.0](service/ssm/CHANGELOG.md#v1290-2022-09-23)
+  * **Feature**: This release adds new SSM document types ConformancePackTemplate and CloudFormation
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.22.9](service/wafv2/CHANGELOG.md#v1229-2022-09-23)
+  * **Documentation**: Add the default specification for ResourceType in ListResourcesForWebACL.
+
+# Release (2022-09-22)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.7.0](service/backupgateway/CHANGELOG.md#v170-2022-09-22)
+  * **Feature**: Changes include: new GetVirtualMachineApi to fetch a single user's VM, improving ListVirtualMachines to fetch filtered VMs as well as all VMs, and improving GetGatewayApi to now also return the gateway's MaintenanceStartTime.
+* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.14.0](service/devicefarm/CHANGELOG.md#v1140-2022-09-22)
+  * **Feature**: This release adds the support for VPC-ENI based connectivity for private devices on AWS Device Farm.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.59.0](service/ec2/CHANGELOG.md#v1590-2022-09-22)
+  * **Feature**: Documentation updates for Amazon EC2.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.32.0](service/glue/CHANGELOG.md#v1320-2022-09-22)
+  * **Feature**: Added support for S3 Event Notifications for Catalog Target Crawlers.
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.5](service/identitystore/CHANGELOG.md#v1155-2022-09-22)
+  * **Documentation**: Documentation updates for the Identity Store CLI Reference.
+
+# Release (2022-09-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.19.0](service/comprehend/CHANGELOG.md#v1190-2022-09-21)
+  * **Feature**: Amazon Comprehend now supports synchronous mode for targeted sentiment API operations.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.22.2](service/route53/CHANGELOG.md#v1222-2022-09-21)
+  * **Bug Fix**: Updated GetChange to sanitize /change/ prefix of the changeId returned from the service.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.23.0](service/s3control/CHANGELOG.md#v1230-2022-09-21)
+  * **Feature**: S3 on Outposts launches support for object versioning for Outposts buckets. With S3 Versioning, you can preserve, retrieve, and restore every version of every object stored in your buckets. You can recover from both unintended user actions and application failures.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.44.0](service/sagemaker/CHANGELOG.md#v1440-2022-09-21)
+  * **Feature**: SageMaker now allows customization on Canvas Application settings, including enabling/disabling time-series forecasting and specifying an Amazon Forecast execution role at both the Domain and UserProfile levels.
+
+# Release (2022-09-20)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.16.16
+  * **Documentation**: Added clarification on the Credentials object to show usage of LoadDefaultConfig to load credentials (see the sketch below)
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.58.0](service/ec2/CHANGELOG.md#v1580-2022-09-20)
+  * **Feature**: This release adds support for blocked paths to Amazon VPC Reachability Analyzer.
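+
+A minimal sketch of the clarified usage: resolving credentials through `config.LoadDefaultConfig` and inspecting the resulting `aws.Credentials` value. Everything here uses standard `config`/`aws` APIs; nothing is specific to this changelog entry.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+)
+
+func main() {
+	// LoadDefaultConfig resolves credentials from the default chain
+	// (environment, shared config/credentials files, IAM roles, ...).
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// cfg.Credentials is an aws.CredentialsProvider; Retrieve returns the
+	// resolved aws.Credentials value.
+	creds, err := cfg.Credentials.Retrieve(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("credentials source: %s, can expire: %v", creds.Source, creds.CanExpire)
+}
+```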
+
+# Release (2022-09-19)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.18.0](service/cloudtrail/CHANGELOG.md#v1180-2022-09-19)
+  * **Feature**: This release includes support for importing existing trails into CloudTrail Lake.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.57.0](service/ec2/CHANGELOG.md#v1570-2022-09-19)
+  * **Feature**: This release adds the CapacityAllocations field to DescribeCapacityReservations
+* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.17.0](service/mediaconnect/CHANGELOG.md#v1170-2022-09-19)
+  * **Feature**: This change allows the customer to use the SRT Caller protocol as part of their flows
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.26.0](service/rds/CHANGELOG.md#v1260-2022-09-19)
+  * **Feature**: This release adds support for Amazon RDS Proxy with SQL Server compatibility.
+
+# Release (2022-09-16)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.13.0](service/codestarnotifications/CHANGELOG.md#v1130-2022-09-16)
+  * **Feature**: This release adds tag based access control for the UntagResource API.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.21](service/ecs/CHANGELOG.md#v11821-2022-09-16)
+  * **Documentation**: This release supports new task definition sizes.
+
+# Release (2022-09-15)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.17.0](service/dynamodb/CHANGELOG.md#v1170-2022-09-15)
+  * **Feature**: Increased DynamoDB transaction limit from 25 to 100.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.56.0](service/ec2/CHANGELOG.md#v1560-2022-09-15)
+  * **Feature**: This feature allows customers to create tags for vpc-endpoint-connections and vpc-endpoint-service-permissions.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.43.0](service/sagemaker/CHANGELOG.md#v1430-2022-09-15)
+  * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying Hyperband strategy for tuning jobs, which uses a multi-fidelity based tuning strategy to stop underperforming hyperparameter configurations early.
+
+# Release (2022-09-14)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/feature/rds/auth`: [v1.2.0](feature/rds/auth/CHANGELOG.md#v120-2022-09-14)
+  * **Feature**: Updated `BuildAuthToken` to validate the provided endpoint contains a port.
+* `github.com/aws/aws-sdk-go-v2/internal/v4a`: [v1.0.13](internal/v4a/CHANGELOG.md#v1013-2022-09-14)
+  * **Bug Fix**: Fixes an issue where an error from an underlying SigV4 credential provider would not be surfaced from the SigV4a credential provider. Contribution by [sakthipriyan-aqfer](https://github.com/sakthipriyan-aqfer).
+* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.18.0](service/acmpca/CHANGELOG.md#v1180-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.7.0](service/amplifyuibuilder/CHANGELOG.md#v170-2022-09-14)
+  * **Feature**: Amplify Studio UIBuilder is introducing forms functionality. Forms can be configured from Data Store models, JSON, or from scratch. These forms can then be generated in your project and used like any other React components.
+* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.14.0](service/appconfig/CHANGELOG.md#v1140-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.17.0](service/appflow/CHANGELOG.md#v1170-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.16.0](service/appmesh/CHANGELOG.md#v1160-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.17.0](service/cloudtrail/CHANGELOG.md#v1170-2022-09-14)
+  * **Feature**: This release adds the CloudTrail GetChannel and ListChannels APIs to allow customers to view their ServiceLinkedChannel configurations.
+* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.12.0](service/codestar/CHANGELOG.md#v1120-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.12.0](service/codestarnotifications/CHANGELOG.md#v1120-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.14.0](service/cognitoidentity/CHANGELOG.md#v1140-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.20.0](service/cognitoidentityprovider/CHANGELOG.md#v1200-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.26.0](service/configservice/CHANGELOG.md#v1260-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.30.0](service/connect/CHANGELOG.md#v1300-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.12.0](service/connectparticipant/CHANGELOG.md#v1120-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.20.0](service/costexplorer/CHANGELOG.md#v1200-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.19.0](service/customerprofiles/CHANGELOG.md#v1190-2022-09-14)
+  * **Feature**: Added isUnstructured in the response for the Customer Profiles Integration APIs
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.16.0](service/dataexchange/CHANGELOG.md#v1160-2022-09-14)
+  * **Feature**: Documentation updates for AWS Data Exchange.
+* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.8.0](service/drs/CHANGELOG.md#v180-2022-09-14)
+  * **Feature**: Fixed the data type of lagDuration that is returned in the Describe Source Server API
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.55.0](service/ec2/CHANGELOG.md#v1550-2022-09-14)
+  * **Feature**: Documentation updates for Amazon EC2.
+  * **Feature**: This release adds support to send VPC Flow Logs to kinesis-data-firehose as a new destination type
+  * **Feature**: This update introduces API operations to manage and create local gateway route tables, CoIP pools, and VIF group associations.
+  * **Feature**: Two new features for local gateway route tables: support for static routes targeting Elastic Network Interfaces and direct VPC routing.
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.22.0](service/eks/CHANGELOG.md#v1220-2022-09-14)
+  * **Feature**: Adding support for local Amazon EKS clusters on Outposts
+  * **Feature**: Adds support for the EKS Addons ResolveConflicts "preserve" flag. Also adds a new update failed status for EKS Addons.
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.14.0](service/emrcontainers/CHANGELOG.md#v1140-2022-09-14)
+  * **Feature**: EMR on EKS now allows running Spark SQL using the newly introduced Spark SQL Job Driver in the Start Job Run API
+* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.2.0](service/emrserverless/CHANGELOG.md#v120-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.9.0](service/evidently/CHANGELOG.md#v190-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+  * **Feature**: This release adds support for the client-side evaluation feature, powered by AWS AppConfig.
+* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.13.0](service/finspacedata/CHANGELOG.md#v1130-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.13.0](service/fis/CHANGELOG.md#v1130-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.12](service/fsx/CHANGELOG.md#v12412-2022-09-14)
+  * **Documentation**: Documentation update for Amazon FSx.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.31.0](service/glue/CHANGELOG.md#v1310-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.18.0](service/greengrassv2/CHANGELOG.md#v1180-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.3](service/identitystore/CHANGELOG.md#v1153-2022-09-14)
+  * **Documentation**: Documentation updates for the Identity Store CLI Reference.
+* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.20.0](service/imagebuilder/CHANGELOG.md#v1200-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.8.0](service/inspector2/CHANGELOG.md#v180-2022-09-14)
+  * **Feature**: This release adds new fields like fixAvailable, fixedInVersion and remediation to the finding model. The requirement to have vulnerablePackages in the finding model has also been removed. The documentation has been updated to reflect these changes.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.29.0](service/iot/CHANGELOG.md#v1290-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.13.0](service/iotanalytics/CHANGELOG.md#v1130-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.14.0](service/iotsecuretunneling/CHANGELOG.md#v1140-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.25.0](service/iotsitewise/CHANGELOG.md#v1250-2022-09-14)
+  * **Feature**: Allow specifying units in Asset Properties
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.34.0](service/kendra/CHANGELOG.md#v1340-2022-09-14)
+  * **Feature**: This release enables our customers to choose the option of Sharepoint 2019 for the on-premise Sharepoint connector.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.24.0](service/lexmodelsv2/CHANGELOG.md#v1240-2022-09-14)
+  * **Feature**: This release adds support for the Composite Slot Type feature in AWS Lex V2. Composite Slot Types will help developers logically group coherent slots and maintain their inter-relationships in runtime conversation.
+* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.15.0](service/lexruntimev2/CHANGELOG.md#v1150-2022-09-14)
+  * **Feature**: This release adds support for the Composite Slot Type feature in AWS Lex V2. Composite Slot Types will help developers logically group coherent slots and maintain their inter-relationships in runtime conversation.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.18.0](service/lookoutmetrics/CHANGELOG.md#v1180-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+  * **Feature**: Releases the dimension value filtering feature to allow customers to define dimension filters for including only a subset of their dataset to be used by LookoutMetrics.
+* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.1.0](service/m2/CHANGELOG.md#v110-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.23.0](service/medialive/CHANGELOG.md#v1230-2022-09-14)
+  * **Feature**: This change exposes API settings which allow Dolby Atmos and Dolby Vision to be used when running a channel using Elemental MediaLive
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.19.0](service/networkfirewall/CHANGELOG.md#v1190-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.15.0](service/pi/CHANGELOG.md#v1150-2022-09-14)
+  * **Feature**: Increases the maximum values of two RDS Performance Insights APIs. The maximum value of the Limit parameter of DimensionGroup is 25. The MaxResult maximum is now 25 for the following APIs: DescribeDimensionKeys, GetResourceMetrics, ListAvailableResourceDimensions, and ListAvailableResourceMetrics.
+* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.17.0](service/pricing/CHANGELOG.md#v1170-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.25.0](service/quicksight/CHANGELOG.md#v1250-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.9](service/redshift/CHANGELOG.md#v1269-2022-09-14)
+  * **Documentation**: This release updates documentation for AQUA features and other description updates.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.22.0](service/route53/CHANGELOG.md#v1220-2022-09-14)
+  * **Feature**: Amazon Route 53 now supports the Middle East (UAE) Region (me-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.10.0](service/route53recoverycluster/CHANGELOG.md#v1100-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.22.0](service/s3control/CHANGELOG.md#v1220-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.42.0](service/sagemaker/CHANGELOG.md#v1420-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+  * **Feature**: SageMaker Hosting now allows customization of ML instance storage volume size, model data download timeout, and inference container startup ping health check timeout for each ProductionVariant in the CreateEndpointConfig API.
+  * **Feature**: This release adds the HyperParameterTuningJob type to the Search API.
+  * **Feature**: This release adds Mode to AutoMLJobConfig.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.14.0](service/sagemakera2iruntime/CHANGELOG.md#v1140-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.0](service/secretsmanager/CHANGELOG.md#v1160-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.14.0](service/servicecatalogappregistry/CHANGELOG.md#v1140-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.14.0](service/sfn/CHANGELOG.md#v1140-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.18.0](service/sns/CHANGELOG.md#v1180-2022-09-14)
+  * **Feature**: Amazon SNS introduces the Data Protection Policy APIs, which enable customers to attach a data protection policy to an SNS topic. This allows topic owners to enable the new message data protection feature to audit and block sensitive data that is exchanged through their topics.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.28.0](service/ssm/CHANGELOG.md#v1280-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+  * **Feature**: This release adds support for Systems Manager State Manager Association tagging.
+* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.14.0](service/timestreamwrite/CHANGELOG.md#v1140-2022-09-14)
+  * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the service API.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.22.0](service/transfer/CHANGELOG.md#v1220-2022-09-14)
+  * **Feature**: This release introduces the ability to have multiple server host keys for any of your Transfer Family servers that use the SFTP protocol.
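+For the `feature/rds/auth` entry above, a minimal sketch of generating an IAM database authentication token with `auth.BuildAuthToken` (the endpoint, region, and database user are placeholders; as of v1.2.0 the endpoint must include a port):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/feature/rds/auth"
+)
+
+func main() {
+	ctx := context.Background()
+
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatalf("load config: %v", err)
+	}
+
+	// BuildAuthToken generates a short-lived IAM authentication token
+	// for an RDS endpoint; the endpoint must be given as "host:port".
+	token, err := auth.BuildAuthToken(ctx,
+		"mydb.cluster-example.eu-central-1.rds.amazonaws.com:5432",
+		"eu-central-1", "db_user", cfg.Credentials)
+	if err != nil {
+		log.Fatalf("build auth token: %v", err)
+	}
+	fmt.Println("token length:", len(token))
+}
+```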
+
+# Release (2022-09-02.2)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.2](service/identitystore/CHANGELOG.md#v1152-2022-09-022)
+  * **Bug Fix**: Reverts a change to the identitystore module so that the MaxResults members of ListGroupMemberships, ListGroupMembershipsForMembers, ListGroups, and ListUsers are correctly generated as pointer types instead of value types
+
+# Release (2022-09-02)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.19.0](service/cognitoidentityprovider/CHANGELOG.md#v1190-2022-09-02)
+  * **Feature**: This release adds a new "AuthSessionValidity" field to the UserPoolClient in Cognito. Application admins can configure this value for their users' authentication duration, which is currently fixed at 3 minutes, up to 15 minutes. Setting this field will also apply to the SMS MFA authentication flow.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.29.0](service/connect/CHANGELOG.md#v1290-2022-09-02)
+  * **Feature**: This release adds search APIs for Routing Profiles and Queues, which can be used to search for those resources within a Connect Instance.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.19.0](service/mediapackage/CHANGELOG.md#v1190-2022-09-02)
+  * **Feature**: Added support for AES_CTR encryption to CMAF origin endpoints
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.41.0](service/sagemaker/CHANGELOG.md#v1410-2022-09-02)
+  * **Feature**: This release enables administrators to attribute user activity and API calls from Studio notebooks, Data Wrangler and Canvas to specific users even when users share the same execution IAM role. ExecutionRoleIdentityConfig at the SageMaker domain level enables this feature.
+
+# Release (2022-09-01)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.16.11](service/codegurureviewer/CHANGELOG.md#v11611-2022-09-01)
+  * **Documentation**: Documentation updates to fix formatting issues in CLI and SDK documentation.
+* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.0.0](service/controltower/CHANGELOG.md#v100-2022-09-01)
+  * **Release**: New AWS service client module
+  * **Feature**: This release contains the first SDK for AWS Control Tower. It introduces a new set of APIs: EnableControl, DisableControl, GetControlOperation, and ListEnabledControls.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.21.10](service/route53/CHANGELOG.md#v12110-2022-09-01)
+  * **Documentation**: Documentation updates for Amazon Route 53.
+
+# Release (2022-08-31)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.20.2](service/cloudfront/CHANGELOG.md#v1202-2022-08-31)
+  * **Documentation**: Update API documentation for CloudFront origin access control (OAC)
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.0](service/identitystore/CHANGELOG.md#v1150-2022-08-31)
+  * **Feature**: Expand the IdentityStore API to support Create, Read, Update, Delete and Get operations for User, Group and GroupMembership resources.
+* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.13.0](service/iotthingsgraph/CHANGELOG.md#v1130-2022-08-31)
+  * **Feature**: This release deprecates all APIs of the ThingsGraph service
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.18.0](service/ivs/CHANGELOG.md#v1180-2022-08-31)
+  * **Feature**: IVS Merge Fragmented Streams. This release adds support for the recordingReconnectWindow field in IVS recordingConfigurations. For more information see https://docs.aws.amazon.com/ivs/latest/APIReference/Welcome.html
+* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.12](service/rdsdata/CHANGELOG.md#v11212-2022-08-31)
+  * **Documentation**: Documentation updates for the RDS Data API
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.40.0](service/sagemaker/CHANGELOG.md#v1400-2022-08-31)
+  * **Feature**: SageMaker Inference Recommender now accepts the Inference Recommender fields Domain, Task, Framework, SamplePayloadUrl, SupportedContentTypes, and SupportedInstanceTypes directly in the CreateInferenceRecommendationsJob API through ContainerConfig
+
+# Release (2022-08-30)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.17.0](service/greengrassv2/CHANGELOG.md#v1170-2022-08-30)
+  * **Feature**: Adds topologyFilter to ListInstalledComponentsRequest, which allows filtering components by ROOT or ALL (including root and dependency components). Adds lastStatusChangeTimestamp to the ListInstalledComponents response to show the last time a component changed state on a device.
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.15](service/identitystore/CHANGELOG.md#v11415-2022-08-30)
+  * **Documentation**: Documentation updates for the Identity Store CLI Reference.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.15.0](service/lookoutequipment/CHANGELOG.md#v1150-2022-08-30)
+  * **Feature**: This release adds new APIs for providing labels.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.23.0](service/macie2/CHANGELOG.md#v1230-2022-08-30)
+  * **Feature**: This release of the Amazon Macie API adds support for using allow lists to define specific text and text patterns to ignore when inspecting data sources for sensitive data.
+* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.19](service/sso/CHANGELOG.md#v11119-2022-08-30)
+  * **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference.
+* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.7](service/ssoadmin/CHANGELOG.md#v1157-2022-08-30)
+  * **Documentation**: Documentation updates for the AWS IAM Identity Center CLI Reference.
+
+# Release (2022-08-29)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.9](service/fsx/CHANGELOG.md#v1249-2022-08-29)
+  * **Documentation**: Documentation updates for Amazon FSx for NetApp ONTAP.
+* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.11.0](service/voiceid/CHANGELOG.md#v1110-2022-08-29)
+  * **Feature**: Amazon Connect Voice ID now detects voice spoofing. When a prospective fraudster tries to spoof caller audio using audio playback or synthesized speech, Voice ID will return a risk score and outcome to indicate how likely it is that the voice is spoofed.
+
+# Release (2022-08-26)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.18.0](service/mediapackage/CHANGELOG.md#v1180-2022-08-26)
+  * **Feature**: This release adds Ads AdTriggers and AdsOnDeliveryRestrictions to describe calls for CMAF endpoints on MediaPackage.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.25.1](service/rds/CHANGELOG.md#v1251-2022-08-26)
+  * **Documentation**: Removes support for RDS Custom from DBInstanceClass in ModifyDBInstance
+
+# Release (2022-08-25)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.13](service/elasticloadbalancingv2/CHANGELOG.md#v11813-2022-08-25)
+  * **Documentation**: Documentation updates for ELBv2. Gateway Load Balancer now supports Configurable Flow Stickiness, enabling you to configure the hashing used to maintain stickiness of flows to a specific target appliance.
+* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.15.0](service/gamelift/CHANGELOG.md#v1150-2022-08-25)
+  * **Feature**: This release adds support for eight EC2 local zones as fleet locations; Atlanta, Chicago, Dallas, Denver, Houston, Kansas City (us-east-1-mci-1a), Los Angeles, and Phoenix. It also adds support for the C5d, C6a, C6i, and R5d EC2 instance families.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.22.0](service/iotwireless/CHANGELOG.md#v1220-2022-08-25)
+  * **Feature**: This release includes a new feature for customers to enable LoRa gateways to send out beacons for Class B devices, and an option to select one or more gateways for Class C devices when sending LoRaWAN downlink messages.
+* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.13](service/ivschat/CHANGELOG.md#v1013-2022-08-25)
+  * **Documentation**: Documentation change for the IVS Chat API Reference. Doc-only update to add a paragraph on ARNs to the Welcome section.
+* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.8.0](service/panorama/CHANGELOG.md#v180-2022-08-25)
+  * **Feature**: Support sorting and filtering in the ListDevices API, and add more fields to device listings and single device details
+* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.13.0](service/ssooidc/CHANGELOG.md#v1130-2022-08-25)
+  * **Feature**: Updated the required request parameters on IAM Identity Center's OIDC CreateToken action (see the sketch below).
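+For the `ssooidc` entry above, a minimal sketch of the device-code token exchange with `CreateToken` (the client ID, client secret, and device code are placeholders that would come from prior RegisterClient and StartDeviceAuthorization calls):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
+)
+
+func main() {
+	ctx := context.Background()
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatalf("load config: %v", err)
+	}
+	client := ssooidc.NewFromConfig(cfg)
+
+	// Exchange a device code for an access token using the
+	// OAuth 2.0 device-code grant type.
+	out, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
+		ClientId:     aws.String("example-client-id"),
+		ClientSecret: aws.String("example-client-secret"),
+		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
+		DeviceCode:   aws.String("example-device-code"),
+	})
+	if err != nil {
+		log.Fatalf("create token: %v", err)
+	}
+	fmt.Println("access token expires in:", out.ExpiresIn, "seconds")
+}
+```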
+
+# Release (2022-08-24)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.20.0](service/cloudfront/CHANGELOG.md#v1200-2022-08-24)
+  * **Feature**: Adds support for CloudFront origin access control (OAC), making it possible to restrict public access to S3 bucket origins in all AWS Regions, those with SSE-KMS, and more.
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.25.0](service/configservice/CHANGELOG.md#v1250-2022-08-24)
+  * **Feature**: AWS Config now supports ConformancePackTemplate documents in SSM Docs for the deployment and update of conformance packs.
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.14](service/iam/CHANGELOG.md#v11814-2022-08-24)
+  * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM).
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.17.1](service/ivs/CHANGELOG.md#v1171-2022-08-24)
+  * **Documentation**: Documentation change for the IVS API Reference - doc-only update to the type field description for the CreateChannel and UpdateChannel actions and for the Channel data type. Also added an Amazon Resource Names (ARNs) paragraph to the Welcome section.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.24.0](service/quicksight/CHANGELOG.md#v1240-2022-08-24)
+  * **Feature**: Added a new optional property DashboardVisual under the ExperienceConfiguration parameter of the GenerateEmbedUrlForAnonymousUser and GenerateEmbedUrlForRegisteredUser API operations. This supports embedding of specific visuals in QuickSight dashboards.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.21.5](service/transfer/CHANGELOG.md#v1215-2022-08-24)
+  * **Documentation**: Documentation updates for AWS Transfer Family
+
+# Release (2022-08-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.25.0](service/rds/CHANGELOG.md#v1250-2022-08-23)
+  * **Feature**: RDS for Oracle supports Oracle Data Guard switchover and read replica backups.
+* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.5](service/ssoadmin/CHANGELOG.md#v1155-2022-08-23)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+
+# Release (2022-08-22)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.19.5](service/docdb/CHANGELOG.md#v1195-2022-08-22)
+  * **Documentation**: Documentation update for volume clone
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.54.0](service/ec2/CHANGELOG.md#v1540-2022-08-22)
+  * **Feature**: R6a instances are powered by 3rd generation AMD EPYC (Milan) processors delivering an all-core turbo frequency of 3.6 GHz. C6id, M6id, and R6id instances are powered by 3rd generation Intel Xeon Scalable processors (Ice Lake) delivering an all-core turbo frequency of 3.5 GHz.
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.23.0](service/forecast/CHANGELOG.md#v1230-2022-08-22)
+  * **Feature**: Releases the What-If Analysis APIs and updates the ARN regex pattern to be stricter, in accordance with security recommendations
+* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.12.0](service/forecastquery/CHANGELOG.md#v1120-2022-08-22)
+  * **Feature**: Releases the What-If Analysis APIs
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.24.0](service/iotsitewise/CHANGELOG.md#v1240-2022-08-22)
+  * **Feature**: Enable non-unique asset names under different hierarchies
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.23.0](service/lexmodelsv2/CHANGELOG.md#v1230-2022-08-22)
+  * **Feature**: This release introduces a new feature to stop a running BotRecommendation job for the Automated Chatbot Designer.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.23.0](service/securityhub/CHANGELOG.md#v1230-2022-08-22)
+  * **Feature**: Added new resource details objects to ASFF, including resources for AwsBackupBackupVault, AwsBackupBackupPlan and AwsBackupRecoveryPoint. Added FixAvailable, FixedInVersion and Remediation to Vulnerability.
+* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.0.0](service/supportapp/CHANGELOG.md#v100-2022-08-22)
+  * **Release**: New AWS service client module
+  * **Feature**: This is the initial SDK release for the AWS Support App in Slack.
+
+# Release (2022-08-19)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.28.0](service/connect/CHANGELOG.md#v1280-2022-08-19)
+  * **Feature**: This release adds the SearchSecurityProfiles API, which can be used to search for Security Profile resources within a Connect Instance.
+* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.12](service/ivschat/CHANGELOG.md#v1012-2022-08-19)
+  * **Documentation**: Documentation change for the IVS Chat API Reference - doc-only update to change the text/description for the tags field.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.33.0](service/kendra/CHANGELOG.md#v1330-2022-08-19)
+  * **Feature**: This release adds support for a new authentication type - Personal Access Token (PAT) - for the Confluence Server connector.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.17.0](service/lookoutmetrics/CHANGELOG.md#v1170-2022-08-19)
+  * **Feature**: This release makes the GetDataQualityMetrics API publicly available.
+
+# Release (2022-08-18)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.1.0](service/chimesdkmediapipelines/CHANGELOG.md#v110-2022-08-18)
+  * **Feature**: The Amazon Chime SDK now supports live streaming of real-time video from Amazon Chime SDK sessions to streaming platforms such as Amazon IVS and Amazon Elemental MediaLive. We have also added support for concatenation to create a single media capture file.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.21.0](service/cloudwatch/CHANGELOG.md#v1210-2022-08-18)
+  * **Feature**: Add support for managed Contributor Insights Rules
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.18.4](service/cognitoidentityprovider/CHANGELOG.md#v1184-2022-08-18)
+  * **Documentation**: This change is being made simply to fix the public documentation based on the models. We have included the PasswordChange and ResendCode events, along with the Pass, Fail and InProgress status. We have removed the Success and Failure status, which are never returned by our APIs.
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.16.0](service/dynamodb/CHANGELOG.md#v1160-2022-08-18)
+  * **Feature**: This release adds support for importing data from S3 into a new DynamoDB table
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.53.0](service/ec2/CHANGELOG.md#v1530-2022-08-18)
+  * **Feature**: This release adds support for VPN log options, a new feature allowing S2S VPN connections to send IKE activity logs to CloudWatch Logs
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.15.0](service/networkmanager/CHANGELOG.md#v1150-2022-08-18)
+  * **Feature**: Add the TransitGatewayPeeringAttachmentId property to the TransitGatewayPeering model
+
+# Release (2022-08-17)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.15.0](service/appmesh/CHANGELOG.md#v1150-2022-08-17)
+  * **Feature**: AWS App Mesh release to support the Multiple Listener and Access Log Format features
+* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.1.0](service/connectcampaigns/CHANGELOG.md#v110-2022-08-17)
+  * **Feature**: Updated exceptions for the Amazon Connect Outbound Campaign APIs.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.32.0](service/kendra/CHANGELOG.md#v1320-2022-08-17)
+  * **Feature**: This release adds the Zendesk connector (which allows you to specify the Zendesk SaaS platform as a data source) and proxy support for the Sharepoint and Confluence Server connectors (which allows you to specify the proxy configuration if a proxy is required to connect to your Sharepoint/Confluence Server as a data source).
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.17.0](service/lakeformation/CHANGELOG.md#v1170-2022-08-17)
+  * **Feature**: This release adds support for a new API, "AssumeDecoratedRoleWithSAML", and updates the corresponding documentation.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.24.0](service/lambda/CHANGELOG.md#v1240-2022-08-17)
+  * **Feature**: Added support for customization of the Consumer Group ID for MSK and Kafka Event Source Mappings.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.22.0](service/lexmodelsv2/CHANGELOG.md#v1220-2022-08-17)
+  * **Feature**: This release introduces support for enhanced conversation design with the ability to define custom conversation flows with conditional branching and new bot responses.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.24.0](service/rds/CHANGELOG.md#v1240-2022-08-17)
+  * **Feature**: Adds support for Internet Protocol Version 6 (IPv6) for RDS Aurora database clusters.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.18](service/secretsmanager/CHANGELOG.md#v11518-2022-08-17)
+  * **Documentation**: Documentation updates for Secrets Manager.
+
+# Release (2022-08-16)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.20.0](service/rekognition/CHANGELOG.md#v1200-2022-08-16)
+  * **Feature**: This release adds APIs which support copying an Amazon Rekognition Custom Labels model and managing project policies across AWS accounts.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.12](service/servicecatalog/CHANGELOG.md#v11412-2022-08-16)
+  * **Documentation**: Documentation updates for Service Catalog
+
+# Release (2022-08-15)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.19.0](service/cloudfront/CHANGELOG.md#v1190-2022-08-15)
+  * **Feature**: Adds HTTP/3 support to distributions
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.13](service/identitystore/CHANGELOG.md#v11413-2022-08-15)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.17](service/sso/CHANGELOG.md#v11117-2022-08-15)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.9.0](service/wisdom/CHANGELOG.md#v190-2022-08-15)
+  * **Feature**: This release introduces a new API, PutFeedback, that allows submitting feedback to Wisdom on content relevance.
+
+# Release (2022-08-14)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.17.0](config/CHANGELOG.md#v1170-2022-08-14)
+  * **Feature**: Add an alternative mechanism for determining the user's `$HOME` or `%USERPROFILE%` location when the environment variables are not present.
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.15.0](service/amp/CHANGELOG.md#v1150-2022-08-14)
+  * **Feature**: This release adds log APIs that allow customers to manage logging for their Amazon Managed Service for Prometheus workspaces.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.11.0](service/chimesdkmessaging/CHANGELOG.md#v1110-2022-08-14)
+  * **Feature**: The Amazon Chime SDK now supports channels with up to one million participants with elastic channels.
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.17.0](service/ivs/CHANGELOG.md#v1170-2022-08-14)
+  * **Feature**: Updates various list API MaxResults ranges
+* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.12.0](service/personalizeruntime/CHANGELOG.md#v1120-2022-08-14)
+  * **Feature**: This release provides support for promotions in AWS Personalize runtime.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.6](service/rds/CHANGELOG.md#v1236-2022-08-14)
+  * **Documentation**: Adds support for RDS Custom to DBInstanceClass in ModifyDBInstance
+
+# Release (2022-08-11)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.0.0](service/backupstorage/CHANGELOG.md#v100-2022-08-11)
+  * **Release**: New AWS service client module
+  * **Feature**: This is the first public release of AWS Backup Storage. We are exposing some previously-internal APIs for use by external services. These APIs are not meant to be used directly by customers.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.30.0](service/glue/CHANGELOG.md#v1300-2022-08-11)
+  * **Feature**: Add support for Python 3.9 AWS Glue Python Shell jobs
+* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.0.0](service/privatenetworks/CHANGELOG.md#v100-2022-08-11)
+  * **Release**: New AWS service client module
+  * **Feature**: This is the initial SDK release for AWS Private 5G. AWS Private 5G is a managed service that makes it easy to deploy, operate, and scale your own private mobile network at your on-premises location.
+
+# Release (2022-08-10)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.16.0](config/CHANGELOG.md#v1160-2022-08-10)
+  * **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
+* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.12.0](service/dlm/CHANGELOG.md#v1120-2022-08-10)
+  * **Feature**: This release adds support for excluding specific data (non-boot) volumes from multi-volume snapshot sets created by snapshot lifecycle policies
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.52.0](service/ec2/CHANGELOG.md#v1520-2022-08-10)
+  * **Feature**: This release adds support for excluding specific data (non-root) volumes from multi-volume snapshot sets created from instances.
+
+# Release (2022-08-09)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.20.0](service/cloudwatch/CHANGELOG.md#v1200-2022-08-09)
+  * **Feature**: Various quota increases related to dimensions and custom metrics
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.18.0](service/location/CHANGELOG.md#v1180-2022-08-09)
+  * **Feature**: Amazon Location Service now allows circular geofences in BatchPutGeofence, PutGeofence, and GetGeofence APIs.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.39.0](service/sagemaker/CHANGELOG.md#v1390-2022-08-09)
+  * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying multiple alternate EC2 instance types to make tuning jobs more robust when the preferred instance type is not available due to insufficient capacity.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.13.0](service/sagemakera2iruntime/CHANGELOG.md#v1130-2022-08-09)
+  * **Feature**: Fix bug with parsing ISO-8601 CreationTime in Java SDK in DescribeHumanLoop
+
+# Release (2022-08-08)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.16.9
+  * **Bug Fix**: aws/signer/v4: Fixes a panic in SDK's handling of endpoint URLs with ports by correcting how URL path is parsed from opaque URLs. Fixes [#1294](https://github.com/aws/aws-sdk-go-v2/issues/1294).
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.29.0](service/glue/CHANGELOG.md#v1290-2022-08-08)
+  * **Feature**: Add an option to run non-urgent or non-time sensitive Glue Jobs on spare capacity
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.10](service/identitystore/CHANGELOG.md#v11410-2022-08-08)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.21.0](service/iotwireless/CHANGELOG.md#v1210-2022-08-08)
+  * **Feature**: AWS IoT Wireless release support for Sidewalk data reliability.
+* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.17.0](service/pinpoint/CHANGELOG.md#v1170-2022-08-08)
+  * **Feature**: Adds support for Advance Quiet Time in Journeys. Adds RefreshOnSegmentUpdate and WaitForQuietTime to JourneyResponse.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.23.2](service/quicksight/CHANGELOG.md#v1232-2022-08-08)
+  * **Documentation**: A series of documentation updates to the QuickSight API reference.
+* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.14](service/sso/CHANGELOG.md#v11114-2022-08-08)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.2](service/ssoadmin/CHANGELOG.md#v1152-2022-08-08)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.12.12](service/ssooidc/CHANGELOG.md#v11212-2022-08-08)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+
+# Release (2022-08-04)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.13.0](service/chimesdkmeetings/CHANGELOG.md#v1130-2022-08-04)
+  * **Feature**: Adds support for Tags on Amazon Chime SDK WebRTC sessions
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.24.0](service/configservice/CHANGELOG.md#v1240-2022-08-04)
+  * **Feature**: Add resourceType enums for Athena, GlobalAccelerator, Detective and EC2 types
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.3](service/databasemigrationservice/CHANGELOG.md#v1213-2022-08-04)
+  * **Documentation**: Documentation updates for Database Migration Service (DMS).
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.28.0](service/iot/CHANGELOG.md#v1280-2022-08-04)
+  * **Feature**: This release adds support for attaching a provisioning template to a CA certificate for the JITP function, so customers no longer have to hardcode a roleArn and templateBody when registering a CA certificate to enable JITP.
+
+# Release (2022-08-03)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.18.0](service/cognitoidentityprovider/CHANGELOG.md#v1180-2022-08-03)
+  * **Feature**: Add a new exception type, ForbiddenException, that is returned when a request is not allowed (see the sketch below).
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.22.0](service/wafv2/CHANGELOG.md#v1220-2022-08-03)
+  * **Feature**: You can now associate an AWS WAF web ACL with an Amazon Cognito user pool.
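+For the `cognitoidentityprovider` entry above, a minimal sketch of handling the new typed ForbiddenException with `errors.As` (the user pool ID is a placeholder; which calls actually return this exception depends on the service):
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider"
+	"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types"
+)
+
+func main() {
+	ctx := context.Background()
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatalf("load config: %v", err)
+	}
+	client := cognitoidentityprovider.NewFromConfig(cfg)
+
+	_, err = client.DescribeUserPool(ctx, &cognitoidentityprovider.DescribeUserPoolInput{
+		UserPoolId: aws.String("eu-central-1_EXAMPLE"),
+	})
+
+	// A disallowed request surfaces as a typed ForbiddenException,
+	// which the standard errors.As unwrapping can detect.
+	var forbidden *types.ForbiddenException
+	if errors.As(err, &forbidden) {
+		log.Fatalf("request not allowed: %s", forbidden.ErrorMessage())
+	}
+	if err != nil {
+		log.Fatalf("describe user pool: %v", err)
+	}
+	fmt.Println("user pool described")
+}
+```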
+
+# Release (2022-08-02)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.0.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v100-2022-08-02)
+  * **Release**: New AWS service client module
+  * **Feature**: This release supports user based subscription for Microsoft Visual Studio Professional and Enterprise on EC2.
+* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.21.0](service/personalize/CHANGELOG.md#v1210-2022-08-02)
+  * **Feature**: This release adds support for incremental bulk ingestion for the Personalize CreateDatasetImportJob API.
+
+# Release (2022-08-01)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.23.1](service/configservice/CHANGELOG.md#v1231-2022-08-01)
+  * **Documentation**: Documentation update for PutConfigRule and PutOrganizationConfigRule
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.22.0](service/workspaces/CHANGELOG.md#v1220-2022-08-01)
+  * **Feature**: This release introduces ModifySamlProperties, a new API that allows control of SAML properties associated with a WorkSpaces directory. The DescribeWorkspaceDirectories API will now additionally return SAML properties in its responses.
+
+# Release (2022-07-29)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.51.0](service/ec2/CHANGELOG.md#v1510-2022-07-29)
+  * **Feature**: Documentation updates for Amazon EC2.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.4](service/fsx/CHANGELOG.md#v1244-2022-07-29)
+  * **Documentation**: Documentation updates for Amazon FSx
+* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.17.0](service/shield/CHANGELOG.md#v1170-2022-07-29)
+  * **Feature**: AWS Shield Advanced now supports filtering for ListProtections and ListProtectionGroups.
+
+# Release (2022-07-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.50.1](service/ec2/CHANGELOG.md#v1501-2022-07-28)
+  * **Documentation**: Documentation updates for VM Import/Export.
+* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.16.0](service/elasticsearchservice/CHANGELOG.md#v1160-2022-07-28)
+  * **Feature**: This release adds support for gp3 EBS (Elastic Block Store) storage.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.14.0](service/lookoutvision/CHANGELOG.md#v1140-2022-07-28)
+  * **Feature**: This release introduces support for image segmentation models and updates CPU accelerator options for models hosted on edge devices.
+* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.10.0](service/opensearch/CHANGELOG.md#v1100-2022-07-28)
+  * **Feature**: This release adds support for gp3 EBS (Elastic Block Store) storage.
+
+# Release (2022-07-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.20.0](service/auditmanager/CHANGELOG.md#v1200-2022-07-27)
+  * **Feature**: This release adds an exceeded quota exception to several APIs. We added a ServiceQuotaExceededException for the following operations: CreateAssessment, CreateControl, CreateAssessmentFramework, and UpdateAssessmentStatus.
+* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.21.0](service/chime/CHANGELOG.md#v1210-2022-07-27)
+  * **Feature**: Chime VoiceConnector will now support ValidateE911Address, which will allow customers to prevalidate the addresses included in their SIP invites for emergency calling
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.23.0](service/configservice/CHANGELOG.md#v1230-2022-07-27)
+  * **Feature**: This release adds the ListConformancePackComplianceScores API to support the new compliance score feature, which provides a percentage of the number of compliant rule-resource combinations in a conformance pack compared to the number of total possible rule-resource combinations in the conformance pack.
+* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.14.0](service/globalaccelerator/CHANGELOG.md#v1140-2022-07-27)
+  * **Feature**: Global Accelerator now supports dual-stack accelerators, enabling support for IPv4 and IPv6 traffic.
+* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.13.0](service/marketplacecatalog/CHANGELOG.md#v1130-2022-07-27)
+  * **Feature**: The SDK for the StartChangeSet API will now automatically set and use an idempotency token in the ClientRequestToken request parameter if the customer does not provide it.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.17.0](service/polly/CHANGELOG.md#v1170-2022-07-27)
+  * **Feature**: Amazon Polly adds a new English and Hindi voice, Kajal. Kajal is available as a Neural voice only.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.27.5](service/ssm/CHANGELOG.md#v1275-2022-07-27)
+  * **Documentation**: Adding doc updates for OpsCenter support in Service Setting actions.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.21.0](service/workspaces/CHANGELOG.md#v1210-2022-07-27)
+  * **Feature**: Added the CreateWorkspaceImage API to create a new WorkSpace image from an existing WorkSpace.
+
+# Release (2022-07-26)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.15.0](service/appsync/CHANGELOG.md#v1150-2022-07-26)
+  * **Feature**: Adds support for a new API to evaluate mapping templates with mock data, allowing you to remotely unit test your AppSync resolvers and functions.
+* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.16.0](service/detective/CHANGELOG.md#v1160-2022-07-26)
+  * **Feature**: Added the ability to get data source package information for the behavior graph. Graph administrators can now start (or stop) optional datasources on the behavior graph.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.15.0](service/guardduty/CHANGELOG.md#v1150-2022-07-26)
+  * **Feature**: Amazon GuardDuty introduces a new Malware Protection feature that triggers a malware scan on selected EC2 instance resources after the service detects potentially malicious activity.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.13.0](service/lookoutvision/CHANGELOG.md#v1130-2022-07-26)
+  * **Feature**: This release introduces support for the automatic scaling of inference units used by Amazon Lookout for Vision models.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.22.0](service/macie2/CHANGELOG.md#v1220-2022-07-26)
+  * **Feature**: This release adds support for retrieving (revealing) sample occurrences of sensitive data that Amazon Macie detects and reports in findings.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.1](service/rds/CHANGELOG.md#v1231-2022-07-26)
+  * **Documentation**: Adds support for using RDS Proxies with RDS for MariaDB databases.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.19.0](service/rekognition/CHANGELOG.md#v1190-2022-07-26)
+  * **Feature**: This release introduces support for the automatic scaling of inference units used by Amazon Rekognition Custom Labels models.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.22.3](service/securityhub/CHANGELOG.md#v1223-2022-07-26)
+  * **Documentation**: Documentation updates for AWS Security Hub
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.21.0](service/transfer/CHANGELOG.md#v1210-2022-07-26)
+  * **Feature**: AWS Transfer Family now supports Applicability Statement 2 (AS2), a network protocol used for the secure and reliable transfer of critical Business-to-Business (B2B) data over the public internet using HTTP/HTTPS as the transport mechanism.
+
+# Release (2022-07-25)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.23.6](service/autoscaling/CHANGELOG.md#v1236-2022-07-25)
+  * **Documentation**: Documentation update for Amazon EC2 Auto Scaling.
+
+# Release (2022-07-22)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.7.0](service/account/CHANGELOG.md#v170-2022-07-22)
+  * **Feature**: This release enables customers to manage the primary contact information for their AWS accounts. For more information, see https://docs.aws.amazon.com/accounts/latest/reference/API_Operations.html
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.50.0](service/ec2/CHANGELOG.md#v1500-2022-07-22)
+  * **Feature**: Added support for EC2 M1 Mac instances. For more information, please visit aws.amazon.com/mac.
+* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.15.0](service/iotdeviceadvisor/CHANGELOG.md#v1150-2022-07-22)
+  * **Feature**: Added a new service feature (early access only) - Long Duration Test, where customers can test an IoT device to observe how it behaves when the device is in operation for a longer period.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.22.0](service/medialive/CHANGELOG.md#v1220-2022-07-22)
+  * **Feature**: Link devices now support remote rebooting. Link devices now support maintenance windows. Maintenance windows allow a Link device to install software updates without stopping the MediaLive channel. The channel will experience a brief loss of input from the device while updates are installed.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.0](service/rds/CHANGELOG.md#v1230-2022-07-22)
+  * **Feature**: This release adds the "ModifyActivityStream" API with support for audit policy state locking and unlocking.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.21.0](service/transcribe/CHANGELOG.md#v1210-2022-07-22)
+ * **Feature**: Remove unsupported language codes for StartTranscriptionJob and update VocabularyFileUri for UpdateMedicalVocabulary
+
+# Release (2022-07-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.18.0](service/athena/CHANGELOG.md#v1180-2022-07-21)
+ * **Feature**: This feature allows customers to retrieve runtime statistics for completed queries
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.19.0](service/cloudwatch/CHANGELOG.md#v1190-2022-07-21)
+ * **Feature**: Adding support for the suppression of Composite Alarm actions
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.1](service/databasemigrationservice/CHANGELOG.md#v1211-2022-07-21)
+ * **Documentation**: Documentation updates for Database Migration Service (DMS).
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.19.0](service/docdb/CHANGELOG.md#v1190-2022-07-21)
+ * **Feature**: Enable copy-on-write restore type
+* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.14.0](service/ec2instanceconnect/CHANGELOG.md#v1140-2022-07-21)
+ * **Feature**: This release includes a new exception type "EC2InstanceUnavailableException" for SendSSHPublicKey and SendSerialConsoleSSHPublicKey APIs (see the Go sketch after the 2022-07-20 notes below).
+* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.20.0](service/frauddetector/CHANGELOG.md#v1200-2022-07-21)
+ * **Feature**: The release introduces Account Takeover Insights (ATI) model. The ATI model detects fraud relating to account takeover. This release also adds support for new variable types: ARE_CREDENTIALS_VALID and SESSION_ID and adds new structures to Model Version APIs.
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.23.0](service/iotsitewise/CHANGELOG.md#v1230-2022-07-21)
+ * **Feature**: Added asynchronous API to ingest bulk historical and current data into IoT SiteWise.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.31.0](service/kendra/CHANGELOG.md#v1310-2022-07-21)
+ * **Feature**: Amazon Kendra now provides OAuth2 support for SharePoint Online. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.18.0](service/networkfirewall/CHANGELOG.md#v1180-2022-07-21)
+ * **Feature**: Network Firewall now supports referencing dynamic IP sets from stateful rule groups, for IP sets stored in Amazon VPC prefix lists.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.22.1](service/rds/CHANGELOG.md#v1221-2022-07-21)
+ * **Documentation**: Adds support for creating an RDS Proxy for an RDS for MariaDB database.
+
+# Release (2022-07-20)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.11](service/acmpca/CHANGELOG.md#v11711-2022-07-20)
+ * **Documentation**: AWS Certificate Manager (ACM) Private Certificate Authority (PCA) documentation updates
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.27.0](service/iot/CHANGELOG.md#v1270-2022-07-20)
+ * **Feature**: GA release of the ability to enable/disable IoT Fleet Indexing for Device Defender and Named Shadow information, and to search them through IoT Fleet Indexing APIs. This includes Named Shadow Selection as a part of the UpdateIndexingConfiguration API.
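+
+A usage sketch for the 2022-07-21 `ec2instanceconnect` entry above: the new "EC2InstanceUnavailableException" surfaces as a modeled error that can be matched with `errors.As`, the standard aws-sdk-go-v2 pattern. This is a minimal, untested sketch; the `types.EC2InstanceUnavailableException` name follows the SDK's usual code-generation conventions, and the instance ID and key material are placeholders.
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect"
+	"github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := ec2instanceconnect.NewFromConfig(cfg)
+
+	_, err = client.SendSSHPublicKey(context.TODO(), &ec2instanceconnect.SendSSHPublicKeyInput{
+		InstanceId:     aws.String("i-0123456789abcdef0"), // placeholder
+		InstanceOSUser: aws.String("ec2-user"),
+		SSHPublicKey:   aws.String("ssh-ed25519 AAAA..."), // placeholder
+	})
+
+	// The new modeled error distinguishes "instance cannot accept the key
+	// right now" from other failures, so callers can retry.
+	var unavailable *types.EC2InstanceUnavailableException
+	if errors.As(err, &unavailable) {
+		log.Printf("instance unavailable, retry later: %s", unavailable.ErrorMessage())
+	} else if err != nil {
+		log.Fatal(err)
+	}
+}
+```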
+
+# Release (2022-07-19)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.18.0](service/devopsguru/CHANGELOG.md#v1180-2022-07-19)
+ * **Feature**: Added new APIs for the log anomaly detection feature.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.28.1](service/glue/CHANGELOG.md#v1281-2022-07-19)
+ * **Documentation**: Documentation updates for AWS Glue Job Timeout and Autoscaling
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.38.0](service/sagemaker/CHANGELOG.md#v1380-2022-07-19)
+ * **Feature**: Fixed an issue with cross account QueryLineage
+* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.12.0](service/sagemakeredge/CHANGELOG.md#v1120-2022-07-19)
+ * **Feature**: Amazon SageMaker Edge Manager provides a lightweight model deployment feature to deploy machine learning models on requested devices.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.20.0](service/workspaces/CHANGELOG.md#v1200-2022-07-19)
+ * **Feature**: Increased the character limit of the login message from 850 to 2000 characters.
+
+# Release (2022-07-18)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.14.0](service/applicationdiscoveryservice/CHANGELOG.md#v1140-2022-07-18)
+ * **Feature**: Add AWS Agentless Collector details to the GetDiscoverySummary API response
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.49.1](service/ec2/CHANGELOG.md#v1491-2022-07-18)
+ * **Documentation**: Documentation updates for Amazon EC2.
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.22.0](service/elasticache/CHANGELOG.md#v1220-2022-07-18)
+ * **Feature**: Adding AutoMinorVersionUpgrade in the DescribeReplicationGroups API
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.18.0](service/kms/CHANGELOG.md#v1180-2022-07-18)
+ * **Feature**: Added support for the SM2 KeySpec in China Partition Regions
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.17.0](service/mediapackage/CHANGELOG.md#v1170-2022-07-18)
+ * **Feature**: This release adds "IncludeIframeOnlyStream" for Dash endpoints and increases the number of supported video and audio encryption presets for Speke v2
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.37.0](service/sagemaker/CHANGELOG.md#v1370-2022-07-18)
+ * **Feature**: Amazon SageMaker Edge Manager provides a lightweight model deployment feature to deploy machine learning models on requested devices.
+* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.0](service/ssoadmin/CHANGELOG.md#v1150-2022-07-18)
+ * **Feature**: AWS SSO now supports attaching customer managed policies and a permissions boundary to your permission sets. This release adds new API operations to manage and view the customer managed policies and the permissions boundary for a given permission set.
+
+# Release (2022-07-15)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.18.3](service/datasync/CHANGELOG.md#v1183-2022-07-15)
+ * **Documentation**: Documentation updates for AWS DataSync regarding configuring Amazon FSx for ONTAP location security groups and SMB user permissions.
+* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.7.0](service/drs/CHANGELOG.md#v170-2022-07-15)
+ * **Feature**: Changed existing APIs to allow choosing a dynamic volume type for replicating volumes, to reduce costs for customers.
+* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.8.0](service/evidently/CHANGELOG.md#v180-2022-07-15)
+ * **Feature**: This release adds support for the new segmentation feature.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.21.0](service/wafv2/CHANGELOG.md#v1210-2022-07-15)
+ * **Feature**: This SDK release provides customers the ability to add a sensitivity level for WAF SQLI Match Statements.
+
+# Release (2022-07-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.17.0](service/athena/CHANGELOG.md#v1170-2022-07-14)
+ * **Feature**: This release updates data types that contain either QueryExecutionId, NamedQueryId or ExpectedBucketOwner. Ids must be between 1 and 128 characters and contain only non-whitespace characters. ExpectedBucketOwner must be a 12-digit string.
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.13.0](service/codeartifact/CHANGELOG.md#v1130-2022-07-14)
+ * **Feature**: This release introduces Package Origin Controls, a mechanism used to counteract Dependency Confusion attacks. Adds two new APIs, PutPackageOriginConfiguration and DescribePackage, and updates the ListPackage, DescribePackageVersion and ListPackageVersion APIs in support of the feature.
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.22.0](service/configservice/CHANGELOG.md#v1220-2022-07-14)
+ * **Feature**: Update ResourceType enum with values for Route53Resolver, Batch, DMS, Workspaces, Stepfunctions, SageMaker, ElasticLoadBalancingV2, MSK types
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.49.0](service/ec2/CHANGELOG.md#v1490-2022-07-14)
+ * **Feature**: This release adds flow logs for Transit Gateway to allow customers to gain deeper visibility and insights into network traffic through their Transit Gateways.
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.18.0](service/fms/CHANGELOG.md#v1180-2022-07-14)
+ * **Feature**: Adds support for strict ordering in stateful rule groups in Network Firewall policies.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.28.0](service/glue/CHANGELOG.md#v1280-2022-07-14)
+ * **Feature**: This release adds an additional worker type for Glue Streaming jobs.
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.7.0](service/inspector2/CHANGELOG.md#v170-2022-07-14)
+ * **Feature**: This release adds support for Inspector V2 scan configurations through the get and update configuration APIs. Currently this allows configuring ECR automated re-scan duration to lifetime or 180 days or 30 days.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.30.0](service/kendra/CHANGELOG.md#v1300-2022-07-14)
+ * **Feature**: This release adds AccessControlConfigurations which allow you to redefine your document level access control without the need for content re-indexing.
+* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.13.0](service/nimble/CHANGELOG.md#v1130-2022-07-14)
+ * **Feature**: Amazon Nimble Studio adds support for IAM-based access to AWS resources for Nimble Studio components and custom studio components. Studio Component scripts use these roles on Nimble Studio workstation to mount filesystems, access S3 buckets, or other configured resources in the Studio's AWS account
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.22.0](service/outposts/CHANGELOG.md#v1220-2022-07-14)
+ * **Feature**: This release adds the ShipmentInformation and AssetInformationList fields to the GetOrder API response.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.36.0](service/sagemaker/CHANGELOG.md#v1360-2022-07-14)
+ * **Feature**: This release adds support for G5, P4d, and C6i instance types in Amazon SageMaker Inference and increases the number of hyperparameters that can be searched from 20 to 30 in Amazon SageMaker Automatic Model Tuning
+
+# Release (2022-07-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.13.0](service/appconfig/CHANGELOG.md#v1130-2022-07-13)
+ * **Feature**: Adding Create, Get, Update, Delete, and List APIs for two new resources: Extensions and ExtensionAssociations.
+
+# Release (2022-07-12)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.14.0](service/networkmanager/CHANGELOG.md#v1140-2022-07-12)
+ * **Feature**: This release adds general availability API support for AWS Cloud WAN.
+
+# Release (2022-07-11)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.48.0](service/ec2/CHANGELOG.md#v1480-2022-07-11)
+ * **Feature**: Build, manage, and monitor a unified global network that connects resources running across your cloud and on-premises environments using the AWS Cloud WAN APIs.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.0](service/redshift/CHANGELOG.md#v1260-2022-07-11)
+ * **Feature**: This release adds a new --snapshot-arn field for describe-cluster-snapshots, describe-node-configuration-options, restore-from-cluster-snapshot, authorize-snapshot-access, and revoke-snapshot-access APIs. It allows customers to give a Redshift snapshot ARN or a Redshift Serverless ARN as input.
+* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.2.2](service/redshiftserverless/CHANGELOG.md#v122-2022-07-11)
+ * **Documentation**: Removed prerelease language for GA launch.
+
+# Release (2022-07-08)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.17.0](service/backup/CHANGELOG.md#v1170-2022-07-08)
+ * **Feature**: This release adds support for authentication using IAM user identity instead of passed IAM role, identified by excluding the IamRoleArn field in the StartRestoreJob API. This feature applies to only resource clients with a destructive restore nature (e.g. SAP HANA).
+
+# Release (2022-07-07)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.12.0](service/chimesdkmeetings/CHANGELOG.md#v1120-2022-07-07)
+ * **Feature**: Adds support for AppKeys and TenantIds in Amazon Chime SDK WebRTC sessions
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.0](service/databasemigrationservice/CHANGELOG.md#v1210-2022-07-07)
+ * **Feature**: New API to migrate event subscriptions to EventBridge rules
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.26.0](service/iot/CHANGELOG.md#v1260-2022-07-07)
+ * **Feature**: This release adds support to register a CA certificate without having to provide a verification certificate. This also allows multiple AWS accounts to register the same CA in the same region.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.20.0](service/iotwireless/CHANGELOG.md#v1200-2022-07-07)
+ * **Feature**: Adds 5 APIs: PutPositionConfiguration, GetPositionConfiguration, ListPositionConfigurations, UpdatePosition, GetPosition for the new Positioning Service feature which enables customers to configure solvers to calculate position of LoRaWAN devices, or specify position of LoRaWAN devices & gateways.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.35.0](service/sagemaker/CHANGELOG.md#v1350-2022-07-07)
+ * **Feature**: Heterogeneous clusters: the ability to launch training jobs with multiple instance types. This enables running each component of the training job on the instance type that is most suitable for it, e.g. doing data processing and augmentation on CPU instances and neural network training on GPU instances
+
+# Release (2022-07-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.22.0](service/cloudformation/CHANGELOG.md#v1220-2022-07-06)
+ * **Feature**: Add a new feature Account-level Targeting for StackSet operation
+* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.16.0](service/synthetics/CHANGELOG.md#v1160-2022-07-06)
+ * **Feature**: This release introduces the Group feature, which enables users to group cross-region canaries.
+
+# Release (2022-07-05)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.21.5](service/configservice/CHANGELOG.md#v1215-2022-07-05)
+ * **Documentation**: Updating documentation for service limits
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.21.0](service/lexmodelsv2/CHANGELOG.md#v1210-2022-07-05)
+ * **Feature**: This release introduces an additional optional parameter, "messageSelectionStrategy", to PromptSpecification, which enables users to configure the bot to play messages in an orderly manner.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.23.0](service/quicksight/CHANGELOG.md#v1230-2022-07-05)
+ * **Feature**: This release allows customers to programmatically create QuickSight accounts with Enterprise and Enterprise + Q editions. It also releases allowlisting domains for embedding QuickSight dashboards at runtime through the embedding APIs.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.22.0](service/rds/CHANGELOG.md#v1220-2022-07-05)
+ * **Feature**: Adds waiters support for DBCluster.
+* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.0.0](service/rolesanywhere/CHANGELOG.md#v100-2022-07-05)
+ * **Release**: New AWS service client module
+ * **Feature**: IAM Roles Anywhere allows your workloads such as servers, containers, and applications to obtain temporary AWS credentials and use the same IAM roles and policies that you have configured for your AWS workloads to access AWS resources.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.19.0](service/sqs/CHANGELOG.md#v1190-2022-07-05)
+ * **Feature**: Adds support for the SQS client to automatically validate message checksums for SendMessage, SendMessageBatch, and ReceiveMessage. A DisableMessageChecksumValidation parameter has been added to the Options struct for SQS package. Setting this to true will disable the checksum validation. This can be set when creating a client, or per operation call (see the Go sketch after the 2022-06-30 notes below).
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.15.0](service/ssmincidents/CHANGELOG.md#v1150-2022-07-05)
+ * **Feature**: Adds support for tagging incident-record on creation by providing incident tags in the template within a response-plan.
+
+# Release (2022-07-01)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.20.0](service/databasemigrationservice/CHANGELOG.md#v1200-2022-07-01)
+ * **Feature**: Added new features for AWS DMS version 3.4.7 that includes new endpoint settings for S3, OpenSearch, Postgres, SQLServer and Oracle.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.5](service/rds/CHANGELOG.md#v1215-2022-07-01)
+ * **Documentation**: Adds support for additional retention periods to Performance Insights.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.27.0](service/s3/CHANGELOG.md#v1270-2022-07-01)
+ * **Feature**: Add presign support for HeadBucket, DeleteObject, and DeleteBucket. Fixes [#1076](https://github.com/aws/aws-sdk-go-v2/issues/1076).
+
+# Release (2022-06-30)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.16.0](service/athena/CHANGELOG.md#v1160-2022-06-30)
+ * **Feature**: This feature introduces the API support for Athena's parameterized query and BatchGetPreparedStatement API.
+* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.18.0](service/customerprofiles/CHANGELOG.md#v1180-2022-06-30)
+ * **Feature**: This release adds the optional MinAllowedConfidenceScoreForMerging parameter to the CreateDomain, UpdateDomain, and GetAutoMergingPreview APIs in Customer Profiles. This parameter is used as a threshold to influence the profile auto-merging step of the Identity Resolution process.
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.20.0](service/emr/CHANGELOG.md#v1200-2022-06-30)
+ * **Feature**: This release adds support for the ExecutionRoleArn parameter in the AddJobFlowSteps and DescribeStep APIs. Customers can use ExecutionRoleArn to specify the IAM role used for each job they submit using the AddJobFlowSteps API.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.27.0](service/glue/CHANGELOG.md#v1270-2022-06-30)
+ * **Feature**: This release adds tag as an input of CreateDatabase
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.29.0](service/kendra/CHANGELOG.md#v1290-2022-06-30)
+ * **Feature**: Amazon Kendra now provides a data source connector for Alfresco
+* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.13.0](service/mwaa/CHANGELOG.md#v1130-2022-06-30)
+ * **Feature**: Documentation updates for Amazon Managed Workflows for Apache Airflow.
+* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.16.0](service/pricing/CHANGELOG.md#v1160-2022-06-30)
+ * **Feature**: Documentation update for GetProducts Response.
+* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.16.0](service/wellarchitected/CHANGELOG.md#v1160-2022-06-30)
+ * **Feature**: Added support for UpdateGlobalSettings API. Added status filter to ListWorkloadShares and ListLensShares.
+* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.16.0](service/workmail/CHANGELOG.md#v1160-2022-06-30)
+ * **Feature**: This release adds support for managing user availability configurations in Amazon WorkMail.
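+
+A usage sketch for the 2022-07-05 `sqs` entry above: per that note, checksum validation can be disabled via the Options struct either when creating the client or per operation call. A minimal sketch (the queue URL is a placeholder):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/sqs"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Option 1: disable checksum validation for every call made by this client.
+	client := sqs.NewFromConfig(cfg, func(o *sqs.Options) {
+		o.DisableMessageChecksumValidation = true
+	})
+
+	// Option 2: disable it for a single operation call only.
+	_, err = client.SendMessage(context.TODO(), &sqs.SendMessageInput{
+		QueueUrl:    aws.String("https://sqs.us-east-1.amazonaws.com/123456789012/example-queue"), // placeholder
+		MessageBody: aws.String("hello"),
+	}, func(o *sqs.Options) {
+		o.DisableMessageChecksumValidation = true
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```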
+
+# Release (2022-06-29)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.16.6
+ * **Bug Fix**: Fix aws/signer/v4 to not double sign Content-Length header. Fixes [#1728](https://github.com/aws/aws-sdk-go-v2/issues/1728). Thanks to @matelang for creating the issue and PR.
+* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.17.0](service/appstream/CHANGELOG.md#v1170-2022-06-29)
+ * **Feature**: Includes support for StreamingExperienceSettings in CreateStack and UpdateStack APIs
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.7](service/elasticloadbalancingv2/CHANGELOG.md#v1187-2022-06-29)
+ * **Documentation**: This release adds two attributes for ALB. One helps to preserve the host header, and the other helps to modify, preserve, or remove the X-Forwarded-For header in the HTTP request.
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.19.0](service/emr/CHANGELOG.md#v1190-2022-06-29)
+ * **Feature**: This release introduces an additional optional parameter, "Throughput", to VolumeSpecification to enable users to configure throughput for gp3 EBS volumes.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.21.0](service/medialive/CHANGELOG.md#v1210-2022-06-29)
+ * **Feature**: This release adds support for automatic renewal of MediaLive reservations at the end of each reservation term. Automatic renewal is optional. This release also adds support for labelling accessibility-focused audio and caption tracks in HLS outputs.
+* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.2.0](service/redshiftserverless/CHANGELOG.md#v120-2022-06-29)
+ * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.34.0](service/sagemaker/CHANGELOG.md#v1340-2022-06-29)
+ * **Feature**: This release adds: UpdateFeatureGroup, UpdateFeatureMetadata, DescribeFeatureMetadata APIs; FeatureMetadata type in Search API; LastModifiedTime, LastUpdateStatus, OnlineStoreTotalSizeBytes in DescribeFeatureGroup API.
+* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.14.0](service/translate/CHANGELOG.md#v1140-2022-06-29)
+ * **Feature**: Added ListLanguages API which can be used to list the languages supported by Translate.
+
+# Release (2022-06-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.18.0](service/datasync/CHANGELOG.md#v1180-2022-06-28)
+ * **Feature**: AWS DataSync now supports Amazon FSx for NetApp ONTAP locations.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.47.0](service/ec2/CHANGELOG.md#v1470-2022-06-28)
+ * **Feature**: This release adds a new spread placement group to EC2 Placement Groups: host level spread, which spreads instances between physical hosts, available to Outpost customers only. CreatePlacementGroup and DescribePlacementGroups APIs were updated with a new parameter, SpreadLevel, to support this feature.
+* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.12.0](service/finspacedata/CHANGELOG.md#v1120-2022-06-28)
+ * **Feature**: Release new API GetExternalDataViewAccessDetails
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.16.0](service/polly/CHANGELOG.md#v1160-2022-06-28)
+ * **Feature**: Add 4 new neural voices - Pedro (es-US), Liam (fr-CA), Daniel (de-DE) and Arthur (en-GB).
+
+# Release (2022-06-24.2)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.13.7](service/emrcontainers/CHANGELOG.md#v1137-2022-06-242)
+ * **Bug Fix**: Fixes bug with incorrect modeled timestamp format
+
+# Release (2022-06-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.14.0](service/lookoutequipment/CHANGELOG.md#v1140-2022-06-23)
+ * **Feature**: This release adds visualizations to the scheduled inference results. Users will be able to see inference results, including diagnostic results from their running inference schedulers.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.25.1](service/mediaconvert/CHANGELOG.md#v1251-2022-06-23)
+ * **Documentation**: AWS Elemental MediaConvert SDK has released support for automatic DolbyVision metadata generation when converting HDR10 to DolbyVision.
+* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.15.0](service/mgn/CHANGELOG.md#v1150-2022-06-23)
+ * **Feature**: New and modified APIs for the Post-Migration Framework
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.6.0](service/migrationhubrefactorspaces/CHANGELOG.md#v160-2022-06-23)
+ * **Feature**: This release adds the new API UpdateRoute that allows a route to be updated to the ACTIVE/INACTIVE state. In addition, the CreateRoute API will now allow users to create a route in the ACTIVE/INACTIVE state.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.33.0](service/sagemaker/CHANGELOG.md#v1330-2022-06-23)
+ * **Feature**: SageMaker Ground Truth now supports Virtual Private Cloud. Customers can launch labeling jobs and access their private workforce in VPC mode.
+
+# Release (2022-06-22)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.8](service/apigateway/CHANGELOG.md#v1158-2022-06-22)
+ * **Documentation**: Documentation updates for Amazon API Gateway
+* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.15.0](service/pricing/CHANGELOG.md#v1150-2022-06-22)
+ * **Feature**: This release introduces 1 update to the GetProducts API. The serviceCode attribute is now required when you use the GetProductsRequest.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.20.0](service/transfer/CHANGELOG.md#v1200-2022-06-22)
+ * **Feature**: Previously, the service supported only RSA host keys and user keys. With this launch, Transfer Family has expanded support to ECDSA and ED25519 host keys and user keys, enabling customers to support a broader set of clients by choosing RSA, ECDSA, or ED25519 host and user keys.
+
+# Release (2022-06-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.46.0](service/ec2/CHANGELOG.md#v1460-2022-06-21)
+ * **Feature**: This release adds support for Private IP VPNs, a new feature allowing S2S VPN connections to use private IP addresses as the tunnel outside IP address over Direct Connect as transport.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.9](service/ecs/CHANGELOG.md#v1189-2022-06-21)
+ * **Documentation**: Amazon ECS UpdateService now supports the following parameters: PlacementStrategies, PlacementConstraints and CapacityProviderStrategy.
+* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.15.0](service/wellarchitected/CHANGELOG.md#v1150-2022-06-21)
+ * **Feature**: Adds support for lens tagging. Adds support for multiple helpful-resource URLs and multiple improvement-plan URLs.
+
+# Release (2022-06-20)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.14.0](service/directoryservice/CHANGELOG.md#v1140-2022-06-20)
+ * **Feature**: This release adds support for describing and updating AWS Managed Microsoft AD settings
+* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.17.7](service/kafka/CHANGELOG.md#v1177-2022-06-20)
+ * **Documentation**: Documentation updates to use AZ ID during cluster creation.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.21.0](service/outposts/CHANGELOG.md#v1210-2022-06-20)
+ * **Feature**: This release adds the AssetLocation structure to the ListAssets response. AssetLocation includes the RackElevation for an Asset.
+
+# Release (2022-06-17)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.27.0](service/connect/CHANGELOG.md#v1270-2022-06-17)
+ * **Feature**: This release updates these APIs: UpdateInstanceAttribute, DescribeInstanceAttribute and ListInstanceAttributes. You can use it to programmatically enable/disable High volume outbound communications using attribute type HIGH_VOLUME_OUTBOUND on the specified Amazon Connect instance.
+* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.0.0](service/connectcampaigns/CHANGELOG.md#v100-2022-06-17)
+ * **Release**: New AWS service client module
+ * **Feature**: Added Amazon Connect high volume outbound communications SDK.
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.15.7](service/dynamodb/CHANGELOG.md#v1157-2022-06-17)
+ * **Documentation**: Doc only update for DynamoDB service
+* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.13.7](service/dynamodbstreams/CHANGELOG.md#v1137-2022-06-17)
+ * **Documentation**: Doc only update for DynamoDB service
+
+# Release (2022-06-16)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.16.0](service/redshiftdata/CHANGELOG.md#v1160-2022-06-16)
+ * **Feature**: This release adds a new --workgroup-name field to operations that connect to an endpoint. Customers can now execute queries against their serverless workgroups (see the Go sketch after the 2022-06-09 notes below).
+* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.1.0](service/redshiftserverless/CHANGELOG.md#v110-2022-06-16)
+ * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.11](service/secretsmanager/CHANGELOG.md#v11511-2022-06-16)
+ * **Documentation**: Documentation updates for Secrets Manager
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.22.0](service/securityhub/CHANGELOG.md#v1220-2022-06-16)
+ * **Feature**: Added Threats field for security findings. Added new resource details for ECS Container, ECS Task, RDS SecurityGroup, Kinesis Stream, EC2 TransitGateway, EFS AccessPoint, CloudFormation Stack, CloudWatch Alarm, VPC Peering Connection and WAF Rules
+
+# Release (2022-06-15)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.11.0](service/finspacedata/CHANGELOG.md#v1110-2022-06-15)
+ * **Feature**: This release adds a new set of APIs, GetPermissionGroup, DisassociateUserFromPermissionGroup, AssociateUserToPermissionGroup, ListPermissionGroupsByUser, ListUsersByPermissionGroup.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.14.0](service/guardduty/CHANGELOG.md#v1140-2022-06-15)
+ * **Feature**: Adds finding fields available from GuardDuty Console. Adds FreeTrial related operations. Deprecates the use of various APIs related to Master Accounts and replaces them with Administrator Accounts.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.13.0](service/servicecatalogappregistry/CHANGELOG.md#v1130-2022-06-15)
+ * **Feature**: This release adds a new API ListAttributeGroupsForApplication that returns associated attribute groups of an application. In addition, the UpdateApplication and UpdateAttributeGroup APIs will not allow users to update the 'Name' attribute.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.19.0](service/workspaces/CHANGELOG.md#v1190-2022-06-15)
+ * **Feature**: Added new field "reason" to OperationNotSupportedException. Receiving this exception in the DeregisterWorkspaceDirectory API will now return a reason giving more context on the failure.
+
+# Release (2022-06-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.13.0](service/budgets/CHANGELOG.md#v1130-2022-06-14)
+ * **Feature**: Add a budgets ThrottlingException. Update the CostFilters value pattern.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.16.0](service/lookoutmetrics/CHANGELOG.md#v1160-2022-06-14)
+ * **Feature**: Adding filters to Alert and adding new UpdateAlert API.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.25.0](service/mediaconvert/CHANGELOG.md#v1250-2022-06-14)
+ * **Feature**: AWS Elemental MediaConvert SDK has added support for rules that constrain Automatic-ABR rendition selection when generating ABR package ladders.
+
+# Release (2022-06-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.20.0](service/outposts/CHANGELOG.md#v1200-2022-06-13)
+ * **Feature**: This release adds API operations AWS uses to install Outpost servers.
+
+# Release (2022-06-10)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.19.7](service/frauddetector/CHANGELOG.md#v1197-2022-06-10)
+ * **Documentation**: Documentation updates for Amazon Fraud Detector (AWSHawksNest)
+
+# Release (2022-06-09)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.11.0](service/chimesdkmeetings/CHANGELOG.md#v1110-2022-06-09)
+ * **Feature**: Adds support for live transcription in AWS GovCloud (US) Regions.
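+
+A usage sketch for the 2022-06-16 `redshiftdata` entry above: queries can now target a serverless workgroup instead of a provisioned cluster. A minimal sketch, assuming the new field is surfaced on `ExecuteStatementInput` as `WorkgroupName` (workgroup and database names are placeholders):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/redshiftdata"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := redshiftdata.NewFromConfig(cfg)
+
+	// Target a serverless workgroup rather than a ClusterIdentifier.
+	out, err := client.ExecuteStatement(context.TODO(), &redshiftdata.ExecuteStatementInput{
+		WorkgroupName: aws.String("example-workgroup"), // placeholder
+		Database:      aws.String("dev"),               // placeholder
+		Sql:           aws.String("SELECT 1"),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("statement id: %s", aws.ToString(out.Id))
+}
+```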
+ +# Release (2022-06-08) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.19.0](service/databasemigrationservice/CHANGELOG.md#v1190-2022-06-08) + * **Feature**: This release adds DMS Fleet Advisor APIs and exposes functionality for DMS Fleet Advisor. It adds functionality to create and modify fleet advisor instances, and to collect and analyze information about the local data infrastructure. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.7](service/iam/CHANGELOG.md#v1187-2022-06-08) + * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.0.0](service/m2/CHANGELOG.md#v100-2022-06-08) + * **Release**: New AWS service client module + * **Feature**: AWS Mainframe Modernization service is a managed mainframe service and set of tools for planning, migrating, modernizing, and running mainframe workloads on AWS +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.17.0](service/neptune/CHANGELOG.md#v1170-2022-06-08) + * **Feature**: This release adds support for Neptune to be configured as a global database, with a primary DB cluster in one region, and up to five secondary DB clusters in other regions. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.25.0](service/redshift/CHANGELOG.md#v1250-2022-06-08) + * **Feature**: Adds new API GetClusterCredentialsWithIAM to return temporary credentials. +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.0.0](service/redshiftserverless/CHANGELOG.md#v100-2022-06-08) + * **Release**: New AWS service client module + * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more. + +# Release (2022-06-07) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.19.0](service/auditmanager/CHANGELOG.md#v1190-2022-06-07) + * **Feature**: This release introduces 2 updates to the Audit Manager API. The roleType and roleArn attributes are now required when you use the CreateAssessment or UpdateAssessment operation. We also added a throttling exception to the RegisterAccount API operation. +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.19.0](service/costexplorer/CHANGELOG.md#v1190-2022-06-07) + * **Feature**: Added two new APIs to support cost allocation tags operations: ListCostAllocationTags, UpdateCostAllocationTagsStatus. + +# Release (2022-06-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.10.0](service/chimesdkmessaging/CHANGELOG.md#v1100-2022-06-06) + * **Feature**: This release adds support for searching channels by members via the SearchChannels API, removes required restrictions for Name and Mode in UpdateChannel API and enhances CreateChannel API by exposing member and moderator list as well as channel id as optional parameters. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.26.0](service/connect/CHANGELOG.md#v1260-2022-06-06) + * **Feature**: This release adds a new API, GetCurrentUserData, which returns real-time details about users' current activity. 
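+
+A usage sketch for the 2022-06-08 `redshiftserverless` v1.0.0 entry above: each service client lives in its own Go module, so a newly released client is pulled in with `go get` and constructed with the SDK's usual `NewFromConfig` pattern. The operations described in the note (snapshots, VPC endpoints, resource policies) are then methods on the client:
+
+```go
+// go get github.com/aws/aws-sdk-go-v2/service/redshiftserverless
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/redshiftserverless"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	// The client exposes the Redshift Serverless operations described above
+	// (snapshot, VPC endpoint, and resource policy APIs, among others).
+	client := redshiftserverless.NewFromConfig(cfg)
+	_ = client
+}
+```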
+ +# Release (2022-06-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.16.0](service/applicationinsights/CHANGELOG.md#v1160-2022-06-02) + * **Feature**: Provide Account Level onboarding support through CFN/CLI +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.12.6](service/codeartifact/CHANGELOG.md#v1126-2022-06-02) + * **Documentation**: Documentation updates for CodeArtifact +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.25.0](service/connect/CHANGELOG.md#v1250-2022-06-02) + * **Feature**: This release adds the following features: 1) New APIs to manage (create, list, update) task template resources, 2) Updates to startTaskContact API to support task templates, and 3) new TransferContact API to programmatically transfer in-progress tasks via a contact flow. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.28.0](service/kendra/CHANGELOG.md#v1280-2022-06-02) + * **Feature**: Amazon Kendra now provides a data source connector for GitHub. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-github.html +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.14.0](service/proton/CHANGELOG.md#v1140-2022-06-02) + * **Feature**: Add new "Components" API to enable users to Create, Delete and Update AWS Proton components. +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.10.0](service/voiceid/CHANGELOG.md#v1100-2022-06-02) + * **Feature**: Added a new attribute ServerSideEncryptionUpdateDetails to Domain and DomainSummary. + +# Release (2022-06-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.6.0](service/backupgateway/CHANGELOG.md#v160-2022-06-01) + * **Feature**: Adds GetGateway and UpdateGatewaySoftwareNow API and adds hypervisor name to UpdateHypervisor API +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.10.0](service/chimesdkmeetings/CHANGELOG.md#v1100-2022-06-01) + * **Feature**: Adds support for centrally controlling each participant's ability to send and receive audio, video and screen share within a WebRTC session. Attendee capabilities can be specified when the attendee is created and updated during the session with the new BatchUpdateAttendeeCapabilitiesExcept API. +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.22.0](service/forecast/CHANGELOG.md#v1220-2022-06-01) + * **Feature**: Added Format field to Import and Export APIs in Amazon Forecast. Added TimeSeriesSelector to Create Forecast API. +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.21.0](service/route53/CHANGELOG.md#v1210-2022-06-01) + * **Feature**: Add new APIs to support Route 53 IP Based Routing + +# Release (2022-05-31) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.17.0](service/cognitoidentityprovider/CHANGELOG.md#v1170-2022-05-31) + * **Feature**: Amazon Cognito now supports IP Address propagation for all unauthenticated APIs (e.g. SignUp, ForgotPassword). +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.6.0](service/drs/CHANGELOG.md#v160-2022-05-31) + * **Feature**: Changed existing APIs and added new APIs to accommodate using multiple AWS accounts with AWS Elastic Disaster Recovery. +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.22.0](service/iotsitewise/CHANGELOG.md#v1220-2022-05-31) + * **Feature**: This release adds the following new optional field to the IoT SiteWise asset resource: assetDescription. 
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.15.0](service/lookoutmetrics/CHANGELOG.md#v1150-2022-05-31)
+ * **Feature**: Adding backtest mode to detectors using the CloudWatch data source.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.20.0](service/transcribe/CHANGELOG.md#v1200-2022-05-31)
+ * **Feature**: Amazon Transcribe now supports automatic language identification for multi-lingual audio in batch mode.
+
+# Release (2022-05-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.16.0](service/appflow/CHANGELOG.md#v1160-2022-05-27)
+ * **Feature**: Adding the following features/changes: Parquet output that preserves typing from the source connector, Failed executions threshold before deactivation for scheduled flows, increasing max size of access and refresh token from 2048 to 4096
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.17.0](service/datasync/CHANGELOG.md#v1170-2022-05-27)
+ * **Feature**: AWS DataSync now supports TLS encryption in transit, file system policies and access points for EFS locations.
+* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.1.0](service/emrserverless/CHANGELOG.md#v110-2022-05-27)
+ * **Feature**: This release adds support for Amazon EMR Serverless, a serverless runtime environment that simplifies running analytics applications using the latest open source frameworks such as Apache Spark and Apache Hive.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.32.0](service/sagemaker/CHANGELOG.md#v1320-2022-05-27)
+ * **Feature**: Amazon SageMaker Notebook Instances now allows configuration of Instance Metadata Service version and Amazon SageMaker Studio now supports G5 instance types.
+
+# Release (2022-05-26)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.45.0](service/ec2/CHANGELOG.md#v1450-2022-05-26)
+ * **Feature**: C7g instances, powered by the latest generation AWS Graviton3 processors, provide the best price performance in Amazon EC2 for compute-intensive workloads.
+* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.0.0](service/emrserverless/CHANGELOG.md#v100-2022-05-26)
+ * **Release**: New AWS service client module
+ * **Feature**: This release adds support for Amazon EMR Serverless, a serverless runtime environment that simplifies running analytics applications using the latest open source frameworks such as Apache Spark and Apache Hive.
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.21.0](service/forecast/CHANGELOG.md#v1210-2022-05-26)
+ * **Feature**: Introduced a new field in Auto Predictor as Time Alignment Boundary. It helps in aligning the timestamps generated during Forecast exports
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.22.0](service/lightsail/CHANGELOG.md#v1220-2022-05-26)
+ * **Feature**: Amazon Lightsail now supports the ability to configure a Lightsail Container Service to pull images from Amazon ECR private repositories in your account.
+
+# Release (2022-05-25)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.6](service/apigateway/CHANGELOG.md#v1156-2022-05-25)
+ * **Documentation**: Documentation updates for Amazon API Gateway
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.12.3](service/apprunner/CHANGELOG.md#v1123-2022-05-25)
+ * **Documentation**: Documentation-only update added for CodeConfiguration.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.21.0](service/cloudformation/CHANGELOG.md#v1210-2022-05-25)
+ * **Feature**: Add a new parameter statusReason to DescribeStackSetOperation output for additional details
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.0](service/fsx/CHANGELOG.md#v1240-2022-05-25)
+ * **Feature**: This release adds root squash support to FSx for Lustre to restrict root level access from clients by mapping root users to a less-privileged user/group with limited permissions.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.14.0](service/lookoutmetrics/CHANGELOG.md#v1140-2022-05-25)
+ * **Feature**: Adding AthenaSourceConfig for MetricSet APIs to support Athena as a data source.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.31.0](service/sagemaker/CHANGELOG.md#v1310-2022-05-25)
+ * **Feature**: Amazon SageMaker Autopilot adds support for manually selecting features from the input dataset using the CreateAutoMLJob API.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.9](service/secretsmanager/CHANGELOG.md#v1159-2022-05-25)
+ * **Documentation**: Documentation updates for Secrets Manager
+* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.9.0](service/voiceid/CHANGELOG.md#v190-2022-05-25)
+ * **Feature**: VoiceID will now automatically expire Speakers if they haven't been accessed for Enrollment, Re-enrollment or Successful Auth for three years. The Speaker APIs now return a "LastAccessedAt" time for Speakers, and the EvaluateSession API returns "SPEAKER_EXPIRED" Auth Decision for EXPIRED Speakers.
+
+# Release (2022-05-24)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.16.0](service/cognitoidentityprovider/CHANGELOG.md#v1160-2022-05-24)
+ * **Feature**: Amazon Cognito now supports requiring attribute verification (e.g., email and phone number) before update.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.44.0](service/ec2/CHANGELOG.md#v1440-2022-05-24)
+ * **Feature**: The Stop Protection feature enables customers to protect their instances from accidental stop actions (see the Go sketch below).
+* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.4](service/ivschat/CHANGELOG.md#v104-2022-05-24)
+ * **Documentation**: Doc-only update. For MessageReviewHandler structure, added timeout period in the description of the fallbackResult field
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.24.0](service/mediaconvert/CHANGELOG.md#v1240-2022-05-24)
+ * **Feature**: AWS Elemental MediaConvert SDK has added support for rules that constrain Automatic-ABR rendition selection when generating ABR package ladders.
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.13.0](service/networkmanager/CHANGELOG.md#v1130-2022-05-24)
+ * **Feature**: This release adds Multi Account API support for a TGW Global Network, to enable and disable AWSServiceAccess with AwsOrganizations for Network Manager service and dependency CloudFormation StackSets service.
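+
+A usage sketch for the 2022-05-24 `ec2` Stop Protection entry above: a minimal sketch, assuming the attribute is surfaced as `DisableApiStop` on `ModifyInstanceAttributeInput`, mirroring the long-standing `DisableApiTermination` attribute (the instance ID is a placeholder):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := ec2.NewFromConfig(cfg)
+
+	// Enable stop protection: stop requests are rejected until the
+	// attribute is cleared again.
+	_, err = client.ModifyInstanceAttribute(context.TODO(), &ec2.ModifyInstanceAttributeInput{
+		InstanceId:     aws.String("i-0123456789abcdef0"), // placeholder
+		DisableApiStop: &types.AttributeBooleanValue{Value: aws.Bool(true)},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```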
+
+# Release (2022-05-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.21.0](service/elasticache/CHANGELOG.md#v1210-2022-05-23)
+ * **Feature**: Added support for encryption in transit for Memcached clusters. Customers can now launch a Memcached cluster with encryption in transit enabled when using Memcached version 1.6.12 or later.
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.20.0](service/forecast/CHANGELOG.md#v1200-2022-05-23)
+ * **Feature**: New APIs for Monitor that help you understand how your predictors perform over time.
+* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.20.0](service/personalize/CHANGELOG.md#v1200-2022-05-23)
+ * **Feature**: Adding modelMetrics as part of DescribeRecommender API response for Personalize.
+
+# Release (2022-05-20)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.15.7](service/cloudwatchlogs/CHANGELOG.md#v1157-2022-05-20)
+ * **Documentation**: Doc-only update to publish the new valid values for log retention
+* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.18.0](service/comprehend/CHANGELOG.md#v1180-2022-05-20)
+ * **Feature**: Comprehend releases 14 new entity types for DetectPiiEntities and ContainsPiiEntities APIs.
+
+# Release (2022-05-19)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.1.0](service/gamesparks/CHANGELOG.md#v110-2022-05-19)
+ * **Feature**: This release adds an optional DeploymentResult field in the responses of GetStageDeploymentIntegrationTests and ListStageDeploymentIntegrationTests APIs.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.13.0](service/lookoutmetrics/CHANGELOG.md#v1130-2022-05-19)
+ * **Feature**: In this release we added SnsFormat to SNSConfiguration to support human-readable alerts.
+
+# Release (2022-05-18)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.14.0](service/appmesh/CHANGELOG.md#v1140-2022-05-18)
+ * **Feature**: This release updates the existing Create and Update APIs for meshes and virtual nodes by adding a new IP preference field. This new IP preference field can be used to control the IP versions being used with the mesh and allows for IPv6 support within App Mesh.
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.18.3](service/batch/CHANGELOG.md#v1183-2022-05-18)
+ * **Documentation**: Documentation updates for AWS Batch.
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.16.0](service/greengrassv2/CHANGELOG.md#v1160-2022-05-18)
+ * **Feature**: This release adds the new DeleteDeployment API operation that you can use to delete deployment resources. This release also adds support for discontinued AWS-provided components, so AWS can communicate when a component has any issues that you should consider before you deploy it.
+* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.12.0](service/ioteventsdata/CHANGELOG.md#v1120-2022-05-18)
+ * **Feature**: Introducing new API for deleting detectors: BatchDeleteDetector.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.22.0](service/quicksight/CHANGELOG.md#v1220-2022-05-18)
+ * **Feature**: API UpdatePublicSharingSettings enables IAM admins to enable/disable account level setting for public access of dashboards. When enabled, owners/co-owners for dashboards can enable public access on their dashboards. These dashboards can only be accessed through share link or embedding.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.19.0](service/transfer/CHANGELOG.md#v1190-2022-05-18)
+ * **Feature**: AWS Transfer Family now supports SetStat server configuration option, which provides the ability to ignore SetStat command issued by file transfer clients, enabling customers to upload files without any errors.
+
+# Release (2022-05-17)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.3.12](internal/ini/CHANGELOG.md#v1312-2022-05-17)
+ * **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.25.0](service/glue/CHANGELOG.md#v1250-2022-05-17)
+ * **Feature**: This release adds a new optional parameter called codeGenNodeConfiguration to CRUD job APIs that allows users to manage visual jobs via APIs. The updated CreateJob and UpdateJob will create jobs that can be viewed in Glue Studio as a visual graph. GetJob can be used to get codeGenNodeConfiguration.
+* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.13.1](service/iotsecuretunneling/CHANGELOG.md#v1131-2022-05-17)
+ * **Bug Fix**: Fixes iotsecuretunneling and mobile API clients to use the correct name for signing requests. Fixes [#1686](https://github.com/aws/aws-sdk-go-v2/issues/1686).
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.17.2](service/kms/CHANGELOG.md#v1172-2022-05-17)
+ * **Documentation**: Add HMAC best practice tip, annual rotation of AWS managed keys.
+* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.11.5](service/mobile/CHANGELOG.md#v1115-2022-05-17)
+ * **Bug Fix**: Fixes iotsecuretunneling and mobile API clients to use the correct name for signing requests. Fixes [#1686](https://github.com/aws/aws-sdk-go-v2/issues/1686).
+
+# Release (2022-05-16)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.13.0](service/applicationdiscoveryservice/CHANGELOG.md#v1130-2022-05-16)
+ * **Feature**: Add Migration Evaluator Collector details to the GetDiscoverySummary API response
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.18.0](service/cloudfront/CHANGELOG.md#v1180-2022-05-16)
+ * **Feature**: Introduced a new error (TooLongCSPInResponseHeadersPolicy) that is returned when the value of the Content-Security-Policy header in a response headers policy exceeds the maximum allowed length.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.18.1](service/rekognition/CHANGELOG.md#v1181-2022-05-16)
+ * **Documentation**: Documentation updates for Amazon Rekognition.
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.6.0](service/resiliencehub/CHANGELOG.md#v160-2022-05-16)
+ * **Feature**: In this release, we are introducing support for Amazon Elastic Container Service, Amazon Route 53, AWS Elastic Disaster Recovery, AWS Backup in addition to the existing supported Services. This release also supports Terraform file input from S3 and scheduling daily assessments
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.2](service/servicecatalog/CHANGELOG.md#v1142-2022-05-16)
+ * **Documentation**: Updated the descriptions for the ListAcceptedPortfolioShares API description and the PortfolioShareType parameters.
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.16.5](service/sts/CHANGELOG.md#v1165-2022-05-16) + * **Documentation**: Documentation updates for AWS Security Token Service. +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.6.0](service/workspacesweb/CHANGELOG.md#v160-2022-05-16) + * **Feature**: Amazon WorkSpaces Web now supports Administrator timeout control + +# Release (2022-05-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.9.0](service/grafana/CHANGELOG.md#v190-2022-05-13) + * **Feature**: This release adds APIs for creating and deleting API keys in an Amazon Managed Grafana workspace. + +# Release (2022-05-12) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.43.0](service/ec2/CHANGELOG.md#v1430-2022-05-12) + * **Feature**: This release introduces a target type Gateway Load Balancer Endpoint for mirrored traffic. Customers can now specify GatewayLoadBalancerEndpoint option during the creation of a traffic mirror target. +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.10.5](service/finspacedata/CHANGELOG.md#v1105-2022-05-12) + * **Documentation**: We've now deprecated CreateSnapshot permission for creating a data view, instead use CreateDataView permission. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.25.1](service/iot/CHANGELOG.md#v1251-2022-05-12) + * **Documentation**: Documentation update for China region ListMetricValues for IoT +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.2](service/ivschat/CHANGELOG.md#v102-2022-05-12) + * **Documentation**: Documentation-only updates for IVS Chat API Reference. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.27.0](service/kendra/CHANGELOG.md#v1270-2022-05-12) + * **Feature**: Amazon Kendra now provides a data source connector for Jira. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-jira.html +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.23.0](service/lambda/CHANGELOG.md#v1230-2022-05-12) + * **Feature**: Lambda releases NodeJs 16 managed runtime to be available in all commercial regions. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.21.0](service/lightsail/CHANGELOG.md#v1210-2022-05-12) + * **Feature**: This release adds support to include inactive database bundles in the response of the GetRelationalDatabaseBundles request. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.19.1](service/outposts/CHANGELOG.md#v1191-2022-05-12) + * **Documentation**: Documentation updates for AWS Outposts. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.14.0](service/ssmincidents/CHANGELOG.md#v1140-2022-05-12) + * **Feature**: Adding support for dynamic SSM Runbook parameter values. Updating validation pattern for engagements. Adding ConflictException to UpdateReplicationSet API contract. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.6](service/transfer/CHANGELOG.md#v1186-2022-05-12) + * **Documentation**: AWS Transfer Family now accepts ECDSA keys for server host keys + +# Release (2022-05-11) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.42.0](service/ec2/CHANGELOG.md#v1420-2022-05-11) + * **Feature**: This release updates AWS PrivateLink APIs to support IPv6 for PrivateLink Services and Endpoints of type 'Interface'. 
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.7](service/secretsmanager/CHANGELOG.md#v1157-2022-05-11) + * **Documentation**: Doc only update for Secrets Manager that fixes several customer-reported issues. + +# Release (2022-05-10) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.17.5](service/computeoptimizer/CHANGELOG.md#v1175-2022-05-10) + * **Documentation**: Documentation updates for Compute Optimizer +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.41.0](service/ec2/CHANGELOG.md#v1410-2022-05-10) + * **Feature**: Added support for using NitroTPM and UEFI Secure Boot on EC2 instances. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.21.0](service/eks/CHANGELOG.md#v1210-2022-05-10) + * **Feature**: Adds BOTTLEROCKET_ARM_64_NVIDIA and BOTTLEROCKET_x86_64_NVIDIA AMI types to EKS managed nodegroups +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.18.0](service/emr/CHANGELOG.md#v1180-2022-05-10) + * **Feature**: This release updates the Amazon EMR ModifyInstanceGroups API to support "MERGE" type cluster reconfiguration. Also, added the ability to specify a particular Amazon Linux release for all nodes in a cluster launch request. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.5.5](service/migrationhubrefactorspaces/CHANGELOG.md#v155-2022-05-10) + * **Documentation**: AWS Migration Hub Refactor Spaces documentation only update to fix a formatting issue. + +# Release (2022-05-09) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.15.5](config/CHANGELOG.md#v1155-2022-05-09) + * **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682) +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.10.0](service/cloudcontrol/CHANGELOG.md#v1100-2022-05-09) + * **Feature**: SDK release for Cloud Control API to include paginators for Python SDK. +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.7.0](service/evidently/CHANGELOG.md#v170-2022-05-09) + * **Feature**: Add detail message inside GetExperimentResults API response to indicate experiment result availability +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.13.5](service/ssmcontacts/CHANGELOG.md#v1135-2022-05-09) + * **Documentation**: Fixed an error in the DescribeEngagement example for AWS Incident Manager. + +# Release (2022-05-06) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.40.0](service/ec2/CHANGELOG.md#v1400-2022-05-06) + * **Feature**: Add new state values for IPAMs, IPAM Scopes, and IPAM Pools. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.17.0](service/location/CHANGELOG.md#v1170-2022-05-06) + * **Feature**: Amazon Location Service now includes a MaxResults parameter for ListGeofences requests. +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.16.0](service/mediapackage/CHANGELOG.md#v1160-2022-05-06) + * **Feature**: This release adds Dvb Dash 2014 as an available profile option for Dash Origin Endpoints. 
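To illustrate the `config` `v1.15.5` fix above (2022-05-09): the load option below is the kind of setting that previously did not reach the `feature/ec2/imds` client. A minimal sketch using the existing `WithEC2IMDSClientEnableState` load option and `imds.ClientDisabled` constant.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	// Disable the EC2 IMDS client via a load option; with the ConfigSources
	// fix, this setting is now visible to all config resolvers.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithEC2IMDSClientEnableState(imds.ClientDisabled),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```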
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.1](service/rds/CHANGELOG.md#v1211-2022-05-06) + * **Documentation**: Various documentation improvements. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.24.0](service/redshift/CHANGELOG.md#v1240-2022-05-06) + * **Feature**: Introduces a new field, 'LoadSampleData', in the CreateCluster operation. Customers can now specify the 'LoadSampleData' option when creating a cluster, which results in sample data being loaded into the newly created cluster. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.21.1](service/securityhub/CHANGELOG.md#v1211-2022-05-06) + * **Documentation**: Documentation updates for Security Hub API reference + +# Release (2022-05-05) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.16.0](service/datasync/CHANGELOG.md#v1160-2022-05-05) + * **Feature**: AWS DataSync now supports a new ObjectTags Task API option that can be used to control whether Object Tags are transferred. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.39.0](service/ec2/CHANGELOG.md#v1390-2022-05-05) + * **Feature**: Amazon EC2 I4i instances are powered by 3rd generation Intel Xeon Scalable processors and feature up to 30 TB of local AWS Nitro SSD storage +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.25.0](service/iot/CHANGELOG.md#v1250-2022-05-05) + * **Feature**: AWS IoT Jobs now allows you to create up to 100,000 active continuous and snapshot jobs by using concurrency control. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.26.0](service/kendra/CHANGELOG.md#v1260-2022-05-05) + * **Feature**: AWS Kendra now supports hierarchical facets for a query. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/filtering.html + +# Release (2022-05-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.16.0](service/backup/CHANGELOG.md#v1160-2022-05-04) + * **Feature**: Adds support for 2 new filters on job completion time to 3 list jobs APIs in AWS Backup +* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.13.0](service/iotsecuretunneling/CHANGELOG.md#v1130-2022-05-04) + * **Feature**: This release introduces a new API, RotateTunnelAccessToken, that allows revoking the existing tokens and generating new ones +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.20.1](service/lightsail/CHANGELOG.md#v1201-2022-05-04) + * **Documentation**: Documentation updates for Lightsail +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.27.0](service/ssm/CHANGELOG.md#v1270-2022-05-04) + * **Feature**: This release adds the TargetMaps parameter in SSM State Manager API. + +# Release (2022-05-03) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.38.0](service/ec2/CHANGELOG.md#v1380-2022-05-03) + * **Feature**: Adds support for allocating Dedicated Hosts on AWS Outposts. The AllocateHosts API now accepts an OutpostArn request parameter, and the DescribeHosts API now includes an OutpostArn response parameter. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.12.0](service/kinesisvideo/CHANGELOG.md#v1120-2022-05-03) + * **Feature**: Add support for multiple image feature related APIs for configuring image generation and notification of a video stream. Add "GET_IMAGES" to the list of supported API names for the GetDataEndpoint API.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.13.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1130-2022-05-03) + * **Feature**: Add support for GetImages API for retrieving images from a video stream +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.26.8](service/s3/CHANGELOG.md#v1268-2022-05-03) + * **Documentation**: Documentation only update for doc bug fixes for the S3 API docs. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.30.0](service/sagemaker/CHANGELOG.md#v1300-2022-05-03) + * **Feature**: SageMaker Autopilot adds new metrics for all candidate models generated by Autopilot experiments; RStudio on SageMaker now allows users to bring their own development environment in a custom image. + +# Release (2022-05-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.16.0](service/organizations/CHANGELOG.md#v1160-2022-05-02) + * **Feature**: This release adds INVALID_PAYMENT_INSTRUMENT as a fail reason and an error message. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.19.0](service/outposts/CHANGELOG.md#v1190-2022-05-02) + * **Feature**: This release adds a new API called ListAssets to the Outposts SDK, which lists the hardware assets in an Outpost. +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.15.0](service/synthetics/CHANGELOG.md#v1150-2022-05-02) + * **Feature**: CloudWatch Synthetics has introduced a new feature to provide customers with an option to delete the underlying resources that a Synthetics canary creates when the user chooses to delete the canary. + +# Release (2022-04-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.16.0](service/codegurureviewer/CHANGELOG.md#v1160-2022-04-29) + * **Feature**: Amazon CodeGuru Reviewer now supports suppressing recommendations from being generated on specific files and directories. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.23.0](service/mediaconvert/CHANGELOG.md#v1230-2022-04-29) + * **Feature**: AWS Elemental MediaConvert SDK now supports creation of Dolby Vision profile 8.1, the ability to generate black frames of video, and introduces audio-only DASH and CMAF support. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.0](service/rds/CHANGELOG.md#v1210-2022-04-29) + * **Feature**: Adds support for Internet Protocol Version 6 (IPv6) on RDS database instances. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.26.0](service/ssm/CHANGELOG.md#v1260-2022-04-29) + * **Feature**: Updates StartChangeRequestExecution, adding TargetMaps to the Runbook parameter +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.20.0](service/wafv2/CHANGELOG.md#v1200-2022-04-29) + * **Feature**: You can now inspect all request headers and all cookies. You can now specify how to handle oversize body contents in your rules that inspect the body. + +# Release (2022-04-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.5](service/auditmanager/CHANGELOG.md#v1185-2022-04-28) + * **Documentation**: This release adds documentation updates for Audit Manager. We provided examples of how to use the Custom_ prefix for the keywordValue attribute. We also provided more details about the DeleteAssessmentReport operation. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.16.0](service/braket/CHANGELOG.md#v1160-2022-04-28) + * **Feature**: This release enables Braket Hybrid Jobs with Embedded Simulators to have multiple instances.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.24.0](service/connect/CHANGELOG.md#v1240-2022-04-28) + * **Feature**: This release introduces an API for changing the current agent status of a user in Connect. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.37.0](service/ec2/CHANGELOG.md#v1370-2022-04-28) + * **Feature**: This release adds support to query the public key and creation date of EC2 Key Pairs. Additionally, the format (pem or ppk) of a key pair can be specified when creating a new key pair (see the sketch below). +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.13.5](service/guardduty/CHANGELOG.md#v1135-2022-04-28) + * **Documentation**: Documentation update for API description. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.17.0](service/networkfirewall/CHANGELOG.md#v1170-2022-04-28) + * **Feature**: AWS Network Firewall adds support for stateful threat signature AWS managed rule groups. + +# Release (2022-04-27) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.11.5](service/amplify/CHANGELOG.md#v1115-2022-04-27) + * **Documentation**: Documentation only update to support the Amplify GitHub App feature launch +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.0.0](service/chimesdkmediapipelines/CHANGELOG.md#v100-2022-04-27) + * **Release**: New AWS service client module + * **Feature**: For Amazon Chime SDK meetings, the Amazon Chime Media Pipelines SDK allows builders to capture audio, video, and content share streams. You can also capture meeting events, live transcripts, and data messages. The pipelines save the artifacts to an Amazon S3 bucket that you designate. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.16.0](service/cloudtrail/CHANGELOG.md#v1160-2022-04-27) + * **Feature**: Increases the retention period maximum to 2557 days. Deprecates unused fields of the ListEventDataStores API response. Updates documentation. +* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.1.5](service/internal/checksum/CHANGELOG.md#v115-2022-04-27) + * **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.19.0](service/iotwireless/CHANGELOG.md#v1190-2022-04-27) + * **Feature**: Adds list support for event configurations, allows getting and updating event configurations by resource type, and supports LoRaWAN events; makes NetworkAnalyzerConfiguration a resource and adds List, Create, and Delete API support; adds FCntStart attribute support for ABP WirelessDevice. +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.13.0](service/lookoutequipment/CHANGELOG.md#v1130-2022-04-27) + * **Feature**: This release adds the following new features: 1) Introduces an option for automatic schema creation 2) Now allows ingestion of data containing the most common errors and supports automatic data cleaning 3) Introduces a new API, ListSensorStatistics, that gives further information about the ingested data +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.18.0](service/rekognition/CHANGELOG.md#v1180-2022-04-27) + * **Feature**: This release adds support to configure stream processor resources for label detection on streaming videos. The UpdateStreamProcessor API is also launched with this release; it can be used to update an existing stream processor.
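For the EC2 `v1.37.0` key pair entry above, a hedged sketch of how the new options might be used. The `KeyFormat`, `IncludePublicKey`, and `PublicKey` field names and the `types.KeyFormatPpk` enum value are assumptions inferred from the release note, not verified against the generated client.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	// Create a key pair in ppk format (assumed field and enum names).
	_, err = client.CreateKeyPair(context.TODO(), &ec2.CreateKeyPairInput{
		KeyName:   aws.String("example-key"),
		KeyFormat: types.KeyFormatPpk,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Query key pairs including the public key material (assumed field names).
	out, err := client.DescribeKeyPairs(context.TODO(), &ec2.DescribeKeyPairsInput{
		IncludePublicKey: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, kp := range out.KeyPairs {
		fmt.Println(aws.ToString(kp.KeyName), aws.ToString(kp.PublicKey))
	}
}
```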
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.29.0](service/sagemaker/CHANGELOG.md#v1290-2022-04-27) + * **Feature**: Amazon SageMaker Autopilot adds support for custom validation dataset and validation ratio through the CreateAutoMLJob and DescribeAutoMLJob APIs. + +# Release (2022-04-26) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.17.0](service/cloudfront/CHANGELOG.md#v1170-2022-04-26) + * **Feature**: CloudFront now supports the Server-Timing header in HTTP responses sent from CloudFront. You can use this header to view metrics that help you gain insights about the behavior and performance of CloudFront. To use this header, enable it in a response headers policy. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.24.2](service/glue/CHANGELOG.md#v1242-2022-04-26) + * **Documentation**: This release adds documentation for the APIs to create, read, delete, list, and batch read of AWS Glue custom patterns, and for Lake Formation configuration settings in the AWS Glue crawler. +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.0](service/ivschat/CHANGELOG.md#v100-2022-04-26) + * **Release**: New AWS service client module + * **Feature**: Adds new APIs for IVS Chat, a feature for building interactive chat experiences alongside an IVS broadcast. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.20.0](service/lightsail/CHANGELOG.md#v1200-2022-04-26) + * **Feature**: This release adds support for Lightsail load balancer HTTP to HTTPS redirect and TLS policy configuration. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.16.0](service/networkfirewall/CHANGELOG.md#v1160-2022-04-26) + * **Feature**: AWS Network Firewall now enables customers to use a customer managed AWS KMS key for the encryption of their firewall resources. +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.14.5](service/pricing/CHANGELOG.md#v1145-2022-04-26) + * **Documentation**: Documentation updates for Price List API +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.28.0](service/sagemaker/CHANGELOG.md#v1280-2022-04-26) + * **Feature**: SageMaker Inference Recommender now accepts customer KMS key ID for encryption of endpoints and compilation outputs created during inference recommendation. + +# Release (2022-04-25) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.16.3 + * **Dependency Update**: Updates the SDK's internal copy of golang.org/x/sync/singleflight to address an issue with a test failing due to timing issues +* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.12.0](credentials/CHANGELOG.md#v1120-2022-04-25) + * **Feature**: Adds Duration and Policy options that can be used when creating the stscreds.WebIdentityRoleProvider credentials provider (see the sketch below). +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.23.0](service/connect/CHANGELOG.md#v1230-2022-04-25) + * **Feature**: This release adds the SearchUsers API, which can be used to search for users within a Connect instance +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.14.4](service/gamelift/CHANGELOG.md#v1144-2022-04-25) + * **Documentation**: Documentation updates for Amazon GameLift. +* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.13.0](service/mq/CHANGELOG.md#v1130-2022-04-25) + * **Feature**: This release adds the CRITICAL_ACTION_REQUIRED broker state and the ActionRequired API property. CRITICAL_ACTION_REQUIRED informs you when your broker is degraded.
ActionRequired provides you with a code which you can use to find instructions in the Developer Guide on how to resolve the issue. +* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.0](service/rdsdata/CHANGELOG.md#v1120-2022-04-25) + * **Feature**: Adds support to receive SQL query results in the form of a simplified JSON string. This enables developers using the new JSON string format to more easily convert it to an object using popular JSON string parsing libraries. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.21.0](service/securityhub/CHANGELOG.md#v1210-2022-04-25) + * **Feature**: Security Hub now lets you opt out of auto-enabling the default standards (CIS and FSBP) in accounts that are auto-enabled with Security Hub via Security Hub's integration with AWS Organizations. + +# Release (2022-04-22) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.9.0](service/chimesdkmeetings/CHANGELOG.md#v190-2022-04-22) + * **Feature**: Includes additional exception types. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.36.0](service/ec2/CHANGELOG.md#v1360-2022-04-22) + * **Feature**: Adds support for waiters that automatically poll for a deleted NAT Gateway until it reaches the deleted state. + +# Release (2022-04-21) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.5](service/elasticache/CHANGELOG.md#v1205-2022-04-21) + * **Documentation**: Doc only update for ElastiCache +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.24.0](service/glue/CHANGELOG.md#v1240-2022-04-21) + * **Feature**: This release adds APIs to create, read, delete, list, and batch read of Glue custom entity types +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.21.0](service/iotsitewise/CHANGELOG.md#v1210-2022-04-21) + * **Feature**: This release adds 3 new batch data query APIs: BatchGetAssetPropertyValue, BatchGetAssetPropertyValueHistory and BatchGetAssetPropertyAggregates +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.7.0](service/iottwinmaker/CHANGELOG.md#v170-2022-04-21) + * **Feature**: General availability (GA) for AWS IoT TwinMaker. For more information, see https://docs.aws.amazon.com/iot-twinmaker/latest/apireference/Welcome.html +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.12.0](service/lookoutmetrics/CHANGELOG.md#v1120-2022-04-21) + * **Feature**: Added DetectMetricSetConfig API for detecting the configuration required for creating a metric set from a provided S3 data source. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.17.0](service/mediatailor/CHANGELOG.md#v1170-2022-04-21) + * **Feature**: This release introduces tiered channels and adds support for live sources. Customers using a STANDARD channel can now create programs using live sources. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.5](service/secretsmanager/CHANGELOG.md#v1155-2022-04-21) + * **Documentation**: Documentation updates for Secrets Manager +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.17.0](service/storagegateway/CHANGELOG.md#v1170-2022-04-21) + * **Feature**: This release adds support for virtual tape barcodes with a minimum length of 5 characters. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.8.0](service/wisdom/CHANGELOG.md#v180-2022-04-21) + * **Feature**: This release updates the GetRecommendations API to include a trigger event list for classifying and grouping recommendations.
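For the `credentials` `v1.12.0` entry in the 2022-04-25 release above, a minimal sketch of supplying the new Duration and Policy options. The role ARN, token path, and session policy are placeholders; the option field names follow the release note.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	provider := stscreds.NewWebIdentityRoleProvider(
		sts.NewFromConfig(cfg),
		"arn:aws:iam::123456789012:role/example", // placeholder role ARN
		stscreds.IdentityTokenFile("/var/run/secrets/token"), // placeholder token path
		func(o *stscreds.WebIdentityRoleOptions) {
			o.Duration = 30 * time.Minute // new Duration option
			// New Policy option: an inline session policy (placeholder document).
			o.Policy = aws.String(`{"Version":"2012-10-17","Statement":[]}`)
		},
	)

	// Wrap the provider in the SDK's credentials cache before use.
	cfg.Credentials = aws.NewCredentialsCache(provider)
	_ = cfg
}
```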
+ +# Release (2022-04-20) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.22.0](service/connect/CHANGELOG.md#v1220-2022-04-20) + * **Feature**: This release adds APIs to search, claim, release, list, update, and describe phone numbers. You can also use them to associate and disassociate contact flows to phone numbers. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.21.0](service/macie2/CHANGELOG.md#v1210-2022-04-20) + * **Feature**: Sensitive data findings in Amazon Macie now indicate how Macie found the sensitive data that produced a finding (originType). +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.14.0](service/mgn/CHANGELOG.md#v1140-2022-04-20) + * **Feature**: Removed required annotation from input fields in Describe operations requests. Added quotaValue to ServiceQuotaExceededException +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.20.0](service/rds/CHANGELOG.md#v1200-2022-04-20) + * **Feature**: Added a new cluster-level attribute to set the capacity range for Aurora Serverless v2 instances. + +# Release (2022-04-19) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.23.0](service/autoscaling/CHANGELOG.md#v1230-2022-04-19) + * **Feature**: EC2 Auto Scaling now adds default instance warm-up times for all scaling activities, health check replacements, and other replacement events in the Auto Scaling instance lifecycle. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.25.0](service/kendra/CHANGELOG.md#v1250-2022-04-19) + * **Feature**: Amazon Kendra now provides a data source connector for Quip. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-quip.html +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.17.0](service/kms/CHANGELOG.md#v1170-2022-04-19) + * **Feature**: Adds support for KMS keys and APIs that generate and verify HMAC codes (see the sketch below) +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.19.0](service/personalize/CHANGELOG.md#v1190-2022-04-19) + * **Feature**: Adding StartRecommender and StopRecommender APIs for Personalize. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.15.0](service/polly/CHANGELOG.md#v1150-2022-04-19) + * **Feature**: Amazon Polly adds new Austrian German voice - Hannah. Hannah is available as Neural voice only. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.23.0](service/redshift/CHANGELOG.md#v1230-2022-04-19) + * **Feature**: Introduces new fields for LogDestinationType and LogExports on EnableLogging requests and Enable/Disable/DescribeLogging responses. Customers can now select CloudWatch Logs as a destination for their Audit Logs. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.25.0](service/ssm/CHANGELOG.md#v1250-2022-04-19) + * **Feature**: Added offset support for specifying the number of days to wait after the date and time specified by a CRON expression when creating an SSM association. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.15.0](service/textract/CHANGELOG.md#v1150-2022-04-19) + * **Feature**: This release adds support for specifying and extracting information from documents using the Queries feature within Analyze Document API +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.4](service/transfer/CHANGELOG.md#v1184-2022-04-19) + * **Documentation**: This release contains corrected HomeDirectoryMappings examples for several API functions: CreateAccess, UpdateAccess, CreateUser, and UpdateUser.
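For the KMS `v1.17.0` HMAC entry above, a minimal sketch of generating and verifying an HMAC with the new GenerateMac and VerifyMac operations; the key alias is a placeholder and must refer to an HMAC KMS key.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	msg := []byte("hello")
	keyID := "alias/example-hmac-key" // placeholder HMAC key alias

	// Compute an HMAC-SHA-256 over the message with the KMS key.
	gen, err := client.GenerateMac(context.TODO(), &kms.GenerateMacInput{
		KeyId:        aws.String(keyID),
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
		Message:      msg,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Verify the MAC against the same message and key.
	ver, err := client.VerifyMac(context.TODO(), &kms.VerifyMacInput{
		KeyId:        aws.String(keyID),
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
		Message:      msg,
		Mac:          gen.Mac,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("MAC valid:", ver.MacValid)
}
```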
+* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.12.0](service/worklink/CHANGELOG.md#v1120-2022-04-19) + * **Feature**: Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK. + +# Release (2022-04-15) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.9.0](feature/dynamodb/attributevalue/CHANGELOG.md#v190-2022-04-15) + * **Feature**: Support has been added for specifying a custom time format when encoding and decoding DynamoDB AttributeValues. Use `EncoderOptions.EncodeTime` to specify a custom time encoding function, and use `DecoderOptions.DecodeTime` for specifying how to handle the corresponding AttributeValues using the format (see the sketch below). Thank you [Pablo Lopez](https://github.com/plopezlpz) for this contribution. +* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.9.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v190-2022-04-15) + * **Feature**: Support has been added for specifying a custom time format when encoding and decoding DynamoDB AttributeValues. Use `EncoderOptions.EncodeTime` to specify a custom time encoding function, and use `DecoderOptions.DecodeTime` for specifying how to handle the corresponding AttributeValues using the format. Thank you [Pablo Lopez](https://github.com/plopezlpz) for this contribution. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.15.0](service/athena/CHANGELOG.md#v1150-2022-04-15) + * **Feature**: This release adds the subfields ErrorMessage and Retryable to the AthenaError response object in the GetQueryExecution API when a query fails. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.19.0](service/lightsail/CHANGELOG.md#v1190-2022-04-15) + * **Feature**: This release adds support to describe the synchronization status of the account-level block public access feature for your Amazon Lightsail buckets. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.19.0](service/rds/CHANGELOG.md#v1190-2022-04-15) + * **Feature**: Removes Amazon RDS on VMware with the deletion of APIs related to Custom Availability Zones and Media installation + +# Release (2022-04-14) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.15.0](service/appflow/CHANGELOG.md#v1150-2022-04-14) + * **Feature**: Enables users to pass custom token URL parameters for OAuth2 authentication when creating a connector profile +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.16.0](service/appstream/CHANGELOG.md#v1160-2022-04-14) + * **Feature**: Includes updates for create and update fleet APIs to manage the session scripts locations for Elastic fleets. +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.18.0](service/batch/CHANGELOG.md#v1180-2022-04-14) + * **Feature**: Enables configuration updates for compute environments with BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.18.1](service/cloudwatch/CHANGELOG.md#v1181-2022-04-14) + * **Documentation**: Updates documentation for additional statistics in CloudWatch Metric Streams. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.35.1](service/ec2/CHANGELOG.md#v1351-2022-04-14) + * **Documentation**: Documentation updates for Amazon EC2.
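For the 2022-04-15 `feature/dynamodb/attributevalue` entry above, a minimal sketch of a custom `EncodeTime` function that stores `time.Time` values as RFC 3339 strings. The exact signature of `EncoderOptions.EncodeTime` is assumed from the release note and may differ slightly in the shipped module.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	enc := attributevalue.NewEncoder(func(o *attributevalue.EncoderOptions) {
		// Assumed signature per the release note: encode time.Time values
		// as RFC 3339 strings instead of the package default.
		o.EncodeTime = func(t time.Time) (types.AttributeValue, error) {
			return &types.AttributeValueMemberS{Value: t.Format(time.RFC3339)}, nil
		}
	})

	av, err := enc.Encode(time.Date(2022, 4, 15, 0, 0, 0, 0, time.UTC))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%#v\n", av)
}
```

A matching `DecoderOptions.DecodeTime` would parse the same format back into a `time.Time` when unmarshaling.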
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.23.0](service/glue/CHANGELOG.md#v1230-2022-04-14) + * **Feature**: Adds Auto Scaling for Glue version 3.0 and later jobs to dynamically scale compute resources. This SDK change provides customers with the auto-scaled DPU usage + +# Release (2022-04-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.18.0](service/cloudwatch/CHANGELOG.md#v1180-2022-04-13) + * **Feature**: Adds support for additional statistics in CloudWatch Metric Streams. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.23.0](service/fsx/CHANGELOG.md#v1230-2022-04-13) + * **Feature**: This release adds support for deploying FSx for ONTAP file systems in a single Availability Zone. + +# Release (2022-04-12) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.17.0](service/devopsguru/CHANGELOG.md#v1170-2022-04-12) + * **Feature**: This release adds a new API, DeleteInsight, to delete an insight along with the associated anomalies, events, and recommendations. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.35.0](service/ec2/CHANGELOG.md#v1350-2022-04-12) + * **Feature**: X2idn and X2iedn instances are powered by 3rd generation Intel Xeon Scalable processors with an all-core turbo frequency up to 3.5 GHz. C6a instances are powered by 3rd generation AMD EPYC processors. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.17.0](service/efs/CHANGELOG.md#v1170-2022-04-12) + * **Feature**: Amazon EFS adds support for a ThrottlingException when using the CreateAccessPoint API if the account is nearing the AccessPoint limit (120). +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.6.0](service/iottwinmaker/CHANGELOG.md#v160-2022-04-12) + * **Feature**: This release adds the following new features: 1) ListEntities API now supports search using ExternalId. 2) The BatchPutPropertyValue and GetPropertyValueHistory APIs now allow users to represent time with sub-second precision. +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.15.4](service/kinesis/CHANGELOG.md#v1154-2022-04-12) + * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.14.4](service/lexruntimev2/CHANGELOG.md#v1144-2022-04-12) + * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.26.5](service/s3/CHANGELOG.md#v1265-2022-04-12) + * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.6.4](service/transcribestreaming/CHANGELOG.md#v164-2022-04-12) + * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. + +# Release (2022-04-11) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.6.0](service/amplifyuibuilder/CHANGELOG.md#v160-2022-04-11) + * **Feature**: In this release, we have added the ability to bind events to component level actions.
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.12.0](service/apprunner/CHANGELOG.md#v1120-2022-04-11) + * **Feature**: This release adds tracing for App Runner services with X-Ray using AWS Distro for OpenTelemetry. New APIs: CreateObservabilityConfiguration, DescribeObservabilityConfiguration, ListObservabilityConfigurations, and DeleteObservabilityConfiguration. Updated APIs: CreateService and UpdateService. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.18.0](service/workspaces/CHANGELOG.md#v1180-2022-04-11) + * **Feature**: Added API support that allows customers to create GPU-enabled WorkSpaces using EC2 G4dn instances. + +# Release (2022-04-08) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.22.0](service/mediaconvert/CHANGELOG.md#v1220-2022-04-08) + * **Feature**: AWS Elemental MediaConvert SDK has added support for the pass-through of WebVTT styling to WebVTT outputs, pass-through of KLV metadata to supported formats, and improved filter support for processing 444/RGB content. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.17.0](service/mediapackagevod/CHANGELOG.md#v1170-2022-04-08) + * **Feature**: This release adds ScteMarkersSource as an available field for Dash Packaging Configurations. When set to MANIFEST, MediaPackage will source the SCTE-35 markers from the manifest. When set to SEGMENTS, MediaPackage will source the SCTE-35 markers from the segments. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.19.0](service/wafv2/CHANGELOG.md#v1190-2022-04-08) + * **Feature**: Add a new CurrentDefaultVersion field to ListAvailableManagedRuleGroupVersions API response; add a new VersioningSupported boolean to each ManagedRuleGroup returned from ListAvailableManagedRuleGroups API response. + +# Release (2022-04-07) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/internal/v4a`: [v1.0.0](internal/v4a/CHANGELOG.md#v100-2022-04-07) + * **Release**: New internal v4a signing module location. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.18.0](service/docdb/CHANGELOG.md#v1180-2022-04-07) + * **Feature**: Added support to enable/disable performance insights when creating or modifying db instances +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.16.0](service/eventbridge/CHANGELOG.md#v1160-2022-04-07) + * **Feature**: Adds new EventBridge Endpoint resources for disaster recovery, multi-region failover, and cross-region replication capabilities to help you build resilient event-driven applications. +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.18.0](service/personalize/CHANGELOG.md#v1180-2022-04-07) + * **Feature**: This release provides tagging support in AWS Personalize. +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.14.4](service/pi/CHANGELOG.md#v1144-2022-04-07) + * **Documentation**: Adds support for DocumentDB to the Performance Insights API. 
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.27.0](service/sagemaker/CHANGELOG.md#v1270-2022-04-07) + * **Feature**: Amazon SageMaker Notebook Instances now support G5 instance types + +# Release (2022-04-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.21.0](service/configservice/CHANGELOG.md#v1210-2022-04-06) + * **Feature**: Add resourceType enums for AWS::EMR::SecurityConfiguration and AWS::SageMaker::CodeRepository +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.24.0](service/kendra/CHANGELOG.md#v1240-2022-04-06) + * **Feature**: Amazon Kendra now provides a data source connector for Box. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-box.html +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.22.0](service/lambda/CHANGELOG.md#v1220-2022-04-06) + * **Feature**: This release adds new APIs for creating and managing Lambda Function URLs and adds a new FunctionUrlAuthType parameter to the AddPermission API. Customers can use Function URLs to create built-in HTTPS endpoints on their functions. +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.7.0](service/panorama/CHANGELOG.md#v170-2022-04-06) + * **Feature**: Added Brand field to device listings. + +# Release (2022-04-05) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.15.0](service/datasync/CHANGELOG.md#v1150-2022-04-05) + * **Feature**: AWS DataSync now supports Amazon FSx for OpenZFS locations. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.22.0](service/fsx/CHANGELOG.md#v1220-2022-04-05) + * **Feature**: Provides customers more visibility into file system status by adding a new "Misconfigured Unavailable" status for Amazon FSx for Windows File Server. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.21.4](service/s3control/CHANGELOG.md#v1214-2022-04-05) + * **Documentation**: Documentation-only update for doc bug fixes for the S3 Control API docs. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.20.0](service/securityhub/CHANGELOG.md#v1200-2022-04-05) + * **Feature**: Added additional ASFF details for RdsSecurityGroup, AutoScalingGroup, ElbLoadBalancer, CodeBuildProject, and RedshiftCluster. + +# Release (2022-04-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.24.0](service/iot/CHANGELOG.md#v1240-2022-04-04) + * **Feature**: AWS IoT - AWS IoT Device Defender adds support to list metric datapoints collected for IoT devices through the ListMetricValues API +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.13.0](service/proton/CHANGELOG.md#v1130-2022-04-04) + * **Feature**: SDK release to support tagging for the AWS Proton Repository resource +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.0](service/servicecatalog/CHANGELOG.md#v1140-2022-04-04) + * **Feature**: This release adds ProvisioningArtifactOutputKeys to DescribeProvisioningParameters to reference the outputs of a Provisioned Product and deprecates ProvisioningArtifactOutputs. +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.12.4](service/sms/CHANGELOG.md#v1124-2022-04-04) + * **Documentation**: Revised product update notice for SMS console deprecation. + +# Release (2022-04-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.21.0](service/connect/CHANGELOG.md#v1210-2022-04-01) + * **Feature**: This release updates these APIs: UpdateInstanceAttribute, DescribeInstanceAttribute and ListInstanceAttributes.
You can use it to programmatically enable/disable multi-party conferencing using attribute type MULTI_PARTY_CONFERENCING on the specified Amazon Connect instance. + +# Release (2022-03-31) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.8.4](feature/dynamodb/attributevalue/CHANGELOG.md#v184-2022-03-31) + * **Documentation**: Fixes documentation typos in Number type's helper methods +* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.8.4](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v184-2022-03-31) + * **Documentation**: Fixes documentation typos in Number type's helper methods +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.3](service/auditmanager/CHANGELOG.md#v1183-2022-03-31) + * **Documentation**: This release adds documentation updates for Audit Manager. The updates provide data deletion guidance when a customer deregisters Audit Manager or deregisters a delegated administrator. +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.9.0](service/cloudcontrol/CHANGELOG.md#v190-2022-03-31) + * **Feature**: SDK release for Cloud Control API in Amazon Web Services China (Beijing) Region, operated by Sinnet, and Amazon Web Services China (Ningxia) Region, operated by NWCD +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.20.0](service/databrew/CHANGELOG.md#v1200-2022-03-31) + * **Feature**: This AWS Glue DataBrew release adds a feature to support ORC as an input format. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.8.0](service/grafana/CHANGELOG.md#v180-2022-03-31) + * **Feature**: This release adds tagging support to the Managed Grafana service. New APIs: TagResource, UntagResource and ListTagsForResource. Updates: adds an optional field, tags, to support tagging while calling CreateWorkspace. +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.0.0](service/pinpointsmsvoicev2/CHANGELOG.md#v100-2022-03-31) + * **Release**: New AWS service client module + * **Feature**: Amazon Pinpoint now offers a version 2.0 suite of SMS and voice APIs, providing increased control over sending and configuration. This release is a new SDK for sending SMS and voice messages called PinpointSMSVoiceV2. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.9.0](service/route53recoverycluster/CHANGELOG.md#v190-2022-03-31) + * **Feature**: This release adds a new API "ListRoutingControls" to list routing control states using the highly reliable Route 53 ARC data plane endpoints. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.17.0](service/workspaces/CHANGELOG.md#v1170-2022-03-31) + * **Feature**: Added APIs that allow you to customize the logo, login message, and help links in the WorkSpaces client login page.
To learn more, visit https://docs.aws.amazon.com/workspaces/latest/adminguide/customize-branding.html + +# Release (2022-03-30) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.34.0](service/ec2/CHANGELOG.md#v1340-2022-03-30) + * **Feature**: This release simplifies the auto-recovery configuration process, enabling customers to set the recovery behavior to disabled or default +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.17.0](service/fms/CHANGELOG.md#v1170-2022-03-30) + * **Feature**: AWS Firewall Manager now supports the configuration of third-party policies that can use either the centralized or distributed deployment models. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.21.0](service/fsx/CHANGELOG.md#v1210-2022-03-30) + * **Feature**: This release adds support for modifying throughput capacity for FSx for ONTAP file systems. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.23.3](service/iot/CHANGELOG.md#v1233-2022-03-30) + * **Documentation**: Doc only update for IoT that fixes customer-reported issues. +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.12.0](service/iotdataplane/CHANGELOG.md#v1120-2022-03-30) + * **Feature**: Update the default AWS IoT Core Data Plane endpoint from VeriSign signed to ATS signed. If you have firewalls with strict egress rules, configure the rules to grant you access to data-ats.iot.[region].amazonaws.com or data-ats.iot.[region].amazonaws.com.cn. + +# Release (2022-03-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.15.0](service/organizations/CHANGELOG.md#v1150-2022-03-29) + * **Feature**: This release provides the new CloseAccount API that enables principals in the management account to close any member account within an organization. + +# Release (2022-03-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.3](service/acmpca/CHANGELOG.md#v1173-2022-03-28) + * **Documentation**: Updating service name entities +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.20.0](service/medialive/CHANGELOG.md#v1200-2022-03-28) + * **Feature**: This release adds support for selecting a maintenance window. + +# Release (2022-03-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.17.0](service/batch/CHANGELOG.md#v1170-2022-03-25) + * **Feature**: Bug Fix: Fixed a bug where shapes were marked as unboxed and were not serialized and sent over the wire, causing an API error from the service. + * This is a breaking change, and has been accepted due to the API operation not being usable due to the members modeled as unboxed (aka value) types. The update changes the members to boxed (aka pointer) types so that the zero value of the members can be handled correctly by the SDK and service. Your application will fail to compile with the updated module. To work around this, you'll need to update your application to use pointer types for the impacted members (see the sketch below). +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.33.0](service/ec2/CHANGELOG.md#v1330-2022-03-25) + * **Feature**: This release adds support for Amazon VPC Reachability Analyzer to analyze a path through a Transit Gateway. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.24.0](service/ssm/CHANGELOG.md#v1240-2022-03-25) + * **Feature**: This Patch Manager release supports creating, updating, and deleting Patch Baselines for Rocky Linux OS.
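For the Batch `v1.17.0` breaking change above: migrating from value members to pointer members is mechanical with the SDK's `aws` helper functions. A minimal sketch with hypothetical member values; the actual affected members are listed in the module's changelog.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// Members that were modeled as value types are now pointers: wrap
	// literals with the aws helper functions when populating them, and
	// dereference safely with the aws.To* helpers when reading them back.
	desiredVCpus := aws.Int32(4)  // *int32 where the member was int32
	queueName := aws.String("q1") // *string where the member was string

	fmt.Println(aws.ToInt32(desiredVCpus), aws.ToString(queueName))
}
```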
+ +# Release (2022-03-24) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.20.0](service/configservice/CHANGELOG.md#v1200-2022-03-24) + * **Feature**: Added new APIs GetCustomRulePolicy and GetOrganizationCustomRulePolicy, and updated existing APIs PutConfigRule, DescribeConfigRule, DescribeConfigRuleEvaluationStatus, PutOrganizationConfigRule, DescribeConfigRule to support a new feature for building AWS Config rules with AWS CloudFormation Guard +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.21.0](service/lambda/CHANGELOG.md#v1210-2022-03-24) + * **Feature**: Adds support for increased ephemeral storage (/tmp) up to 10 GB for Lambda functions. Customers can now provision up to 10 GB of ephemeral storage per function instance, a 20x increase over the previous limit of 512 MB. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.19.0](service/transcribe/CHANGELOG.md#v1190-2022-03-24) + * **Feature**: This release adds an additional parameter for subtitling with Amazon Transcribe batch jobs: outputStartIndex. + +# Release (2022-03-23) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.16.0 + * **Feature**: Updates CredentialsCache to make use of two new optional CredentialsProvider interfaces that give the cache per-provider behavior for how it handles credentials that fail to refresh and for adjusting expiry time (see the sketch below). See [aws.CredentialsCache](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#CredentialsCache) for more details. + * **Feature**: Updates the `ec2rolecreds` package's `Provider` to implement support for the CredentialsCache's new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy. +* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.11.0](credentials/CHANGELOG.md#v1110-2022-03-23) + * **Feature**: Updates the `ec2rolecreds` package's `Provider` to implement support for the CredentialsCache's new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.0](service/auditmanager/CHANGELOG.md#v1180-2022-03-23) + * **Feature**: This release updates 1 API parameter, the SnsArn attribute. The character length and regex pattern for the SnsArn attribute have been updated, which enables you to deselect an SNS topic when using the UpdateSettings operation. +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.15.0](service/ebs/CHANGELOG.md#v1150-2022-03-23) + * **Feature**: Increased the maximum supported value for the Timeout parameter of the StartSnapshot API from 60 minutes to 4320 minutes. Changed the HTTP error code for ConflictException from 503 to 409. +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.2](service/elasticache/CHANGELOG.md#v1202-2022-03-23) + * **Documentation**: Doc only update for ElastiCache +* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.0.0](service/gamesparks/CHANGELOG.md#v100-2022-03-23) + * **Release**: New AWS service client module + * **Feature**: Released the preview of Amazon GameSparks, a fully managed AWS service that provides a multi-service backend for game developers.
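For the `v1.16.0` CredentialsCache entry above, a sketch of a custom provider opting into the fail-refresh strategy so the cache can fall back to previously issued credentials. The `HandleFailToRefresh` method signature is assumed from the release note and may differ from the shipped interface.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// staticProvider is a stand-in credentials provider used only for this sketch.
type staticProvider struct{ creds aws.Credentials }

func (p staticProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
	return p.creds, nil
}

// HandleFailToRefresh (assumed signature) lets the cache briefly extend and
// reuse the previously cached credentials when a refresh attempt fails.
func (p staticProvider) HandleFailToRefresh(ctx context.Context, prev aws.Credentials, err error) (aws.Credentials, error) {
	log.Printf("refresh failed, reusing previous credentials: %v", err)
	prev.Expires = time.Now().Add(5 * time.Minute)
	return prev, nil
}

func main() {
	cache := aws.NewCredentialsCache(staticProvider{
		creds: aws.Credentials{AccessKeyID: "EXAMPLE", SecretAccessKey: "EXAMPLE"},
	})
	if _, err := cache.Retrieve(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```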
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.22.0](service/redshift/CHANGELOG.md#v1220-2022-03-23) + * **Feature**: This release adds a new [--encrypted | --no-encrypted] field in restore-from-cluster-snapshot API. Customers can now restore an unencrypted snapshot to a cluster encrypted with AWS Managed Key or their own KMS key. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.23.0](service/ssm/CHANGELOG.md#v1230-2022-03-23) + * **Feature**: Update AddTagsToResource, ListTagsForResource, and RemoveTagsFromResource APIs to reflect the support for tagging Automation resources. Includes other minor documentation updates. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.1](service/transfer/CHANGELOG.md#v1181-2022-03-23) + * **Documentation**: Documentation updates for AWS Transfer Family to describe how to remove an associated workflow from a server. + +# Release (2022-03-22) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.18.0](service/costexplorer/CHANGELOG.md#v1180-2022-03-22) + * **Feature**: Added three new APIs to support tagging and resource-level authorization on Cost Explorer resources: TagResource, UntagResource, ListTagsForResource. Added optional parameters to CreateCostCategoryDefinition, CreateAnomalySubscription and CreateAnomalyMonitor APIs to support Tag On Create. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.2](service/ecs/CHANGELOG.md#v1182-2022-03-22) + * **Documentation**: Documentation only update to address tickets +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.16.0](service/lakeformation/CHANGELOG.md#v1160-2022-03-22) + * **Feature**: The release fixes the incorrect permissions called out in the documentation - DESCRIBE_TAG, ASSOCIATE_TAG, DELETE_TAG, ALTER_TAG. This trebuchet release fixes the corresponding SDK and documentation. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.16.0](service/location/CHANGELOG.md#v1160-2022-03-22) + * **Feature**: Amazon Location Service now includes a MaxResults parameter for GetDevicePositionHistory requests. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.14.0](service/polly/CHANGELOG.md#v1140-2022-03-22) + * **Feature**: Amazon Polly adds new Catalan voice - Arlet. Arlet is available as Neural voice only. + +# Release (2022-03-21) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.8.0](service/chimesdkmeetings/CHANGELOG.md#v180-2022-03-21) + * **Feature**: Add support for media replication to link multiple WebRTC media sessions together to reach larger and global audiences. Participants connected to a replica session can be granted access to join the primary session and can switch sessions with their existing WebRTC connection +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.17.0](service/ecr/CHANGELOG.md#v1170-2022-03-21) + * **Feature**: This release includes a fix in the DescribeImageScanFindings paginated output. +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.16.0](service/mediaconnect/CHANGELOG.md#v1160-2022-03-21) + * **Feature**: This release adds support for selecting a maintenance window. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.21.0](service/quicksight/CHANGELOG.md#v1210-2022-03-21) + * **Feature**: AWS QuickSight Service Features - Expand public API support for group management. 
+* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.16.1](service/ram/CHANGELOG.md#v1161-2022-03-21) + * **Documentation**: Document improvements to the RAM API operations and parameter descriptions. + +# Release (2022-03-18) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.22.0](service/glue/CHANGELOG.md#v1220-2022-03-18) + * **Feature**: Added 9 new APIs for AWS Glue Interactive Sessions: ListSessions, StopSession, CreateSession, GetSession, DeleteSession, RunStatement, GetStatement, ListStatements, CancelStatement + +# Release (2022-03-16) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.0](service/acmpca/CHANGELOG.md#v1170-2022-03-16) + * **Feature**: AWS Certificate Manager (ACM) Private Certificate Authority (CA) now supports customizable certificate subject names and extensions. +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.13.0](service/amplifybackend/CHANGELOG.md#v1130-2022-03-16) + * **Feature**: Adding the ability to customize Cognito verification messages for email and SMS in CreateBackendAuth and UpdateBackendAuth. Adding deprecation documentation for ForgotPassword in CreateBackendAuth and UpdateBackendAuth +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.0.0](service/billingconductor/CHANGELOG.md#v100-2022-03-16) + * **Release**: New AWS service client module + * **Feature**: This is the initial SDK release for AWS Billing Conductor. The AWS Billing Conductor is a customizable billing service, allowing you to customize your billing data to match your desired business structure. +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.13.0](service/s3outposts/CHANGELOG.md#v1130-2022-03-16) + * **Feature**: S3 on Outposts is releasing a new API, ListSharedEndpoints, that lists all endpoints associated with S3 on Outposts that have been shared via Resource Access Manager (RAM). +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.13.0](service/ssmincidents/CHANGELOG.md#v1130-2022-03-16) + * **Feature**: Removed incorrect validation pattern for IncidentRecordSource.invokedBy + +# Release (2022-03-15) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.15.0](service/cognitoidentityprovider/CHANGELOG.md#v1150-2022-03-15) + * **Feature**: Updated EmailConfigurationType and SmsConfigurationType to reflect that you can now choose Amazon SES and Amazon SNS resources in the same Region. +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.15.0](service/dataexchange/CHANGELOG.md#v1150-2022-03-15) + * **Feature**: This feature enables data providers to use the RevokeRevision operation to revoke subscriber access to a given revision. Subscribers are unable to interact with assets within a revoked revision. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.32.0](service/ec2/CHANGELOG.md#v1320-2022-03-15) + * **Feature**: Adds the Cascade parameter to the DeleteIpam API. Customers can use this parameter to automatically delete their IPAM, including non-default scopes, pools, CIDRs, and allocations (see the sketch below). There mustn't be any pools provisioned in the default public scope to use this parameter.
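For the EC2 `v1.32.0` entry above, a minimal sketch of calling DeleteIpam with the new Cascade parameter; the IPAM ID is a placeholder.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	// Cascade deletes the IPAM together with its non-default scopes, pools,
	// CIDRs, and allocations.
	_, err = client.DeleteIpam(context.TODO(), &ec2.DeleteIpamInput{
		IpamId:  aws.String("ipam-0123456789abcdef0"), // placeholder
		Cascade: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```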
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.1](service/ecs/CHANGELOG.md#v1181-2022-03-15) + * **Documentation**: Documentation only update to address tickets +* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.0.2](service/keyspaces/CHANGELOG.md#v102-2022-03-15) + * **Documentation**: Fixing formatting issues in CLI and SDK documentation +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.15.1](service/location/CHANGELOG.md#v1151-2022-03-15) + * **Documentation**: New HERE styles "VectorHereExplore" and "VectorHereExploreTruck". +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.18.1](service/rds/CHANGELOG.md#v1181-2022-03-15) + * **Documentation**: Various documentation improvements +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.17.0](service/robomaker/CHANGELOG.md#v1170-2022-03-15) + * **Feature**: This release deprecates ROS, Ubuntu and Gazebo from RoboMaker Simulation Service Software Suites in favor of user-supplied containers and Relaxed Software Suites. + +# Release (2022-03-14) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.19.0](service/configservice/CHANGELOG.md#v1190-2022-03-14) + * **Feature**: Add resourceType enums for AWS::ECR::PublicRepository and AWS::EC2::LaunchTemplate +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.1](service/elasticache/CHANGELOG.md#v1201-2022-03-14) + * **Documentation**: Doc only update for ElastiCache +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.23.0](service/kendra/CHANGELOG.md#v1230-2022-03-14) + * **Feature**: Amazon Kendra now provides a data source connector for Slack. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-slack.html +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.14.0](service/timestreamquery/CHANGELOG.md#v1140-2022-03-14) + * **Feature**: Amazon Timestream Scheduled Queries now support Timestamp datatype in a multi-measure record. + +# Release (2022-03-11) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.20.0](service/chime/CHANGELOG.md#v1200-2022-03-11) + * **Feature**: Chime VoiceConnector Logging APIs will now support MediaMetricLogs. Also CreateMeetingDialOut now returns AccessDeniedException. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.20.0](service/connect/CHANGELOG.md#v1200-2022-03-11) + * **Feature**: This release adds support for enabling Rich Messaging when starting a new chat session via the StartChatContact API. Rich Messaging enables the following formatting options: bold, italics, hyperlinks, bulleted lists, and numbered lists. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.20.0](service/lambda/CHANGELOG.md#v1200-2022-03-11) + * **Feature**: Adds PrincipalOrgID support to the AddPermission API. Customers can use it to manage permissions for Lambda functions at the AWS Organizations level (see the sketch below). +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.18.0](service/outposts/CHANGELOG.md#v1180-2022-03-11) + * **Feature**: This release adds address filters for listSites +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.1](service/secretsmanager/CHANGELOG.md#v1151-2022-03-11) + * **Documentation**: Documentation updates for Secrets Manager.
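For the Lambda `v1.20.0` entry above, a minimal sketch of granting invoke permission to every principal in an AWS Organization via the new PrincipalOrgID parameter; the function name, statement ID, and organization ID are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := lambda.NewFromConfig(cfg)

	// Allow any principal, but only if it belongs to the given organization.
	_, err = client.AddPermission(context.TODO(), &lambda.AddPermissionInput{
		FunctionName:   aws.String("my-function"),
		StatementId:    aws.String("org-wide-invoke"),
		Action:         aws.String("lambda:InvokeFunction"),
		Principal:      aws.String("*"),
		PrincipalOrgID: aws.String("o-a1b2c3d4e5"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```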
+* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.6.0](service/transcribestreaming/CHANGELOG.md#v160-2022-03-11)
+  * **Feature**: The Amazon Transcribe StartTranscription API now supports additional parameters for the Language Identification feature: customVocabularies and customFilterVocabularies
+
+# Release (2022-03-10)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.20.0](service/lexmodelsv2/CHANGELOG.md#v1200-2022-03-10)
+  * **Feature**: This release makes slotTypeId an optional parameter in the CreateSlot and UpdateSlot APIs in Amazon Lex V2 for model building. Customers can create and update slots without specifying a slot type ID.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.18.0](service/transcribe/CHANGELOG.md#v1180-2022-03-10)
+  * **Feature**: Documentation fix for the API `StartMedicalTranscriptionJobRequest`, now showing the minimum sample rate as 16 kHz
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.0](service/transfer/CHANGELOG.md#v1180-2022-03-10)
+  * **Feature**: Adding more descriptive error types for managed workflows
+
+# Release (2022-03-09)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.17.0](service/comprehend/CHANGELOG.md#v1170-2022-03-09)
+  * **Feature**: Amazon Comprehend now supports extracting the sentiment associated with entities such as brands, products, and services from text documents.
+
+# Release (2022-03-08.3)
+
+* No change notes available for this release.
+
+# Release (2022-03-08.2)
+
+* No change notes available for this release.
+
+# Release (2022-03-08)
+
+## General Highlights
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.11.0](service/amplify/CHANGELOG.md#v1110-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.5.0](service/amplifyuibuilder/CHANGELOG.md#v150-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.14.0](service/appflow/CHANGELOG.md#v1140-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.11.0](service/apprunner/CHANGELOG.md#v1110-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.14.0](service/athena/CHANGELOG.md#v1140-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.15.0](service/braket/CHANGELOG.md#v1150-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.7.0](service/chimesdkmeetings/CHANGELOG.md#v170-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.15.0](service/cloudtrail/CHANGELOG.md#v1150-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.19.0](service/connect/CHANGELOG.md#v1190-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.16.0](service/devopsguru/CHANGELOG.md#v1160-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.31.0](service/ec2/CHANGELOG.md#v1310-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.16.0](service/ecr/CHANGELOG.md#v1160-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.0](service/ecs/CHANGELOG.md#v1180-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.0](service/elasticache/CHANGELOG.md#v1200-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.10.0](service/finspacedata/CHANGELOG.md#v1100-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.12.0](service/fis/CHANGELOG.md#v1120-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.20.0](service/fsx/CHANGELOG.md#v1200-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.14.0](service/gamelift/CHANGELOG.md#v1140-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.15.0](service/greengrassv2/CHANGELOG.md#v1150-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.1.0](service/internal/checksum/CHANGELOG.md#v110-2022-03-08)
+  * **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. Previously, the SDK always performed output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606)
+* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.8.0](service/kafkaconnect/CHANGELOG.md#v180-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.22.0](service/kendra/CHANGELOG.md#v1220-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.0.0](service/keyspaces/CHANGELOG.md#v100-2022-03-08)
+  * **Release**: New AWS service client module
+* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.14.0](service/macie/CHANGELOG.md#v1140-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.15.0](service/mediapackage/CHANGELOG.md#v1150-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.13.0](service/mgn/CHANGELOG.md#v1130-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.5.0](service/migrationhubrefactorspaces/CHANGELOG.md#v150-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.12.0](service/mq/CHANGELOG.md#v1120-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.6.0](service/panorama/CHANGELOG.md#v160-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.18.0](service/rds/CHANGELOG.md#v1180-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.8.0](service/route53recoverycluster/CHANGELOG.md#v180-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.12.0](service/servicecatalogappregistry/CHANGELOG.md#v1120-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.18.0](service/sqs/CHANGELOG.md#v1180-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.16.0](service/sts/CHANGELOG.md#v1160-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.14.0](service/synthetics/CHANGELOG.md#v1140-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.13.0](service/timestreamquery/CHANGELOG.md#v1130-2022-03-08)
+  * **Documentation**: Updated service client model to latest release.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.17.0](service/transfer/CHANGELOG.md#v1170-2022-03-08)
+  * **Feature**: Updated service client model to latest release.
+
+# Release (2022-02-24.2)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.21.0](service/autoscaling/CHANGELOG.md#v1210-2022-02-242)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.18.0](service/databrew/CHANGELOG.md#v1180-2022-02-242)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.15.0](service/fms/CHANGELOG.md#v1150-2022-02-242)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.17.0](service/lightsail/CHANGELOG.md#v1170-2022-02-242)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.19.0](service/route53/CHANGELOG.md#v1190-2022-02-242)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.20.0](service/s3control/CHANGELOG.md#v1200-2022-02-242)
+  * **Feature**: API client updated
+
+# Release (2022-02-24)
+
+## General Highlights
+* **Feature**: Adds RetryMaxAttempts and RetryMode to the API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Also adds a new retry mode, `Adaptive`, an experimental mode that adds client-side rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options; a configuration sketch appears at the end of this section.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Bug Fix**: Fixes the AWS SigV4 signer to trim header values' whitespace when computing the canonical headers block of the string to sign.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.14.0
+  * **Feature**: Add new AdaptiveMode retryer to the aws/retry package. This new retryer uses dynamic token bucketing with client-side rate limiting when throttle responses are received.
+  * **Feature**: Adds a new interface, aws.RetryerV2, replacing aws.Retryer and deprecating the GetInitialToken method in favor of GetAttemptToken so a Context can be provided. The SDK will use aws.RetryerV2 internally; existing aws.Retryer implementations are wrapped as aws.RetryerV2 automatically.
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.14.0](config/CHANGELOG.md#v1140-2022-02-24)
+  * **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API clients initialize their default retryer, if a custom retryer has not been specified. See the [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about these new options and how to use them.
+  * **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. Usage is the same as the `AWS_CA_BUNDLE` environment variable, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589)
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.9.0](credentials/CHANGELOG.md#v190-2022-02-24)
+  * **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575)
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.7.0](feature/dynamodb/attributevalue/CHANGELOG.md#v170-2022-02-24)
+  * **Feature**: Fixes [#645](https://github.com/aws/aws-sdk-go-v2/issues/645), [#411](https://github.com/aws/aws-sdk-go-v2/issues/411) by adding support for (un)marshaling AttributeValue maps to Go maps with key types of string, number, bool, and types implementing the encoding.Text(un)Marshaler interfaces
+  * **Bug Fix**: Fixes [#1569](https://github.com/aws/aws-sdk-go-v2/issues/1569), inconsistent serialization of Go struct field names
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.4.0](feature/dynamodb/expression/CHANGELOG.md#v140-2022-02-24)
+  * **Feature**: Add support for expression names with dots via new NameBuilder function NameNoDotSplit, related to [aws/aws-sdk-go#2570](https://github.com/aws/aws-sdk-go/issues/2570)
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.7.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v170-2022-02-24)
+  * **Feature**: Fixes [#645](https://github.com/aws/aws-sdk-go-v2/issues/645), [#411](https://github.com/aws/aws-sdk-go-v2/issues/411) by adding support for (un)marshaling AttributeValue maps to Go maps with key types of string, number, bool, and types implementing the encoding.Text(un)Marshaler interfaces
+  * **Bug Fix**: Fixes [#1569](https://github.com/aws/aws-sdk-go-v2/issues/1569), inconsistent serialization of Go struct field names
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.14.0](service/accessanalyzer/CHANGELOG.md#v1140-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.5.0](service/account/CHANGELOG.md#v150-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/acm`: 
[v1.13.0](service/acm/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.15.0](service/acmpca/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.13.0](service/alexaforbusiness/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.13.0](service/amp/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.10.0](service/amplify/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.11.0](service/amplifybackend/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.4.0](service/amplifyuibuilder/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.14.0](service/apigateway/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.9.0](service/apigatewaymanagementapi/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.11.0](service/apigatewayv2/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.11.0](service/appconfig/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.3.0](service/appconfigdata/CHANGELOG.md#v130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.13.0](service/appflow/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.12.0](service/appintegrations/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.14.0](service/applicationautoscaling/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.8.0](service/applicationcostprofiler/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.11.0](service/applicationdiscoveryservice/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.14.0](service/applicationinsights/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.12.0](service/appmesh/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.10.0](service/apprunner/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.14.0](service/appstream/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.13.0](service/appsync/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.13.0](service/athena/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.16.0](service/auditmanager/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.20.0](service/autoscaling/CHANGELOG.md#v1200-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.11.0](service/autoscalingplans/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.14.0](service/backup/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.4.0](service/backupgateway/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.15.0](service/batch/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.14.0](service/braket/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.11.0](service/budgets/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.18.0](service/chime/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.8.0](service/chimesdkidentity/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.6.0](service/chimesdkmeetings/CHANGELOG.md#v160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.8.0](service/chimesdkmessaging/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.15.0](service/cloud9/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.7.0](service/cloudcontrol/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.11.0](service/clouddirectory/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.19.0](service/cloudformation/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.15.0](service/cloudfront/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.11.0](service/cloudhsm/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.12.0](service/cloudhsmv2/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.12.0](service/cloudsearch/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.10.0](service/cloudsearchdomain/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.14.0](service/cloudtrail/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.16.0](service/cloudwatch/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: 
[v1.13.0](service/cloudwatchevents/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.14.0](service/cloudwatchlogs/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.11.0](service/codeartifact/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.18.0](service/codebuild/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.12.0](service/codecommit/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.13.0](service/codedeploy/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.11.0](service/codeguruprofiler/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.14.0](service/codegurureviewer/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.12.0](service/codepipeline/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.10.0](service/codestar/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.12.0](service/codestarconnections/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.10.0](service/codestarnotifications/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.12.0](service/cognitoidentity/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.13.0](service/cognitoidentityprovider/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.10.0](service/cognitosync/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.15.0](service/comprehend/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.12.0](service/comprehendmedical/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.16.0](service/computeoptimizer/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.17.0](service/configservice/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.18.0](service/connect/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.11.0](service/connectcontactlens/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.10.0](service/connectparticipant/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: 
[v1.12.0](service/costandusagereportservice/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.16.0](service/costexplorer/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.16.0](service/customerprofiles/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.17.0](service/databasemigrationservice/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.17.0](service/databrew/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.13.0](service/dataexchange/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.12.0](service/datapipeline/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.13.0](service/datasync/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.10.0](service/dax/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.14.0](service/detective/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.12.0](service/devicefarm/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.15.0](service/devopsguru/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.16.0](service/directconnect/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.12.0](service/directoryservice/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.10.0](service/dlm/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.16.0](service/docdb/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.4.0](service/drs/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.14.0](service/dynamodb/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.12.0](service/dynamodbstreams/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.13.0](service/ebs/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.30.0](service/ec2/CHANGELOG.md#v1300-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.12.0](service/ec2instanceconnect/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.15.0](service/ecr/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.12.0](service/ecrpublic/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.17.0](service/ecs/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.15.0](service/efs/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.19.0](service/eks/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.19.0](service/elasticache/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.13.0](service/elasticbeanstalk/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.10.0](service/elasticinference/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.13.0](service/elasticloadbalancing/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.17.0](service/elasticloadbalancingv2/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.14.0](service/elasticsearchservice/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.12.0](service/elastictranscoder/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.16.0](service/emr/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.12.0](service/emrcontainers/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.14.0](service/eventbridge/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.5.0](service/evidently/CHANGELOG.md#v150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.7.0](service/finspace/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.9.0](service/finspacedata/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.13.0](service/firehose/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.11.0](service/fis/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.14.0](service/fms/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.18.0](service/forecast/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.10.0](service/forecastquery/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated + * **Bug Fix**: Fixed an issue that resulted in the wrong service endpoints being constructed. 
+* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.18.0](service/frauddetector/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.19.0](service/fsx/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.13.0](service/gamelift/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.12.0](service/glacier/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.12.0](service/globalaccelerator/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.20.0](service/glue/CHANGELOG.md#v1200-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.6.0](service/grafana/CHANGELOG.md#v160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.12.0](service/greengrass/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.14.0](service/greengrassv2/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.12.0](service/groundstation/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.12.0](service/guardduty/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.14.0](service/health/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.13.0](service/healthlake/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.11.0](service/honeycode/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.17.0](service/iam/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.13.0](service/identitystore/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.18.0](service/imagebuilder/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.11.0](service/inspector/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.5.0](service/inspector2/CHANGELOG.md#v150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.0.0](service/internal/checksum/CHANGELOG.md#v100-2022-02-24) + * **Release**: New module for computing checksums +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.22.0](service/iot/CHANGELOG.md#v1220-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.9.0](service/iot1clickdevicesservice/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.10.0](service/iot1clickprojects/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: 
[v1.11.0](service/iotanalytics/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.10.0](service/iotdataplane/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.13.0](service/iotdeviceadvisor/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.13.0](service/iotevents/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.10.0](service/ioteventsdata/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.11.0](service/iotfleethub/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.10.0](service/iotjobsdataplane/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.11.0](service/iotsecuretunneling/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.19.0](service/iotsitewise/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.11.0](service/iotthingsgraph/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.4.0](service/iottwinmaker/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.17.0](service/iotwireless/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.15.0](service/ivs/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.16.0](service/kafka/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.7.0](service/kafkaconnect/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.21.0](service/kendra/CHANGELOG.md#v1210-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.14.0](service/kinesis/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.12.0](service/kinesisanalytics/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.13.0](service/kinesisanalyticsv2/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.10.0](service/kinesisvideo/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.11.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.9.0](service/kinesisvideomedia/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.9.0](service/kinesisvideosignaling/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/kms`: [v1.15.0](service/kms/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.14.0](service/lakeformation/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.18.0](service/lambda/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.15.0](service/lexmodelbuildingservice/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.18.0](service/lexmodelsv2/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.11.0](service/lexruntimeservice/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.13.0](service/lexruntimev2/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.14.0](service/licensemanager/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.16.0](service/lightsail/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.14.0](service/location/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.11.0](service/lookoutequipment/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.10.0](service/lookoutmetrics/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.11.0](service/lookoutvision/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.13.0](service/machinelearning/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.13.0](service/macie/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.19.0](service/macie2/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.11.0](service/managedblockchain/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.11.0](service/marketplacecatalog/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.10.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.10.0](service/marketplaceentitlementservice/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.12.0](service/marketplacemetering/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.14.0](service/mediaconnect/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: 
[v1.20.0](service/mediaconvert/CHANGELOG.md#v1200-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.18.0](service/medialive/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.14.0](service/mediapackage/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.15.0](service/mediapackagevod/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.11.0](service/mediastore/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.11.0](service/mediastoredata/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.15.0](service/mediatailor/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.8.0](service/memorydb/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.12.0](service/mgn/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.11.0](service/migrationhub/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.11.0](service/migrationhubconfig/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.4.0](service/migrationhubrefactorspaces/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.4.0](service/migrationhubstrategy/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.10.0](service/mobile/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.11.0](service/mq/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.12.0](service/mturk/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.11.0](service/mwaa/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.15.0](service/neptune/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.14.0](service/networkfirewall/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.11.0](service/networkmanager/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.11.0](service/nimble/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.8.0](service/opensearch/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.12.0](service/opsworks/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.13.0](service/opsworkscm/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API 
client updated +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.13.0](service/organizations/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.16.0](service/outposts/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.5.0](service/panorama/CHANGELOG.md#v150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.16.0](service/personalize/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.10.0](service/personalizeevents/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.10.0](service/personalizeruntime/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.13.0](service/pi/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.15.0](service/pinpoint/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.10.0](service/pinpointemail/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.9.0](service/pinpointsmsvoice/CHANGELOG.md#v190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.12.0](service/polly/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.13.0](service/pricing/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.11.0](service/proton/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.13.0](service/qldb/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.12.0](service/qldbsession/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.19.0](service/quicksight/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.15.0](service/ram/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.5.0](service/rbin/CHANGELOG.md#v150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.17.0](service/rds/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.10.0](service/rdsdata/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.20.0](service/redshift/CHANGELOG.md#v1200-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.14.0](service/redshiftdata/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.16.0](service/rekognition/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.4.0](service/resiliencehub/CHANGELOG.md#v140-2022-02-24) + * **Feature**: API client 
updated +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.11.0](service/resourcegroups/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.12.0](service/resourcegroupstaggingapi/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.15.0](service/robomaker/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.18.0](service/route53/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.11.0](service/route53domains/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.7.0](service/route53recoverycluster/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.8.0](service/route53recoverycontrolconfig/CHANGELOG.md#v180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.7.0](service/route53recoveryreadiness/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.14.0](service/route53resolver/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.5.0](service/rum/CHANGELOG.md#v150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.25.0](service/s3/CHANGELOG.md#v1250-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.19.0](service/s3control/CHANGELOG.md#v1190-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.11.0](service/s3outposts/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.25.0](service/sagemaker/CHANGELOG.md#v1250-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.11.0](service/sagemakera2iruntime/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.10.0](service/sagemakeredge/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.10.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.14.0](service/sagemakerruntime/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.10.0](service/savingsplans/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.13.0](service/schemas/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.14.0](service/secretsmanager/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.18.0](service/securityhub/CHANGELOG.md#v1180-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: 
[v1.10.0](service/serverlessapplicationrepository/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.12.0](service/servicecatalog/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.11.0](service/servicecatalogappregistry/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.16.0](service/servicediscovery/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.12.0](service/servicequotas/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.13.0](service/ses/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.12.0](service/sesv2/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.12.0](service/sfn/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.15.0](service/shield/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.12.0](service/signer/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.11.0](service/sms/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.14.0](service/snowball/CHANGELOG.md#v1140-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.7.0](service/snowdevicemanagement/CHANGELOG.md#v170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.16.0](service/sns/CHANGELOG.md#v1160-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.17.0](service/sqs/CHANGELOG.md#v1170-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.21.0](service/ssm/CHANGELOG.md#v1210-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.12.0](service/ssmcontacts/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.11.0](service/ssmincidents/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.10.0](service/sso/CHANGELOG.md#v1100-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.13.0](service/ssoadmin/CHANGELOG.md#v1130-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.11.0](service/ssooidc/CHANGELOG.md#v1110-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.15.0](service/storagegateway/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.15.0](service/sts/CHANGELOG.md#v1150-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.12.0](service/support/CHANGELOG.md#v1120-2022-02-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/swf`: 
[v1.12.0](service/swf/CHANGELOG.md#v1120-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.13.0](service/synthetics/CHANGELOG.md#v1130-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.13.0](service/textract/CHANGELOG.md#v1130-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.12.0](service/timestreamquery/CHANGELOG.md#v1120-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.12.0](service/timestreamwrite/CHANGELOG.md#v1120-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.16.0](service/transcribe/CHANGELOG.md#v1160-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.4.0](service/transcribestreaming/CHANGELOG.md#v140-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.16.0](service/transfer/CHANGELOG.md#v1160-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.12.0](service/translate/CHANGELOG.md#v1120-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.7.0](service/voiceid/CHANGELOG.md#v170-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.10.0](service/waf/CHANGELOG.md#v1100-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.11.0](service/wafregional/CHANGELOG.md#v1110-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.17.0](service/wafv2/CHANGELOG.md#v1170-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.13.0](service/wellarchitected/CHANGELOG.md#v1130-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.6.0](service/wisdom/CHANGELOG.md#v160-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.10.0](service/workdocs/CHANGELOG.md#v1100-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.10.0](service/worklink/CHANGELOG.md#v1100-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.14.0](service/workmail/CHANGELOG.md#v1140-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.10.0](service/workmailmessageflow/CHANGELOG.md#v1100-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.15.0](service/workspaces/CHANGELOG.md#v1150-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.4.0](service/workspacesweb/CHANGELOG.md#v140-2022-02-24)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.12.0](service/xray/CHANGELOG.md#v1120-2022-02-24)
+  * **Feature**: API client updated
+
+# Release (2022-01-28)
+
+## General Highlights
+* **Bug Fix**: Fixes the SDK's handling of `duration_seconds` in the shared credentials file, or when it is specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). 
Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. +* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR. +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.13.1](config/CHANGELOG.md#v1131-2022-01-28) + * **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. [#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR. + * **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563) +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.13.0](service/applicationinsights/CHANGELOG.md#v1130-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.13.1](service/cloudtrail/CHANGELOG.md#v1131-2022-01-28) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.13.1](service/codegurureviewer/CHANGELOG.md#v1131-2022-01-28) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.16.0](service/configservice/CHANGELOG.md#v1160-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.17.0](service/connect/CHANGELOG.md#v1170-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.12.1](service/ebs/CHANGELOG.md#v1121-2022-01-28) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.29.0](service/ec2/CHANGELOG.md#v1290-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.11.0](service/ec2instanceconnect/CHANGELOG.md#v1110-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.14.0](service/efs/CHANGELOG.md#v1140-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.10.0](service/fis/CHANGELOG.md#v1100-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.17.0](service/frauddetector/CHANGELOG.md#v1170-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.18.0](service/fsx/CHANGELOG.md#v1180-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.11.0](service/greengrass/CHANGELOG.md#v1110-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.13.0](service/greengrassv2/CHANGELOG.md#v1130-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.11.0](service/guardduty/CHANGELOG.md#v1110-2022-01-28) + * **Feature**: Updated to latest API model. 
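Since several fixes in this release concern how `config.LoadDefaultConfig` reports errors from its functional options, here is a minimal sketch of loading configuration with options and actually checking the returned error; the region and profile name are illustrative placeholders, not values from this changelog:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// With v1.13.1, errors raised by the options below (and by profile
	// resolution, which now uses os.UserHomeDir) are surfaced here
	// instead of being silently dropped.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-west-2"),        // illustrative region
		config.WithSharedConfigProfile("dev"), // hypothetical profile name
	)
	if err != nil {
		log.Fatalf("loading AWS config: %v", err)
	}
	_ = cfg
}
```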
+* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.10.0](service/honeycode/CHANGELOG.md#v1100-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.14.0](service/ivs/CHANGELOG.md#v1140-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.15.0](service/kafka/CHANGELOG.md#v1150-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.13.0](service/location/CHANGELOG.md#v1130-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.9.0](service/lookoutmetrics/CHANGELOG.md#v190-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.18.0](service/macie2/CHANGELOG.md#v1180-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.19.0](service/mediaconvert/CHANGELOG.md#v1190-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.14.0](service/mediatailor/CHANGELOG.md#v1140-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.14.0](service/ram/CHANGELOG.md#v1140-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.6.1](service/route53recoveryreadiness/CHANGELOG.md#v161-2022-01-28) + * **Documentation**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.24.0](service/sagemaker/CHANGELOG.md#v1240-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.17.0](service/securityhub/CHANGELOG.md#v1170-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.14.0](service/storagegateway/CHANGELOG.md#v1140-2022-01-28) + * **Feature**: Updated to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.15.0](service/transcribe/CHANGELOG.md#v1150-2022-01-28) + * **Feature**: Updated to latest API model. + +# Release (2022-01-14) + +## General Highlights +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.13.0 + * **Bug Fix**: Updates the Retry middleware to release the retry token on subsequent attempts. This fixes #1413, and is based on PR #1424 +* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.6.0](feature/dynamodb/attributevalue/CHANGELOG.md#v160-2022-01-14) + * **Feature**: Adds new MarshalWithOptions and UnmarshalWithOptions helpers allowing Encoding and Decoding options to be specified when serializing AttributeValues. Addresses issue: https://github.com/aws/aws-sdk-go-v2/issues/1494 +* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.6.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v160-2022-01-14) + * **Feature**: Adds new MarshalWithOptions and UnmarshalWithOptions helpers allowing Encoding and Decoding options to be specified when serializing AttributeValues.
Addresses issue: https://github.com/aws/aws-sdk-go-v2/issues/1494 +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.12.0](service/appsync/CHANGELOG.md#v1120-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.10.0](service/autoscalingplans/CHANGELOG.md#v1100-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.15.0](service/computeoptimizer/CHANGELOG.md#v1150-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.15.0](service/costexplorer/CHANGELOG.md#v1150-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.16.0](service/databasemigrationservice/CHANGELOG.md#v1160-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.16.0](service/databrew/CHANGELOG.md#v1160-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.28.0](service/ec2/CHANGELOG.md#v1280-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.18.0](service/elasticache/CHANGELOG.md#v1180-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.13.0](service/elasticsearchservice/CHANGELOG.md#v1130-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.8.0](service/finspacedata/CHANGELOG.md#v180-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.13.0](service/fms/CHANGELOG.md#v1130-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.19.0](service/glue/CHANGELOG.md#v1190-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.9.0](service/honeycode/CHANGELOG.md#v190-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.12.0](service/identitystore/CHANGELOG.md#v1120-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.9.0](service/ioteventsdata/CHANGELOG.md#v190-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.16.0](service/iotwireless/CHANGELOG.md#v1160-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.20.0](service/kendra/CHANGELOG.md#v1200-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.17.0](service/lexmodelsv2/CHANGELOG.md#v1170-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.12.0](service/lexruntimev2/CHANGELOG.md#v1120-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.8.0](service/lookoutmetrics/CHANGELOG.md#v180-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.17.0](service/medialive/CHANGELOG.md#v1170-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.13.0](service/mediatailor/CHANGELOG.md#v1130-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.10.0](service/mwaa/CHANGELOG.md#v1100-2022-01-14) + 
* **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.10.0](service/nimble/CHANGELOG.md#v1100-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.7.0](service/opensearch/CHANGELOG.md#v170-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.12.0](service/pi/CHANGELOG.md#v1120-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.14.0](service/pinpoint/CHANGELOG.md#v1140-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.16.0](service/rds/CHANGELOG.md#v1160-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.20.0](service/ssm/CHANGELOG.md#v1200-2022-01-14) + * **Feature**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.9.0](service/sso/CHANGELOG.md#v190-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.14.0](service/transcribe/CHANGELOG.md#v1140-2022-01-14) + * **Documentation**: Updated API models +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.14.0](service/workspaces/CHANGELOG.md#v1140-2022-01-14) + * **Feature**: Updated API models + +# Release (2022-01-07) + +## General Highlights +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.12.0](config/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: Add load option for CredentialsCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader wraps the underlying credential provider in the CredentialsCache.
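A short sketch of the new `CredentialsCacheOptions` load option described above, assuming the standard tuning fields on `aws.CredentialsCacheOptions`; the five-minute expiry window is an illustrative value, not a recommendation:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		// The callback is only applied when the loader wraps the resolved
		// provider in an aws.CredentialsCache, as noted above.
		config.WithCredentialsCacheOptions(func(o *aws.CredentialsCacheOptions) {
			o.ExpiryWindow = 5 * time.Minute // illustrative early-refresh window
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```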
+* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.12.0](service/appstream/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.12.0](service/cloudtrail/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.12.0](service/detective/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.27.0](service/ec2/CHANGELOG.md#v1270-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.15.0](service/ecs/CHANGELOG.md#v1150-2022-01-07) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.17.0](service/eks/CHANGELOG.md#v1170-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.18.0](service/glue/CHANGELOG.md#v1180-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.11.0](service/greengrassv2/CHANGELOG.md#v1110-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.20.0](service/iot/CHANGELOG.md#v1200-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.12.0](service/lakeformation/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.16.0](service/lambda/CHANGELOG.md#v1160-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.17.0](service/mediaconvert/CHANGELOG.md#v1170-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.17.0](service/quicksight/CHANGELOG.md#v1170-2022-01-07) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.15.0](service/rds/CHANGELOG.md#v1150-2022-01-07) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.14.0](service/rekognition/CHANGELOG.md#v1140-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.23.0](service/s3/CHANGELOG.md#v1230-2022-01-07) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.17.0](service/s3control/CHANGELOG.md#v1170-2022-01-07) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.9.0](service/s3outposts/CHANGELOG.md#v190-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.22.0](service/sagemaker/CHANGELOG.md#v1220-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.12.0](service/secretsmanager/CHANGELOG.md#v1120-2022-01-07) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.9.0](service/ssooidc/CHANGELOG.md#v190-2022-01-07) + * **Feature**: API client updated + +# Release (2021-12-21) + +## General Highlights +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. 
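To illustrate the paginator feature above, a sketch that resumes an S3 listing from a token saved on a previous run; this assumes the starting token is supplied through the operation input's token field (here `ContinuationToken`), and the bucket name and token are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	savedToken := aws.String("opaque-token-from-a-previous-run") // placeholder

	// The token on the initial input is used as the paginator's starting
	// position; pagination also now stops on empty-string tokens.
	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket:            aws.String("example-bucket"), // placeholder
		ContinuationToken: savedToken,
	})
	for p.HasMorePages() {
		page, err := p.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("objects in page:", len(page.Contents))
	}
}
```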
+* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.11.0](service/accessanalyzer/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.10.0](service/acm/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.11.0](service/apigateway/CHANGELOG.md#v1110-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.11.0](service/applicationautoscaling/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.10.0](service/appsync/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.17.0](service/autoscaling/CHANGELOG.md#v1170-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.3.0](service/chimesdkmeetings/CHANGELOG.md#v130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.5.0](service/chimesdkmessaging/CHANGELOG.md#v150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.4.0](service/cloudcontrol/CHANGELOG.md#v140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.16.0](service/cloudformation/CHANGELOG.md#v1160-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.13.0](service/cloudwatch/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.10.0](service/cloudwatchevents/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.11.0](service/cloudwatchlogs/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: API client updated + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.10.0](service/codedeploy/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.9.0](service/comprehendmedical/CHANGELOG.md#v190-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.13.0](service/configservice/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.13.0](service/customerprofiles/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.14.0](service/databasemigrationservice/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.10.0](service/datasync/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.12.0](service/devopsguru/CHANGELOG.md#v1120-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: 
[v1.13.0](service/directconnect/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.13.0](service/docdb/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.11.0](service/dynamodb/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.9.0](service/dynamodbstreams/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.26.0](service/ec2/CHANGELOG.md#v1260-2021-12-21) + * **Feature**: API client updated + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.12.0](service/ecr/CHANGELOG.md#v1120-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.14.0](service/ecs/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.16.0](service/elasticache/CHANGELOG.md#v1160-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.10.0](service/elasticloadbalancing/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.14.0](service/elasticloadbalancingv2/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.11.0](service/elasticsearchservice/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.13.0](service/emr/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.11.0](service/eventbridge/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.6.0](service/finspacedata/CHANGELOG.md#v160-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.15.0](service/forecast/CHANGELOG.md#v1150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.9.0](service/glacier/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.9.0](service/groundstation/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.11.0](service/health/CHANGELOG.md#v1110-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.15.0](service/imagebuilder/CHANGELOG.md#v1150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.19.0](service/iot/CHANGELOG.md#v1190-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.11.0](service/kinesis/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.9.0](service/kinesisanalytics/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest 
service endpoints +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.10.0](service/kinesisanalyticsv2/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.12.0](service/kms/CHANGELOG.md#v1120-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.15.0](service/lambda/CHANGELOG.md#v1150-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.15.0](service/lexmodelsv2/CHANGELOG.md#v1150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.10.0](service/location/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.6.0](service/lookoutmetrics/CHANGELOG.md#v160-2021-12-21) + * **Feature**: API client updated + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.8.0](service/lookoutvision/CHANGELOG.md#v180-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.9.0](service/marketplacemetering/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.11.0](service/mediaconnect/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.12.0](service/neptune/CHANGELOG.md#v1120-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.11.0](service/networkfirewall/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.8.0](service/nimble/CHANGELOG.md#v180-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.5.0](service/opensearch/CHANGELOG.md#v150-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.13.0](service/outposts/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.10.0](service/pi/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.10.0](service/qldb/CHANGELOG.md#v1100-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.14.0](service/rds/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.17.0](service/redshift/CHANGELOG.md#v1170-2021-12-21) + * **Feature**: API client updated + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.8.0](service/resourcegroups/CHANGELOG.md#v180-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.9.0](service/resourcegroupstaggingapi/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.15.0](service/route53/CHANGELOG.md#v1150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.8.0](service/route53domains/CHANGELOG.md#v180-2021-12-21) + * **Feature**: 
API client updated +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.5.0](service/route53recoverycontrolconfig/CHANGELOG.md#v150-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.22.0](service/s3/CHANGELOG.md#v1220-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.16.0](service/s3control/CHANGELOG.md#v1160-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.21.0](service/sagemaker/CHANGELOG.md#v1210-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.7.3](service/savingsplans/CHANGELOG.md#v173-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.11.0](service/secretsmanager/CHANGELOG.md#v1110-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.14.0](service/securityhub/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.9.0](service/sfn/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.8.0](service/sms/CHANGELOG.md#v180-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.13.0](service/sns/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.14.0](service/sqs/CHANGELOG.md#v1140-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.18.0](service/ssm/CHANGELOG.md#v1180-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.12.0](service/sts/CHANGELOG.md#v1120-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.9.0](service/support/CHANGELOG.md#v190-2021-12-21) + * **Documentation**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.9.0](service/swf/CHANGELOG.md#v190-2021-12-21) + * **Feature**: Updated to latest service endpoints +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.13.0](service/transfer/CHANGELOG.md#v1130-2021-12-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.11.0](service/workmail/CHANGELOG.md#v1110-2021-12-21) + * **Feature**: API client updated + +# Release (2021-12-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.10.1](service/accessanalyzer/CHANGELOG.md#v1101-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.9.3](service/amp/CHANGELOG.md#v193-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller.
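For context on the idempotency fix that recurs in this release's entries: affected clients generate an idempotency token automatically when the caller leaves it unset. A hedged sketch against `accessanalyzer`, whose `CreateAnalyzerInput.ClientToken` is one such parameter; the analyzer name is a placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/accessanalyzer"
	"github.com/aws/aws-sdk-go-v2/service/accessanalyzer/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := accessanalyzer.NewFromConfig(cfg)

	// ClientToken is deliberately left unset: the client auto-fills a
	// random idempotency token, which is the behavior restored by the
	// bug fix above.
	_, err = client.CreateAnalyzer(context.TODO(), &accessanalyzer.CreateAnalyzerInput{
		AnalyzerName: aws.String("example-analyzer"), // placeholder
		Type:         types.TypeAccount,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```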
+* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.0.0](service/amplifyuibuilder/CHANGELOG.md#v100-2021-12-03) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.8.3](service/appmesh/CHANGELOG.md#v183-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.10.2](service/braket/CHANGELOG.md#v1102-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.7.3](service/codeguruprofiler/CHANGELOG.md#v173-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.1.1](service/evidently/CHANGELOG.md#v111-2021-12-03) + * **Bug Fix**: Fixed a bug that prevented the resolution of the correct endpoint for some API operations. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.2.3](service/grafana/CHANGELOG.md#v123-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.9.2](service/location/CHANGELOG.md#v192-2021-12-03) + * **Bug Fix**: Fixed a bug that prevented the resolution of the correct endpoint for some API operations. + * **Bug Fix**: Fixed an issue that caused some operations to not be signed using sigv4, resulting in authentication failures. +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.7.0](service/networkmanager/CHANGELOG.md#v170-2021-12-03) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.7.3](service/nimble/CHANGELOG.md#v173-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.7.2](service/proton/CHANGELOG.md#v172-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.10.0](service/ram/CHANGELOG.md#v1100-2021-12-03) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.12.0](service/rekognition/CHANGELOG.md#v1120-2021-12-03) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.3.3](service/snowdevicemanagement/CHANGELOG.md#v133-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.2.3](service/wisdom/CHANGELOG.md#v123-2021-12-03) + * **Bug Fix**: Fixed an issue that prevented auto-filling of an API's idempotency parameters when not explicitly provided by the caller. + +# Release (2021-12-02) + +## General Highlights +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client.
([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.11.0](config/CHANGELOG.md#v1110-2021-12-02) + * **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`. +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.10.0](service/accessanalyzer/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.9.0](service/applicationinsights/CHANGELOG.md#v190-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.0.0](service/backupgateway/CHANGELOG.md#v100-2021-12-02) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.8.0](service/cloudhsm/CHANGELOG.md#v180-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.11.0](service/devopsguru/CHANGELOG.md#v1110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.12.0](service/directconnect/CHANGELOG.md#v1120-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.10.0](service/dynamodb/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.25.0](service/ec2/CHANGELOG.md#v1250-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.1.0](service/evidently/CHANGELOG.md#v110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.14.0](service/fsx/CHANGELOG.md#v1140-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.16.0](service/glue/CHANGELOG.md#v1160-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.1.0](service/inspector2/CHANGELOG.md#v110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.18.0](service/iot/CHANGELOG.md#v1180-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.0.0](service/iottwinmaker/CHANGELOG.md#v100-2021-12-02) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.11.0](service/kafka/CHANGELOG.md#v1110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.17.0](service/kendra/CHANGELOG.md#v1170-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.10.0](service/kinesis/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.10.0](service/lakeformation/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.14.0](service/lexmodelsv2/CHANGELOG.md#v1140-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.10.0](service/lexruntimev2/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: Support has been added for the `StartConversation` API. 
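A sketch of the `WithEndpointResolverWithOptions` load option added in this release's `config` highlight, using `aws.EndpointResolverWithOptionsFunc` to route one service to a local endpoint; the service ID check and localhost URL are illustrative only:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	resolver := aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			if service == "S3" { // illustrative service ID
				return aws.Endpoint{URL: "http://localhost:4566"}, nil // placeholder
			}
			// Fall back to the SDK's default endpoint resolution.
			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
		})

	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithEndpointResolverWithOptions(resolver))
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```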
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.12.0](service/outposts/CHANGELOG.md#v1120-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.1.0](service/rbin/CHANGELOG.md#v110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.10.0](service/redshiftdata/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.1.0](service/rum/CHANGELOG.md#v110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.21.0](service/s3/CHANGELOG.md#v1210-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.20.0](service/sagemaker/CHANGELOG.md#v1200-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.11.0](service/sagemakerruntime/CHANGELOG.md#v1110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.11.0](service/shield/CHANGELOG.md#v1110-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.10.0](service/snowball/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.10.0](service/storagegateway/CHANGELOG.md#v1100-2021-12-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.0.0](service/workspacesweb/CHANGELOG.md#v100-2021-12-02) + * **Release**: New AWS service client module + +# Release (2021-11-30) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.16.0](service/autoscaling/CHANGELOG.md#v1160-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.10.0](service/backup/CHANGELOG.md#v1100-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.10.0](service/braket/CHANGELOG.md#v1100-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.2.0](service/chimesdkmeetings/CHANGELOG.md#v120-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.15.0](service/cloudformation/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.13.0](service/computeoptimizer/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.13.0](service/connect/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.12.0](service/customerprofiles/CHANGELOG.md#v1120-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.13.0](service/databasemigrationservice/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.9.0](service/dataexchange/CHANGELOG.md#v190-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.9.0](service/dynamodb/CHANGELOG.md#v190-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: 
[v1.24.0](service/ec2/CHANGELOG.md#v1240-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.11.0](service/ecr/CHANGELOG.md#v1110-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.13.0](service/ecs/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.15.0](service/eks/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.15.0](service/elasticache/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.13.0](service/elasticloadbalancingv2/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.10.0](service/elasticsearchservice/CHANGELOG.md#v1100-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.0.0](service/evidently/CHANGELOG.md#v100-2021-11-30) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.5.0](service/finspacedata/CHANGELOG.md#v150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.14.0](service/imagebuilder/CHANGELOG.md#v1140-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.0.0](service/inspector2/CHANGELOG.md#v100-2021-11-30) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery`: [v1.3.2](service/internal/endpoint-discovery/CHANGELOG.md#v132-2021-11-30) + * **Bug Fix**: Fixed a race condition that caused concurrent calls relying on endpoint discovery to share the same `url.URL` reference in their operation's http.Request. 
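The endpoint-discovery fix above affects clients that opt in to discovered endpoints. A sketch of opting in on a DynamoDB client, assuming the `EndpointDiscovery.EnableEndpointDiscovery` client option with an `aws.Ternary` value:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// Opt in to endpoint discovery; with the fix above, concurrent calls
	// no longer share a single url.URL in their http.Request.
	client := dynamodb.NewFromConfig(cfg, func(o *dynamodb.Options) {
		o.EndpointDiscovery.EnableEndpointDiscovery = aws.TrueTernary
	})
	_ = client
}
```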
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.17.0](service/iot/CHANGELOG.md#v1170-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.9.0](service/iotdeviceadvisor/CHANGELOG.md#v190-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.15.0](service/iotsitewise/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.13.0](service/iotwireless/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.14.0](service/lambda/CHANGELOG.md#v1140-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.14.0](service/macie2/CHANGELOG.md#v1140-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.8.0](service/mgn/CHANGELOG.md#v180-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.0.0](service/migrationhubrefactorspaces/CHANGELOG.md#v100-2021-11-30) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.4.0](service/opensearch/CHANGELOG.md#v140-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.11.0](service/outposts/CHANGELOG.md#v1110-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.12.0](service/personalize/CHANGELOG.md#v1120-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.7.0](service/personalizeruntime/CHANGELOG.md#v170-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.12.0](service/pinpoint/CHANGELOG.md#v1120-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.7.0](service/proton/CHANGELOG.md#v170-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.15.0](service/quicksight/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.0.0](service/rbin/CHANGELOG.md#v100-2021-11-30) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.13.0](service/rds/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.16.0](service/redshift/CHANGELOG.md#v1160-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.0.0](service/rum/CHANGELOG.md#v100-2021-11-30) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.20.0](service/s3/CHANGELOG.md#v1200-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.15.0](service/s3control/CHANGELOG.md#v1150-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.13.0](service/sqs/CHANGELOG.md#v1130-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.17.0](service/ssm/CHANGELOG.md#v1170-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.11.0](service/sts/CHANGELOG.md#v1110-2021-11-30) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/textract`: [v1.10.0](service/textract/CHANGELOG.md#v1100-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.8.0](service/timestreamquery/CHANGELOG.md#v180-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.8.0](service/timestreamwrite/CHANGELOG.md#v180-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.1.0](service/transcribestreaming/CHANGELOG.md#v110-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.8.0](service/translate/CHANGELOG.md#v180-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.9.0](service/wellarchitected/CHANGELOG.md#v190-2021-11-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.11.0](service/workspaces/CHANGELOG.md#v1110-2021-11-30) + * **Feature**: API client updated + +# Release (2021-11-19) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.11.1 + * **Bug Fix**: Fixed a bug that prevented aws.EndpointResolverWithOptionsFunc from satisfying the aws.EndpointResolverWithOptions interface. +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.8.0](service/amplifybackend/CHANGELOG.md#v180-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.10.0](service/apigateway/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.7.0](service/appconfig/CHANGELOG.md#v170-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.0.0](service/appconfigdata/CHANGELOG.md#v100-2021-11-19) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.8.0](service/applicationinsights/CHANGELOG.md#v180-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.10.0](service/appstream/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.12.0](service/auditmanager/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.11.0](service/batch/CHANGELOG.md#v1110-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.14.0](service/chime/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.1.0](service/chimesdkmeetings/CHANGELOG.md#v110-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.14.0](service/cloudformation/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.10.0](service/cloudtrail/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.12.0](service/cloudwatch/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.12.0](service/connect/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.12.0](service/databasemigrationservice/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.13.0](service/databrew/CHANGELOG.md#v1130-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.10.0](service/devopsguru/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.0.0](service/drs/CHANGELOG.md#v100-2021-11-19) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.8.0](service/dynamodbstreams/CHANGELOG.md#v180-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.23.0](service/ec2/CHANGELOG.md#v1230-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.14.0](service/eks/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.14.0](service/forecast/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.10.0](service/ivs/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.10.0](service/kafka/CHANGELOG.md#v1100-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.16.0](service/kendra/CHANGELOG.md#v1160-2021-11-19) + * **Announcement**: Fixes an API modeling bug that incorrectly generated the `DocumentAttributeValue` type as a union instead of a structure. This update corrects the `DocumentAttributeValue` type to be a `struct` instead of an `interface` and removes the `DocumentAttributeValueMember` types. To migrate to this change, applications using service/kendra will need to be updated to use struct members in `DocumentAttributeValue` instead of the `DocumentAttributeValueMember` types.
+ * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.11.0](service/kms/CHANGELOG.md#v1110-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.13.0](service/lambda/CHANGELOG.md#v1130-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.13.0](service/lexmodelsv2/CHANGELOG.md#v1130-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.9.0](service/lexruntimev2/CHANGELOG.md#v190-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.9.0](service/location/CHANGELOG.md#v190-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.15.0](service/mediaconvert/CHANGELOG.md#v1150-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.14.0](service/medialive/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.7.0](service/mgn/CHANGELOG.md#v170-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.0.0](service/migrationhubstrategy/CHANGELOG.md#v100-2021-11-19) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.9.0](service/qldb/CHANGELOG.md#v190-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.9.0](service/qldbsession/CHANGELOG.md#v190-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.15.0](service/redshift/CHANGELOG.md#v1150-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.12.0](service/sns/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.16.0](service/ssm/CHANGELOG.md#v1160-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.12.0](service/transfer/CHANGELOG.md#v1120-2021-11-19) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.14.0](service/wafv2/CHANGELOG.md#v1140-2021-11-19) + * **Feature**: API client updated + +# Release (2021-11-12) + +## General Highlights +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. +* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature. +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.9.0](service/backup/CHANGELOG.md#v190-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.10.0](service/batch/CHANGELOG.md#v1100-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.0.0](service/chimesdkmeetings/CHANGELOG.md#v100-2021-11-12) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.12.0](service/computeoptimizer/CHANGELOG.md#v1120-2021-11-12) + * **Feature**: Updated service to latest API model. 
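The `WaitForOutput` method highlighted for this release returns the output of the successful wait operation instead of discarding it; a sketch with the S3 `BucketExists` waiter, where the bucket name and maximum wait are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	waiter := s3.NewBucketExistsWaiter(s3.NewFromConfig(cfg))

	// WaitForOutput behaves like Wait but also returns the output of the
	// HeadBucket call that satisfied the waiter.
	out, err := waiter.WaitForOutput(context.TODO(),
		&s3.HeadBucketInput{Bucket: aws.String("example-bucket")}, // placeholder
		2*time.Minute) // placeholder max wait
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("bucket is ready: %+v\n", out)
}
```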
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.11.0](service/connect/CHANGELOG.md#v1110-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.12.0](service/docdb/CHANGELOG.md#v1120-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.8.0](service/dynamodb/CHANGELOG.md#v180-2021-11-12) + * **Documentation**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.22.0](service/ec2/CHANGELOG.md#v1220-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.12.0](service/ecs/CHANGELOG.md#v1120-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.9.0](service/gamelift/CHANGELOG.md#v190-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.9.0](service/greengrassv2/CHANGELOG.md#v190-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.10.0](service/health/CHANGELOG.md#v1100-2021-11-12) + * **Documentation**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.9.0](service/identitystore/CHANGELOG.md#v190-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.12.0](service/iotwireless/CHANGELOG.md#v1120-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.11.0](service/neptune/CHANGELOG.md#v1110-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.12.0](service/rds/CHANGELOG.md#v1120-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.0.0](service/resiliencehub/CHANGELOG.md#v100-2021-11-12) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.8.0](service/resourcegroupstaggingapi/CHANGELOG.md#v180-2021-11-12) + * **Documentation**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.14.0](service/s3control/CHANGELOG.md#v1140-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.19.0](service/sagemaker/CHANGELOG.md#v1190-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.10.0](service/sagemakerruntime/CHANGELOG.md#v1100-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.7.0](service/ssmincidents/CHANGELOG.md#v170-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.11.0](service/transcribe/CHANGELOG.md#v1110-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.7.0](service/translate/CHANGELOG.md#v170-2021-11-12) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.13.0](service/wafv2/CHANGELOG.md#v1130-2021-11-12) + * **Feature**: Updated service to latest API model. 
+ +# Release (2021-11-06) + +## General Highlights +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream`: [v1.0.0](aws/protocol/eventstream/CHANGELOG.md#v100-2021-11-06) + * **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release. + * **Release**: Protocol support has been added for AWS event stream. +* `github.com/aws/aws-sdk-go-v2/internal/endpoints/v2`: [v2.0.0](internal/endpoints/v2/CHANGELOG.md#v200-2021-11-06) + * **Release**: Endpoint Variant Model Support +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.6.0](service/applicationinsights/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.8.0](service/appstream/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.11.0](service/auditmanager/CHANGELOG.md#v1110-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.14.0](service/autoscaling/CHANGELOG.md#v1140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.13.0](service/chime/CHANGELOG.md#v1130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.4.0](service/chimesdkidentity/CHANGELOG.md#v140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.4.0](service/chimesdkmessaging/CHANGELOG.md#v140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.10.0](service/cloudfront/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.7.0](service/codecommit/CHANGELOG.md#v170-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.10.0](service/connect/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.7.0](service/connectcontactlens/CHANGELOG.md#v170-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.6.0](service/connectparticipant/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.10.0](service/databasemigrationservice/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.8.0](service/datasync/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.11.0](service/docdb/CHANGELOG.md#v1110-2021-11-06) + * **Feature**: Updated service to latest API model. 
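
A short sketch of the FIPS and dual-stack endpoint opt-in announced in this release's general highlights above. Only the programmatic route is shown; the environment variables and shared-config keys named in the comments are assumed to be the spellings introduced with this release.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Programmatic opt-in; assumed equivalent to AWS_USE_FIPS_ENDPOINT=true
	// and AWS_USE_DUALSTACK_ENDPOINT=true in the environment, or
	// use_fips_endpoint/use_dualstack_endpoint in the shared config file.
	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
		config.WithUseDualStackEndpoint(aws.DualStackEndpointStateEnabled),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Clients built from this config resolve the FIPS/dual-stack endpoint
	// variants instead of the default regional endpoints.
	_ = s3.NewFromConfig(cfg)
}
```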
+* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.9.0](service/ebs/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.21.0](service/ec2/CHANGELOG.md#v1210-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.9.0](service/ecr/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.11.0](service/ecs/CHANGELOG.md#v1110-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.12.0](service/eks/CHANGELOG.md#v1120-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.13.0](service/elasticache/CHANGELOG.md#v1130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.9.0](service/elasticsearchservice/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.8.0](service/emrcontainers/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.4.0](service/finspace/CHANGELOG.md#v140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.12.0](service/fsx/CHANGELOG.md#v1120-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.8.0](service/gamelift/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.9.0](service/health/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.12.0](service/iam/CHANGELOG.md#v1120-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/internal/eventstreamtesting`: [v1.0.0](service/internal/eventstreamtesting/CHANGELOG.md#v100-2021-11-06) + * **Release**: Protocol support has been added for AWS event stream. +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.13.0](service/iotsitewise/CHANGELOG.md#v1130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.14.0](service/kendra/CHANGELOG.md#v1140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.8.0](service/kinesis/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Support has been added for the SubscribeToShard API. +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.9.0](service/kms/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.12.0](service/lightsail/CHANGELOG.md#v1120-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.13.0](service/macie2/CHANGELOG.md#v1130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.6.0](service/mgn/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. 
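
Since the `kinesis` entry above calls out `SubscribeToShard`, a rough consumer sketch follows. The consumer ARN and shard ID are placeholders, and the event-stream accessors (`GetStream`, `Events`, and the `...MemberSubscribeToShardEvent` wrapper) are assumed to follow the SDK's standard generated event-stream pattern rather than quoted from its docs.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	out, err := client.SubscribeToShard(ctx, &kinesis.SubscribeToShardInput{
		// Placeholder consumer ARN, registered via RegisterStreamConsumer.
		ConsumerARN:      aws.String("arn:aws:kinesis:eu-west-1:111122223333:stream/demo/consumer/app:1"),
		ShardId:          aws.String("shardId-000000000000"),
		StartingPosition: &types.StartingPosition{Type: types.ShardIteratorTypeLatest},
	})
	if err != nil {
		log.Fatal(err)
	}
	stream := out.GetStream()
	defer stream.Close()

	// Records are pushed over an HTTP/2 event stream for up to five minutes
	// per subscription.
	for event := range stream.Events() {
		if e, ok := event.(*types.SubscribeToShardEventStreamMemberSubscribeToShardEvent); ok {
			for _, rec := range e.Value.Records {
				log.Printf("record: %s", rec.Data)
			}
		}
	}
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
}
```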
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.10.0](service/neptune/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.6.0](service/networkmanager/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.6.0](service/nimble/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.3.0](service/opensearch/CHANGELOG.md#v130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.14.0](service/quicksight/CHANGELOG.md#v1140-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.11.0](service/rds/CHANGELOG.md#v1110-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.10.0](service/rekognition/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.9.0](service/route53resolver/CHANGELOG.md#v190-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.18.0](service/s3/CHANGELOG.md#v1180-2021-11-06) + * **Feature**: Support has been added for the SelectObjectContent API. + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.13.0](service/s3control/CHANGELOG.md#v1130-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.18.0](service/sagemaker/CHANGELOG.md#v1180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.11.0](service/servicediscovery/CHANGELOG.md#v1110-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.6.0](service/ssmincidents/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.6.0](service/sso/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.8.0](service/storagegateway/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.7.0](service/support/CHANGELOG.md#v170-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.8.0](service/textract/CHANGELOG.md#v180-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.10.0](service/transcribe/CHANGELOG.md#v1100-2021-11-06) + * **Feature**: Updated service to latest API model. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.0.0](service/transcribestreaming/CHANGELOG.md#v100-2021-11-06) + * **Release**: New AWS service client module + * **Feature**: Support has been added for the StartStreamTranscription and StartMedicalStreamTranscription APIs. +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.6.0](service/waf/CHANGELOG.md#v160-2021-11-06) + * **Feature**: Updated service to latest API model. 
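
The `s3` entry above adds `SelectObjectContent`; the sketch below runs an S3 Select query over a CSV object and drains the response event stream. Bucket, key, and query are placeholders, and the `...MemberRecords` event wrapper is again assumed from the SDK's generated event-stream pattern.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
		Bucket:         aws.String("my-bucket"),
		Key:            aws.String("data.csv"),
		Expression:     aws.String("SELECT s.name FROM S3Object s"),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
		},
		OutputSerialization: &types.OutputSerialization{
			CSV: &types.CSVOutput{},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	stream := out.GetStream()
	defer stream.Close()

	// Matching rows arrive as Records events on the response event stream.
	for event := range stream.Events() {
		if rec, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
			os.Stdout.Write(rec.Value.Payload)
		}
	}
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
}
```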
+* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.2.0](service/wisdom/CHANGELOG.md#v120-2021-11-06) + * **Feature**: Updated service to latest API model. + +# Release (2021-10-21) + +## General Highlights +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.10.0 + * **Feature**: Adds dynamic signing middleware that switches to unsigned payload when TLS is enabled. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.8.0](service/appflow/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.8.0](service/applicationautoscaling/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.13.0](service/autoscaling/CHANGELOG.md#v1130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.3.0](service/chimesdkmessaging/CHANGELOG.md#v130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.11.0](service/cloudformation/CHANGELOG.md#v1110-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.7.0](service/cloudsearch/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.7.0](service/cloudtrail/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.9.0](service/cloudwatch/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.7.0](service/cloudwatchevents/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.8.0](service/cloudwatchlogs/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.7.0](service/codedeploy/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.10.0](service/configservice/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.7.0](service/dataexchange/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.9.0](service/directconnect/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.10.0](service/docdb/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.6.0](service/dynamodb/CHANGELOG.md#v160-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.20.0](service/ec2/CHANGELOG.md#v1200-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.8.0](service/ecr/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.10.0](service/ecs/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.9.0](service/efs/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: 
[v1.12.0](service/elasticache/CHANGELOG.md#v1120-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.7.0](service/elasticloadbalancing/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.10.0](service/elasticloadbalancingv2/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.10.0](service/emr/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.8.0](service/eventbridge/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.6.0](service/glacier/CHANGELOG.md#v160-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.13.0](service/glue/CHANGELOG.md#v1130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.8.0](service/ivs/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.13.0](service/kendra/CHANGELOG.md#v1130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.7.0](service/kinesis/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.7.0](service/kinesisanalyticsv2/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.8.0](service/kms/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.10.0](service/lambda/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.13.0](service/mediaconvert/CHANGELOG.md#v1130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.9.0](service/mediapackage/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.10.0](service/mediapackagevod/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.9.0](service/mediatailor/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.9.0](service/neptune/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.0.0](service/panorama/CHANGELOG.md#v100-2021-10-21) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.13.0](service/quicksight/CHANGELOG.md#v1130-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.10.0](service/rds/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.12.0](service/redshift/CHANGELOG.md#v1120-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.10.0](service/robomaker/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.17.0](service/s3/CHANGELOG.md#v1170-2021-10-21) + * **Feature**: Updates S3 streaming operations - PutObject, UploadPart, WriteGetObjectResponse to 
use unsigned payload signing auth when TLS is enabled. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.17.0](service/sagemaker/CHANGELOG.md#v1170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.12.0](service/securityhub/CHANGELOG.md#v1120-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.6.0](service/sfn/CHANGELOG.md#v160-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.9.0](service/sns/CHANGELOG.md#v190-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.10.0](service/sqs/CHANGELOG.md#v1100-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.7.0](service/storagegateway/CHANGELOG.md#v170-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.8.0](service/sts/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.6.0](service/swf/CHANGELOG.md#v160-2021-10-21) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.8.0](service/workmail/CHANGELOG.md#v180-2021-10-21) + * **Feature**: API client updated + +# Release (2021-10-11) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.6.0](feature/ec2/imds/CHANGELOG.md#v160-2021-10-11) + * **Feature**: Respect passed-in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed-in Context's Deadline or Timeout options. If a Client operation is called with a Context that has a Deadline or Timeout, the client will no longer override it with the client's default timeout.
Fixes #1253 +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.5.0](service/amplifybackend/CHANGELOG.md#v150-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.7.0](service/applicationautoscaling/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.3.0](service/apprunner/CHANGELOG.md#v130-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.6.0](service/backup/CHANGELOG.md#v160-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.11.0](service/chime/CHANGELOG.md#v1110-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.11.0](service/codebuild/CHANGELOG.md#v1110-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.10.0](service/databrew/CHANGELOG.md#v1100-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.19.0](service/ec2/CHANGELOG.md#v1190-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.8.0](service/efs/CHANGELOG.md#v180-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.9.0](service/elasticloadbalancingv2/CHANGELOG.md#v190-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.7.0](service/firehose/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.10.0](service/frauddetector/CHANGELOG.md#v1100-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.10.0](service/fsx/CHANGELOG.md#v1100-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.12.0](service/glue/CHANGELOG.md#v1120-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.0.0](service/grafana/CHANGELOG.md#v100-2021-10-11) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.8.0](service/iotevents/CHANGELOG.md#v180-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.12.0](service/kendra/CHANGELOG.md#v1120-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.7.0](service/kms/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.9.0](service/lexmodelsv2/CHANGELOG.md#v190-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.6.0](service/lexruntimev2/CHANGELOG.md#v160-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.6.0](service/location/CHANGELOG.md#v160-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.12.0](service/mediaconvert/CHANGELOG.md#v1120-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.10.0](service/medialive/CHANGELOG.md#v1100-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.16.0](service/sagemaker/CHANGELOG.md#v1160-2021-10-11) + * 
**Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.7.0](service/secretsmanager/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.11.0](service/securityhub/CHANGELOG.md#v1110-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.12.0](service/ssm/CHANGELOG.md#v1120-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.6.0](service/ssooidc/CHANGELOG.md#v160-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.7.0](service/synthetics/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.6.0](service/textract/CHANGELOG.md#v160-2021-10-11) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.7.0](service/workmail/CHANGELOG.md#v170-2021-10-11) + * **Feature**: API client updated + +# Release (2021-09-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.0.0](service/account/CHANGELOG.md#v100-2021-09-30) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.6.0](service/amp/CHANGELOG.md#v160-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.7.0](service/appintegrations/CHANGELOG.md#v170-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.0.0](service/cloudcontrol/CHANGELOG.md#v100-2021-09-30) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.5.0](service/cloudhsmv2/CHANGELOG.md#v150-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.8.0](service/connect/CHANGELOG.md#v180-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.6.0](service/dataexchange/CHANGELOG.md#v160-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.8.0](service/elasticloadbalancingv2/CHANGELOG.md#v180-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.11.0](service/imagebuilder/CHANGELOG.md#v1110-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.9.0](service/lambda/CHANGELOG.md#v190-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.11.0](service/macie2/CHANGELOG.md#v1110-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.7.0](service/networkfirewall/CHANGELOG.md#v170-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.8.0](service/pinpoint/CHANGELOG.md#v180-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.6.0](service/sesv2/CHANGELOG.md#v160-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.8.0](service/transfer/CHANGELOG.md#v180-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.0.0](service/voiceid/CHANGELOG.md#v100-2021-09-30) + * **Release**: New AWS 
service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.0.0](service/wisdom/CHANGELOG.md#v100-2021-09-30) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.6.0](service/workmail/CHANGELOG.md#v160-2021-09-30) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.7.0](service/workspaces/CHANGELOG.md#v170-2021-09-30) + * **Feature**: API client updated + +# Release (2021-09-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.2.4](feature/dynamodb/expression/CHANGELOG.md#v124-2021-09-24) + * **Documentation**: Fixes typo in NameBuilder.NamesList example documentation to use the correct variable name. +* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.6.0](service/appmesh/CHANGELOG.md#v160-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.7.0](service/appsync/CHANGELOG.md#v170-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.9.0](service/auditmanager/CHANGELOG.md#v190-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.5.0](service/codecommit/CHANGELOG.md#v150-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.8.0](service/comprehend/CHANGELOG.md#v180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.8.0](service/databasemigrationservice/CHANGELOG.md#v180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.18.0](service/ec2/CHANGELOG.md#v1180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.7.0](service/ecr/CHANGELOG.md#v170-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.7.0](service/elasticsearchservice/CHANGELOG.md#v170-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.10.0](service/iam/CHANGELOG.md#v1100-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.6.0](service/identitystore/CHANGELOG.md#v160-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.10.0](service/imagebuilder/CHANGELOG.md#v1100-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.13.0](service/iot/CHANGELOG.md#v1130-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.7.0](service/iotevents/CHANGELOG.md#v170-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.1.0](service/kafkaconnect/CHANGELOG.md#v110-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.6.0](service/lakeformation/CHANGELOG.md#v160-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.8.0](service/lexmodelsv2/CHANGELOG.md#v180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.5.0](service/lexruntimev2/CHANGELOG.md#v150-2021-09-24) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.8.0](service/licensemanager/CHANGELOG.md#v180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.11.0](service/mediaconvert/CHANGELOG.md#v1110-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.9.0](service/mediapackagevod/CHANGELOG.md#v190-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.8.0](service/mediatailor/CHANGELOG.md#v180-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.1.0](service/opensearch/CHANGELOG.md#v110-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.12.0](service/quicksight/CHANGELOG.md#v1120-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.11.0](service/ssm/CHANGELOG.md#v1110-2021-09-24) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.10.0](service/wafv2/CHANGELOG.md#v1100-2021-09-24) + * **Feature**: API client updated + +# Release (2021-09-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.10.0](service/chime/CHANGELOG.md#v1100-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.10.1](service/cloudformation/CHANGELOG.md#v1101-2021-09-17) + * **Documentation**: Updated API client documentation. +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.7.0](service/comprehend/CHANGELOG.md#v170-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.17.0](service/ec2/CHANGELOG.md#v1170-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.6.0](service/ecr/CHANGELOG.md#v160-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.12.0](service/iot/CHANGELOG.md#v1120-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.0.0](service/kafkaconnect/CHANGELOG.md#v100-2021-09-17) + * **Release**: New AWS service client module +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.7.0](service/lexmodelsv2/CHANGELOG.md#v170-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.4.0](service/lexruntimev2/CHANGELOG.md#v140-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.10.0](service/macie2/CHANGELOG.md#v1100-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.8.0](service/mediapackagevod/CHANGELOG.md#v180-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.6.0](service/networkfirewall/CHANGELOG.md#v160-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. 
+* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.7.0](service/pinpoint/CHANGELOG.md#v170-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.11.0](service/quicksight/CHANGELOG.md#v1110-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.9.0](service/rds/CHANGELOG.md#v190-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.9.0](service/robomaker/CHANGELOG.md#v190-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.16.0](service/s3/CHANGELOG.md#v1160-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.15.0](service/sagemaker/CHANGELOG.md#v1150-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.5.0](service/ssooidc/CHANGELOG.md#v150-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.8.0](service/transcribe/CHANGELOG.md#v180-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.9.0](service/wafv2/CHANGELOG.md#v190-2021-09-17) + * **Feature**: Updated API client and endpoints to latest revision. + +# Release (2021-09-10) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.4.1](credentials/CHANGELOG.md#v141-2021-09-10) + * **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders. 
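
The `credentials` entry above concerns `AssumeRoleProvider` with a custom `TokenProvider`. As a hedged sketch, assuming an MFA-protected role and that `stscreds.StdinTokenProvider` is the stdin-prompting helper shipped with the module (the ARNs are placeholders):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder role and MFA device ARNs.
	provider := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg),
		"arn:aws:iam::111122223333:role/demo-role",
		func(o *stscreds.AssumeRoleOptions) {
			o.SerialNumber = aws.String("arn:aws:iam::111122223333:mfa/demo-user")
			// TokenProvider is invoked whenever a fresh MFA code is needed;
			// StdinTokenProvider prompts on standard input.
			o.TokenProvider = stscreds.StdinTokenProvider
		})

	// Any client built from cfg now assumes the role, prompting for MFA.
	cfg.Credentials = aws.NewCredentialsCache(provider)
}
```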
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.5.0](service/amp/CHANGELOG.md#v150-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.7.0](service/braket/CHANGELOG.md#v170-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.2.0](service/chimesdkidentity/CHANGELOG.md#v120-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.2.0](service/chimesdkmessaging/CHANGELOG.md#v120-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.7.0](service/codegurureviewer/CHANGELOG.md#v170-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.10.0](service/eks/CHANGELOG.md#v1100-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.11.0](service/elasticache/CHANGELOG.md#v1110-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.9.0](service/emr/CHANGELOG.md#v190-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.10.0](service/forecast/CHANGELOG.md#v1100-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.9.0](service/frauddetector/CHANGELOG.md#v190-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.7.0](service/kafka/CHANGELOG.md#v170-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.4.0](service/lookoutequipment/CHANGELOG.md#v140-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.8.0](service/mediapackage/CHANGELOG.md#v180-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.0.0](service/opensearch/CHANGELOG.md#v100-2021-09-10) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.8.0](service/outposts/CHANGELOG.md#v180-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.7.0](service/ram/CHANGELOG.md#v170-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.14.0](service/sagemaker/CHANGELOG.md#v1140-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.9.0](service/servicediscovery/CHANGELOG.md#v190-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.5.0](service/ssmcontacts/CHANGELOG.md#v150-2021-09-10) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.6.0](service/xray/CHANGELOG.md#v160-2021-09-10) + * **Feature**: API client updated + +# Release (2021-09-02) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.8.0](config/CHANGELOG.md#v180-2021-09-02) + * **Feature**: Add support for S3 Multi-Region Access Point ARNs. 
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.7.0](service/accessanalyzer/CHANGELOG.md#v170-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.8.0](service/acmpca/CHANGELOG.md#v180-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.8.0](service/cloud9/CHANGELOG.md#v180-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.10.0](service/cloudformation/CHANGELOG.md#v1100-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.6.0](service/cloudtrail/CHANGELOG.md#v160-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.10.0](service/codebuild/CHANGELOG.md#v1100-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.9.0](service/computeoptimizer/CHANGELOG.md#v190-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.9.0](service/configservice/CHANGELOG.md#v190-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.7.0](service/ebs/CHANGELOG.md#v170-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.16.0](service/ec2/CHANGELOG.md#v1160-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.7.0](service/efs/CHANGELOG.md#v170-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.8.0](service/emr/CHANGELOG.md#v180-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.6.0](service/firehose/CHANGELOG.md#v160-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.8.0](service/frauddetector/CHANGELOG.md#v180-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.9.0](service/fsx/CHANGELOG.md#v190-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.7.0](service/internal/s3shared/CHANGELOG.md#v170-2021-09-02) + * **Feature**: Add support for S3 Multi-Region Access Point ARNs. 
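
As a sketch of the S3 Multi-Region Access Point ARN support added across `config`, `internal/s3shared`, and `s3` in this release: the MRAP ARN is passed where a bucket name would normally go, and the SDK is expected to select SigV4A signing for it internally. The account ID and MRAP alias below are placeholders.

```go
package main

import (
	"context"
	"io"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The Multi-Region Access Point ARN takes the place of the bucket name.
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String("arn:aws:s3::111122223333:accesspoint/mfzwi23gnjvgw.mrap"),
		Key:    aws.String("example.txt"),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()
	body, _ := io.ReadAll(out.Body)
	log.Printf("read %d bytes via MRAP", len(body))
}
```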
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.11.0](service/iot/CHANGELOG.md#v1110-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.5.0](service/iotjobsdataplane/CHANGELOG.md#v150-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.7.0](service/ivs/CHANGELOG.md#v170-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.6.0](service/kms/CHANGELOG.md#v160-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.9.0](service/lexmodelbuildingservice/CHANGELOG.md#v190-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.7.0](service/mediatailor/CHANGELOG.md#v170-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.2.0](service/memorydb/CHANGELOG.md#v120-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.5.0](service/mwaa/CHANGELOG.md#v150-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.6.0](service/polly/CHANGELOG.md#v160-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.10.0](service/quicksight/CHANGELOG.md#v1100-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.15.0](service/s3/CHANGELOG.md#v1150-2021-09-02) + * **Feature**: API client updated + * **Feature**: Add support for S3 Multi-Region Access Point ARNs. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.11.0](service/s3control/CHANGELOG.md#v1110-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.7.0](service/sagemakerruntime/CHANGELOG.md#v170-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.6.0](service/schemas/CHANGELOG.md#v160-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.10.0](service/securityhub/CHANGELOG.md#v1100-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.5.0](service/servicecatalogappregistry/CHANGELOG.md#v150-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.9.0](service/sqs/CHANGELOG.md#v190-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.4.0](service/ssmincidents/CHANGELOG.md#v140-2021-09-02) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.7.0](service/transfer/CHANGELOG.md#v170-2021-09-02) + * **Feature**: API client updated + +# Release (2021-08-27) + +## General Highlights +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.4.0](credentials/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider.
Closes https://github.com/aws/aws-sdk-go-v2/issues/723 +* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.2.0](feature/dynamodb/attributevalue/CHANGELOG.md#v120-2021-08-27) + * **Bug Fix**: Fix unmarshaler's decoding of AttributeValueMemberN into a type that is a string alias. +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.7.0](service/acmpca/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.5.0](service/amplify/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.4.0](service/amplifybackend/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.7.0](service/apigateway/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.4.0](service/apigatewaymanagementapi/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.7.0](service/appflow/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.4.0](service/applicationinsights/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.2.0](service/apprunner/CHANGELOG.md#v120-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.6.0](service/appstream/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.6.0](service/appsync/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.6.0](service/athena/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.8.0](service/auditmanager/CHANGELOG.md#v180-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.5.0](service/autoscalingplans/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.5.0](service/backup/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.7.0](service/batch/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.6.0](service/braket/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.1.0](service/chimesdkidentity/CHANGELOG.md#v110-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.1.0](service/chimesdkmessaging/CHANGELOG.md#v110-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.5.0](service/cloudtrail/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. 
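
The `credentials` entry that opens this release adds `Tags` and `TransitiveTagKeys` to `stscreds.AssumeRoleProvider`. A minimal sketch, assuming the option fields carry exactly the names the entry gives (role ARN and tag values are placeholders):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	provider := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg),
		"arn:aws:iam::111122223333:role/demo-role", // placeholder role
		func(o *stscreds.AssumeRoleOptions) {
			// Session tags attached to the assumed-role session.
			o.Tags = []types.Tag{
				{Key: aws.String("Project"), Value: aws.String("demo")},
			}
			// Transitive keys persist across chained role assumptions.
			o.TransitiveTagKeys = []string{"Project"}
		})

	// Clients built from cfg now carry the tagged assumed-role session.
	cfg.Credentials = aws.NewCredentialsCache(provider)
}
```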
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.6.0](service/cloudwatchevents/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.5.0](service/codeartifact/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.9.0](service/codebuild/CHANGELOG.md#v190-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.4.0](service/codecommit/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.5.0](service/codeguruprofiler/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.4.0](service/codestarnotifications/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.5.0](service/cognitoidentity/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.6.0](service/cognitoidentityprovider/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.6.0](service/comprehend/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.8.0](service/computeoptimizer/CHANGELOG.md#v180-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.5.0](service/connectcontactlens/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.9.0](service/customerprofiles/CHANGELOG.md#v190-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.7.0](service/databasemigrationservice/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.6.0](service/datasync/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.4.0](service/dax/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.5.0](service/directoryservice/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.5.0](service/dlm/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.4.0](service/dynamodbstreams/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.15.0](service/ec2/CHANGELOG.md#v1150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.5.0](service/ecrpublic/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. 
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.6.0](service/efs/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.9.0](service/eks/CHANGELOG.md#v190-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.6.0](service/emrcontainers/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.7.0](service/eventbridge/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.2.0](service/finspace/CHANGELOG.md#v120-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.2.0](service/finspacedata/CHANGELOG.md#v120-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.5.0](service/firehose/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.7.0](service/fms/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.9.0](service/forecast/CHANGELOG.md#v190-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.4.0](service/forecastquery/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.7.0](service/frauddetector/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.8.0](service/fsx/CHANGELOG.md#v180-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.6.0](service/gamelift/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.11.0](service/glue/CHANGELOG.md#v1110-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.6.0](service/groundstation/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.5.0](service/guardduty/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.7.0](service/health/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.6.0](service/healthlake/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.10.0](service/iot/CHANGELOG.md#v1100-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.4.0](service/iot1clickdevicesservice/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.5.0](service/iotanalytics/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. 
+* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.4.0](service/iotdataplane/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.5.0](service/iotfleethub/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.11.0](service/iotsitewise/CHANGELOG.md#v1110-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.6.0](service/ivs/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.5.0](service/lakeformation/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.6.0](service/lexmodelsv2/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.3.0](service/lexruntimev2/CHANGELOG.md#v130-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.7.0](service/licensemanager/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.10.0](service/lightsail/CHANGELOG.md#v1100-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.3.0](service/lookoutequipment/CHANGELOG.md#v130-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.3.0](service/lookoutmetrics/CHANGELOG.md#v130-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.9.0](service/macie2/CHANGELOG.md#v190-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.10.0](service/mediaconvert/CHANGELOG.md#v1100-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.7.0](service/mediapackage/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.7.0](service/mediapackagevod/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.5.0](service/mq/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.5.0](service/networkfirewall/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.7.0](service/outposts/CHANGELOG.md#v170-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.6.0](service/pi/CHANGELOG.md#v160-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.4.0](service/pinpointsmsvoice/CHANGELOG.md#v140-2021-08-27) + * **Feature**: Updated API model to latest revision. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.5.0](service/polly/CHANGELOG.md#v150-2021-08-27) + * **Feature**: Updated API model to latest revision. 
+* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.6.0](service/qldb/CHANGELOG.md#v160-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.5.0](service/qldbsession/CHANGELOG.md#v150-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.6.0](service/ram/CHANGELOG.md#v160-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.8.0](service/rekognition/CHANGELOG.md#v180-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.5.0](service/resourcegroupstaggingapi/CHANGELOG.md#v150-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.8.0](service/robomaker/CHANGELOG.md#v180-2021-08-27)
+  * **Bug Fix**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.1.0](service/route53recoverycontrolconfig/CHANGELOG.md#v110-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.7.0](service/route53resolver/CHANGELOG.md#v170-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.14.0](service/s3/CHANGELOG.md#v1140-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.10.0](service/s3control/CHANGELOG.md#v1100-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.5.0](service/s3outposts/CHANGELOG.md#v150-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.5.0](service/servicecatalog/CHANGELOG.md#v150-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.4.0](service/servicecatalogappregistry/CHANGELOG.md#v140-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.5.0](service/signer/CHANGELOG.md#v150-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.4.0](service/ssooidc/CHANGELOG.md#v140-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.6.0](service/storagegateway/CHANGELOG.md#v160-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.6.0](service/synthetics/CHANGELOG.md#v160-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.5.0](service/textract/CHANGELOG.md#v150-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.7.0](service/transcribe/CHANGELOG.md#v170-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.6.0](service/transfer/CHANGELOG.md#v160-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.5.0](service/wafregional/CHANGELOG.md#v150-2021-08-27)
+  * **Feature**: Updated API model to latest revision.
+
+# Release (2021-08-19)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.6.0](service/apigateway/CHANGELOG.md#v160-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.5.0](service/apigatewayv2/CHANGELOG.md#v150-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.6.0](service/appflow/CHANGELOG.md#v160-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.5.0](service/applicationautoscaling/CHANGELOG.md#v150-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.6.0](service/cloud9/CHANGELOG.md#v160-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.4.0](service/clouddirectory/CHANGELOG.md#v140-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.6.0](service/cloudwatchlogs/CHANGELOG.md#v160-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.8.0](service/codebuild/CHANGELOG.md#v180-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.7.0](service/configservice/CHANGELOG.md#v170-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.8.0](service/costexplorer/CHANGELOG.md#v180-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.8.0](service/customerprofiles/CHANGELOG.md#v180-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.8.0](service/databrew/CHANGELOG.md#v180-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.4.0](service/directoryservice/CHANGELOG.md#v140-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.14.0](service/ec2/CHANGELOG.md#v1140-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.9.0](service/elasticache/CHANGELOG.md#v190-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.6.0](service/emr/CHANGELOG.md#v160-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.10.0](service/iotsitewise/CHANGELOG.md#v1100-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.7.0](service/lambda/CHANGELOG.md#v170-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.6.0](service/licensemanager/CHANGELOG.md#v160-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.0.0](service/memorydb/CHANGELOG.md#v100-2021-08-19)
+  * **Release**: New AWS service client module
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.8.0](service/quicksight/CHANGELOG.md#v180-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.10.0](service/route53/CHANGELOG.md#v1100-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.6.0](service/route53resolver/CHANGELOG.md#v160-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.13.0](service/s3/CHANGELOG.md#v1130-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.12.0](service/sagemaker/CHANGELOG.md#v1120-2021-08-19)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.5.0](service/sagemakerruntime/CHANGELOG.md#v150-2021-08-19)
+  * **Feature**: API client updated
+
+# Release (2021-08-12)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`: [v1.3.1](feature/cloudfront/sign/CHANGELOG.md#v131-2021-08-12)
+  * **Bug Fix**: Update to not escape HTML when encoding the policy.
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.5.0](service/athena/CHANGELOG.md#v150-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.11.0](service/autoscaling/CHANGELOG.md#v1110-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.8.0](service/chime/CHANGELOG.md#v180-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.0.0](service/chimesdkidentity/CHANGELOG.md#v100-2021-08-12)
+  * **Release**: New AWS service client module
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.0.0](service/chimesdkmessaging/CHANGELOG.md#v100-2021-08-12)
+  * **Release**: New AWS service client module
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.7.0](service/codebuild/CHANGELOG.md#v170-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.6.0](service/connect/CHANGELOG.md#v160-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.5.0](service/ebs/CHANGELOG.md#v150-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.8.0](service/ecs/CHANGELOG.md#v180-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.5.0](service/lexmodelsv2/CHANGELOG.md#v150-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.9.0](service/lightsail/CHANGELOG.md#v190-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.3.0](service/nimble/CHANGELOG.md#v130-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.7.0](service/rekognition/CHANGELOG.md#v170-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.9.0](service/route53/CHANGELOG.md#v190-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.0.0](service/snowdevicemanagement/CHANGELOG.md#v100-2021-08-12)
+  * **Release**: New AWS service client module
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.9.0](service/ssm/CHANGELOG.md#v190-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.5.0](service/synthetics/CHANGELOG.md#v150-2021-08-12)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.7.0](service/wafv2/CHANGELOG.md#v170-2021-08-12)
+  * **Feature**: API client updated
+
+# Release (2021-08-04)
+
+## General Highlights
+* **Feature**: Adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.8.0
+  * **Bug Fix**: Corrected an issue where the retryer was not using the last attempt's ResultMetadata as the basis for the return result from the stack. ([#1345](https://github.com/aws/aws-sdk-go-v2/pull/1345))
+* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.2.0](feature/dynamodb/expression/CHANGELOG.md#v120-2021-08-04)
+  * **Feature**: Add IsSet helper for ConditionBuilder and KeyConditionBuilder ([#1329](https://github.com/aws/aws-sdk-go-v2/pull/1329))
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.5.2](service/accessanalyzer/CHANGELOG.md#v152-2021-08-04)
+  * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.3.1](service/amp/CHANGELOG.md#v131-2021-08-04)
+  * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.5.0](service/appintegrations/CHANGELOG.md#v150-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.4.2](service/appmesh/CHANGELOG.md#v142-2021-08-04)
+  * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.5.0](service/appsync/CHANGELOG.md#v150-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.7.0](service/auditmanager/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.6.0](service/batch/CHANGELOG.md#v160-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.5.2](service/braket/CHANGELOG.md#v152-2021-08-04)
+  * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.7.0](service/chime/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.8.0](service/cloudformation/CHANGELOG.md#v180-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.7.0](service/cloudwatch/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.6.0](service/codebuild/CHANGELOG.md#v160-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.4.2](service/codeguruprofiler/CHANGELOG.md#v142-2021-08-04)
+  * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.5.0](service/cognitoidentityprovider/CHANGELOG.md#v150-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.7.0](service/computeoptimizer/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.7.0](service/databrew/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.7.0](service/directconnect/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.13.0](service/ec2/CHANGELOG.md#v1130-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.7.0](service/ecs/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.6.0](service/elasticloadbalancingv2/CHANGELOG.md#v160-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.5.0](service/emr/CHANGELOG.md#v150-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.5.0](service/emrcontainers/CHANGELOG.md#v150-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.6.0](service/eventbridge/CHANGELOG.md#v160-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.10.0](service/glue/CHANGELOG.md#v1100-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.5.0](service/greengrassv2/CHANGELOG.md#v150-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.5.2](service/groundstation/CHANGELOG.md#v152-2021-08-04)
+  * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.8.0](service/iam/CHANGELOG.md#v180-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.4.0](service/identitystore/CHANGELOG.md#v140-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.8.0](service/imagebuilder/CHANGELOG.md#v180-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.9.0](service/iot/CHANGELOG.md#v190-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.4.0](service/iotanalytics/CHANGELOG.md#v140-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.9.0](service/iotsitewise/CHANGELOG.md#v190-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.8.0](service/iotwireless/CHANGELOG.md#v180-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.10.0](service/kendra/CHANGELOG.md#v1100-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.6.0](service/lambda/CHANGELOG.md#v160-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.7.0](service/lexmodelbuildingservice/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.4.0](service/lexmodelsv2/CHANGELOG.md#v140-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.4.0](service/location/CHANGELOG.md#v140-2021-08-04)
+  * **Feature**: Updated to latest API model.
+  * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.9.0](service/mediaconvert/CHANGELOG.md#v190-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.8.0](service/medialive/CHANGELOG.md#v180-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.3.1](service/mgn/CHANGELOG.md#v131-2021-08-04)
+  * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.7.0](service/personalize/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.2.0](service/proton/CHANGELOG.md#v120-2021-08-04)
+  * **Feature**: Updated to latest API model.
+  * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
+* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.5.0](service/qldb/CHANGELOG.md#v150-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.7.0](service/quicksight/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.7.0](service/rds/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.10.0](service/redshift/CHANGELOG.md#v1100-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.5.0](service/redshiftdata/CHANGELOG.md#v150-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.7.0](service/robomaker/CHANGELOG.md#v170-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.8.0](service/route53/CHANGELOG.md#v180-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.0.0](service/route53recoverycluster/CHANGELOG.md#v100-2021-08-04)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.0.0](service/route53recoverycontrolconfig/CHANGELOG.md#v100-2021-08-04)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.0.0](service/route53recoveryreadiness/CHANGELOG.md#v100-2021-08-04)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.12.0](service/s3/CHANGELOG.md#v1120-2021-08-04)
+  * **Feature**: Add `HeadObject` presign support. ([#1346](https://github.com/aws/aws-sdk-go-v2/pull/1346))
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.9.0](service/s3control/CHANGELOG.md#v190-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.4.0](service/s3outposts/CHANGELOG.md#v140-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.11.0](service/sagemaker/CHANGELOG.md#v1110-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.5.0](service/secretsmanager/CHANGELOG.md#v150-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.8.0](service/securityhub/CHANGELOG.md#v180-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.6.0](service/shield/CHANGELOG.md#v160-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.3.0](service/ssmcontacts/CHANGELOG.md#v130-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.2.0](service/ssmincidents/CHANGELOG.md#v120-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.5.0](service/ssoadmin/CHANGELOG.md#v150-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.4.0](service/synthetics/CHANGELOG.md#v140-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.4.0](service/textract/CHANGELOG.md#v140-2021-08-04)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.6.0](service/transcribe/CHANGELOG.md#v160-2021-08-04)
+  * **Feature**: Updated to latest API model.
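The `feature/dynamodb/expression` v1.2.0 entry in the release above adds an `IsSet` helper to `ConditionBuilder` and `KeyConditionBuilder`. A minimal sketch of how such a helper can be used to attach a filter only when it was actually populated (the attribute name and value are illustrative placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression"
)

func main() {
	// The zero value of a ConditionBuilder reports IsSet() == false, so a
	// caller can build conditions optionally without tracking a separate flag.
	var cond expression.ConditionBuilder
	if filter := "gopher"; filter != "" { // placeholder user input
		cond = expression.Name("name").Equal(expression.Value(filter))
	}

	builder := expression.NewBuilder()
	if cond.IsSet() { // helper added in v1.2.0 (#1329)
		builder = builder.WithFilter(cond)
	}

	expr, err := builder.Build()
	if err != nil {
		// Build fails if no expressions were attached at all.
		fmt.Println("build error:", err)
		return
	}
	fmt.Println("filter:", *expr.Filter())
}
```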
+
+# Release (2021-07-15)
+
+## General Highlights
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.5.0](config/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.3.0](feature/ec2/imds/CHANGELOG.md#v130-2021-07-15)
+  * **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.5.0](service/acm/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.3.0](service/amp/CHANGELOG.md#v130-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.4.0](service/amplify/CHANGELOG.md#v140-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.3.0](service/amplifybackend/CHANGELOG.md#v130-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.10.0](service/autoscaling/CHANGELOG.md#v1100-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.6.0](service/chime/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.7.0](service/cloudformation/CHANGELOG.md#v170-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.7.0](service/cloudfront/CHANGELOG.md#v170-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.5.0](service/cloudsearch/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.6.0](service/cloudwatch/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.6.0](service/databasemigrationservice/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.6.0](service/devopsguru/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.6.0](service/directconnect/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.8.0](service/docdb/CHANGELOG.md#v180-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.12.0](service/ec2/CHANGELOG.md#v1120-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.8.0](service/eks/CHANGELOG.md#v180-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.8.0](service/elasticache/CHANGELOG.md#v180-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.5.0](service/elasticbeanstalk/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.5.0](service/elasticloadbalancing/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+  * **Documentation**: Updated service model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.5.0](service/elasticloadbalancingv2/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+  * **Documentation**: Updated service model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.6.0](service/fms/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.6.0](service/frauddetector/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.9.0](service/glue/CHANGELOG.md#v190-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.6.0](service/health/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.5.0](service/healthlake/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.7.0](service/iam/CHANGELOG.md#v170-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+  * **Documentation**: Updated service model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.7.0](service/imagebuilder/CHANGELOG.md#v170-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.8.0](service/iot/CHANGELOG.md#v180-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.8.0](service/iotsitewise/CHANGELOG.md#v180-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.9.0](service/kendra/CHANGELOG.md#v190-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.5.0](service/lambda/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.6.0](service/lexmodelbuildingservice/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.8.0](service/lightsail/CHANGELOG.md#v180-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.5.1](service/macie/CHANGELOG.md#v151-2021-07-15)
+  * **Documentation**: Updated service model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.8.1](service/macie2/CHANGELOG.md#v181-2021-07-15)
+  * **Documentation**: Updated service model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.8.0](service/mediaconvert/CHANGELOG.md#v180-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.5.0](service/mediatailor/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.3.0](service/mgn/CHANGELOG.md#v130-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.4.0](service/mq/CHANGELOG.md#v140-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.7.0](service/neptune/CHANGELOG.md#v170-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.6.0](service/outposts/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.5.1](service/pricing/CHANGELOG.md#v151-2021-07-15)
+  * **Documentation**: Updated service model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.6.0](service/rds/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.9.0](service/redshift/CHANGELOG.md#v190-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.10.0](service/sagemaker/CHANGELOG.md#v1100-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.5.0](service/ses/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.7.0](service/sns/CHANGELOG.md#v170-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+  * **Documentation**: Updated service model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.7.0](service/sqs/CHANGELOG.md#v170-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.8.0](service/ssm/CHANGELOG.md#v180-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+  * **Documentation**: Updated service model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.5.0](service/storagegateway/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: Updated service model to latest version.
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.6.0](service/sts/CHANGELOG.md#v160-2021-07-15)
+  * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+  * **Documentation**: Updated service model to latest revision.
+* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.5.0](service/wellarchitected/CHANGELOG.md#v150-2021-07-15)
+  * **Feature**: Updated service model to latest version.
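The `config` v1.5.0 and `feature/ec2/imds` v1.3.0 entries in the release above add support for EC2 IPv6-enabled Instance Metadata Service endpoints. A sketch of opting a client into the IPv6 endpoint, assuming the `WithEC2IMDSEndpointMode` loader option and `EndpointModeStateIPv6` constant those releases introduce:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	// Direct IMDS calls at the IPv6 endpoint instead of the IPv4 default;
	// the same behavior can typically also be selected via environment or
	// shared-config settings.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithEC2IMDSEndpointMode(imds.EndpointModeStateIPv6),
	)
	if err != nil {
		log.Fatal(err)
	}

	client := imds.NewFromConfig(cfg)
	region, err := client.GetRegion(context.TODO(), &imds.GetRegionInput{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("region:", region.Region)
}
```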
+
+# Release (2021-07-01)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.1.0](internal/ini/CHANGELOG.md#v110-2021-07-01)
+  * **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values.
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.9.0](service/autoscaling/CHANGELOG.md#v190-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.6.0](service/databrew/CHANGELOG.md#v160-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.11.0](service/ec2/CHANGELOG.md#v1110-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.8.0](service/glue/CHANGELOG.md#v180-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.8.0](service/kendra/CHANGELOG.md#v180-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.7.0](service/mediaconvert/CHANGELOG.md#v170-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.6.0](service/mediapackagevod/CHANGELOG.md#v160-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.8.0](service/redshift/CHANGELOG.md#v180-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.9.0](service/sagemaker/CHANGELOG.md#v190-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.7.0](service/servicediscovery/CHANGELOG.md#v170-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.6.0](service/sqs/CHANGELOG.md#v160-2021-07-01)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.2.0](service/ssmcontacts/CHANGELOG.md#v120-2021-07-01)
+  * **Feature**: API client updated
+
+# Release (2021-06-25)
+
+## General Highlights
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.7.0
+  * **Feature**: Adds configuration values for enabling endpoint discovery.
+  * **Bug Fix**: Keep Object-Lock headers as headers when presigning Sigv4 signing requests
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.4.0](config/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: Adds configuration setting for enabling endpoint discovery.
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.3.0](credentials/CHANGELOG.md#v130-2021-06-25)
+  * **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275))
+* `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`: [v1.2.0](feature/cloudfront/sign/CHANGELOG.md#v120-2021-06-25)
+  * **Feature**: Add UnmarshalJSON for AWSEpochTime to correctly unmarshal AWSEpochTime ([#1298](https://github.com/aws/aws-sdk-go-v2/pull/1298))
+* `github.com/aws/aws-sdk-go-v2/internal/configsources`: [v1.0.0](internal/configsources/CHANGELOG.md#v100-2021-06-25)
+  * **Release**: Release new modules
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.2.0](service/amp/CHANGELOG.md#v120-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.3.0](service/amplify/CHANGELOG.md#v130-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.2.0](service/amplifybackend/CHANGELOG.md#v120-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.5.0](service/appflow/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.4.0](service/appmesh/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.5.0](service/chime/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.5.0](service/cloud9/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.6.0](service/cloudformation/CHANGELOG.md#v160-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.6.0](service/cloudfront/CHANGELOG.md#v160-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.4.0](service/cloudsearch/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.5.0](service/cloudwatch/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.5.0](service/cloudwatchevents/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.5.0](service/codebuild/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.5.0](service/codegurureviewer/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.4.0](service/cognitoidentity/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.4.0](service/cognitoidentityprovider/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.5.0](service/connect/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.3.0](service/dax/CHANGELOG.md#v130-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.7.0](service/docdb/CHANGELOG.md#v170-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.4.0](service/dynamodb/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: Adds support for endpoint discovery.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.10.0](service/ec2/CHANGELOG.md#v1100-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.7.0](service/elasticache/CHANGELOG.md#v170-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.4.0](service/elasticbeanstalk/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.4.0](service/elasticloadbalancing/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.4.0](service/elasticloadbalancingv2/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.5.0](service/eventbridge/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.5.0](service/greengrass/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.4.0](service/greengrassv2/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.6.0](service/iam/CHANGELOG.md#v160-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery`: [v1.0.0](service/internal/endpoint-discovery/CHANGELOG.md#v100-2021-06-25)
+  * **Release**: Release new modules
+  * **Feature**: Module supporting endpoint-discovery across all service clients.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.7.0](service/iot/CHANGELOG.md#v170-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.3.0](service/iotanalytics/CHANGELOG.md#v130-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.7.0](service/kendra/CHANGELOG.md#v170-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.4.0](service/kms/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.3.0](service/lexmodelsv2/CHANGELOG.md#v130-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.2.0](service/lexruntimev2/CHANGELOG.md#v120-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.5.0](service/licensemanager/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.2.0](service/lookoutmetrics/CHANGELOG.md#v120-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.4.0](service/managedblockchain/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.6.0](service/mediaconnect/CHANGELOG.md#v160-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.7.0](service/medialive/CHANGELOG.md#v170-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.4.0](service/mediatailor/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.6.0](service/neptune/CHANGELOG.md#v160-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.1.0](service/proton/CHANGELOG.md#v110-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.6.0](service/quicksight/CHANGELOG.md#v160-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.5.0](service/ram/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.5.0](service/rds/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.7.0](service/redshift/CHANGELOG.md#v170-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.4.0](service/redshiftdata/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.7.0](service/route53/CHANGELOG.md#v170-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.8.0](service/sagemaker/CHANGELOG.md#v180-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.4.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.7.0](service/securityhub/CHANGELOG.md#v170-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.4.0](service/ses/CHANGELOG.md#v140-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.5.0](service/snowball/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.6.0](service/sns/CHANGELOG.md#v160-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.5.0](service/sqs/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.5.0](service/sts/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.3.0](service/timestreamquery/CHANGELOG.md#v130-2021-06-25)
+  * **Feature**: Adds support for endpoint discovery.
+* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.3.0](service/timestreamwrite/CHANGELOG.md#v130-2021-06-25)
+  * **Feature**: Adds support for endpoint discovery.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.5.0](service/transfer/CHANGELOG.md#v150-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.3.0](service/waf/CHANGELOG.md#v130-2021-06-25)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.6.0](service/wafv2/CHANGELOG.md#v160-2021-06-25)
+  * **Feature**: API client updated
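The release above introduces endpoint discovery: `aws` v1.7.0 adds the configuration values, `config` v1.4.0 adds the loader setting, and the DynamoDB and Timestream clients opt in to the mechanism. A sketch of enabling it per client, assuming the `EndpointDiscovery` options field and `aws.EndpointDiscoveryEnabled` state from these releases:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Opt this client into endpoint discovery; discovered endpoints are
	// cached by the client and reused for subsequent requests.
	client := dynamodb.NewFromConfig(cfg, func(o *dynamodb.Options) {
		o.EndpointDiscovery.EnableEndpointDiscovery = aws.EndpointDiscoveryEnabled
	})

	out, err := client.ListTables(context.TODO(), &dynamodb.ListTablesInput{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("tables:", out.TableNames)
}
```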
+
+# Release (2021-06-11)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.7.0](service/autoscaling/CHANGELOG.md#v170-2021-06-11)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.3.2](service/cloudtrail/CHANGELOG.md#v132-2021-06-11)
+  * **Documentation**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.3.3](service/cognitoidentityprovider/CHANGELOG.md#v133-2021-06-11)
+  * **Documentation**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.6.0](service/eks/CHANGELOG.md#v160-2021-06-11)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.6.0](service/fsx/CHANGELOG.md#v160-2021-06-11)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.6.0](service/glue/CHANGELOG.md#v160-2021-06-11)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.6.0](service/kendra/CHANGELOG.md#v160-2021-06-11)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.7.0](service/macie2/CHANGELOG.md#v170-2021-06-11)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.6.0](service/medialive/CHANGELOG.md#v160-2021-06-11)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.4.0](service/pi/CHANGELOG.md#v140-2021-06-11)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.0.0](service/proton/CHANGELOG.md#v100-2021-06-11)
+  * **Release**: New AWS service client module
+* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.3.1](service/qldb/CHANGELOG.md#v131-2021-06-11)
+  * **Documentation**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.4.2](service/rds/CHANGELOG.md#v142-2021-06-11)
+  * **Documentation**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.7.0](service/sagemaker/CHANGELOG.md#v170-2021-06-11)
+  * **Feature**: Updated to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.4.1](service/transfer/CHANGELOG.md#v141-2021-06-11)
+  * **Documentation**: Updated to latest API model.
+
+# Release (2021-06-04)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.5.0](service/acmpca/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.6.0](service/autoscaling/CHANGELOG.md#v160-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.4.0](service/braket/CHANGELOG.md#v140-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.5.2](service/cloudfront/CHANGELOG.md#v152-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.4.0](service/datasync/CHANGELOG.md#v140-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.3.0](service/devicefarm/CHANGELOG.md#v130-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.6.0](service/docdb/CHANGELOG.md#v160-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.9.0](service/ec2/CHANGELOG.md#v190-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.5.0](service/ecs/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.7.0](service/forecast/CHANGELOG.md#v170-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.5.0](service/fsx/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.5.1](service/iam/CHANGELOG.md#v151-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.4.0](service/internal/s3shared/CHANGELOG.md#v140-2021-06-04)
+  * **Feature**: The handling of AccessPoint and Outpost ARNs has been updated.
+* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.4.0](service/iotevents/CHANGELOG.md#v140-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.3.0](service/ioteventsdata/CHANGELOG.md#v130-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.6.0](service/iotsitewise/CHANGELOG.md#v160-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.6.0](service/iotwireless/CHANGELOG.md#v160-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.5.0](service/kendra/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.6.1](service/lightsail/CHANGELOG.md#v161-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.2.0](service/location/CHANGELOG.md#v120-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.2.0](service/mwaa/CHANGELOG.md#v120-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.4.0](service/outposts/CHANGELOG.md#v140-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.3.0](service/polly/CHANGELOG.md#v130-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.3.0](service/qldb/CHANGELOG.md#v130-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.3.2](service/resourcegroups/CHANGELOG.md#v132-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.6.2](service/route53/CHANGELOG.md#v162-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.4.2](service/route53resolver/CHANGELOG.md#v142-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.10.0](service/s3/CHANGELOG.md#v1100-2021-06-04)
+  * **Feature**: The handling of AccessPoint and Outpost ARNs has been updated.
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.7.0](service/s3control/CHANGELOG.md#v170-2021-06-04)
+  * **Feature**: The handling of AccessPoint and Outpost ARNs has been updated.
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.5.0](service/servicediscovery/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.5.0](service/sns/CHANGELOG.md#v150-2021-06-04)
+  * **Feature**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.4.2](service/sqs/CHANGELOG.md#v142-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.6.2](service/ssm/CHANGELOG.md#v162-2021-06-04)
+  * **Documentation**: Updated service client to latest API model.
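The AccessPoint and Outpost ARN handling updated in `internal/s3shared`, `s3`, and `s3control` above concerns ARNs passed where a bucket name is expected. A sketch of the usage pattern involved (the ARN, account ID, and object key are illustrative placeholders):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// An access point ARN may be supplied in place of a bucket name; the
	// client resolves it to the appropriate access-point endpoint.
	out, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-accesspoint"), // placeholder
		Key:    aws.String("example-object"),                                               // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()
	log.Println("etag:", aws.ToString(out.ETag))
}
```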
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.3.0](config/CHANGELOG.md#v130-2021-05-20) + * **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile. + * **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations. +* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.0.0](internal/ini/CHANGELOG.md#v100-2021-05-20) + * **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. +* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.0.0](service/applicationcostprofiler/CHANGELOG.md#v100-2021-05-20) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.0.0](service/apprunner/CHANGELOG.md#v100-2021-05-20) + * **Release**: New AWS service client module + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.5.0](service/autoscaling/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.4.0](service/computeoptimizer/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.6.0](service/detective/CHANGELOG.md#v160-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.5.0](service/eks/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.6.0](service/elasticache/CHANGELOG.md#v160-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.4.0](service/elasticsearchservice/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.5.0](service/iam/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.5.0](service/imagebuilder/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.5.0](service/iot/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.4.0](service/iotdeviceadvisor/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.5.0](service/iotsitewise/CHANGELOG.md#v150-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.4.0](service/kinesis/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.3.0](service/kinesisanalytics/CHANGELOG.md#v130-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.4.0](service/kinesisanalyticsv2/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.2.0](service/lexmodelsv2/CHANGELOG.md#v120-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.4.0](service/licensemanager/CHANGELOG.md#v140-2021-05-20) + * **Feature**: API client updated +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.6.0](service/lightsail/CHANGELOG.md#v160-2021-05-20) + * **Feature**: API client updated +* 
`github.com/aws/aws-sdk-go-v2/service/macie`: [v1.4.0](service/macie/CHANGELOG.md#v140-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.6.0](service/macie2/CHANGELOG.md#v160-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.5.0](service/mediaconnect/CHANGELOG.md#v150-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.5.0](service/neptune/CHANGELOG.md#v150-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.5.0](service/personalize/CHANGELOG.md#v150-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.4.0](service/quicksight/CHANGELOG.md#v140-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.5.0](service/rekognition/CHANGELOG.md#v150-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.8.0](service/s3/CHANGELOG.md#v180-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.6.0](service/sagemaker/CHANGELOG.md#v160-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.3.0](service/sagemakera2iruntime/CHANGELOG.md#v130-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.6.0](service/securityhub/CHANGELOG.md#v160-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.3.0](service/support/CHANGELOG.md#v130-2021-05-20)
+  * **Feature**: API client updated
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.4.0](service/transcribe/CHANGELOG.md#v140-2021-05-20)
+  * **Feature**: API client updated
+
+# Release (2021-05-14)
+
+## General Highlights
+* **Feature**: A constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.5.0
+  * **Feature**: `AddSDKAgentKey` and `AddSDKAgentKeyValue` in the `aws/middleware` package have been updated to direct metadata to the `User-Agent` HTTP header.
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.3.0](service/codeartifact/CHANGELOG.md#v130-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/commander`: [v1.0.0](service/commander/CHANGELOG.md#v100-2021-05-14)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.5.0](service/configservice/CHANGELOG.md#v150-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.4.0](service/connect/CHANGELOG.md#v140-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.7.0](service/ec2/CHANGELOG.md#v170-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.4.0](service/ecs/CHANGELOG.md#v140-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.4.0](service/eks/CHANGELOG.md#v140-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.0.0](service/finspace/CHANGELOG.md#v100-2021-05-14)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.0.0](service/finspacedata/CHANGELOG.md#v100-2021-05-14)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.4.0](service/iot/CHANGELOG.md#v140-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.5.0](service/iotwireless/CHANGELOG.md#v150-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.3.0](service/kinesis/CHANGELOG.md#v130-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.2.0](service/kinesisanalytics/CHANGELOG.md#v120-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.3.0](service/kinesisanalyticsv2/CHANGELOG.md#v130-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.3.0](service/lakeformation/CHANGELOG.md#v130-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.1.0](service/lookoutmetrics/CHANGELOG.md#v110-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.5.0](service/mediaconvert/CHANGELOG.md#v150-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.6.0](service/route53/CHANGELOG.md#v160-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.7.0](service/s3/CHANGELOG.md#v170-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.6.0](service/s3control/CHANGELOG.md#v160-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.6.0](service/ssm/CHANGELOG.md#v160-2021-05-14)
+  * **Feature**: Updated to latest service API model.
+* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.0.0](service/ssmcontacts/CHANGELOG.md#v100-2021-05-14)
+  * **Release**: New AWS service client module
+  * **Feature**: Updated to latest service API model.
+
+# Release 2021-05-06
+
+## Breaking change
+* `service/ec2` - v1.6.0
+  * This release contains a breaking change to the Amazon EC2 API client. API number (int/int64/etc.) and boolean members were changed from value to pointer types. Applications using the EC2 API client will fail to compile after upgrading wherever these members are set. To migrate, update your application to use pointers for all number and boolean members in the API client module. The SDK provides helper utilities to convert between value and pointer types, for example the [aws.Bool](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Bool) function, which returns the address of a bool literal. Similar utilities are available for all other primitive types in the [aws](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws) package, as sketched below.
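+
+A minimal sketch of that migration, assuming a hypothetical `DescribeInstances` call whose `MaxResults` and `DryRun` members were among those remodeled (the exact member set is listed in the EC2 module changelog):
+
+```go
+// Before v1.6.0 these were value members:
+//   params := &ec2.DescribeInstancesInput{MaxResults: 10, DryRun: true}
+
+// From v1.6.0 on they are pointer members, built with the aws helpers:
+params := &ec2.DescribeInstancesInput{
+	MaxResults: aws.Int32(10),  // *int32 from an int32 literal
+	DryRun:     aws.Bool(true), // *bool from a bool literal
+}
+```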
+
+## Service Client Highlights
+* `service/acmpca` - v1.3.0
+  * Feature: API client updated
+* `service/apigateway` - v1.3.0
+  * Feature: API client updated
+* `service/auditmanager` - v1.4.0
+  * Feature: API client updated
+* `service/chime` - v1.3.0
+  * Feature: API client updated
+* `service/cloudformation` - v1.4.0
+  * Feature: API client updated
+* `service/cloudfront` - v1.4.0
+  * Feature: API client updated
+* `service/codegurureviewer` - v1.3.0
+  * Feature: API client updated
+* `service/connect` - v1.3.0
+  * Feature: API client updated
+* `service/customerprofiles` - v1.5.0
+  * Feature: API client updated
+* `service/devopsguru` - v1.3.0
+  * Feature: API client updated
+* `service/docdb` - v1.4.0
+  * Feature: API client updated
+* `service/ec2` - v1.6.0
+  * Bug Fix: Fix incorrectly modeled Amazon EC2 number and boolean members in structures. The Amazon EC2 API client has been updated with a breaking change to fix all structure number and boolean members to be pointer types instead of value types. Fixes [#1107](https://github.com/aws/aws-sdk-go-v2/issues/1107), [#1178](https://github.com/aws/aws-sdk-go-v2/issues/1178), and [#1190](https://github.com/aws/aws-sdk-go-v2/issues/1190). This breaking change is made within the major version of the client's module, because the client operations failed and were unusable with value-type number and boolean members with the EC2 API.
+  * Feature: API client updated
+* `service/ecs` - v1.3.0
+  * Feature: API client updated
+* `service/eks` - v1.3.0
+  * Feature: API client updated
+* `service/forecast` - v1.4.0
+  * Feature: API client updated
+* `service/glue` - v1.4.0
+  * Feature: API client updated
+* `service/health` - v1.3.0
+  * Feature: API client updated
+* `service/iotsitewise` - v1.3.0
+  * Feature: API client updated
+* `service/iotwireless` - v1.4.0
+  * Feature: API client updated
+* `service/kafka` - v1.3.0
+  * Feature: API client updated
+* `service/kinesisanalyticsv2` - v1.2.0
+  * Feature: API client updated
+* `service/macie2` - v1.4.0
+  * Feature: API client updated
+* `service/marketplacecatalog` - v1.2.0
+  * Feature: API client updated
+* `service/mediaconvert` - v1.4.0
+  * Feature: API client updated
+* `service/mediapackage` - v1.4.0
+  * Feature: API client updated
+* `service/mediapackagevod` - v1.3.0
+  * Feature: API client updated
+* `service/mturk` - v1.2.0
+  * Feature: API client updated
+* `service/nimble` - v1.0.0
+  * Feature: API client updated
+* `service/organizations` - v1.3.0
+  * Feature: API client updated
+* `service/personalize` - v1.3.0
+  * Feature: API client updated
+* `service/robomaker` - v1.4.0
+  * Feature: API client updated
+* `service/route53` - v1.5.0
+  * Feature: API client updated
+* `service/s3` - v1.6.0
+  * Bug Fix: Fix PutObject and UploadPart unseekable stream documentation link to point to the correct location.
+  * Feature: API client updated
+* `service/sagemaker` - v1.4.0
+  * Feature: API client updated
+* `service/securityhub` - v1.4.0
+  * Feature: API client updated
+* `service/servicediscovery` - v1.3.0
+  * Feature: API client updated
+* `service/snowball` - v1.3.0
+  * Feature: API client updated
+* `service/sns` - v1.3.0
+  * Feature: API client updated
+* `service/ssm` - v1.5.0
+  * Feature: API client updated
+## Core SDK Highlights
+* Dependency Update: Update smithy-go dependency to v1.4.0
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `aws` - v1.4.0
+  * Feature: Add support for FIPS global partition endpoints ([#1242](https://github.com/aws/aws-sdk-go-v2/pull/1242))
+
+# Release 2021-04-23
+## Service Client Highlights
+* `service/cloudformation` - v1.3.2
+  * Documentation: Service Documentation Updates
+* `service/cognitoidentityprovider` - v1.2.3
+  * Documentation: Service Documentation Updates
+* `service/costexplorer` - v1.4.0
+  * Feature: Service API Updates
+* `service/databasemigrationservice` - v1.3.0
+  * Feature: Service API Updates
+* `service/detective` - v1.4.0
+  * Feature: Service API Updates
+* `service/elasticache` - v1.4.0
+  * Feature: Service API Updates
+* `service/forecast` - v1.3.0
+  * Feature: Service API Updates
+* `service/groundstation` - v1.3.0
+  * Feature: Service API Updates
+* `service/kendra` - v1.3.0
+  * Feature: Service API Updates
+* `service/redshift` - v1.5.0
+  * Feature: Service API Updates
+* `service/savingsplans` - v1.2.0
+  * Feature: Service API Updates
+* `service/securityhub` - v1.3.0
+  * Feature: Service API Updates
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `feature/rds/auth` - v1.0.0
+  * Feature: Add Support for Amazon RDS IAM Authentication
+
+# Release 2021-04-14
+## Service Client Highlights
+* `service/codebuild` - v1.3.0
+  * Feature: API client updated
+* `service/codestarconnections` - v1.2.0
+  * Feature: API client updated
+* `service/comprehendmedical` - v1.2.0
+  * Feature: API client updated
+* `service/configservice` - v1.4.0
+  * Feature: API client updated
+* `service/ec2` - v1.5.0
+  * Feature: API client updated
+* `service/fsx` - v1.3.0
+  * Feature: API client updated
+* `service/lightsail` - v1.4.0
+  * Feature: API client updated
+* `service/mediaconnect` - v1.3.0
+  * Feature: API client updated
+* `service/rds` - v1.3.0
+  * Feature: API client updated
+* `service/redshift` - v1.4.0
+  * Feature: API client updated
+* `service/shield` - v1.3.0
+  * Feature: API client updated
+* `service/sts` - v1.3.0
+  * Feature: API client updated
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+
+# Release 2021-04-08
+## Service Client Highlights
+* Feature: API model sync
+* `service/lookoutequipment` - v1.0.0
+  * v1 Release: new service client
+* `service/mgn` - v1.0.0
+  * v1 Release: new service client
+## Core SDK Highlights
+* Dependency Update: smithy-go version bump
+* Dependency Update: Updated SDK dependencies to their latest versions.
+
+# Release 2021-04-01
+## Service Client Highlights
+* Bug Fix: Fix URL Path and RawQuery of resolved endpoint being ignored by the API client's request serialization.
+  * Fixes [issue#1191](https://github.com/aws/aws-sdk-go-v2/issues/1191)
+* Refactored the internal endpoints model for accessors
+* Feature: updated to latest models
+* New services
+  * `service/location` - v1.0.0
+  * `service/lookoutmetrics` - v1.0.0
+## Core SDK Highlights
+* Dependency Update: update smithy-go module
+* Dependency Update: Updated SDK dependencies to their latest versions.
+
+# Release 2021-03-18
+## Service Client Highlights
+* Bug Fix: Updated presign URLs to no longer include the X-Amz-User-Agent header
+* Feature: Update API model
+* Feature: Added newly supported APIs
+* `service/internal/s3shared` - v1.2.0
+  * Feature: Support for S3 Object Lambda
+* `service/s3` - v1.3.0
+  * Bug Fix: Adds documentation to the PutObject and UploadPart operations' Body member describing how to upload unseekable objects to an Amazon S3 bucket.
+  * Feature: S3 Object Lambda is a new S3 feature that enables users to apply their own custom code to process the output of a standard S3 GET request by automatically invoking a Lambda function with a GET request
+* `service/s3control` - v1.3.0
+  * Feature: S3 Object Lambda is a new S3 feature that enables users to apply their own custom code to process the output of a standard S3 GET request by automatically invoking a Lambda function with a GET request
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `aws` - v1.3.0
+  * Feature: Add helper to V4 signer package to swap compute payload hash middleware with unsigned payload middleware
+* `feature/s3/manager` - v1.1.0
+  * Bug Fix: Add support for the Amazon S3 Object Lambda feature.
+  * Feature: Updates for the S3 Object Lambda feature
+
+# Release 2021-03-12
+## Service Client Highlights
+* Bug Fix: Fixed a bug that could cause union shape types to be deserialized incorrectly
+* Bug Fix: Fixed a bug where unboxed shapes that were marked as required were not serialized and sent over the wire, causing an API error from the service.
+* Bug Fix: Fixed a bug with generated API Paginators' handling of nil input parameters causing a panic.
+* Dependency Update: update smithy-go dependency
+* `service/detective` - v1.1.2
+  * Bug Fix: Fix deserialization of API response timestamp member.
+* `service/docdb` - v1.2.0
+  * Feature: The client now supports presigned URL generation for the CopyDBClusterSnapshot and CreateDBCluster operations by specifying the target SourceRegion
+* `service/neptune` - v1.2.0
+  * Feature: The client now supports presigned URL generation for the CopyDBClusterSnapshot and CreateDBCluster operations by specifying the target SourceRegion
+* `service/s3` - v1.2.1
+  * Bug Fix: Fixed an issue where the ListObjectsV2 and ListParts paginators could loop infinitely
+  * Bug Fix: Fixed key encoding when addressing S3 Access Points
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `config` - v1.1.2
+  * Bug Fix: Fixed a panic when using WithEC2IMDSRegion without a specified IMDS client
+
+# Release 2021-02-09
+## Service Client Highlights
+* `service/s3` - v1.2.0
+  * Feature: adds support for the S3 VPC endpoint interface [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113)
+* `service/s3control` - v1.2.0
+  * Feature: adds support for the S3 VPC endpoint interface [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113)
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `aws` - v1.2.0
+  * Feature: support for adding the endpoint source on context; adds a getter/setter for the endpoint source [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113)
+* `config` - v1.1.1
+  * Bug Fix: Only validate SSO profile configuration when attempting to use SSO credentials [#1103](https://github.com/aws/aws-sdk-go-v2/pull/1103)
+  * Bug Fix: Environment credentials were not taking precedence over AWS_PROFILE [#1103](https://github.com/aws/aws-sdk-go-v2/pull/1103)
+
+# Release 2021-01-29
+## Service Client Highlights
+* Bug Fix: A serialization bug has been fixed that caused some service operations with empty inputs to not be serialized correctly ([#1071](https://github.com/aws/aws-sdk-go-v2/pull/1071))
+* Bug Fix: Fixes a bug that could cause a waiter to fail when comparing types ([#1083](https://github.com/aws/aws-sdk-go-v2/pull/1083))
+## Core SDK Highlights
+* Feature: EndpointResolverFromURL helpers have been added for constructing a service EndpointResolver type ([#1066](https://github.com/aws/aws-sdk-go-v2/pull/1066))
+* Dependency Update: Updated SDK dependencies to their latest versions.
+* `aws` - v1.1.0
+  * Feature: Add support for specifying the EndpointSource on aws.Endpoint types ([#1070](https://github.com/aws/aws-sdk-go-v2/pull/1070/))
+* `config` - v1.1.0
+  * Feature: Add support for the AWS Single Sign-On (SSO) credential provider ([#1072](https://github.com/aws/aws-sdk-go-v2/pull/1072))
+* `credentials` - v1.1.0
+  * Feature: Add AWS Single Sign-On (SSO) credential provider ([#1072](https://github.com/aws/aws-sdk-go-v2/pull/1072))
+
+# Release 2021-01-19
+
+We are excited to announce the [General Availability](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-version-2-general-availability/)
+(GA) release of the [AWS SDK for Go version 2 (v2)](https://github.com/aws/aws-sdk-go-v2).
+This release follows the [Release candidate](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-version-2-v2-release-candidate)
+of the AWS SDK for Go v2. Version 2 incorporates customer feedback from version 1 and takes advantage of modern Go language features.
+
+## Breaking Changes
+* `aws`: Updated Config.Retryer member to be a func that returns aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033))
+  * Updates the SDK's references to Config.Retryer to be a function that returns an aws.Retryer value. This ensures that custom retry options specified in the `aws.Config` are scoped to individual client instances.
+  * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer.
+  * Removes the duplicate `Retryer` interface from the `retry` package. The single definition is now `aws.Retryer`.
+* `aws/middleware`: Updates `AddAttemptClockSkewMiddleware` to use the more appropriate `AddRecordResponseTiming` naming ([#1031](https://github.com/aws/aws-sdk-go-v2/pull/1031))
+  * Removes the `ResponseMetadata` struct type, and adds its members to middleware metadata directly, to improve discoverability.
+* `config`: Updated the `WithRetryer` helper to take a function that returns an aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033))
+  * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer.
+* `API Clients`: Fix SDK's API client enum constant name generation to have expected casing ([#1020](https://github.com/aws/aws-sdk-go-v2/pull/1020))
+  * This updates the generated enum const value names in the API clients' `types` packages to have the expected casing. Prior to this, enum names were being generated with lowercase names instead of camel case.
+* `API Clients`: Updates SDK's API client request middleware stack values to be scoped to individual operation call ([#1019](https://github.com/aws/aws-sdk-go-v2/pull/1019))
+  * The API client request middleware stack values were mistakenly allowed to escape to nested API operation calls. This broke the SDK's presigners.
+  * Stack values that should not escape are now scoped to the individual operation call.
+* `Multiple API Clients`: Unexported the API client's `WithEndpointResolver`; this type wasn't intended to be exported ([#1051](https://github.com/aws/aws-sdk-go-v2/pull/1051))
+  * Use the `aws.Config.EndpointResolver` member to set a custom endpoint resolver instead.
+
+## New Features
+* `service/sts`: Add support for presigning GetCallerIdentity operation ([#1030](https://github.com/aws/aws-sdk-go-v2/pull/1030))
+  * Adds a PresignClient to the `sts` API client module. Use PresignGetCallerIdentity to create presigned URLs for the GetCallerIdentity operation.
+  * Fixes [#1021](https://github.com/aws/aws-sdk-go-v2/issues/1021)
+* `aws/retry`: Add package documentation for retry package ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033))
+  * Adds documentation for the retry package
+
+## Bug Fixes
+* `Multiple API Clients`: Fix SDK's generated serde for unmodeled operation input/output ([#1050](https://github.com/aws/aws-sdk-go-v2/pull/1050))
+  * Fixes [#1047](https://github.com/aws/aws-sdk-go-v2/issues/1047) by fixing how the SDK generated serialization and deserialization for API operations that did not have modeled input or output types. The bug caused the SDK to incorrectly attempt to deserialize response documents that were either empty, or contained unexpected data.
+* `service/s3`: Fix Tagging parameter not serialized correctly for presigned PutObject requests ([#1017](https://github.com/aws/aws-sdk-go-v2/pull/1017))
+  * Fixes the Tagging parameter incorrectly being serialized to the URL's query string instead of being signed as an HTTP request header.
+  * When using PresignPutObject make sure to add all signed headers returned by the method to your downstream HTTP client's request. These headers must be included in the request, or the request will fail with signature errors.
+  * Fixes [#1016](https://github.com/aws/aws-sdk-go-v2/issues/1016)
+* `service/s3`: Fix unmarshaling of the `GetObjectAcl` operation's Grantee type response ([#1034](https://github.com/aws/aws-sdk-go-v2/pull/1034))
+  * Updates the SDK's codegen to correctly deserialize XML attributes in tags with XML namespaces.
+  * Fixes [#1013](https://github.com/aws/aws-sdk-go-v2/issues/1013)
+* `service/s3`: Fix unmarshaling of the `GetBucketLocation` operation's response ([#1027](https://github.com/aws/aws-sdk-go-v2/pull/1027))
+  * Fixes [#908](https://github.com/aws/aws-sdk-go-v2/issues/908)
+
+## Migrating from v2 preview SDK's v0.31.0 to v1.0.0
+
+### aws.Config Retryer member
+
+If your application sets the `Config.Retryer` member, it needs to be updated to set a function that returns an `aws.Retryer`. Likewise, if your application used the `config.WithRetryer` helper, it must now pass a function that returns an `aws.Retryer` (a sketch follows at the end of this migration section).
+
+If your application used the `retry.Retryer` type, update it to use the `aws.Retryer` type instead.
+
+### API Client enum value names
+
+If your application used the enum values in the API client's `types` package between v0.31.0 and the latest version of the client module, you may need to update the enum value names. The enum value names were updated to use camel case instead of lowercase.
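+
+A sketch of the `Retryer` change above, assuming the GA `config` and `aws/retry` packages (the five-attempt policy is purely illustrative):
+
+```go
+// Config.Retryer is now a factory function, so every API client built
+// from this config gets its own aws.Retryer value.
+cfg, err := config.LoadDefaultConfig(context.TODO(),
+	config.WithRetryer(func() aws.Retryer {
+		return retry.AddWithMaxAttempts(retry.NewStandard(), 5)
+	}),
+)
+if err != nil {
+	log.Fatal(err)
+}
+_ = cfg
+```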
+
+# Release 2020-12-23
+
+We’re happy to announce the Release Candidate (RC) of the AWS SDK for Go v2.
+This RC follows the developer preview release of the AWS SDK for Go v2. The SDK
+has undergone a major rewrite from the v1 code base to incorporate your
+feedback and to take advantage of modern Go language features.
+
+## Documentation
+* Developer Guide: https://aws.github.io/aws-sdk-go-v2/docs/
+* API Reference docs: https://pkg.go.dev/github.com/aws/aws-sdk-go-v2
+* Migration Guide: https://aws.github.io/aws-sdk-go-v2/docs/migrating/
+
+## Breaking Changes
+* Dependency `github.com/awslabs/smithy-go` has been relocated to `github.com/aws/smithy-go`
+  * The `smithy-go` repository was moved from the `awslabs` GitHub organization to `aws`.
+  * The `xml`, `httpbinding`, and `json` packages were relocated under the `encoding` package.
+* The module `ec2imds` moved to the `feature/ec2/imds` path ([#984](https://github.com/aws/aws-sdk-go-v2/pull/984))
+  * Moves the `ec2imds` feature module to a common location alongside other SDK features.
+* `aws/signer/v4`: Refactor AWS Sigv4 Signer and options types to allow function options ([#955](https://github.com/aws/aws-sdk-go-v2/pull/955))
+  * Fixes [#917](https://github.com/aws/aws-sdk-go-v2/issues/917), [#960](https://github.com/aws/aws-sdk-go-v2/issues/960), [#958](https://github.com/aws/aws-sdk-go-v2/issues/958)
+* `aws`: CredentialsCache type updated to require a constructor function ([#946](https://github.com/aws/aws-sdk-go-v2/pull/946))
+  * Fixes [#940](https://github.com/aws/aws-sdk-go-v2/issues/940)
+* `credentials`: ExpiryWindow and Jitter moved from credential provider to `CredentialsCache` ([#946](https://github.com/aws/aws-sdk-go-v2/pull/946))
+  * Moves the ExpiryWindow and Jitter options to a common option of the `CredentialsCache` instead of duplicating them across providers.
+  * Fixes [#940](https://github.com/aws/aws-sdk-go-v2/issues/940)
+* `config`: Ensure shared credentials file has precedence over shared config file ([#990](https://github.com/aws/aws-sdk-go-v2/pull/990))
+  * The shared config file was incorrectly overriding the shared credentials file when merging values.
+* `config`: Add `context.Context` to `LoadDefaultConfig` ([#951](https://github.com/aws/aws-sdk-go-v2/pull/951))
+  * Updates the `config#LoadDefaultConfig` function to take `context.Context` as well as functional options for the `config#LoadOptions` type.
+  * Fixes [#926](https://github.com/aws/aws-sdk-go-v2/issues/926), [#819](https://github.com/aws/aws-sdk-go-v2/issues/819)
+* `aws`: Rename `NoOpRetryer` to `NopRetryer` for consistent naming with the rest of the SDK ([#987](https://github.com/aws/aws-sdk-go-v2/pull/987))
+  * Fixes [#878](https://github.com/aws/aws-sdk-go-v2/issues/878)
+* `service/s3control`: Change `S3InitiateRestoreObjectOperation.ExpirationInDays` from value to pointer type ([#988](https://github.com/aws/aws-sdk-go-v2/pull/988))
+* `aws`: `ReaderSeekerCloser` and `WriteAtBuffer` have been relocated to `feature/s3/manager`.
+
+## New Features
+* *Waiters*: Add Waiter utilities for API clients ([aws/smithy-go#237](https://github.com/aws/smithy-go/pull/237))
+  * Your application can now use Waiter utilities to wait for AWS resources.
+* `feature/dynamodb/attributevalue`: Add Amazon DynamoDB Attribute value marshaler utility ([#948](https://github.com/aws/aws-sdk-go-v2/pull/948)) (see the sketch after this list)
+  * Adds a utility for marshaling Go types to and from Amazon DynamoDB AttributeValues.
+  * Also includes a utility for converting from Amazon DynamoDB Streams AttributeValues to Amazon DynamoDB AttributeValues.
+* `feature/dynamodbstreams/attributevalue`: Add Amazon DynamoDB Streams Attribute value marshaler utility ([#948](https://github.com/aws/aws-sdk-go-v2/pull/948))
+  * Adds a utility for marshaling Go types to and from Amazon DynamoDB Streams AttributeValues.
+  * Also includes a utility for converting from Amazon DynamoDB AttributeValues to Amazon DynamoDB Streams AttributeValues.
+* `feature/dynamodb/expression`: Add Amazon DynamoDB expression utility ([#981](https://github.com/aws/aws-sdk-go-v2/pull/981))
+  * Adds the expression utility to the SDK for easily building Amazon DynamoDB operation expressions in code.
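+
+A short sketch of the `attributevalue` marshaler (the `Record` type, the table name, and the existing `*dynamodb.Client` named `client` are illustrative assumptions):
+
+```go
+type Record struct {
+	ID   string
+	Tags []string
+}
+
+// MarshalMap converts a Go value into the map[string]types.AttributeValue
+// form that the DynamoDB API client expects, e.g. for PutItem.
+item, err := attributevalue.MarshalMap(Record{ID: "r-1", Tags: []string{"a", "b"}})
+if err != nil {
+	log.Fatal(err)
+}
+_, err = client.PutItem(context.TODO(), &dynamodb.PutItemInput{
+	TableName: aws.String("exampleTable"),
+	Item:      item,
+})
+```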
+
+## Bug Fixes
+* `service/s3`: Fix Presigner to configure client correctly for Amazon S3 ([#969](https://github.com/aws/aws-sdk-go-v2/pull/969))
+* `service/s3`: Fix deserialization of CompleteMultipartUpload ([#965](https://github.com/aws/aws-sdk-go-v2/pull/965))
+  * Fixes [#927](https://github.com/aws/aws-sdk-go-v2/issues/927)
+* `codegen`: Fix API client union serialization ([#979](https://github.com/aws/aws-sdk-go-v2/pull/979))
+  * Fixes [#978](https://github.com/aws/aws-sdk-go-v2/issues/978)
+
+## Service Client Highlights
+* API Clients have been bumped to version `v0.31.0`
+* Regenerate API Clients from updated API models, adding waiter utilities and union parameters.
+* `codegen`:
+  * Add documentation to union API parameters describing valid member types, and a usage example ([aws/smithy-go#239](https://github.com/aws/smithy-go/pull/239))
+  * Normalize Metadata header map keys to be lower case ([aws/smithy-go#241](https://github.com/aws/smithy-go/pull/241)), ([#982](https://github.com/aws/aws-sdk-go-v2/pull/982))
+    * Fixes [#376](https://github.com/aws/aws-sdk-go-v2/issues/376): Amazon S3 Metadata parameter keys are always returned as lower case.
+  * Fix API client deserialization of XML based responses ([aws/smithy-go#245](https://github.com/aws/smithy-go/pull/245)), ([#992](https://github.com/aws/aws-sdk-go-v2/pull/992))
+    * Fixes [#910](https://github.com/aws/aws-sdk-go-v2/issues/910)
+* `service/s3`, `service/s3control`:
+  * Add support for reading `s3_use_arn_region` from the shared config file ([#991](https://github.com/aws/aws-sdk-go-v2/pull/991))
+  * Add a utility for getting the RequestID and HostID of a response ([#983](https://github.com/aws/aws-sdk-go-v2/pull/983))
+
+## Other changes
+* The branch `HEAD` points to was updated from `master` to `main`.
+  * This should not impact your application, but if you have pull requests or forks of the SDK you may need to update the upstream branch your fork is based off of.
+
+## Migrating from v2 preview SDK's v0.30.0 to v0.31.0 release candidate
+
+### smithy-go module relocation
+
+If your application uses `smithy-go` utilities in its request pipeline, it will need to be updated to refer to the new import path, `github.com/aws/smithy-go`. If your application did *not* use `smithy-go` utilities directly, it will pick up the relocated module automatically.
+
+### EC2 IMDS module relocation
+
+If your application used the `ec2imds` module, it has been relocated to `feature/ec2/imds`. Your application will need to update to the new import path, `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`.
+
+### CredentialsCache Constructor and ExpiryWindow Options
+
+The `aws#CredentialsCache` type was updated, and a new constructor function, `NewCredentialsCache`, was added. This function must now be used to initialize the `CredentialsCache`. The constructor also takes function options to specify additional configuration, e.g. ExpiryWindow and Jitter (a sketch follows below).
+
+If your application was specifying the `ExpiryWindow` with the `credentials/stscreds#AssumeRoleOptions`, `credentials/stscreds#WebIdentityRoleOptions`, `credentials/processcreds#Options`, or `credentials/ec2rolecreds#Options` types, the `ExpiryWindow` option now needs to be specified on the `CredentialsCache` constructor instead.
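+
+A minimal sketch of the new constructor (the assume-role provider, the `stsClient` value, and the role ARN are illustrative):
+
+```go
+provider := stscreds.NewAssumeRoleProvider(stsClient, "arn:aws:iam::123456789012:role/example")
+
+// ExpiryWindow now lives on the cache rather than on individual providers.
+cfg.Credentials = aws.NewCredentialsCache(provider, func(o *aws.CredentialsCacheOptions) {
+	o.ExpiryWindow = 5 * time.Minute
+})
+```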
+
+### AWS Sigv4 Signer Refactor
+
+The `aws/signer/v4` package's `Signer.SignHTTP` and `Signer.PresignHTTP` methods were updated to take functional options. If your application provided a custom implementation for the API client's `HTTPSignerV4` or `HTTPPresignerV4` interfaces, that implementation will need to be updated for the new function signatures.
+
+### Configuration Loading
+
+The `config#LoadDefaultConfig` function has been updated to require a `context.Context` as the first parameter, with additional optional function options as variadic additional arguments. Your application will need to update its usage of `LoadDefaultConfig` to pass in `context.Context` as the first parameter. If your application used the `With...` helpers, those should continue to work without issue.
+
+The v2 SDK corrects its behavior to be in line with the AWS CLI and other AWS SDKs. Refer to https://docs.aws.amazon.com/credref/latest/refdocs/overview.html for more information on how to use the shared config and credentials files.
+
+# Release 2020-11-30
+
+## Breaking Change
+* `codegen`: Add support for slices and maps generated with value members instead of pointers ([#887](https://github.com/aws/aws-sdk-go-v2/pull/887))
+  * This update allows the SDK's code generation to be aware of API shapes and members that are not nullable, and can be rendered as value types by the code generation instead of pointer types.
+  * Several API client parameter types will change from pointer members to value members for slice, map, number and bool member types.
+  * See the migration notes for v0.30.0 regarding this change.
+* `aws/transport/http`: Move aws.BuildableHTTPClient to HTTP transport package ([#898](https://github.com/aws/aws-sdk-go-v2/pull/898))
+  * Moves the `BuildableHTTPClient` from the SDK's `aws` package to the `aws/transport/http` package as `BuildableClient`, alongside other HTTP-specific utilities (a sketch follows the Bug Fixes list below).
+* `feature/cloudfront/sign`: Add CloudFront sign feature as module ([#884](https://github.com/aws/aws-sdk-go-v2/pull/884))
+  * Moves the `service/cloudfront/sign` package out of the `cloudfront` module, and into its own module as `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`.
+
+## New Features
+* `config`: Add a WithRetryer provider helper to the config loader ([#897](https://github.com/aws/aws-sdk-go-v2/pull/897))
+  * Adds a `WithRetryer` configuration provider to the config loader as a convenience helper to set the `Retryer` on the `aws.Config` when it's being loaded.
+* `config`: Default to TLS 1.2 for HTTPS requests ([#892](https://github.com/aws/aws-sdk-go-v2/pull/892))
+  * Updates the SDK's default HTTP client to use TLS 1.2 as the minimum TLS version for all HTTPS requests by default.
+
+## Bug Fixes
+* `config`: Fix AWS_CA_BUNDLE usage while loading default config ([#912](https://github.com/aws/aws-sdk-go-v2/pull/))
+  * Fixes the `LoadDefaultConfig`'s configuration provider order to correctly load a custom HTTP client prior to configuring the client for the `AWS_CA_BUNDLE` environment variable.
+* `service/s3`: Fix signature mismatch error for s3 ([#913](https://github.com/aws/aws-sdk-go-v2/pull/913))
+  * Fixes ([#883](https://github.com/aws/aws-sdk-go-v2/issues/883))
+* `service/s3control`:
+  * Fix HostPrefix addition behavior for s3control ([#882](https://github.com/aws/aws-sdk-go-v2/pull/882))
+    * Fixes ([#863](https://github.com/aws/aws-sdk-go-v2/issues/863))
+  * Fix s3control error deserializer ([#875](https://github.com/aws/aws-sdk-go-v2/pull/875))
+    * Fixes ([#864](https://github.com/aws/aws-sdk-go-v2/issues/864))
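+
+A sketch of the relocated builder from the `aws/transport/http` breaking change above (shown with the GA `LoadDefaultConfig` signature; the timeout value is illustrative), using the import alias `awshttp` for `github.com/aws/aws-sdk-go-v2/aws/transport/http`:
+
+```go
+// Previously aws.BuildableHTTPClient; now awshttp.BuildableClient.
+httpClient := awshttp.NewBuildableClient().WithTimeout(10 * time.Second)
+
+cfg, err := config.LoadDefaultConfig(context.TODO(),
+	config.WithHTTPClient(httpClient),
+)
+if err != nil {
+	log.Fatal(err)
+}
+_ = cfg
+```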
+
+## Service Client Highlights
+* Pagination support has been added to supported APIs. See [Using Operation Paginators](https://aws.github.io/aws-sdk-go-v2/docs/making-requests/#using-operation-paginators) in the Developer Guide. ([#885](https://github.com/aws/aws-sdk-go-v2/pull/885))
+* Logging support has been added to service clients. See [Logging](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/logging/) in the Developer Guide. ([#872](https://github.com/aws/aws-sdk-go-v2/pull/872))
+* `service`: Add support for pre-signed URL clients for the S3, RDS, and EC2 services ([#888](https://github.com/aws/aws-sdk-go-v2/pull/888))
+  * `service/s3`: operations `PutObject` and `GetObject` are now supported with the s3 pre-signed url client.
+  * `service/ec2`: operation `CopySnapshot` is now supported with the ec2 pre-signed url client.
+  * `service/rds`: operations `CopyDBSnapshot`, `CreateDBInstanceReadReplica`, `CopyDBClusterSnapshot`, `CreateDBCluster` are now supported with the rds pre-signed url client.
+* `service/s3`: Add support for S3 access point and S3 on Outposts access point ARNs ([#870](https://github.com/aws/aws-sdk-go-v2/pull/870))
+* `service/s3control`: Adds support for S3 on Outposts access point and S3 on Outposts bucket ARNs ([#870](https://github.com/aws/aws-sdk-go-v2/pull/870))
+
+## Migrating from v2 preview SDK's v0.29.0 to v0.30.0
+
+### aws.BuildableHTTPClient move
+The `aws` package's `BuildableHTTPClient` HTTP client implementation was moved to `aws/transport/http` as `BuildableClient`. If your application used the `aws.BuildableHTTPClient` type, update it to use the `BuildableClient` in the `aws/transport/http` package.
+
+### Slice and Map API member types
+This release includes several code generation updates for API clients' slice and map members. Using API modeling metadata, slice and map members are now generated as value types instead of pointer types. For your application this means that these members are no longer pointer types; they are value types.
+
+To migrate to this change you'll need to remove the pointer handling for slice and map members, and instead handle the member values directly.
+
+### Boolean and Number API member types
+Similar to the slice and map API member types being generated as values, the SDK's code generation now has metadata with which it can generate boolean and number members as value types instead of pointer types.
+
+To migrate to this change you'll need to remove the pointer handling for number and boolean member types, and instead use value handling (see the sketch below).
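+
+A before/after sketch of the value-member change, assuming a request shape whose `MaxKeys` member was remodeled from `*int32` to `int32` (check each client's `types` package for the actual shapes):
+
+```go
+// Before (v0.29.0), non-nullable members were still pointers:
+//   params := &s3.ListObjectsV2Input{
+//       Bucket:  aws.String("example-bucket"),
+//       MaxKeys: aws.Int32(100),
+//   }
+
+// After (v0.30.0), the same members are plain values; nullable members
+// such as Bucket remain pointers.
+params := &s3.ListObjectsV2Input{
+	Bucket:  aws.String("example-bucket"),
+	MaxKeys: 100,
+}
+```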
+
+# Release 2020-10-30
+
+## New Features
+* Adds a HostnameImmutable flag on aws.Endpoint to tell the SDK whether the associated endpoint may be modified. ([#848](https://github.com/aws/aws-sdk-go-v2/pull/848))
+
+## Bug Fixes
+* Fix SDK handling of xml based services - xml namespaces ([#858](https://github.com/aws/aws-sdk-go-v2/pull/858))
+  * Fixes ([#850](https://github.com/aws/aws-sdk-go-v2/issues/850))
+
+## Service Client Highlights
+* API Clients have been bumped to version `v0.29.0`
+  * Regenerate API Clients from updated API models.
+* Improve client doc generation.
+
+## Core SDK Highlights
+* Dependency Update: Updated SDK dependencies to their latest versions.
+
+## Migrating from v2 preview SDK's v0.28.0 to v0.29.0
+* API Clients' ResolverOptions type renamed to EndpointResolverOptions
+
+# Release 2020-10-26
+
+## New Features
+* `service/s3`: Add support for Accelerate, and Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
+* `service/s3control`: Add support for Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
+
+## Service Client Highlights
+* API Clients have been bumped to version `v0.28.0`
+  * Regenerate API Clients from updated API models.
+* `service/s3`: Add support for Accelerate, and Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
+* `service/s3control`: Add support for Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
+* `service/route53`: Fix sanitizeURL customization to handle a leading slash (`/`) [#846](https://github.com/aws/aws-sdk-go-v2/pull/846)
+  * Fixes [#843](https://github.com/aws/aws-sdk-go-v2/issues/843)
+* `service/route53`: Fix codegen to correctly look for operations that need sanitized URLs ([#851](https://github.com/aws/aws-sdk-go-v2/pull/851))
+
+## Core SDK Highlights
+* `aws/protocol/restjson`: Fix unexpected JSON error response deserialization ([#837](https://github.com/aws/aws-sdk-go-v2/pull/837))
+  * Fixes [#832](https://github.com/aws/aws-sdk-go-v2/issues/832)
+* `example/service/s3/listobjects`: Add example for Amazon S3 ListObjectsV2 ([#838](https://github.com/aws/aws-sdk-go-v2/pull/838))
+
+# Release 2020-10-16
+
+## New Features
+* `feature/s3/manager`:
+  * Initial `v0.1.0` release
+  * Add the Amazon S3 Upload and Download transfer manager ([#802](https://github.com/aws/aws-sdk-go-v2/pull/802))
+
+## Service Client Highlights
+* Clients have been bumped to version `v0.27.0`
+* `service/machinelearning`: Add customization for setting the client endpoint with the PredictEndpoint value if set ([#782](https://github.com/aws/aws-sdk-go-v2/pull/782))
+* `service/s3`: Fix empty response body deserialization in case of error response ([#801](https://github.com/aws/aws-sdk-go-v2/pull/801))
+  * Fixes the xml deserialization util to correctly handle an empty response body in case of an error response.
+* `service/s3`: Add customization to auto fill the Content-Md5 request header for Amazon S3 operations ([#812](https://github.com/aws/aws-sdk-go-v2/pull/812))
+* `service/s3`: Add fallback to using HTTP status code for error code ([#818](https://github.com/aws/aws-sdk-go-v2/pull/818))
+  * Adds falling back to the HTTP status code to create an API error code when no error code is received from the service, such as for HeadObject.
+* `service/route53`: Add support for deserializing the `InvalidChangeBatch` API error ([#792](https://github.com/aws/aws-sdk-go-v2/pull/792))
+* `codegen`: Remove API client `Options` getter methods ([#788](https://github.com/aws/aws-sdk-go-v2/pull/788))
+* `codegen`: Regenerate API Client modeled endpoints ([#791](https://github.com/aws/aws-sdk-go-v2/pull/791))
+* `codegen`: Sort API Client struct member parameters by required then alphabetical order ([#787](https://github.com/aws/aws-sdk-go-v2/pull/787))
+* `codegen`: Add package docs to API client modules ([#821](https://github.com/aws/aws-sdk-go-v2/pull/821))
+* `codegen`: Rename `smithy-go`'s `smithy.OperationError` to `smithy.OperationInvokeError`.
+
+## Core SDK Highlights
+* `config`:
+  * Bumped to `v0.2.0`
+  * Refactor Config Module, Add Config Package Documentation and Examples, Improve Overall SDK Readme ([#822](https://github.com/aws/aws-sdk-go-v2/pull/822))
+* `credentials`:
+  * Bumped to `v0.1.2`
+  * Strip Monotonic Clock Readings when Comparing Credential Expiry Time ([#789](https://github.com/aws/aws-sdk-go-v2/pull/789))
+* `ec2imds`:
+  * Bumped to `v0.1.2`
+  * Fix refreshing the API token if expired ([#789](https://github.com/aws/aws-sdk-go-v2/pull/789))
+
+## Migrating from v0.26.0 to v0.27.0
+
+#### Configuration
+
+The `config` module's exported types were trimmed down to add clarity and reduce confusion, with additional changes to the `config` module's helpers:
+
+* Refactored `WithCredentialsProvider`, `WithHTTPClient`, and `WithEndpointResolver` to functions instead of structs.
+* Removed `MFATokenFuncProvider`; use `AssumeRoleCredentialOptionsProvider` for setting options for `stscreds.AssumeRoleOptions`.
+* Renamed `WithWebIdentityCredentialProviderOptions` to `WithWebIdentityRoleCredentialOptions`
+* Renamed `AssumeRoleCredentialProviderOptions` to `AssumeRoleCredentialOptionsProvider`
+* Renamed `EndpointResolverFuncProvider` to `EndpointResolverProvider`
+
+#### API Client
+* API Client `Options` type getter methods have been removed. Use the struct members instead.
+* The error returned by API Client operations was renamed from `smithy.OperationError` to `smithy.OperationInvokeError`.
+
+# Release 2020-09-30
+
+## Service Client Highlights
+* Service clients have been bumped to `v0.26.0` to simplify the documentation experience when using [pkg.go.dev](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2).
+* `service/s3`: Disable automatic decompression when getting Amazon S3 objects with the `Content-Encoding: gzip` metadata header. ([#748](https://github.com/aws/aws-sdk-go-v2/pull/748))
+  * This changes the SDK's default behavior with regard to making S3 API calls. The client will no longer automatically set the `Accept-Encoding` HTTP request header, nor will it automatically decompress the gzipped response when the `Content-Encoding: gzip` response header was received.
+  * If you'd like the client to send the `Accept-Encoding: gzip` request header, you can add this header to the API operation method call with the [SetHeaderValue](https://pkg.go.dev/github.com/awslabs/smithy-go/transport/http#SetHeaderValue) middleware helper.
+* `service/cloudfront/sign`: Fix cloudfront example usage of SignWithPolicy ([#673](https://github.com/aws/aws-sdk-go-v2/pull/673))
+  * Fixes the [#671](https://github.com/aws/aws-sdk-go-v2/issues/671) documentation typo by correcting the usage of `SignWithPolicy`.
+
+## Core SDK Highlights
+* SDK core module released at `v0.26.0`
+* `config` module released at `v0.1.1`
+* `credentials` module released at `v0.1.1`
+* `ec2imds` module released at `v0.1.1`
+
+# Release 2020-09-28
+## Announcements
+We’re happy to share the updated clients for the v0.25.0 preview version of the AWS SDK for Go V2.
+
+The updated clients leverage new developments and advancements within AWS and the Go software ecosystem at large since
+our original preview announcement. Using the new clients will be a bit different than before. The key differences are:
+simplified API operation invocation, performance improvements, support for error wrapping, and a new middleware architecture.
+So below we have a guided walkthrough to help you try it out and share your feedback, in order to better influence the features
+you’d like to see in the GA version.
+
+See the [Announcement Blog Post](https://aws.amazon.com/blogs/developer/client-updates-in-the-preview-version-of-the-aws-sdk-for-go-v2/) for more details.
+
+## Service Client Highlights
+* Initial service clients released at version `v0.1.0`
+## Core SDK Highlights
+* SDK core module released at `v0.25.0`
+* `config` module released at `v0.1.0`
+* `credentials` module released at `v0.1.0`
+* `ec2imds` module released at `v0.1.0`
+
+## Migrating from v2 preview SDK's v0.24.0 to v0.25.0
+
+#### Design changes
+
+The v2 preview SDK `v0.25.0` release represents a significant stepping stone bringing the v2 SDK closer to its target design and usability. This release includes significant breaking changes to the v2 preview SDK. The updates in the `v0.25.0` release focus on refactoring and modularization of the SDK’s API clients to use the new [client design](https://github.com/aws/aws-sdk-go-v2/issues/438), updated request pipeline (aka [middleware](https://pkg.go.dev/github.com/awslabs/smithy-go/middleware)), refactored [credential providers](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials), and [configuration loading](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) packages.
+
+We've also bumped the minimum supported Go version with this release. Starting with v0.25.0 the SDK requires a minimum version of Go `v1.15`.
+
+As a part of the refactoring done to the v2 preview SDK, some components have not been included in this update. The following is a non-exhaustive list of features that are not available.
+
+* API Paginators - [#439](https://github.com/aws/aws-sdk-go-v2/issues/439)
+* API Waiters - [#442](https://github.com/aws/aws-sdk-go-v2/issues/442)
+* Presign URL - [#794](https://github.com/aws/aws-sdk-go-v2/issues/794)
+* Amazon S3 Upload and Download manager - [#802](https://github.com/aws/aws-sdk-go-v2/pull/802)
+* Amazon DynamoDB's AttributeValue marshaler, and Expression package - [#790](https://github.com/aws/aws-sdk-go-v2/issues/790)
+* Debug Logging - [#594](https://github.com/aws/aws-sdk-go-v2/issues/594)
+
+We expect additional breaking changes to the v2 preview SDK in the coming releases. We expect these changes to focus on organization, naming, and hardening of the SDK's design for future feature capabilities after it is released for general availability.
+
+#### Relocated Packages
+
+In this release packages within the SDK were relocated, and in some cases those packages were converted to Go modules. The following is a list of the packages that were relocated.
+
+* `github.com/aws/aws-sdk-go-v2/aws/external` => `github.com/aws/aws-sdk-go-v2/config` module
+* `github.com/aws/aws-sdk-go-v2/aws/ec2metadata` => `github.com/aws/aws-sdk-go-v2/ec2imds` module
+
+The `github.com/aws/aws-sdk-go-v2/credentials` module contains refactored credentials providers.
+
+* `github.com/aws/aws-sdk-go-v2/ec2rolecreds` => `github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds`
+* `github.com/aws/aws-sdk-go-v2/endpointcreds` => `github.com/aws/aws-sdk-go-v2/credentials/endpointcreds`
+* `github.com/aws/aws-sdk-go-v2/processcreds` => `github.com/aws/aws-sdk-go-v2/credentials/processcreds`
+* `github.com/aws/aws-sdk-go-v2/stscreds` => `github.com/aws/aws-sdk-go-v2/credentials/stscreds`
+
+#### Modularization
+
+New modules were added to the v2 preview SDK to allow the components to be versioned independently from each other. This allows your application to depend on specific versions of an API client module, and take discrete updates from the SDK core and other API client modules as desired.
+
+* [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config)
+* [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials)
+* Module for each API client, e.g. [github.com/aws/aws-sdk-go-v2/service/s3](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3)
+
+#### API Clients
+
+The following is a list of the major changes to the API client modules:
+
+* Removed paginators: we plan to add these back once they are implemented to integrate with the SDK's new API client design.
+* Removed waiters: we need to further investigate how the V2 SDK should expose waiters, and how their behavior should be modeled.
+* API Clients are now Go modules. When migrating to the v2 preview SDK `v0.25.0`, you'll need to add the API client's module to your application's go.mod file.
+* API parameter nested types have been moved to a `types` package within the API client's module, e.g. `github.com/aws/aws-sdk-go-v2/service/s3/types`. These types were moved to improve documentation and discovery of the API client, operation, and input/output types. For example, the Amazon S3 ListObjects operation's [ListObjectsOutput.Contents](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3/#ListObjectsOutput) output member is a slice of [types.Object](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3/types#Object).
+* The client operation method has been renamed, removing the `Request` suffix. The method now invokes the operation instead of constructing a request, which needed to be invoked separately. The operation methods were also expanded to include functional options for providing operation specific configuration, such as modifying the request pipeline.
+
+```go
+result, err := client.Scan(context.TODO(), &dynamodb.ScanInput{
+	TableName: aws.String("exampleTable"),
+}, func(o *dynamodb.Options) {
+	// Limit this operation call to at most 1 attempt.
+	o.Retryer = retry.AddWithMaxAttempts(o.Retryer, 1)
+})
+```
+
+#### Configuration
+
+In addition to the `github.com/aws/aws-sdk-go-v2/aws/external` package being made a module at `github.com/aws/aws-sdk-go-v2/config`, the `LoadDefaultAWSConfig` function was renamed to `LoadDefaultConfig`.
+
+The `github.com/aws/aws-sdk-go-v2/aws/defaults` package has been removed. Its components have been migrated to the `github.com/aws/aws-sdk-go-v2/aws` package, and `github.com/aws/aws-sdk-go-v2/config` module.
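+
+A sketch of that rename (shown with the GA signature, which also takes a leading `context.Context`; at `v0.25.0` itself the function took no context argument):
+
+```go
+// Before: cfg, err := external.LoadDefaultAWSConfig()  (aws/external package)
+// After:  config.LoadDefaultConfig from the config module.
+cfg, err := config.LoadDefaultConfig(context.TODO())
+if err != nil {
+	log.Fatal(err)
+}
+client := s3.NewFromConfig(cfg)
+_ = client
+```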
+
+#### Error Handling
+
+The `github.com/aws/aws-sdk-go-v2/aws/awserr` package was removed as a part of the SDK error handling refactor. The SDK now uses typed errors built around [Go v1.13](https://golang.org/doc/go1.13#error_wrapping)'s [errors.As](https://pkg.go.dev/errors#As) and [errors.Unwrap](https://pkg.go.dev/errors#Unwrap) features. All SDK error types that wrap other errors implement the `Unwrap` method. Generic v2 preview SDK errors created with `fmt.Errorf` use `%w` to wrap the underlying error.
+
+The SDK API clients now include generated public error types for errors modeled for an API. The SDK will automatically deserialize the error response from the API into the appropriate error type. Your application should use `errors.As` to check if the returned error matches one it is interested in. Your application can also use the generic interface [smithy.APIError](https://pkg.go.dev/github.com/awslabs/smithy-go/#APIError) to test if the API client's operation method returned an API error, but not check against a specific error.
+
+API client errors returned to the caller will use error wrapping to layer the error values. This allows underlying error types to be specific to their use case, and the SDK's more generic error types to wrap the underlying error.
+
+For example, if an [Amazon DynamoDB](https://aws.amazon.com/dynamodb/) [Scan](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/dynamodb#Scan) operation call cannot find the `TableName` requested, the error returned will contain [dynamodb.ResourceNotFoundException](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/dynamodb/types#ResourceNotFoundException). The SDK will return this error value wrapped in a couple of layers, with each layer adding additional contextual information such as [ResponseError](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/transport/http#ResponseError) for AWS HTTP response error metadata, and [smithy.OperationError](https://pkg.go.dev/github.com/awslabs/smithy-go/#OperationError) for API operation call metadata.
+
+```go
+result, err := client.Scan(context.TODO(), params)
+if err != nil {
+	// To get a specific API error
+	var notFoundErr *types.ResourceNotFoundException
+	if errors.As(err, &notFoundErr) {
+		log.Printf("scan failed because the table was not found, %v",
+			notFoundErr.ErrorMessage())
+	}
+
+	// To get any API error
+	var apiErr smithy.APIError
+	if errors.As(err, &apiErr) {
+		log.Printf("scan failed because of an API error, Code: %v, Message: %v",
+			apiErr.ErrorCode(), apiErr.ErrorMessage())
+	}
+
+	// To get the AWS response metadata, such as RequestID
+	var respErr *awshttp.ResponseError // Using import alias "awshttp" for package github.com/aws/aws-sdk-go-v2/aws/transport/http
+	if errors.As(err, &respErr) {
+		log.Printf("scan failed with HTTP status code %v, Request ID %v and error %v",
+			respErr.HTTPStatusCode(), respErr.ServiceRequestID(), respErr)
+	}
+
+	return err
+}
+```
+
+Logging an error value will include information from each wrapped error. For example, the following is a mock error logged for a Scan operation call that failed because the table was not found.
+
+> 2020/10/15 16:03:37 operation error DynamoDB: Scan, https response error StatusCode: 400, RequestID: ABCREQUESTID123, ResourceNotFoundException: Requested resource not found
+
+#### Endpoints
+
+The `github.com/aws/aws-sdk-go-v2/aws/endpoints` package has been removed from the SDK, along with all exported endpoint definitions and iteration behavior. Each generated API client now includes its own endpoint definitions internal to the module.
+
+API clients can optionally be configured with a generic [aws.EndpointResolver](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#EndpointResolver) via the [aws.Config.EndpointResolver](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config.EndpointResolver). If the API client is not configured with a custom endpoint resolver it will defer to the endpoint resolver the client module was generated with.
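+
+A sketch of a custom resolver (the URL is an illustrative placeholder); returning `aws.EndpointNotFoundError` lets the client fall back to its built-in defaults:
+
+```go
+cfg.EndpointResolver = aws.EndpointResolverFunc(
+	func(service, region string) (aws.Endpoint, error) {
+		if service == s3.ServiceID {
+			// Route only S3 traffic to the custom endpoint.
+			return aws.Endpoint{URL: "https://s3.example.internal"}, nil
+		}
+		// Defer to the client module's default resolver for everything else.
+		return aws.Endpoint{}, &aws.EndpointNotFoundError{}
+	})
+```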
+Each generated API client now includes its own endpoint definitions internal to the module.
+
+API clients can optionally be configured with a generic [aws.EndpointResolver](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#EndpointResolver) via the [aws.Config.EndpointResolver](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config.EndpointResolver) field. If the API client is not configured with a custom endpoint resolver, it will defer to the endpoint resolver the client module was generated with.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md b/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..5b627cfa6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md b/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md
new file mode 100644
index 000000000..5e59bba7b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md
@@ -0,0 +1,177 @@
+# Contributing to the AWS SDK for Go
+
+Thank you for your interest in contributing to the AWS SDK for Go!
+We work hard to provide a high-quality and useful SDK, and we greatly value
+feedback and contributions from our community. Whether it's a bug report,
+new feature, correction, or additional documentation, we welcome your issues
+and pull requests. Please read through this document before submitting any
+[issues] or [pull requests][pr] to ensure we have all the necessary information to
+effectively respond to your bug report or contribution.
+
+Jump To:
+
+* [Bug Reports](#bug-reports)
+* [Feature Requests](#feature-requests)
+* [Code Contributions](#code-contributions)
+
+## How to contribute
+
+*Before you send us a pull request, please be sure that:*
+
+1. You're working from the latest source on the `main` branch.
+2. You check existing open, and recently closed, pull requests to be sure
+   that someone else hasn't already addressed the problem.
+3. You create an issue before working on a contribution that will take a
+   significant amount of your time.
+
+*Creating a Pull Request*
+
+1. Fork the repository.
+2. In your fork, make your change in a branch that's based on this repo's `main` branch.
+3. Commit the change to your fork, using a clear and descriptive commit message.
+4. Create a pull request, answering any questions in the pull request form.
+
+For contributions that will take a significant amount of time, open a new
+issue to pitch your idea before you get started. Explain the problem and
+describe the content you want to see added to the documentation. Let us know
+if you'll write it yourself or if you'd like us to help. We'll discuss your
+proposal with you and let you know whether we're likely to accept it.
+
+## Bug Reports
+
+You can file bug reports against the SDK on the [GitHub issues][issues] page.
+
+If you are filing a report for a bug or regression in the SDK, it's extremely
+helpful to provide as much information as possible when opening the original
+issue. This helps us reproduce and investigate the possible bug without having
+to wait for this extra information to be provided.
+Please read the following guidelines prior to filing a bug report.
+
+1. Search through existing [issues][] to ensure that your specific issue has
+   not yet been reported. If it is a common issue, it is likely there is
+   already a bug report for your problem.
+
+2. Ensure that you have tested the latest version of the SDK. Although you
+   may have an issue against an older version of the SDK, we cannot provide
+   bug fixes for old versions. It's also possible that the bug may have been
+   fixed in the latest release.
+
+3. Provide as much information about your environment, SDK version, and
+   relevant dependencies as possible. For example, let us know which version
+   of Go you are using, which operating system and version, and the
+   environment your code is running in, e.g. a container.
+
+4. Provide a minimal test case that reproduces your issue, or any error
+   information related to your problem. We can provide feedback much
+   more quickly if we know what operations you are calling in the SDK. If
+   you cannot provide a full test case, provide as much code as you can
+   to help us diagnose the problem. Any relevant information should be provided
+   as well, like whether this is a persistent issue, or if it only occurs
+   some of the time.
+
+## Feature Requests
+
+Open an [issue][issues] with the following:
+
+* A short, descriptive title. Ideally, other community members should be able
+  to get a good idea of the feature just from reading the title.
+* A detailed description of the proposed feature.
+  * Why it should be added to the SDK.
+  * If possible, example code to illustrate how it should work.
+* Use Markdown to make the request easier to read.
+* If you intend to implement this feature, indicate that you'd like the issue to be assigned to you.
+
+## Code Contributions
+
+We are always happy to receive code and documentation contributions to the SDK.
+Please be aware of the following notes prior to opening a pull request:
+
+1. The SDK is released under the [Apache license][license]. Any code you submit
+   will be released under that license. For substantial contributions, we may
+   ask you to sign a [Contributor License Agreement (CLA)][cla].
+
+2. If you would like to implement support for a significant feature that is not
+   yet available in the SDK, please talk to us beforehand to avoid any
+   duplication of effort.
+
+3. Wherever possible, pull requests should contain tests as appropriate.
+   Bugfixes should contain tests that exercise the corrected behavior (i.e., the
+   test should fail without the bugfix and pass with it), and new features
+   should be accompanied by tests exercising the feature.
+
+4. Pull requests that contain failing tests will not be merged until the test
+   failures are addressed. Pull requests that cause a significant drop in the
+   SDK's test coverage percentage are unlikely to be merged until tests have
+   been added.
+
+5. The JSON files under the SDK's `models` folder are sourced from outside the SDK,
+   such as `models/apis/ec2/2016-11-15/api.json`. We will not accept pull requests
+   directly on these models. If you discover an issue with the models please
+   create a [GitHub issue][issues] describing the issue.
+
+### Testing
+
+To run the tests locally, running the `make unit` command will `go get` the
+SDK's testing dependencies, and run vet, lint, and unit tests for the SDK.
+
+```
+make unit
+```
+
+Standard Go testing functionality is supported as well.
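+
+For example, the tests of a single package tree can be run directly with the
+standard tooling (the `./aws/...` path here is only an illustration):
+
+```
+go test ./aws/...
+```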
+
+To test SDK code that is tagged with `codegen`, you'll need to set the build tag
+in the go test command. The `make unit` command will do this automatically.
+
+```
+go test -tags codegen ./private/...
+```
+
+See the `Makefile` for additional testing tags that can be used.
+
+To test on multiple platforms, the SDK includes several Dockerfiles under the
+`awstesting/sandbox` folder, and associated make recipes to execute
+unit testing within environments configured for specific Go versions.
+
+```
+make sandbox-test-go18
+```
+
+To run all sandbox environments, use the following make recipe:
+
+```
+# Optionally update the Go tip that will be used during the batch testing
+make update-aws-golang-tip
+
+# Run all SDK tests for supported Go versions in sandboxes
+make sandbox-test
+```
+
+In addition, the sandbox environments include make recipes for interactive modes
+so you can run commands within the Docker container in the context of the SDK.
+
+```
+make sandbox-go18
+```
+
+### Changelog Documents
+
+You can see all release changes in the `CHANGELOG.md` file at the root of the
+repository. The release notes added to this file will contain service client
+updates, and major SDK changes. When submitting a pull request, please include an entry in `CHANGELOG_PENDING.md` under the appropriate changelog type so your changelog entry is included in the following release.
+
+#### Changelog Types
+
+* `SDK Features` - For major additive features, internal changes that have
+outward impact, or updates to the SDK foundations. This will result in a minor
+version change.
+* `SDK Enhancements` - For minor additive features or incremental sized changes.
+This will result in a patch version change.
+* `SDK Bugs` - For minor changes that resolve an issue. This will result in a
+patch version change.
+
+[issues]: https://github.com/aws/aws-sdk-go-v2/issues
+[pr]: https://github.com/aws/aws-sdk-go-v2/pulls
+[license]: http://aws.amazon.com/apache2.0/
+[cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement
+[releasenotes]: https://github.com/aws/aws-sdk-go-v2/releases
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md b/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md
new file mode 100644
index 000000000..4c9be94a2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md
@@ -0,0 +1,15 @@
+Open Discussions
+---
+The following issues are currently open for community feedback.
+All discourse must adhere to the [Code of Conduct] policy.
+
+* [Refactoring API Client Paginators](https://github.com/aws/aws-sdk-go-v2/issues/439)
+* [Refactoring API Client Waiters](https://github.com/aws/aws-sdk-go-v2/issues/442)
+* [Refactoring API Client Enums and Types to Discrete Packages](https://github.com/aws/aws-sdk-go-v2/issues/445)
+* [SDK Modularization](https://github.com/aws/aws-sdk-go-v2/issues/444)
+
+Past Discussions
+---
+The issues listed here are for documentation purposes, and are used to capture issues and their associated discussions.
+
+[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/main/CODE_OF_CONDUCT.md
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile new file mode 100644 index 000000000..4f74a2654 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/Makefile @@ -0,0 +1,526 @@ +# Lint rules to ignore +LINTIGNORESINGLEFIGHT='internal/sync/singleflight/singleflight.go:.+error should be the last type' +LINT_IGNORE_S3MANAGER_INPUT='feature/s3/manager/upload.go:.+struct field SSEKMSKeyId should be SSEKMSKeyID' + +UNIT_TEST_TAGS= +BUILD_TAGS=-tags "example,codegen,integration,ec2env,perftest" + +SMITHY_GO_SRC ?= $(shell pwd)/../smithy-go + +SDK_MIN_GO_VERSION ?= 1.15 + +EACHMODULE_FAILFAST ?= true +EACHMODULE_FAILFAST_FLAG=-fail-fast=${EACHMODULE_FAILFAST} + +EACHMODULE_CONCURRENCY ?= 1 +EACHMODULE_CONCURRENCY_FLAG=-c ${EACHMODULE_CONCURRENCY} + +EACHMODULE_SKIP ?= +EACHMODULE_SKIP_FLAG=-skip="${EACHMODULE_SKIP}" + +EACHMODULE_FLAGS=${EACHMODULE_CONCURRENCY_FLAG} ${EACHMODULE_FAILFAST_FLAG} ${EACHMODULE_SKIP_FLAG} + +# SDK's Core and client packages that are compatible with Go 1.9+. +SDK_CORE_PKGS=./aws/... ./internal/... +SDK_CLIENT_PKGS=./service/... +SDK_COMPA_PKGS=${SDK_CORE_PKGS} ${SDK_CLIENT_PKGS} + +# SDK additional packages that are used for development of the SDK. +SDK_EXAMPLES_PKGS= +SDK_ALL_PKGS=${SDK_COMPA_PKGS} ${SDK_EXAMPLES_PKGS} + +RUN_NONE=-run NONE +RUN_INTEG=-run '^TestInteg_' + +CODEGEN_RESOURCES_PATH=$(shell pwd)/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen +CODEGEN_API_MODELS_PATH=$(shell pwd)/codegen/sdk-codegen/aws-models +ENDPOINTS_JSON=${CODEGEN_RESOURCES_PATH}/endpoints.json +ENDPOINT_PREFIX_JSON=${CODEGEN_RESOURCES_PATH}/endpoint-prefix.json + +LICENSE_FILE=$(shell pwd)/LICENSE.txt + +SMITHY_GO_VERSION ?= +PRE_RELEASE_VERSION ?= +RELEASE_MANIFEST_FILE ?= +RELEASE_CHGLOG_DESC_FILE ?= + +REPOTOOLS_VERSION ?= latest +REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools +REPOTOOLS_CMD_ANNOTATE_STABLE_GEN = ${REPOTOOLS_MODULE}/cmd/annotatestablegen@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_MAKE_RELATIVE = ${REPOTOOLS_MODULE}/cmd/makerelative@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY = ${REPOTOOLS_MODULE}/cmd/editmoduledependency@${REPOTOOLS_VERSION} + +REPOTOOLS_CALCULATE_RELEASE_VERBOSE ?= false +REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG=-v=${REPOTOOLS_CALCULATE_RELEASE_VERBOSE} + +REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= + +ifneq ($(PRE_RELEASE_VERSION),) + REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} +endif + +.PHONY: all +all: generate unit + +################### +# Code Generation # +################### +.PHONY: generate smithy-generate smithy-build smithy-build-% smithy-clean smithy-go-publish-local format \ +gen-config-asserts gen-repo-mod-replace gen-mod-replace-smithy gen-mod-dropreplace-smithy-% gen-aws-ptrs tidy-modules-% \ +add-module-license-files sync-models sync-endpoints-model sync-endpoints.json clone-v1-models gen-internal-codegen \ 
+sync-api-models copy-attributevalue-feature min-go-version-% update-requires smithy-annotate-stable \ +update-module-metadata download-modules-% + +generate: smithy-generate update-requires gen-repo-mod-replace update-module-metadata smithy-annotate-stable \ +gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-dropreplace-smithy-. min-go-version-. \ +tidy-modules-. add-module-license-files gen-aws-ptrs format + +smithy-generate: + cd codegen && ./gradlew clean build -Plog-tests && ./gradlew clean + +smithy-build: + cd codegen && ./gradlew clean build -Plog-tests + +smithy-build-%: + @# smithy-build- command that uses the pattern to define build filter that + @# the smithy API model service id starts with. Strips off the + @# "smithy-build-". + @# + @# e.g. smithy-build-com.amazonaws.rds + @# e.g. smithy-build-com.amazonaws.rds#AmazonRDSv19 + cd codegen && \ + SMITHY_GO_BUILD_API="$(subst smithy-build-,,$@)" ./gradlew clean build -Plog-tests + +smithy-annotate-stable: + go run ${REPOTOOLS_CMD_ANNOTATE_STABLE_GEN} + +smithy-clean: + cd codegen && ./gradlew clean + +smithy-go-publish-local: + rm -rf /tmp/smithy-go-local + git clone https://github.com/aws/smithy-go /tmp/smithy-go-local + make -C /tmp/smithy-go-local smithy-clean smithy-publish-local + +format: + gofmt -w -s . + +gen-config-asserts: + @echo "Generating SDK config package implementor assertions" + cd config \ + && go mod tidy \ + && go generate + +gen-internal-codegen: + @echo "Generating internal/codegen" + cd internal/codegen \ + && go mod tidy \ + && go generate + +gen-repo-mod-replace: + @echo "Generating go.mod replace for repo modules" + go run ${REPOTOOLS_CMD_MAKE_RELATIVE} + +gen-mod-replace-smithy-%: + @# gen-mod-replace-smithy- command that uses the pattern to define build filter that + @# for modules to add replace to. Strips off the "gen-mod-replace-smithy-". + @# + @# SMITHY_GO_SRC environment variable is the path to add replace to + @# + @# e.g. gen-mod-replace-smithy-service_ssooidc + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst gen-mod-replace-smithy-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod edit -replace github.com/aws/smithy-go=${SMITHY_GO_SRC}" + +gen-mod-dropreplace-smithy-%: + @# gen-mod-dropreplace-smithy- command that uses the pattern to define build filter that + @# for modules to add replace to. Strips off the "gen-mod-dropreplace-smithy-". + @# + @# e.g. gen-mod-dropreplace-smithy-service_ssooidc + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst gen-mod-dropreplace-smithy-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod edit -dropreplace github.com/aws/smithy-go" + +gen-aws-ptrs: + cd aws && go generate + +tidy-modules-%: + @# tidy command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "tidy-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. tidy-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst tidy-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod tidy" + +download-modules-%: + @# download command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "download-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. download-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . 
-p $(subst _,/,$(subst download-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod download all" + +add-module-license-files: + cd internal/repotools/cmd/eachmodule && \ + go run . -skip-root \ + "cp $(LICENSE_FILE) ." + +sync-models: sync-endpoints-model sync-api-models + +sync-endpoints-model: sync-endpoints.json + +sync-endpoints.json: + [[ ! -z "${ENDPOINTS_MODEL}" ]] && cp ${ENDPOINTS_MODEL} ${ENDPOINTS_JSON} || echo "ENDPOINTS_MODEL not set, must not be empty" + +clone-v1-models: + rm -rf /tmp/aws-sdk-go-model-sync + git clone https://github.com/aws/aws-sdk-go.git --depth 1 /tmp/aws-sdk-go-model-sync + +sync-api-models: + cd internal/repotools/cmd/syncAPIModels && \ + go run . \ + -m ${API_MODELS} \ + -o ${CODEGEN_API_MODELS_PATH} + +copy-attributevalue-feature: + cd ./feature/dynamodbstreams/attributevalue && \ + find . -name "*.go" | grep -v "doc.go" | xargs -I % rm % && \ + find ../../dynamodb/attributevalue -name "*.go" | grep -v "doc.go" | xargs -I % cp % . && \ + ls *.go | grep -v "convert.go" | grep -v "doc.go" | \ + xargs -I % sed -i.bk -E 's:github.com/aws/aws-sdk-go-v2/(service|feature)/dynamodb:github.com/aws/aws-sdk-go-v2/\1/dynamodbstreams:g' % && \ + ls *.go | grep -v "convert.go" | grep -v "doc.go" | \ + xargs -I % sed -i.bk 's:DynamoDB:DynamoDBStreams:g' % && \ + ls *.go | grep -v "doc.go" | \ + xargs -I % sed -i.bk 's:dynamodb\.:dynamodbstreams.:g' % && \ + sed -i.bk 's:streams\.:ddbtypes.:g' "convert.go" && \ + sed -i.bk 's:ddb\.:streams.:g' "convert.go" && \ + sed -i.bk 's:ddbtypes\.:ddb.:g' "convert.go" &&\ + sed -i.bk 's:Streams::g' "convert.go" && \ + rm -rf ./*.bk && \ + go mod tidy && \ + gofmt -w -s . && \ + go test . + +min-go-version-%: + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst min-go-version-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod edit -go=${SDK_MIN_GO_VERSION}" + +update-requires: + go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} + +update-module-metadata: + go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} + +################ +# Unit Testing # +################ +.PHONY: unit unit-race unit-test unit-race-test unit-race-modules-% unit-modules-% build build-modules-% \ +go-build-modules-% test test-race-modules-% test-modules-% cachedep cachedep-modules-% api-diff-modules-% + +unit: lint unit-modules-. +unit-race: lint unit-race-modules-. + +unit-test: test-modules-. +unit-race-test: test-race-modules-. + +unit-race-modules-%: + @# unit command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "unit-race-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. unit-race-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst unit-race-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go vet ${BUILD_TAGS} --all ./..." \ + "go test ${BUILD_TAGS} ${RUN_NONE} ./..." \ + "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..." + +unit-modules-%: + @# unit command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "unit-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. unit-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst unit-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go vet ${BUILD_TAGS} --all ./..." \ + "go test ${BUILD_TAGS} ${RUN_NONE} ./..." \ + "go test -timeout=1m ${UNIT_TEST_TAGS} ./..." + +build: build-modules-. 
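+
+# For example, assuming a module exists under ./service/s3, running
+# "make build-modules-service_s3" builds just that module subtree; the
+# pattern rules below replace "_" in the target suffix with "/".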
+ +build-modules-%: + @# build command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "build-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. build-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst build-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go test ${BUILD_TAGS} ${RUN_NONE} ./..." + +go-build-modules-%: + @# build command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "build-modules-" and + @# replaces all "_" with "/". + @# + @# Validates that all modules in the repo have buildable Go files. + @# + @# e.g. go-build-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst go-build-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go build ${BUILD_TAGS} ./..." + +test: test-modules-. + +test-race-modules-%: + @# Test command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "test-race-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. test-race-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst test-race-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..." + +test-modules-%: + @# Test command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "test-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. test-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst test-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go test -timeout=1m ${UNIT_TEST_TAGS} ./..." + +cachedep: cachedep-modules-. + +cachedep-modules-%: + @# build command that uses the pattern to define the root path that the + @# module caching will start from. Strips off the "cachedep-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. cachedep-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst cachedep-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod download" + +api-diff-modules-%: + @# Command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "api-diff-modules-" and + @# replaces all "_" with "/". + @# + @# Requires golang.org/x/exp/cmd/gorelease to be available in the GOPATH. + @# + @# e.g. api-diff-modules-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst api-diff-modules-,,$@)) \ + -fail-fast=true \ + -c 1 \ + -skip="internal/repotools" \ + "$$(go env GOPATH)/bin/gorelease" + +############## +# CI Testing # +############## +.PHONY: ci-test ci-test-no-generate ci-test-generate-validate + +ci-test: generate unit-race ci-test-generate-validate +ci-test-no-generate: unit-race + +ci-test-generate-validate: + @echo "CI test validate no generated code changes" + git update-index --assume-unchanged go.mod go.sum + git add . -A + gitstatus=`git diff --cached --ignore-space-change`; \ + echo "$$gitstatus"; \ + if [ "$$gitstatus" != "" ] && [ "$$gitstatus" != "skipping validation" ]; then echo "$$gitstatus"; exit 1; fi + git update-index --no-assume-unchanged go.mod go.sum + +ci-lint: ci-lint-. + +ci-lint-%: + @# Run golangci-lint command that uses the pattern to define the root path that the + @# module check will start from. 
Strips off the "ci-lint-" and + @# replaces all "_" with "/". + @# + @# e.g. ci-lint-internal_protocoltest + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst ci-lint-,,$@)) \ + -fail-fast=false \ + -c 1 \ + -skip="internal/repotools" \ + "golangci-lint run" + +ci-lint-install: + @# Installs golangci-lint at GoPATH. + @# This should be used to run golangci-lint locally. + @# + go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +####################### +# Integration Testing # +####################### +.PHONY: integration integ-modules-% cleanup-integ-buckets + +integration: integ-modules-service + +integ-modules-%: + @# integration command that uses the pattern to define the root path that + @# the module testing will start from. Strips off the "integ-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. test-modules-service_dynamodb + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst integ-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go test -timeout=10m -tags "integration" -v ${RUN_INTEG} -count 1 ./..." + +cleanup-integ-buckets: + @echo "Cleaning up SDK integration resources" + go run -tags "integration" ./internal/awstesting/cmd/bucket_cleanup/main.go "aws-sdk-go-integration" + +############## +# Benchmarks # +############## +.PHONY: bench bench-modules-% + +bench: bench-modules-. + +bench-modules-%: + @# benchmark command that uses the pattern to define the root path that + @# the module testing will start from. Strips off the "bench-modules-" and + @# replaces all "_" with "/". + @# + @# e.g. bench-modules-service_dynamodb + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst bench-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go test -timeout=10m -bench . --benchmem ${BUILD_TAGS} ${RUN_NONE} ./..." 
+ +##################### +# Release Process # +##################### +.PHONY: preview-release pre-release-validation release + +ls-changes: + go run ${REPOTOOLS_CMD_CHANGELOG} ls + +preview-release: + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS} + +pre-release-validation: + @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ + echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ + fi + @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ + echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ + fi + +release: pre-release-validation + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS} + go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} + go run ${REPOTOOLS_CMD_CHANGELOG} rm -all + go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} + +############## +# Repo Tools # +############## +.PHONY: install-repotools + +install-repotools: + go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} + +set-smithy-go-version: + @if [[ -z "${SMITHY_GO_VERSION}" ]]; then \ + echo "SMITHY_GO_VERSION is required to update SDK's smithy-go module dependency version" && false; \ + fi + go run ${REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY} -s "github.com/aws/smithy-go" -v "${SMITHY_GO_VERSION}" + +################## +# Linting/Verify # +################## +.PHONY: verify lint vet vet-modules-% sdkv1check + +verify: lint vet sdkv1check + +lint: + @echo "go lint SDK and vendor packages" + @lint=`golint ./...`; \ + dolint=`echo "$$lint" | grep -E -v \ + -e ${LINT_IGNORE_S3MANAGER_INPUT} \ + -e ${LINTIGNORESINGLEFIGHT}`; \ + echo "$$dolint"; \ + if [ "$$dolint" != "" ]; then exit 1; fi + +vet: vet-modules-. + +vet-modules-%: + cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst vet-modules-,,$@)) ${EACHMODULE_FLAGS} \ + "go vet ${BUILD_TAGS} --all ./..." + +sdkv1check: + @echo "Checking for usage of AWS SDK for Go v1" + @sdkv1usage=`go list -test -f '''{{ if not .Standard }}{{ range $$_, $$name := .Imports }} * {{ $$.ImportPath }} -> {{ $$name }}{{ print "\n" }}{{ end }}{{ range $$_, $$name := .TestImports }} *: {{ $$.ImportPath }} -> {{ $$name }}{{ print "\n" }}{{ end }}{{ end}}''' ./... | sort -u | grep '''/aws-sdk-go/'''`; \ + echo "$$sdkv1usage"; \ + if [ "$$sdkv1usage" != "" ]; then exit 1; fi + +list-deps: list-deps-. + +list-deps-%: + @# command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "list-deps-" and + @# replaces all "_" with "/". + @# + @# Trim output to only include stdout for list of dependencies only. + @# make list-deps 2>&- + @# + @# e.g. list-deps-internal_protocoltest + @cd ./internal/repotools/cmd/eachmodule \ + && go run . 
-p $(subst _,/,$(subst list-deps-,,$@)) ${EACHMODULE_FLAGS} \ + "go list -m all | grep -v 'github.com/aws/aws-sdk-go-v2'" | sort -u + +################### +# Sandbox Testing # +################### +.PHONY: sandbox-tests sandbox-build-% sandbox-run-% sandbox-test-% update-aws-golang-tip + +sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-go1.18 sandbox-test-go1.19 sandbox-test-go1.20 sandbox-test-gotip + +sandbox-build-%: + @# sandbox-build-go1.17 + @# sandbox-build-gotip + @if [ $@ == sandbox-build-gotip ]; then\ + docker build \ + -f ./internal/awstesting/sandbox/Dockerfile.test.gotip \ + -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\ + else\ + docker build \ + --build-arg GO_VERSION=$(subst sandbox-build-go,,$@) \ + -f ./internal/awstesting/sandbox/Dockerfile.test.goversion \ + -t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\ + fi + +sandbox-run-%: sandbox-build-% + @# sandbox-run-go1.17 + @# sandbox-run-gotip + docker run -i -t "aws-sdk-go-$(subst sandbox-run-,,$@)" bash +sandbox-test-%: sandbox-build-% + @# sandbox-test-go1.17 + @# sandbox-test-gotip + docker run -t "aws-sdk-go-$(subst sandbox-test-,,$@)" + +update-aws-golang-tip: + docker build --no-cache=true -f ./internal/awstesting/sandbox/Dockerfile.golang-tip -t "aws-golang:tip" . diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt new file mode 100644 index 000000000..899129ecc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/README.md b/vendor/github.com/aws/aws-sdk-go-v2/README.md new file mode 100644 index 000000000..54626706f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/README.md @@ -0,0 +1,156 @@ +# AWS SDK for Go v2 + +[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) + +`aws-sdk-go-v2` is the v2 AWS SDK for the Go programming language. + +The v2 SDK requires a minimum version of `Go 1.15`. + +Check out the [release notes](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) for information about the latest bug +fixes, updates, and features added to the SDK. 
+
+Jump To:
+* [Getting Started](#getting-started)
+* [Getting Help](#getting-help)
+* [Contributing](#feedback-and-contributing)
+* [More Resources](#resources)
+
+## Maintenance and support for SDK major versions
+
+For information about maintenance and support for SDK major versions and their underlying dependencies, see the
+following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide:
+
+* [AWS SDKs and Tools Maintenance Policy](https://docs.aws.amazon.com/credref/latest/refdocs/maint-policy.html)
+* [AWS SDKs and Tools Version Support Matrix](https://docs.aws.amazon.com/credref/latest/refdocs/version-support-matrix.html)
+
+## Getting started
+To get started working with the SDK, set up your project for Go modules and retrieve the SDK dependencies with `go get`.
+This example shows how you can use the v2 SDK to make an API request using the SDK's [Amazon DynamoDB] client.
+
+###### Initialize Project
+```sh
+$ mkdir ~/helloaws
+$ cd ~/helloaws
+$ go mod init helloaws
+```
+###### Add SDK Dependencies
+```sh
+$ go get github.com/aws/aws-sdk-go-v2/aws
+$ go get github.com/aws/aws-sdk-go-v2/config
+$ go get github.com/aws/aws-sdk-go-v2/service/dynamodb
+```
+
+###### Write Code
+In your preferred editor, add the following content to `main.go`.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+)
+
+func main() {
+	// Using the SDK's default configuration, loading additional config
+	// and credentials values from the environment variables, shared
+	// credentials, and shared configuration files
+	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2"))
+	if err != nil {
+		log.Fatalf("unable to load SDK config, %v", err)
+	}
+
+	// Using the Config value, create the DynamoDB client
+	svc := dynamodb.NewFromConfig(cfg)
+
+	// Build the request with its input parameters
+	resp, err := svc.ListTables(context.TODO(), &dynamodb.ListTablesInput{
+		Limit: aws.Int32(5),
+	})
+	if err != nil {
+		log.Fatalf("failed to list tables, %v", err)
+	}
+
+	fmt.Println("Tables:")
+	for _, tableName := range resp.TableNames {
+		fmt.Println(tableName)
+	}
+}
+```
+
+###### Compile and Execute
+```sh
+$ go run .
+Tables:
+tableOne
+tableTwo
+```
+
+## Getting Help
+
+Please use these community resources for getting help. We use the GitHub issues
+for tracking bugs and feature requests.
+
+* Ask us a [question](https://github.com/aws/aws-sdk-go-v2/discussions/new?category=q-a) or open a [discussion](https://github.com/aws/aws-sdk-go-v2/discussions/new?category=general).
+* If you think you may have found a bug, please open an [issue](https://github.com/aws/aws-sdk-go-v2/issues/new/choose).
+* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html).
+
+This SDK implements AWS service APIs. For general issues regarding the AWS services and their limitations, you may also take a look at the [Amazon Web Services Discussion Forums](https://forums.aws.amazon.com/).
+
+### Opening Issues
+
+If you encounter a bug with the AWS SDK for Go, we would like to hear about it.
+Search the [existing issues][Issues] and see
+if others are also experiencing the same issue before opening a new issue. Please
+include the versions of the AWS SDK for Go, the Go language, and the OS you're using. Please
+also include a reproduction case when appropriate.
+
+The GitHub issues are intended for bug reports and feature requests. For help
+and questions about using the AWS SDK for Go, please make use of the resources listed
+in the [Getting Help](#getting-help) section.
+Keeping the list of open issues lean will help us respond in a timely manner.
+
+## Feedback and contributing
+
+The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways.
+
+**GitHub issues**. To provide feedback or report bugs, file GitHub [Issues] on the SDK. This is the preferred mechanism to give feedback so that other users can engage in the conversation, +1 issues, etc. Issues you open will be evaluated, and included in our roadmap for the GA launch.
+
+**Contributing**. You can open pull requests for fixes or additions to the AWS SDK for Go 2.0. All pull requests must be submitted under the Apache 2.0 license and will be reviewed by an SDK team member before being merged in. Accompanying unit tests, where possible, are appreciated.
+
+## Resources
+
+[SDK Developer Guide](https://aws.github.io/aws-sdk-go-v2/docs/) - Use this document to learn how to get started and
+use the AWS SDK for Go V2.
+
+[SDK Migration Guide](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) - Use this document to learn how to migrate to V2 from the AWS SDK for Go.
+
+[SDK API Reference Documentation](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) - Use this
+document to look up all API operation input and output parameters for AWS
+services supported by the SDK. The API reference also includes documentation of
+the SDK, and examples of how to use the SDK, service client API operations, and
+API operation required parameters.
+
+[Service Documentation](https://aws.amazon.com/documentation/) - Use this
+documentation to learn how to interface with AWS services. These guides are
+great for getting started with a service, or when looking for more
+information about a service. While this documentation is not required for coding,
+services may supply helpful samples worth looking at.
+
+[Forum](https://forums.aws.amazon.com/forum.jspa?forumID=293) - Ask questions, get help, and give feedback
+
+[Issues] - Report issues, submit pull requests, and get involved
+  (see [Apache 2.0 License][license])
+
+[Dep]: https://github.com/golang/dep
+[Issues]: https://github.com/aws/aws-sdk-go-v2/issues
+[Projects]: https://github.com/aws/aws-sdk-go-v2/projects
+[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md
+[Amazon DynamoDB]: https://aws.amazon.com/dynamodb/
+[design]: https://github.com/aws/aws-sdk-go-v2/blob/main/DESIGN.md
+[license]: http://aws.amazon.com/apache2.0/
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go
new file mode 100644
index 000000000..fe63fedad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go
@@ -0,0 +1,92 @@
+// Package arn provides a parser for interacting with Amazon Resource Names.
+package arn
+
+import (
+	"errors"
+	"strings"
+)
+
+const (
+	arnDelimiter = ":"
+	arnSections  = 6
+	arnPrefix    = "arn:"
+
+	// zero-indexed
+	sectionPartition = 1
+	sectionService   = 2
+	sectionRegion    = 3
+	sectionAccountID = 4
+	sectionResource  = 5
+
+	// errors
+	invalidPrefix   = "arn: invalid prefix"
+	invalidSections = "arn: not enough sections"
+)
+
+// ARN captures the individual fields of an Amazon Resource Name.
+// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
+type ARN struct {
+	// The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
+	// other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
+	// (Beijing) region is "aws-cn".
+	Partition string
+
+	// The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
+	// namespaces, see
+	// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
+	Service string
+
+	// The region the resource resides in. Note that the ARNs for some resources do not require a region, so this
+	// component might be omitted.
+	Region string
+
+	// The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the
+	// ARNs for some resources don't require an account number, so this component might be omitted.
+	AccountID string
+
+	// The content of this part of the ARN varies by service. It often includes an indicator of the type of resource,
+	// for example an IAM user or Amazon RDS database, followed by a slash (/) or a colon (:), followed by the
+	// resource name itself. Some services allow paths for resource names, as described in
+	// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths.
+	Resource string
+}
+
+// Parse parses an ARN into its constituent parts.
+//
+// Some example ARNs:
+// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment
+// arn:aws:iam::123456789012:user/David
+// arn:aws:rds:eu-west-1:123456789012:db:mysql-db
+// arn:aws:s3:::my_corporate_bucket/exampleobject.png
+func Parse(arn string) (ARN, error) {
+	if !strings.HasPrefix(arn, arnPrefix) {
+		return ARN{}, errors.New(invalidPrefix)
+	}
+	sections := strings.SplitN(arn, arnDelimiter, arnSections)
+	if len(sections) != arnSections {
+		return ARN{}, errors.New(invalidSections)
+	}
+	return ARN{
+		Partition: sections[sectionPartition],
+		Service:   sections[sectionService],
+		Region:    sections[sectionRegion],
+		AccountID: sections[sectionAccountID],
+		Resource:  sections[sectionResource],
+	}, nil
+}
+
+// IsARN returns whether the given string is an ARN, by checking whether the
+// string starts with "arn:" and contains the expected number of sections.
+func IsARN(arn string) bool {
+	return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1
+}
+
+// String returns the canonical representation of the ARN
+func (arn ARN) String() string {
+	return arnPrefix +
+		arn.Partition + arnDelimiter +
+		arn.Service + arnDelimiter +
+		arn.Region + arnDelimiter +
+		arn.AccountID + arnDelimiter +
+		arn.Resource
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
new file mode 100644
index 000000000..20153586b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
@@ -0,0 +1,179 @@
+package aws
+
+import (
+	"net/http"
+
+	smithybearer "github.com/aws/smithy-go/auth/bearer"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// HTTPClient provides the interface for providing custom HTTP clients. Generally
+// *http.Client is sufficient for most use cases. The HTTPClient should not
+// follow 301 or 302 redirects.
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// A Config provides service configuration for service clients.
+type Config struct {
+	// The region to send requests to. This parameter is required and must
+	// be configured globally or on a per-client basis unless otherwise
+	// noted. A full list of regions is found in the "Regions and Endpoints"
+	// document.
+	//
+	// See http://docs.aws.amazon.com/general/latest/gr/rande.html for
+	// information on AWS regions.
+	Region string
+
+	// The credentials object to use when signing requests.
+	// Use LoadDefaultConfig to load configuration from all the SDK's supported
+	// sources, and resolve credentials using the SDK's default credential chain.
+	Credentials CredentialsProvider
+
+	// The Bearer Authentication token provider to use for authenticating API
+	// operation calls with a Bearer Authentication token. The API clients and
+	// operation must support the Bearer Authentication scheme in order for the
+	// token provider to be used. API clients created with NewFromConfig will
+	// automatically be configured with this option, if the API client supports
+	// Bearer Authentication.
+	//
+	// The SDK's config.LoadDefaultConfig can automatically populate this
+	// option for external configuration options such as an SSO session.
+	// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+	BearerAuthTokenProvider smithybearer.TokenProvider
+
+	// The HTTP Client the SDK's API clients will use to invoke HTTP requests.
+	// The SDK defaults to a BuildableClient allowing API clients to create
+	// copies of the HTTP Client for service specific customizations.
+	//
+	// Use a (*http.Client) for custom behavior. Using a custom http.Client
+	// will prevent the SDK from modifying the HTTP client.
+	HTTPClient HTTPClient
+
+	// An endpoint resolver that can be used to provide or override an endpoint
+	// for the given service and region.
+	//
+	// See the `aws.EndpointResolver` documentation for additional usage
+	// information.
+	//
+	// Deprecated: See Config.EndpointResolverWithOptions
+	EndpointResolver EndpointResolver
+
+	// An endpoint resolver that can be used to provide or override an endpoint
+	// for the given service and region.
+	//
+	// When EndpointResolverWithOptions is specified, it will be used by a
+	// service client rather than using EndpointResolver if also specified.
+	//
+	// See the `aws.EndpointResolverWithOptions` documentation for additional
+	// usage information.
+	EndpointResolverWithOptions EndpointResolverWithOptions
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client
+	// will make calling an operation that fails with a retryable error.
+	//
+	// API Clients will only use this value to construct a retryer if the
+	// Config.Retryer member is nil. This value will be ignored if
+	// Retryer is not nil.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry model the API client will be created with.
+	//
+	// API Clients will only use this value to construct a retryer if the
+	// Config.Retryer member is nil. This value will be ignored if
+	// Retryer is not nil.
+	RetryMode RetryMode
+
+	// Retryer is a function that provides a Retryer implementation. A Retryer
+	// guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	//
+	// In general, the provider function should return a new instance of a
+	// Retryer if you are attempting to provide a consistent Retryer
+	// configuration across all clients. This will ensure that each client will
+	// be provided a new instance of the Retryer implementation, and will avoid
+	// issues such as sharing the same retry token bucket across services.
+	//
+	// If not nil, RetryMaxAttempts and RetryMode will be ignored by API
+	// clients.
+	Retryer func() Retryer
+
+	// ConfigSources are the sources that were used to construct the Config.
+	// Allows for additional configuration to be loaded by clients.
+	ConfigSources []interface{}
+
+	// APIOptions provides the set of middleware mutations that modify how the API
+	// client requests will be handled. This is useful for adding additional
+	// tracing data to a request, or changing behavior of the SDK's client.
+	APIOptions []func(*middleware.Stack) error
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard error.
+	Logger logging.Logger
+
+	// Configures the events that will be sent to the configured logger. This
+	// can be used to configure the logging of signing, retries, request, and
+	// responses of the SDK clients.
+	//
+	// See the ClientLogMode type documentation for the complete set of logging
+	// modes and available configuration.
+	ClientLogMode ClientLogMode
+
+	// The configured DefaultsMode. If not specified, service clients will
+	// default to legacy.
+	//
+	// Supported modes are: auto, cross-region, in-region, legacy, mobile,
+	// standard
+	DefaultsMode DefaultsMode
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode
+	// is set to DefaultsModeAuto and is initialized by
+	// `config.LoadDefaultConfig`.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
new file mode 100644
index 000000000..4d8e26ef3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
@@ -0,0 +1,22 @@
+package aws
+
+import (
+	"context"
+	"time"
+)
+
+// suppressedContext wraps a context.Context and suppresses its deadline,
+// cancelation signal, and error, while still passing its values through. It
+// lets a value-only view of a caller's context outlive the caller's
+// cancelation.
+type suppressedContext struct {
+	context.Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+	return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+	return nil
+}
+
+func (s *suppressedContext) Err() error {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
new file mode 100644
index 000000000..781ac0ae2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
@@ -0,0 +1,224 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
+)
+
+// CredentialsCacheOptions are the options for configuring a CredentialsCache.
+type CredentialsCacheOptions struct {
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// An ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	// This can cause an increased number of requests to refresh the
+	// credentials to occur.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// ExpiryWindowJitterFrac provides a mechanism for randomizing the
+	// expiration of credentials within the configured ExpiryWindow by a random
+	// percentage. Valid values are between 0.0 and 1.0.
+	//
+	// As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac
+	// is 0.5 then credentials will be set to expire between 30 to 60 seconds
+	// prior to their actual expiration time.
+	//
+	// If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
+	// If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
+	// If ExpiryWindowJitterFrac < 0 the value will be treated as 0.
+	// If ExpiryWindowJitterFrac > 1 the value will be treated as 1.
+	ExpiryWindowJitterFrac float64
+}
+
+// CredentialsCache provides caching and concurrency safe credentials retrieval
+// via the provider's retrieve method.
+//
+// CredentialsCache will look for optional interfaces on the Provider to adjust
+// how the credential cache handles credentials caching.
+//
+//   - HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle
+//     credential refresh failures. This could return an updated Credentials
+//     value, or attempt another means of retrieving credentials.
+//
+//   - AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how
+//     credentials Expires is modified. This could modify how the Credentials
+//     Expires is adjusted based on the CredentialsCache ExpiryWindow option.
+//     Such as providing a floor not to reduce the Expires below.
+type CredentialsCache struct {
+	provider CredentialsProvider
+
+	options CredentialsCacheOptions
+	creds   atomic.Value
+	sf      singleflight.Group
+}
+
+// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider
+// is expected to not be nil. A variadic list of one or more functions can be
+// provided to modify the CredentialsCache configuration. This allows for
+// configuration of credential expiry window and jitter.
+func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
+	options := CredentialsCacheOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.ExpiryWindow < 0 {
+		options.ExpiryWindow = 0
+	}
+
+	if options.ExpiryWindowJitterFrac < 0 {
+		options.ExpiryWindowJitterFrac = 0
+	} else if options.ExpiryWindowJitterFrac > 1 {
+		options.ExpiryWindowJitterFrac = 1
+	}
+
+	return &CredentialsCache{
+		provider: provider,
+		options:  options,
+	}
+}
+
+// Retrieve returns the credentials. If the credentials have already been
+// retrieved and have not expired, the cached credentials will be returned. If
+// the credentials have not been retrieved yet, or have expired, the provider's
+// Retrieve method will be called.
+//
+// Returns an error if the provider's retrieve method returns an error.
+func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
+	if creds, ok := p.getCreds(); ok && !creds.Expired() {
+		return creds, nil
+	}
+
+	resCh := p.sf.DoChan("", func() (interface{}, error) {
+		return p.singleRetrieve(&suppressedContext{ctx})
+	})
+	select {
+	case res := <-resCh:
+		return res.Val.(Credentials), res.Err
+	case <-ctx.Done():
+		return Credentials{}, &RequestCanceledError{Err: ctx.Err()}
+	}
+}
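A minimal sketch of wrapping a provider with the cache and tuning the refresh window; the static key values and durations are illustrative only:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// Stand-in provider; real code would wrap an STS, SSO, or IMDS provider.
	provider := aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) {
		return aws.Credentials{
			AccessKeyID:     "AKID",
			SecretAccessKey: "SECRET",
			Source:          "example",
			CanExpire:       true,
			Expires:         time.Now().Add(15 * time.Minute),
		}, nil
	})

	cache := aws.NewCredentialsCache(provider, func(o *aws.CredentialsCacheOptions) {
		o.ExpiryWindow = 5 * time.Minute // begin refreshing up to 5m early
		o.ExpiryWindowJitterFrac = 0.5   // i.e. somewhere 2.5m-5m before actual expiry
	})

	creds, err := cache.Retrieve(context.TODO())
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println(creds.Source, creds.Expires)
}
```

Concurrent Retrieve calls share one in-flight refresh through the singleflight group, so the underlying provider is not hit once per caller.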
+
+func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+	currCreds, ok := p.getCreds()
+	if ok && !currCreds.Expired() {
+		return currCreds, nil
+	}
+
+	newCreds, err := p.provider.Retrieve(ctx)
+	if err != nil {
+		handleFailToRefresh := defaultHandleFailToRefresh
+		if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok {
+			handleFailToRefresh = cs.HandleFailToRefresh
+		}
+		newCreds, err = handleFailToRefresh(ctx, currCreds, err)
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err)
+		}
+	}
+
+	if newCreds.CanExpire && p.options.ExpiryWindow > 0 {
+		adjustExpiresBy := defaultAdjustExpiresBy
+		if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok {
+			adjustExpiresBy = cs.AdjustExpiresBy
+		}
+
+		randFloat64, err := sdkrand.CryptoRandFloat64()
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to get random provider, %w", err)
+		}
+
+		var jitter time.Duration
+		if p.options.ExpiryWindowJitterFrac > 0 {
+			jitter = time.Duration(randFloat64 *
+				p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
+		}
+
+		newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter))
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err)
+		}
+	}
+
+	p.creds.Store(&newCreds)
+	return newCreds, nil
+}
+
+// getCreds returns the currently stored credentials and true, or false if no
+// credentials were stored.
+func (p *CredentialsCache) getCreds() (Credentials, bool) {
+	v := p.creds.Load()
+	if v == nil {
+		return Credentials{}, false
+	}
+
+	c := v.(*Credentials)
+	if c == nil || !c.HasKeys() {
+		return Credentials{}, false
+	}
+
+	return *c, true
+}
+
+// Invalidate will invalidate the cached credentials. The next call to Retrieve
+// will cause the provider's Retrieve method to be called.
+func (p *CredentialsCache) Invalidate() {
+	p.creds.Store((*Credentials)(nil))
+}
+
+// IsCredentialsProvider returns whether the credential provider wrapped by the
+// CredentialsCache matches the target provider type.
+func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool {
+	return IsCredentialsProvider(p.provider, target)
+}
+
+// HandleFailRefreshCredentialsCacheStrategy is an interface for
+// CredentialsCache that allows a CredentialsProvider to control how a failed
+// credentials refresh is handled.
+type HandleFailRefreshCredentialsCacheStrategy interface {
+	// Given the previously cached Credentials, if any, and the refresh error,
+	// may return a new or modified set of Credentials, or an error.
+	//
+	// Credential caches use the default implementation if the provider does
+	// not implement this interface.
+	HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error)
+}
+
+// defaultHandleFailToRefresh returns the passed in error.
+func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) {
+	return Credentials{}, err
+}
+
+// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialsCache
+// that allows a CredentialsProvider to intercept adjustments to the Credentials
+// expiry based on the expectations and use cases of the CredentialsProvider.
+//
+// Credential caches use the default implementation if the provider does not
+// implement this interface.
+type AdjustExpiresByCredentialsCacheStrategy interface {
+	// Given a Credentials value as input, applies any mutations and returns
+	// the potentially updated Credentials, or an error.
+	AdjustExpiresBy(Credentials, time.Duration) (Credentials, error)
+}
+
+// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires,
+// and returns the updated credentials value. If Credentials value's CanExpire
+// is false, the passed in credentials are returned unchanged.
+func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) {
+	if !creds.CanExpire {
+		return creds, nil
+	}
+
+	creds.Expires = creds.Expires.Add(dur)
+	return creds, nil
+}
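A hypothetical wrapper demonstrating both optional strategy hooks at once: on a failed refresh it keeps serving the previously cached credentials, and it floors the cache's (negative) expiry adjustment at one minute. The type name and the floor value are illustrative:

```go
package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// resilientProvider wraps another provider and opts in to both
// CredentialsCache strategy hooks.
type resilientProvider struct {
	aws.CredentialsProvider
}

// HandleFailToRefresh serves stale-but-present credentials when a refresh
// fails, instead of failing the in-flight request.
func (resilientProvider) HandleFailToRefresh(ctx context.Context, prev aws.Credentials, err error) (aws.Credentials, error) {
	if prev.HasKeys() {
		return prev, nil
	}
	return aws.Credentials{}, err
}

// AdjustExpiresBy applies the cache's adjustment, but never moves the
// effective expiry more than one minute earlier.
func (resilientProvider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) (aws.Credentials, error) {
	if dur < -time.Minute {
		dur = -time.Minute
	}
	if creds.CanExpire {
		creds.Expires = creds.Expires.Add(dur)
	}
	return creds, nil
}
```

Wrapping is then just `aws.NewCredentialsCache(resilientProvider{CredentialsProvider: inner})`; the cache type-asserts for the hooks at refresh time.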
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
new file mode 100644
index 000000000..714d4ad85
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
@@ -0,0 +1,170 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+// AnonymousCredentials provides a sentinel CredentialsProvider that should be
+// used to instruct the SDK's signing middleware to not sign the request.
+//
+// Using `nil` credentials when configuring an API client will achieve the same
+// result. The AnonymousCredentials type allows you to configure the SDK's
+// external config loading to not attempt to source credentials from the shared
+// config or environment.
+//
+// For example you can use this CredentialsProvider with an API client's
+// Options to instruct the client not to sign a request for accessing public
+// S3 bucket objects.
+//
+// The following example demonstrates using the AnonymousCredentials to prevent
+// SDK's external config loading attempt to resolve credentials.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithCredentialsProvider(aws.AnonymousCredentials{}),
+//	)
+//	if err != nil {
+//		log.Fatalf("failed to load config, %v", err)
+//	}
+//
+//	client := s3.NewFromConfig(cfg)
+//
+// Alternatively you can leave the API client Options' `Credentials` member as
+// nil. If using the `NewFromConfig` constructor you'll need to explicitly set
+// the `Credentials` member to nil, if the external config resolved a
+// credential provider.
+//
+//	client := s3.New(s3.Options{
+//		// Credentials defaults to a nil value.
+//	})
+//
+// This can also be configured for specific operation calls too.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		log.Fatalf("failed to load config, %v", err)
+//	}
+//
+//	client := s3.NewFromConfig(cfg)
+//
+//	result, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	}, func(o *s3.Options) {
+//		o.Credentials = nil
+//		// Or
+//		o.Credentials = aws.AnonymousCredentials{}
+//	})
+type AnonymousCredentials struct{}
+
+// Retrieve implements the CredentialsProvider interface, but will always
+// return an error, and cannot be used to sign a request.
+// The AnonymousCredentials type is used as a sentinel type instructing the
+// AWS request signing middleware to not sign a request.
+func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) {
+	return Credentials{Source: "AnonymousCredentials"},
+		fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with")
+}
+
+// A Credentials is the AWS credentials value for individual credential fields.
+type Credentials struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Source of the credentials
+	Source string
+
+	// States if the credentials can expire or not.
+	CanExpire bool
+
+	// The time the credentials will expire at. Should be ignored if CanExpire
+	// is false.
+	Expires time.Time
+}
+
+// Expired returns if the credentials have expired.
+func (v Credentials) Expired() bool {
+	if v.CanExpire {
+		// Calling Round(0) on the current time will truncate the monotonic
+		// reading only. Ensures credential expiry time is always based on
+		// reported wall-clock time.
+		return !v.Expires.After(sdk.NowTime().Round(0))
+	}
+
+	return false
+}
+
+// HasKeys returns if the credentials keys are set.
+func (v Credentials) HasKeys() bool {
+	return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0
+}
+
+// A CredentialsProvider is the interface for any component which will provide
+// Credentials. A CredentialsProvider is required to manage its own Expired
+// state, and what being expired means.
+//
+// A credentials provider implementation can be wrapped with a CredentialsCache
+// to cache the credential value retrieved. Without the cache the SDK will
+// attempt to retrieve the credentials for every request.
+type CredentialsProvider interface {
+	// Retrieve returns a nil error if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or was empty.
+	Retrieve(ctx context.Context) (Credentials, error)
+}
+
+// CredentialsProviderFunc provides a helper wrapping a function value to
+// satisfy the CredentialsProvider interface.
+type CredentialsProviderFunc func(context.Context) (Credentials, error)
+
+// Retrieve delegates to the function value the CredentialsProviderFunc wraps.
+func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
+	return fn(ctx)
+}
+
+type isCredentialsProvider interface {
+	IsCredentialsProvider(CredentialsProvider) bool
+}
+
+// IsCredentialsProvider returns whether the target CredentialsProvider is the
+// same type as provider when comparing the implementation type.
+//
+// If provider has a method IsCredentialsProvider(CredentialsProvider) bool it
+// will be responsible for validating whether target matches the credential
+// provider type.
+//
+// When comparing the CredentialsProvider implementations provider and target
+// for equality, the following rules are used:
+//
+//	If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false
+//	If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false
+//	If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false
+//	If provider is of type *T and target is of type *V, true if type *T is the same as type *V, otherwise false
+func IsCredentialsProvider(provider, target CredentialsProvider) bool {
+	if target == nil || provider == nil {
+		return provider == target
+	}
+
+	if x, ok := provider.(isCredentialsProvider); ok {
+		return x.IsCredentialsProvider(target)
+	}
+
+	targetType := reflect.TypeOf(target)
+	if targetType.Kind() != reflect.Ptr {
+		targetType = reflect.PtrTo(targetType)
+	}
+
+	providerType := reflect.TypeOf(provider)
+	if providerType.Kind() != reflect.Ptr {
+		providerType = reflect.PtrTo(providerType)
+	}
+
+	return targetType.AssignableTo(providerType)
+}
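A sketch of the comparison semantics; the static provider comes from the SDK's credentials module, assumed to be vendored alongside this package:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
)

func main() {
	static := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")
	cache := aws.NewCredentialsCache(static)

	// CredentialsCache implements the optional hook and delegates to the
	// wrapped provider, so the cache still "is" a static provider, and
	// pointer-ness of the target does not matter.
	fmt.Println(aws.IsCredentialsProvider(cache, credentials.StaticCredentialsProvider{}))  // true
	fmt.Println(aws.IsCredentialsProvider(cache, &credentials.StaticCredentialsProvider{})) // true
	fmt.Println(aws.IsCredentialsProvider(static, aws.AnonymousCredentials{}))              // false
}
```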
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go
new file mode 100644
index 000000000..fd408e518
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go
@@ -0,0 +1,38 @@
+package defaults
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"runtime"
+	"strings"
+)
+
+var getGOOS = func() string {
+	return runtime.GOOS
+}
+
+// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode
+// is set to aws.DefaultsModeAuto.
+func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode {
+	goos := getGOOS()
+	if goos == "android" || goos == "ios" {
+		return aws.DefaultsModeMobile
+	}
+
+	var currentRegion string
+	if len(environment.EnvironmentIdentifier) > 0 {
+		currentRegion = environment.Region
+	}
+
+	if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 {
+		currentRegion = environment.EC2InstanceMetadataRegion
+	}
+
+	if len(region) > 0 && len(currentRegion) > 0 {
+		if strings.EqualFold(region, currentRegion) {
+			return aws.DefaultsModeInRegion
+		}
+		return aws.DefaultsModeCrossRegion
+	}
+
+	return aws.DefaultsModeStandard
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go
new file mode 100644
index 000000000..8b7e01fa2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go
@@ -0,0 +1,43 @@
+package defaults
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// Configuration is the set of SDK configuration options that are determined based
+// on the configured DefaultsMode.
+type Configuration struct {
+	// RetryMode is the configuration's default retry mode API clients should
+	// use for constructing a Retryer.
+	RetryMode aws.RetryMode
+
+	// ConnectTimeout is the maximum amount of time a dial will wait for
+	// a connect to complete.
+	//
+	// See https://pkg.go.dev/net#Dialer.Timeout
+	ConnectTimeout *time.Duration
+
+	// TLSNegotiationTimeout specifies the maximum amount of time to wait for
+	// a TLS handshake.
+	//
+	// See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout
+	TLSNegotiationTimeout *time.Duration
+}
+
+// GetConnectTimeout returns the ConnectTimeout value, and false if the value
+// is not set.
+func (c *Configuration) GetConnectTimeout() (time.Duration, bool) {
+	if c.ConnectTimeout == nil {
+		return 0, false
+	}
+	return *c.ConnectTimeout, true
+}
+
+// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, and false
+// if the value is not set.
+func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) {
+	if c.TLSNegotiationTimeout == nil {
+		return 0, false
+	}
+	return *c.TLSNegotiationTimeout, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go
new file mode 100644
index 000000000..dbaa873dc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go
@@ -0,0 +1,50 @@
+// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT.
+
+package defaults
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"time"
+)
+
+// GetModeConfiguration returns the default Configuration descriptor for the given mode.
+//
+// Supports the following modes: cross-region, in-region, mobile, standard
+func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) {
+	var mv aws.DefaultsMode
+	mv.SetFromString(string(mode))
+
+	switch mv {
+	case aws.DefaultsModeCrossRegion:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(3100 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
+		}
+		return settings, nil
+	case aws.DefaultsModeInRegion:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(1100 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond),
+		}
+		return settings, nil
+	case aws.DefaultsModeMobile:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(30000 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond),
+		}
+		return settings, nil
+	case aws.DefaultsModeStandard:
+		settings := Configuration{
+			ConnectTimeout:        aws.Duration(3100 * time.Millisecond),
+			RetryMode:             aws.RetryMode("standard"),
+			TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
+		}
+		return settings, nil
+	default:
+		return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go
new file mode 100644
index 000000000..2d90011b4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go
@@ -0,0 +1,2 @@
+// Package defaults provides recommended configuration values for AWS SDKs and CLIs.
+package defaults
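A sketch of resolving the generated per-mode settings above; the printed values match the in-region case of the generated switch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/defaults"
)

func main() {
	cfg, err := defaults.GetModeConfiguration(aws.DefaultsModeInRegion)
	if err != nil {
		log.Fatal(err)
	}

	if d, ok := cfg.GetConnectTimeout(); ok {
		fmt.Println("connect timeout:", d) // 1.1s for in-region
	}
	fmt.Println("retry mode:", cfg.RetryMode) // "standard"
}
```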
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go
new file mode 100644
index 000000000..fcf9387c2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go
@@ -0,0 +1,95 @@
+// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT.
+
+package aws
+
+import (
+	"strings"
+)
+
+// DefaultsMode is the SDK defaults mode setting.
+type DefaultsMode string
+
+// The DefaultsMode constants.
+const (
+	// DefaultsModeAuto is an experimental mode that builds on the standard mode.
+	// The SDK will attempt to discover the execution environment to determine the
+	// appropriate settings automatically.
+	//
+	// Note that the auto detection is heuristics-based and does not guarantee 100%
+	// accuracy. STANDARD mode will be used if the execution environment cannot
+	// be determined. The auto detection might query EC2 Instance Metadata service
+	// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html),
+	// which might introduce latency. Therefore we recommend choosing an explicit
+	// defaults_mode instead if startup latency is critical to your application.
+	DefaultsModeAuto DefaultsMode = "auto"
+
+	// DefaultsModeCrossRegion builds on the standard mode and includes optimization
+	// tailored for applications which call AWS services in a different region.
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK.
+	DefaultsModeCrossRegion DefaultsMode = "cross-region"
+
+	// DefaultsModeInRegion builds on the standard mode and includes optimization
+	// tailored for applications which call AWS services from within the same AWS
+	// region.
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK.
+	DefaultsModeInRegion DefaultsMode = "in-region"
+
+	// DefaultsModeLegacy provides default settings that vary per SDK and were used
+	// prior to establishment of defaults_mode.
+	DefaultsModeLegacy DefaultsMode = "legacy"
+
+	// DefaultsModeMobile builds on the standard mode and includes optimization
+	// tailored for mobile applications.
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK.
+	DefaultsModeMobile DefaultsMode = "mobile"
+
+	// DefaultsModeStandard provides the latest recommended default values that
+	// should be safe to run in most scenarios.
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK.
+	DefaultsModeStandard DefaultsMode = "standard"
+)
+
+// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches
+// the provided string when compared using EqualFold. If the value does not match a known
+// constant it will be stored as-is and the function will return false. As a special case, if the
+// provided value is a zero-length string, the mode will be set to DefaultsModeLegacy.
+func (d *DefaultsMode) SetFromString(v string) (ok bool) {
+	switch {
+	case strings.EqualFold(v, string(DefaultsModeAuto)):
+		*d = DefaultsModeAuto
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeCrossRegion)):
+		*d = DefaultsModeCrossRegion
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeInRegion)):
+		*d = DefaultsModeInRegion
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeLegacy)):
+		*d = DefaultsModeLegacy
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeMobile)):
+		*d = DefaultsModeMobile
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeStandard)):
+		*d = DefaultsModeStandard
+		ok = true
+	case len(v) == 0:
+		*d = DefaultsModeLegacy
+		ok = true
+	default:
+		*d = DefaultsMode(v)
+	}
+	return ok
+}
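The special cases from the doc comment in a short sketch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	var m aws.DefaultsMode

	fmt.Println(m.SetFromString("STANDARD"), m) // true standard (EqualFold match)
	fmt.Println(m.SetFromString(""), m)         // true legacy   (zero-length special case)
	fmt.Println(m.SetFromString("bespoke"), m)  // false bespoke (unknown values stored as-is)
}
```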
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
new file mode 100644
index 000000000..d8b6e09e5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
@@ -0,0 +1,62 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// # Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer to a scalar, and dereferencing
+// a pointer, easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to Value functions will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The Value to Pointer functions are named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//	var strPtr *string
+//
+//	// Without the SDK's conversion functions
+//	str := "my string"
+//	strPtr = &str
+//
+//	// With the SDK's conversion functions
+//	strPtr = aws.String("my string")
+//
+//	// Convert *string to string value
+//	str = aws.ToString(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// maps and slices of types commonly used in API parameters. The map and slice
+// conversion functions use a similar naming pattern to the scalar conversion
+// functions.
+//
+//	var strPtrs []*string
+//	var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//	// Convert []string to []*string
+//	strPtrs = aws.StringSlice(strs)
+//
+//	// Convert []*string to []string
+//	strs = aws.ToStringSlice(strPtrs)
+//
+// # SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
+
+// generate.go uses a build tag of "ignore", go run doesn't need to specify
+// this because go run ignores all build flags when running a go file directly.
+//go:generate go run -tags codegen generate.go
+//go:generate go run -tags codegen logging_generate.go
+//go:generate gofmt -w -s .
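A sketch of the recommended explicit client; the timeout and redirect policy are illustrative, and *http.Client satisfies this package's HTTPClient interface (note the interface's contract that 301/302 redirects should not be followed):

```go
package main

import (
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	httpClient := &http.Client{
		Timeout: 30 * time.Second,
		// Honor the HTTPClient contract: do not follow redirects.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}

	cfg := aws.Config{
		Region:     "eu-west-1",
		HTTPClient: httpClient,
	}
	_ = cfg // pass to a service client constructor, e.g. s3.NewFromConfig(cfg)
}
```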
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
new file mode 100644
index 000000000..aa10a9b40
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
@@ -0,0 +1,229 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior.
+type DualStackEndpointState uint
+
+const (
+	// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution.
+	DualStackEndpointStateUnset DualStackEndpointState = iota
+
+	// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints.
+	DualStackEndpointStateEnabled
+
+	// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateDisabled
+)
+
+// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value.
+// Returns boolean false if the provided options do not have a method to retrieve the DualStackEndpointState.
+func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) {
+	type iface interface {
+		GetUseDualStackEndpoint() DualStackEndpointState
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetUseDualStackEndpoint()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
+type FIPSEndpointState uint
+
+const (
+	// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
+	FIPSEndpointStateUnset FIPSEndpointState = iota
+
+	// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
+	FIPSEndpointStateEnabled
+
+	// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
+	FIPSEndpointStateDisabled
+)
+
+// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseFIPSEndpoint value.
+// Returns boolean false if the provided options do not have a method to retrieve the FIPSEndpointState.
+func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) {
+	type iface interface {
+		GetUseFIPSEndpoint() FIPSEndpointState
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetUseFIPSEndpoint()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
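The getters above duck-type over whatever options values they are handed; a hypothetical options type showing the probing pattern:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// resolverOptions is an illustrative options struct exposing the getter the
// helper probes for via an anonymous interface.
type resolverOptions struct {
	useDualStack aws.DualStackEndpointState
}

func (o resolverOptions) GetUseDualStackEndpoint() aws.DualStackEndpointState {
	return o.useDualStack
}

func main() {
	opts := resolverOptions{useDualStack: aws.DualStackEndpointStateEnabled}

	if v, ok := aws.GetUseDualStackEndpoint(opts); ok {
		fmt.Println(v == aws.DualStackEndpointStateEnabled) // true
	}

	_, found := aws.GetUseFIPSEndpoint(opts) // false: no GetUseFIPSEndpoint method
	fmt.Println(found)
}
```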
+// Endpoint represents the endpoint a service client should make API operation
+// calls to.
+//
+// The SDK will automatically resolve these endpoints per API client using an
+// internal endpoint resolver. If you'd like to provide custom endpoint
+// resolving behavior you can implement the EndpointResolver interface.
+type Endpoint struct {
+	// The base URL endpoint the SDK API clients will use to make API calls to.
+	// The SDK will suffix URI path and query elements to this endpoint.
+	URL string
+
+	// Specifies if the endpoint's hostname can be modified by the SDK's API
+	// client.
+	//
+	// If the hostname is mutable the SDK API clients may modify any part of
+	// the hostname based on the requirements of the API (e.g. adding, or
+	// removing content in the hostname). Such as the Amazon S3 API client
+	// prefixing "bucketname" to the hostname, or changing the
+	// hostname service name component from "s3." to "s3-accesspoint.dualstack."
+	// for the dualstack endpoint of an S3 Accesspoint resource.
+	//
+	// Care should be taken when providing a custom endpoint for an API. If the
+	// endpoint hostname is mutable, and the client cannot modify the endpoint
+	// correctly, the operation call will most likely fail, or have undefined
+	// behavior.
+	//
+	// If hostname is immutable, the SDK API clients will not modify the
+	// hostname of the URL. This may cause the API client not to function
+	// correctly if the API requires the operation specific hostname values
+	// to be used by the client.
+	//
+	// This flag does not modify the API client's behavior if this endpoint
+	// will be used instead of Endpoint Discovery, or if the endpoint will be
+	// used to perform Endpoint Discovery. That behavior is configured via the
+	// API Client's Options.
+	HostnameImmutable bool
+
+	// The AWS partition the endpoint belongs to.
+	PartitionID string
+
+	// The service name that should be used for signing the requests to the
+	// endpoint.
+	SigningName string
+
+	// The region that should be used for signing the request to the endpoint.
+	SigningRegion string
+
+	// The signing method that should be used for signing the requests to the
+	// endpoint.
+	SigningMethod string
+
+	// The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata.
+	// When providing a custom endpoint, you should set the source as EndpointSourceCustom.
+	// If source is not provided when providing a custom endpoint, the SDK may not
+	// perform required host mutations correctly. Source should be used along with
+	// HostnameImmutable property as per the usage requirement.
+	Source EndpointSource
+}
+
+// EndpointSource is the endpoint source type.
+type EndpointSource int
+
+const (
+	// EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source.
+	EndpointSourceServiceMetadata EndpointSource = iota
+
+	// EndpointSourceCustom denotes endpoint is a custom endpoint. This source should be used when
+	// user provides a custom endpoint to be used by the SDK.
+	EndpointSourceCustom
+)
+
+// EndpointNotFoundError is a sentinel error to indicate that the
+// EndpointResolver implementation was unable to resolve an endpoint for the
+// given service and region. Resolvers should use this to indicate that an API
+// client should fall back and attempt to use its internal default resolver to
+// resolve the endpoint.
+type EndpointNotFoundError struct {
+	Err error
+}
+
+// Error is the error message.
+func (e *EndpointNotFoundError) Error() string {
+	return fmt.Sprintf("endpoint not found, %v", e.Err)
+}
+
+// Unwrap returns the underlying error.
+func (e *EndpointNotFoundError) Unwrap() error {
+	return e.Err
+}
+
+// EndpointResolver is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service and region. API clients will
+// attempt to use the EndpointResolver first to resolve an endpoint if
+// available. If the EndpointResolver returns an EndpointNotFoundError error,
+// API clients will fall back to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+//
+// Deprecated: See EndpointResolverWithOptions
+type EndpointResolver interface {
+	ResolveEndpoint(service, region string) (Endpoint, error)
+}
+
+// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
+//
+// Deprecated: See EndpointResolverWithOptionsFunc
+type EndpointResolverFunc func(service, region string) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+//
+// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint
+func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
+	return e(service, region)
+}
+
+// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will
+// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if
+// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error,
+// API clients will fall back to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+type EndpointResolverWithOptions interface {
+	ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error)
+}
+
+// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface.
+type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) {
+	return e(service, region, options...)
+}
+
+// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value.
+// Returns boolean false if the provided options do not have a method to retrieve the DisableHTTPS.
+func GetDisableHTTPS(options ...interface{}) (value bool, found bool) {
+	type iface interface {
+		GetDisableHTTPS() bool
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetDisableHTTPS()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value.
+// Returns boolean false if the provided options do not have a method to retrieve the ResolvedRegion.
+func GetResolvedRegion(options ...interface{}) (value string, found bool) {
+	type iface interface {
+		GetResolvedRegion() string
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetResolvedRegion()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
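A sketch of a partial resolver: it pins one service/region pair to a hypothetical internal endpoint and defers everything else to the client's default resolution by returning the EndpointNotFoundError sentinel:

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
)

func customResolver() aws.EndpointResolverWithOptions {
	return aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			if service == "S3" && region == "eu-west-1" {
				return aws.Endpoint{
					URL:               "https://s3.internal.example.com", // hypothetical
					HostnameImmutable: true,
					Source:            aws.EndpointSourceCustom,
				}, nil
			}
			// Sentinel: fall through to the client's default resolver.
			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
		})
}

func main() {
	cfg := aws.Config{
		Region:                      "eu-west-1",
		EndpointResolverWithOptions: customResolver(),
	}
	_ = cfg
}
```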
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
new file mode 100644
index 000000000..f390a08f9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
@@ -0,0 +1,9 @@
+package aws
+
+// MissingRegionError is an error that is returned if region configuration
+// value was not found.
+type MissingRegionError struct{}
+
+func (*MissingRegionError) Error() string {
+	return "an AWS region is required, but was not found"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
new file mode 100644
index 000000000..2394418e9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
@@ -0,0 +1,365 @@
+// Code generated by aws/generate.go DO NOT EDIT.
+ +package aws + +import ( + "github.com/aws/smithy-go/ptr" + "time" +) + +// ToBool returns bool value dereferenced if the passed +// in pointer was not nil. Returns a bool zero value if the +// pointer was nil. +func ToBool(p *bool) (v bool) { + return ptr.ToBool(p) +} + +// ToBoolSlice returns a slice of bool values, that are +// dereferenced if the passed in pointer was not nil. Returns a bool +// zero value if the pointer was nil. +func ToBoolSlice(vs []*bool) []bool { + return ptr.ToBoolSlice(vs) +} + +// ToBoolMap returns a map of bool values, that are +// dereferenced if the passed in pointer was not nil. The bool +// zero value is used if the pointer was nil. +func ToBoolMap(vs map[string]*bool) map[string]bool { + return ptr.ToBoolMap(vs) +} + +// ToByte returns byte value dereferenced if the passed +// in pointer was not nil. Returns a byte zero value if the +// pointer was nil. +func ToByte(p *byte) (v byte) { + return ptr.ToByte(p) +} + +// ToByteSlice returns a slice of byte values, that are +// dereferenced if the passed in pointer was not nil. Returns a byte +// zero value if the pointer was nil. +func ToByteSlice(vs []*byte) []byte { + return ptr.ToByteSlice(vs) +} + +// ToByteMap returns a map of byte values, that are +// dereferenced if the passed in pointer was not nil. The byte +// zero value is used if the pointer was nil. +func ToByteMap(vs map[string]*byte) map[string]byte { + return ptr.ToByteMap(vs) +} + +// ToString returns string value dereferenced if the passed +// in pointer was not nil. Returns a string zero value if the +// pointer was nil. +func ToString(p *string) (v string) { + return ptr.ToString(p) +} + +// ToStringSlice returns a slice of string values, that are +// dereferenced if the passed in pointer was not nil. Returns a string +// zero value if the pointer was nil. +func ToStringSlice(vs []*string) []string { + return ptr.ToStringSlice(vs) +} + +// ToStringMap returns a map of string values, that are +// dereferenced if the passed in pointer was not nil. The string +// zero value is used if the pointer was nil. +func ToStringMap(vs map[string]*string) map[string]string { + return ptr.ToStringMap(vs) +} + +// ToInt returns int value dereferenced if the passed +// in pointer was not nil. Returns a int zero value if the +// pointer was nil. +func ToInt(p *int) (v int) { + return ptr.ToInt(p) +} + +// ToIntSlice returns a slice of int values, that are +// dereferenced if the passed in pointer was not nil. Returns a int +// zero value if the pointer was nil. +func ToIntSlice(vs []*int) []int { + return ptr.ToIntSlice(vs) +} + +// ToIntMap returns a map of int values, that are +// dereferenced if the passed in pointer was not nil. The int +// zero value is used if the pointer was nil. +func ToIntMap(vs map[string]*int) map[string]int { + return ptr.ToIntMap(vs) +} + +// ToInt8 returns int8 value dereferenced if the passed +// in pointer was not nil. Returns a int8 zero value if the +// pointer was nil. +func ToInt8(p *int8) (v int8) { + return ptr.ToInt8(p) +} + +// ToInt8Slice returns a slice of int8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int8 +// zero value if the pointer was nil. +func ToInt8Slice(vs []*int8) []int8 { + return ptr.ToInt8Slice(vs) +} + +// ToInt8Map returns a map of int8 values, that are +// dereferenced if the passed in pointer was not nil. The int8 +// zero value is used if the pointer was nil. 
+func ToInt8Map(vs map[string]*int8) map[string]int8 { + return ptr.ToInt8Map(vs) +} + +// ToInt16 returns int16 value dereferenced if the passed +// in pointer was not nil. Returns a int16 zero value if the +// pointer was nil. +func ToInt16(p *int16) (v int16) { + return ptr.ToInt16(p) +} + +// ToInt16Slice returns a slice of int16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int16 +// zero value if the pointer was nil. +func ToInt16Slice(vs []*int16) []int16 { + return ptr.ToInt16Slice(vs) +} + +// ToInt16Map returns a map of int16 values, that are +// dereferenced if the passed in pointer was not nil. The int16 +// zero value is used if the pointer was nil. +func ToInt16Map(vs map[string]*int16) map[string]int16 { + return ptr.ToInt16Map(vs) +} + +// ToInt32 returns int32 value dereferenced if the passed +// in pointer was not nil. Returns a int32 zero value if the +// pointer was nil. +func ToInt32(p *int32) (v int32) { + return ptr.ToInt32(p) +} + +// ToInt32Slice returns a slice of int32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int32 +// zero value if the pointer was nil. +func ToInt32Slice(vs []*int32) []int32 { + return ptr.ToInt32Slice(vs) +} + +// ToInt32Map returns a map of int32 values, that are +// dereferenced if the passed in pointer was not nil. The int32 +// zero value is used if the pointer was nil. +func ToInt32Map(vs map[string]*int32) map[string]int32 { + return ptr.ToInt32Map(vs) +} + +// ToInt64 returns int64 value dereferenced if the passed +// in pointer was not nil. Returns a int64 zero value if the +// pointer was nil. +func ToInt64(p *int64) (v int64) { + return ptr.ToInt64(p) +} + +// ToInt64Slice returns a slice of int64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int64 +// zero value if the pointer was nil. +func ToInt64Slice(vs []*int64) []int64 { + return ptr.ToInt64Slice(vs) +} + +// ToInt64Map returns a map of int64 values, that are +// dereferenced if the passed in pointer was not nil. The int64 +// zero value is used if the pointer was nil. +func ToInt64Map(vs map[string]*int64) map[string]int64 { + return ptr.ToInt64Map(vs) +} + +// ToUint returns uint value dereferenced if the passed +// in pointer was not nil. Returns a uint zero value if the +// pointer was nil. +func ToUint(p *uint) (v uint) { + return ptr.ToUint(p) +} + +// ToUintSlice returns a slice of uint values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint +// zero value if the pointer was nil. +func ToUintSlice(vs []*uint) []uint { + return ptr.ToUintSlice(vs) +} + +// ToUintMap returns a map of uint values, that are +// dereferenced if the passed in pointer was not nil. The uint +// zero value is used if the pointer was nil. +func ToUintMap(vs map[string]*uint) map[string]uint { + return ptr.ToUintMap(vs) +} + +// ToUint8 returns uint8 value dereferenced if the passed +// in pointer was not nil. Returns a uint8 zero value if the +// pointer was nil. +func ToUint8(p *uint8) (v uint8) { + return ptr.ToUint8(p) +} + +// ToUint8Slice returns a slice of uint8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint8 +// zero value if the pointer was nil. +func ToUint8Slice(vs []*uint8) []uint8 { + return ptr.ToUint8Slice(vs) +} + +// ToUint8Map returns a map of uint8 values, that are +// dereferenced if the passed in pointer was not nil. The uint8 +// zero value is used if the pointer was nil. 
+func ToUint8Map(vs map[string]*uint8) map[string]uint8 { + return ptr.ToUint8Map(vs) +} + +// ToUint16 returns uint16 value dereferenced if the passed +// in pointer was not nil. Returns a uint16 zero value if the +// pointer was nil. +func ToUint16(p *uint16) (v uint16) { + return ptr.ToUint16(p) +} + +// ToUint16Slice returns a slice of uint16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint16 +// zero value if the pointer was nil. +func ToUint16Slice(vs []*uint16) []uint16 { + return ptr.ToUint16Slice(vs) +} + +// ToUint16Map returns a map of uint16 values, that are +// dereferenced if the passed in pointer was not nil. The uint16 +// zero value is used if the pointer was nil. +func ToUint16Map(vs map[string]*uint16) map[string]uint16 { + return ptr.ToUint16Map(vs) +} + +// ToUint32 returns uint32 value dereferenced if the passed +// in pointer was not nil. Returns a uint32 zero value if the +// pointer was nil. +func ToUint32(p *uint32) (v uint32) { + return ptr.ToUint32(p) +} + +// ToUint32Slice returns a slice of uint32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint32 +// zero value if the pointer was nil. +func ToUint32Slice(vs []*uint32) []uint32 { + return ptr.ToUint32Slice(vs) +} + +// ToUint32Map returns a map of uint32 values, that are +// dereferenced if the passed in pointer was not nil. The uint32 +// zero value is used if the pointer was nil. +func ToUint32Map(vs map[string]*uint32) map[string]uint32 { + return ptr.ToUint32Map(vs) +} + +// ToUint64 returns uint64 value dereferenced if the passed +// in pointer was not nil. Returns a uint64 zero value if the +// pointer was nil. +func ToUint64(p *uint64) (v uint64) { + return ptr.ToUint64(p) +} + +// ToUint64Slice returns a slice of uint64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint64 +// zero value if the pointer was nil. +func ToUint64Slice(vs []*uint64) []uint64 { + return ptr.ToUint64Slice(vs) +} + +// ToUint64Map returns a map of uint64 values, that are +// dereferenced if the passed in pointer was not nil. The uint64 +// zero value is used if the pointer was nil. +func ToUint64Map(vs map[string]*uint64) map[string]uint64 { + return ptr.ToUint64Map(vs) +} + +// ToFloat32 returns float32 value dereferenced if the passed +// in pointer was not nil. Returns a float32 zero value if the +// pointer was nil. +func ToFloat32(p *float32) (v float32) { + return ptr.ToFloat32(p) +} + +// ToFloat32Slice returns a slice of float32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float32 +// zero value if the pointer was nil. +func ToFloat32Slice(vs []*float32) []float32 { + return ptr.ToFloat32Slice(vs) +} + +// ToFloat32Map returns a map of float32 values, that are +// dereferenced if the passed in pointer was not nil. The float32 +// zero value is used if the pointer was nil. +func ToFloat32Map(vs map[string]*float32) map[string]float32 { + return ptr.ToFloat32Map(vs) +} + +// ToFloat64 returns float64 value dereferenced if the passed +// in pointer was not nil. Returns a float64 zero value if the +// pointer was nil. +func ToFloat64(p *float64) (v float64) { + return ptr.ToFloat64(p) +} + +// ToFloat64Slice returns a slice of float64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float64 +// zero value if the pointer was nil. 
+func ToFloat64Slice(vs []*float64) []float64 { + return ptr.ToFloat64Slice(vs) +} + +// ToFloat64Map returns a map of float64 values, that are +// dereferenced if the passed in pointer was not nil. The float64 +// zero value is used if the pointer was nil. +func ToFloat64Map(vs map[string]*float64) map[string]float64 { + return ptr.ToFloat64Map(vs) +} + +// ToTime returns time.Time value dereferenced if the passed +// in pointer was not nil. Returns a time.Time zero value if the +// pointer was nil. +func ToTime(p *time.Time) (v time.Time) { + return ptr.ToTime(p) +} + +// ToTimeSlice returns a slice of time.Time values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Time +// zero value if the pointer was nil. +func ToTimeSlice(vs []*time.Time) []time.Time { + return ptr.ToTimeSlice(vs) +} + +// ToTimeMap returns a map of time.Time values, that are +// dereferenced if the passed in pointer was not nil. The time.Time +// zero value is used if the pointer was nil. +func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { + return ptr.ToTimeMap(vs) +} + +// ToDuration returns time.Duration value dereferenced if the passed +// in pointer was not nil. Returns a time.Duration zero value if the +// pointer was nil. +func ToDuration(p *time.Duration) (v time.Duration) { + return ptr.ToDuration(p) +} + +// ToDurationSlice returns a slice of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Duration +// zero value if the pointer was nil. +func ToDurationSlice(vs []*time.Duration) []time.Duration { + return ptr.ToDurationSlice(vs) +} + +// ToDurationMap returns a map of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. The time.Duration +// zero value is used if the pointer was nil. +func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { + return ptr.ToDurationMap(vs) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go new file mode 100644 index 000000000..abee83b73 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package aws + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.18.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go new file mode 100644 index 000000000..91c94d987 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go @@ -0,0 +1,119 @@ +// Code generated by aws/logging_generate.go DO NOT EDIT. + +package aws + +// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where +// each bit is a flag that describes the logging behavior for one or more client components. +// The entire 64-bit group is reserved for later expansion by the SDK. +// +// Example: Setting ClientLogMode to enable logging of retries and requests +// +// clientLogMode := aws.LogRetries | aws.LogRequest +// +// Example: Adding an additional log mode to an existing ClientLogMode value +// +// clientLogMode |= aws.LogResponse +type ClientLogMode uint64 + +// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. 
+const ( + LogSigning ClientLogMode = 1 << (64 - 1 - iota) + LogRetries + LogRequest + LogRequestWithBody + LogResponse + LogResponseWithBody + LogDeprecatedUsage + LogRequestEventMessage + LogResponseEventMessage +) + +// IsSigning returns whether the Signing logging mode bit is set +func (m ClientLogMode) IsSigning() bool { + return m&LogSigning != 0 +} + +// IsRetries returns whether the Retries logging mode bit is set +func (m ClientLogMode) IsRetries() bool { + return m&LogRetries != 0 +} + +// IsRequest returns whether the Request logging mode bit is set +func (m ClientLogMode) IsRequest() bool { + return m&LogRequest != 0 +} + +// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set +func (m ClientLogMode) IsRequestWithBody() bool { + return m&LogRequestWithBody != 0 +} + +// IsResponse returns whether the Response logging mode bit is set +func (m ClientLogMode) IsResponse() bool { + return m&LogResponse != 0 +} + +// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set +func (m ClientLogMode) IsResponseWithBody() bool { + return m&LogResponseWithBody != 0 +} + +// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set +func (m ClientLogMode) IsDeprecatedUsage() bool { + return m&LogDeprecatedUsage != 0 +} + +// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set +func (m ClientLogMode) IsRequestEventMessage() bool { + return m&LogRequestEventMessage != 0 +} + +// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set +func (m ClientLogMode) IsResponseEventMessage() bool { + return m&LogResponseEventMessage != 0 +} + +// ClearSigning clears the Signing logging mode bit +func (m *ClientLogMode) ClearSigning() { + *m &^= LogSigning +} + +// ClearRetries clears the Retries logging mode bit +func (m *ClientLogMode) ClearRetries() { + *m &^= LogRetries +} + +// ClearRequest clears the Request logging mode bit +func (m *ClientLogMode) ClearRequest() { + *m &^= LogRequest +} + +// ClearRequestWithBody clears the RequestWithBody logging mode bit +func (m *ClientLogMode) ClearRequestWithBody() { + *m &^= LogRequestWithBody +} + +// ClearResponse clears the Response logging mode bit +func (m *ClientLogMode) ClearResponse() { + *m &^= LogResponse +} + +// ClearResponseWithBody clears the ResponseWithBody logging mode bit +func (m *ClientLogMode) ClearResponseWithBody() { + *m &^= LogResponseWithBody +} + +// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit +func (m *ClientLogMode) ClearDeprecatedUsage() { + *m &^= LogDeprecatedUsage +} + +// ClearRequestEventMessage clears the RequestEventMessage logging mode bit +func (m *ClientLogMode) ClearRequestEventMessage() { + *m &^= LogRequestEventMessage +} + +// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit +func (m *ClientLogMode) ClearResponseEventMessage() { + *m &^= LogResponseEventMessage +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go new file mode 100644 index 000000000..6ecc2231a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go @@ -0,0 +1,95 @@ +//go:build clientlogmode +// +build clientlogmode + +package main + +import ( + "fmt" + "log" + "os" + "strings" + "text/template" +) + +var config = struct { + ModeBits []string +}{ + // Items should be appended only to keep bit-flag positions stable + ModeBits: []string{ + "Signing", + "Retries", 
+ "Request", + "RequestWithBody", + "Response", + "ResponseWithBody", + "DeprecatedUsage", + "RequestEventMessage", + "ResponseEventMessage", + }, +} + +func bitName(name string) string { + return strings.ToUpper(name[:1]) + name[1:] +} + +var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{ + "symbolName": func(name string) string { + return "Log" + bitName(name) + }, + "bitName": bitName, +}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT. + +package aws + +// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where +// each bit is a flag that describes the logging behavior for one or more client components. +// The entire 64-bit group is reserved for later expansion by the SDK. +// +// Example: Setting ClientLogMode to enable logging of retries and requests +// clientLogMode := aws.LogRetries | aws.LogRequest +// +// Example: Adding an additional log mode to an existing ClientLogMode value +// clientLogMode |= aws.LogResponse +type ClientLogMode uint64 + +// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. +const ( +{{- range $index, $field := .ModeBits }} + {{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }} +{{- end }} +) +{{ range $_, $field := .ModeBits }} +// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set +func (m ClientLogMode) Is{{- bitName $field }}() bool { + return m&{{- (symbolName $field) }} != 0 +} +{{ end }} +{{- range $_, $field := .ModeBits }} +// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit +func (m *ClientLogMode) Clear{{- bitName $field }}() { + *m &^= {{ (symbolName $field) }} +} +{{ end -}} +`)) + +func main() { + uniqueBitFields := make(map[string]struct{}) + + for _, bitName := range config.ModeBits { + if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok { + panic(fmt.Sprintf("duplicate bit field: %s", bitName)) + } + uniqueBitFields[bitName] = struct{}{} + } + + file, err := os.Create("logging.go") + if err != nil { + log.Fatal(err) + } + defer file.Close() + + err = tmpl.Execute(file, config) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go new file mode 100644 index 000000000..e6e87ac77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go @@ -0,0 +1,180 @@ +package middleware + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + + "github.com/aws/smithy-go/middleware" +) + +// RegisterServiceMetadata registers metadata about the service and operation into the middleware context +// so that it is available at runtime for other middleware to introspect. +type RegisterServiceMetadata struct { + ServiceID string + SigningName string + Region string + OperationName string +} + +// ID returns the middleware identifier. +func (s *RegisterServiceMetadata) ID() string { + return "RegisterServiceMetadata" +} + +// HandleInitialize registers service metadata information into the middleware context, allowing for introspection. 
+func (s RegisterServiceMetadata) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) {
+	if len(s.ServiceID) > 0 {
+		ctx = SetServiceID(ctx, s.ServiceID)
+	}
+	if len(s.SigningName) > 0 {
+		ctx = SetSigningName(ctx, s.SigningName)
+	}
+	if len(s.Region) > 0 {
+		ctx = setRegion(ctx, s.Region)
+	}
+	if len(s.OperationName) > 0 {
+		ctx = setOperationName(ctx, s.OperationName)
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// Service metadata keys for storing and looking up runtime stack information.
+type (
+	serviceIDKey     struct{}
+	signingNameKey   struct{}
+	signingRegionKey struct{}
+	regionKey        struct{}
+	operationNameKey struct{}
+	partitionIDKey   struct{}
+)
+
+// GetServiceID retrieves the service id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetServiceID(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string)
+	return v
+}
+
+// GetSigningName retrieves the service signing name from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSigningName(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string)
+	return v
+}
+
+// GetSigningRegion retrieves the region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSigningRegion(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string)
+	return v
+}
+
+// GetRegion retrieves the endpoint region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRegion(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, regionKey{}).(string)
+	return v
+}
+
+// GetOperationName retrieves the service operation metadata from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetOperationName(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string)
+	return v
+}
+
+// GetPartitionID retrieves the endpoint partition id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetPartitionID(ctx context.Context) string {
+	v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string)
+	return v
+}
+
+// SetSigningName sets or modifies the signing name on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSigningName(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, signingNameKey{}, value)
+}
+
+// SetSigningRegion sets or modifies the region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSigningRegion(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, signingRegionKey{}, value)
+}
+
+// SetServiceID sets the service id on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetServiceID(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, serviceIDKey{}, value)
+}
+
+// setRegion sets the endpoint region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setRegion(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, regionKey{}, value)
+}
+
+// setOperationName sets the service operation on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setOperationName(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, operationNameKey{}, value)
+}
+
+// SetPartitionID sets the partition id of a resolved region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPartitionID(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, partitionIDKey{}, value)
+}
+
+// endpointSourceKey is the context key for the endpoint source.
+type endpointSourceKey struct{}
+
+// GetEndpointSource returns the endpoint source if set on the context.
+func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) {
+	v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource)
+	return v
+}
+
+// SetEndpointSource sets the endpoint source on the context.
+func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context {
+	return middleware.WithStackValue(ctx, endpointSourceKey{}, value)
+}
+
+type signingCredentialsKey struct{}
+
+// GetSigningCredentials returns the credentials that were used for signing if set on context.
+func GetSigningCredentials(ctx context.Context) (v aws.Credentials) {
+	v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials)
+	return v
+}
+
+// SetSigningCredentials sets the credentials used for signing on the context.
+func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context {
+	return middleware.WithStackValue(ctx, signingCredentialsKey{}, value)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
new file mode 100644
index 000000000..9bd0dfb15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
@@ -0,0 +1,168 @@
+package middleware
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyrand "github.com/aws/smithy-go/rand"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for logical API operation
+// invocation.
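+// The ID is a UUID carried in the "Amz-Sdk-Invocation-Id" request header, e.g.
+// (illustrative value only):
+//
+//	Amz-Sdk-Invocation-Id: 8df6e7ac-9d4d-4c94-b09e-4f7c8b1b6f9e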
+type ClientRequestID struct{} + +// ID the identifier for the ClientRequestID +func (r *ClientRequestID) ID() string { + return "ClientRequestID" +} + +// HandleBuild attaches a unique operation invocation id for the operation to the request +func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", req) + } + + invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID() + if err != nil { + return out, metadata, err + } + + const invocationIDHeader = "Amz-Sdk-Invocation-Id" + req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID) + + return next.HandleBuild(ctx, in) +} + +// RecordResponseTiming records the response timing for the SDK client requests. +type RecordResponseTiming struct{} + +// ID is the middleware identifier +func (a *RecordResponseTiming) ID() string { + return "RecordResponseTiming" +} + +// HandleDeserialize calculates response metadata and clock skew +func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + responseAt := sdk.NowTime() + setResponseAt(&metadata, responseAt) + + var serverTime time.Time + + switch resp := out.RawResponse.(type) { + case *smithyhttp.Response: + respDateHeader := resp.Header.Get("Date") + if len(respDateHeader) == 0 { + break + } + var parseErr error + serverTime, parseErr = smithyhttp.ParseTime(respDateHeader) + if parseErr != nil { + logger := middleware.GetLogger(ctx) + logger.Logf(logging.Warn, "failed to parse response Date header value, got %v", + parseErr.Error()) + break + } + setServerTime(&metadata, serverTime) + } + + if !serverTime.IsZero() { + attemptSkew := serverTime.Sub(responseAt) + setAttemptSkew(&metadata, attemptSkew) + } + + return out, metadata, err +} + +type responseAtKey struct{} + +// GetResponseAt returns the time response was received at. +func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) { + v, ok = metadata.Get(responseAtKey{}).(time.Time) + return v, ok +} + +// setResponseAt sets the response time on the metadata. +func setResponseAt(metadata *middleware.Metadata, v time.Time) { + metadata.Set(responseAtKey{}, v) +} + +type serverTimeKey struct{} + +// GetServerTime returns the server time for response. +func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) { + v, ok = metadata.Get(serverTimeKey{}).(time.Time) + return v, ok +} + +// setServerTime sets the server time on the metadata. +func setServerTime(metadata *middleware.Metadata, v time.Time) { + metadata.Set(serverTimeKey{}, v) +} + +type attemptSkewKey struct{} + +// GetAttemptSkew returns Attempt clock skew for response from metadata. +func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) { + v, ok = metadata.Get(attemptSkewKey{}).(time.Duration) + return v, ok +} + +// setAttemptSkew sets the attempt clock skew on the metadata. 
+func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) {
+	metadata.Set(attemptSkewKey{}, v)
+}
+
+// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack
+func AddClientRequestIDMiddleware(stack *middleware.Stack) error {
+	return stack.Build.Add(&ClientRequestID{}, middleware.After)
+}
+
+// AddRecordResponseTiming adds RecordResponseTiming middleware to the
+// middleware stack.
+func AddRecordResponseTiming(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After)
+}
+
+// rawResponseKey is the accessor key used to store and access the
+// raw response within the response metadata.
+type rawResponseKey struct{}
+
+// addRawResponse middleware adds raw response on to the metadata
+type addRawResponse struct{}
+
+// ID is the identifier for the addRawResponse middleware
+func (m *addRawResponse) ID() string {
+	return "AddRawResponseToMetadata"
+}
+
+// HandleDeserialize adds raw response on the middleware metadata
+func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	metadata.Set(rawResponseKey{}, out.RawResponse)
+	return out, metadata, err
+}
+
+// AddRawResponseToMetadata adds middleware to the middleware stack that
+// stores the raw response onto the metadata.
+func AddRawResponseToMetadata(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&addRawResponse{}, middleware.Before)
+}
+
+// GetRawResponse returns raw response set on metadata
+func GetRawResponse(metadata middleware.Metadata) interface{} {
+	return metadata.Get(rawResponseKey{})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go
new file mode 100644
index 000000000..ba262dadc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go
@@ -0,0 +1,24 @@
+//go:build go1.16
+// +build go1.16
+
+package middleware
+
+import "runtime"
+
+func getNormalizedOSName() (os string) {
+	switch runtime.GOOS {
+	case "android":
+		os = "android"
+	case "linux":
+		os = "linux"
+	case "windows":
+		os = "windows"
+	case "darwin":
+		os = "macos"
+	case "ios":
+		os = "ios"
+	default:
+		os = "other"
+	}
+	return os
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go
new file mode 100644
index 000000000..e14a1e4ec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go
@@ -0,0 +1,24 @@
+//go:build !go1.16
+// +build !go1.16
+
+package middleware
+
+import "runtime"
+
+func getNormalizedOSName() (os string) {
+	switch runtime.GOOS {
+	case "android":
+		os = "android"
+	case "linux":
+		os = "linux"
+	case "windows":
+		os = "windows"
+	case "darwin":
+		// Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64
+		// For now declare this as "other" until we have a better detection mechanism.
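+		// (Builds on Go 1.16 and newer compile osname.go above instead, where
+		// the dedicated "ios" GOOS value removes this ambiguity.)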
+ fallthrough + default: + os = "other" + } + return os +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go new file mode 100644 index 000000000..3f6aaf231 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go @@ -0,0 +1,94 @@ +package middleware + +import ( + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "os" +) + +const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME" +const envAmznTraceID = "_X_AMZN_TRACE_ID" +const amznTraceIDHeader = "X-Amzn-Trace-Id" + +// AddRecursionDetection adds recursionDetection to the middleware stack +func AddRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&RecursionDetection{}, middleware.After) +} + +// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent +// to avoid recursion invocation in Lambda +type RecursionDetection struct{} + +// ID returns the middleware identifier +func (m *RecursionDetection) ID() string { + return "RecursionDetection" +} + +// HandleBuild detects Lambda environment and adds its trace ID to request header if absent +func (m *RecursionDetection) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + _, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName) + xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID) + value := req.Header.Get(amznTraceIDHeader) + // only set the X-Amzn-Trace-Id header when it is not set initially, the + // current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists + if value != "" || !hasLambdaEnv || !hasTraceID { + return next.HandleBuild(ctx, in) + } + + req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID)) + return next.HandleBuild(ctx, in) +} + +func percentEncode(s string) string { + upperhex := "0123456789ABCDEF" + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEncode(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + required := len(s) + 2*hexCount + t := make([]byte, required) + j := 0 + for i := 0; i < len(s); i++ { + if c := s[i]; shouldEncode(c) { + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + return string(t) +} + +func shouldEncode(c byte) bool { + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',': + return false + default: + return true + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go new file mode 100644 index 000000000..dd3391fe4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go @@ -0,0 +1,27 @@ +package middleware + +import ( + "github.com/aws/smithy-go/middleware" +) + +// requestIDKey is used to retrieve request id from response metadata +type requestIDKey struct{} + +// SetRequestIDMetadata sets the provided request id over middleware metadata +func SetRequestIDMetadata(metadata *middleware.Metadata, id string) { + metadata.Set(requestIDKey{}, id) 
+}
+
+// GetRequestIDMetadata retrieves the request id from middleware metadata.
+// It returns the request id string and a bool indicating whether the request id was set.
+func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) {
+	if !metadata.Has(requestIDKey{}) {
+		return "", false
+	}
+
+	v, ok := metadata.Get(requestIDKey{}).(string)
+	if !ok {
+		return "", true
+	}
+	return v, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
new file mode 100644
index 000000000..7ce48c611
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
@@ -0,0 +1,49 @@
+package middleware
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddRequestIDRetrieverMiddleware adds the request id retriever middleware to the stack
+func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+	// add error wrapper middleware before operation deserializers so that it can wrap the error response
+	// returned by operation deserializers
+	return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+type requestIDRetriever struct {
+}
+
+// ID returns the middleware identifier
+func (m *requestIDRetriever) ID() string {
+	return "RequestIDRetriever"
+}
+
+func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+		return out, metadata, err
+	}
+
+	// Different headers that can map to the request id
+	requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"}
+
+	for _, h := range requestIDHeaderList {
+		// check for headers known to contain Request id
+		if v := resp.Header.Get(h); len(v) != 0 {
+			// set reqID on metadata for successful responses.
+			SetRequestIDMetadata(&metadata, v)
+			break
+		}
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
new file mode 100644
index 000000000..285b2bba8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
@@ -0,0 +1,243 @@
+package middleware
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+var languageVersion = strings.TrimPrefix(runtime.Version(), "go")
+
+// SDKAgentKeyType is the metadata type to add to the SDK agent string
+type SDKAgentKeyType int
+
+// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will
+// be mapped to AdditionalMetadata.
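+// For example, OperatingSystemMetadata renders as an "os/..." component and
+// LanguageMetadata as a "lang/..." component of the agent string (see
+// SDKAgentKeyType.string below), e.g. "os/linux lang/go/1.15".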
+const (
+	_ SDKAgentKeyType = iota
+	APIMetadata
+	OperatingSystemMetadata
+	LanguageMetadata
+	EnvironmentMetadata
+	FeatureMetadata
+	ConfigMetadata
+	FrameworkMetadata
+	AdditionalMetadata
+	ApplicationIdentifier
+)
+
+func (k SDKAgentKeyType) string() string {
+	switch k {
+	case APIMetadata:
+		return "api"
+	case OperatingSystemMetadata:
+		return "os"
+	case LanguageMetadata:
+		return "lang"
+	case EnvironmentMetadata:
+		return "exec-env"
+	case FeatureMetadata:
+		return "ft"
+	case ConfigMetadata:
+		return "cfg"
+	case FrameworkMetadata:
+		return "lib"
+	case ApplicationIdentifier:
+		return "app"
+	case AdditionalMetadata:
+		fallthrough
+	default:
+		return "md"
+	}
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+
+// requestUserAgent is a build middleware that sets the User-Agent for the request.
+type requestUserAgent struct {
+	sdkAgent, userAgent *smithyhttp.UserAgentBuilder
+}
+
+// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the
+// request.
+//
+// User-Agent example:
+//
+//	aws-sdk-go-v2/1.2.3
+//
+// X-Amz-User-Agent example:
+//
+//	aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15
+func newRequestUserAgent() *requestUserAgent {
+	userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder()
+	addProductName(userAgent)
+	addProductName(sdkAgent)
+
+	r := &requestUserAgent{
+		sdkAgent:  sdkAgent,
+		userAgent: userAgent,
+	}
+
+	addSDKMetadata(r)
+
+	return r
+}
+
+func addSDKMetadata(r *requestUserAgent) {
+	r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName())
+	r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion)
+	r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS)
+	r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH)
+	if ev := os.Getenv(execEnvVar); len(ev) > 0 {
+		r.AddSDKAgentKey(EnvironmentMetadata, ev)
+	}
+}
+
+func addProductName(builder *smithyhttp.UserAgentBuilder) {
+	builder.AddKeyValue(aws.SDKName, aws.SDKVersion)
+}
+
+// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddUserAgentKey(key string) func(*middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		requestUserAgent, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+		requestUserAgent.AddUserAgentKey(key)
+		return nil
+	}
+}
+
+// AddUserAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		requestUserAgent, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+		requestUserAgent.AddUserAgentKeyValue(key, value)
+		return nil
+	}
+}
+
+// AddSDKAgentKey retrieves a requestUserAgent from the provided stack, or initializes one.
+func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		requestUserAgent, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+		requestUserAgent.AddSDKAgentKey(keyType, key)
+		return nil
+	}
+}
+
+// AddSDKAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one.
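+//
+// A minimal usage sketch, given a middleware stack (the key and value are
+// hypothetical):
+//
+//	if err := AddSDKAgentKeyValue(FrameworkMetadata, "example-lib", "1.0.0")(stack); err != nil {
+//		// handle error
+//	}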
+func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		requestUserAgent, err := getOrAddRequestUserAgent(stack)
+		if err != nil {
+			return err
+		}
+		requestUserAgent.AddSDKAgentKeyValue(keyType, key, value)
+		return nil
+	}
+}
+
+// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present.
+func AddRequestUserAgentMiddleware(stack *middleware.Stack) error {
+	_, err := getOrAddRequestUserAgent(stack)
+	return err
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) {
+	id := (*requestUserAgent)(nil).ID()
+	bm, ok := stack.Build.Get(id)
+	if !ok {
+		bm = newRequestUserAgent()
+		err := stack.Build.Add(bm, middleware.After)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	requestUserAgent, ok := bm.(*requestUserAgent)
+	if !ok {
+		return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id)
+	}
+
+	return requestUserAgent, nil
+}
+
+// AddUserAgentKey adds the component identified by name to the User-Agent string.
+func (u *requestUserAgent) AddUserAgentKey(key string) {
+	u.userAgent.AddKey(key)
+}
+
+// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
+func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) {
+	u.userAgent.AddKeyValue(key, value)
+}
+
+// AddSDKAgentKey adds the SDK agent component identified by the key type and name to the User-Agent string.
+func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
+	// TODO: should target sdkAgent
+	u.userAgent.AddKey(keyType.string() + "/" + key)
+}
+
+// AddSDKAgentKeyValue adds the SDK agent key identified by the given key type, name, and value to the User-Agent string.
+func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
+	// TODO: should target sdkAgent
+	u.userAgent.AddKeyValue(keyType.string()+"/"+key, value)
+}
+
+// ID returns the name of the middleware.
+func (u *requestUserAgent) ID() string {
+	return "UserAgent"
+}
+
+// HandleBuild adds or appends the constructed user agent to the request.
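+// Note that only the User-Agent header is written at present; the
+// X-Amz-User-Agent variant is built but not attached (see the TODO below).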
+func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + switch req := in.Request.(type) { + case *smithyhttp.Request: + u.addHTTPUserAgent(req) + // TODO: To be re-enabled + // u.addHTTPSDKAgent(req) + default: + return out, metadata, fmt.Errorf("unknown transport type %T", in) + } + + return next.HandleBuild(ctx, in) +} + +func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { + const userAgent = "User-Agent" + updateHTTPHeader(request, userAgent, u.userAgent.Build()) +} + +func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { + const sdkAgent = "X-Amz-User-Agent" + updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build()) +} + +func updateHTTPHeader(request *smithyhttp.Request, header string, value string) { + var current string + if v := request.Header[header]; len(v) > 0 { + current = v[0] + } + if len(current) > 0 { + current = value + " " + current + } else { + current = value + } + request.Header[header] = append(request.Header[header][:0], current) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md new file mode 100644 index 000000000..402849cda --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md @@ -0,0 +1,54 @@ +# v1.4.8 (2022-09-14) + +* No change notes available for this release. + +# v1.4.7 (2022-09-02) + +* No change notes available for this release. + +# v1.4.6 (2022-08-31) + +* No change notes available for this release. + +# v1.4.5 (2022-08-29) + +* No change notes available for this release. + +# v1.4.4 (2022-08-09) + +* No change notes available for this release. + +# v1.4.3 (2022-06-29) + +* No change notes available for this release. + +# v1.4.2 (2022-06-07) + +* No change notes available for this release. + +# v1.4.1 (2022-03-24) + +* No change notes available for this release. + +# v1.4.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.3.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.2.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.1.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.0.0 (2021-11-06) + +* **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release. +* **Release**: Protocol support has been added for AWS event stream. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go new file mode 100644 index 000000000..151054971 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + *hs = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err + case timestampValueType: + v, err := val.(json.Number).Int64() + return 
TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go new file mode 100644 index 000000000..d9ab7652f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go @@ -0,0 +1,218 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/aws/smithy-go/logging" + "hash" + "hash/crc32" + "io" +) + +// DecoderOptions is the Decoder configuration options. +type DecoderOptions struct { + Logger logging.Logger + LogMessages bool +} + +// Decoder provides decoding of an Event Stream messages. +type Decoder struct { + options DecoderOptions +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(optFns ...func(*DecoderOptions)) *Decoder { + options := DecoderOptions{} + + for _, fn := range optFns { + fn(&options) + } + + return &Decoder{ + options: options, + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if decodeMessage fails to read +// the message from the stream. +// +// payloadBuf is a byte slice that will be used in the returned Message.Payload. Callers +// must ensure that the Message.Payload from a previous decode has been consumed before passing in the same underlying +// payloadBuf byte slice. +func (d *Decoder) Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { + if d.options.Logger != nil && d.options.LogMessages { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.options.Logger, debugMsgBuf, m, err) + }() + } + + m, err = decodeMessage(reader, payloadBuf) + + return m, err +} + +// decodeMessage attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if decodeMessage fails to read +// the message from the reader. 
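+//
+// The wire format handled below is, in order (per the prelude, header, and
+// CRC logic in this file):
+//
+//	4-byte total length | 4-byte headers length | 4-byte prelude CRC
+//	| headers | payload | 4-byte message CRC (CRC32, IEEE polynomial)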
+func decodeMessage(reader io.Reader, payloadBuf []byte) (m Message, err error) { + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +func logMessageDecode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Logf(logging.Debug, w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "decodeMessage error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return v, err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return b[0], err +} + +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} + +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} + +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go new file mode 100644 index 000000000..f03ee4b93 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go @@ -0,0 +1,167 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/aws/smithy-go/logging" + "hash" + "hash/crc32" + "io" +) + +// EncoderOptions is the configuration options for Encoder. +type EncoderOptions struct { + Logger logging.Logger + LogMessages bool +} + +// Encoder provides EventStream message encoding. +type Encoder struct { + options EncoderOptions + + headersBuf *bytes.Buffer + messageBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages. +func NewEncoder(optFns ...func(*EncoderOptions)) *Encoder { + o := EncoderOptions{} + + for _, fn := range optFns { + fn(&o) + } + + return &Encoder{ + options: o, + headersBuf: bytes.NewBuffer(nil), + messageBuf: bytes.NewBuffer(nil), + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. +func (e *Encoder) Encode(w io.Writer, msg Message) (err error) { + e.headersBuf.Reset() + e.messageBuf.Reset() + + var writer io.Writer = e.messageBuf + if e.options.Logger != nil && e.options.LogMessages { + encodeMsgBuf := bytes.NewBuffer(nil) + writer = io.MultiWriter(writer, encodeMsgBuf) + defer func() { + logMessageEncode(e.options.Logger, encodeMsgBuf, msg, err) + }() + } + + if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(writer, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err = hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + if err := binary.Write(writer, binary.BigEndian, msgCRC); err != nil { + return err + } + + _, err = io.Copy(w, e.messageBuf) + + return err +} + +func logMessageEncode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Logf(logging.Debug, w.String()) }() + + fmt.Fprintf(w, "Message to encode:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(msg); err != nil { + fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) + } + + if encodeErr != nil { + fmt.Fprintf(w, "Encode error: %v\n", encodeErr) + return + } + + fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +// EncodeHeaders writes the header values to the writer encoded in the event +// stream format. Returns an error if a header fails to encode. 
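+//
+// Each header is written as a 1-byte name length, the name bytes, and then
+// the type-tagged value (see headerName.encode and Value.encode).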
+func EncodeHeaders(w io.Writer, headers Headers) error {
+	for _, h := range headers {
+		hn := headerName{
+			Len: uint8(len(h.Name)),
+		}
+		copy(hn.Name[:hn.Len], h.Name)
+		if err := hn.encode(w); err != nil {
+			return err
+		}
+
+		if err := h.Value.encode(w); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error {
+	for _, v := range vs {
+		if err := binary.Write(w, order, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go
new file mode 100644
index 000000000..5481ef307
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go
@@ -0,0 +1,23 @@
+package eventstream
+
+import "fmt"
+
+// LengthError provides the error for items being larger than a maximum length.
+type LengthError struct {
+	Part  string
+	Want  int
+	Have  int
+	Value interface{}
+}
+
+func (e LengthError) Error() string {
+	return fmt.Sprintf("%s length invalid, %d/%d, %v",
+		e.Part, e.Want, e.Have, e.Value)
+}
+
+// ChecksumError provides the error for message checksum invalidation errors.
+type ChecksumError struct{}
+
+func (e ChecksumError) Error() string {
+	return "message checksum mismatch"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go
new file mode 100644
index 000000000..93ea71ffd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go
@@ -0,0 +1,24 @@
+package eventstreamapi
+
+// EventStream headers with specific meaning to async API functionality.
+const (
+	ChunkSignatureHeader = `:chunk-signature` // chunk signature for message
+	DateHeader           = `:date`            // Date header for signature
+	ContentTypeHeader    = ":content-type"    // message payload content-type
+
+	// Message header and values
+	MessageTypeHeader    = `:message-type` // Identifies type of message.
+	EventMessageType     = `event`
+	ErrorMessageType     = `error`
+	ExceptionMessageType = `exception`
+
+	// Message Events
+	EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
+
+	// Message Error
+	ErrorCodeHeader    = `:error-code`
+	ErrorMessageHeader = `:error-message`
+
+	// Message Exception
+	ExceptionTypeHeader = `:exception-type`
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go
new file mode 100644
index 000000000..d07ff6b89
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go
@@ -0,0 +1,71 @@
+package eventstreamapi
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"io"
+)
+
+type eventStreamWriterKey struct{}
+
+// GetInputStreamWriter returns the io.WriteCloser used for the operation's input event stream.
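+// As a rough usage sketch: operation code obtains the writer from the request
+// context once the InitializeStreamWriter middleware below has run, writes
+// encoded event stream messages to it, and closes it when the stream is done.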
+func GetInputStreamWriter(ctx context.Context) io.WriteCloser { + writeCloser, _ := middleware.GetStackValue(ctx, eventStreamWriterKey{}).(io.WriteCloser) + return writeCloser +} + +func setInputStreamWriter(ctx context.Context, writeCloser io.WriteCloser) context.Context { + return middleware.WithStackValue(ctx, eventStreamWriterKey{}, writeCloser) +} + +// InitializeStreamWriter is a Finalize middleware initializes an in-memory pipe for sending event stream messages +// via the HTTP request body. +type InitializeStreamWriter struct{} + +// AddInitializeStreamWriter adds the InitializeStreamWriter middleware to the provided stack. +func AddInitializeStreamWriter(stack *middleware.Stack) error { + return stack.Finalize.Add(&InitializeStreamWriter{}, middleware.After) +} + +// ID returns the identifier for the middleware. +func (i *InitializeStreamWriter) ID() string { + return "InitializeStreamWriter" +} + +// HandleFinalize is the middleware implementation. +func (i *InitializeStreamWriter) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) + } + + inputReader, inputWriter := io.Pipe() + defer func() { + if err == nil { + return + } + _ = inputReader.Close() + _ = inputWriter.Close() + }() + + request, err = request.SetStream(inputReader) + if err != nil { + return out, metadata, err + } + in.Request = request + + ctx = setInputStreamWriter(ctx, inputWriter) + + out, metadata, err = next.HandleFinalize(ctx, in) + if err != nil { + return out, metadata, err + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go new file mode 100644 index 000000000..cbf5a2862 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go @@ -0,0 +1,13 @@ +//go:build go1.18 +// +build go1.18 + +package eventstreamapi + +import smithyhttp "github.com/aws/smithy-go/transport/http" + +// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality. +// +// This operation is a no-op for Go 1.18 and above. +func ApplyHTTPTransportFixes(r *smithyhttp.Request) error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go new file mode 100644 index 000000000..7d10ec2eb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go @@ -0,0 +1,12 @@ +//go:build !go1.18 +// +build !go1.18 + +package eventstreamapi + +import smithyhttp "github.com/aws/smithy-go/transport/http" + +// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality. 
+func ApplyHTTPTransportFixes(r *smithyhttp.Request) error { + r.Header.Set("Expect", "100-continue") + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go new file mode 100644 index 000000000..048add0bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package eventstream + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.4.8" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go new file mode 100644 index 000000000..f6f8c5674 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go @@ -0,0 +1,175 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
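+//
+// For example (illustrative):
+//
+//	hs := Headers{}
+//	hs.Set("chunk-id", Int32Value(1))
+//	_ = hs.Get("chunk-id") // Int32Value(1)
+//	hs.Del("chunk-id")
+//	_ = hs.Get("chunk-id") // nil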
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go new file mode 100644 index 000000000..423b6bb26 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go @@ -0,0 +1,521 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. 
+func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + var scratch [36]byte + + const dash = '-' + + hex.Encode(scratch[:8], v[0:4]) + scratch[8] = dash + hex.Encode(scratch[9:13], v[4:6]) + scratch[13] = dash + hex.Encode(scratch[14:18], v[6:8]) + scratch[18] = dash + hex.Encode(scratch[19:23], v[8:10]) + scratch[23] = dash + hex.Encode(scratch[24:], v[10:]) + + return string(scratch[:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. +func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go new file mode 100644 index 000000000..f7427da03 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go @@ -0,0 +1,117 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := EncodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +// Clone returns a deep copy of the message. 
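+//
+// For example (illustrative):
+//
+//	orig := Message{Payload: []byte("data")}
+//	cp := orig.Clone()
+//	cp.Payload[0] = 'D' // does not modify orig.Payload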
+func (m Message) Clone() Message { + var payload []byte + if m.Payload != nil { + payload = make([]byte, len(m.Payload)) + copy(payload, m.Payload) + } + + return Message{ + Headers: m.Headers.Clone(), + Payload: payload, + } +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go new file mode 100644 index 000000000..47ebc0f54 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go @@ -0,0 +1,72 @@ +package query + +import ( + "fmt" + "net/url" +) + +// Array represents the encoding of Query lists and sets. A Query array is a +// representation of a list of values of a fixed type. A serialized array might +// look like the following: +// +// ListName.member.1=foo +// &ListName.member.2=bar +// &Listname.member.3=baz +type Array struct { + // The query values to add the array to. + values url.Values + // The array's prefix, which includes the names of all parent structures + // and ends with the name of the list. For example, the prefix might be + // "ParentStructure.ListName". This prefix will be used to form the full + // keys for each element in the list. For example, an entry might have the + // key "ParentStructure.ListName.member.MemberName.1". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string + // Whether the list is flat or not. A list that is not flat will produce the + // following entry to the url.Values for a given entry: + // ListName.MemberName.1=value + // A list that is flat will produce the following: + // ListName.1=value + flat bool + // The location name of the member. In most cases this should be "member". + memberName string + // Elements are stored in values, so we keep track of the list size here. + size int32 + // Empty lists are encoded as "=", if we add a value later we will + // remove this encoding + emptyValue Value +} + +func newArray(values url.Values, prefix string, flat bool, memberName string) *Array { + emptyValue := newValue(values, prefix, flat) + emptyValue.String("") + + return &Array{ + values: values, + prefix: prefix, + flat: flat, + memberName: memberName, + emptyValue: emptyValue, + } +} + +// Value adds a new element to the Query Array. Returns a Value type used to +// encode the array element. 
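+//
+// A sketch of building a non-flat list (the encoder wiring is an assumption
+// based on this package's Encoder type):
+//
+//	var buf bytes.Buffer
+//	e := NewEncoder(&buf)
+//	list := e.Object().Key("ListName").Array("member")
+//	list.Value().String("foo")
+//	list.Value().String("bar")
+//	_ = e.Encode() // ListName.member.1=foo&ListName.member.2=bar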
+func (a *Array) Value() Value {
+	if a.size == 0 {
+		delete(a.values, a.emptyValue.key)
+	}
+
+	// Query lists start at 1, so adjust the size first
+	a.size++
+	prefix := a.prefix
+	if !a.flat {
+		prefix = fmt.Sprintf("%s.%s", prefix, a.memberName)
+	}
+	// Lists can't have flat members
+	return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
new file mode 100644
index 000000000..2ecf9241c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
@@ -0,0 +1,80 @@
+package query
+
+import (
+	"io"
+	"net/url"
+	"sort"
+)
+
+// Encoder is a Query encoder that supports construction of Query body
+// values using methods.
+type Encoder struct {
+	// The query values that will be built up to manage encoding.
+	values url.Values
+	// The writer that the encoded body will be written to.
+	writer io.Writer
+	Value
+}
+
+// NewEncoder returns a new Query body encoder
+func NewEncoder(writer io.Writer) *Encoder {
+	values := url.Values{}
+	return &Encoder{
+		values: values,
+		writer: writer,
+		Value:  newBaseValue(values),
+	}
+}
+
+// Encode writes the query string representing the current state of the
+// encoder to the encoder's writer, returning an error if a write fails.
+func (e Encoder) Encode() error {
+	ws, ok := e.writer.(interface{ WriteString(string) (int, error) })
+	if !ok {
+		// Fall back to less optimal byte slice casting if WriteString isn't available.
+		ws = &wrapWriteString{writer: e.writer}
+	}
+
+	// Get the keys and sort them to have a stable output
+	keys := make([]string, 0, len(e.values))
+	for k := range e.values {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	isFirstEntry := true
+	for _, key := range keys {
+		queryValues := e.values[key]
+		escapedKey := url.QueryEscape(key)
+		for _, value := range queryValues {
+			if !isFirstEntry {
+				if _, err := ws.WriteString(`&`); err != nil {
+					return err
+				}
+			} else {
+				isFirstEntry = false
+			}
+			if _, err := ws.WriteString(escapedKey); err != nil {
+				return err
+			}
+			if _, err := ws.WriteString(`=`); err != nil {
+				return err
+			}
+			if _, err := ws.WriteString(url.QueryEscape(value)); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// wrapWriteString wraps an io.Writer to provide a WriteString method
+// where one is not available.
+type wrapWriteString struct {
+	writer io.Writer
+}
+
+// WriteString writes a string to the wrapped writer by converting it to a
+// byte slice first.
+func (w wrapWriteString) WriteString(v string) (int, error) {
+	return w.writer.Write([]byte(v))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go
new file mode 100644
index 000000000..dea242b8b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go
@@ -0,0 +1,78 @@
+package query
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// Map represents the encoding of Query maps. A Query map is a representation
+// of a mapping of arbitrary string keys to arbitrary values of a fixed type.
+// A Map differs from an Object in that the set of keys is not fixed, in that
+// the values must all be of the same type, and that map entries are ordered.
+// A serialized map might look like the following:
+//
+//	MapName.entry.1.key=Foo
+//	&MapName.entry.1.value=spam
+//	&MapName.entry.2.key=Bar
+//	&MapName.entry.2.value=eggs
+type Map struct {
+	// The query values to add the map to.
+	values url.Values
+	// The map's prefix, which includes the names of all parent structures
+	// and ends with the name of the object. For example, the prefix might be
+	// "ParentStructure.MapName". This prefix will be used to form the full
+	// keys for each key-value pair of the map. For example, a value might have
+	// the key "ParentStructure.MapName.1.value".
+	//
+	// While this is currently represented as a string that gets added to, it
+	// could also be represented as a stack that only gets condensed into a
+	// string when a finalized key is created. This could potentially reduce
+	// allocations.
+	prefix string
+	// Whether the map is flat or not. A map that is not flat will produce the
+	// following entries to the url.Values for a given key-value pair:
+	//	MapName.entry.1.KeyLocationName=mykey
+	//	MapName.entry.1.ValueLocationName=myvalue
+	// A map that is flat will produce the following:
+	//	MapName.1.KeyLocationName=mykey
+	//	MapName.1.ValueLocationName=myvalue
+	flat bool
+	// The location name of the key. In most cases this should be "key".
+	keyLocationName string
+	// The location name of the value. In most cases this should be "value".
+	valueLocationName string
+	// Elements are stored in values, so we keep track of the list size here.
+	size int32
+}
+
+func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map {
+	return &Map{
+		values:            values,
+		prefix:            prefix,
+		flat:              flat,
+		keyLocationName:   keyLocationName,
+		valueLocationName: valueLocationName,
+	}
+}
+
+// Key adds the given named key to the Query map.
+// Returns a Value encoder that should be used to encode a Query value type.
+func (m *Map) Key(name string) Value {
+	// Query lists start at 1, so adjust the size first
+	m.size++
+	var key string
+	var value string
+	if m.flat {
+		key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName)
+		value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName)
+	} else {
+		key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName)
+		value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName)
+	}
+
+	// The key can only be a string, so we just go ahead and set it here
+	newValue(m.values, key, false).String(name)
+
+	// Maps can't have flat members
+	return newValue(m.values, value, false)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
new file mode 100644
index 000000000..360344791
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
@@ -0,0 +1,62 @@
+package query
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the
+// operation serializer that will convert the query request body to a GET
+// operation with the query message in the HTTP request querystring.
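+//
+// A wiring sketch (stack is an assumed *middleware.Stack for an operation):
+//
+//	if err := AddAsGetRequestMiddleware(stack); err != nil {
+//		// handle error
+//	}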
+func AddAsGetRequestMiddleware(stack *middleware.Stack) error { + return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After) +} + +type asGetRequest struct{} + +func (*asGetRequest) ID() string { return "Query:AsGetRequest" } + +func (m *asGetRequest) HandleSerialize( + ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := input.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request) + } + + req.Method = "GET" + + // If the stream is not set, nothing else to do. + stream := req.GetStream() + if stream == nil { + return next.HandleSerialize(ctx, input) + } + + // Clear the stream since there will not be any body. + req.Header.Del("Content-Type") + req, err = req.SetStream(nil) + if err != nil { + return out, metadata, fmt.Errorf("unable update request body %w", err) + } + input.Request = req + + // Update request query with the body's query string value. + delim := "" + if len(req.URL.RawQuery) != 0 { + delim = "&" + } + + b, err := ioutil.ReadAll(stream) + if err != nil { + return out, metadata, fmt.Errorf("unable to get request body %w", err) + } + req.URL.RawQuery += delim + string(b) + + return next.HandleSerialize(ctx, input) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go new file mode 100644 index 000000000..6a99d4ea8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go @@ -0,0 +1,56 @@ +package query + +import ( + "fmt" + "net/url" +) + +// Object represents the encoding of Query structures and unions. A Query +// object is a representation of a mapping of string keys to arbitrary +// values where there is a fixed set of keys whose values each have their +// own known type. A serialized object might look like the following: +// +// ObjectName.Foo=value +// &ObjectName.Bar=5 +type Object struct { + // The query values to add the object to. + values url.Values + // The object's prefix, which includes the names of all parent structures + // and ends with the name of the object. For example, the prefix might be + // "ParentStructure.ObjectName". This prefix will be used to form the full + // keys for each member of the object. For example, a member might have the + // key "ParentStructure.ObjectName.MemberName". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string +} + +func newObject(values url.Values, prefix string) *Object { + return &Object{ + values: values, + prefix: prefix, + } +} + +// Key adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query value type. +func (o *Object) Key(name string) Value { + return o.key(name, false) +} + +// FlatKey adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query value type. The +// value will be flattened if it is a map or array. 
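+//
+// For example (illustrative; v is an assumed Value with prefix "Parent"):
+//
+//	o := v.Object()
+//	o.Key("Foo").String("x") // Parent.Foo=x
+//	list := o.FlatKey("Items").Array("member")
+//	list.Value().String("a") // flattened: Parent.Items.1=a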
+func (o *Object) FlatKey(name string) Value { + return o.key(name, true) +} + +func (o *Object) key(name string, flatValue bool) Value { + if o.prefix != "" { + return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) + } + return newValue(o.values, name, flatValue) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go new file mode 100644 index 000000000..302525ab1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go @@ -0,0 +1,106 @@ +package query + +import ( + "math/big" + "net/url" + + "github.com/aws/smithy-go/encoding/httpbinding" +) + +// Value represents a Query Value type. +type Value struct { + // The query values to add the value to. + values url.Values + // The value's key, which will form the prefix for complex types. + key string + // Whether the value should be flattened or not if it's a flattenable type. + flat bool + queryValue httpbinding.QueryValue +} + +func newValue(values url.Values, key string, flat bool) Value { + return Value{ + values: values, + key: key, + flat: flat, + queryValue: httpbinding.NewQueryValue(values, key, false), + } +} + +func newBaseValue(values url.Values) Value { + return Value{ + values: values, + queryValue: httpbinding.NewQueryValue(nil, "", false), + } +} + +// Array returns a new Array encoder. +func (qv Value) Array(locationName string) *Array { + return newArray(qv.values, qv.key, qv.flat, locationName) +} + +// Object returns a new Object encoder. +func (qv Value) Object() *Object { + return newObject(qv.values, qv.key) +} + +// Map returns a new Map encoder. +func (qv Value) Map(keyLocationName string, valueLocationName string) *Map { + return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName) +} + +// Base64EncodeBytes encodes v as a base64 query string value. +// This is intended to enable compatibility with the JSON encoder. 
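+//
+// For example (illustrative; v is an assumed Value):
+//
+//	v.Base64EncodeBytes([]byte("spam")) // stored as c3BhbQ==, URL-escaped on Encode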
+func (qv Value) Base64EncodeBytes(v []byte) { + qv.queryValue.Blob(v) +} + +// Boolean encodes v as a query string value +func (qv Value) Boolean(v bool) { + qv.queryValue.Boolean(v) +} + +// String encodes v as a query string value +func (qv Value) String(v string) { + qv.queryValue.String(v) +} + +// Byte encodes v as a query string value +func (qv Value) Byte(v int8) { + qv.queryValue.Byte(v) +} + +// Short encodes v as a query string value +func (qv Value) Short(v int16) { + qv.queryValue.Short(v) +} + +// Integer encodes v as a query string value +func (qv Value) Integer(v int32) { + qv.queryValue.Integer(v) +} + +// Long encodes v as a query string value +func (qv Value) Long(v int64) { + qv.queryValue.Long(v) +} + +// Float encodes v as a query string value +func (qv Value) Float(v float32) { + qv.queryValue.Float(v) +} + +// Double encodes v as a query string value +func (qv Value) Double(v float64) { + qv.queryValue.Double(v) +} + +// BigInteger encodes v as a query string value +func (qv Value) BigInteger(v *big.Int) { + qv.queryValue.BigInteger(v) +} + +// BigDecimal encodes v as a query string value +func (qv Value) BigDecimal(v *big.Float) { + qv.queryValue.BigDecimal(v) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go new file mode 100644 index 000000000..1bce78a4d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go @@ -0,0 +1,85 @@ +package restjson + +import ( + "encoding/json" + "io" + "strings" + + "github.com/aws/smithy-go" +) + +// GetErrorInfo util looks for code, __type, and message members in the +// json body. These members are optionally available, and the function +// returns the value of member if it is available. This function is useful to +// identify the error code, msg in a REST JSON error response. +func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) { + var errInfo struct { + Code string + Type string `json:"__type"` + Message string + } + + err = decoder.Decode(&errInfo) + if err != nil { + if err == io.EOF { + return errorType, message, nil + } + return errorType, message, err + } + + // assign error type + if len(errInfo.Code) != 0 { + errorType = errInfo.Code + } else if len(errInfo.Type) != 0 { + errorType = errInfo.Type + } + + // assign error message + if len(errInfo.Message) != 0 { + message = errInfo.Message + } + + // sanitize error + if len(errorType) != 0 { + errorType = SanitizeErrorCode(errorType) + } + + return errorType, message, nil +} + +// SanitizeErrorCode sanitizes the errorCode string . +// The rule for sanitizing is if a `:` character is present, then take only the +// contents before the first : character in the value. +// If a # character is present, then take only the contents after the +// first # character in the value. +func SanitizeErrorCode(errorCode string) string { + if strings.ContainsAny(errorCode, ":") { + errorCode = strings.SplitN(errorCode, ":", 2)[0] + } + + if strings.ContainsAny(errorCode, "#") { + errorCode = strings.SplitN(errorCode, "#", 2)[1] + } + + return errorCode +} + +// GetSmithyGenericAPIError returns smithy generic api error and an error interface. +// Takes in json decoder, and error Code string as args. The function retrieves error message +// and error code from the decoder body. If errorCode of length greater than 0 is passed in as +// an argument, it is used instead. 
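+//
+// A decoding sketch (body is an assumed JSON error payload):
+//
+//	dec := json.NewDecoder(bytes.NewReader(body))
+//	apiErr, err := GetSmithyGenericAPIError(dec, "")
+//	if err == nil {
+//		// apiErr.Code and apiErr.Message hold the sanitized error details
+//	}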
+func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) { + errorType, message, err := GetErrorInfo(decoder) + if err != nil { + return nil, err + } + + if len(errorCode) == 0 { + errorCode = errorType + } + + return &smithy.GenericAPIError{ + Code: errorCode, + Message: message, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go new file mode 100644 index 000000000..6975ce652 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go @@ -0,0 +1,48 @@ +package xml + +import ( + "encoding/xml" + "fmt" + "io" +) + +// ErrorComponents represents the error response fields +// that will be deserialized from an xml error response body +type ErrorComponents struct { + Code string + Message string + RequestID string +} + +// GetErrorResponseComponents returns the error fields from an xml error response body +func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) { + if noErrorWrapping { + var errResponse noWrappedErrorResponse + if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { + return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) + } + return ErrorComponents(errResponse), nil + } + + var errResponse wrappedErrorResponse + if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { + return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) + } + return ErrorComponents(errResponse), nil +} + +// noWrappedErrorResponse represents the error response body with +// no internal Error wrapping +type noWrappedErrorResponse struct { + Code string `xml:"Code"` + Message string `xml:"Message"` + RequestID string `xml:"RequestId"` +} + +// wrappedErrorResponse represents the error response body +// wrapped within Error +type wrappedErrorResponse struct { + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go new file mode 100644 index 000000000..974ef594f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go @@ -0,0 +1,96 @@ +package ratelimit + +import ( + "sync" +) + +// TokenBucket provides a concurrency safe utility for adding and removing +// tokens from the available token bucket. +type TokenBucket struct { + remainingTokens uint + maxCapacity uint + minCapacity uint + mu sync.Mutex +} + +// NewTokenBucket returns an initialized TokenBucket with the capacity +// specified. +func NewTokenBucket(i uint) *TokenBucket { + return &TokenBucket{ + remainingTokens: i, + maxCapacity: i, + minCapacity: 1, + } +} + +// Retrieve attempts to reduce the available tokens by the amount requested. If +// there are tokens available true will be returned along with the number of +// available tokens remaining. If amount requested is larger than the available +// capacity, false will be returned along with the available capacity. If the +// amount is less than the available capacity, the capacity will be reduced by +// that amount, and the remaining capacity and true will be returned. 
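+//
+// For example (illustrative):
+//
+//	tb := NewTokenBucket(500)
+//	if remaining, ok := tb.Retrieve(5); ok {
+//		_ = remaining // 495
+//		defer tb.Refund(5)
+//	}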
+func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if amount > t.remainingTokens {
+		return t.remainingTokens, false
+	}
+
+	t.remainingTokens -= amount
+	return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *TokenBucket) Refund(amount uint) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Capacity cannot exceed max capacity.
+	t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *TokenBucket) Capacity() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.maxCapacity
+}
+
+// Remaining returns the number of tokens that remain in the bucket.
+func (t *TokenBucket) Remaining() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *TokenBucket) Resize(size uint) uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = uintMax(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if max size reduced.
+	t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
+
+func uintMin(a, b uint) uint {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func uintMax(a, b uint) uint {
+	if a > b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
new file mode 100644
index 000000000..d89090ad3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
@@ -0,0 +1,83 @@
+package ratelimit
+
+import (
+	"context"
+	"fmt"
+)
+
+type rateToken struct {
+	tokenCost uint
+	bucket    *TokenBucket
+}
+
+func (t rateToken) release() error {
+	t.bucket.Refund(t.tokenCost)
+	return nil
+}
+
+// TokenRateLimit provides a Token Bucket RateLimiter implementation
+// that limits the overall number of retry attempts that can be made across
+// operation invocations.
+type TokenRateLimit struct {
+	bucket *TokenBucket
+}
+
+// NewTokenRateLimit returns a TokenRateLimit with default values.
+// Functional options can configure the retry rate limiter.
+func NewTokenRateLimit(tokens uint) *TokenRateLimit {
+	return &TokenRateLimit{
+		bucket: NewTokenBucket(tokens),
+	}
+}
+
+type canceledError struct {
+	Err error
+}
+
+func (c canceledError) CanceledError() bool { return true }
+func (c canceledError) Unwrap() error       { return c.Err }
+func (c canceledError) Error() string {
+	return fmt.Sprintf("canceled, %v", c.Err)
+}
+
+// GetToken may cause an available pool of retry quota to be
+// decremented. Will return an error if the decremented value cannot be
+// reduced from the retry quota.
+func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) {
+	select {
+	case <-ctx.Done():
+		return nil, canceledError{Err: ctx.Err()}
+	default:
+	}
+	if avail, ok := l.bucket.Retrieve(cost); !ok {
+		return nil, QuotaExceededError{Available: avail, Requested: cost}
+	}
+
+	return rateToken{
+		tokenCost: cost,
+		bucket:    l.bucket,
+	}.release, nil
+}
+
+// AddTokens increments the token bucket by a fixed amount.
+func (l *TokenRateLimit) AddTokens(v uint) error {
+	l.bucket.Refund(v)
+	return nil
+}
+
+// Remaining returns the number of remaining tokens in the bucket.
+func (l *TokenRateLimit) Remaining() uint {
+	return l.bucket.Remaining()
+}
+
+// QuotaExceededError provides the SDK error when the retries for a given
+// token bucket have been exhausted.
+type QuotaExceededError struct {
+	Available uint
+	Requested uint
+}
+
+func (e QuotaExceededError) Error() string {
+	return fmt.Sprintf("retry quota exceeded, %d available, %d requested",
+		e.Available, e.Requested)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
new file mode 100644
index 000000000..d8d00e615
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
@@ -0,0 +1,25 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// TODO remove replace with smithy.CanceledError
+
+// RequestCanceledError is the error that will be returned by an API request
+// that was canceled. Requests given a Context may return this error when
+// canceled.
+type RequestCanceledError struct {
+	Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*RequestCanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestCanceledError) Unwrap() error {
+	return e.Err
+}
+func (e *RequestCanceledError) Error() string {
+	return fmt.Sprintf("request canceled, %v", e.Err)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
new file mode 100644
index 000000000..4dfde8573
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
@@ -0,0 +1,156 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+const (
+	// DefaultRequestCost is the cost of a single request from the adaptive
+	// rate limited token bucket.
+	DefaultRequestCost uint = 1
+)
+
+// DefaultThrottles provides the set of errors considered throttle errors that
+// are checked by default.
+var DefaultThrottles = []IsErrorThrottle{
+	ThrottleErrorCode{
+		Codes: DefaultThrottleErrorCodes,
+	},
+}
+
+// AdaptiveModeOptions provides the functional options for configuring the
+// adaptive retry mode, and delay behavior.
+type AdaptiveModeOptions struct {
+	// If the adaptive token bucket is empty when an attempt is made,
+	// AdaptiveMode will sleep until a token is available. This can occur when
+	// attempts fail with throttle errors. Use this option to disable the
+	// sleep until a token is available, and return an error immediately.
+	FailOnNoAttemptTokens bool
+
+	// The cost of an attempt from the AdaptiveMode's adaptive token bucket.
+	RequestCost uint
+
+	// Set of strategies to determine if the attempt failed due to a throttle
+	// error.
+	//
+	// It is safe to append to this list in NewAdaptiveMode's functional options.
+	Throttles []IsErrorThrottle
+
+	// Set of options for standard retry mode that AdaptiveMode is built on top
+	// of. AdaptiveMode may apply its own defaults to Standard retry mode that
+	// are different than the defaults of NewStandard. Use these options to
+	// override the default options.
+	StandardOptions []func(*StandardOptions)
+}
+
+// AdaptiveMode provides an experimental retry strategy that expands on the
+// Standard retry strategy, adding client attempt rate limits. The attempt rate
+// limit is initially unrestricted, but becomes restricted when the attempt
+// fails with a throttle error. When restricted, AdaptiveMode may need to
+// sleep before an attempt is made, if too many throttles have been received.
+// AdaptiveMode's sleep can be canceled with context cancel. Set
+// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from sleep,
+// to fail fast.
+//
+// The unrestricted attempt rate limit will eventually be restored once
+// attempts are no longer failing due to throttle errors.
+type AdaptiveMode struct {
+	options   AdaptiveModeOptions
+	throttles IsErrorThrottles
+
+	retryer   aws.RetryerV2
+	rateLimit *adaptiveRateLimit
+}
+
+// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy.
+func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode {
+	o := AdaptiveModeOptions{
+		RequestCost: DefaultRequestCost,
+		Throttles:   append([]IsErrorThrottle{}, DefaultThrottles...),
+	}
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &AdaptiveMode{
+		options:   o,
+		throttles: IsErrorThrottles(o.Throttles),
+		retryer:   NewStandard(o.StandardOptions...),
+		rateLimit: newAdaptiveRateLimit(),
+	}
+}
+
+// IsErrorRetryable returns whether the failed attempt is retryable. This check
+// should determine if the error can be retried, or if the error is
+// terminal.
+func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
+	return a.retryer.IsErrorRetryable(err)
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for
+// an attempt before failing. A value of 0 implies that the attempt should
+// be retried until it succeeds if the errors are retryable.
+func (a *AdaptiveMode) MaxAttempts() int {
+	return a.retryer.MaxAttempts()
+}
+
+// RetryDelay returns the delay that should be used before retrying the
+// attempt. Will return an error if the delay could not be determined.
+func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) (
+	time.Duration, error,
+) {
+	return a.retryer.RetryDelay(attempt, opErr)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+// Returning the token release function, or error.
+func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) (
+	releaseToken func(error) error, err error,
+) {
+	return a.retryer.GetRetryToken(ctx, opErr)
+}
+
+// GetInitialToken returns the initial attempt token that can increment the
+// retry token pool if the attempt is successful.
+//
+// Deprecated: This method does not provide a way to block using Context,
+// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only
+// present to implement Retryer interface.
+func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
+	return nopRelease
+}
+
+// GetAttemptToken returns the attempt token that can be used to rate limit
+// attempt calls. Will be used by the SDK's retry package's Attempt
+// middleware to get an attempt token prior to making the attempt, and
+// releasing the attempt token after the attempt has been made.
+func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) {
+	for {
+		acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost)
+		if acquiredToken {
+			break
+		}
+		if a.options.FailOnNoAttemptTokens {
+			return nil, fmt.Errorf(
+				"unable to get attempt token, and FailOnNoAttemptTokens enabled")
+		}
+
+		if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil {
+			return nil, fmt.Errorf("failed to wait for token to be available, %w", err)
+		}
+	}
+
+	return a.handleResponse, nil
+}
+
+func (a *AdaptiveMode) handleResponse(opErr error) error {
+	throttled := a.throttles.IsErrorThrottle(opErr).Bool()
+
+	a.rateLimit.Update(throttled)
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
new file mode 100644
index 000000000..ad96d9b8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
@@ -0,0 +1,158 @@
+package retry
+
+import (
+	"math"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+type adaptiveRateLimit struct {
+	tokenBucketEnabled bool
+
+	smooth        float64
+	beta          float64
+	scaleConstant float64
+	minFillRate   float64
+
+	fillRate         float64
+	calculatedRate   float64
+	lastRefilled     time.Time
+	measuredTxRate   float64
+	lastTxRateBucket float64
+	requestCount     int64
+	lastMaxRate      float64
+	lastThrottleTime time.Time
+	timeWindow       float64
+
+	tokenBucket *adaptiveTokenBucket
+
+	mu sync.Mutex
+}
+
+func newAdaptiveRateLimit() *adaptiveRateLimit {
+	now := sdk.NowTime()
+	return &adaptiveRateLimit{
+		smooth:        0.8,
+		beta:          0.7,
+		scaleConstant: 0.4,
+
+		minFillRate: 0.5,
+
+		lastTxRateBucket: math.Floor(timeFloat64Seconds(now)),
+		lastThrottleTime: now,
+
+		tokenBucket: newAdaptiveTokenBucket(0),
+	}
+}
+
+func (a *adaptiveRateLimit) Enable(v bool) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	a.tokenBucketEnabled = v
+}
+
+func (a *adaptiveRateLimit) AcquireToken(amount uint) (
+	tokenAcquired bool, waitTryAgain time.Duration,
+) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	if !a.tokenBucketEnabled {
+		return true, 0
+	}
+
+	a.tokenBucketRefill()
+
+	available, ok := a.tokenBucket.Retrieve(float64(amount))
+	if !ok {
+		waitDur := float64Seconds((float64(amount) - available) / a.fillRate)
+		return false, waitDur
+	}
+
+	return true, 0
+}
+
+func (a *adaptiveRateLimit) Update(throttled bool) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	a.updateMeasuredRate()
+
+	if throttled {
+		rateToUse := a.measuredTxRate
+		if a.tokenBucketEnabled {
+			rateToUse = math.Min(a.measuredTxRate, a.fillRate)
+		}
+
+		a.lastMaxRate = rateToUse
+		a.calculateTimeWindow()
+		a.lastThrottleTime = sdk.NowTime()
+		a.calculatedRate = a.cubicThrottle(rateToUse)
+		a.tokenBucketEnabled = true
+	} else {
+		a.calculateTimeWindow()
+		a.calculatedRate = a.cubicSuccess(sdk.NowTime())
+	}
+
+	newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate)
+	a.tokenBucketUpdateRate(newRate)
+}
+
+func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 {
+	dt := secondsFloat64(t.Sub(a.lastThrottleTime))
+	return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate
+}
+
+func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 {
+	return rateToUse * a.beta
+}
+
+func (a *adaptiveRateLimit) calculateTimeWindow() {
+	a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.)
+} + +func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) { + a.tokenBucketRefill() + a.fillRate = math.Max(newRPS, a.minFillRate) + a.tokenBucket.Resize(newRPS) +} + +func (a *adaptiveRateLimit) updateMeasuredRate() { + now := sdk.NowTime() + timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2. + a.requestCount++ + + if timeBucket > a.lastTxRateBucket { + currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket) + a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth)) + a.requestCount = 0 + a.lastTxRateBucket = timeBucket + } +} + +func (a *adaptiveRateLimit) tokenBucketRefill() { + now := sdk.NowTime() + if a.lastRefilled.IsZero() { + a.lastRefilled = now + return + } + + fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate + a.tokenBucket.Refund(fillAmount) + a.lastRefilled = now +} + +func float64Seconds(v float64) time.Duration { + return time.Duration(v * float64(time.Second)) +} + +func secondsFloat64(v time.Duration) float64 { + return float64(v) / float64(time.Second) +} + +func timeFloat64Seconds(v time.Time) float64 { + return float64(v.UnixNano()) / float64(time.Second) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go new file mode 100644 index 000000000..052723e8e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go @@ -0,0 +1,83 @@ +package retry + +import ( + "math" + "sync" +) + +// adaptiveTokenBucket provides a concurrency safe utility for adding and +// removing tokens from the available token bucket. +type adaptiveTokenBucket struct { + remainingTokens float64 + maxCapacity float64 + minCapacity float64 + mu sync.Mutex +} + +// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the +// capacity specified. +func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket { + return &adaptiveTokenBucket{ + remainingTokens: i, + maxCapacity: i, + minCapacity: 1, + } +} + +// Retrieve attempts to reduce the available tokens by the amount requested. If +// there are tokens available true will be returned along with the number of +// available tokens remaining. If amount requested is larger than the available +// capacity, false will be returned along with the available capacity. If the +// amount is less than the available capacity, the capacity will be reduced by +// that amount, and the remaining capacity and true will be returned. +func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) { + t.mu.Lock() + defer t.mu.Unlock() + + if amount > t.remainingTokens { + return t.remainingTokens, false + } + + t.remainingTokens -= amount + return t.remainingTokens, true +} + +// Refund returns the amount of tokens back to the available token bucket, up +// to the initial capacity. +func (t *adaptiveTokenBucket) Refund(amount float64) { + t.mu.Lock() + defer t.mu.Unlock() + + // Capacity cannot exceed max capacity. + t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity) +} + +// Capacity returns the maximum capacity of tokens that the bucket could +// contain. +func (t *adaptiveTokenBucket) Capacity() float64 { + t.mu.Lock() + defer t.mu.Unlock() + + return t.maxCapacity +} + +// Remaining returns the number of tokens that remaining in the bucket. 
+func (t *adaptiveTokenBucket) Remaining() float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *adaptiveTokenBucket) Resize(size float64) float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = math.Max(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if max size reduced.
+	t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
new file mode 100644
index 000000000..3a08ebe0a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
@@ -0,0 +1,80 @@
+// Package retry provides interfaces and implementations for SDK request retry behavior.
+//
+// # Retryer Interface and Implementations
+//
+// This package defines the Retryer interface that is used to either implement custom retry behavior
+// or to extend the existing retry implementations provided by the SDK. This package provides a single
+// retry implementation: Standard.
+//
+// # Standard
+//
+// Standard is the default retryer implementation used by service clients. The standard retryer is a rate-limited
+// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs.
+// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client,
+// and uses an additional delay policy to limit the time between a request's subsequent attempts.
+//
+// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether
+// a given error is retryable. By default this list of retryables includes the following:
+// - Errors that implement the RetryableError method, and return true.
+// - Connection Errors
+// - Errors that implement a ConnectionError, Temporary, or Timeout method that returns true.
+// - Connection Reset Errors.
+// - net.OpErr types that are dialing errors or are temporary.
+// - HTTP Status Codes: 500, 502, 503, and 504.
+// - API Error Codes
+// - RequestTimeout, RequestTimeoutException
+// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
+// RequestThrottled, SlowDown, EC2ThrottledException
+// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
+// - TransactionInProgressException, PriorRequestNotComplete
+//
+// The standard retryer will not retry a request if the context associated with the request
+// has been canceled. Applications must handle this case explicitly if they wish to retry with a different context
+// value.
+//
+// You can configure the standard retryer implementation to fit your application by constructing a standard retryer
+// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions
+// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
+// and the retry delay policy.
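As a complement to the upstream MaxAttempts example that follows, here is a minimal sketch of tuning the other StandardOptions named above. It uses only identifiers vendored in this diff (retry.NewStandard, StandardOptions, ratelimit.NewTokenRateLimit); the quota and ceiling values are illustrative assumptions, not recommendations:

	package retryexample

	import (
		"time"

		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
		"github.com/aws/aws-sdk-go-v2/aws/retry"
	)

	// newTunedRetryer widens the client-wide retry token quota (the default is
	// 500 tokens) and lowers the backoff ceiling (the default is 20 seconds).
	func newTunedRetryer() aws.Retryer {
		return retry.NewStandard(func(o *retry.StandardOptions) {
			o.RateLimiter = ratelimit.NewTokenRateLimit(1000)
			o.MaxBackoff = 10 * time.Second
		})
	}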
+// +// For example to modify the default retry attempts for the standard retryer: +// +// // configure the custom retryer +// customRetry := retry.NewStandard(func(o *retry.StandardOptions) { +// o.MaxAttempts = 5 +// }) +// +// // create a service client with the retryer +// s3.NewFromConfig(cfg, func(o *s3.Options) { +// o.Retryer = customRetry +// }) +// +// # Utilities +// +// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic +// way. These are: +// +// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable +// in addition to those considered retryable by the provided retryer. +// +// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping +// a retryer implementation. +// +// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a +// request by wrapping a retryer implementation. +// +// The following package functions have been provided to easily satisfy different retry interfaces to further customize +// a given retryer's behavior: +// +// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example, +// you can use this method to easily create custom back off policies to be used with the +// standard retryer. +// +// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example, +// this can be used to extend the standard retryer to add additional logic to determine if an +// error should be retried. +// +// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example, +// this can be used to extend the standard retryer to add additional logic to determine if an +// error should be considered a timeout. +package retry diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go new file mode 100644 index 000000000..3e432eefe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go @@ -0,0 +1,20 @@ +package retry + +import "fmt" + +// MaxAttemptsError provides the error when the maximum number of attempts have +// been exceeded. +type MaxAttemptsError struct { + Attempt int + Err error +} + +func (e *MaxAttemptsError) Error() string { + return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err) +} + +// Unwrap returns the nested error causing the max attempts error. Provides the +// implementation for errors.Is and errors.As to unwrap nested errors. +func (e *MaxAttemptsError) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go new file mode 100644 index 000000000..c266996de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go @@ -0,0 +1,49 @@ +package retry + +import ( + "math" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/rand" + "github.com/aws/aws-sdk-go-v2/internal/timeconv" +) + +// ExponentialJitterBackoff provides backoff delays with jitter based on the +// number of attempts. +type ExponentialJitterBackoff struct { + maxBackoff time.Duration + // precomputed number of attempts needed to reach max backoff. 
+ maxBackoffAttempts float64 + + randFloat64 func() (float64, error) +} + +// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured +// for the max backoff. +func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff { + return &ExponentialJitterBackoff{ + maxBackoff: maxBackoff, + maxBackoffAttempts: math.Log2( + float64(maxBackoff) / float64(time.Second)), + randFloat64: rand.CryptoRandFloat64, + } +} + +// BackoffDelay returns the duration to wait before the next attempt should be +// made. Returns an error if unable get a duration. +func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) { + if attempt > int(j.maxBackoffAttempts) { + return j.maxBackoff, nil + } + + b, err := j.randFloat64() + if err != nil { + return 0, err + } + + // [0.0, 1.0) * 2 ^ attempts + ri := int64(1 << uint64(attempt)) + delaySeconds := b * float64(ri) + + return timeconv.FloatSecondsDur(delaySeconds), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go new file mode 100644 index 000000000..7a3f18301 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go @@ -0,0 +1,52 @@ +package retry + +import ( + awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" +) + +// attemptResultsKey is a metadata accessor key to retrieve metadata +// for all request attempts. +type attemptResultsKey struct { +} + +// GetAttemptResults retrieves attempts results from middleware metadata. +func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) { + m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults) + return m, ok +} + +// AttemptResults represents struct containing metadata returned by all request attempts. +type AttemptResults struct { + + // Results is a slice consisting attempt result from all request attempts. + // Results are stored in order request attempt is made. + Results []AttemptResult +} + +// AttemptResult represents attempt result returned by a single request attempt. +type AttemptResult struct { + + // Err is the error if received for the request attempt. + Err error + + // Retryable denotes if request may be retried. This states if an + // error is considered retryable. + Retryable bool + + // Retried indicates if this request was retried. + Retried bool + + // ResponseMetadata is any existing metadata passed via the response middlewares. 
+ ResponseMetadata middleware.Metadata +} + +// addAttemptResults adds attempt results to middleware metadata +func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) { + metadata.Set(attemptResultsKey{}, v) +} + +// GetRawResponse returns raw response recorded for the attempt result +func (a AttemptResult) GetRawResponse() interface{} { + return awsmiddle.GetRawResponse(a.ResponseMetadata) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go new file mode 100644 index 000000000..822fc920a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -0,0 +1,330 @@ +package retry + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/logging" + smithymiddle "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/transport/http" +) + +// RequestCloner is a function that can take an input request type and clone +// the request for use in a subsequent retry attempt. +type RequestCloner func(interface{}) interface{} + +type retryMetadata struct { + AttemptNum int + AttemptTime time.Time + MaxAttempts int + AttemptClockSkew time.Duration +} + +// Attempt is a Smithy Finalize middleware that handles retry attempts using +// the provided Retryer implementation. +type Attempt struct { + // Enable the logging of retry attempts performed by the SDK. This will + // include logging retry attempts, unretryable errors, and when max + // attempts are reached. + LogAttempts bool + + retryer aws.RetryerV2 + requestCloner RequestCloner +} + +// NewAttemptMiddleware returns a new Attempt retry middleware. +func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt { + m := &Attempt{ + retryer: wrapAsRetryerV2(retryer), + requestCloner: requestCloner, + } + for _, fn := range optFns { + fn(m) + } + return m +} + +// ID returns the middleware identifier +func (r *Attempt) ID() string { return "Retry" } + +func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) { + if !r.LogAttempts { + return + } + logger.Logf(classification, format, v...) +} + +// HandleFinalize utilizes the provider Retryer implementation to attempt +// retries over the next handler +func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) ( + out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error, +) { + var attemptNum int + var attemptClockSkew time.Duration + var attemptResults AttemptResults + + maxAttempts := r.retryer.MaxAttempts() + releaseRetryToken := nopRelease + + for { + attemptNum++ + attemptInput := in + attemptInput.Request = r.requestCloner(attemptInput.Request) + + // Record the metadata for the for attempt being started. 
+ attemptCtx := setRetryMetadata(ctx, retryMetadata{ + AttemptNum: attemptNum, + AttemptTime: sdk.NowTime().UTC(), + MaxAttempts: maxAttempts, + AttemptClockSkew: attemptClockSkew, + }) + + var attemptResult AttemptResult + out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next) + attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata) + + // AttemptResult Retried states that the attempt was not successful, and + // should be retried. + shouldRetry := attemptResult.Retried + + // Add attempt metadata to list of all attempt metadata + attemptResults.Results = append(attemptResults.Results, attemptResult) + + if !shouldRetry { + // Ensure the last response's metadata is used as the bases for result + // metadata returned by the stack. The Slice of attempt results + // will be added to this cloned metadata. + metadata = attemptResult.ResponseMetadata.Clone() + + break + } + } + + addAttemptResults(&metadata, attemptResults) + return out, metadata, err +} + +// handleAttempt handles an individual request attempt. +func (r *Attempt) handleAttempt( + ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler, +) ( + out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error, +) { + defer func() { + attemptResult.Err = err + }() + + // Short circuit if this attempt never can succeed because the context is + // canceled. This reduces the chance of token pools being modified for + // attempts that will not be made + select { + case <-ctx.Done(): + return out, attemptResult, nopRelease, ctx.Err() + default: + } + + //------------------------------ + // Get Attempt Token + //------------------------------ + releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx) + if err != nil { + return out, attemptResult, nopRelease, fmt.Errorf( + "failed to get retry Send token, %w", err) + } + + //------------------------------ + // Send Attempt + //------------------------------ + logger := smithymiddle.GetLogger(ctx) + service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx) + retryMetadata, _ := getRetryMetadata(ctx) + attemptNum := retryMetadata.AttemptNum + maxAttempts := retryMetadata.MaxAttempts + + // Following attempts must ensure the request payload stream starts in a + // rewound state. + if attemptNum > 1 { + if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok { + if rewindErr := rewindable.RewindStream(); rewindErr != nil { + return out, attemptResult, nopRelease, fmt.Errorf( + "failed to rewind transport stream for retry, %w", rewindErr) + } + } + + r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d", + service, operation, attemptNum) + } + + var metadata smithymiddle.Metadata + out, metadata, err = next.HandleFinalize(ctx, in) + attemptResult.ResponseMetadata = metadata + + //------------------------------ + // Bookkeeping + //------------------------------ + // Release the retry token based on the state of the attempt's error (if any). + if releaseError := releaseRetryToken(err); releaseError != nil && err != nil { + return out, attemptResult, nopRelease, fmt.Errorf( + "failed to release retry token after request error, %w", err) + } + // Release the attempt token based on the state of the attempt's error (if any). 
+ if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil { + return out, attemptResult, nopRelease, fmt.Errorf( + "failed to release initial token after request error, %w", err) + } + // If there was no error making the attempt, nothing further to do. There + // will be nothing to retry. + if err == nil { + return out, attemptResult, nopRelease, err + } + + //------------------------------ + // Is Retryable and Should Retry + //------------------------------ + // If the attempt failed with an unretryable error, nothing further to do + // but return, and inform the caller about the terminal failure. + retryable := r.retryer.IsErrorRetryable(err) + if !retryable { + r.logf(logger, logging.Debug, "request failed with unretryable error %v", err) + return out, attemptResult, nopRelease, err + } + + // set retryable to true + attemptResult.Retryable = true + + // Once the maximum number of attempts have been exhausted there is nothing + // further to do other than inform the caller about the terminal failure. + if maxAttempts > 0 && attemptNum >= maxAttempts { + r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts) + err = &MaxAttemptsError{ + Attempt: attemptNum, + Err: err, + } + return out, attemptResult, nopRelease, err + } + + //------------------------------ + // Get Retry (aka Retry Quota) Token + //------------------------------ + // Get a retry token that will be released after the + releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err) + if retryTokenErr != nil { + return out, attemptResult, nopRelease, retryTokenErr + } + + //------------------------------ + // Retry Delay and Sleep + //------------------------------ + // Get the retry delay before another attempt can be made, and sleep for + // that time. Potentially early exist if the sleep is canceled via the + // context. + retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err) + if reqErr != nil { + return out, attemptResult, releaseRetryToken, reqErr + } + if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil { + err = &aws.RequestCanceledError{Err: reqErr} + return out, attemptResult, releaseRetryToken, err + } + + // The request should be re-attempted. + attemptResult.Retried = true + + return out, attemptResult, releaseRetryToken, err +} + +// MetricsHeader attaches SDK request metric header for retries to the transport +type MetricsHeader struct{} + +// ID returns the middleware identifier +func (r *MetricsHeader) ID() string { + return "RetryMetricsHeader" +} + +// HandleFinalize attaches the SDK request metric header to the transport layer +func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) ( + out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error, +) { + retryMetadata, _ := getRetryMetadata(ctx) + + const retryMetricHeader = "Amz-Sdk-Request" + var parts []string + + parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum)) + if retryMetadata.MaxAttempts != 0 { + parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts)) + } + + var ttl time.Time + if deadline, ok := ctx.Deadline(); ok { + ttl = deadline + } + + // Only append the TTL if it can be determined. 
+ if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 { + const unixTimeFormat = "20060102T150405Z" + ttl = ttl.Add(retryMetadata.AttemptClockSkew) + parts = append(parts, "ttl="+ttl.Format(unixTimeFormat)) + } + + switch req := in.Request.(type) { + case *http.Request: + req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; ")) + default: + return out, metadata, fmt.Errorf("unknown transport type %T", req) + } + + return next.HandleFinalize(ctx, in) +} + +type retryMetadataKey struct{} + +// getRetryMetadata retrieves retryMetadata from the context and a bool +// indicating if it was set. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { + metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) + return metadata, ok +} + +// setRetryMetadata sets the retryMetadata on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context { + return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata) +} + +// AddRetryMiddlewaresOptions is the set of options that can be passed to +// AddRetryMiddlewares for configuring retry associated middleware. +type AddRetryMiddlewaresOptions struct { + Retryer aws.Retryer + + // Enable the logging of retry attempts performed by the SDK. This will + // include logging retry attempts, unretryable errors, and when max + // attempts are reached. + LogRetryAttempts bool +} + +// AddRetryMiddlewares adds retry middleware to operation middleware stack +func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error { + attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) { + middleware.LogAttempts = options.LogRetryAttempts + }) + + if err := stack.Finalize.Add(attempt, smithymiddle.After); err != nil { + return err + } + if err := stack.Finalize.Add(&MetricsHeader{}, smithymiddle.After); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go new file mode 100644 index 000000000..af81635b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go @@ -0,0 +1,90 @@ +package retry + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// AddWithErrorCodes returns a Retryer with additional error codes considered +// for determining if the error should be retried. +func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer { + retryable := &RetryableErrorCode{ + Codes: map[string]struct{}{}, + } + for _, c := range codes { + retryable.Codes[c] = struct{}{} + } + + return &withIsErrorRetryable{ + RetryerV2: wrapAsRetryerV2(r), + Retryable: retryable, + } +} + +type withIsErrorRetryable struct { + aws.RetryerV2 + Retryable IsErrorRetryable +} + +func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool { + if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary { + return v.Bool() + } + return r.RetryerV2.IsErrorRetryable(err) +} + +// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value +// specified. 
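A hedged sketch of composing the wrapper utilities from this file, including the AddWithMaxAttempts helper whose definition follows; the extra error code is a made-up placeholder, not a real service code:

	package retryexample

	import (
		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/aws/retry"
	)

	func wrappedRetryer() aws.Retryer {
		// Raise the attempt ceiling from the default of 3.
		r := retry.AddWithMaxAttempts(retry.NewStandard(), 5)
		// "MyServiceSlowDown" is a hypothetical code used only for illustration.
		return retry.AddWithErrorCodes(r, "MyServiceSlowDown")
	}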
+func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer {
+	return &withMaxAttempts{
+		RetryerV2: wrapAsRetryerV2(r),
+		Max:       max,
+	}
+}
+
+type withMaxAttempts struct {
+	aws.RetryerV2
+	Max int
+}
+
+func (w *withMaxAttempts) MaxAttempts() int {
+	return w.Max
+}
+
+// AddWithMaxBackoffDelay returns a retryer wrapping the passed in retryer
+// overriding the RetryDelay behavior with an alternate maximum backoff
+// delay.
+func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer {
+	return &withMaxBackoffDelay{
+		RetryerV2: wrapAsRetryerV2(r),
+		backoff:   NewExponentialJitterBackoff(delay),
+	}
+}
+
+type withMaxBackoffDelay struct {
+	aws.RetryerV2
+	backoff *ExponentialJitterBackoff
+}
+
+func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) {
+	return r.backoff.BackoffDelay(attempt, err)
+}
+
+type wrappedAsRetryerV2 struct {
+	aws.Retryer
+}
+
+func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 {
+	v, ok := r.(aws.RetryerV2)
+	if !ok {
+		v = wrappedAsRetryerV2{Retryer: r}
+	}
+
+	return v
+}
+
+func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) {
+	return w.Retryer.GetInitialToken(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
new file mode 100644
index 000000000..00d7d3eee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
@@ -0,0 +1,191 @@
+package retry
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorRetryable provides the interface of an implementation to determine if
+// an error from an operation is retryable.
+type IsErrorRetryable interface {
+	IsErrorRetryable(error) aws.Ternary
+}
+
+// IsErrorRetryables is a collection of checks to determine if the error is
+// retryable. Iterates through the checks and returns the state of retryable
+// if any check returns something other than unknown.
+type IsErrorRetryables []IsErrorRetryable
+
+// IsErrorRetryable returns if the error is retryable if any of the checks in
+// the list return a value other than unknown.
+func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary {
+	for _, re := range r {
+		if v := re.IsErrorRetryable(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface.
+type IsErrorRetryableFunc func(error) aws.Ternary
+
+// IsErrorRetryable returns if the error is retryable.
+func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary {
+	return fn(err)
+}
+
+// RetryableError is an IsErrorRetryable implementation which uses the
+// optional RetryableError method on the error value to determine if the error
+// is retryable.
+type RetryableError struct{}
+
+// IsErrorRetryable returns whether the attempt should be retried if the error
+// satisfies the RetryableError interface.
+func (RetryableError) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ RetryableError() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.RetryableError())
+}
+
+// NoRetryCanceledError detects if the error was a request canceled error and
+// reports that the request should not be retried.
+type NoRetryCanceledError struct{}
+
+// IsErrorRetryable returns that the error is not retryable if the request was
+// canceled.
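Since StandardOptions.Retryables is documented later in standard.go as safe to append to, here is a short sketch of extending the defaults with the RetryableHTTPStatusCode check defined just below; treating 429 as retryable is an illustrative choice, not SDK default behavior:

	package retryexample

	import (
		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/aws/retry"
	)

	// retryerWith429 also retries HTTP 429 responses, which the default
	// status-code list (500, 502, 503, 504) does not include.
	func retryerWith429() aws.Retryer {
		return retry.NewStandard(func(o *retry.StandardOptions) {
			o.Retryables = append(o.Retryables, retry.RetryableHTTPStatusCode{
				Codes: map[int]struct{}{429: {}},
			})
		})
	}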
+func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary { + var v interface{ CanceledError() bool } + + if !errors.As(err, &v) { + return aws.UnknownTernary + } + + if v.CanceledError() { + return aws.FalseTernary + } + return aws.UnknownTernary +} + +// RetryableConnectionError determines if the underlying error is an HTTP +// connection and returns if it should be retried. +// +// Includes errors such as connection reset, connection refused, net dial, +// temporary, and timeout errors. +type RetryableConnectionError struct{} + +// IsErrorRetryable returns if the error is caused by and HTTP connection +// error, and should be retried. +func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary { + if err == nil { + return aws.UnknownTernary + } + var retryable bool + + var conErr interface{ ConnectionError() bool } + var tempErr interface{ Temporary() bool } + var timeoutErr interface{ Timeout() bool } + var urlErr *url.Error + var netOpErr *net.OpError + var dnsError *net.DNSError + + switch { + case errors.As(err, &dnsError): + // NXDOMAIN errors should not be retried + retryable = !dnsError.IsNotFound && dnsError.IsTemporary + + case errors.As(err, &conErr) && conErr.ConnectionError(): + retryable = true + + case strings.Contains(err.Error(), "connection reset"): + retryable = true + + case errors.As(err, &urlErr): + // Refused connections should be retried as the service may not yet be + // running on the port. Go TCP dial considers refused connections as + // not temporary. + if strings.Contains(urlErr.Error(), "connection refused") { + retryable = true + } else { + return r.IsErrorRetryable(errors.Unwrap(urlErr)) + } + + case errors.As(err, &netOpErr): + // Network dial, or temporary network errors are always retryable. + if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() { + retryable = true + } else { + return r.IsErrorRetryable(errors.Unwrap(netOpErr)) + } + + case errors.As(err, &tempErr) && tempErr.Temporary(): + // Fallback to the generic temporary check, with temporary errors + // retryable. + retryable = true + + case errors.As(err, &timeoutErr) && timeoutErr.Timeout(): + // Fallback to the generic timeout check, with timeout errors + // retryable. + retryable = true + + default: + return aws.UnknownTernary + } + + return aws.BoolTernary(retryable) + +} + +// RetryableHTTPStatusCode provides a IsErrorRetryable based on HTTP status +// codes. +type RetryableHTTPStatusCode struct { + Codes map[int]struct{} +} + +// IsErrorRetryable return if the passed in error is retryable based on the +// HTTP status code. +func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary { + var v interface{ HTTPStatusCode() int } + + if !errors.As(err, &v) { + return aws.UnknownTernary + } + + _, ok := r.Codes[v.HTTPStatusCode()] + if !ok { + return aws.UnknownTernary + } + + return aws.TrueTernary +} + +// RetryableErrorCode determines if an attempt should be retried based on the +// API error code. +type RetryableErrorCode struct { + Codes map[string]struct{} +} + +// IsErrorRetryable return if the error is retryable based on the error codes. +// Returns unknown if the error doesn't have a code or it is unknown. 
+func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary { + var v interface{ ErrorCode() string } + + if !errors.As(err, &v) { + return aws.UnknownTernary + } + + _, ok := r.Codes[v.ErrorCode()] + if !ok { + return aws.UnknownTernary + } + + return aws.TrueTernary +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go new file mode 100644 index 000000000..25abffc81 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go @@ -0,0 +1,258 @@ +package retry + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/ratelimit" +) + +// BackoffDelayer provides the interface for determining the delay to before +// another request attempt, that previously failed. +type BackoffDelayer interface { + BackoffDelay(attempt int, err error) (time.Duration, error) +} + +// BackoffDelayerFunc provides a wrapper around a function to determine the +// backoff delay of an attempt retry. +type BackoffDelayerFunc func(int, error) (time.Duration, error) + +// BackoffDelay returns the delay before attempt to retry a request. +func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) { + return fn(attempt, err) +} + +const ( + // DefaultMaxAttempts is the maximum of attempts for an API request + DefaultMaxAttempts int = 3 + + // DefaultMaxBackoff is the maximum back off delay between attempts + DefaultMaxBackoff time.Duration = 20 * time.Second +) + +// Default retry token quota values. +const ( + DefaultRetryRateTokens uint = 500 + DefaultRetryCost uint = 5 + DefaultRetryTimeoutCost uint = 10 + DefaultNoRetryIncrement uint = 1 +) + +// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the SDK +// should consider as retryable errors. +var DefaultRetryableHTTPStatusCodes = map[int]struct{}{ + 500: {}, + 502: {}, + 503: {}, + 504: {}, +} + +// DefaultRetryableErrorCodes provides the set of API error codes that should +// be retried. +var DefaultRetryableErrorCodes = map[string]struct{}{ + "RequestTimeout": {}, + "RequestTimeoutException": {}, +} + +// DefaultThrottleErrorCodes provides the set of API error codes that are +// considered throttle errors. +var DefaultThrottleErrorCodes = map[string]struct{}{ + "Throttling": {}, + "ThrottlingException": {}, + "ThrottledException": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, + "ProvisionedThroughputExceededException": {}, + "TransactionInProgressException": {}, + "RequestLimitExceeded": {}, + "BandwidthLimitExceeded": {}, + "LimitExceededException": {}, + "RequestThrottled": {}, + "SlowDown": {}, + "PriorRequestNotComplete": {}, + "EC2ThrottledException": {}, +} + +// DefaultRetryables provides the set of retryable checks that are used by +// default. +var DefaultRetryables = []IsErrorRetryable{ + NoRetryCanceledError{}, + RetryableError{}, + RetryableConnectionError{}, + RetryableHTTPStatusCode{ + Codes: DefaultRetryableHTTPStatusCodes, + }, + RetryableErrorCode{ + Codes: DefaultRetryableErrorCodes, + }, + RetryableErrorCode{ + Codes: DefaultThrottleErrorCodes, + }, +} + +// DefaultTimeouts provides the set of timeout checks that are used by default. +var DefaultTimeouts = []IsErrorTimeout{ + TimeouterError{}, +} + +// StandardOptions provides the functional options for configuring the standard +// retryable, and delay behavior. +type StandardOptions struct { + // Maximum number of attempts that should be made. 
+ MaxAttempts int + + // MaxBackoff duration between retried attempts. + MaxBackoff time.Duration + + // Provides the backoff strategy the retryer will use to determine the + // delay between retry attempts. + Backoff BackoffDelayer + + // Set of strategies to determine if the attempt should be retried based on + // the error response received. + // + // It is safe to append to this list in NewStandard's functional options. + Retryables []IsErrorRetryable + + // Set of strategies to determine if the attempt failed due to a timeout + // error. + // + // It is safe to append to this list in NewStandard's functional options. + Timeouts []IsErrorTimeout + + // Provides the rate limiting strategy for rate limiting attempt retries + // across all attempts the retryer is being used with. + RateLimiter RateLimiter + + // The cost to deduct from the RateLimiter's token bucket per retry. + RetryCost uint + + // The cost to deduct from the RateLimiter's token bucket per retry caused + // by timeout error. + RetryTimeoutCost uint + + // The cost to payback to the RateLimiter's token bucket for successful + // attempts. + NoRetryIncrement uint +} + +// RateLimiter provides the interface for limiting the rate of attempt retries +// allowed by the retryer. +type RateLimiter interface { + GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error) + AddTokens(uint) error +} + +// Standard is the standard retry pattern for the SDK. It uses a set of +// retryable checks to determine of the failed attempt should be retried, and +// what retry delay should be used. +type Standard struct { + options StandardOptions + + timeout IsErrorTimeout + retryable IsErrorRetryable + backoff BackoffDelayer +} + +// NewStandard initializes a standard retry behavior with defaults that can be +// overridden via functional options. +func NewStandard(fnOpts ...func(*StandardOptions)) *Standard { + o := StandardOptions{ + MaxAttempts: DefaultMaxAttempts, + MaxBackoff: DefaultMaxBackoff, + Retryables: append([]IsErrorRetryable{}, DefaultRetryables...), + Timeouts: append([]IsErrorTimeout{}, DefaultTimeouts...), + + RateLimiter: ratelimit.NewTokenRateLimit(DefaultRetryRateTokens), + RetryCost: DefaultRetryCost, + RetryTimeoutCost: DefaultRetryTimeoutCost, + NoRetryIncrement: DefaultNoRetryIncrement, + } + for _, fn := range fnOpts { + fn(&o) + } + if o.MaxAttempts <= 0 { + o.MaxAttempts = DefaultMaxAttempts + } + + backoff := o.Backoff + if backoff == nil { + backoff = NewExponentialJitterBackoff(o.MaxBackoff) + } + + return &Standard{ + options: o, + backoff: backoff, + retryable: IsErrorRetryables(o.Retryables), + timeout: IsErrorTimeouts(o.Timeouts), + } +} + +// MaxAttempts returns the maximum number of attempts that can be made for a +// request before failing. +func (s *Standard) MaxAttempts() int { + return s.options.MaxAttempts +} + +// IsErrorRetryable returns if the error is can be retried or not. Should not +// consider the number of attempts made. +func (s *Standard) IsErrorRetryable(err error) bool { + return s.retryable.IsErrorRetryable(err).Bool() +} + +// RetryDelay returns the delay to use before another request attempt is made. +func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) { + return s.backoff.BackoffDelay(attempt, err) +} + +// GetAttemptToken returns the token to be released after then attempt completes. +// The release token will add NoRetryIncrement to the RateLimiter token pool if +// the attempt was successful. If the attempt failed, nothing will be done. 
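A hedged sketch of swapping the Backoff strategy through BackoffDelayerFunc, as the package doc suggests; the constant half-second delay is purely illustrative:

	package retryexample

	import (
		"time"

		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/aws/retry"
	)

	// fixedBackoffRetryer replaces the exponential-jitter default with a
	// constant half-second delay between attempts.
	func fixedBackoffRetryer() aws.Retryer {
		return retry.NewStandard(func(o *retry.StandardOptions) {
			o.Backoff = retry.BackoffDelayerFunc(func(attempt int, err error) (time.Duration, error) {
				return 500 * time.Millisecond, nil
			})
		})
	}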
+func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) { + return s.GetInitialToken(), nil +} + +// GetInitialToken returns a token for adding the NoRetryIncrement to the +// RateLimiter token if the attempt completed successfully without error. +// +// InitialToken applies to result of the each attempt, including the first. +// Whereas the RetryToken applies to the result of subsequent attempts. +// +// Deprecated: use GetAttemptToken instead. +func (s *Standard) GetInitialToken() func(error) error { + return releaseToken(s.noRetryIncrement).release +} + +func (s *Standard) noRetryIncrement() error { + return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement) +} + +// GetRetryToken attempts to deduct the retry cost from the retry token pool. +// Returning the token release function, or error. +func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) { + cost := s.options.RetryCost + + if s.timeout.IsErrorTimeout(opErr).Bool() { + cost = s.options.RetryTimeoutCost + } + + fn, err := s.options.RateLimiter.GetToken(ctx, cost) + if err != nil { + return nil, fmt.Errorf("failed to get rate limit token, %w", err) + } + + return releaseToken(fn).release, nil +} + +func nopRelease(error) error { return nil } + +type releaseToken func() error + +func (f releaseToken) release(err error) error { + if err != nil { + return nil + } + + return f() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go new file mode 100644 index 000000000..c4b844d15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go @@ -0,0 +1,60 @@ +package retry + +import ( + "errors" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// IsErrorThrottle provides the interface of an implementation to determine if +// a error response from an operation is a throttling error. +type IsErrorThrottle interface { + IsErrorThrottle(error) aws.Ternary +} + +// IsErrorThrottles is a collection of checks to determine of the error a +// throttle error. Iterates through the checks and returns the state of +// throttle if any check returns something other than unknown. +type IsErrorThrottles []IsErrorThrottle + +// IsErrorThrottle returns if the error is a throttle error if any of the +// checks in the list return a value other than unknown. +func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary { + for _, re := range r { + if v := re.IsErrorThrottle(err); v != aws.UnknownTernary { + return v + } + } + return aws.UnknownTernary +} + +// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface. +type IsErrorThrottleFunc func(error) aws.Ternary + +// IsErrorThrottle returns if the error is a throttle error. +func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary { + return fn(err) +} + +// ThrottleErrorCode determines if an attempt should be retried based on the +// API error code. +type ThrottleErrorCode struct { + Codes map[string]struct{} +} + +// IsErrorThrottle return if the error is a throttle error based on the error +// codes. Returns unknown if the error doesn't have a code or it is unknown. 
+func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary {
+	var v interface{ ErrorCode() string }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.ErrorCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
new file mode 100644
index 000000000..3d47870d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+	"errors"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorTimeout provides the interface of an implementation to determine if
+// an error is a timeout error.
+type IsErrorTimeout interface {
+	IsErrorTimeout(err error) aws.Ternary
+}
+
+// IsErrorTimeouts is a collection of checks to determine if the error is a
+// timeout. Iterates through the checks and returns the timeout state
+// if any check returns something other than unknown.
+type IsErrorTimeouts []IsErrorTimeout
+
+// IsErrorTimeout returns if the error is a timeout if any of the checks in
+// the list return a value other than unknown.
+func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary {
+	for _, t := range ts {
+		if v := t.IsErrorTimeout(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface.
+type IsErrorTimeoutFunc func(error) aws.Ternary
+
+// IsErrorTimeout returns if the error is a timeout.
+func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary {
+	return fn(err)
+}
+
+// TimeouterError provides the IsErrorTimeout implementation for determining if
+// an error is a timeout based on the error type implementing the Timeout method.
+type TimeouterError struct{}
+
+// IsErrorTimeout returns if the error is a timeout error.
+func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary {
+	var v interface{ Timeout() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.Timeout())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
new file mode 100644
index 000000000..b0ba4cb2f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
@@ -0,0 +1,127 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// RetryMode provides the mode the API client will use to create its
+// retryer.
+type RetryMode string
+
+const (
+	// RetryModeStandard model provides rate limited retry attempts with
+	// exponential backoff delay.
+	RetryModeStandard RetryMode = "standard"
+
+	// RetryModeAdaptive model provides attempt send rate limiting on throttle
+	// responses in addition to standard mode's retry rate limiting.
+	//
+	// Adaptive retry mode is experimental and is subject to change in the
+	// future.
+	RetryModeAdaptive RetryMode = "adaptive"
+)
+
+// ParseRetryMode attempts to parse a RetryMode from the given string.
+// Returns an error if the value is not a known RetryMode.
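A usage sketch for ParseRetryMode, whose implementation follows; reading the mode from AWS_RETRY_MODE mirrors the SDK's shared-config knob and is an assumption here:

	package retryexample

	import (
		"os"

		"github.com/aws/aws-sdk-go-v2/aws"
	)

	// retryModeFromEnv falls back to standard mode when AWS_RETRY_MODE is
	// unset or holds an unknown value.
	func retryModeFromEnv() aws.RetryMode {
		mode, err := aws.ParseRetryMode(os.Getenv("AWS_RETRY_MODE"))
		if err != nil {
			return aws.RetryModeStandard
		}
		return mode
	}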
+func ParseRetryMode(v string) (mode RetryMode, err error) { + switch v { + case "standard": + return RetryModeStandard, nil + case "adaptive": + return RetryModeAdaptive, nil + default: + return mode, fmt.Errorf("unknown RetryMode, %v", v) + } +} + +func (m RetryMode) String() string { return string(m) } + +// Retryer is an interface to determine if a given error from a +// attempt should be retried, and if so what backoff delay to apply. The +// default implementation used by most services is the retry package's Standard +// type. Which contains basic retry logic using exponential backoff. +type Retryer interface { + // IsErrorRetryable returns if the failed attempt is retryable. This check + // should determine if the error can be retried, or if the error is + // terminal. + IsErrorRetryable(error) bool + + // MaxAttempts returns the maximum number of attempts that can be made for + // an attempt before failing. A value of 0 implies that the attempt should + // be retried until it succeeds if the errors are retryable. + MaxAttempts() int + + // RetryDelay returns the delay that should be used before retrying the + // attempt. Will return error if the delay could not be determined. + RetryDelay(attempt int, opErr error) (time.Duration, error) + + // GetRetryToken attempts to deduct the retry cost from the retry token pool. + // Returning the token release function, or error. + GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error) + + // GetInitialToken returns the initial attempt token that can increment the + // retry token pool if the attempt is successful. + GetInitialToken() (releaseToken func(error) error) +} + +// RetryerV2 is an interface to determine if a given error from an attempt +// should be retried, and if so what backoff delay to apply. The default +// implementation used by most services is the retry package's Standard type. +// Which contains basic retry logic using exponential backoff. +// +// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken +// method in favor of GetAttemptToken which takes a context, and can return an error. +// +// The SDK's retry package's Attempt middleware, and utilities will always +// wrap a Retryer as a RetryerV2. Delegating to GetInitialToken, only if +// GetAttemptToken is not implemented. +type RetryerV2 interface { + Retryer + + // GetInitialToken returns the initial attempt token that can increment the + // retry token pool if the attempt is successful. + // + // Deprecated: This method does not provide a way to block using Context, + // nor can it return an error. Use RetryerV2, and GetAttemptToken instead. + GetInitialToken() (releaseToken func(error) error) + + // GetAttemptToken returns the send token that can be used to rate limit + // attempt calls. Will be used by the SDK's retry package's Attempt + // middleware to get a send token prior to calling the temp and releasing + // the send token after the attempt has been made. + GetAttemptToken(context.Context) (func(error) error, error) +} + +// NopRetryer provides a RequestRetryDecider implementation that will flag +// all attempt errors as not retryable, with a max attempts of 1. +type NopRetryer struct{} + +// IsErrorRetryable returns false for all error values. +func (NopRetryer) IsErrorRetryable(error) bool { return false } + +// MaxAttempts always returns 1 for the original attempt. +func (NopRetryer) MaxAttempts() int { return 1 } + +// RetryDelay is not valid for the NopRetryer. Will always return error. 
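A hedged sketch of opting out of retries entirely by plugging NopRetryer into a client's options; the s3 client mirrors the package doc example and stands in for any service client:

	package retryexample

	import (
		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/service/s3"
	)

	// clientWithoutRetries makes every request a single attempt; any error is
	// terminal.
	func clientWithoutRetries(cfg aws.Config) *s3.Client {
		return s3.NewFromConfig(cfg, func(o *s3.Options) {
			o.Retryer = aws.NopRetryer{}
		})
	}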
+func (NopRetryer) RetryDelay(int, error) (time.Duration, error) { + return 0, fmt.Errorf("not retrying any attempt errors") +} + +// GetRetryToken returns a stub function that does nothing. +func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) { + return nopReleaseToken, nil +} + +// GetInitialToken returns a stub function that does nothing. +func (NopRetryer) GetInitialToken() func(error) error { + return nopReleaseToken +} + +// GetAttemptToken returns a stub function that does nothing. +func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) { + return nopReleaseToken, nil +} + +func nopReleaseToken(error) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go new file mode 100644 index 000000000..3af9b2b33 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go @@ -0,0 +1,14 @@ +package aws + +// ExecutionEnvironmentID is the AWS execution environment runtime identifier. +type ExecutionEnvironmentID string + +// RuntimeEnvironment is a collection of values that are determined at runtime +// based on the environment that the SDK is executing in. Some of these values +// may or may not be present based on the executing environment and certain SDK +// configuration properties that drive whether these values are populated.. +type RuntimeEnvironment struct { + EnvironmentIdentifier ExecutionEnvironmentID + Region string + EC2InstanceMetadataRegion string +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go new file mode 100644 index 000000000..cbf22f1d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go @@ -0,0 +1,115 @@ +package v4 + +import ( + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +func lookupKey(service, region string) string { + var s strings.Builder + s.Grow(len(region) + len(service) + 3) + s.WriteString(region) + s.WriteRune('/') + s.WriteString(service) + return s.String() +} + +type derivedKey struct { + AccessKey string + Date time.Time + Credential []byte +} + +type derivedKeyCache struct { + values map[string]derivedKey + mutex sync.RWMutex +} + +func newDerivedKeyCache() derivedKeyCache { + return derivedKeyCache{ + values: make(map[string]derivedKey), + } +} + +func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte { + key := lookupKey(service, region) + s.mutex.RLock() + if cred, ok := s.get(key, credentials, signingTime.Time); ok { + s.mutex.RUnlock() + return cred + } + s.mutex.RUnlock() + + s.mutex.Lock() + if cred, ok := s.get(key, credentials, signingTime.Time); ok { + s.mutex.Unlock() + return cred + } + cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime) + entry := derivedKey{ + AccessKey: credentials.AccessKeyID, + Date: signingTime.Time, + Credential: cred, + } + s.values[key] = entry + s.mutex.Unlock() + + return cred +} + +func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) { + cacheEntry, ok := s.retrieveFromCache(key) + if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) { + return cacheEntry.Credential, true + } + return nil, false +} + +func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) { + if v, ok := s.values[key]; ok { + return v, 
true + } + return derivedKey{}, false +} + +// SigningKeyDeriver derives a signing key from a set of credentials +type SigningKeyDeriver struct { + cache derivedKeyCache +} + +// NewSigningKeyDeriver returns a new SigningKeyDeriver +func NewSigningKeyDeriver() *SigningKeyDeriver { + return &SigningKeyDeriver{ + cache: newDerivedKeyCache(), + } +} + +// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing. +func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte { + return k.cache.Get(credential, service, region, signingTime) +} + +func deriveKey(secret, service, region string, t SigningTime) []byte { + hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat())) + hmacRegion := HMACSHA256(hmacDate, []byte(region)) + hmacService := HMACSHA256(hmacRegion, []byte(service)) + return HMACSHA256(hmacService, []byte("aws4_request")) +} + +func isSameDay(x, y time.Time) bool { + xYear, xMonth, xDay := x.Date() + yYear, yMonth, yDay := y.Date() + + if xYear != yYear { + return false + } + + if xMonth != yMonth { + return false + } + + return xDay == yDay +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go new file mode 100644 index 000000000..a23cb003b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go @@ -0,0 +1,40 @@ +package v4 + +// Signature Version 4 (SigV4) Constants +const ( + // EmptyStringSHA256 is the hex encoded sha256 value of an empty string + EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` + + // UnsignedPayload indicates that the request payload body is unsigned + UnsignedPayload = "UNSIGNED-PAYLOAD" + + // AmzAlgorithmKey indicates the signing algorithm + AmzAlgorithmKey = "X-Amz-Algorithm" + + // AmzSecurityTokenKey indicates the security token to be used with temporary credentials + AmzSecurityTokenKey = "X-Amz-Security-Token" + + // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z' + AmzDateKey = "X-Amz-Date" + + // AmzCredentialKey is the access key ID and credential scope + AmzCredentialKey = "X-Amz-Credential" + + // AmzSignedHeadersKey is the set of headers signed for the request + AmzSignedHeadersKey = "X-Amz-SignedHeaders" + + // AmzSignatureKey is the query parameter to store the SigV4 signature + AmzSignatureKey = "X-Amz-Signature" + + // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter + TimeFormat = "20060102T150405Z" + + // ShortTimeFormat is the shorten time format used in the credential scope + ShortTimeFormat = "20060102" + + // ContentSHAKey is the SHA256 of request body + ContentSHAKey = "X-Amz-Content-Sha256" + + // StreamingEventsPayload indicates that the request payload body is a signed event stream. 
+	StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
new file mode 100644
index 000000000..c61955ad5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
+// Rules houses a set of Rule values needed for validation of a
+// string value
+type Rules []Rule
+
+// Rule interface allows for more flexible rules and simply
+// checks whether a value adheres to that Rule
+type Rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid iterates through all rules to see if any
+// apply to the value, and supports nested rules
+func (r Rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapRule is a generic Rule for maps
+type MapRule map[string]struct{}
+
+// IsValid for the map Rule checks whether the value exists in the map
+func (m MapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// AllowList is a generic Rule for include listing
+type AllowList struct {
+	Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList
+func (w AllowList) IsValid(value string) bool {
+	return w.Rule.IsValid(value)
+}
+
+// ExcludeList is a generic Rule for exclude listing
+type ExcludeList struct {
+	Rule
+}
+
+// IsValid for ExcludeList checks that the value is not within the ExcludeList
+func (b ExcludeList) IsValid(value string) bool {
+	return !b.Rule.IsValid(value)
+}
+
+// Patterns is a list of strings to match against
+type Patterns []string
+
+// IsValid for Patterns checks each pattern and returns if a match has
+// been found
+func (p Patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if sdkstrings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// InclusiveRules allows rules to depend on one another
+type InclusiveRules []Rule
+
+// IsValid will return true if all rules are true
+func (r InclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
new file mode 100644
index 000000000..64c4c4845
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
@@ -0,0 +1,69 @@
+package v4
+
+// IgnoredHeaders is a list of headers that are ignored during signing
+var IgnoredHeaders = Rules{
+	ExcludeList{
+		MapRule{
+			"Authorization":   struct{}{},
+			"User-Agent":      struct{}{},
+			"X-Amzn-Trace-Id": struct{}{},
+			"Expect":          struct{}{},
+		},
+	},
+}
+
+// RequiredSignedHeaders is an allow list for Build canonical headers.
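These rule types live in an internal package, so the sketch below re-creates the same composition pattern to trace how lists like the IgnoredHeaders above and the RequiredSignedHeaders that follows classify a header; expected results are in the comments:

	package rulesexample

	// Rule, MapRule, and ExcludeList re-create the primitives defined above so
	// their evaluation can be traced outside the internal package.
	type Rule interface{ IsValid(string) bool }

	type MapRule map[string]struct{}

	func (m MapRule) IsValid(v string) bool { _, ok := m[v]; return ok }

	type ExcludeList struct{ Rule }

	func (b ExcludeList) IsValid(v string) bool { return !b.Rule.IsValid(v) }

	// ignored mirrors IgnoredHeaders: IsValid reports whether a header is
	// eligible for signing.
	var ignored = ExcludeList{MapRule{"User-Agent": {}, "Expect": {}}}

	var (
		_ = ignored.IsValid("User-Agent")   // false: skipped during signing
		_ = ignored.IsValid("Content-Type") // true: may be signed
	)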
+var RequiredSignedHeaders = Rules{ + AllowList{ + MapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + "X-Amz-Tagging": struct{}{}, + }, + }, + Patterns{"X-Amz-Object-Lock-"}, + Patterns{"X-Amz-Meta-"}, +} + +// AllowedQueryHoisting is a allowed list for Build query headers. The boolean value +// represents whether or not it is a pattern. +var AllowedQueryHoisting = InclusiveRules{ + ExcludeList{RequiredSignedHeaders}, + Patterns{"X-Amz-"}, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go new file mode 100644 index 000000000..e7fa7a1b1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go @@ -0,0 +1,13 @@ +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" +) + +// HMACSHA256 computes a HMAC-SHA256 of data given the provided key. +func HMACSHA256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go new file mode 100644 index 000000000..bf93659a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go @@ -0,0 +1,75 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. 
IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go new file mode 100644 index 000000000..fc7887909 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go @@ -0,0 +1,13 @@ +package v4 + +import "strings" + +// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope +func BuildCredentialScope(signingTime SigningTime, region, service string) string { + return strings.Join([]string{ + signingTime.ShortTimeFormat(), + region, + service, + "aws4_request", + }, "/") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go new file mode 100644 index 000000000..1de06a765 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go @@ -0,0 +1,36 @@ +package v4 + +import "time" + +// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. +type SigningTime struct { + time.Time + timeFormat string + shortTimeFormat string +} + +// NewSigningTime creates a new SigningTime given a time.Time +func NewSigningTime(t time.Time) SigningTime { + return SigningTime{ + Time: t, + } +} + +// TimeFormat provides a time formatted in the X-Amz-Date format. +func (m *SigningTime) TimeFormat() string { + return m.format(&m.timeFormat, TimeFormat) +} + +// ShortTimeFormat provides a time formatted as 20060102. +func (m *SigningTime) ShortTimeFormat() string { + return m.format(&m.shortTimeFormat, ShortTimeFormat) +} + +func (m *SigningTime) format(target *string, format string) string { + if len(*target) > 0 { + return *target + } + v := m.Time.Format(format) + *target = v + return v +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go new file mode 100644 index 000000000..d025dbaa0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go @@ -0,0 +1,80 @@ +package v4 + +import ( + "net/url" + "strings" +) + +const doubleSpace = " " + +// StripExcessSpaces trims leading and trailing spaces from the passed-in +// string and collapses multiple side-by-side spaces into one.
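BuildCredentialScope and HMACSHA256 above are the building blocks of the SigV4 signing-key derivation. As a worked illustration, the chain below follows the public SigV4 specification, which is what the SigningKeyDeriver used later in this diff computes internally; the secret key, region, and service are placeholders.

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// hmacSHA256 mirrors the HMACSHA256 helper above.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "EXAMPLE-SECRET-KEY" // placeholder, never a real credential
	t := time.Date(2023, 9, 21, 12, 0, 0, 0, time.UTC)
	shortDate := t.Format("20060102") // the ShortTimeFormat used in the credential scope

	// The SigV4 signing key is derived by chaining HMAC-SHA256 over the
	// date, region, service, and the fixed "aws4_request" terminator.
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(shortDate))
	kRegion := hmacSHA256(kDate, []byte("eu-central-1"))
	kService := hmacSHA256(kRegion, []byte("s3"))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	fmt.Println("signing key:", hex.EncodeToString(kSigning))
	// The matching credential scope, as produced by BuildCredentialScope:
	fmt.Println("scope:", shortDate+"/eu-central-1/s3/aws4_request")
}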
+func StripExcessSpaces(str string) string { + var j, k, l, m, spaces int + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + return str + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ + } + } + + return string(buf[:m]) +} + +// GetURIPath returns the escaped URI component from the provided URL. +func GetURIPath(u *url.URL) string { + var uriPath string + + if len(u.Opaque) > 0 { + const schemeSep, pathSep, queryStart = "//", "/", "?" + + opaque := u.Opaque + // Cut off the query string if present. + if idx := strings.Index(opaque, queryStart); idx >= 0 { + opaque = opaque[:idx] + } + + // Cut out the scheme separator if present. + if strings.Index(opaque, schemeSep) == 0 { + opaque = opaque[len(schemeSep):] + } + + // Capture the URI path starting with the first path separator. + if idx := strings.Index(opaque, pathSep); idx >= 0 { + uriPath = opaque[idx:] + } + } else { + uriPath = u.EscapedPath() + } + + if len(uriPath) == 0 { + uriPath = "/" + } + + return uriPath +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go new file mode 100644 index 000000000..749bda69e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -0,0 +1,395 @@ +package v4 + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const computePayloadHashMiddlewareID = "ComputePayloadHash" + +// HashComputationError indicates an error occurred while computing the signing hash +type HashComputationError struct { + Err error +} + +// Error is the error message +func (e *HashComputationError) Error() string { + return fmt.Sprintf("failed to compute payload hash: %v", e.Err) +} + +// Unwrap returns the underlying error if one is set +func (e *HashComputationError) Unwrap() error { + return e.Err +} + +// SigningError indicates an error condition occurred while performing SigV4 signing +type SigningError struct { + Err error +} + +func (e *SigningError) Error() string { + return fmt.Sprintf("failed to sign request: %v", e.Err) +} + +// Unwrap returns the underlying error cause +func (e *SigningError) Unwrap() error { + return e.Err +} + +// UseDynamicPayloadSigningMiddleware swaps the compute payload sha256 middleware with a resolver middleware that +// switches between unsigned and signed payload based on the TLS state of the request. +// This middleware should not be used for AWS APIs that do not support unsigned payload signing auth. +// By default, the SDK uses this middleware for known AWS APIs that support such TLS-based auth selection. +// +// Usage example: +// the S3 PutObject API allows unsigned payload signing auth when TLS is enabled, and uses this middleware to +// dynamically switch between unsigned and signed payload based on the TLS state of the request.
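As a quick illustration of what StripExcessSpaces does when canonicalizing header values, the sketch below approximates it with the standard library. Note this is an approximation: the original collapses only the space character, while strings.Fields also splits on other whitespace.

package main

import (
	"fmt"
	"strings"
)

// stripExcessSpaces approximates the StripExcessSpaces helper above:
// trim the ends and collapse runs of spaces to a single space.
func stripExcessSpaces(s string) string {
	return strings.Join(strings.Fields(s), " ")
}

func main() {
	fmt.Printf("%q\n", stripExcessSpaces("  host:   example.com  ")) // "host: example.com"
}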
+func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) + return err +} + +// dynamicPayloadSigningMiddleware dynamically resolves the middleware that computes and sets the payload sha256. +type dynamicPayloadSigningMiddleware struct { +} + +// ID returns the resolver identifier +func (m *dynamicPayloadSigningMiddleware) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild sets a resolver that directs to the payload sha256 compute handler. +func (m *dynamicPayloadSigningMiddleware) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + // if TLS is enabled, use unsigned payload when supported + if req.IsHTTPS() { + return (&unsignedPayload{}).HandleBuild(ctx, in, next) + } + + // else fall back to signed payload + return (&computePayloadSHA256{}).HandleBuild(ctx, in, next) +} + +// unsignedPayload sets the SigV4 request payload hash to unsigned. +// +// Will not set the unsigned payload magic SHA value if a SHA has already been +// stored in the context (e.g. the application pre-computed the SHA256 before +// making the API call). +// +// This middleware does not check the X-Amz-Content-Sha256 header; if that +// header is serialized, a middleware must translate it into the context. +type unsignedPayload struct{} + +// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation +// middleware stack +func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { + return stack.Build.Add(&unsignedPayload{}, middleware.After) +} + +// ID returns the unsignedPayload identifier +func (m *unsignedPayload) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild sets the payload hash to be an unsigned payload +func (m *unsignedPayload) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + // This should not compute the content SHA256 if the value is already + // known (e.g. the application pre-computed the SHA256 before making the + // API call). There is no tight coupling to the X-Amz-Content-Sha256 + // header; if that header is provided, a middleware must translate it + // into the context. + contentSHA := GetPayloadHash(ctx) + if len(contentSHA) == 0 { + contentSHA = v4Internal.UnsignedPayload + } + + ctx = SetPayloadHash(ctx, contentSHA) + return next.HandleBuild(ctx, in) +} + +// computePayloadSHA256 computes the SHA256 payload hash to sign. +// +// Will not set the unsigned payload magic SHA value if a SHA has already been +// stored in the context (e.g. the application pre-computed the SHA256 before +// making the API call). +// +// This middleware does not check the X-Amz-Content-Sha256 header; if that +// header is serialized, a middleware must translate it into the context.
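The exported helpers around these middleware types are typically reached through a client's APIOptions. A hedged sketch, assuming an S3 client built from the default config, that forces UNSIGNED-PAYLOAD for all operations of the client by swapping the payload-hash middleware; error handling is kept minimal.

package main

import (
	"context"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// newUnsignedPayloadClient returns an S3 client whose requests use the
// UNSIGNED-PAYLOAD hash instead of computing a SHA256 over the body.
func newUnsignedPayloadClient(ctx context.Context) (*s3.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.APIOptions = append(o.APIOptions,
			v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware)
	}), nil
}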
+type computePayloadSHA256 struct{} + +// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the +// operation middleware stack +func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { + return stack.Build.Add(&computePayloadSHA256{}, middleware.After) +} + +// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the +// operation middleware stack +func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { + _, err := stack.Build.Remove(computePayloadHashMiddlewareID) + return err +} + +// ID is the middleware name +func (m *computePayloadSHA256) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild computes the payload hash for the request payload +func (m *computePayloadSHA256) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("unexpected request middleware type %T", in.Request), + } + } + + // This should not compute the content SHA256 if the value is already + // known (e.g. the application pre-computed the SHA256 before making the + // API call). There is no tight coupling to the X-Amz-Content-Sha256 + // header; if that header is provided, a middleware must translate it + // into the context. + if contentSHA := GetPayloadHash(ctx); len(contentSHA) != 0 { + return next.HandleBuild(ctx, in) + } + + hash := sha256.New() + if stream := req.GetStream(); stream != nil { + _, err = io.Copy(hash, stream) + if err != nil { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("failed to compute payload hash, %w", err), + } + } + + if err := req.RewindStream(); err != nil { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("failed to seek body to start, %w", err), + } + } + } + + ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) + + return next.HandleBuild(ctx, in) +} + +// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the +// ComputePayloadSHA256 middleware with the UnsignedPayload middleware. +// +// Use this to disable computing the Payload SHA256 checksum and instead use +// UNSIGNED-PAYLOAD for the SHA256 value. +func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) + return err +} + +// contentSHA256Header sets the X-Amz-Content-Sha256 header value to +// the payload hash stored in the context. +type contentSHA256Header struct{} + +// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the +// operation middleware stack +func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { + return stack.Build.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) +} + +// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware +// from the operation middleware stack +func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Remove((*contentSHA256Header)(nil).ID()) + return err +} + +// ID returns the ContentSHA256HeaderMiddleware identifier +func (m *contentSHA256Header) ID() string { + return "SigV4ContentSHA256Header" +} + +// HandleBuild sets the X-Amz-Content-Sha256 header value to the payload hash +// stored in the context.
+func (m *contentSHA256Header) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} + } + + req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx)) + + return next.HandleBuild(ctx, in) +} + +// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware. +type SignHTTPRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + Signer HTTPSigner + LogSigning bool +} + +// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 HTTP Signing +type SignHTTPRequestMiddleware struct { + credentialsProvider aws.CredentialsProvider + signer HTTPSigner + logSigning bool +} + +// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests +func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { + return &SignHTTPRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + signer: options.Signer, + logSigning: options.LogSigning, + } +} + +// ID is the SignHTTPRequestMiddleware identifier +func (s *SignHTTPRequestMiddleware) ID() string { + return "Signing" +} + +// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme +func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if !haveCredentialProvider(s.credentialsProvider) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} + } + + signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx) + payloadHash := GetPayloadHash(ctx) + if len(payloadHash) == 0 { + return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} + } + + credentials, err := s.credentialsProvider.Retrieve(ctx) + if err != nil { + return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} + } + + err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), + func(o *SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning = s.logSigning + }) + if err != nil { + return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} + } + + ctx = awsmiddleware.SetSigningCredentials(ctx, credentials) + + return next.HandleFinalize(ctx, in) +} + +type streamingEventsPayload struct{} + +// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. 
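GetPayloadHash and SetPayloadHash (defined further down in this file) let application middleware pre-seed the payload hash so the compute middleware and the signer skip recomputing it. A sketch of such a middleware follows; the middleware ID and the hash value are illustrative, and the insert position relies on the "ComputePayloadHash" ID shown above.

package main

import (
	"context"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/smithy-go/middleware"
)

// precomputedHash stores an application-computed payload SHA256 in the
// stack-scoped context before the ComputePayloadHash middleware runs.
type precomputedHash struct{ hash string }

func (m *precomputedHash) ID() string { return "PrecomputedPayloadHash" }

func (m *precomputedHash) HandleBuild(
	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
) (middleware.BuildOutput, middleware.Metadata, error) {
	// SetPayloadHash stores the hex-encoded SHA-256 in stack values.
	ctx = v4.SetPayloadHash(ctx, m.hash)
	return next.HandleBuild(ctx, in)
}

// addPrecomputedHash registers the middleware before ComputePayloadHash so
// the pre-seeded value is found and recomputation is skipped.
func addPrecomputedHash(stack *middleware.Stack, hash string) error {
	return stack.Build.Insert(&precomputedHash{hash: hash}, "ComputePayloadHash", middleware.Before)
}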
+func AddStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Build.Add(&streamingEventsPayload{}, middleware.After) +} + +func (s *streamingEventsPayload) ID() string { + return computePayloadHashMiddlewareID +} + +func (s *streamingEventsPayload) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + contentSHA := GetPayloadHash(ctx) + if len(contentSHA) == 0 { + contentSHA = v4Internal.StreamingEventsPayload + } + + ctx = SetPayloadHash(ctx, contentSHA) + + return next.HandleBuild(ctx, in) +} + +// GetSignedRequestSignature attempts to extract the signature of the request, +// returning an error if the request is unsigned or the signature cannot be +// extracted. +func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + const authHeaderSignatureElem = "Signature=" + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func haveCredentialProvider(p aws.CredentialsProvider) bool { + if p == nil { + return false + } + + return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil)) +} + +type payloadHashKey struct{} + +// GetPayloadHash retrieves the payload hash to use for signing +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetPayloadHash(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string) + return v +} + +// SetPayloadHash sets the payload hash to be used for signing the request +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetPayloadHash(ctx context.Context, hash string) context.Context { + return middleware.WithStackValue(ctx, payloadHashKey{}, hash) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go new file mode 100644 index 000000000..e1a066512 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go @@ -0,0 +1,127 @@ +package v4 + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + smithyHTTP "github.com/aws/smithy-go/transport/http" +) + +// HTTPPresigner is an interface to a SigV4 signer that can create a +// presigned URL for an HTTP request. +type HTTPPresigner interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignedHTTPRequest provides the URL and signed headers that are included +// in the presigned URL.
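A small sketch of GetSignedRequestSignature in use, for example to obtain the seed signature for an event-stream signer. The Authorization header value is shortened and purely illustrative, not a real signature.

package main

import (
	"fmt"
	"net/http"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.amazonaws.com/", nil)
	req.Header.Set("Authorization",
		"AWS4-HMAC-SHA256 Credential=AKID/20230921/eu-central-1/s3/aws4_request, "+
			"SignedHeaders=host;x-amz-date, Signature=deadbeef")

	// Extracts and hex-decodes the Signature element of the header.
	sig, err := v4.GetSignedRequestSignature(req)
	if err != nil {
		fmt.Println("unsigned request:", err)
		return
	}
	fmt.Printf("signature bytes: %x\n", sig)
}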
+type PresignedHTTPRequest struct { + URL string + Method string + SignedHeader http.Header +} + +// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. +type PresignHTTPRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + Presigner HTTPPresigner + LogSigning bool +} + +// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a +// presigned URL for an HTTP request. +// +// Will short circuit the middleware stack and not forward onto the next +// Finalize handler. +type PresignHTTPRequestMiddleware struct { + credentialsProvider aws.CredentialsProvider + presigner HTTPPresigner + logSigning bool +} + +// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware +// initialized with the presigner. +func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { + return &PresignHTTPRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + presigner: options.Presigner, + logSigning: options.LogSigning, + } +} + +// ID provides the middleware ID. +func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } + +// HandleFinalize will take the provided input and create a presigned URL for +// the HTTP request using the SigV4 presign authentication scheme. +// +// Since the signed request is not a valid HTTP request, it is not sent; the +// middleware short-circuits the stack and returns the presigned URL and +// signed headers as the result. +func (s *PresignHTTPRequestMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyHTTP.Request) + if !ok { + return out, metadata, &SigningError{ + Err: fmt.Errorf("unexpected request middleware type %T", in.Request), + } + } + + httpReq := req.Build(ctx) + if !haveCredentialProvider(s.credentialsProvider) { + out.Result = &PresignedHTTPRequest{ + URL: httpReq.URL.String(), + Method: httpReq.Method, + SignedHeader: http.Header{}, + } + + return out, metadata, nil + } + + signingName := awsmiddleware.GetSigningName(ctx) + signingRegion := awsmiddleware.GetSigningRegion(ctx) + payloadHash := GetPayloadHash(ctx) + if len(payloadHash) == 0 { + return out, metadata, &SigningError{ + Err: fmt.Errorf("computed payload hash missing from context"), + } + } + + credentials, err := s.credentialsProvider.Retrieve(ctx) + if err != nil { + return out, metadata, &SigningError{ + Err: fmt.Errorf("failed to retrieve credentials: %w", err), + } + } + + u, h, err := s.presigner.PresignHTTP(ctx, credentials, + httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(), + func(o *SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning = s.logSigning + }) + if err != nil { + return out, metadata, &SigningError{ + Err: fmt.Errorf("failed to sign http request, %w", err), + } + } + + out.Result = &PresignedHTTPRequest{ + URL: u, + Method: httpReq.Method, + SignedHeader: h, + } + + return out, metadata, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go new file mode 100644 index 000000000..66aa2bd6a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go @@ -0,0 +1,86 @@ +package v4 + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "github.com/aws/aws-sdk-go-v2/aws" + v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" + "strings" + "time" +) + +// EventStreamSigner is an AWS
EventStream protocol signer. +type EventStreamSigner interface { + GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) +} + +// StreamSignerOptions is the configuration options for StreamSigner. +type StreamSignerOptions struct{} + +// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads. +type StreamSigner struct { + options StreamSignerOptions + + credentials aws.Credentials + service string + region string + + prevSignature []byte + + signingKeyDeriver *v4Internal.SigningKeyDeriver +} + +// NewStreamSigner returns a new AWS EventStream protocol signer. +func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner { + o := StreamSignerOptions{} + + for _, fn := range optFns { + fn(&o) + } + + return &StreamSigner{ + options: o, + credentials: credentials, + service: service, + region: region, + signingKeyDeriver: v4Internal.NewSigningKeyDeriver(), + prevSignature: seedSignature, + } +} + +// GetSignature signs the provided header and payload bytes. +func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) { + options := s.options + + for _, fn := range optFns { + fn(&options) + } + + prevSignature := s.prevSignature + + st := v4Internal.NewSigningTime(signingTime) + + sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st) + + scope := v4Internal.BuildCredentialScope(st, s.region, s.service) + + stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st) + + signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign)) + s.prevSignature = signature + + return signature, nil +} + +func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string { + hash := sha256.New() + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + signingTime.TimeFormat(), + credentialScope, + hex.EncodeToString(previousSignature), + hex.EncodeToString(makeHash(hash, headers)), + hex.EncodeToString(makeHash(hash, payload)), + }, "\n") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go new file mode 100644 index 000000000..afd069c1f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go @@ -0,0 +1,548 @@ +// Package v4 implements the AWS V4 signer. +// +// Provides request signing for requests that need to be signed with +// AWS V4 Signatures. +// +// # Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional logic. +// +// The signer escapes the request URI path by taking advantage of the URL.EscapedPath method. If your request URI requires +// additional escaping you may need to use URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "//<hostname>/<path>" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath() +// method and use the returned value.
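Putting StreamSigner together: a sketch that signs two successive event payloads, chaining each signature off the previous one as the implementation above does. The credentials and seed signature are placeholders; in practice the seed is the signature of the HTTP request that opened the stream, and the service name is illustrative.

package main

import (
	"context"
	"encoding/hex"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"} // placeholders
	seed, _ := hex.DecodeString("deadbeef")                                 // placeholder seed signature

	signer := v4.NewStreamSigner(creds, "transcribe", "eu-central-1", seed)

	// Each GetSignature call folds the previous signature into the string to sign.
	for i, payload := range [][]byte{[]byte("event-1"), []byte("event-2")} {
		sig, err := signer.GetSignature(context.Background(), nil, payload, time.Now().UTC())
		if err != nil {
			panic(err)
		}
		fmt.Printf("event %d signature: %s\n", i+1, hex.EncodeToString(sig))
	}
}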
+// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of this escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query from which the signature was +// generated. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, when using the signer outside of the SDK it is recommended +// to explicitly escape the request prior to signing; this will help prevent +// signature validation errors. This can be done by setting +// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then +// call URL.EscapedPath() if Opaque is not set. +// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. +package v4 + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash" + "net/http" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/logging" +) + +const ( + signingAlgorithm = "AWS4-HMAC-SHA256" + authorizationHeader = "Authorization" +) + +// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests +type HTTPSigner interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error +} + +type keyDerivator interface { + DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte +} + +// SignerOptions is the SigV4 Signer options. +type SignerOptions struct { + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // signature's canonical string's path. For services that do not need additional + // escaping, use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // The logger to send log messages to. + Logger logging.Logger + + // Enable logging of signed requests. + // This will enable logging of the canonical request, the string to sign, and for presigning the subsequent + // presigned URL. + LogSigning bool +} + +// Signer applies AWS v4 signing to a given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures.
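A sketch of constructing a standalone Signer with these options, for example tuned for S3-style signing with signing-log output to stderr. The option combination is illustrative.

package main

import (
	"os"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/smithy-go/logging"
)

// newS3Signer returns a signer that leaves the URI path unescaped (as S3
// requires) and logs the canonical request and string to sign.
func newS3Signer() *v4.Signer {
	return v4.NewSigner(func(o *v4.SignerOptions) {
		o.DisableURIPathEscaping = true // S3 paths must not be double-escaped
		o.LogSigning = true
		o.Logger = logging.NewStandardLogger(os.Stderr)
	})
}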
+type Signer struct { + options SignerOptions + keyDerivator keyDerivator +} + +// NewSigner returns a new SigV4 Signer +func NewSigner(optFns ...func(signer *SignerOptions)) *Signer { + options := SignerOptions{} + + for _, fn := range optFns { + fn(&options) + } + + return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()} +} + +type httpSigner struct { + Request *http.Request + ServiceName string + Region string + Time v4Internal.SigningTime + Credentials aws.Credentials + KeyDerivator keyDerivator + IsPreSign bool + + PayloadHash string + + DisableHeaderHoisting bool + DisableURIPathEscaping bool +} + +func (s *httpSigner) Build() (signedRequest, error) { + req := s.Request + + query := req.URL.Query() + headers := req.Header + + s.setRequiredSigningFields(headers, query) + + // Sort Each Query Key's Values + for key := range query { + sort.Strings(query[key]) + } + + v4Internal.SanitizeHostForHeader(req) + + credentialScope := s.buildCredentialScope() + credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope + if s.IsPreSign { + query.Set(v4Internal.AmzCredentialKey, credentialStr) + } + + unsignedHeaders := headers + if s.IsPreSign && !s.DisableHeaderHoisting { + var urlValues url.Values + urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers) + for k := range urlValues { + query[k] = urlValues[k] + } + } + + host := req.URL.Host + if len(req.Host) > 0 { + host = req.Host + } + + signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) + + if s.IsPreSign { + query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr) + } + + var rawQuery strings.Builder + rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1)) + + canonicalURI := v4Internal.GetURIPath(req.URL) + if !s.DisableURIPathEscaping { + canonicalURI = httpbinding.EscapePath(canonicalURI, false) + } + + canonicalString := s.buildCanonicalString( + req.Method, + canonicalURI, + rawQuery.String(), + signedHeadersStr, + canonicalHeaderStr, + ) + + strToSign := s.buildStringToSign(credentialScope, canonicalString) + signingSignature, err := s.buildSignature(strToSign) + if err != nil { + return signedRequest{}, err + } + + if s.IsPreSign { + rawQuery.WriteString("&X-Amz-Signature=") + rawQuery.WriteString(signingSignature) + } else { + headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) + } + + req.URL.RawQuery = rawQuery.String() + + return signedRequest{ + Request: req, + SignedHeaders: signedHeaders, + CanonicalString: canonicalString, + StringToSign: strToSign, + PreSigned: s.IsPreSign, + }, nil +} + +func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { + const credential = "Credential=" + const signedHeaders = "SignedHeaders=" + const signature = "Signature=" + const commaSpace = ", " + + var parts strings.Builder + parts.Grow(len(signingAlgorithm) + 1 + + len(credential) + len(credentialStr) + 2 + + len(signedHeaders) + len(signedHeadersStr) + 2 + + len(signature) + len(signingSignature), + ) + parts.WriteString(signingAlgorithm) + parts.WriteRune(' ') + parts.WriteString(credential) + parts.WriteString(credentialStr) + parts.WriteString(commaSpace) + parts.WriteString(signedHeaders) + parts.WriteString(signedHeadersStr) + parts.WriteString(commaSpace) + parts.WriteString(signature) + 
parts.WriteString(signingSignature) + return parts.String() +} + +// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// The payloadHash is the hex encoded SHA-256 hash of the request payload, and +// must be provided, even if the request has no payload (aka body). If the +// request has no payload you should use the hex encoded SHA-256 of an empty +// string as the payloadHash value. +// +// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +// +// Some services such as Amazon S3 accept alternative values for the payload +// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be +// included in the request signature. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The passed in request will be modified in place. +func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error { + options := s.options + + for _, fn := range optFns { + fn(&options) + } + + signer := &httpSigner{ + Request: r, + PayloadHash: payloadHash, + ServiceName: service, + Region: region, + Credentials: credentials, + Time: v4Internal.NewSigningTime(signingTime.UTC()), + DisableHeaderHoisting: options.DisableHeaderHoisting, + DisableURIPathEscaping: options.DisableURIPathEscaping, + KeyDerivator: s.keyDerivator, + } + + signedRequest, err := signer.Build() + if err != nil { + return err + } + + logSigningInfo(ctx, options, &signedRequest, false) + + return nil +} + +// PresignHTTP signs AWS v4 requests with the payload hash, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns the signed URL and the map of HTTP headers that were included in the +// signature or an error if signing the request failed. For presigned requests +// these headers and their values must be included on the HTTP request when it +// is made. This is helpful to know what header values need to be shared with +// the party the presigned request will be distributed to. +// +// The payloadHash is the hex encoded SHA-256 hash of the request payload, and +// must be provided, even if the request has no payload (aka body). If the +// request has no payload you should use the hex encoded SHA-256 of an empty +// string as the payloadHash value. +// +// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +// +// Some services such as Amazon S3 accept alternative values for the payload +// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be +// included in the request signature. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html +// +// PresignHTTP differs from SignHTTP in that it will sign the request using +// the query string instead of header values.
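A minimal standalone SignHTTP example following the doc comment above: hash the body, sign, and read back the Authorization header. The credentials, host, and region values are placeholders.

package main

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	body := "hello"
	sum := sha256.Sum256([]byte(body))
	payloadHash := hex.EncodeToString(sum[:]) // hex SHA-256 of the request payload

	req, _ := http.NewRequest(http.MethodPut,
		"https://examplebucket.s3.eu-central-1.amazonaws.com/key", strings.NewReader(body))

	creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"} // placeholders
	signer := v4.NewSigner()
	if err := signer.SignHTTP(context.Background(), creds, req, payloadHash,
		"s3", "eu-central-1", time.Now().UTC()); err != nil {
		panic(err)
	}

	// The request now carries X-Amz-Date and the Authorization header.
	fmt.Println(req.Header.Get("Authorization"))
}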
This allows you to share the +// Presigned Request's URL with third parties, or distribute it throughout your +// system with minimal dependencies. +// +// PresignHTTP will not set the expires time of the presigned request +// automatically. To specify the expire duration for a request add the +// "X-Amz-Expires" query parameter on the request with the value as the +// duration in seconds the presigned URL should be considered valid for. This +// parameter is not used by all AWS services, and is most notably used by +// Amazon S3 APIs. +// +// expires := 20 * time.Minute +// query := req.URL.Query() +// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)) +// req.URL.RawQuery = query.Encode() +// +// This method does not modify the provided request. +func (s *Signer) PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*SignerOptions), +) (signedURI string, signedHeaders http.Header, err error) { + options := s.options + + for _, fn := range optFns { + fn(&options) + } + + signer := &httpSigner{ + Request: r.Clone(r.Context()), + PayloadHash: payloadHash, + ServiceName: service, + Region: region, + Credentials: credentials, + Time: v4Internal.NewSigningTime(signingTime.UTC()), + IsPreSign: true, + DisableHeaderHoisting: options.DisableHeaderHoisting, + DisableURIPathEscaping: options.DisableURIPathEscaping, + KeyDerivator: s.keyDerivator, + } + + signedRequest, err := signer.Build() + if err != nil { + return "", nil, err + } + + logSigningInfo(ctx, options, &signedRequest, true) + + signedHeaders = make(http.Header) + + // For the signed headers we canonicalize the header keys in the returned map. + // This avoids situations where the standard library can double headers like the + // Host header; for example, the standard library will set the Host header even + // if it is present in lower-case form. + for k, v := range signedRequest.SignedHeaders { + key := textproto.CanonicalMIMEHeaderKey(k) + signedHeaders[key] = append(signedHeaders[key], v...) + } + + return signedRequest.Request.URL.String(), signedHeaders, nil +} + +func (s *httpSigner) buildCredentialScope() string { + return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName) +} + +func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} + +func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { + signed = make(http.Header) + + var headers []string + const hostHeader = "host" + headers = append(headers, hostHeader) + signed[hostHeader] = append(signed[hostHeader], host) + + const contentLengthHeader = "content-length" + if length > 0 { + headers = append(headers, contentLengthHeader) + signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) + } + + for k, v := range header { + if !rule.IsValid(k) { + continue // ignored header + } + if strings.EqualFold(k, contentLengthHeader) { + // Prevent signing the already handled Content-Length header.
+ continue + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := signed[lowerCaseKey]; ok { + // include additional values + signed[lowerCaseKey] = append(signed[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + signed[lowerCaseKey] = v + } + sort.Strings(headers) + + signedHeaders = strings.Join(headers, ";") + + var canonicalHeaders strings.Builder + n := len(headers) + const colon = ':' + for i := 0; i < n; i++ { + if headers[i] == hostHeader { + canonicalHeaders.WriteString(hostHeader) + canonicalHeaders.WriteRune(colon) + canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) + } else { + canonicalHeaders.WriteString(headers[i]) + canonicalHeaders.WriteRune(colon) + // Trim out leading, trailing, and dedup inner spaces from signed header values. + values := signed[headers[i]] + for j, v := range values { + cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) + canonicalHeaders.WriteString(cleanedValue) + if j < len(values)-1 { + canonicalHeaders.WriteRune(',') + } + } + } + canonicalHeaders.WriteRune('\n') + } + canonicalHeadersStr = canonicalHeaders.String() + + return signed, signedHeaders, canonicalHeadersStr +} + +func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { + return strings.Join([]string{ + method, + uri, + query, + canonicalHeaders, + signedHeaders, + s.PayloadHash, + }, "\n") +} + +func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { + return strings.Join([]string{ + signingAlgorithm, + s.Time.TimeFormat(), + credentialScope, + hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), + }, "\n") +} + +func makeHash(hash hash.Hash, b []byte) []byte { + hash.Reset() + hash.Write(b) + return hash.Sum(nil) +} + +func (s *httpSigner) buildSignature(strToSign string) (string, error) { + key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time) + return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil +} + +func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { + amzDate := s.Time.TimeFormat() + + if s.IsPreSign { + query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm) + if sessionToken := s.Credentials.SessionToken; len(sessionToken) > 0 { + query.Set("X-Amz-Security-Token", sessionToken) + } + + query.Set(v4Internal.AmzDateKey, amzDate) + return + } + + headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate) + + if len(s.Credentials.SessionToken) > 0 { + headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken) + } +} + +func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) { + if !options.LogSigning { + return + } + signedURLMsg := "" + if isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String()) + } + logger := logging.WithContext(ctx, options.Logger) + logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg) +} + +type signedRequest struct { + Request *http.Request + SignedHeaders http.Header + CanonicalString string + StringToSign string + PreSigned bool +} + +const logSignInfoMsg = `Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` 
+---[ SIGNED URL ]------------------------------------ +%s` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go new file mode 100644 index 000000000..f3fc4d610 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go @@ -0,0 +1,297 @@ +// Code generated by aws/generate.go DO NOT EDIT. + +package aws + +import ( + "github.com/aws/smithy-go/ptr" + "time" +) + +// Bool returns a pointer value for the bool value passed in. +func Bool(v bool) *bool { + return ptr.Bool(v) +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + return ptr.BoolSlice(vs) +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + return ptr.BoolMap(vs) +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return ptr.Byte(v) +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + return ptr.ByteSlice(vs) +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + return ptr.ByteMap(vs) +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return ptr.String(v) +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + return ptr.StringSlice(vs) +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + return ptr.StringMap(vs) +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return ptr.Int(v) +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + return ptr.IntSlice(vs) +} + +// IntMap returns a map of int pointers from the values +// passed in. +func IntMap(vs map[string]int) map[string]*int { + return ptr.IntMap(vs) +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return ptr.Int8(v) +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + return ptr.Int8Slice(vs) +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + return ptr.Int8Map(vs) +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return ptr.Int16(v) +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + return ptr.Int16Slice(vs) +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + return ptr.Int16Map(vs) +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return ptr.Int32(v) +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. +func Int32Slice(vs []int32) []*int32 { + return ptr.Int32Slice(vs) +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + return ptr.Int32Map(vs) +} + +// Int64 returns a pointer value for the int64 value passed in. 
+func Int64(v int64) *int64 { + return ptr.Int64(v) +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. +func Int64Slice(vs []int64) []*int64 { + return ptr.Int64Slice(vs) +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + return ptr.Int64Map(vs) +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return ptr.Uint(v) +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + return ptr.UintSlice(vs) +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + return ptr.UintMap(vs) +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return ptr.Uint8(v) +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + return ptr.Uint8Slice(vs) +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. +func Uint8Map(vs map[string]uint8) map[string]*uint8 { + return ptr.Uint8Map(vs) +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return ptr.Uint16(v) +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + return ptr.Uint16Slice(vs) +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + return ptr.Uint16Map(vs) +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return ptr.Uint32(v) +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + return ptr.Uint32Slice(vs) +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + return ptr.Uint32Map(vs) +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return ptr.Uint64(v) +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. +func Uint64Slice(vs []uint64) []*uint64 { + return ptr.Uint64Slice(vs) +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + return ptr.Uint64Map(vs) +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return ptr.Float32(v) +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + return ptr.Float32Slice(vs) +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + return ptr.Float32Map(vs) +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return ptr.Float64(v) +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + return ptr.Float64Slice(vs) +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. 
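These pointer helpers are typically used to populate generated API input structs whose optional fields are pointers. A sketch with a stand-in input type (the struct here is hypothetical, standing in for a generated SDK input such as an S3 PutObject input):

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// putObjectInput is a stand-in for a generated SDK input type whose optional
// fields are pointers.
type putObjectInput struct {
	Bucket  *string
	Expires *time.Time
	Retries *int32
}

func main() {
	in := putObjectInput{
		Bucket:  aws.String("example-bucket"),
		Expires: aws.Time(time.Now().Add(time.Hour)),
		Retries: aws.Int32(3),
	}
	fmt.Println(*in.Bucket, *in.Retries)
}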
+func Float64Map(vs map[string]float64) map[string]*float64 { + return ptr.Float64Map(vs) +} + +// Time returns a pointer value for the time.Time value passed in. +func Time(v time.Time) *time.Time { + return ptr.Time(v) +} + +// TimeSlice returns a slice of time.Time pointers from the values +// passed in. +func TimeSlice(vs []time.Time) []*time.Time { + return ptr.TimeSlice(vs) +} + +// TimeMap returns a map of time.Time pointers from the values +// passed in. +func TimeMap(vs map[string]time.Time) map[string]*time.Time { + return ptr.TimeMap(vs) +} + +// Duration returns a pointer value for the time.Duration value passed in. +func Duration(v time.Duration) *time.Duration { + return ptr.Duration(v) +} + +// DurationSlice returns a slice of time.Duration pointers from the values +// passed in. +func DurationSlice(vs []time.Duration) []*time.Duration { + return ptr.DurationSlice(vs) +} + +// DurationMap returns a map of time.Duration pointers from the values +// passed in. +func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { + return ptr.DurationMap(vs) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go new file mode 100644 index 000000000..26d90719b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go @@ -0,0 +1,310 @@ +package http + +import ( + "crypto/tls" + "github.com/aws/aws-sdk-go-v2/aws" + "net" + "net/http" + "reflect" + "sync" + "time" +) + +// Defaults for the HTTPTransportBuilder. +var ( + // Default connection pool options + DefaultHTTPTransportMaxIdleConns = 100 + DefaultHTTPTransportMaxIdleConnsPerHost = 10 + + // Default connection timeouts + DefaultHTTPTransportIdleConnTimeout = 90 * time.Second + DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second + DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second + + // Default to TLS 1.2 for all HTTPS requests. + DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12 +) + +// Timeouts for net.Dialer's network connection. +var ( + DefaultDialConnectTimeout = 30 * time.Second + DefaultDialKeepAliveTimeout = 30 * time.Second +) + +// BuildableClient provides an HTTPClient implementation with options to +// create copies of the HTTPClient when additional configuration is provided. +// +// The client's methods will not share the http.Transport value between copies +// of the BuildableClient. Only exported member values of the Transport and +// optional Dialer will be copied between copies of BuildableClient. +type BuildableClient struct { + transport *http.Transport + dialer *net.Dialer + + initOnce sync.Once + + clientTimeout time.Duration + client *http.Client +} + +// NewBuildableClient returns an initialized client for invoking HTTP +// requests. +func NewBuildableClient() *BuildableClient { + return &BuildableClient{} +} + +// Do implements the HTTPClient interface's Do method to invoke an HTTP request, +// and receive the response. Uses the BuildableClient's current +// configuration to invoke the http.Request. +// +// If connection pooling is enabled (aka HTTP KeepAlive) the client will only +// share pooled connections with its own instance. Copies of the +// BuildableClient will have their own connection pools. +// +// Redirect (3xx) responses will not be followed; the HTTP response received +// will be returned instead.
+func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) { + b.initOnce.Do(b.build) + + return b.client.Do(req) +} + +// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient. +// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client. +func (b *BuildableClient) Freeze() aws.HTTPClient { + cpy := b.clone() + cpy.build() + return cpy.client +} + +func (b *BuildableClient) build() { + b.client = wrapWithLimitedRedirect(&http.Client{ + Timeout: b.clientTimeout, + Transport: b.GetTransport(), + }) +} + +func (b *BuildableClient) clone() *BuildableClient { + cpy := NewBuildableClient() + cpy.transport = b.GetTransport() + cpy.dialer = b.GetDialer() + cpy.clientTimeout = b.clientTimeout + + return cpy +} + +// WithTransportOptions copies the BuildableClient and returns it with the +// http.Transport options applied. +// +// If a value other than *http.Transport was set as the round tripper, the round +// tripper will be replaced with a default Transport value before invoking the +// option functions. +func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient { + cpy := b.clone() + + tr := cpy.GetTransport() + for _, opt := range opts { + opt(tr) + } + cpy.transport = tr + + return cpy +} + +// WithDialerOptions copies the BuildableClient and returns it with the +// net.Dialer options applied. Will set the client's http.Transport DialContext +// member. +func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient { + cpy := b.clone() + + dialer := cpy.GetDialer() + for _, opt := range opts { + opt(dialer) + } + cpy.dialer = dialer + + tr := cpy.GetTransport() + tr.DialContext = cpy.dialer.DialContext + cpy.transport = tr + + return cpy +} + +// WithTimeout sets the timeout used by the client for all requests. +func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient { + cpy := b.clone() + cpy.clientTimeout = timeout + return cpy +} + +// GetTransport returns a copy of the client's HTTP Transport. +func (b *BuildableClient) GetTransport() *http.Transport { + var tr *http.Transport + if b.transport != nil { + tr = b.transport.Clone() + } else { + tr = defaultHTTPTransport() + } + + return tr +} + +// GetDialer returns a copy of the client's network dialer. +func (b *BuildableClient) GetDialer() *net.Dialer { + var dialer *net.Dialer + if b.dialer != nil { + dialer = shallowCopyStruct(b.dialer).(*net.Dialer) + } else { + dialer = defaultDialer() + } + + return dialer +} + +// GetTimeout returns a copy of the client's timeout to cancel requests with.
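Because every With* method clones the client, BuildableClient configuration chains without mutating the original. A sketch with illustrative timeout and connection-pool values:

package main

import (
	"net"
	"net/http"
	"time"

	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
)

// newTunedClient builds a client with a shorter dial timeout, an overall
// request timeout, and a larger per-host idle connection pool.
func newTunedClient() *awshttp.BuildableClient {
	return awshttp.NewBuildableClient().
		WithTimeout(15 * time.Second).
		WithDialerOptions(func(d *net.Dialer) {
			d.Timeout = 5 * time.Second
		}).
		WithTransportOptions(func(tr *http.Transport) {
			tr.MaxIdleConnsPerHost = 20
		})
}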
+func (b *BuildableClient) GetTimeout() time.Duration {
+    return b.clientTimeout
+}
+
+func defaultDialer() *net.Dialer {
+    return &net.Dialer{
+        Timeout:   DefaultDialConnectTimeout,
+        KeepAlive: DefaultDialKeepAliveTimeout,
+        DualStack: true,
+    }
+}
+
+func defaultHTTPTransport() *http.Transport {
+    dialer := defaultDialer()
+
+    tr := &http.Transport{
+        Proxy:                 http.ProxyFromEnvironment,
+        DialContext:           dialer.DialContext,
+        TLSHandshakeTimeout:   DefaultHTTPTransportTLSHandleshakeTimeout,
+        MaxIdleConns:          DefaultHTTPTransportMaxIdleConns,
+        MaxIdleConnsPerHost:   DefaultHTTPTransportMaxIdleConnsPerHost,
+        IdleConnTimeout:       DefaultHTTPTransportIdleConnTimeout,
+        ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout,
+        ForceAttemptHTTP2:     true,
+        TLSClientConfig: &tls.Config{
+            MinVersion: DefaultHTTPTransportTLSMinVersion,
+        },
+    }
+
+    return tr
+}
+
+// shallowCopyStruct creates a shallow copy of the passed in source struct, and
+// returns that copy of the same struct type.
+func shallowCopyStruct(src interface{}) interface{} {
+    srcVal := reflect.ValueOf(src)
+    srcValType := srcVal.Type()
+
+    var returnAsPtr bool
+    if srcValType.Kind() == reflect.Ptr {
+        srcVal = srcVal.Elem()
+        srcValType = srcValType.Elem()
+        returnAsPtr = true
+    }
+    dstVal := reflect.New(srcValType).Elem()
+
+    for i := 0; i < srcValType.NumField(); i++ {
+        ft := srcValType.Field(i)
+        if len(ft.PkgPath) != 0 {
+            // unexported fields have a PkgPath
+            continue
+        }
+
+        dstVal.Field(i).Set(srcVal.Field(i))
+    }
+
+    if returnAsPtr {
+        dstVal = dstVal.Addr()
+    }
+
+    return dstVal.Interface()
+}
+
+// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to
+// not follow any redirect other than 307 and 308. No other redirect will be
+// followed.
+//
+// If the client does not have a Transport defined, a new SDK default
+// http.Transport configuration will be used.
+func wrapWithLimitedRedirect(c *http.Client) *http.Client {
+    tr := c.Transport
+    if tr == nil {
+        tr = defaultHTTPTransport()
+    }
+
+    cc := *c
+    cc.CheckRedirect = limitedRedirect
+    cc.Transport = suppressBadHTTPRedirectTransport{
+        tr: tr,
+    }
+
+    return &cc
+}
+
+// limitedRedirect is a CheckRedirect that prevents the client from following
+// any non-307/308 HTTP status code redirects.
+//
+// The 307 and 308 redirects are allowed because the client must use the
+// original HTTP method for the redirected-to location, whereas 301 and 302
+// allow the client to switch to GET for the redirect.
+//
+// Suppresses all redirect requests with a URL of badHTTPRedirectLocation.
+func limitedRedirect(r *http.Request, via []*http.Request) error {
+    // Request.Response, in CheckRedirect is the response that is triggering
+    // the redirect.
+    resp := r.Response
+    if r.URL.String() == badHTTPRedirectLocation {
+        resp.Header.Del(badHTTPRedirectLocation)
+        return http.ErrUseLastResponse
+    }
+
+    switch resp.StatusCode {
+    case 307, 308:
+        // Only allow 307 and 308 redirects as they preserve the method.
+        return nil
+    }
+
+    return http.ErrUseLastResponse
+}
+
+// suppressBadHTTPRedirectTransport provides an http.RoundTripper
+// implementation that wraps another http.RoundTripper to prevent the HTTP
+// client from receiving 301 and 302 redirect responses that are missing the
+// required Location header.
+//
+// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect,
+// that checks for responses with a URL of badHTTPRedirectLocation, and
+// suppresses the redirect.
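+//
+// Illustrative sketch of how these pieces compose (names taken from this
+// file): wrapWithLimitedRedirect installs both the CheckRedirect and this
+// transport wrapper.
+//
+//	client := wrapWithLimitedRedirect(&http.Client{})
+//	// client.CheckRedirect is limitedRedirect, and client.Transport is a
+//	// suppressBadHTTPRedirectTransport wrapping the SDK default transport.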
+type suppressBadHTTPRedirectTransport struct {
+    tr http.RoundTripper
+}
+
+const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation`
+
+// RoundTrip backfills a stub location when a 301/302 response is received
+// without a location. This stub location is used by limitedRedirect to prevent
+// the HTTP client from failing when attempting to follow a redirect without a
+// Location value.
+func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+    resp, err := t.tr.RoundTrip(r)
+    if err != nil {
+        return resp, err
+    }
+
+    // S3 is the only known service to return 301 without location header.
+    // The Go standard library HTTP client will return an opaque error if it
+    // tries to follow a 301/302 response missing the location header.
+    switch resp.StatusCode {
+    case 301, 302:
+        if v := resp.Header.Get("Location"); len(v) == 0 {
+            resp.Header.Set("Location", badHTTPRedirectLocation)
+        }
+    }
+
+    return resp, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
new file mode 100644
index 000000000..556f54a7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
@@ -0,0 +1,42 @@
+package http
+
+import (
+    "context"
+    "fmt"
+    "github.com/aws/smithy-go/middleware"
+    smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// removeContentTypeHeader is a build middleware that removes the
+// Content-Type header if the Content-Length header is unset or set to zero.
+type removeContentTypeHeader struct {
+}
+
+// ID returns the name of the middleware.
+func (m *removeContentTypeHeader) ID() string {
+    return "RemoveContentTypeHeader"
+}
+
+// HandleBuild removes the Content-Type header from the request if the
+// content length is zero.
+func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+    out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+    req, ok := in.Request.(*smithyhttp.Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("unknown transport type %T", in)
+    }
+
+    // remove contentTypeHeader when content-length is zero
+    if req.ContentLength == 0 {
+        req.Header.Del("content-type")
+    }
+
+    return next.HandleBuild(ctx, in)
+}
+
+// RemoveContentTypeHeader removes the Content-Type header if the content
+// length is unset or equal to zero.
+func RemoveContentTypeHeader(stack *middleware.Stack) error {
+    return stack.Build.Add(&removeContentTypeHeader{}, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
new file mode 100644
index 000000000..44651c990
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
@@ -0,0 +1,33 @@
+package http
+
+import (
+    "errors"
+    "fmt"
+
+    smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
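+//
+// Illustrative sketch, assuming err was returned by an SDK operation call:
+//
+//	var respErr *ResponseError
+//	if errors.As(err, &respErr) {
+//		fmt.Println(respErr.ServiceRequestID(), respErr.Response.StatusCode)
+//	}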
+type ResponseError struct { + *smithyhttp.ResponseError + + // RequestID associated with response error + RequestID string +} + +// ServiceRequestID returns the request id associated with Response Error +func (e *ResponseError) ServiceRequestID() string { return e.RequestID } + +// Error returns the formatted error +func (e *ResponseError) Error() string { + return fmt.Sprintf( + "https response error StatusCode: %d, RequestID: %s, %v", + e.Response.StatusCode, e.RequestID, e.Err) +} + +// As populates target and returns true if the type of target is a error type that +// the ResponseError embeds, (e.g.AWS HTTP ResponseError) +func (e *ResponseError) As(target interface{}) bool { + return errors.As(e.ResponseError, target) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go new file mode 100644 index 000000000..8fd14cecd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go @@ -0,0 +1,54 @@ +package http + +import ( + "context" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddResponseErrorMiddleware adds response error wrapper middleware +func AddResponseErrorMiddleware(stack *middleware.Stack) error { + // add error wrapper middleware before request id retriever middleware so that it can wrap the error response + // returned by operation deserializers + return stack.Deserialize.Insert(&responseErrorWrapper{}, "RequestIDRetriever", middleware.Before) +} + +type responseErrorWrapper struct { +} + +// ID returns the middleware identifier +func (m *responseErrorWrapper) ID() string { + return "ResponseErrorWrapper" +} + +func (m *responseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err == nil { + // Nothing to do when there is no error. + return out, metadata, err + } + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + // No raw response to wrap with. + return out, metadata, err + } + + // look for request id in metadata + reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata) + + // Wrap the returned smithy error with the request id retrieved from the metadata + err = &ResponseError{ + ResponseError: &smithyhttp.ResponseError{ + Response: resp, + Err: err, + }, + RequestID: reqID, + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go new file mode 100644 index 000000000..993929bd9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go @@ -0,0 +1,104 @@ +package http + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type readResult struct { + n int + err error +} + +// ResponseTimeoutError is an error when the reads from the response are +// delayed longer than the timeout the read was configured for. 
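+//
+// Illustrative sketch for detecting this timeout, assuming err surfaced from
+// reading a response body wrapped by timeoutReadCloser:
+//
+//	var rte *ResponseTimeoutError
+//	if errors.As(err, &rte) && rte.Timeout() {
+//		// the read exceeded rte.TimeoutDur and may be retried
+//	}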
+type ResponseTimeoutError struct {
+    TimeoutDur time.Duration
+}
+
+// Timeout returns that the error was caused by a timeout, and can be
+// retried.
+func (*ResponseTimeoutError) Timeout() bool { return true }
+
+func (e *ResponseTimeoutError) Error() string {
+    return fmt.Sprintf("read on body reach timeout limit, %v", e.TimeoutDur)
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return a ResponseTimeoutError if a timeout occurs.
+type timeoutReadCloser struct {
+    reader   io.ReadCloser
+    duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whichever completes
+// first will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+    timer := time.NewTimer(r.duration)
+    c := make(chan readResult, 1)
+
+    go func() {
+        n, err := r.reader.Read(b)
+        timer.Stop()
+        c <- readResult{n: n, err: err}
+    }()
+
+    select {
+    case data := <-c:
+        return data.n, data.err
+    case <-timer.C:
+        return 0, &ResponseTimeoutError{TimeoutDur: r.duration}
+    }
+}
+
+func (r *timeoutReadCloser) Close() error {
+    return r.reader.Close()
+}
+
+// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the
+// response body so that a read that takes too long will return an error.
+func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error {
+    return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After)
+}
+
+// readTimeout wraps the response body with a timeoutReadCloser
+type readTimeout struct {
+    duration time.Duration
+}
+
+// ID returns the id of the middleware
+func (*readTimeout) ID() string {
+    return "ReadResponseTimeout"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface
+func (m *readTimeout) HandleDeserialize(
+    ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+    out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+    out, metadata, err = next.HandleDeserialize(ctx, in)
+    if err != nil {
+        return out, metadata, err
+    }
+
+    response, ok := out.RawResponse.(*smithyhttp.Response)
+    if !ok {
+        return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+    }
+
+    response.Body = &timeoutReadCloser{
+        reader:   response.Body,
+        duration: m.duration,
+    }
+    out.RawResponse = response
+
+    return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
new file mode 100644
index 000000000..cc3ae8114
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
@@ -0,0 +1,42 @@
+package aws
+
+import (
+    "fmt"
+)
+
+// Ternary is an enum allowing an unknown or none state in addition to a bool's
+// true and false.
+type Ternary int
+
+func (t Ternary) String() string {
+    switch t {
+    case UnknownTernary:
+        return "unknown"
+    case FalseTernary:
+        return "false"
+    case TrueTernary:
+        return "true"
+    default:
+        return fmt.Sprintf("unknown value, %d", int(t))
+    }
+}
+
+// Bool returns true if the value is TrueTernary, false otherwise.
+func (t Ternary) Bool() bool {
+    return t == TrueTernary
+}
+
+// Enumerations for the values of the Ternary type.
+const (
+    UnknownTernary Ternary = iota
+    FalseTernary
+    TrueTernary
+)
+
+// BoolTernary returns a true or false Ternary value for the bool provided.
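+//
+// Illustrative sketch:
+//
+//	t := BoolTernary(true)
+//	fmt.Println(t == TrueTernary) // true
+//	fmt.Println(t.Bool())         // true
+//	fmt.Println(UnknownTernary)   // "unknown"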
+func BoolTernary(v bool) Ternary { + if v { + return TrueTernary + } + return FalseTernary +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go new file mode 100644 index 000000000..5f729d45e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go-v2" + +// SDKVersion is the version of this SDK +const SDKVersion = goModuleVersion diff --git a/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml b/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml new file mode 100644 index 000000000..b11df5082 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml @@ -0,0 +1,12 @@ +version: 0.2 + +phases: + build: + commands: + - echo Build started on `date` + - export GOPATH=/go + - export SDK_CODEBUILD_ROOT=`pwd` + - make ci-test-no-generate + post_build: + commands: + - echo Build completed on `date` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md new file mode 100644 index 000000000..325779c09 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -0,0 +1,359 @@ +# v1.18.27 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.26 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.25 (2023-05-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.24 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.23 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.22 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.21 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.20 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.19 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.18 (2023-03-16) + +* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. 
+ +# v1.18.17 (2023-03-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.16 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.15 (2023-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.14 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.13 (2023-02-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.12 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.11 (2023-02-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.10 (2023-01-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2023-01-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.8 (2023-01-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2022-12-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2022-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2022-12-15) + +* **Bug Fix**: Unify logic between shared config and in finding home directory +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.4 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2022-11-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.2 (2022-11-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2022-11-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2022-11-11) + +* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 +* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.11 (2022-11-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.10 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.9 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.8 (2022-09-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2022-08-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2022-08-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-08-14) + +* **Feature**: Add alternative mechanism for determning the users `$HOME` or `%USERPROFILE%` location when the environment variables are not present. 
+
+# v1.16.1 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2022-08-10)
+
+* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
+
+# v1.15.17 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.16 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.15 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.14 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.13 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.12 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.11 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.10 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.9 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.8 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.7 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.6 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2022-05-09)
+
+* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)
+
+# v1.15.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-02-24)
+
+* **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if a custom retryer has not been specified. See [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about these new options and how to use them.
+* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage of the file is the same as the environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-01-28)
+
+* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. [#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR.
+* **Bug Fix**: Fixes the SDK's handling of `duration_seconds` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for helping reproduce this bug.
+* **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-01-07)
+
+* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-12-02)
+
+* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.3 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-09-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-09-02)
+
+* **Feature**: Add support for S3 Multi-Region Access Point ARNs.
+
+# v1.7.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-08-04)
+
+* **Feature**: Adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-07-15)
+
+* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: Adds configuration setting for enabling endpoint discovery. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-20) + +* **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile. +* **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go new file mode 100644 index 000000000..5940f8e7e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -0,0 +1,201 @@ +package config + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// defaultLoaders are a slice of functions that will read external configuration +// sources for configuration values. These values are read by the AWSConfigResolvers +// using interfaces to extract specific information from the external configuration. +var defaultLoaders = []loader{ + loadEnvConfig, + loadSharedConfigIgnoreNotExist, +} + +// defaultAWSConfigResolvers are a slice of functions that will resolve external +// configuration values into AWS configuration values. 
+//
+// This will set up the AWS configuration's Region and the other default
+// settings resolved from the external configuration sources.
+var defaultAWSConfigResolvers = []awsConfigResolver{
+    // Resolves the default configuration the SDK's aws.Config will be
+    // initialized with.
+    resolveDefaultAWSConfig,
+
+    // Sets the logger to be used. Could be user provided logger, and client
+    // logging mode.
+    resolveLogger,
+    resolveClientLogMode,
+
+    // Sets the HTTP client and configuration to use for making requests using
+    // the HTTP transport.
+    resolveHTTPClient,
+    resolveCustomCABundle,
+
+    // Sets the endpoint resolving behavior the API Clients will use for making
+    // requests. Clients default to their own resolvers; this allows overrides
+    // to be specified. The resolveEndpointResolver option is deprecated, but
+    // we still need to set it for backwards compatibility on config
+    // construction.
+    resolveEndpointResolver,
+    resolveEndpointResolverWithOptions,
+
+    // Sets the retry behavior API clients will use within their retry attempt
+    // middleware. Defaults to unset, allowing API clients to define their own
+    // retry behavior.
+    resolveRetryer,
+
+    // Sets the region the API Clients should use for making requests to.
+    resolveRegion,
+    resolveEC2IMDSRegion,
+    resolveDefaultRegion,
+
+    // Sets the additional set of middleware stack mutators that will customize
+    // the API client request pipeline middleware.
+    resolveAPIOptions,
+
+    // Resolves the DefaultsMode that should be used by SDK clients, including
+    // inspecting the runtime environment when the mode is set to
+    // DefaultsModeAuto.
+    //
+    // Comes after HTTPClient and CustomCABundle to ensure the HTTP client is
+    // configured if provided before invoking IMDS if mode is auto. Comes
+    // before resolving credentials so that those subsequent clients use the
+    // configured auto mode.
+    resolveDefaultsModeOptions,
+
+    // Sets the resolved credentials the API clients will use for
+    // authentication. Provides the SDK's default credential chain.
+    //
+    // Should probably be the last step in the resolve chain to ensure that all
+    // other configurations are resolved first in case downstream credentials
+    // implementations depend on or can be configured with earlier resolved
+    // configuration options.
+    resolveCredentials,
+
+    // Sets the resolved bearer authentication token API clients will use for
+    // httpBearerAuth authentication scheme.
+    resolveBearerAuthToken,
+}
+
+// A Config represents a generic configuration value or set of values. This type
+// will be used by the AWSConfigResolvers to extract specific configuration
+// values from the external configuration sources.
+//
+// Generally the Config type will use type assertion against the Provider
+// interfaces to extract specific data from the Config.
+type Config interface{}
+
+// A loader is used to load external configuration data and returns it as
+// a generic Config type.
+//
+// The loader should return an error if it fails to load the external
+// configuration, the configuration data is malformed, or required components
+// are missing.
+type loader func(context.Context, configs) (Config, error)
+
+// An awsConfigResolver will extract configuration data from the configs slice
+// using the provider interfaces to extract specific functionality. The extracted
+// configuration values will be written to the AWS Config value.
+//
+// The resolver should return an error if it fails to extract the data, the
+// data is malformed, or incomplete.
+type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error
+
+// configs is a slice of Config values.
+// These values will be used by the AWSConfigResolvers to extract external
+// configuration values to populate the AWS Config type.
+//
+// Use AppendFromLoaders to add additional external Config values that are
+// loaded from external sources.
+//
+// Use ResolveAWSConfig after external Config values have been added or loaded
+// to extract the loaded configuration values into the AWS Config.
+type configs []Config
+
+// AppendFromLoaders iterates over the slice of loaders passed in calling each
+// loader function in order. The external config value returned by the loader
+// will be added to the returned configs slice.
+//
+// If a loader returns an error this method will stop iterating and return
+// that error.
+func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) {
+    for _, fn := range loaders {
+        cfg, err := fn(ctx, cs)
+        if err != nil {
+            return nil, err
+        }
+
+        cs = append(cs, cfg)
+    }
+
+    return cs, nil
+}
+
+// ResolveAWSConfig returns an AWS configuration populated with values by calling
+// the resolvers slice passed in. Each resolver is called in order. Any resolver
+// may overwrite the AWS Configuration value of a previous resolver.
+//
+// If a resolver returns an error this method will return that error, and stop
+// iterating over the resolvers.
+func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) {
+    var cfg aws.Config
+
+    for _, fn := range resolvers {
+        if err := fn(ctx, &cfg, cs); err != nil {
+            return aws.Config{}, err
+        }
+    }
+
+    return cfg, nil
+}
+
+// ResolveConfig calls the provided function passing a slice of configuration
+// sources. This implements the aws.ConfigResolver interface.
+func (cs configs) ResolveConfig(f func(configs []interface{}) error) error {
+    var cfgs []interface{}
+    for i := range cs {
+        cfgs = append(cfgs, cs[i])
+    }
+    return f(cfgs)
+}
+
+// LoadDefaultConfig reads the SDK's default external configurations, and
+// populates an AWS Config with the values from the external configurations.
+//
+// An optional variadic set of additional Config values can be provided as input
+// that will be prepended to the configs slice. Use this to add custom configuration.
+// The custom configurations must satisfy the respective providers for their data
+// or the custom data will be ignored by the resolvers and config loaders.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//	    WithSharedConfigProfile("test-profile"),
+//	)
+//	if err != nil {
+//	    panic(fmt.Sprintf("failed loading config, %v", err))
+//	}
+//
+// The default configuration sources are:
+// * Environment Variables
+// * Shared Configuration and Shared Credentials files.
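+//
+// As an additional illustrative sketch, multiple options can be combined;
+// WithRegion and WithSharedConfigProfile are helpers defined in this package:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//	    config.WithRegion("us-west-2"),
+//	    config.WithSharedConfigProfile("test-profile"),
+//	)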
+func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) { + var options LoadOptions + for _, optFn := range optFns { + if err := optFn(&options); err != nil { + return aws.Config{}, err + } + } + + // assign Load Options to configs + var cfgCpy = configs{options} + + cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, defaultLoaders) + if err != nil { + return aws.Config{}, err + } + + cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers) + if err != nil { + return aws.Config{}, err + } + + return cfg, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go new file mode 100644 index 000000000..20b66367f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go @@ -0,0 +1,47 @@ +package config + +import ( + "context" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" +) + +const execEnvVar = "AWS_EXECUTION_ENV" + +// DefaultsModeOptions is the set of options that are used to configure +type DefaultsModeOptions struct { + // The SDK configuration defaults mode. Defaults to legacy if not specified. + // + // Supported modes are: auto, cross-region, in-region, legacy, mobile, standard + Mode aws.DefaultsMode + + // The EC2 Instance Metadata Client that should be used when performing environment + // discovery when aws.DefaultsModeAuto is set. + // + // If not specified the SDK will construct a client if the instance metadata service has not been disabled by + // the AWS_EC2_METADATA_DISABLED environment variable. + IMDSClient *imds.Client +} + +func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) { + getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{}) + // honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection. + select { + case <-ctx.Done(): + return aws.RuntimeEnvironment{}, err + default: + } + + var imdsRegion string + if err == nil { + imdsRegion = getRegionOutput.Region + } + + return aws.RuntimeEnvironment{ + EnvironmentIdentifier: aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)), + Region: envConfig.Region, + EC2InstanceMetadataRegion: imdsRegion, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go new file mode 100644 index 000000000..aab7164e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go @@ -0,0 +1,20 @@ +// Package config provides utilities for loading configuration from multiple +// sources that can be used to configure the SDK's API clients, and utilities. +// +// The config package will load configuration from environment variables, AWS +// shared configuration file (~/.aws/config), and AWS shared credentials file +// (~/.aws/credentials). +// +// Use the LoadDefaultConfig to load configuration from all the SDK's supported +// sources, and resolve credentials using the SDK's default credential chain. +// +// LoadDefaultConfig allows for a variadic list of additional Config sources that can +// provide one or more configuration values which can be used to programmatically control the resolution +// of a specific value, or allow for broader range of additional configuration sources not supported by the SDK. +// A Config source implements one or more provider interfaces defined in this package. 
Config sources passed in will +// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources +// implement the same provider interface, priority will be handled by the order in which the sources were passed in. +// +// A number of helpers (prefixed by “With“) are provided in this package that implement their respective provider +// interface. These helpers should be used for overriding configuration programmatically at runtime. +package config diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go new file mode 100644 index 000000000..18c8e0121 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -0,0 +1,665 @@ +package config + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" +) + +// CredentialsSourceName provides a name of the provider when config is +// loaded from environment. +const CredentialsSourceName = "EnvConfigCredentials" + +// Environment variables that will be read for configuration values. +const ( + awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" + awsAccessKeyEnvVar = "AWS_ACCESS_KEY" + + awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" + awsSecretKeyEnvVar = "AWS_SECRET_KEY" + + awsSessionTokenEnvVar = "AWS_SESSION_TOKEN" + + awsContainerCredentialsEndpointEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + awsContainerPProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + + awsRegionEnvVar = "AWS_REGION" + awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION" + + awsProfileEnvVar = "AWS_PROFILE" + awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE" + + awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" + + awsConfigFileEnvVar = "AWS_CONFIG_FILE" + + awsCustomCABundleEnvVar = "AWS_CA_BUNDLE" + + awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE" + + awsRoleARNEnvVar = "AWS_ROLE_ARN" + awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME" + + awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY" + + awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION" + + awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE" + + awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" + + awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED" + + awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS" + + awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT" + + awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT" + + awsDefaultMode = "AWS_DEFAULTS_MODE" + + awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS" + awsRetryMode = "AWS_RETRY_MODE" +) + +var ( + credAccessEnvKeys = []string{ + awsAccessKeyIDEnvVar, + awsAccessKeyEnvVar, + } + credSecretEnvKeys = []string{ + awsSecretAccessKeyEnvVar, + awsSecretKeyEnvVar, + } + regionEnvKeys = []string{ + awsRegionEnvVar, + awsDefaultRegionEnvVar, + } + profileEnvKeys = []string{ + awsProfileEnvVar, + awsDefaultProfileEnvVar, + } +) + +// EnvConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type EnvConfig struct { + // Environment configuration values. 
If set, both Access Key ID and Secret Access
+    // Key must be provided. A Session Token can optionally also be provided,
+    // but it is not required.
+    //
+    // # Access Key ID
+    // AWS_ACCESS_KEY_ID=AKID
+    // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+    //
+    // # Secret Access Key
+    // AWS_SECRET_ACCESS_KEY=SECRET
+    // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+    //
+    // # Session Token
+    // AWS_SESSION_TOKEN=TOKEN
+    Credentials aws.Credentials
+
+    // ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials
+    // using the endpointcreds.Provider
+    ContainerCredentialsEndpoint string
+
+    // ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve
+    // credentials from the container endpoint.
+    ContainerCredentialsRelativePath string
+
+    // ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization
+    // header when attempting to retrieve credentials from the container credentials endpoint.
+    ContainerAuthorizationToken string
+
+    // Region value will instruct the SDK where to make service API requests to. If it is
+    // not provided in the environment the region must be provided before a service
+    // client request is made.
+    //
+    // AWS_REGION=us-west-2
+    // AWS_DEFAULT_REGION=us-west-2
+    Region string
+
+    // Profile name the SDK should use when loading shared configuration from the
+    // shared configuration files. If not provided "default" will be used as the
+    // profile name.
+    //
+    // AWS_PROFILE=my_profile
+    // AWS_DEFAULT_PROFILE=my_profile
+    SharedConfigProfile string
+
+    // Shared credentials file path can be set to instruct the SDK to use an alternate
+    // file for the shared credentials. If not set the file will be loaded from
+    // $HOME/.aws/credentials on Linux/Unix based systems, and
+    // %USERPROFILE%\.aws\credentials on Windows.
+    //
+    // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+    SharedCredentialsFile string
+
+    // Shared config file path can be set to instruct the SDK to use an alternate
+    // file for the shared config. If not set the file will be loaded from
+    // $HOME/.aws/config on Linux/Unix based systems, and
+    // %USERPROFILE%\.aws\config on Windows.
+    //
+    // AWS_CONFIG_FILE=$HOME/my_shared_config
+    SharedConfigFile string
+
+    // Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+    // that the SDK will use instead of the system's root CA bundle.
+    // Only use this if you want to configure the SDK to use a custom set
+    // of CAs.
+    //
+    // Enabling this option will attempt to merge the Transport
+    // into the SDK's HTTP client. If the client's Transport is
+    // not a http.Transport an error will be returned. If the
+    // Transport's TLS config is set this option will cause the
+    // SDK to overwrite the Transport's TLS config's RootCAs value.
+    //
+    // Setting a custom HTTPClient in the aws.Config options will override this setting.
+    // To use this option and custom HTTP client, the HTTP client needs to be provided
+    // when creating the config. Not the service client.
+    //
+    // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+    CustomCABundle string
+
+    // Enables endpoint discovery via environment variables.
+    //
+    // AWS_ENABLE_ENDPOINT_DISCOVERY=true
+    EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+    // Specifies the WebIdentity token the SDK should use to assume a role
+    // with.
+ // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion *bool + + // Specifies if the EC2 IMDS service client is enabled. + // + // AWS_EC2_METADATA_DISABLED=true + EC2IMDSClientEnableState imds.ClientEnableState + + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 + EC2IMDSEndpointMode imds.EndpointModeState + + // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://fd00:ec2::254 + EC2IMDSEndpoint string + + // Specifies if the S3 service should disable multi-region access points + // support. + // + // AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS=true + S3DisableMultiRegionAccessPoints *bool + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // AWS_USE_DUALSTACK_ENDPOINT=true + UseDualStackEndpoint aws.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // AWS_USE_FIPS_ENDPOINT=true + UseFIPSEndpoint aws.FIPSEndpointState + + // Specifies the SDK Defaults Mode used by services. + // + // AWS_DEFAULTS_MODE=standard + DefaultsMode aws.DefaultsMode + + // Specifies the maximum number attempts an API client will call an + // operation that fails with a retryable error. + // + // AWS_MAX_ATTEMPTS=3 + RetryMaxAttempts int + + // Specifies the retry model the API client will be created with. + // + // aws_retry_mode=standard + RetryMode aws.RetryMode +} + +// loadEnvConfig reads configuration values from the OS's environment variables. +// Returning the a Config typed EnvConfig to satisfy the ConfigLoader func type. +func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) { + return NewEnvConfig() +} + +// NewEnvConfig retrieves the SDK's environment configuration. +// See `EnvConfig` for the values that will be retrieved. 
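+//
+// Illustrative sketch:
+//
+//	envCfg, err := config.NewEnvConfig()
+//	if err == nil && len(envCfg.Region) > 0 {
+//	    // AWS_REGION or AWS_DEFAULT_REGION was set
+//	}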
+// NewEnvConfig retrieves the SDK's environment configuration.
+// See `EnvConfig` for the values that will be retrieved.
+func NewEnvConfig() (EnvConfig, error) {
+	var cfg EnvConfig
+
+	creds := aws.Credentials{
+		Source: CredentialsSourceName,
+	}
+	setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys)
+	setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys)
+	if creds.HasKeys() {
+		creds.SessionToken = os.Getenv(awsSessionTokenEnvVar)
+		cfg.Credentials = creds
+	}
+
+	cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar)
+	cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar)
+	cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar)
+
+	setStringFromEnvVal(&cfg.Region, regionEnvKeys)
+	setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys)
+
+	cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar)
+	cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar)
+
+	cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar)
+
+	cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar)
+
+	cfg.RoleARN = os.Getenv(awsRoleARNEnvVar)
+	cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar)
+
+	if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled})
+	if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil {
+		return cfg, err
+	}
+	cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar)
+
+	if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpoint}); err != nil {
+		return cfg, err
+	}
+
+	if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil {
+		return cfg, err
+	}
+
+	if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil {
+		return cfg, err
+	}
+
+	if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil {
+		return cfg, err
+	}
+	if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil {
+		return cfg, err
+	}
+
+	return cfg, nil
+}
+
+func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
+	if len(c.DefaultsMode) == 0 {
+		return "", false, nil
+	}
+	return c.DefaultsMode, true, nil
+}
+
+// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if it was specified,
+// and not 0.
+func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+	if c.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+	return c.RetryMaxAttempts, true, nil
+}
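AWS_MAX_ATTEMPTS and AWS_RETRY_MODE are validated at load time rather than at request time; the mode string goes through aws.ParseRetryMode, as the helpers further below show. A small sketch of that validation in isolation:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// "standard" and "adaptive" are retry modes the SDK understands;
	// anything else makes ParseRetryMode return an error, which is how
	// setRetryModeFromEnvVal rejects a bad AWS_RETRY_MODE value.
	mode, err := aws.ParseRetryMode("standard")
	fmt.Println(mode, err) // standard <nil>

	_, err = aws.ParseRetryMode("sometimes")
	fmt.Println(err != nil) // true
}
```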
+// GetRetryMode returns the RetryMode of AWS_RETRY_MODE if it was specified, and a
+// valid value.
+func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+	if len(c.RetryMode) == 0 {
+		return "", false, nil
+	}
+	return c.RetryMode, true, nil
+}
+
+func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+		// The keys name a "disabled" switch (AWS_EC2_METADATA_DISABLED),
+		// so "true" disables the client and "false" enables it.
+		switch {
+		case strings.EqualFold(value, "true"):
+			*state = imds.ClientDisabled
+		case strings.EqualFold(value, "false"):
+			*state = imds.ClientEnabled
+		default:
+			continue
+		}
+		break
+	}
+}
+
+func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error {
+	for _, k := range keys {
+		if value := os.Getenv(k); len(value) > 0 {
+			if ok := mode.SetFromString(value); !ok {
+				return fmt.Errorf("invalid %s value: %s", k, value)
+			}
+			break
+		}
+	}
+	return nil
+}
+
+func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) {
+	for _, k := range keys {
+		if value := os.Getenv(k); len(value) > 0 {
+			*mode, err = aws.ParseRetryMode(value)
+			if err != nil {
+				return fmt.Errorf("invalid %s value, %w", k, err)
+			}
+			break
+		}
+	}
+	return nil
+}
+
+func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+		if err := mode.SetFromString(value); err != nil {
+			return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err)
+		}
+	}
+	return nil
+}
+
+// getRegion returns the AWS Region if set in the environment. Returns an empty
+// string if not set.
+func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) {
+	if len(c.Region) == 0 {
+		return "", false, nil
+	}
+	return c.Region, true, nil
+}
+
+// getSharedConfigProfile returns the shared config profile if set in the
+// environment. Returns an empty string if not set.
+func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+	if len(c.SharedConfigProfile) == 0 {
+		return "", false, nil
+	}
+
+	return c.SharedConfigProfile, true, nil
+}
+
+// getSharedConfigFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Config
+func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) {
+	var files []string
+	if v := c.SharedConfigFile; len(v) > 0 {
+		files = append(files, v)
+	}
+
+	if len(files) == 0 {
+		return nil, false, nil
+	}
+	return files, true, nil
+}
+
+// getSharedCredentialsFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Credentials
+func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) {
+	var files []string
+	if v := c.SharedCredentialsFile; len(v) > 0 {
+		files = append(files, v)
+	}
+	if len(files) == 0 {
+		return nil, false, nil
+	}
+	return files, true, nil
+}
+
+// getCustomCABundle returns the custom CA bundle's PEM bytes as a reader if
+// the file path was set in the environment and the file could be read.
+func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+	if len(c.CustomCABundle) == 0 {
+		return nil, false, nil
+	}
+
+	b, err := ioutil.ReadFile(c.CustomCABundle)
+	if err != nil {
+		return nil, false, err
+	}
+	return bytes.NewReader(b), true, nil
+}
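The reader returned by getCustomCABundle is later merged into the HTTP client's TLS configuration as a root CA pool. A minimal sketch of consuming such a PEM bundle the same way, using only the standard library (the file path is hypothetical):

```go
package main

import (
	"crypto/x509"
	"fmt"
	"os"
)

func main() {
	// Hypothetical bundle path; mirrors what AWS_CA_BUNDLE would point at.
	pem, err := os.ReadFile("my_custom_ca_bundle.pem")
	if err != nil {
		fmt.Println("read:", err)
		return
	}

	// AppendCertsFromPEM reports whether at least one certificate parsed,
	// which is the same sanity check a custom bundle must pass.
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		fmt.Println("no valid certificates in bundle")
		return
	}
	fmt.Println("bundle loaded into a CertPool")
}
```

+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.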
+func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { + if c.S3UseARNRegion == nil { + return false, false, nil + } + + return *c.S3UseARNRegion, true, nil +} + +// GetS3DisableMultRegionAccessPoints returns whether to disable multi-region access point +// support for the S3 client. +func (c EnvConfig) GetS3DisableMultRegionAccessPoints(ctx context.Context) (value, ok bool, err error) { + if c.S3DisableMultiRegionAccessPoints == nil { + return false, false, nil + } + + return *c.S3DisableMultiRegionAccessPoints, true, nil +} + +// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be +// used for requests. +func (c EnvConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { + if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { + return aws.DualStackEndpointStateUnset, false, nil + } + + return c.UseDualStackEndpoint, true, nil +} + +// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be +// used for requests. +func (c EnvConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { + if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { + return aws.FIPSEndpointStateUnset, false, nil + } + + return c.UseFIPSEndpoint, true, nil +} + +func setStringFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} + +func setIntFromEnvVal(dst *int, keys []string) error { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("invalid value %s=%s, %w", k, v, err) + } + *dst = int(i) + break + } + } + + return nil +} + +func setBoolPtrFromEnvVal(dst **bool, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + if *dst == nil { + *dst = new(bool) + } + + switch { + case strings.EqualFold(value, "false"): + **dst = false + case strings.EqualFold(value, "true"): + **dst = true + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + k, value) + } + break + } + + return nil +} + +func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, endpointDiscoveryDisabled): + *dst = aws.EndpointDiscoveryDisabled + case strings.EqualFold(value, endpointDiscoveryEnabled): + *dst = aws.EndpointDiscoveryEnabled + case strings.EqualFold(value, endpointDiscoveryAuto): + *dst = aws.EndpointDiscoveryAuto + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false or auto", + k, value) + } + } + return nil +} + +func setUseDualStackEndpointFromEnvVal(dst *aws.DualStackEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = aws.DualStackEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = aws.DualStackEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +func setUseFIPSEndpointFromEnvVal(dst *aws.FIPSEndpointState, keys []string) error { + for _, k := range keys { + 
value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = aws.FIPSEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = aws.FIPSEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +// GetEnableEndpointDiscovery returns resolved value for EnableEndpointDiscovery env variable setting. +func (c EnvConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) { + if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { + return aws.EndpointDiscoveryUnset, false, nil + } + + return c.EnableEndpointDiscovery, true, nil +} + +// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. +func (c EnvConfig) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { + if c.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { + return imds.ClientDefaultEnableState, false, nil + } + + return c.EC2IMDSClientEnableState, true, nil +} + +// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. +func (c EnvConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { + if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { + return imds.EndpointModeStateUnset, false, nil + } + + return c.EC2IMDSEndpointMode, true, nil +} + +// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. +func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) { + if len(c.EC2IMDSEndpoint) == 0 { + return "", false, nil + } + + return c.EC2IMDSEndpoint, true, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go new file mode 100644 index 000000000..654a7a77f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go @@ -0,0 +1,4 @@ +package config + +//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go +//go:generate gofmt -s -w ./ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go new file mode 100644 index 000000000..9c6d17216 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+ +package config + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.18.27" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go new file mode 100644 index 000000000..625147e97 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -0,0 +1,1005 @@ +package config + +import ( + "context" + "io" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" + "github.com/aws/aws-sdk-go-v2/credentials/processcreds" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + smithybearer "github.com/aws/smithy-go/auth/bearer" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// LoadOptionsFunc is a type alias for LoadOptions functional option +type LoadOptionsFunc func(*LoadOptions) error + +// LoadOptions are discrete set of options that are valid for loading the +// configuration +type LoadOptions struct { + + // Region is the region to send requests to. + Region string + + // Credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // Token provider for authentication operations with bearer authentication. + BearerAuthTokenProvider smithybearer.TokenProvider + + // HTTPClient the SDK's API clients will use to invoke HTTP requests. + HTTPClient HTTPClient + + // EndpointResolver that can be used to provide or override an endpoint for + // the given service and region. + // + // See the `aws.EndpointResolver` documentation on usage. + // + // Deprecated: See EndpointResolverWithOptions + EndpointResolver aws.EndpointResolver + + // EndpointResolverWithOptions that can be used to provide or override an + // endpoint for the given service and region. + // + // See the `aws.EndpointResolverWithOptions` documentation on usage. + EndpointResolverWithOptions aws.EndpointResolverWithOptions + + // RetryMaxAttempts specifies the maximum number attempts an API client + // will call an operation that fails with a retryable error. + // + // This value will only be used if Retryer option is nil. + RetryMaxAttempts int + + // RetryMode specifies the retry model the API client will be created with. + // + // This value will only be used if Retryer option is nil. + RetryMode aws.RetryMode + + // Retryer is a function that provides a Retryer implementation. A Retryer + // guides how HTTP requests should be retried in case of recoverable + // failures. + // + // If not nil, RetryMaxAttempts, and RetryMode will be ignored. + Retryer func() aws.Retryer + + // APIOptions provides the set of middleware mutations modify how the API + // client requests will be handled. This is useful for adding additional + // tracing data to a request, or changing behavior of the SDK's client. + APIOptions []func(*middleware.Stack) error + + // Logger writer interface to write logging messages to. + Logger logging.Logger + + // ClientLogMode is used to configure the events that will be sent to the + // configured logger. This can be used to configure the logging of signing, + // retries, request, and responses of the SDK clients. + // + // See the ClientLogMode type documentation for the complete set of logging + // modes and available configuration. 
+	ClientLogMode *aws.ClientLogMode
+
+	// SharedConfigProfile is the profile to be used when loading the SharedConfig
+	SharedConfigProfile string
+
+	// SharedConfigFiles is the slice of custom shared config files to use when
+	// loading the SharedConfig. A non-default profile used within a config file
+	// must have its name defined with the prefix 'profile '. e.g. [profile xyz]
+	// indicates a profile with name 'xyz'. To read more on the format of the
+	// config file, please refer to the documentation at
+	// https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config
+	//
+	// If duplicate profiles are provided within the same, or across multiple
+	// shared config files, the next parsed profile will override only the
+	// properties that conflict with the previously defined profile. Note that
+	// if duplicate profiles are provided within the SharedCredentialsFiles and
+	// SharedConfigFiles, the properties defined in the shared credentials file
+	// take precedence.
+	SharedConfigFiles []string
+
+	// SharedCredentialsFiles is the slice of custom shared credentials files to
+	// use when loading the SharedConfig. The profile name used within a
+	// credentials file must not have the prefix 'profile '. e.g. [xyz] indicates
+	// a profile with name 'xyz'. A profile declared as [profile xyz] will be
+	// ignored. To read more on the format of the credentials file, please
+	// refer to the documentation at
+	// https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds
+	//
+	// If duplicate profiles are provided within the same, or across multiple
+	// shared credentials files, the next parsed profile will override only
+	// properties that conflict with the previously defined profile. Note that
+	// if duplicate profiles are provided within the SharedCredentialsFiles and
+	// SharedConfigFiles, the properties defined in the shared credentials file
+	// take precedence.
+ SharedCredentialsFiles []string + + // CustomCABundle is CA bundle PEM bytes reader + CustomCABundle io.Reader + + // DefaultRegion is the fall back region, used if a region was not resolved + // from other sources + DefaultRegion string + + // UseEC2IMDSRegion indicates if SDK should retrieve the region + // from the EC2 Metadata service + UseEC2IMDSRegion *UseEC2IMDSRegion + + // CredentialsCacheOptions is a function for setting the + // aws.CredentialsCacheOptions + CredentialsCacheOptions func(*aws.CredentialsCacheOptions) + + // BearerAuthTokenCacheOptions is a function for setting the smithy-go + // auth/bearer#TokenCacheOptions + BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions) + + // SSOTokenProviderOptions is a function for setting the + // credentials/ssocreds.SSOTokenProviderOptions + SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions) + + // ProcessCredentialOptions is a function for setting + // the processcreds.Options + ProcessCredentialOptions func(*processcreds.Options) + + // EC2RoleCredentialOptions is a function for setting + // the ec2rolecreds.Options + EC2RoleCredentialOptions func(*ec2rolecreds.Options) + + // EndpointCredentialOptions is a function for setting + // the endpointcreds.Options + EndpointCredentialOptions func(*endpointcreds.Options) + + // WebIdentityRoleCredentialOptions is a function for setting + // the stscreds.WebIdentityRoleOptions + WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions) + + // AssumeRoleCredentialOptions is a function for setting the + // stscreds.AssumeRoleOptions + AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions) + + // SSOProviderOptions is a function for setting + // the ssocreds.Options + SSOProviderOptions func(options *ssocreds.Options) + + // LogConfigurationWarnings when set to true, enables logging + // configuration warnings + LogConfigurationWarnings *bool + + // S3UseARNRegion specifies if the S3 service should allow ARNs to direct + // the region, the client's requests are sent to. + S3UseARNRegion *bool + + // EnableEndpointDiscovery specifies if endpoint discovery is enable for + // the client. + EnableEndpointDiscovery aws.EndpointDiscoveryEnableState + + // Specifies if the EC2 IMDS service client is enabled. + // + // AWS_EC2_METADATA_DISABLED=true + EC2IMDSClientEnableState imds.ClientEnableState + + // Specifies the EC2 Instance Metadata Service default endpoint selection + // mode (IPv4 or IPv6) + EC2IMDSEndpointMode imds.EndpointModeState + + // Specifies the EC2 Instance Metadata Service endpoint to use. If + // specified it overrides EC2IMDSEndpointMode. + EC2IMDSEndpoint string + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + UseDualStackEndpoint aws.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + UseFIPSEndpoint aws.FIPSEndpointState + + // Specifies the SDK configuration mode for defaults. + DefaultsModeOptions DefaultsModeOptions +} + +func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { + if len(o.DefaultsModeOptions.Mode) == 0 { + return "", false, nil + } + return o.DefaultsModeOptions.Mode, true, nil +} + +// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the +// LoadOptions and not 0. 
+func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+	if o.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+	return o.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the RetryMode specified in the LoadOptions.
+func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+	if len(o.RetryMode) == 0 {
+		return "", false, nil
+	}
+	return o.RetryMode, true, nil
+}
+
+func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) {
+	if o.DefaultsModeOptions.IMDSClient == nil {
+		return nil, false, nil
+	}
+	return o.DefaultsModeOptions.IMDSClient, true, nil
+}
+
+// getRegion returns Region from config's LoadOptions
+func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) {
+	if len(o.Region) == 0 {
+		return "", false, nil
+	}
+
+	return o.Region, true, nil
+}
+
+// WithRegion is a helper function to construct functional options
+// that sets Region on config's LoadOptions. Setting the region to
+// an empty string will result in the region value being ignored.
+// If multiple WithRegion calls are made, the last call overrides
+// the previous call values.
+func WithRegion(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Region = v
+		return nil
+	}
+}
+
+// getDefaultRegion returns DefaultRegion from config's LoadOptions
+func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) {
+	if len(o.DefaultRegion) == 0 {
+		return "", false, nil
+	}
+
+	return o.DefaultRegion, true, nil
+}
+
+// WithDefaultRegion is a helper function to construct functional options
+// that sets a DefaultRegion on config's LoadOptions. Setting the default
+// region to an empty string will result in the default region value
+// being ignored. If multiple WithDefaultRegion calls are made, the last
+// call overrides the previous call values. Note that both WithRegion and
+// WithEC2IMDSRegion calls take precedence over a WithDefaultRegion call
+// when resolving the region.
+func WithDefaultRegion(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.DefaultRegion = v
+		return nil
+	}
+}
+
+// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions
+func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+	if len(o.SharedConfigProfile) == 0 {
+		return "", false, nil
+	}
+
+	return o.SharedConfigProfile, true, nil
+}
+
+// WithSharedConfigProfile is a helper function to construct functional options
+// that sets SharedConfigProfile on config's LoadOptions. Setting the shared
+// config profile to an empty string will result in the shared config profile
+// value being ignored.
+// If multiple WithSharedConfigProfile calls are made, the last call overrides
+// the previous call values.
+func WithSharedConfigProfile(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SharedConfigProfile = v
+		return nil
+	}
+}
+
+// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions
+func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) {
+	if o.SharedConfigFiles == nil {
+		return nil, false, nil
+	}
+
+	return o.SharedConfigFiles, true, nil
+}
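These getters pair with the With* functional options consumed by LoadDefaultConfig; later options override earlier ones. A minimal usage sketch (the file path and profile name are hypothetical):

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Later options win: the effective region here is eu-central-1.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-west-2"),
		config.WithRegion("eu-central-1"),
		// Hypothetical custom config file; a non-default profile inside
		// it must be declared as [profile xyz].
		config.WithSharedConfigFiles([]string{"/tmp/my_shared_config"}),
		config.WithSharedConfigProfile("xyz"),
	)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(cfg.Region)
}
```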
+// WithSharedConfigFiles is a helper function to construct functional options
+// that sets the slice of SharedConfigFiles on config's LoadOptions.
+// Setting the shared config files to a nil string slice will result in the
+// shared config files value being ignored.
+// If multiple WithSharedConfigFiles calls are made, the last call overrides
+// the previous call values.
+func WithSharedConfigFiles(v []string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SharedConfigFiles = v
+		return nil
+	}
+}
+
+// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions
+func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) {
+	if o.SharedCredentialsFiles == nil {
+		return nil, false, nil
+	}
+
+	return o.SharedCredentialsFiles, true, nil
+}
+
+// WithSharedCredentialsFiles is a helper function to construct functional options
+// that sets the slice of SharedCredentialsFiles on config's LoadOptions.
+// Setting the shared credentials files to a nil string slice will result in the
+// shared credentials files value being ignored.
+// If multiple WithSharedCredentialsFiles calls are made, the last call overrides
+// the previous call values.
+func WithSharedCredentialsFiles(v []string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SharedCredentialsFiles = v
+		return nil
+	}
+}
+
+// getCustomCABundle returns CustomCABundle from LoadOptions
+func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) {
+	if o.CustomCABundle == nil {
+		return nil, false, nil
+	}
+
+	return o.CustomCABundle, true, nil
+}
+
+// WithCustomCABundle is a helper function to construct functional options
+// that sets CustomCABundle on config's LoadOptions. Setting the custom CA bundle
+// to nil will result in the custom CA bundle value being ignored.
+// If multiple WithCustomCABundle calls are made, the last call overrides the
+// previous call values.
+func WithCustomCABundle(v io.Reader) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.CustomCABundle = v
+		return nil
+	}
+}
+
+// UseEC2IMDSRegion provides a regionProvider that retrieves the region
+// from the EC2 Metadata service.
+type UseEC2IMDSRegion struct {
+	// If unset will default to generic EC2 IMDS client.
+	Client *imds.Client
+}
+
+// getRegion attempts to retrieve the region from the EC2 Metadata service.
+func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	client := p.Client
+	if client == nil {
+		client = imds.New(imds.Options{})
+	}
+
+	result, err := client.GetRegion(ctx, nil)
+	if err != nil {
+		return "", false, err
+	}
+	if len(result.Region) != 0 {
+		return result.Region, true, nil
+	}
+	return "", false, nil
+}
+
+// getEC2IMDSRegion returns the value of the EC2 IMDS region.
+func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) {
+	if o.UseEC2IMDSRegion == nil {
+		return "", false, nil
+	}
+
+	return o.UseEC2IMDSRegion.getRegion(ctx)
+}
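Wiring UseEC2IMDSRegion in through its functional option looks like the following sketch; it only succeeds where IMDS is reachable (e.g. on an EC2 instance), and otherwise region resolution falls through to the other sources:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// With no option funcs, a default IMDS client is built by the resolver.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithEC2IMDSRegion(),
	)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("resolved region:", cfg.Region)
}
```

+// WithEC2IMDSRegion is a helper function to construct functional options
+// that enables resolving the region from EC2 IMDS. The function takes
+// in UseEC2IMDSRegion functional options, and can be used to set the
+// EC2 IMDS client which will be used to resolve the EC2 IMDS region.
+// If no functional option is provided, an EC2 IMDS client is built and used
+// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last
+// call overrides the previous call values. Note that a WithRegion call takes
+// precedence over WithEC2IMDSRegion when resolving the region.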
+func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.UseEC2IMDSRegion = &UseEC2IMDSRegion{}
+
+		for _, fn := range fnOpts {
+			fn(o.UseEC2IMDSRegion)
+		}
+		return nil
+	}
+}
+
+// getCredentialsProvider returns the credentials provider value
+func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) {
+	if o.Credentials == nil {
+		return nil, false, nil
+	}
+
+	return o.Credentials, true, nil
+}
+
+// WithCredentialsProvider is a helper function to construct functional options
+// that sets the credentials provider value on config's LoadOptions. If the
+// credentials provider is set to nil, the credentials provider value will be
+// ignored. If multiple WithCredentialsProvider calls are made, the last call
+// overrides the previous call values.
+func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Credentials = v
+		return nil
+	}
+}
+
+// getCredentialsCacheOptions returns the wrapped function to set aws.CredentialsCacheOptions
+func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) {
+	if o.CredentialsCacheOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.CredentialsCacheOptions, true, nil
+}
+
+// WithCredentialsCacheOptions is a helper function to construct functional
+// options that sets a function to modify the aws.CredentialsCacheOptions the
+// aws.CredentialsCache will be configured with, if the CredentialsCache is used
+// by the configuration loader.
+//
+// If multiple WithCredentialsCacheOptions calls are made, the last call
+// overrides the previous call values.
+func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.CredentialsCacheOptions = v
+		return nil
+	}
+}
+
+// getBearerAuthTokenProvider returns the bearer auth token provider value
+func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) {
+	if o.BearerAuthTokenProvider == nil {
+		return nil, false, nil
+	}
+
+	return o.BearerAuthTokenProvider, true, nil
+}
+
+// WithBearerAuthTokenProvider is a helper function to construct functional options
+// that sets the bearer auth token provider value on config's LoadOptions. If the
+// token provider is set to nil, the bearer auth token provider value will be
+// ignored. If multiple WithBearerAuthTokenProvider calls are made, the last call
+// overrides the previous call values.
+func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.BearerAuthTokenProvider = v
+		return nil
+	}
+}
+
+// getBearerAuthTokenCacheOptions returns the wrapped function to set smithybearer.TokenCacheOptions
+func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) {
+	if o.BearerAuthTokenCacheOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.BearerAuthTokenCacheOptions, true, nil
+}
+
+// WithBearerAuthTokenCacheOptions is a helper function to construct functional options
+// that sets a function to modify the TokenCacheOptions the smithy-go
+// auth/bearer#TokenCache will be configured with, if the TokenCache is used by
+// the configuration loader.
+//
+// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.BearerAuthTokenCacheOptions = v + return nil + } +} + +// getSSOTokenProviderOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions +func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) { + if o.SSOTokenProviderOptions == nil { + return nil, false, nil + } + + return o.SSOTokenProviderOptions, true, nil +} + +// WithSSOTokenProviderOptions is a helper function to construct functional +// options that sets a function to modify the SSOtokenProviderOptions the SDK's +// credentials/ssocreds#SSOProvider will be configured with, if the +// SSOTokenProvider is used by the configuration loader. +// +// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides +// the previous call values. +func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SSOTokenProviderOptions = v + return nil + } +} + +// getProcessCredentialOptions returns the wrapped function to set processcreds.Options +func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) { + if o.ProcessCredentialOptions == nil { + return nil, false, nil + } + + return o.ProcessCredentialOptions, true, nil +} + +// WithProcessCredentialOptions is a helper function to construct functional options +// that sets a function to use processcreds.Options on config's LoadOptions. +// If process credential options is set to nil, the process credential value will +// be ignored. If multiple WithProcessCredentialOptions calls are made, the last call +// overrides the previous call values. +func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ProcessCredentialOptions = v + return nil + } +} + +// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options +func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) { + if o.EC2RoleCredentialOptions == nil { + return nil, false, nil + } + + return o.EC2RoleCredentialOptions, true, nil +} + +// WithEC2RoleCredentialOptions is a helper function to construct functional options +// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If +// EC2 role credential options is set to nil, the EC2 role credential options value +// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made, +// the last call overrides the previous call values. +func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2RoleCredentialOptions = v + return nil + } +} + +// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options +func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) { + if o.EndpointCredentialOptions == nil { + return nil, false, nil + } + + return o.EndpointCredentialOptions, true, nil +} + +// WithEndpointCredentialOptions is a helper function to construct functional options +// that sets a function to use endpointcreds.Options on config's LoadOptions. If +// endpoint credential options is set to nil, the endpoint credential options +// value will be ignored. 
If multiple WithEndpointCredentialOptions calls are made, +// the last call overrides the previous call values. +func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EndpointCredentialOptions = v + return nil + } +} + +// getWebIdentityRoleCredentialOptions returns the wrapped function +func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) { + if o.WebIdentityRoleCredentialOptions == nil { + return nil, false, nil + } + + return o.WebIdentityRoleCredentialOptions, true, nil +} + +// WithWebIdentityRoleCredentialOptions is a helper function to construct +// functional options that sets a function to use stscreds.WebIdentityRoleOptions +// on config's LoadOptions. If web identity role credentials options is set to nil, +// the web identity role credentials value will be ignored. If multiple +// WithWebIdentityRoleCredentialOptions calls are made, the last call +// overrides the previous call values. +func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.WebIdentityRoleCredentialOptions = v + return nil + } +} + +// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions +func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) { + if o.AssumeRoleCredentialOptions == nil { + return nil, false, nil + } + + return o.AssumeRoleCredentialOptions, true, nil +} + +// WithAssumeRoleCredentialOptions is a helper function to construct +// functional options that sets a function to use stscreds.AssumeRoleOptions +// on config's LoadOptions. If assume role credentials options is set to nil, +// the assume role credentials value will be ignored. If multiple +// WithAssumeRoleCredentialOptions calls are made, the last call overrides +// the previous call values. +func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.AssumeRoleCredentialOptions = v + return nil + } +} + +func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) { + if o.HTTPClient == nil { + return nil, false, nil + } + + return o.HTTPClient, true, nil +} + +// WithHTTPClient is a helper function to construct functional options +// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil, +// the HTTPClient value will be ignored. +// If multiple WithHTTPClient calls are made, the last call overrides +// the previous call values. +func WithHTTPClient(v HTTPClient) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.HTTPClient = v + return nil + } +} + +func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) { + if o.APIOptions == nil { + return nil, false, nil + } + + return o.APIOptions, true, nil +} + +// WithAPIOptions is a helper function to construct functional options +// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the +// APIOptions value is ignored. If multiple WithAPIOptions calls are +// made, the last call overrides the previous call values. +func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc { + return func(o *LoadOptions) error { + if v == nil { + return nil + } + + o.APIOptions = append(o.APIOptions, v...) 
+		return nil
+	}
+}
+
+func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+	if o.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+
+	return o.RetryMaxAttempts, true, nil
+}
+
+// WithRetryMaxAttempts is a helper function to construct functional options that sets
+// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is
+// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides
+// the previous call values.
+//
+// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
+func WithRetryMaxAttempts(v int) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.RetryMaxAttempts = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+	if o.RetryMode == "" {
+		return "", false, nil
+	}
+
+	return o.RetryMode, true, nil
+}
+
+// WithRetryMode is a helper function to construct functional options that sets
+// RetryMode on LoadOptions. If RetryMode is unset, the RetryMode value is
+// ignored. If multiple WithRetryMode calls are made, the last call overrides
+// the previous call values.
+//
+// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
+func WithRetryMode(v aws.RetryMode) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.RetryMode = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) {
+	if o.Retryer == nil {
+		return nil, false, nil
+	}
+
+	return o.Retryer, true, nil
+}
+
+// WithRetryer is a helper function to construct functional options
+// that sets Retryer on LoadOptions. If Retryer is set to nil, the
+// Retryer value is ignored. If multiple WithRetryer calls are
+// made, the last call overrides the previous call values.
+func WithRetryer(v func() aws.Retryer) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Retryer = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) {
+	if o.EndpointResolver == nil {
+		return nil, false, nil
+	}
+
+	return o.EndpointResolver, true, nil
+}
+
+// WithEndpointResolver is a helper function to construct functional options
+// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil,
+// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
+// are made, the last call overrides the previous call values.
+//
+// Deprecated: See WithEndpointResolverWithOptions
+func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EndpointResolver = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) {
+	if o.EndpointResolverWithOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.EndpointResolverWithOptions, true, nil
+}
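A sketch of plugging a resolver in through the option below; the URL and service-name check are hypothetical, and returning EndpointNotFoundError restores the SDK's default endpoint resolution:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Hypothetical: route one service to a local endpoint, defer the rest.
	resolver := aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			if service == "S3" {
				return aws.Endpoint{URL: "http://localhost:9000"}, nil
			}
			// EndpointNotFoundError tells the SDK to fall back to its
			// default resolution for everything else.
			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
		})

	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithEndpointResolverWithOptions(resolver),
	)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	_ = cfg
}
```

+// WithEndpointResolverWithOptions is a helper function to construct functional options
+// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil,
+// the EndpointResolver value is ignored. If multiple WithEndpointResolverWithOptions calls
+// are made, the last call overrides the previous call values.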
+func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EndpointResolverWithOptions = v + return nil + } +} + +func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) { + if o.Logger == nil { + return nil, false, nil + } + + return o.Logger, true, nil +} + +// WithLogger is a helper function to construct functional options +// that sets Logger on LoadOptions. If Logger is set to nil, the +// Logger value will be ignored. If multiple WithLogger calls are made, +// the last call overrides the previous call values. +func WithLogger(v logging.Logger) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.Logger = v + return nil + } +} + +func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) { + if o.ClientLogMode == nil { + return 0, false, nil + } + + return *o.ClientLogMode, true, nil +} + +// WithClientLogMode is a helper function to construct functional options +// that sets client log mode on LoadOptions. If client log mode is set to nil, +// the client log mode value will be ignored. If multiple WithClientLogMode calls are made, +// the last call overrides the previous call values. +func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ClientLogMode = &v + return nil + } +} + +func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) { + if o.LogConfigurationWarnings == nil { + return false, false, nil + } + return *o.LogConfigurationWarnings, true, nil +} + +// WithLogConfigurationWarnings is a helper function to construct +// functional options that can be used to set LogConfigurationWarnings +// on LoadOptions. +// +// If multiple WithLogConfigurationWarnings calls are made, the last call +// overrides the previous call values. +func WithLogConfigurationWarnings(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.LogConfigurationWarnings = &v + return nil + } +} + +// GetS3UseARNRegion returns whether to allow ARNs to direct the region +// the S3 client's requests are sent to. +func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) { + if o.S3UseARNRegion == nil { + return false, false, nil + } + return *o.S3UseARNRegion, true, nil +} + +// WithS3UseARNRegion is a helper function to construct functional options +// that can be used to set S3UseARNRegion on LoadOptions. +// If multiple WithS3UseARNRegion calls are made, the last call overrides +// the previous call values. +func WithS3UseARNRegion(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.S3UseARNRegion = &v + return nil + } +} + +// GetEnableEndpointDiscovery returns if the EnableEndpointDiscovery flag is set. +func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { + if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { + return aws.EndpointDiscoveryUnset, false, nil + } + return o.EnableEndpointDiscovery, true, nil +} + +// WithEndpointDiscovery is a helper function to construct functional options +// that can be used to enable endpoint discovery on LoadOptions for supported clients. +// If multiple WithEndpointDiscovery calls are made, the last call overrides +// the previous call values. 
+func WithEndpointDiscovery(v aws.EndpointDiscoveryEnableState) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EnableEndpointDiscovery = v + return nil + } +} + +// getSSOProviderOptions returns AssumeRoleCredentialOptions from LoadOptions +func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) { + if o.SSOProviderOptions == nil { + return nil, false, nil + } + + return o.SSOProviderOptions, true, nil +} + +// WithSSOProviderOptions is a helper function to construct +// functional options that sets a function to use ssocreds.Options +// on config's LoadOptions. If the SSO credential provider options is set to nil, +// the sso provider options value will be ignored. If multiple +// WithSSOProviderOptions calls are made, the last call overrides +// the previous call values. +func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SSOProviderOptions = v + return nil + } +} + +// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. +func (o LoadOptions) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { + if o.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { + return imds.ClientDefaultEnableState, false, nil + } + + return o.EC2IMDSClientEnableState, true, nil +} + +// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. +func (o LoadOptions) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { + if o.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { + return imds.EndpointModeStateUnset, false, nil + } + + return o.EC2IMDSEndpointMode, true, nil +} + +// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. +func (o LoadOptions) GetEC2IMDSEndpoint() (string, bool, error) { + if len(o.EC2IMDSEndpoint) == 0 { + return "", false, nil + } + + return o.EC2IMDSEndpoint, true, nil +} + +// WithEC2IMDSClientEnableState is a helper function to construct functional options that sets the EC2IMDSClientEnableState. +func WithEC2IMDSClientEnableState(v imds.ClientEnableState) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2IMDSClientEnableState = v + return nil + } +} + +// WithEC2IMDSEndpointMode is a helper function to construct functional options that sets the EC2IMDSEndpointMode. +func WithEC2IMDSEndpointMode(v imds.EndpointModeState) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2IMDSEndpointMode = v + return nil + } +} + +// WithEC2IMDSEndpoint is a helper function to construct functional options that sets the EC2IMDSEndpoint. +func WithEC2IMDSEndpoint(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2IMDSEndpoint = v + return nil + } +} + +// WithUseDualStackEndpoint is a helper function to construct +// functional options that can be used to set UseDualStackEndpoint on LoadOptions. +func WithUseDualStackEndpoint(v aws.DualStackEndpointState) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.UseDualStackEndpoint = v + return nil + } +} + +// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be +// used for requests. 
+func (o LoadOptions) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { + if o.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { + return aws.DualStackEndpointStateUnset, false, nil + } + return o.UseDualStackEndpoint, true, nil +} + +// WithUseFIPSEndpoint is a helper function to construct +// functional options that can be used to set UseFIPSEndpoint on LoadOptions. +func WithUseFIPSEndpoint(v aws.FIPSEndpointState) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.UseFIPSEndpoint = v + return nil + } +} + +// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be +// used for requests. +func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { + if o.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { + return aws.FIPSEndpointStateUnset, false, nil + } + return o.UseFIPSEndpoint, true, nil +} + +// WithDefaultsMode sets the SDK defaults configuration mode to the value provided. +// +// Zero or more functional options can be provided to provide configuration options for performing +// environment discovery when using aws.DefaultsModeAuto. +func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc { + do := DefaultsModeOptions{ + Mode: mode, + } + for _, fn := range optFns { + fn(&do) + } + return func(options *LoadOptions) error { + options.DefaultsModeOptions = do + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go new file mode 100644 index 000000000..b629137c8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go @@ -0,0 +1,51 @@ +package config + +import ( + "fmt" + "net" + "net/url" +) + +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + if len(addrs) == 0 { + return false, fmt.Errorf("no addrs found for host, %s", host) + } + + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func validateLocalURL(v string) error { + u, err := url.Parse(v) + if err != nil { + return err + } + + host := u.Hostname() + if len(host) == 0 { + return fmt.Errorf("unable to parse host from local HTTP cred provider URL") + } else if isLoopback, err := isLoopbackHost(host); err != nil { + return fmt.Errorf("failed to resolve host %q, %v", host, err) + } else if !isLoopback { + return fmt.Errorf("invalid endpoint host, %q, only host resolving to loopback addresses are allowed", host) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go new file mode 100644 index 000000000..6f1ab8cd1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -0,0 +1,601 @@ +package config + +import ( + "context" + "io" + "net/http" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" + "github.com/aws/aws-sdk-go-v2/credentials/processcreds" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + smithybearer 
"github.com/aws/smithy-go/auth/bearer" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// sharedConfigProfileProvider provides access to the shared config profile +// name external configuration value. +type sharedConfigProfileProvider interface { + getSharedConfigProfile(ctx context.Context) (string, bool, error) +} + +// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(sharedConfigProfileProvider); ok { + value, found, err = p.getSharedConfigProfile(ctx) + if err != nil || found { + break + } + } + } + return +} + +// sharedConfigFilesProvider provides access to the shared config filesnames +// external configuration value. +type sharedConfigFilesProvider interface { + getSharedConfigFiles(ctx context.Context) ([]string, bool, error) +} + +// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(sharedConfigFilesProvider); ok { + value, found, err = p.getSharedConfigFiles(ctx) + if err != nil || found { + break + } + } + } + + return +} + +// sharedCredentialsFilesProvider provides access to the shared credentials filesnames +// external configuration value. +type sharedCredentialsFilesProvider interface { + getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) +} + +// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(sharedCredentialsFilesProvider); ok { + value, found, err = p.getSharedCredentialsFiles(ctx) + if err != nil || found { + break + } + } + } + + return +} + +// customCABundleProvider provides access to the custom CA bundle PEM bytes. +type customCABundleProvider interface { + getCustomCABundle(ctx context.Context) (io.Reader, bool, error) +} + +// getCustomCABundle searches the configs for a customCABundleProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(customCABundleProvider); ok { + value, found, err = p.getCustomCABundle(ctx) + if err != nil || found { + break + } + } + } + + return +} + +// regionProvider provides access to the region external configuration value. +type regionProvider interface { + getRegion(ctx context.Context) (string, bool, error) +} + +// getRegion searches the configs for a regionProvider and returns the value +// if found. Returns an error if a provider fails before a value is found. 
+func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(regionProvider); ok { + value, found, err = p.getRegion(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ec2IMDSRegionProvider provides access to the ec2 imds region +// configuration value +type ec2IMDSRegionProvider interface { + getEC2IMDSRegion(ctx context.Context) (string, bool, error) +} + +// getEC2IMDSRegion searches the configs for a ec2IMDSRegionProvider and +// returns the value if found. Returns an error if a provider fails before +// a value is found. +func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) { + for _, cfg := range configs { + if provider, ok := cfg.(ec2IMDSRegionProvider); ok { + region, found, err = provider.getEC2IMDSRegion(ctx) + if err != nil || found { + break + } + } + } + return +} + +// credentialsProviderProvider provides access to the credentials external +// configuration value. +type credentialsProviderProvider interface { + getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) +} + +// getCredentialsProvider searches the configs for a credentialsProviderProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) { + for _, cfg := range configs { + if provider, ok := cfg.(credentialsProviderProvider); ok { + p, found, err = provider.getCredentialsProvider(ctx) + if err != nil || found { + break + } + } + } + return +} + +// credentialsCacheOptionsProvider is an interface for retrieving a function for setting +// the aws.CredentialsCacheOptions. +type credentialsCacheOptionsProvider interface { + getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) +} + +// getCredentialsCacheOptionsProvider is an interface for retrieving a function for setting +// the aws.CredentialsCacheOptions. +func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) ( + f func(*aws.CredentialsCacheOptions), found bool, err error, +) { + for _, config := range configs { + if p, ok := config.(credentialsCacheOptionsProvider); ok { + f, found, err = p.getCredentialsCacheOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// bearerAuthTokenProviderProvider provides access to the bearer authentication +// token external configuration value. +type bearerAuthTokenProviderProvider interface { + getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error) +} + +// getBearerAuthTokenProvider searches the config sources for a +// bearerAuthTokenProviderProvider and returns the value if found. Returns an +// error if a provider fails before a value is found. +func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) { + for _, cfg := range configs { + if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok { + p, found, err = provider.getBearerAuthTokenProvider(ctx) + if err != nil || found { + break + } + } + } + return +} + +// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for +// setting the smithy-go auth/bearer#TokenCacheOptions. 
+type bearerAuthTokenCacheOptionsProvider interface {
+	getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error)
+}
+
+// getBearerAuthTokenCacheOptions searches the config sources for a
+// bearerAuthTokenCacheOptionsProvider and returns the value if found. Returns
+// an error if a provider fails before a value is found.
+func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) (
+	f func(*smithybearer.TokenCacheOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok {
+			f, found, err = p.getBearerAuthTokenCacheOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ssoTokenProviderOptionsProvider is an interface for retrieving a function for
+// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
+type ssoTokenProviderOptionsProvider interface {
+	getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error)
+}
+
+// getSSOTokenProviderOptions searches the config sources for an
+// ssoTokenProviderOptionsProvider and returns the value if found. Returns an
+// error if a provider fails before a value is found.
+func getSSOTokenProviderOptions(ctx context.Context, configs configs) (
+	f func(*ssocreds.SSOTokenProviderOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(ssoTokenProviderOptionsProvider); ok {
+			f, found, err = p.getSSOTokenProviderOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// processCredentialOptions is an interface for retrieving a function for setting
+// the processcreds.Options.
+type processCredentialOptions interface {
+	getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error)
+}
+
+// getProcessCredentialOptions searches the slice of configs and returns the first function found
+func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(processCredentialOptions); ok {
+			f, found, err = p.getProcessCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ec2RoleCredentialOptionsProvider is an interface for retrieving a function
+// for setting the ec2rolecreds.Options.
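+//
+// A hedged sketch of supplying such an option through the public loader
+// (the IMDS client below is a placeholder choice):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithEC2RoleCredentialOptions(func(o *ec2rolecreds.Options) {
+//			o.Client = imds.New(imds.Options{})
+//		}),
+//	)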
+type ec2RoleCredentialOptionsProvider interface { + getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) +} + +// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found +func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) { + for _, config := range configs { + if p, ok := config.(ec2RoleCredentialOptionsProvider); ok { + f, found, err = p.getEC2RoleCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources +type defaultRegionProvider interface { + getDefaultRegion(ctx context.Context) (string, bool, error) +} + +// getDefaultRegion searches the slice of configs and returns the first fallback region found +func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, config := range configs { + if p, ok := config.(defaultRegionProvider); ok { + value, found, err = p.getDefaultRegion(ctx) + if err != nil || found { + break + } + } + } + return +} + +// endpointCredentialOptionsProvider is an interface for retrieving a function for setting +// the endpointcreds.ProviderOptions. +type endpointCredentialOptionsProvider interface { + getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error) +} + +// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found +func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) { + for _, config := range configs { + if p, ok := config.(endpointCredentialOptionsProvider); ok { + f, found, err = p.getEndpointCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting +// the stscreds.WebIdentityRoleProvider. +type webIdentityRoleCredentialOptionsProvider interface { + getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) +} + +// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found +func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) { + for _, config := range configs { + if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok { + f, found, err = p.getWebIdentityRoleCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting +// the stscreds.AssumeRoleOptions. 
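+//
+// For illustration only (the session name is a placeholder), callers
+// typically supply this through the public loader options:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithAssumeRoleCredentialOptions(func(o *stscreds.AssumeRoleOptions) {
+//			o.RoleSessionName = "my-session"
+//		}),
+//	)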
+type assumeRoleCredentialOptionsProvider interface { + getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error) +} + +// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found +func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) { + for _, config := range configs { + if p, ok := config.(assumeRoleCredentialOptionsProvider); ok { + f, found, err = p.getAssumeRoleCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// HTTPClient is an HTTP client implementation +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// httpClientProvider is an interface for retrieving HTTPClient +type httpClientProvider interface { + getHTTPClient(ctx context.Context) (HTTPClient, bool, error) +} + +// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs +func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) { + for _, config := range configs { + if p, ok := config.(httpClientProvider); ok { + client, found, err = p.getHTTPClient(ctx) + if err != nil || found { + break + } + } + } + return +} + +// apiOptionsProvider is an interface for retrieving APIOptions +type apiOptionsProvider interface { + getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) +} + +// getAPIOptions searches the slice of configs and returns the APIOptions set on configs +func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) { + for _, config := range configs { + if p, ok := config.(apiOptionsProvider); ok { + // retrieve APIOptions from configs and set it on cfg + apiOptions, found, err = p.getAPIOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source +type endpointResolverProvider interface { + getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) +} + +// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used +// to configure the aws.Config.EndpointResolver value. +func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) { + for _, c := range configs { + if p, ok := c.(endpointResolverProvider); ok { + f, found, err = p.getEndpointResolver(ctx) + if err != nil || found { + break + } + } + } + return +} + +// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source +type endpointResolverWithOptionsProvider interface { + getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) +} + +// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used +// to configure the aws.Config.EndpointResolver value. 
+func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(endpointResolverWithOptionsProvider); ok {
+			f, found, err = p.getEndpointResolverWithOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// loggerProvider is an interface for retrieving a logging.Logger from a configuration source.
+type loggerProvider interface {
+	getLogger(ctx context.Context) (logging.Logger, bool, error)
+}
+
+// getLogger searches the provided config sources for a logging.Logger that can be used
+// to configure the aws.Config.Logger value.
+func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(loggerProvider); ok {
+			l, found, err = p.getLogger(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source.
+type clientLogModeProvider interface {
+	getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error)
+}
+
+func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(clientLogModeProvider); ok {
+			m, found, err = p.getClientLogMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// retryProvider is a configuration provider for a custom Retryer.
+type retryProvider interface {
+	getRetryer(ctx context.Context) (func() aws.Retryer, bool, error)
+}
+
+func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryProvider); ok {
+			v, found, err = p.getRetryer(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// logConfigurationWarningsProvider is a configuration provider for
+// retrieving a boolean indicating whether configuration issues should
+// be logged when loading from config sources
+type logConfigurationWarningsProvider interface {
+	getLogConfigurationWarnings(ctx context.Context) (bool, bool, error)
+}
+
+func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(logConfigurationWarningsProvider); ok {
+			v, found, err = p.getLogConfigurationWarnings(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ssoCredentialOptionsProvider is an interface for retrieving a function for setting
+// the ssocreds.Options.
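+//
+// A minimal sketch of supplying such an option (the file path is a
+// placeholder, not code from this package):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithSSOProviderOptions(func(o *ssocreds.Options) {
+//			o.CachedTokenFilepath = "/tmp/sso-token.json"
+//		}),
+//	)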
+type ssoCredentialOptionsProvider interface { + getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error) +} + +func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) { + for _, c := range configs { + if p, ok := c.(ssoCredentialOptionsProvider); ok { + v, found, err = p.getSSOProviderOptions(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type defaultsModeIMDSClientProvider interface { + getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error) +} + +func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) { + for _, c := range configs { + if p, ok := c.(defaultsModeIMDSClientProvider); ok { + v, found, err = p.getDefaultsModeIMDSClient(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type defaultsModeProvider interface { + getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error) +} + +func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) { + for _, c := range configs { + if p, ok := c.(defaultsModeProvider); ok { + v, found, err = p.getDefaultsMode(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type retryMaxAttemptsProvider interface { + GetRetryMaxAttempts(context.Context) (int, bool, error) +} + +func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) { + for _, c := range configs { + if p, ok := c.(retryMaxAttemptsProvider); ok { + v, found, err = p.GetRetryMaxAttempts(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type retryModeProvider interface { + GetRetryMode(context.Context) (aws.RetryMode, bool, error) +} + +func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) { + for _, c := range configs { + if p, ok := c.(retryModeProvider); ok { + v, found, err = p.GetRetryMode(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go new file mode 100644 index 000000000..4428ba49c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -0,0 +1,307 @@ +package config + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/smithy-go/logging" +) + +// resolveDefaultAWSConfig will write default configuration values into the cfg +// value. It will write the default values, overwriting any previous value. +// +// This should be used as the first resolver in the slice of resolvers when +// resolving external configuration. +func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error { + var sources []interface{} + for _, s := range cfgs { + sources = append(sources, s) + } + + *cfg = aws.Config{ + Credentials: aws.AnonymousCredentials{}, + Logger: logging.NewStandardLogger(os.Stderr), + ConfigSources: sources, + } + return nil +} + +// resolveCustomCABundle extracts the first instance of a custom CA bundle filename +// from the external configurations. It will update the HTTP Client's builder +// to be configured with the custom CA bundle. 
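+//
+// For illustration (the file path is a placeholder), a bundle is usually
+// supplied through the public loader:
+//
+//	pem, err := os.Open("/path/to/ca-bundle.pem")
+//	if err != nil {
+//		// handle error
+//	}
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithCustomCABundle(pem),
+//	)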
+// +// Config provider used: +// * customCABundleProvider +func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error { + pemCerts, found, err := getCustomCABundle(ctx, cfgs) + if err != nil { + // TODO error handling, What is the best way to handle this? + // capture previous errors continue. error out if all errors + return err + } + if !found { + return nil + } + + if cfg.HTTPClient == nil { + cfg.HTTPClient = awshttp.NewBuildableClient() + } + + trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return fmt.Errorf("unable to add custom RootCAs HTTPClient, "+ + "has no WithTransportOptions, %T", cfg.HTTPClient) + } + + var appendErr error + client := trOpts.WithTransportOptions(func(tr *http.Transport) { + if tr.TLSClientConfig == nil { + tr.TLSClientConfig = &tls.Config{} + } + if tr.TLSClientConfig.RootCAs == nil { + tr.TLSClientConfig.RootCAs = x509.NewCertPool() + } + + b, err := ioutil.ReadAll(pemCerts) + if err != nil { + appendErr = fmt.Errorf("failed to read custom CA bundle PEM file") + } + + if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) { + appendErr = fmt.Errorf("failed to load custom CA bundle PEM file") + } + }) + if appendErr != nil { + return appendErr + } + + cfg.HTTPClient = client + return err +} + +// resolveRegion extracts the first instance of a Region from the configs slice. +// +// Config providers used: +// * regionProvider +func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + v, found, err := getRegion(ctx, configs) + if err != nil { + // TODO error handling, What is the best way to handle this? + // capture previous errors continue. error out if all errors + return err + } + if !found { + return nil + } + + cfg.Region = v + return nil +} + +// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default +// region if region had not been resolved from other sources. +func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + if len(cfg.Region) > 0 { + return nil + } + + v, found, err := getDefaultRegion(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.Region = v + + return nil +} + +// resolveHTTPClient extracts the first instance of a HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance +// if one has not been resolved from other sources. +func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error { + c, found, err := getHTTPClient(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.HTTPClient = c + return nil +} + +// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options +// if one has not been resolved from other sources. 
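+//
+// A hedged sketch of how API options usually reach this resolver (the no-op
+// middleware mutator below is a placeholder):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithAPIOptions([]func(*middleware.Stack) error{
+//			func(s *middleware.Stack) error { return nil },
+//		}),
+//	)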
+func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	o, found, err := getAPIOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.APIOptions = o
+
+	return nil
+}
+
+// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice
+// and sets the function's result on the aws.Config.EndpointResolver
+func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error {
+	endpointResolver, found, err := getEndpointResolver(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.EndpointResolver = endpointResolver
+
+	return nil
+}
+
+// resolveEndpointResolverWithOptions extracts the first instance of an EndpointResolverWithOptions from the config slice
+// and sets the function's result on the aws.Config.EndpointResolverWithOptions
+func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.EndpointResolverWithOptions = endpointResolver
+
+	return nil
+}
+
+func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error {
+	logger, found, err := getLogger(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Logger = logger
+
+	return nil
+}
+
+func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+	mode, found, err := getClientLogMode(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.ClientLogMode = mode
+
+	return nil
+}
+
+func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error {
+	retryer, found, err := getRetryer(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	if found {
+		cfg.Retryer = retryer
+		return nil
+	}
+
+	// Only load the retry options if a custom retryer has not been specified.
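+	// max_attempts and retry_mode from the environment or shared config are
+	// consulted below only when no custom Retryer was found above.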
+ if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil { + return err + } + return resolveRetryMode(ctx, cfg, configs) +} + +func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + if len(cfg.Region) > 0 { + return nil + } + + region, found, err := getEC2IMDSRegion(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.Region = region + + return nil +} + +func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error { + defaultsMode, found, err := getDefaultsMode(ctx, configs) + if err != nil { + return err + } + if !found { + defaultsMode = aws.DefaultsModeLegacy + } + + var environment aws.RuntimeEnvironment + if defaultsMode == aws.DefaultsModeAuto { + envConfig, _, _ := getAWSConfigSources(configs) + + client, found, err := getDefaultsModeIMDSClient(ctx, configs) + if err != nil { + return err + } + if !found { + client = imds.NewFromConfig(*cfg) + } + + environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client) + if err != nil { + return err + } + } + + cfg.DefaultsMode = defaultsMode + cfg.RuntimeEnvironment = environment + + return nil +} + +func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error { + maxAttempts, found, err := getRetryMaxAttempts(ctx, configs) + if err != nil || !found { + return err + } + cfg.RetryMaxAttempts = maxAttempts + + return nil +} + +func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error { + retryMode, found, err := getRetryMode(ctx, configs) + if err != nil || !found { + return err + } + cfg.RetryMode = retryMode + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go new file mode 100644 index 000000000..a8ebb3c0a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go @@ -0,0 +1,122 @@ +package config + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/service/ssooidc" + smithybearer "github.com/aws/smithy-go/auth/bearer" +) + +// resolveBearerAuthToken extracts a token provider from the config sources. +// +// If an explicit bearer authentication token provider is not found the +// resolver will fallback to resolving token provider via other config sources +// such as SharedConfig. +func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error { + found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs) + if found || err != nil { + return err + } + + return resolveBearerAuthTokenProviderChain(ctx, cfg, configs) +} + +// resolveBearerAuthTokenProvider extracts the first instance of +// BearerAuthTokenProvider from the config sources. +// +// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure +// the Token is only refreshed when needed. This also protects the +// TokenProvider so it can be used concurrently. 
+// +// Config providers used: +// * bearerAuthTokenProviderProvider +func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { + tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs) + if !found || err != nil { + return false, err + } + + cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( + ctx, configs, tokenProvider) + if err != nil { + return false, err + } + + return true, nil +} + +func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { + _, sharedConfig, _ := getAWSConfigSources(configs) + + var provider smithybearer.TokenProvider + + if sharedConfig.SSOSession != nil { + provider, err = resolveBearerAuthSSOTokenProvider( + ctx, cfg, sharedConfig.SSOSession, configs) + } + + if err == nil && provider != nil { + cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( + ctx, configs, provider) + } + + return err +} + +func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) { + ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) + if err != nil { + return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) + } + + var optFns []func(*ssocreds.SSOTokenProviderOptions) + if found { + optFns = append(optFns, ssoTokenProviderOptionsFn) + } + + cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name) + if err != nil { + return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err) + } + + client := ssooidc.NewFromConfig(*cfg) + provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...) + + return provider, nil +} + +// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go +// bearer/auth#TokenCache with the provided options if the provider is not +// already a TokenCache. +func wrapWithBearerAuthTokenCache( + ctx context.Context, + cfgs configs, + provider smithybearer.TokenProvider, + optFns ...func(*smithybearer.TokenCacheOptions), +) (smithybearer.TokenProvider, error) { + _, ok := provider.(*smithybearer.TokenCache) + if ok { + return provider, nil + } + + tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs) + if err != nil { + return nil, err + } + + opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns)) + opts = append(opts, func(o *smithybearer.TokenCacheOptions) { + o.RefreshBeforeExpires = 5 * time.Minute + o.RetrieveBearerTokenTimeout = 30 * time.Second + }) + opts = append(opts, optFns...) 
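+	// Option functions are applied in order, so the config-sourced options
+	// appended below can override both the defaults above and the caller's
+	// optFns.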
+ if optionsFound { + opts = append(opts, tokenCacheConfigOptions) + } + + return smithybearer.NewTokenCache(provider, opts...), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go new file mode 100644 index 000000000..b21cd3080 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go @@ -0,0 +1,499 @@ +package config + +import ( + "context" + "fmt" + "net/url" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" + "github.com/aws/aws-sdk-go-v2/credentials/processcreds" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/service/sso" + "github.com/aws/aws-sdk-go-v2/service/ssooidc" + "github.com/aws/aws-sdk-go-v2/service/sts" +) + +const ( + // valid credential source values + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +var ( + ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing +) + +// resolveCredentials extracts a credential provider from slice of config +// sources. +// +// If an explicit credential provider is not found the resolver will fallback +// to resolving credentials by extracting a credential provider from EnvConfig +// and SharedConfig. +func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { + found, err := resolveCredentialProvider(ctx, cfg, configs) + if found || err != nil { + return err + } + + return resolveCredentialChain(ctx, cfg, configs) +} + +// resolveCredentialProvider extracts the first instance of Credentials from the +// config slices. +// +// The resolved CredentialProvider will be wrapped in a cache to ensure the +// credentials are only refreshed when needed. This also protects the +// credential provider to be used concurrently. +// +// Config providers used: +// * credentialsProviderProvider +func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { + credProvider, found, err := getCredentialsProvider(ctx, configs) + if !found || err != nil { + return false, err + } + + cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider) + if err != nil { + return false, err + } + + return true, nil +} + +// resolveCredentialChain resolves a credential provider chain using EnvConfig +// and SharedConfig if present in the slice of provided configs. +// +// The resolved CredentialProvider will be wrapped in a cache to ensure the +// credentials are only refreshed when needed. This also protects the +// credential provider to be used concurrently. +func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { + envConfig, sharedConfig, other := getAWSConfigSources(configs) + + // When checking if a profile was specified programmatically we should only consider the "other" + // configuration sources that have been provided. This ensures we correctly honor the expected credential + // hierarchy. 
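+	// For example (illustrative, not code from this file), a profile supplied
+	// via config.WithSharedConfigProfile("dev") lands in "other" and selects
+	// the profile-based branch below.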
+	_, sharedProfileSet, err := getSharedConfigProfile(ctx, other)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case sharedProfileSet:
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+	case envConfig.Credentials.HasKeys():
+		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+	case len(envConfig.WebIdentityTokenFilePath) > 0:
+		err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs)
+	default:
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Wrap the resolved provider in a cache so the SDK will cache credentials.
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) {
+
+	switch {
+	case sharedConfig.Source != nil:
+		// Assume IAM role with credentials source from a different profile.
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs)
+
+	case sharedConfig.Credentials.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		cfg.Credentials = credentials.StaticCredentialsProvider{
+			Value: sharedConfig.Credentials,
+		}
+
+	case len(sharedConfig.CredentialSource) != 0:
+		err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs)
+
+	case len(sharedConfig.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
+		return assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs)
+
+	case sharedConfig.hasSSOConfiguration():
+		err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs)
+
+	case len(sharedConfig.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		err = processCredentials(ctx, cfg, sharedConfig, configs)
+
+	case len(envConfig.ContainerCredentialsEndpoint) != 0:
+		err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)
+
+	case len(envConfig.ContainerCredentialsRelativePath) != 0:
+		err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+	default:
+		err = resolveEC2RoleCredentials(ctx, cfg, configs)
+	}
+	if err != nil {
+		return err
+	}
+
+	if len(sharedConfig.RoleARN) > 0 {
+		return credsFromAssumeRole(ctx, cfg, sharedConfig, configs)
+	}
+
+	return nil
+}
+
+func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
+	if err := sharedConfig.validateSSOConfiguration(); err != nil {
+		return err
+	}
+
+	var options []func(*ssocreds.Options)
+	v, found, err := getSSOProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		options = append(options, v)
+	}
+
+	cfgCopy := cfg.Copy()
+
+	if sharedConfig.SSOSession != nil {
+		ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
+		if err != nil {
+			return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
+		}
+		var optFns []func(*ssocreds.SSOTokenProviderOptions)
+		if found {
+			optFns = append(optFns,
ssoTokenProviderOptionsFn) + } + cfgCopy.Region = sharedConfig.SSOSession.SSORegion + cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name) + if err != nil { + return err + } + oidcClient := ssooidc.NewFromConfig(cfgCopy) + tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...) + options = append(options, func(o *ssocreds.Options) { + o.SSOTokenProvider = tokenProvider + o.CachedTokenFilepath = cachedPath + }) + } else { + cfgCopy.Region = sharedConfig.SSORegion + } + + cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...) + + return nil +} + +func ecsContainerURI(path string) string { + return fmt.Sprintf("%s%s", ecsContainerEndpoint, path) +} + +func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { + var opts []func(*processcreds.Options) + + options, found, err := getProcessCredentialOptions(ctx, configs) + if err != nil { + return err + } + if found { + opts = append(opts, options) + } + + cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...) + + return nil +} + +func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error { + var resolveErr error + + parsed, err := url.Parse(endpointURL) + if err != nil { + resolveErr = fmt.Errorf("invalid URL, %w", err) + } else { + host := parsed.Hostname() + if len(host) == 0 { + resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL") + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback hosts are allowed", host) + } + } + + if resolveErr != nil { + return resolveErr + } + + return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs) +} + +func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error { + optFns := []func(*endpointcreds.Options){ + func(options *endpointcreds.Options) { + if len(authToken) != 0 { + options.AuthorizationToken = authToken + } + options.APIOptions = cfg.APIOptions + if cfg.Retryer != nil { + options.Retryer = cfg.Retryer() + } + }, + } + + optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs) + if err != nil { + return err + } + if found { + optFns = append(optFns, optFn) + } + + provider := endpointcreds.New(url, optFns...) 
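+	// The provider is wrapped in the shared credentials cache below; the
+	// 5 minute expiry window makes cached credentials refresh shortly before
+	// they expire.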
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) {
+		options.ExpiryWindow = 5 * time.Minute
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) {
+	switch sharedCfg.CredentialSource {
+	case credSourceEc2Metadata:
+		return resolveEC2RoleCredentials(ctx, cfg, configs)
+
+	case credSourceEnvironment:
+		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+
+	case credSourceECSContainer:
+		if len(envConfig.ContainerCredentialsRelativePath) == 0 {
+			return fmt.Errorf("EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set")
+		}
+		return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+	default:
+		return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment")
+	}
+
+	return nil
+}
+
+func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+	optFns := make([]func(*ec2rolecreds.Options), 0, 2)
+
+	optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	optFns = append(optFns, func(o *ec2rolecreds.Options) {
+		// Only define a client from config if not already defined.
+		if o.Client == nil {
+			o.Client = imds.NewFromConfig(*cfg)
+		}
+	})
+
+	provider := ec2rolecreds.New(optFns...)
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) {
+	var (
+		envConfig    *EnvConfig
+		sharedConfig *SharedConfig
+		other        configs
+	)
+
+	for i := range cfgs {
+		switch c := cfgs[i].(type) {
+		case EnvConfig:
+			if envConfig == nil {
+				envConfig = &c
+			}
+		case *EnvConfig:
+			if envConfig == nil {
+				envConfig = c
+			}
+		case SharedConfig:
+			if sharedConfig == nil {
+				sharedConfig = &c
+			}
+		case *SharedConfig:
+			if sharedConfig == nil {
+				sharedConfig = c
+			}
+		default:
+			other = append(other, c)
+		}
+	}
+
+	if envConfig == nil {
+		envConfig = &EnvConfig{}
+	}
+
+	if sharedConfig == nil {
+		sharedConfig = &SharedConfig{}
+	}
+
+	return envConfig, sharedConfig, other
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session when the MFAToken option is not set and the shared config is
+// configured to assume a role with an MFA token.
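+//
+// A hedged sketch of avoiding this error through the public loader (the
+// choice of token provider is a placeholder):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithAssumeRoleCredentialOptions(func(o *stscreds.AssumeRoleOptions) {
+//			o.TokenProvider = stscreds.StdinTokenProvider
+//		}),
+//	)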
+type AssumeRoleTokenProviderNotSetError struct{} + +// Error is the error message +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error { + if len(filepath) == 0 { + return fmt.Errorf("token file path is not set") + } + + optFns := []func(*stscreds.WebIdentityRoleOptions){ + func(options *stscreds.WebIdentityRoleOptions) { + options.RoleSessionName = sessionName + }, + } + + optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs) + if err != nil { + return err + } + + if found { + optFns = append(optFns, optFn) + } + + opts := stscreds.WebIdentityRoleOptions{ + RoleARN: roleARN, + } + + for _, fn := range optFns { + fn(&opts) + } + + if len(opts.RoleARN) == 0 { + return fmt.Errorf("role ARN is not set") + } + + client := opts.Client + if client == nil { + client = sts.NewFromConfig(*cfg) + } + + provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...) + + cfg.Credentials = provider + + return nil +} + +func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) { + optFns := []func(*stscreds.AssumeRoleOptions){ + func(options *stscreds.AssumeRoleOptions) { + options.RoleSessionName = sharedCfg.RoleSessionName + if sharedCfg.RoleDurationSeconds != nil { + if *sharedCfg.RoleDurationSeconds/time.Minute > 15 { + options.Duration = *sharedCfg.RoleDurationSeconds + } + } + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + options.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) != 0 { + options.SerialNumber = aws.String(sharedCfg.MFASerial) + } + }, + } + + optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs) + if err != nil { + return err + } + if found { + optFns = append(optFns, optFn) + } + + { + // Synthesize options early to validate configuration errors sooner to ensure a token provider + // is present if the SerialNumber was set. + var o stscreds.AssumeRoleOptions + for _, fn := range optFns { + fn(&o) + } + if o.TokenProvider == nil && o.SerialNumber != nil { + return AssumeRoleTokenProviderNotSetError{} + } + } + + cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...) + + return nil +} + +// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache +// with the provided options if the provider is not already a +// aws.CredentialsCache. +func wrapWithCredentialsCache( + ctx context.Context, + cfgs configs, + provider aws.CredentialsProvider, + optFns ...func(options *aws.CredentialsCacheOptions), +) (aws.CredentialsProvider, error) { + _, ok := provider.(*aws.CredentialsCache) + if ok { + return provider, nil + } + + credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs) + if err != nil { + return nil, err + } + + // force allocation of a new slice if the additional options are + // needed, to prevent overwriting the passed in slice of options. 
+ optFns = optFns[:len(optFns):len(optFns)] + if optionsFound { + optFns = append(optFns, credCacheOptions) + } + + return aws.NewCredentialsCache(provider, optFns...), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go new file mode 100644 index 000000000..aac8f8369 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -0,0 +1,1384 @@ +package config + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/internal/ini" + "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" + "github.com/aws/smithy-go/logging" +) + +const ( + // Prefix to use for filtering profiles. The profile prefix should only + // exist in the shared config file, not the credentials file. + profilePrefix = `profile ` + + // Prefix to be used for SSO sections. These are supposed to only exist in + // the shared config file, not the credentials file. + ssoSectionPrefix = `sso-session ` + + // string equivalent for boolean + endpointDiscoveryDisabled = `false` + endpointDiscoveryEnabled = `true` + endpointDiscoveryAuto = `auto` + + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // AWS Single Sign-On (AWS SSO) group + ssoSessionNameKey = "sso_session" + + ssoRegionKey = "sso_region" + ssoStartURLKey = "sso_start_url" + + ssoAccountIDKey = "sso_account_id" + ssoRoleNameKey = "sso_role_name" + + // Additional Config fields + regionKey = `region` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential process + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" + + ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" + + ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + + // Use DualStack Endpoint Resolution + useDualStackEndpoint = "use_dualstack_endpoint" + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. 
+	DefaultSharedConfigProfile = `default`
+
+	// S3 Disable Multi-Region AccessPoints
+	s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points`
+
+	useFIPSEndpointKey = "use_fips_endpoint"
+
+	defaultsModeKey = "defaults_mode"
+
+	// Retry options
+	retryMaxAttemptsKey = "max_attempts"
+	retryModeKey        = "retry_mode"
+
+	caBundleKey = "ca_bundle"
+)
+
+// defaultSharedConfigProfile allows for swapping the default profile for testing
+var defaultSharedConfigProfile = DefaultSharedConfigProfile
+
+// DefaultSharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func DefaultSharedCredentialsFilename() string {
+	return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials")
+}
+
+// DefaultSharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func DefaultSharedConfigFilename() string {
+	return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config")
+}
+
+// DefaultSharedConfigFiles is a slice of the default shared config files that
+// will be used in order to load the SharedConfig.
+var DefaultSharedConfigFiles = []string{
+	DefaultSharedConfigFilename(),
+}
+
+// DefaultSharedCredentialsFiles is a slice of the default shared credentials
+// files that will be used in order to load the SharedConfig.
+var DefaultSharedCredentialsFiles = []string{
+	DefaultSharedCredentialsFilename(),
+}
+
+// SSOSession provides the shared configuration parameters of the sso-session
+// section.
+type SSOSession struct {
+	Name        string
+	SSORegion   string
+	SSOStartURL string
+}
+
+func (s *SSOSession) setFromIniSection(section ini.Section) {
+	updateString(&s.Name, section, ssoSessionNameKey)
+	updateString(&s.SSORegion, section, ssoRegionKey)
+	updateString(&s.SSOStartURL, section, ssoStartURLKey)
+}
+
+// SharedConfig represents the configuration fields of the SDK config files.
+type SharedConfig struct {
+	Profile string
+
+	// Credentials values from the config file. Both aws_access_key_id
+	// and aws_secret_access_key must be provided together in the same file
+	// to be considered valid. The values will be ignored if not a complete group.
+	// aws_session_token is an optional field that can be provided if both of the
+	// other two fields are also provided.
+	//
+	// aws_access_key_id
+	// aws_secret_access_key
+	// aws_session_token
+	Credentials aws.Credentials
+
+	CredentialSource     string
+	CredentialProcess    string
+	WebIdentityTokenFile string
+
+	// SSO session options
+	SSOSessionName string
+	SSOSession     *SSOSession
+
+	// Legacy SSO session options
+	SSORegion   string
+	SSOStartURL string
+
+	// SSO fields not used
+	SSOAccountID string
+	SSORoleName  string
+
+	RoleARN             string
+	ExternalID          string
+	MFASerial           string
+	RoleSessionName     string
+	RoleDurationSeconds *time.Duration
+
+	SourceProfileName string
+	Source            *SharedConfig
+
+	// Region is the region the SDK should use for looking up AWS service endpoints
+	// and signing requests.
+	//
+	// region = us-west-2
+	Region string
+
+	// EnableEndpointDiscovery can be enabled or disabled in the shared config
+	// by setting endpoint_discovery_enabled to true, or false respectively.
+	//
+	// endpoint_discovery_enabled = true
+	EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	// s3_use_arn_region=true
+	S3UseARNRegion *bool
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection
+	// mode (IPv4 or IPv6)
+	//
+	// ec2_metadata_service_endpoint_mode=IPv6
+	EC2IMDSEndpointMode imds.EndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If
+	// specified it overrides EC2IMDSEndpointMode.
+	//
+	// ec2_metadata_service_endpoint=http://fd00:ec2::254
+	EC2IMDSEndpoint string
+
+	// Specifies if the S3 service should disable support for Multi-Region
+	// access-points
+	//
+	// s3_disable_multiregion_access_points=true
+	S3DisableMultiRegionAccessPoints *bool
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	// use_dualstack_endpoint=true
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	// use_fips_endpoint=true
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// Specifies which defaults mode should be used by services.
+	//
+	// defaults_mode=standard
+	DefaultsMode aws.DefaultsMode
+
+	// Specifies the maximum number of attempts an API client will make when
+	// calling an operation that fails with a retryable error.
+	//
+	// max_attempts=3
+	RetryMaxAttempts int
+
+	// Specifies the retry mode the API client will be created with.
+	//
+	// retry_mode=standard
+	RetryMode aws.RetryMode
+
+	// Sets the path to a custom Certificate Authority (CA) bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle. Only use
+	// this if you want to configure the SDK to use a custom set of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport into the SDK's
+	// HTTP client. If the client's Transport is not a http.Transport an error
+	// will be returned. If the Transport's TLS config is set this option will
+	// cause the SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this
+	// setting. To use this option and custom HTTP client, the HTTP client
+	// needs to be provided when creating the config. Not the service client.
+	//
+	// ca_bundle=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+}
+
+func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) {
+	if len(c.DefaultsMode) == 0 {
+		return "", false, nil
+	}
+
+	return c.DefaultsMode, true, nil
+}
+
+// GetRetryMaxAttempts returns the maximum number of attempts an API client
+// created Retryer should attempt an operation call before failing.
+func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) {
+	if c.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+
+	return c.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the retry mode the API client should create its
+// Retryer in.
+func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) {
+	if len(c.RetryMode) == 0 {
+		return "", false, nil
+	}
+
+	return c.RetryMode, true, nil
+}
+
+// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region
+// the client's requests are sent to.
+func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+	if c.S3UseARNRegion == nil {
+		return false, false, nil
+	}
+
+	return *c.S3UseARNRegion, true, nil
+}
+
+// GetEnableEndpointDiscovery returns if the enable_endpoint_discovery is set.
+func (c SharedConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) {
+	if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+		return aws.EndpointDiscoveryUnset, false, nil
+	}
+
+	return c.EnableEndpointDiscovery, true, nil
+}
+
+// GetS3DisableMultiRegionAccessPoints returns if the S3 service should disable support for Multi-Region
+// access-points.
+func (c SharedConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
+	if c.S3DisableMultiRegionAccessPoints == nil {
+		return false, false, nil
+	}
+
+	return *c.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// getRegion returns the region for the profile if a region is set.
+func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) {
+	if len(c.Region) == 0 {
+		return "", false, nil
+	}
+	return c.Region, true, nil
+}
+
+// getCredentialsProvider returns the credentials for a profile if they were set.
+func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) {
+	return c.Credentials, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface.
+func (c SharedConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+	if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+		return imds.EndpointModeStateUnset, false, nil
+	}
+
+	return c.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface.
+func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) {
+	if len(c.EC2IMDSEndpoint) == 0 {
+		return "", false, nil
+	}
+
+	return c.EC2IMDSEndpoint, true, nil
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+
+	return c.UseDualStackEndpoint, true, nil
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+
+	return c.UseFIPSEndpoint, true, nil
+}
+
+// getCustomCABundle returns the custom CA bundle's PEM bytes if the file path
+// was set in the shared config.
+func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+	if len(c.CustomCABundle) == 0 {
+		return nil, false, nil
+	}
+
+	b, err := ioutil.ReadFile(c.CustomCABundle)
+	if err != nil {
+		return nil, false, err
+	}
+	return bytes.NewReader(b), true, nil
+}
+
+// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the
+// addition of ignoring when none of the files exist or when the profile
+// is not found in any of the files.
+func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) {
+	cfg, err := loadSharedConfig(ctx, configs)
+	if err != nil {
+		if _, ok := err.(SharedConfigProfileNotExistError); ok {
+			return SharedConfig{}, nil
+		}
+		return nil, err
+	}
+
+	return cfg, nil
+}
+
+// loadSharedConfig uses the configs passed in to load the SharedConfig from file.
+// The file names and profile name are sourced from the configs.
+//
+// If profile name is not provided DefaultSharedConfigProfile (default) will
+// be used.
+//
+// If shared config filenames are not provided DefaultSharedConfigFiles will
+// be used.
+//
+// Config providers used:
+// * sharedConfigProfileProvider
+// * sharedConfigFilesProvider
+func loadSharedConfig(ctx context.Context, configs configs) (Config, error) {
+	var profile string
+	var configFiles []string
+	var credentialsFiles []string
+	var ok bool
+	var err error
+
+	profile, ok, err = getSharedConfigProfile(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+	if !ok {
+		profile = defaultSharedConfigProfile
+	}
+
+	configFiles, ok, err = getSharedConfigFiles(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	// setup logger if log configuration warning is set
+	var logger logging.Logger
+	logWarnings, found, err := getLogConfigurationWarnings(ctx, configs)
+	if err != nil {
+		return SharedConfig{}, err
+	}
+	if found && logWarnings {
+		logger, found, err = getLogger(ctx, configs)
+		if err != nil {
+			return SharedConfig{}, err
+		}
+		if !found {
+			logger = logging.NewStandardLogger(os.Stderr)
+		}
+	}
+
+	return LoadSharedConfigProfile(ctx, profile,
+		func(o *LoadSharedConfigOptions) {
+			o.Logger = logger
+			o.ConfigFiles = configFiles
+			o.CredentialsFiles = credentialsFiles
+		},
+	)
+}
+
+// LoadSharedConfigOptions struct contains optional values that can be used to load the config.
+type LoadSharedConfigOptions struct {
+
+	// CredentialsFiles are the shared credentials files
+	CredentialsFiles []string
+
+	// ConfigFiles are the shared config files
+	ConfigFiles []string
+
+	// Logger is the logger used to log shared config behavior
+	Logger logging.Logger
+}
+
+// LoadSharedConfigProfile retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B. Both define credentials. If the order
+// of the files are A then B, B's credential values will be used instead of A's.
+//
+// If config files are not set, SDK will default to using a file at location `.aws/config` if present.
+// If credentials files are not set, SDK will default to using a file at location `.aws/credentials` if present.
+// No default files are used if the file lists are set to an empty slice.
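+//
+// A minimal usage sketch (profile name and file path are placeholders):
+//
+//	sc, err := config.LoadSharedConfigProfile(context.TODO(), "dev",
+//		func(o *config.LoadSharedConfigOptions) {
+//			o.ConfigFiles = []string{"/path/to/config"}
+//		})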
+// +// You can read more about shared config and credentials file location at +// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location +func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) { + var option LoadSharedConfigOptions + for _, fn := range optFns { + fn(&option) + } + + if option.ConfigFiles == nil { + option.ConfigFiles = DefaultSharedConfigFiles + } + + if option.CredentialsFiles == nil { + option.CredentialsFiles = DefaultSharedCredentialsFiles + } + + // load shared configuration sections from shared configuration INI options + configSections, err := loadIniFiles(option.ConfigFiles) + if err != nil { + return SharedConfig{}, err + } + + // check for profile prefix and drop duplicates or invalid profiles + err = processConfigSections(ctx, &configSections, option.Logger) + if err != nil { + return SharedConfig{}, err + } + + // load shared credentials sections from shared credentials INI options + credentialsSections, err := loadIniFiles(option.CredentialsFiles) + if err != nil { + return SharedConfig{}, err + } + + // check for profile prefix and drop duplicates or invalid profiles + err = processCredentialsSections(ctx, &credentialsSections, option.Logger) + if err != nil { + return SharedConfig{}, err + } + + err = mergeSections(&configSections, credentialsSections) + if err != nil { + return SharedConfig{}, err + } + + cfg := SharedConfig{} + profiles := map[string]struct{}{} + if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil { + return SharedConfig{}, err + } + + return cfg, nil +} + +func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { + skipSections := map[string]struct{}{} + + for _, section := range sections.List() { + if _, ok := skipSections[section]; ok { + continue + } + + // drop sections from config file that do not have expected prefixes. + switch { + case strings.HasPrefix(section, profilePrefix): + // Rename sections to remove "profile " prefixing to match with + // credentials file. If default is already present, it will be + // dropped. + newName, err := renameProfileSection(section, sections, logger) + if err != nil { + return fmt.Errorf("failed to rename profile section, %w", err) + } + skipSections[newName] = struct{}{} + + case strings.HasPrefix(section, ssoSectionPrefix): + case strings.EqualFold(section, "default"): + default: + // drop this section, as invalid profile name + sections.DeleteSection(section) + + if logger != nil { + logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+ + "For use within a shared configuration file, "+ + "a non-default profile must have `profile ` "+ + "prefixed to the profile name.", + section, + ) + } + } + } + return nil +} + +func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) { + v, ok := sections.GetSection(section) + if !ok { + return "", fmt.Errorf("error processing profiles within the shared configuration files") + } + + // delete section with profile as prefix + sections.DeleteSection(section) + + // set the value to non-prefixed name in sections. 
+	section = strings.TrimPrefix(section, profilePrefix)
+	if sections.HasSection(section) {
+		oldSection, _ := sections.GetSection(section)
+		v.Logs = append(v.Logs,
+			fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+
+				"overriding non-default profile from %s",
+				v.SourceFile, oldSection.SourceFile))
+		sections.DeleteSection(section)
+	}
+
+	// assign non-prefixed name to section
+	v.Name = section
+	sections.SetSection(section, v)
+
+	return section, nil
+}
+
+func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
+	for _, section := range sections.List() {
+		// drop profiles with prefix for credential files
+		if strings.HasPrefix(section, profilePrefix) {
+			// drop this section, as invalid profile name
+			sections.DeleteSection(section)
+
+			if logger != nil {
+				logger.Logf(logging.Debug,
+					"The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+
+						"for the shared credentials file.\n",
+					section,
+				)
+			}
+		}
+	}
+	return nil
+}
+
+func loadIniFiles(filenames []string) (ini.Sections, error) {
+	mergedSections := ini.NewSections()
+
+	for _, filename := range filenames {
+		sections, err := ini.OpenFile(filename)
+		var v *ini.UnableToReadFile
+		if ok := errors.As(err, &v); ok {
+			// Skip files which can't be opened and read for whatever reason.
+			// We treat such files as empty, and do not fall back to other locations.
+			continue
+		} else if err != nil {
+			return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+
+		// mergeSections into mergedSections
+		err = mergeSections(&mergedSections, sections)
+		if err != nil {
+			return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+	}
+
+	return mergedSections, nil
+}
+
+// mergeSections merges source section properties into destination section properties
+func mergeSections(dst *ini.Sections, src ini.Sections) error {
+	for _, sectionName := range src.List() {
+		srcSection, _ := src.GetSection(sectionName)
+
+		if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) ||
+			(srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) {
+			srcSection.Errors = append(srcSection.Errors,
+				fmt.Errorf("partial credentials found for profile %v", sectionName))
+		}
+
+		if !dst.HasSection(sectionName) {
+			dst.SetSection(sectionName, srcSection)
+			continue
+		}
+
+		// merge with the destination section
+		dstSection, _ := dst.GetSection(sectionName)
+
+		// errors should be overridden if any
+		dstSection.Errors = srcSection.Errors
+
+		// Access key id update
+		if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) {
+			accessKey := srcSection.String(accessKeyIDKey)
+			secretKey := srcSection.String(secretAccessKey)
+
+			if dstSection.Has(accessKeyIDKey) {
+				dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey,
+					dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey]))
+			}
+
+			// update access key
+			v, err := ini.NewStringValue(accessKey)
+			if err != nil {
+				return fmt.Errorf("error merging access key, %w", err)
+			}
+			dstSection.UpdateValue(accessKeyIDKey, v)
+
+			// update secret key
+			v, err = ini.NewStringValue(secretKey)
+			if err != nil {
+				return fmt.Errorf("error merging secret key, %w", err)
+			}
+			dstSection.UpdateValue(secretAccessKey, v)
+
+			// update session token
+			if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil {
+				return err
+			}
+
+			// update source file to reflect where the static creds came from
+			dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey])
+			dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey])
+		}
+
+		stringKeys := []string{
+			roleArnKey,
+			sourceProfileKey,
+			credentialSourceKey,
+			externalIDKey,
+			mfaSerialKey,
+			roleSessionNameKey,
+			regionKey,
+			enableEndpointDiscoveryKey,
+			credentialProcessKey,
+			webIdentityTokenFileKey,
+			s3UseARNRegionKey,
+			s3DisableMultiRegionAccessPointsKey,
+			ec2MetadataServiceEndpointModeKey,
+			ec2MetadataServiceEndpointKey,
+			useDualStackEndpoint,
+			useFIPSEndpointKey,
+			defaultsModeKey,
+			retryModeKey,
+			caBundleKey,
+
+			ssoSessionNameKey,
+			ssoAccountIDKey,
+			ssoRegionKey,
+			ssoRoleNameKey,
+			ssoStartURLKey,
+		}
+		for i := range stringKeys {
+			if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil {
+				return err
+			}
+		}
+
+		intKeys := []string{
+			roleDurationSecondsKey,
+			retryMaxAttemptsKey,
+		}
+		for i := range intKeys {
+			if err := mergeIntKey(&srcSection, &dstSection, sectionName, intKeys[i]); err != nil {
+				return err
+			}
+		}
+
+		// store the merged section back on dst
+		*dst = dst.SetSection(sectionName, dstSection)
+	}
+
+	return nil
+}
+
+func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
+	if srcSection.Has(key) {
+		srcValue := srcSection.String(key)
+		val, err := ini.NewStringValue(srcValue)
+		if err != nil {
+			return fmt.Errorf("error merging %s, %w", key, err)
+		}
+
+		if dstSection.Has(key) {
+			dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
+				dstSection.SourceFile[key], srcSection.SourceFile[key]))
+		}
+
+		dstSection.UpdateValue(key, val)
+		dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
+	}
+	return nil
+}
+
+func mergeIntKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
+	if srcSection.Has(key) {
+		srcValue := srcSection.Int(key)
+		v, err := ini.NewIntValue(srcValue)
+		if err != nil {
+			return fmt.Errorf("error merging %s, %w", key, err)
+		}
+
+		if dstSection.Has(key) {
+			dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
+				dstSection.SourceFile[key], srcSection.SourceFile[key]))
+		}
+
+		dstSection.UpdateValue(key, v)
+		dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
+	}
+	return nil
+}
+
+func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string {
+	return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+		"with a %v value found in a duplicate profile defined at file %v. \n",
+		sectionName, key, dstSourceFile, key, srcSourceFile)
+}
+
+// Returns an error if all of the files fail to load. If at least one file is
+// successfully loaded and contains the profile, no error will be returned.
+func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string,
+	sections ini.Sections, logger logging.Logger) error {
+	c.Profile = profile
+
+	section, ok := sections.GetSection(profile)
+	if !ok {
+		return SharedConfigProfileNotExistError{
+			Profile: profile,
+		}
+	}
+
+	// if logs are appended to the section, log them
+	if section.Logs != nil && logger != nil {
+		for _, log := range section.Logs {
+			logger.Logf(logging.Debug, log)
+		}
+	}
+
+	// set config from the provided INI section
+	err := c.setFromIniSection(profile, section)
+	if err != nil {
+		return fmt.Errorf("error fetching config from profile, %v, %w", profile, err)
+	}
+
+	if _, ok := profiles[profile]; ok {
+		// if this is the second instance of the profile the Assume Role
+		// options must be cleared because they are only valid for the
+		// first reference of a profile. The self-linked instance of the
+		// profile only has credential provider options.
+		c.clearAssumeRoleOptions()
+	} else {
+		// First time a profile has been seen. Assert that if the credential
+		// type requires a role ARN, the ARN is also set.
+		if err := c.validateCredentialsConfig(profile); err != nil {
+			return err
+		}
+	}
+
+	// if not the top-level profile and credentials are present, return with
+	// those credentials.
+	if len(profiles) != 0 && c.Credentials.HasKeys() {
+		return nil
+	}
+
+	profiles[profile] = struct{}{}
+
+	// validate that no colliding credential types are present
+	if err := c.validateCredentialType(); err != nil {
+		return err
+	}
+
+	// Link source profiles for assume roles
+	if len(c.SourceProfileName) != 0 {
+		// A profile linked via source_profile ignores credential provider
+		// options; the source profile must provide the credentials.
+		c.clearCredentialOptions()
+
+		srcCfg := &SharedConfig{}
+		err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger)
+		if err != nil {
+			// A SourceProfileName that doesn't exist is an error in the configuration.
+			if _, ok := err.(SharedConfigProfileNotExistError); ok {
+				err = SharedConfigAssumeRoleError{
+					RoleARN: c.RoleARN,
+					Profile: c.SourceProfileName,
+					Err:     err,
+				}
+			}
+			return err
+		}
+
+		if !srcCfg.hasCredentials() {
+			return SharedConfigAssumeRoleError{
+				RoleARN: c.RoleARN,
+				Profile: c.SourceProfileName,
+			}
+		}
+
+		c.Source = srcCfg
+	}
+
+	// If the profile contains an SSO session parameter, the session MUST exist
+	// as a section in the config file. Load the SSO session using the name
+	// provided. If the session section is not found or is incomplete, an error
+	// will be returned.
+	if c.hasSSOTokenProviderConfiguration() {
+		section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName))
+		if !ok {
+			return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName)
+		}
+		var ssoSession SSOSession
+		ssoSession.setFromIniSection(section)
+		ssoSession.Name = c.SSOSessionName
+		c.SSOSession = &ssoSession
+	}
+
+	return nil
+}
+
+// setFromIniSection loads the configuration from the profile section defined in
+// the provided INI file. A SharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete, logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config, such as credentials. For example,
+// if a config file only includes aws_access_key_id but no aws_secret_access_key,
+// the aws_access_key_id will be ignored.
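+//
+// For example, a section containing only
+//
+//	[dev]
+//	aws_access_key_id = EXAMPLEKEYID
+//
+// leaves the Credentials field unset.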
+func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error {
+	if len(section.Name) == 0 {
+		sources := make([]string, 0)
+		for _, v := range section.SourceFile {
+			sources = append(sources, v)
+		}
+
+		return fmt.Errorf("parsing error: could not find profile section name after processing files: %v", sources)
+	}
+
+	if len(section.Errors) != 0 {
+		var errStatement string
+		for i, e := range section.Errors {
+			errStatement += fmt.Sprintf("%d, %v\n", i+1, e.Error())
+		}
+		return fmt.Errorf("error using profile:\n%v", errStatement)
+	}
+
+	// Assume Role
+	updateString(&c.RoleARN, section, roleArnKey)
+	updateString(&c.ExternalID, section, externalIDKey)
+	updateString(&c.MFASerial, section, mfaSerialKey)
+	updateString(&c.RoleSessionName, section, roleSessionNameKey)
+	updateString(&c.SourceProfileName, section, sourceProfileKey)
+	updateString(&c.CredentialSource, section, credentialSourceKey)
+	updateString(&c.Region, section, regionKey)
+
+	// AWS Single Sign-On (AWS SSO)
+	// SSO session options
+	updateString(&c.SSOSessionName, section, ssoSessionNameKey)
+
+	// Legacy SSO session options
+	updateString(&c.SSORegion, section, ssoRegionKey)
+	updateString(&c.SSOStartURL, section, ssoStartURLKey)
+
+	// SSO fields not used
+	updateString(&c.SSOAccountID, section, ssoAccountIDKey)
+	updateString(&c.SSORoleName, section, ssoRoleNameKey)
+
+	if section.Has(roleDurationSecondsKey) {
+		d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
+		c.RoleDurationSeconds = &d
+	}
+
+	updateString(&c.CredentialProcess, section, credentialProcessKey)
+	updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey)
+
+	updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+	updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey)
+	updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey)
+
+	if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err)
+	}
+	updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
+
+	updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint)
+	updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey)
+
+	if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err)
+	}
+
+	if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err)
+	}
+	if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err)
+	}
+
+	updateString(&c.CustomCABundle, section, caBundleKey)
+
+	// Shared Credentials
+	creds := aws.Credentials{
+		AccessKeyID:     section.String(accessKeyIDKey),
+		SecretAccessKey: section.String(secretAccessKey),
+		SessionToken:    section.String(sessionTokenKey),
+		Source:          fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]),
+	}
+
+	if creds.HasKeys() {
+		c.Credentials = creds
+	}
+
+	return nil
+}
+
+func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	if ok := mode.SetFromString(value); !ok {
+		return fmt.Errorf("invalid value: %s", value)
+	}
+	return nil
+}
+
+func updateRetryMode(mode *aws.RetryMode, section ini.Section, key string) (err error) {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	if *mode, err = aws.ParseRetryMode(value); err != nil {
+		return err
+	}
+	return nil
+}
+
+func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	return endpointMode.SetFromString(value)
+}
+
+func (c *SharedConfig) validateCredentialsConfig(profile string) error {
+	if err := c.validateCredentialsRequireARN(profile); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateCredentialsRequireARN(profile string) error {
+	var credSource string
+
+	switch {
+	case len(c.SourceProfileName) != 0:
+		credSource = sourceProfileKey
+	case len(c.CredentialSource) != 0:
+		credSource = credentialSourceKey
+	case len(c.WebIdentityTokenFile) != 0:
+		credSource = webIdentityTokenFileKey
+	}
+
+	if len(credSource) != 0 && len(c.RoleARN) == 0 {
+		return CredentialRequiresARNError{
+			Type:    credSource,
+			Profile: profile,
+		}
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateCredentialType() error {
+	// Only one or no credential type can be defined.
+	if !oneOrNone(
+		len(c.SourceProfileName) != 0,
+		len(c.CredentialSource) != 0,
+		len(c.CredentialProcess) != 0,
+		len(c.WebIdentityTokenFile) != 0,
+	) {
+		return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token")
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateSSOConfiguration() error {
+	if c.hasSSOTokenProviderConfiguration() {
+		err := c.validateSSOTokenProviderConfiguration()
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if c.hasLegacySSOConfiguration() {
+		err := c.validateLegacySSOConfiguration()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *SharedConfig) validateSSOTokenProviderConfiguration() error {
+	var missing []string
+
+	if len(c.SSOSessionName) == 0 {
+		missing = append(missing, ssoSessionNameKey)
+	}
+
+	if c.SSOSession == nil {
+		missing = append(missing, ssoSectionPrefix)
+	} else {
+		if len(c.SSOSession.SSORegion) == 0 {
+			missing = append(missing, ssoRegionKey)
+		}
+
+		if len(c.SSOSession.SSOStartURL) == 0 {
+			missing = append(missing, ssoStartURLKey)
+		}
+	}
+
+	if len(missing) > 0 {
+		return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+			c.Profile, strings.Join(missing, ", "))
+	}
+
+	if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion {
+		return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix)
+	}
+
+	if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL {
+		return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix)
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateLegacySSOConfiguration() error {
+	var missing []string
+
+	if len(c.SSORegion) == 0 {
+		missing = append(missing, ssoRegionKey)
+	}
+
+	if len(c.SSOStartURL) == 0 {
+		missing = append(missing, ssoStartURLKey)
+	}
+
+	if len(c.SSOAccountID) == 0 {
+		missing = append(missing, ssoAccountIDKey)
+	}
+
+	if len(c.SSORoleName) == 0 {
+		missing = append(missing, ssoRoleNameKey)
+	}
+
+	if len(missing) > 0 {
+		return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+			c.Profile, strings.Join(missing, ", "))
+	}
+	return nil
+}
+
+func (c *SharedConfig) hasCredentials() bool {
+	switch {
+	case len(c.SourceProfileName) != 0:
+	case len(c.CredentialSource) != 0:
+	case len(c.CredentialProcess) != 0:
+	case len(c.WebIdentityTokenFile) != 0:
+	case c.hasSSOConfiguration():
+	case c.Credentials.HasKeys():
+	default:
+		return false
+	}
+
+	return true
+}
+
+func (c *SharedConfig) hasSSOConfiguration() bool {
+	return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration()
+}
+
+func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool {
+	return len(c.SSOSessionName) > 0
+}
+
+func (c *SharedConfig) hasLegacySSOConfiguration() bool {
+	return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0
+}
+
+func (c *SharedConfig) clearAssumeRoleOptions() {
+	c.RoleARN = ""
+	c.ExternalID = ""
+	c.MFASerial = ""
+	c.RoleSessionName = ""
+	c.SourceProfileName = ""
+}
+
+func (c *SharedConfig) clearCredentialOptions() {
+	c.CredentialSource = ""
+	c.CredentialProcess = ""
+	c.WebIdentityTokenFile = ""
+	c.Credentials = aws.Credentials{}
+	c.SSOAccountID = ""
+	c.SSORegion = ""
+	c.SSORoleName = ""
+	c.SSOStartURL = ""
+}
+
+// SharedConfigLoadError is an error for when the shared config file fails to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigLoadError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigLoadError) Error() string {
+	return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err)
+}
+
+// SharedConfigProfileNotExistError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistError struct {
+	Filename []string
+	Profile  string
+	Err      error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigProfileNotExistError) Error() string {
+	return fmt.Sprintf("failed to get shared config profile, %s", e.Profile)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+	Profile string
+	RoleARN string
+	Err     error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigAssumeRoleError) Error() string {
+	return fmt.Sprintf("failed to load assume role %s, of profile %s, %v",
+		e.RoleARN, e.Profile, e.Err)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+	// Type of credentials that were configured.
+	Type string
+
+	// Profile name the credentials were in.
+	Profile string
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+	return fmt.Sprintf(
+		"credential type %s requires role_arn, profile %s",
+		e.Type, e.Profile,
+	)
+}
+
+func oneOrNone(bs ...bool) bool {
+	var count int
+
+	for _, b := range bs {
+		if b {
+			count++
+			if count > 1 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// updateString will only update the dst with the value in the section, if the
+// key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.String(key)
+}
+
+// updateInt will only update the dst with the value in the section, if the
+// key is present in the section.
+//
+// Downcasts the INI integer value from an int64 to an int, which could be a
+// different bit size depending on the platform.
+func updateInt(dst *int, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	if vt, _ := section.ValueType(key); vt != ini.IntegerType {
+		return fmt.Errorf("invalid value %s=%s, expect integer",
+			key, section.String(key))
+	}
+	*dst = int(section.Int(key))
+	return nil
+}
+
+// updateBool will only update the dst with the value in the section, if the
+// key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.Bool(key)
+}
+
+// updateBoolPtr will only update the dst with the value in the section, if the
+// key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = new(bool)
+	**dst = section.Bool(key)
+}
+
+// updateEndpointDiscoveryType will only update the dst with the value in the section, if
+// a valid key and corresponding EndpointDiscoveryType is found.
+func updateEndpointDiscoveryType(dst *aws.EndpointDiscoveryEnableState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	value := section.String(key)
+	if len(value) == 0 {
+		return
+	}
+
+	switch {
+	case strings.EqualFold(value, endpointDiscoveryDisabled):
+		*dst = aws.EndpointDiscoveryDisabled
+	case strings.EqualFold(value, endpointDiscoveryEnabled):
+		*dst = aws.EndpointDiscoveryEnabled
+	case strings.EqualFold(value, endpointDiscoveryAuto):
+		*dst = aws.EndpointDiscoveryAuto
+	}
+}
+
+// updateUseDualStackEndpoint will only update the dst with the value in the section, if
+// a valid key and corresponding DualStackEndpointState is found.
+func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	if section.Bool(key) {
+		*dst = aws.DualStackEndpointStateEnabled
+	} else {
+		*dst = aws.DualStackEndpointStateDisabled
+	}
+
+	return
+}
+
+// updateUseFIPSEndpoint will only update the dst with the value in the section, if
+// a valid key and corresponding FIPSEndpointState is found.
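+// For example, a section with the key set to true results in
+// aws.FIPSEndpointStateEnabled; false results in aws.FIPSEndpointStateDisabled.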
+func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key string) { + if !section.Has(key) { + return + } + + if section.Bool(key) { + *dst = aws.FIPSEndpointStateEnabled + } else { + *dst = aws.FIPSEndpointStateDisabled + } + + return +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md new file mode 100644 index 000000000..0b0958bdf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -0,0 +1,323 @@ +# v1.13.26 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.25 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.24 (2023-05-09) + +* No change notes available for this release. + +# v1.13.23 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.22 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.21 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.20 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.19 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.18 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.17 (2023-03-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.16 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.15 (2023-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.14 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2023-02-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.11 (2023-02-01) + +* No change notes available for this release. + +# v1.13.10 (2023-01-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2023-01-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2023-01-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2022-12-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2022-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.5 (2022-12-15) + +* **Bug Fix**: Unify logic between shared config and in finding home directory +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2022-11-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2022-11-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-11-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-11-11) + +* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. 
This has been fixed via PR #1903 and tracked in issue #1846
+* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+
+# v1.12.24 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.23 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.22 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.21 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.20 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.19 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.18 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.17 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.16 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.15 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.9 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-04-25)
+
+* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-23)
+
+* **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-02-24) + +* **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575) +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.5 (2021-12-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.4 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.3 (2021-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.2 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-11-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-09-10) + +* **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders. + +# v1.4.0 (2021-08-27) + +* **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. Closes https://github.com/aws/aws-sdk-go-v2/issues/723 +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. 
+* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
new file mode 100644
index 000000000..f6e2873ab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
@@ -0,0 +1,4 @@
+/*
+Package credentials provides types for retrieving credentials from credentials sources.
+*/
+package credentials
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
new file mode 100644
index 000000000..6ed71b42b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
@@ -0,0 +1,58 @@
+// Package ec2rolecreds provides the credentials provider implementation for
+// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS.
+//
+// # Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with an
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// # Loading credentials with the SDK's AWS Config
+//
+// The EC2 Instance role credentials provider will automatically be the resolved
+// credential provider in the credential chain if no other credential provider is
+// resolved first.
+//
+// To explicitly instruct the SDK's credentials resolving to use the EC2 Instance
+// role for credentials, you specify a `credential_source` property in the config
+// profile the SDK will load.
+//
+//	[default]
+//	credential_source = Ec2InstanceMetadata
+//
+// # Loading credentials with the Provider directly
+//
+// Another way to use the EC2 Instance role credentials provider is to create it
+// directly and assign it as the credentials provider for an API client.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+//	provider := ec2rolecreds.New()
+//
+//	// Create the service client value configured for credentials.
+//	svc := s3.New(s3.Options{
+//		Credentials: aws.NewCredentialsCache(provider),
+//	})
+//
+// If you need more control, you can set configuration options on the
+// credentials provider using the Options type, for example to configure the
+// EC2 IMDS API client used to retrieve the credentials.
+//
+//	provider := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//		// See imds.Options type's documentation for more options available.
+//		o.Client = imds.New(imds.Options{
+//			HTTPClient: customHTTPClient,
+//		})
+//	})
+//
+// # EC2 IMDS API Client
+//
+// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on
+// configuring the client, and options available.
+package ec2rolecreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
new file mode 100644
index 000000000..5c699f166
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
@@ -0,0 +1,229 @@
+package ec2rolecreds
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName provides the name of the EC2Role provider.
+const ProviderName = "EC2RoleProvider"
+
+// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the
+// GetMetadata operation.
+type GetMetadataAPIClient interface {
+	GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error)
+}
+
+// A Provider retrieves credentials from the EC2 service, and keeps track of
+// whether those credentials are expired.
+//
+// The New function must be used to create the Provider with a custom EC2 IMDS
+// client.
+//
+//	p := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//		o.Client = imds.New(imds.Options{/* custom options */})
+//	})
+type Provider struct {
+	options Options
+}
+
+// Options is a list of user-settable options for setting the behavior of the Provider.
+type Options struct {
+	// The API client that will be used by the provider to make GetMetadata API
+	// calls to EC2 IMDS.
+	//
+	// If nil, the provider will default to the EC2 IMDS client.
+	Client GetMetadataAPIClient
+}
+
+// New returns an initialized Provider value configured to retrieve
+// credentials from EC2 Instance Metadata service.
+func New(optFns ...func(*Options)) *Provider {
+	options := Options{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.Client == nil {
+		options.Client = imds.New(imds.Options{})
+	}
+
+	return &Provider{
+		options: options,
+	}
+}
+
+// Retrieve retrieves credentials from the EC2 service. An error will be
+// returned if the request fails or the desired credentials cannot be
+// extracted.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	credsList, err := requestCredList(ctx, p.options.Client)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return aws.Credentials{Source: ProviderName},
+			fmt.Errorf("unexpected empty EC2 IMDS role list")
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(ctx, p.options.Client, credsName)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	creds := aws.Credentials{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		Source:          ProviderName,
+
+		CanExpire: true,
+		Expires:   roleCreds.Expiration,
+	}
+
+	// Cap role credentials Expires to 1 hour so they can be refreshed more
+	// often. Jitter will be applied by the credentials cache if it is being used.
+	if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) {
+		creds.Expires = anHour
+	}
+
+	return creds, nil
+}
+
+// HandleFailToRefresh will extend the credentials Expires time if it is
+// expired. If the credentials will not expire within the minimum time, they
+// will be returned.
+//
+// If the credentials cannot expire, the original error will be returned.
+func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) (
+	aws.Credentials, error,
+) {
+	if !prevCreds.CanExpire {
+		return aws.Credentials{}, err
+	}
+
+	if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) {
+		return prevCreds, nil
+	}
+
+	newCreds := prevCreds
+	randFloat64, err := sdkrand.CryptoRandFloat64()
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err)
+	}
+
+	// Random distribution of [5,15) minutes.
+	expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute
+	newCreds.Expires = sdk.NowTime().Add(expireOffset)
+
+	logger := middleware.GetLogger(ctx)
+	logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes()))
+
+	return newCreds, nil
+}
+
+// AdjustExpiresBy adds the passed in duration to the passed in credential's
+// Expires time, unless the time until Expires is less than 15 minutes. Returns
+// the credentials, even if not updated.
+func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) (
+	aws.Credentials, error,
+) {
+	if !creds.CanExpire {
+		return creds, nil
+	}
+	if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) {
+		return creds, nil
+	}
+
+	creds.Expires = creds.Expires.Add(dur)
+	return creds, nil
+}
+
+// ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials/"
+
+// requestCredList requests the list of IAM role credential names from the EC2
+// IMDS. An error is returned if the request fails or the response cannot be
+// read.
+func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) {
+	resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+		Path: iamSecurityCredsPath,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("no EC2 IMDS role found, %w", err)
+	}
+	defer resp.Content.Close()
+
+	credsList := []string{}
+	s := bufio.NewScanner(resp.Content)
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
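+// For example, for a role named "my-role" (illustrative), the metadata path
+// requested is "/iam/security-credentials/my-role".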
+func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+		Path: path.Join(iamSecurityCredsPath, credsName),
+	})
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+				credsName, err)
+	}
+	defer resp.Content.Close()
+
+	var respCreds ec2RoleCredRespBody
+	if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w",
+				credsName, err)
+	}
+
+	if !strings.EqualFold(respCreds.Code, "Success") {
+		// If an error code was returned something failed requesting the role.
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+				credsName,
+				&smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message})
+	}
+
+	return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
new file mode 100644
index 000000000..60b8298f8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
@@ -0,0 +1,148 @@
+package client
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	"github.com/aws/smithy-go"
+	smithymiddleware "github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ServiceID is the client identifier
+const ServiceID = "endpoint-credentials"
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Options is the endpoint client configurable options
+type Options struct {
+	// The endpoint to retrieve credentials from
+	Endpoint string
+
+	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
+	// implementation if nil.
+	HTTPClient HTTPClient
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	Retryer aws.Retryer
+
+	// Set of options to modify how the credentials operation is invoked.
+	APIOptions []func(*smithymiddleware.Stack) error
+}
+
+// Copy creates a copy of the Options, including a copy of the APIOptions slice.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions))
+	copy(to.APIOptions, o.APIOptions)
+	return to
+}
+
+// Client is a client for retrieving AWS credentials from an endpoint
+type Client struct {
+	options Options
+}
+
+// New constructs a new Client from the given options
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	if options.HTTPClient == nil {
+		options.HTTPClient = awshttp.NewBuildableClient()
+	}
+
+	if options.Retryer == nil {
+		options.Retryer = retry.NewStandard()
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	client := &Client{
+		options: options,
+	}
+
+	return client
+}
+
+// GetCredentialsInput is the input to send to the endpoint service to receive credentials.
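+//
+// The AuthorizationToken, when set, is sent as the value of the Authorization
+// header of the request (see the operation serializer middleware).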
+type GetCredentialsInput struct {
+	AuthorizationToken string
+}
+
+// GetCredentials retrieves credentials from the credential endpoint
+func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
+	stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest)
+	options := c.options.Copy()
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After)
+	stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After)
+	stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After)
+	retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer})
+	middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID)
+	smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
+	smithyhttp.AddCloseResponseBodyMiddleware(stack)
+
+	for _, fn := range options.APIOptions {
+		if err := fn(stack); err != nil {
+			return nil, err
+		}
+	}
+
+	handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+	result, _, err := handler.Handle(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+
+	return result.(*GetCredentialsOutput), err
+}
+
+// GetCredentialsOutput is the response from the credential endpoint
+type GetCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+// EndpointError is an error returned from the endpoint service
+type EndpointError struct {
+	Code    string            `json:"code"`
+	Message string            `json:"message"`
+	Fault   smithy.ErrorFault `json:"-"`
+}
+
+// Error is the error message string
+func (e *EndpointError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code, e.Message)
+}
+
+// ErrorCode is the error code returned by the endpoint
+func (e *EndpointError) ErrorCode() string {
+	return e.Code
+}
+
+// ErrorMessage is the error message returned by the endpoint
+func (e *EndpointError) ErrorMessage() string {
+	return e.Message
+}
+
+// ErrorFault indicates error fault classification
+func (e *EndpointError) ErrorFault() smithy.ErrorFault {
+	return e.Fault
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
new file mode 100644
index 000000000..40747a53c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
@@ -0,0 +1,120 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/aws/smithy-go"
+	smithymiddleware "github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type buildEndpoint struct {
+	Endpoint string
+}
+
+func (b *buildEndpoint) ID() string {
+	return "BuildEndpoint"
+}
+
+func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) (
+	out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport, %T", in.Request)
+	}
+
+	if len(b.Endpoint) == 0 {
+		return out, metadata, fmt.Errorf("endpoint not provided")
+	}
+
+	parsed, err := url.Parse(b.Endpoint)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err)
+	}
+
+	request.URL = parsed
+
+	
+	return next.HandleBuild(ctx, in)
+}
+
+type serializeOpGetCredential struct{}
+
+func (s *serializeOpGetCredential) ID() string {
+	return "OperationSerializer"
+}
+
+func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) (
+	out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request)
+	}
+
+	params, ok := in.Parameters.(*GetCredentialsInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters)
+	}
+
+	const acceptHeader = "Accept"
+	request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json")
+
+	if len(params.AuthorizationToken) > 0 {
+		const authHeader = "Authorization"
+		request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken)
+	}
+
+	return next.HandleSerialize(ctx, in)
+}
+
+type deserializeOpGetCredential struct{}
+
+func (d *deserializeOpGetCredential) ID() string {
+	return "OperationDeserializer"
+}
+
+func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) (
+	out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, deserializeError(response)
+	}
+
+	var shape *GetCredentialsOutput
+	if err = json.NewDecoder(response.Body).Decode(&shape); err != nil {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)}
+	}
+
+	out.Result = shape
+	return out, metadata, err
+}
+
+func deserializeError(response *smithyhttp.Response) error {
+	var errShape *EndpointError
+	err := json.NewDecoder(response.Body).Decode(&errShape)
+	if err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to decode error message, %w", err)}
+	}
+
+	if response.StatusCode >= 500 {
+		errShape.Fault = smithy.FaultServer
+	} else {
+		errShape.Fault = smithy.FaultClient
+	}
+
+	return errShape
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
new file mode 100644
index 000000000..adc7fc6b0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
@@ -0,0 +1,136 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+//
+//	{
+//	    "AccessKeyId" : "MUA...",
+//	    "SecretAccessKey" : "/7PC5om...."
+//	}
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response.
+// The format of the refreshable credentials response:
+//
+//	{
+//	    "AccessKeyId" : "MUA...",
+//	    "SecretAccessKey" : "/7PC5om....",
+//	    "Token" : "AQoDY....=",
+//	    "Expiration" : "2016-02-25T06:03:31Z"
+//	}
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+//
+//	{
+//	    "code": "ErrorCode",
+//	    "message": "Helpful error message."
+//	}
package endpointcreds
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+type getCredentialsAPIClient interface {
+	GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error)
+}
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+	// The AWS Client to make HTTP requests to the endpoint with. The endpoint
+	// the request will be made to is provided by the aws.Config's
+	// EndpointResolver.
+	client getCredentialsAPIClient
+
+	options Options
+}
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Options is a structure of configurable options for the Provider
+type Options struct {
+	// Endpoint to retrieve credentials from. Required
+	Endpoint string
+
+	// HTTPClient to handle sending HTTP requests to the target endpoint.
+	HTTPClient HTTPClient
+
+	// Set of options to modify how the credentials operation is invoked.
+	APIOptions []func(*middleware.Stack) error
+
+	// The Retryer to be used for determining whether a failed request should be retried
+	Retryer aws.Retryer
+
+	// Optional authorization token value if set will be used as the value of
+	// the Authorization header of the endpoint credential request.
+	AuthorizationToken string
+}
+
+// New returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func New(endpoint string, optFns ...func(*Options)) *Provider {
+	o := Options{
+		Endpoint: endpoint,
+	}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	p := &Provider{
+		client: client.New(client.Options{
+			HTTPClient: o.HTTPClient,
+			Endpoint:   o.Endpoint,
+			APIOptions: o.APIOptions,
+			Retryer:    o.Retryer,
+		}),
+		options: o,
+	}
+
+	return p
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
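+//
+// A minimal usage sketch (not part of the upstream docs; the endpoint URL and
+// token value are illustrative placeholders):
+//
+//	provider := endpointcreds.New("http://127.0.0.1:8080/creds",
+//		func(o *endpointcreds.Options) {
+//			o.AuthorizationToken = "example-token"
+//		})
+//
+//	// Wrap with aws.CredentialsCache to add caching and concurrency safety.
+//	creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())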
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	resp, err := p.getCredentials(ctx)
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err)
+	}
+
+	creds := aws.Credentials{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		Source:          ProviderName,
+	}
+
+	if resp.Expiration != nil {
+		creds.CanExpire = true
+		creds.Expires = *resp.Expiration
+	}
+
+	return creds, nil
+}
+
+func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) {
+	return p.client.GetCredentials(ctx, &client.GetCredentialsInput{AuthorizationToken: p.options.AuthorizationToken})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
new file mode 100644
index 000000000..878dfaf6e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package credentials
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.13.26"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
new file mode 100644
index 000000000..a3137b8fa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
@@ -0,0 +1,92 @@
+// Package processcreds is a credentials provider to retrieve credentials from an
+// external CLI invoked process.
+//
+// WARNING: The following describes a method of sourcing credentials from an external
+// process. This can potentially be dangerous, so proceed with caution. Other
+// credential providers should be preferred if at all possible. If using this
+// option, you should make sure that the config file is as locked down as possible
+// using security best practices for your operating system.
+//
+// # Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with a
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// # Loading credentials with the SDK's AWS Config
+//
+// You can use credentials from an AWS shared config `credential_process` in a
+// variety of ways.
+//
+// One way is to set up your shared config file, located in the default
+// location, with the `credential_process` key and the command you want to be
+// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+//
+//	[default]
+//	credential_process = /command/to/call
+//
+// Loading configuration using the external config loader will use the credential
+// process to retrieve credentials. NOTE: If there are credentials in the profile
+// you are using, the credential process will not be used.
+//
+//	// Load the configuration and credentials.
+//	cfg, _ := config.LoadDefaultConfig(context.TODO())
+//
+//	// Create S3 service client to use the credentials.
+//	svc := s3.NewFromConfig(cfg)
+//
+// # Loading credentials with the Provider directly
+//
+// Another way to use the credentials process provider is by using the
+// `NewProvider` constructor to create the provider and providing it with a
+// command to be executed to retrieve credentials.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+//	// Create credentials using the Provider.
+//	provider := processcreds.NewProvider("/path/to/command")
+//
+//	// Create the service client value configured for credentials.
+//	svc := s3.New(s3.Options{
+//		Credentials: aws.NewCredentialsCache(provider),
+//	})
+//
+// If you need more control, you can set any configurable options in the
+// credentials using one or more option functions.
+//
+//	provider := processcreds.NewProvider("/path/to/command",
+//		func(o *processcreds.Options) {
+//			// Override the provider's default timeout
+//			o.Timeout = 2 * time.Minute
+//		})
+//
+// You can also use your own `exec.Cmd` value by providing a value that satisfies
+// the `NewCommandBuilder` interface and using the `NewProviderCommand` constructor.
+//
+//	// Create an exec.Cmd
+//	cmdBuilder := processcreds.NewCommandBuilderFunc(
+//		func(ctx context.Context) (*exec.Cmd, error) {
+//			cmd := exec.CommandContext(ctx,
+//				"customCLICommand",
+//				"-a", "argument",
+//			)
+//			cmd.Env = []string{
+//				"ENV_VAR_FOO=value",
+//				"ENV_VAR_BAR=other_value",
+//			}
+//
+//			return cmd, nil
+//		},
+//	)
+//
+//	// Create credentials using your exec.Cmd and custom timeout
+//	provider := processcreds.NewProviderCommand(cmdBuilder,
+//		func(opt *processcreds.Options) {
+//			// optionally override the provider's default timeout
+//			opt.Timeout = 1 * time.Second
+//		})
+package processcreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
new file mode 100644
index 000000000..fe9345e28
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
@@ -0,0 +1,281 @@
+package processcreds
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"runtime"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdkio"
+)
+
+const (
+	// ProviderName is the name this credentials provider will label any
+	// returned credentials Value with.
+	ProviderName = `ProcessProvider`
+
+	// DefaultTimeout is the default limit on the time a process can run.
+	DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProviderError is an error indicating failure initializing or executing the
+// process credentials provider
+type ProviderError struct {
+	Err error
+}
+
+// Error returns the error message.
+func (e *ProviderError) Error() string {
+	return fmt.Sprintf("process provider error: %v", e.Err)
+}
+
+// Unwrap returns the underlying error the provider error wraps.
+func (e *ProviderError) Unwrap() error {
+	return e.Err
+}
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a
+// client to retrieve credentials from a process.
+type Provider struct {
+	// Provides a constructor for the exec.Cmd values invoked by the provider
+	// for retrieving credentials. Use this to provide custom creation of
+	// exec.Cmd with things like environment variables, or other configuration.
+	//
+	// The provider defaults to the DefaultNewCommandBuilder.
+	commandBuilder NewCommandBuilder
+
+	options Options
+}
+
+// Options holds the configuration options for the Provider.
+type Options struct {
+	// Timeout limits the time a process can run.
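+	// Defaults to DefaultTimeout (one minute) when the provider is created
+	// via NewProvider or NewProviderCommand.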
+	Timeout time.Duration
+}
+
+// NewCommandBuilder provides the interface for specifying how the command that
+// the Provider uses to retrieve credentials will be created.
+type NewCommandBuilder interface {
+	NewCommand(context.Context) (*exec.Cmd, error)
+}
+
+// NewCommandBuilderFunc provides a wrapper type around a function pointer to
+// satisfy the NewCommandBuilder interface.
+type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error)
+
+// NewCommand calls the underlying function pointer the builder was initialized with.
+func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+	return fn(ctx)
+}
+
+// DefaultNewCommandBuilder provides the default NewCommandBuilder
+// implementation used by the provider. It takes a command and arguments to
+// invoke. The command will also be initialized with the current process
+// environment variables, stderr, and stdin pipes.
+type DefaultNewCommandBuilder struct {
+	Args []string
+}
+
+// NewCommand returns an initialized exec.Cmd with the builder's initialized
+// Args. The command is also initialized with the current process's environment
+// variables, stderr, and stdin pipes.
+func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+	var cmdArgs []string
+	if runtime.GOOS == "windows" {
+		cmdArgs = []string{"cmd.exe", "/C"}
+	} else {
+		cmdArgs = []string{"sh", "-c"}
+	}
+
+	if len(b.Args) == 0 {
+		return nil, &ProviderError{
+			Err: fmt.Errorf("failed to prepare command: command must not be empty"),
+		}
+	}
+
+	cmdArgs = append(cmdArgs, b.Args...)
+	cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...)
+	cmd.Env = os.Environ()
+
+	cmd.Stderr = os.Stderr // display stderr on console for MFA
+	cmd.Stdin = os.Stdin   // enable stdin for MFA
+
+	return cmd, nil
+}
+
+// NewProvider returns a pointer to a new Provider that runs the given command
+// to retrieve credentials.
+//
+// The provider defaults to the DefaultNewCommandBuilder for creating the command
+// the Provider will use to retrieve credentials with.
+func NewProvider(command string, options ...func(*Options)) *Provider {
+	var args []string
+
+	// Ensure that the command arguments are not set if the provided command is
+	// empty. This will error out when the command is executed since no
+	// arguments are specified.
+	if len(command) > 0 {
+		args = []string{command}
+	}
+
+	commandBuilder := DefaultNewCommandBuilder{
+		Args: args,
+	}
+	return NewProviderCommand(commandBuilder, options...)
+}
+
+// NewProviderCommand returns a pointer to a new Provider with the specified
+// command builder, and default timeout duration. Use this to provide custom
+// creation of exec.Cmd for options like environment variables, or other
+// configuration.
+func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider {
+	p := &Provider{
+		commandBuilder: builder,
+		options: Options{
+			Timeout: DefaultTimeout,
+		},
+	}
+
+	for _, option := range options {
+		option(&p.options)
+	}
+
+	return p
+}
+
+// A CredentialProcessResponse is the AWS credentials format that must be
+// returned when executing an external credential_process.
+type CredentialProcessResponse struct {
+	// As of this writing, the Version key must be set to 1. This might
+	// increment over time as the structure evolves.
+	Version int
+
+	// The access key ID that identifies the temporary security credentials.
+	AccessKeyID string `json:"AccessKeyId"`
+
+	// The secret access key that can be used to sign requests.
+	SecretAccessKey string
+
+	// The token that users must pass to the service API to use the temporary credentials.
+	SessionToken string
+
+	// The date on which the current credentials expire.
+	Expiration *time.Time
+}
+
+// Retrieve executes the credential process command and returns the
+// credentials, or an error if the command fails.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	out, err := p.executeCredentialProcess(ctx)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	// Deserialize and validate the response
+	resp := &CredentialProcessResponse{}
+	if err = json.Unmarshal(out, resp); err != nil {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("failed to parse process output: %s, error: %w", out, err),
+		}
+	}
+
+	if resp.Version != 1 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("wrong version in process output (not 1)"),
+		}
+	}
+
+	if len(resp.AccessKeyID) == 0 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("missing AccessKeyId in process output"),
+		}
+	}
+
+	if len(resp.SecretAccessKey) == 0 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("missing SecretAccessKey in process output"),
+		}
+	}
+
+	creds := aws.Credentials{
+		Source:          ProviderName,
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.SessionToken,
+	}
+
+	// Handle expiration
+	if resp.Expiration != nil {
+		creds.CanExpire = true
+		creds.Expires = *resp.Expiration
+	}
+
+	return creds, nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) {
+	if p.options.Timeout >= 0 {
+		var cancelFunc func()
+		ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout)
+		defer cancelFunc()
+	}
+
+	cmd, err := p.commandBuilder.NewCommand(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// get creds json on process's stdout
+	output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte)))
+	if cmd.Stdout != nil {
+		cmd.Stdout = io.MultiWriter(cmd.Stdout, output)
+	} else {
+		cmd.Stdout = output
+	}
+
+	execCh := make(chan error, 1)
+	go executeCommand(cmd, execCh)
+
+	select {
+	case execError := <-execCh:
+		if execError == nil {
+			break
+		}
+		select {
+		case <-ctx.Done():
+			return output.Bytes(), &ProviderError{
+				Err: fmt.Errorf("credential process timed out: %w", execError),
+			}
+		default:
+			return output.Bytes(), &ProviderError{
+				Err: fmt.Errorf("error in credential_process: %w", execError),
+			}
+		}
+	}
+
+	out := output.Bytes()
+	if runtime.GOOS == "windows" {
+		// Windows adds backslashes to quotes
+		out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`))
+	}
+
+	return out, nil
+}
+
+func executeCommand(cmd *exec.Cmd, execCh chan error) {
+	// Start the command
+	err := cmd.Start()
+	if err == nil {
+		err = cmd.Wait()
+	}
+
+	execCh <- err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
new file mode 100644
index 000000000..ece1e65f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
@@ -0,0 +1,81 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS
+// credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS
+// SSO login flow. The SDK provider expects that you have already performed the
+// SSO login flow using the AWS CLI's "aws sso login" command, or by some
+// other mechanism. The provider must find a valid non-expired access token for
+// the AWS SSO user portal URL in ~/.aws/sso/cache. If a cached token is not
+// found, it is expired, or the file is malformed, an error will be returned.
+//
+// # Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file by
+// specifying the required keys in the profile and referencing an sso-session:
+//
+//	sso_session
+//	sso_account_id
+//	sso_role_name
+//
+// For example, the following defines a profile "devsso" and specifies the AWS
+// SSO parameters that define the target account, role, sign-on portal, and
+// the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+//	[profile devsso]
+//	sso_session = dev-session
+//	sso_role_name = SSOReadOnlyRole
+//	sso_account_id = 123456789012
+//
+//	[sso-session dev-session]
+//	sso_start_url = https://my-sso-portal.awsapps.com/start
+//	sso_region = us-east-1
+//	sso_registration_scopes = sso:account:access
+//
+// Using the config module, you can load the AWS SDK shared configuration, and
+// specify that this profile be used to retrieve credentials. For example:
+//
+//	config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso"))
+//	if err != nil {
+//		return err
+//	}
+//
+// # Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application,
+// and provide the necessary information to load and retrieve temporary
+// credentials using an access token from ~/.aws/sso/cache.
+//
+//	ssoClient := sso.NewFromConfig(cfg)
+//	ssoOidcClient := ssooidc.NewFromConfig(cfg)
+//	tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session")
+//	if err != nil {
+//		return err
+//	}
+//
+//	var provider aws.CredentialsProvider
+//	provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) {
+//		options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath)
+//	})
+//
+//	// Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time
+//	provider = aws.NewCredentialsCache(provider)
+//
+//	credentials, err := provider.Retrieve(context.TODO())
+//	if err != nil {
+//		return err
+//	}
+//
+// It is important that you wrap the Provider with aws.CredentialsCache if you
+// are programmatically constructing the provider directly. This prevents your
+// application from accessing the cached access token and requesting new
+// credentials each time the credentials are used.
+//
+// # Additional Resources
+//
+// Configuring the AWS CLI to use AWS Single Sign-On:
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+//
+// AWS Single Sign-On User Guide:
+// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
+package ssocreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
new file mode 100644
index 000000000..3b97e6dd4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
@@ -0,0 +1,233 @@
+package ssocreds
+
+import (
+	"crypto/sha1"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
+)
+
+var osUserHomeDir = shareddefaults.UserHomeDir
+
+// StandardCachedTokenFilepath returns the filepath for the cached SSO token file,
+// or an error if unable to derive the path. The key is used to compute a SHA1
+// value that is hex encoded.
+//
+// Derives the filepath using the key as:
+//
+//	~/.aws/sso/cache/<hex(sha1(key))>.json
+func StandardCachedTokenFilepath(key string) (string, error) {
+	homeDir := osUserHomeDir()
+	if len(homeDir) == 0 {
+		return "", fmt.Errorf("unable to get USER's home directory for cached token")
+	}
+	hash := sha1.New()
+	if _, err := hash.Write([]byte(key)); err != nil {
+		return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err)
+	}
+
+	cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json"
+
+	return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil
+}
+
+type tokenKnownFields struct {
+	AccessToken string   `json:"accessToken,omitempty"`
+	ExpiresAt   *rfc3339 `json:"expiresAt,omitempty"`
+
+	RefreshToken string `json:"refreshToken,omitempty"`
+	ClientID     string `json:"clientId,omitempty"`
+	ClientSecret string `json:"clientSecret,omitempty"`
+}
+
+type token struct {
+	tokenKnownFields
+	UnknownFields map[string]interface{} `json:"-"`
+}
+
+func (t token) MarshalJSON() ([]byte, error) {
+	fields := map[string]interface{}{}
+
+	setTokenFieldString(fields, "accessToken", t.AccessToken)
+	setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt)
+
+	setTokenFieldString(fields, "refreshToken", t.RefreshToken)
+	setTokenFieldString(fields, "clientId", t.ClientID)
+	setTokenFieldString(fields, "clientSecret", t.ClientSecret)
+
+	for k, v := range t.UnknownFields {
+		if _, ok := fields[k]; ok {
+			return nil, fmt.Errorf("unknown token field %v, duplicates known field", k)
+		}
+		fields[k] = v
+	}
+
+	return json.Marshal(fields)
+}
+
+func setTokenFieldString(fields map[string]interface{}, key, value string) {
+	if value == "" {
+		return
+	}
+	fields[key] = value
+}
+func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) {
+	if value == nil {
+		return
+	}
+	fields[key] = value
+}
+
+func (t *token) UnmarshalJSON(b []byte) error {
+	var fields map[string]interface{}
+	if err := json.Unmarshal(b, &fields); err != nil {
+		// Surface the unmarshal error instead of silently dropping it.
+		return err
+	}
+
+	t.UnknownFields = map[string]interface{}{}
+
+	for k, v := range fields {
+		var err error
+		switch k {
+		case "accessToken":
+			err = getTokenFieldString(v, &t.AccessToken)
+		case "expiresAt":
+			err = getTokenFieldRFC3339(v, &t.ExpiresAt)
+		case "refreshToken":
+			err = getTokenFieldString(v, &t.RefreshToken)
+		case "clientId":
+			err = getTokenFieldString(v, &t.ClientID)
+		case "clientSecret":
+			err = getTokenFieldString(v, &t.ClientSecret)
+		default:
+			t.UnknownFields[k] = v
+		}
+
+		if err != nil {
+			return fmt.Errorf("field %q, %w", k, err)
+		}
+	}
+
+	return nil
+}
+
+func getTokenFieldString(v interface{}, value *string) error {
+	var ok bool
+	*value, ok = v.(string)
+	if !ok {
+		return fmt.Errorf("expect value to be string, got %T", v)
+	}
+	return nil
+}
+
+func getTokenFieldRFC3339(v interface{}, value **rfc3339) error {
+	var stringValue string
+	if err := getTokenFieldString(v, &stringValue); err != nil {
+		return err
+	}
+
+	timeValue, err := parseRFC3339(stringValue)
+	if err != nil {
+		return err
+	}
+
+	*value = &timeValue
+	return nil
+}
+
+func loadCachedToken(filename string) (token, error) {
+	fileBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err)
+	}
+
+	var t token
+	if err := json.Unmarshal(fileBytes, &t); err != nil {
+		return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err)
+	}
+
+	if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() {
+		return token{}, fmt.Errorf(
+			"cached SSO token must contain accessToken and expiresAt fields")
+	}
+
+	return t, nil
+}
+
+func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) {
+	tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+	if err := writeCacheFile(tmpFilename, fileMode, t); err != nil {
+		return err
+	}
+
+	if err := os.Rename(tmpFilename, filename); err != nil {
+		return fmt.Errorf("failed to replace old cached SSO token file, %w", err)
+	}
+
+	return nil
+}
+
+func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) {
+	var f *os.File
+	f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode)
+	if err != nil {
+		return fmt.Errorf("failed to create cached SSO token file, %w", err)
+	}
+
+	defer func() {
+		closeErr := f.Close()
+		if err == nil && closeErr != nil {
+			err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr)
+		}
+	}()
+
+	encoder := json.NewEncoder(f)
+
+	if err = encoder.Encode(t); err != nil {
+		return fmt.Errorf("failed to serialize cached SSO token, %w", err)
+	}
+
+	return nil
+}
+
+type rfc3339 time.Time
+
+func parseRFC3339(v string) (rfc3339, error) {
+	parsed, err := time.Parse(time.RFC3339, v)
+	if err != nil {
+		return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err)
+	}
+
+	return rfc3339(parsed), nil
+}
+
+func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) {
+	var value string
+
+	// Use JSON unmarshal to unescape the quoted value making use of JSON's
+	// unquoting rules.
+	if err = json.Unmarshal(bytes, &value); err != nil {
+		return err
+	}
+
+	*r, err = parseRFC3339(value)
+
+	// Return the parse error (if any) rather than unconditionally returning nil.
+	return err
+}
+
+func (r *rfc3339) MarshalJSON() ([]byte, error) {
+	value := time.Time(*r).Format(time.RFC3339)
+
+	// Use JSON marshal to quote the value, making use of JSON's
+	// quoting rules.
+	return json.Marshal(value)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
new file mode 100644
index 000000000..b3cf7853e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
@@ -0,0 +1,152 @@
+package ssocreds
+
+import (
+	"context"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/sso"
+)
+
+// ProviderName is the name of the provider used to specify the source of
+// credentials.
+const ProviderName = "SSOProvider"
+
+// GetRoleCredentialsAPIClient is an API client that implements the
+// GetRoleCredentials operation.
+type GetRoleCredentialsAPIClient interface {
+	GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) (
+		*sso.GetRoleCredentialsOutput, error,
+	)
+}
+
+// Options is the Provider options structure.
+type Options struct {
+	// The Client which is configured for the AWS Region where the AWS SSO user
+	// portal is located.
+	Client GetRoleCredentialsAPIClient
+
+	// The AWS account that is assigned to the user.
+	AccountID string
+
+	// The role name that is assigned to the user.
+	RoleName string
+
+	// The URL that points to the organization's AWS Single Sign-On (AWS SSO)
+	// user portal.
+	StartURL string
+
+	// The filepath the cached token will be retrieved from. If unset, the
+	// Provider will use the StartURL to determine the filepath:
+	//
+	//	~/.aws/sso/cache/<hex(sha1(startURL))>.json
+	//
+	// If a custom cached token filepath is used, the Provider's StartURL
+	// parameter will be ignored.
+	CachedTokenFilepath string
+
+	// Used by the SSOCredentialProvider if a token configuration
+	// profile is used in the shared config
+	SSOTokenProvider *SSOTokenProvider
+}
+
+// Provider is an AWS credential provider that retrieves temporary AWS
+// credentials by exchanging an SSO login token.
+type Provider struct {
+	options Options
+
+	cachedTokenFilepath string
+}
+
+// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The
+// provided client is expected to be configured for the AWS Region where the
+// AWS SSO user portal is located.
+func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider {
+	options := Options{
+		Client:    client,
+		AccountID: accountID,
+		RoleName:  roleName,
+		StartURL:  startURL,
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &Provider{
+		options:             options,
+		cachedTokenFilepath: options.CachedTokenFilepath,
+	}
+}
+
+// Retrieve retrieves temporary AWS credentials from the configured Amazon
+// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present
+// in ~/.aws/sso/cache. However, if a token provider configuration exists
+// in the shared config, the token provider is used rather than directly
+// accessing the cached token.
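+//
+// A minimal construction sketch (not from the upstream docs; the account ID,
+// role name, and start URL are placeholders, and cfg is assumed to be an
+// already-loaded aws.Config):
+//
+//	provider := ssocreds.New(sso.NewFromConfig(cfg),
+//		"123456789012", "SSOReadOnlyRole",
+//		"https://my-sso-portal.awsapps.com/start")
+//	creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())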
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	var accessToken *string
+	if p.options.SSOTokenProvider != nil {
+		token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx)
+		if err != nil {
+			return aws.Credentials{}, err
+		}
+		accessToken = &token.Value
+	} else {
+		if p.cachedTokenFilepath == "" {
+			cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL)
+			if err != nil {
+				return aws.Credentials{}, &InvalidTokenError{Err: err}
+			}
+			p.cachedTokenFilepath = cachedTokenFilepath
+		}
+
+		tokenFile, err := loadCachedToken(p.cachedTokenFilepath)
+		if err != nil {
+			return aws.Credentials{}, &InvalidTokenError{Err: err}
+		}
+
+		if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) {
+			return aws.Credentials{}, &InvalidTokenError{}
+		}
+		accessToken = &tokenFile.AccessToken
+	}
+
+	output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
+		AccessToken: accessToken,
+		AccountId:   &p.options.AccountID,
+		RoleName:    &p.options.RoleName,
+	})
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	return aws.Credentials{
+		AccessKeyID:     aws.ToString(output.RoleCredentials.AccessKeyId),
+		SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey),
+		SessionToken:    aws.ToString(output.RoleCredentials.SessionToken),
+		CanExpire:       true,
+		Expires:         time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(),
+		Source:          ProviderName,
+	}, nil
+}
+
+// InvalidTokenError is the error type that is returned if the loaded token has
+// expired or is otherwise invalid. To refresh the SSO session, run AWS SSO
+// login with the corresponding profile.
+type InvalidTokenError struct {
+	Err error
+}
+
+func (i *InvalidTokenError) Unwrap() error {
+	return i.Err
+}
+
+func (i *InvalidTokenError) Error() string {
+	const msg = "the SSO session has expired or is invalid"
+	if i.Err == nil {
+		return msg
+	}
+	return msg + ": " + i.Err.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
new file mode 100644
index 000000000..7f4fc5467
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
@@ -0,0 +1,147 @@
+package ssocreds
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
+	"github.com/aws/smithy-go/auth/bearer"
+)
+
+// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
+// client for calling the CreateToken operation to refresh the SSO token.
+type CreateTokenAPIClient interface {
+	CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) (
+		*ssooidc.CreateTokenOutput, error,
+	)
+}
+
+// SSOTokenProviderOptions provides the options for configuring the
+// SSOTokenProvider.
+type SSOTokenProviderOptions struct {
+	// Client that can be overridden
+	Client CreateTokenAPIClient
+
+	// The set of API Client options to be applied when invoking the
+	// CreateToken operation.
+	ClientOptions []func(*ssooidc.Options)
+
+	// The path of the file containing the cached SSO token to be read from.
+	// Initialized by NewSSOTokenProvider's cachedTokenFilepath parameter.
+	CachedTokenFilepath string
+}
+
+// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
+// Bearer Authentication.
+// The SSOTokenProvider can only be used to refresh already cached SSO
+// Tokens. This utility cannot perform the initial SSO token creation.
+//
+// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in
+// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's
+// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with
+// the smithy-go TokenCache, if the loaded external configuration is configured
+// for an SSO session.
+//
+// The initial SSO create token should be performed with the AWS CLI before the
+// Go application using the SSOTokenProvider will need to retrieve the SSO
+// token. If the AWS CLI has not created the token cache file, this provider
+// will return an error when attempting to retrieve the cached token.
+//
+// This provider will attempt to refresh the cached SSO token periodically if
+// needed when RetrieveBearerToken is called.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+type SSOTokenProvider struct {
+	options SSOTokenProviderOptions
+}
+
+var _ bearer.TokenProvider = (*SSOTokenProvider)(nil)
+
+// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
+// periodically refresh the SSO token cache stored in the cachedTokenFilepath.
+// The cachedTokenFilepath file's content will be rewritten by the token
+// provider when the token is refreshed.
+//
+// The client must be configured for the AWS region the SSO token was created for.
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider {
+	options := SSOTokenProviderOptions{
+		Client:              client,
+		CachedTokenFilepath: cachedTokenFilepath,
+	}
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	provider := &SSOTokenProvider{
+		options: options,
+	}
+
+	return provider
+}
+
+// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath
+// the SSOTokenProvider was created with. If the token has expired,
+// RetrieveBearerToken will attempt to refresh it. If the token cannot be
+// refreshed or is not present, an error will be returned.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) {
+	cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath)
+	if err != nil {
+		return bearer.Token{}, err
+	}
+
+	if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) {
+		cachedToken, err = p.refreshToken(ctx, cachedToken)
+		if err != nil {
+			return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err)
+		}
+	}
+
+	expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt))
+	return bearer.Token{
+		Value:     cachedToken.AccessToken,
+		CanExpire: !expiresAt.IsZero(),
+		Expires:   expiresAt,
+	}, nil
+}
+
+func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) {
+	if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" {
+		return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed")
+	}
+
+	createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{
+		ClientId:     &cachedToken.ClientID,
+		ClientSecret: &cachedToken.ClientSecret,
+		RefreshToken: &cachedToken.RefreshToken,
+		GrantType:    aws.String("refresh_token"),
+	}, p.options.ClientOptions...)
+	if err != nil {
+		return token{}, fmt.Errorf("unable to refresh SSO token, %w", err)
+	}
+
+	expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second)
+
+	cachedToken.AccessToken = aws.ToString(createResult.AccessToken)
+	cachedToken.ExpiresAt = (*rfc3339)(&expiresAt)
+	cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken)
+
+	fileInfo, err := os.Stat(p.options.CachedTokenFilepath)
+	if err != nil {
+		return token{}, fmt.Errorf("failed to stat cached SSO token file, %w", err)
+	}
+
+	if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil {
+		return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err)
+	}
+
+	return cachedToken, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
new file mode 100644
index 000000000..d525cac09
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
@@ -0,0 +1,53 @@
+package credentials
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+const (
+	// StaticCredentialsName provides the name of the Static provider
+	StaticCredentialsName = "StaticCredentials"
+)
+
+// StaticCredentialsEmptyError is emitted when static credentials are empty.
+type StaticCredentialsEmptyError struct{}
+
+func (*StaticCredentialsEmptyError) Error() string {
+	return "static credentials are empty"
+}
+
+// A StaticCredentialsProvider is a set of credentials which are set, and will
+// never expire.
+type StaticCredentialsProvider struct {
+	Value aws.Credentials
+}
+
+// NewStaticCredentialsProvider returns a StaticCredentialsProvider initialized
+// with the AWS credentials passed in.
+func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider {
+	return StaticCredentialsProvider{
+		Value: aws.Credentials{
+			AccessKeyID:     key,
+			SecretAccessKey: secret,
+			SessionToken:    session,
+		},
+	}
+}
+
+// Retrieve returns the credentials or an error if the credentials are invalid.
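+//
+// A minimal usage sketch (not from the upstream docs; the key and secret
+// values are placeholders, and the config module is assumed to be vendored
+// as well):
+//
+//	provider := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithCredentialsProvider(provider))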
+func (s StaticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
+	v := s.Value
+	if v.AccessKeyID == "" || v.SecretAccessKey == "" {
+		return aws.Credentials{
+			Source: StaticCredentialsName,
+		}, &StaticCredentialsEmptyError{}
+	}
+
+	if len(v.Source) == 0 {
+		v.Source = StaticCredentialsName
+	}
+
+	return v, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 000000000..289707b6d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,320 @@
+// Package stscreds are credential Providers to retrieve STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+//
+// The SDK will ensure that per instance of credentials.Credentials all requests
+// to refresh the credentials will be synchronized. But, the SDK is unable to
+// ensure synchronous usage of the AssumeRoleProvider if the value is shared
+// between multiple Credentials or service clients.
+//
+// # Assume Role
+//
+// To assume an IAM role using STS with the SDK you can create a new Credentials
+// with the SDK's stscreds package.
+//
+//	// Initial credentials loaded from SDK's default credential chain. Such as
+//	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+//	// Role. These credentials will be used to make the STS Assume Role API call.
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN.
+//	stsSvc := sts.NewFromConfig(cfg)
+//	creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn")
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+//
+// # Assume Role with custom MFA Token provider
+//
+// To assume an IAM role with an MFA token you can either specify a custom MFA
+// token provider or use the SDK's built-in StdinTokenProvider that will prompt
+// the user for a token code each time the credentials need to be refreshed.
+// Specifying a custom token provider allows you to control where the token
+// code is retrieved from, and how it is refreshed.
+//
+// With a custom token provider, the provider is responsible for refreshing the
+// token code when called.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	staticTokenProvider := func() (string, error) {
+//		return someTokenCode, nil
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+//	creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+//		o.SerialNumber = aws.String("myTokenSerialNumber")
+//		o.TokenProvider = staticTokenProvider
+//	})
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+//
+// # Assume Role with MFA Token Provider
+//
+// To assume an IAM role with MFA for longer running tasks where the credentials
+// may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+// will allow the credential provider to prompt for new MFA token code when the
+// role's credentials need to be refreshed.
+//
+// The StdinTokenProvider function is available to prompt on stdin to retrieve
+// the MFA token code from the user. You can also implement custom prompts by
+// satisfying the TokenProvider function signature.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+//	creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+//		o.SerialNumber = aws.String("myTokenSerialNumber")
+//		o.TokenProvider = stscreds.StdinTokenProvider
+//	})
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+package stscreds
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Printf("Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation.
+type AssumeRoleAPIClient interface {
+	AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time in minutes that the
+// credentials will be valid for. This value is only used by AssumeRoleProvider
+// for specifying the default expiry duration of an assume role.
+//
+// Other providers such as WebIdentityRoleProvider do not use this value, and
+// instead rely on STS API's default parameter handling to assign a default
+// value.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See the shared configuration docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+	options AssumeRoleOptions
+}
+
+// AssumeRoleOptions is the configurable options for AssumeRoleProvider
+type AssumeRoleOptions struct {
+	// Client implementation of the AssumeRole operation. Required
+	Client AssumeRoleAPIClient
+
+	// IAM Role ARN to be assumed. Required
+	RoleARN string
+
+	// Session name, if you wish to uniquely identify this session.
+	RoleSessionName string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The ARNs of IAM managed policies you want to use as managed session policies.
+	// The policies must exist in the same account as the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plain text that you use for both inline and managed session
+	// policies can't exceed 2,048 characters.
+	//
+	// An AWS conversion compresses the passed session policies and session tags
+	// into a packed binary format that has a separate limit. Your request can fail
+	// for this limit even if your plain text meets the other requirements. The
+	// PackedPolicySize response element indicates by percentage how close the policies
+	// and tags for your request are to the upper size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's identity-based
+	// policy and the session policies. You can use the role's temporary credentials
+	// in subsequent AWS API calls to access resources in the account that owns
+	// the role. You cannot use session policies to grant more permissions than
+	// those allowed by the identity-based policy of the role that is being assumed.
+	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	PolicyARNs []types.PolicyDescriptorType
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The source identity specified by the principal that is calling the AssumeRole
+	// operation. You can require users to specify a source identity when they assume a
+	// role.
+	// You do this by using the sts:SourceIdentity condition key in a role trust
+	// policy. You can use source identity information in CloudTrail logs to determine
+	// who took actions with a role. You can use the aws:SourceIdentity condition key
+	// to further control access to Amazon Web Services resources based on the value of
+	// source identity. For more information about using source identity, see Monitor
+	// and control actions taken with assumed roles
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+	// in the IAM User Guide.
+	SourceIdentity *string
+
+	// Async method of providing MFA token code for assuming an IAM role with MFA.
+	// The value returned by the function will be used as the TokenCode in the Retrieve
+	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed when SerialNumber is set.
+	TokenProvider func() (string, error)
+
+	// A list of session tags that you want to pass. Each session tag consists of a key
+	// name and an associated value. For more information about session tags, see
+	// Tagging STS Sessions
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+	// IAM User Guide. This parameter is optional. You can pass up to 50 session tags.
+	Tags []types.Tag
+
+	// A list of keys for session tags that you want to set as transitive. If you set a
+	// tag key as transitive, the corresponding key and value passes to subsequent
+	// sessions in a role chain. For more information, see Chaining Roles with Session
+	// Tags
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+	// in the IAM User Guide. This parameter is optional.
+	TransitiveTagKeys []string
+}
+
+// NewAssumeRoleProvider constructs and returns a credentials provider that
+// will retrieve credentials by assuming an IAM role using STS.
+func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider {
+	o := AssumeRoleOptions{
+		Client:  client,
+		RoleARN: roleARN,
+	}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &AssumeRoleProvider{
+		options: o,
+	}
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	// Apply defaults where parameters are not set.
+	if len(p.options.RoleSessionName) == 0 {
+		// Try to work out a role name that will hopefully end up unique.
+		p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano())
+	}
+	if p.options.Duration == 0 {
+		// Expire as often as AWS permits.
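+		// (DefaultDuration is 15 minutes, the minimum session duration STS allows.)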
+		p.options.Duration = DefaultDuration
+	}
+	input := &sts.AssumeRoleInput{
+		DurationSeconds:   aws.Int32(int32(p.options.Duration / time.Second)),
+		PolicyArns:        p.options.PolicyARNs,
+		RoleArn:           aws.String(p.options.RoleARN),
+		RoleSessionName:   aws.String(p.options.RoleSessionName),
+		ExternalId:        p.options.ExternalID,
+		SourceIdentity:    p.options.SourceIdentity,
+		Tags:              p.options.Tags,
+		TransitiveTagKeys: p.options.TransitiveTagKeys,
+	}
+	if p.options.Policy != nil {
+		input.Policy = p.options.Policy
+	}
+	if p.options.SerialNumber != nil {
+		if p.options.TokenProvider != nil {
+			input.SerialNumber = p.options.SerialNumber
+			code, err := p.options.TokenProvider()
+			if err != nil {
+				return aws.Credentials{}, err
+			}
+			input.TokenCode = aws.String(code)
+		} else {
+			return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but TokenProvider is not set")
+		}
+	}
+
+	resp, err := p.options.Client.AssumeRole(ctx, input)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	return aws.Credentials{
+		AccessKeyID:     *resp.Credentials.AccessKeyId,
+		SecretAccessKey: *resp.Credentials.SecretAccessKey,
+		SessionToken:    *resp.Credentials.SessionToken,
+		Source:          ProviderName,
+
+		CanExpire: true,
+		Expires:   *resp.Credentials.Expiration,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 000000000..ddaf6df6c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,150 @@
+package stscreds
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode()
+
+const (
+	// WebIdentityProviderName is the web identity provider name
+	WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation.
+type AssumeRoleWithWebIdentityAPIClient interface {
+	AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error)
+}
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+	options WebIdentityRoleOptions
+}
+
+// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider
+type WebIdentityRoleOptions struct {
+	// Client implementation of the AssumeRoleWithWebIdentity operation. Required
+	Client AssumeRoleWithWebIdentityAPIClient
+
+	// JWT Token Provider. Required
+	TokenRetriever IdentityTokenRetriever
+
+	// IAM Role ARN to assume. Required
+	RoleARN string
+
+	// Session name, if you wish to uniquely identify this session.
+	RoleSessionName string
+
+	// Expiry duration of the STS credentials. STS will assign a default expiry
+	// duration if this value is unset. This is different from the Duration
+	// option of AssumeRoleProvider, which automatically assigns 15 minutes if
+	// Duration is unset.
+	//
+	// See the STS AssumeRoleWithWebIdentity API reference guide for more
+	// information on defaults.
+	// https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
+	Duration time.Duration
+
+	// An IAM policy in JSON format that you want to use as an inline session policy.
+	Policy *string
+
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you
+	// want to use as managed session policies. The policies must exist in the
+	// same account as the role.
+	PolicyARNs []types.PolicyDescriptorType
+}
+
+// IdentityTokenRetriever is an interface for retrieving a JWT
+type IdentityTokenRetriever interface {
+	GetIdentityToken() ([]byte, error)
+}
+
+// IdentityTokenFile is for retrieving an identity token from the given file name
+type IdentityTokenFile string
+
+// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte
+func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) {
+	b, err := ioutil.ReadFile(string(j))
+	if err != nil {
+		return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err)
+	}
+
+	return b, nil
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided client, role ARN, and token retriever.
+func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider {
+	o := WebIdentityRoleOptions{
+		Client:         client,
+		RoleARN:        roleARN,
+		TokenRetriever: tokenRetriever,
+	}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &WebIdentityRoleProvider{options: o}
+}
+
+// Retrieve will attempt to assume a role using a web identity token obtained
+// from the configured IdentityTokenRetriever. An error is returned if the
+// token cannot be retrieved.
+func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	b, err := p.options.TokenRetriever.GetIdentityToken()
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provided source, %w", err)
+	}
+
+	sessionName := p.options.RoleSessionName
+	if len(sessionName) == 0 {
+		// session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+	}
+	input := &sts.AssumeRoleWithWebIdentityInput{
+		PolicyArns:       p.options.PolicyARNs,
+		RoleArn:          &p.options.RoleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+	}
+	if p.options.Duration != 0 {
+		// If set use the value, otherwise STS will assign a default expiration duration.
+		input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second))
+	}
+	if p.options.Policy != nil {
+		input.Policy = p.options.Policy
+	}
+
+	// InvalidIdentityToken is a temporary error that can occur when assuming
+	// a role with a JWT web identity token, so it is added to the retryable
+	// error codes for this call.
+	resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) {
+		options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode)
+	})
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err)
+	}
+
+	value := aws.Credentials{
+		AccessKeyID:     aws.ToString(resp.Credentials.AccessKeyId),
+		SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey),
+		SessionToken:    aws.ToString(resp.Credentials.SessionToken),
+		Source:          WebIdentityProviderName,
+		CanExpire:       true,
+		Expires:         *resp.Credentials.Expiration,
+	}
+	return value, nil
+}
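A minimal sketch of the web identity flow (role ARN and token path are placeholders; on EKS the token file is injected by the pod identity webhook):

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/config"
		"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
		"github.com/aws/aws-sdk-go-v2/service/sts"
	)

	func main() {
		cfg, err := config.LoadDefaultConfig(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		provider := stscreds.NewWebIdentityRoleProvider(
			sts.NewFromConfig(cfg),
			"arn:aws:iam::123456789012:role/example", // placeholder role ARN
			stscreds.IdentityTokenFile("/var/run/secrets/eks.amazonaws.com/serviceaccount/token"),
		)
		creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("credentials expire:", creds.Expires)
	}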
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/doc.go
new file mode 100644
index 000000000..944feac55
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/doc.go
@@ -0,0 +1,58 @@
+// Package sdk is the official AWS SDK v2 for the Go programming language.
+//
+// aws-sdk-go-v2 is v2 of the AWS SDK for the Go programming language.
+//
+// # Getting started
+//
+// The best way to get started working with the SDK is to use `go get` to add the
+// SDK and desired service clients to your Go dependencies explicitly.
+//
+//	go get github.com/aws/aws-sdk-go-v2
+//	go get github.com/aws/aws-sdk-go-v2/config
+//	go get github.com/aws/aws-sdk-go-v2/service/dynamodb
+//
+// # Hello AWS
+//
+// This example shows how you can use the v2 SDK to make an API request using the
+// SDK's Amazon DynamoDB client.
+//
+//	package main
+//
+//	import (
+//		"context"
+//		"fmt"
+//		"log"
+//
+//		"github.com/aws/aws-sdk-go-v2/aws"
+//		"github.com/aws/aws-sdk-go-v2/config"
+//		"github.com/aws/aws-sdk-go-v2/service/dynamodb"
+//	)
+//
+//	func main() {
+//		// Using the SDK's default configuration, loading additional config
+//		// and credentials values from the environment variables, shared
+//		// credentials, and shared configuration files
+//		cfg, err := config.LoadDefaultConfig(context.TODO(),
+//			config.WithRegion("us-west-2"),
+//		)
+//		if err != nil {
+//			log.Fatalf("unable to load SDK config, %v", err)
+//		}
+//
+//		// Using the Config value, create the DynamoDB client
+//		svc := dynamodb.NewFromConfig(cfg)
+//
+//		// Build the request with its input parameters
+//		resp, err := svc.ListTables(context.TODO(), &dynamodb.ListTablesInput{
+//			Limit: aws.Int32(5),
+//		})
+//		if err != nil {
+//			log.Fatalf("failed to list tables, %v", err)
+//		}
+//
+//		fmt.Println("Tables:")
+//		for _, tableName := range resp.TableNames {
+//			fmt.Println(tableName)
+//		}
+//	}
+package sdk
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
new file mode 100644
index 000000000..b4d50c424
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -0,0 +1,200 @@
+# v1.13.4 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2023-03-14)
+
+* **Feature**: Add flag to disable IMDSv1 fallback
+
+# v1.12.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.19 (2022-10-24)
+
+* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.17 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.16 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.15 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.9 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-11-06)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-10-11)
+
+* **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If a Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout.
+* **Bug Fix**: Fix IMDS client's response handling and operation timeout race. Fixes #1253
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-08-04)
+
+* **Feature**: Adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-07-15)
+
+* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go new file mode 100644 index 000000000..e55edd992 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go @@ -0,0 +1,330 @@ +package imds + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalconfig "github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ServiceID provides the unique name of this API client +const ServiceID = "ec2imds" + +// Client provides the API client for interacting with the Amazon EC2 Instance +// Metadata Service API. 
+type Client struct {
+	options Options
+}
+
+// ClientEnableState provides an enumeration of whether the client is enabled,
+// disabled, or uses the default behavior.
+type ClientEnableState = internalconfig.ClientEnableState
+
+// Enumeration values for ClientEnableState
+const (
+	ClientDefaultEnableState ClientEnableState = internalconfig.ClientDefaultEnableState // default behavior
+	ClientDisabled           ClientEnableState = internalconfig.ClientDisabled           // client disabled
+	ClientEnabled            ClientEnableState = internalconfig.ClientEnabled            // client enabled
+)
+
+// EndpointModeState is an enum configuration variable describing the client endpoint mode.
+// Not configurable directly, but resolved from the AWS config when using NewFromConfig.
+type EndpointModeState = internalconfig.EndpointModeState
+
+// Enumeration values for EndpointModeState
+const (
+	EndpointModeStateUnset EndpointModeState = internalconfig.EndpointModeStateUnset
+	EndpointModeStateIPv4  EndpointModeState = internalconfig.EndpointModeStateIPv4
+	EndpointModeStateIPv6  EndpointModeState = internalconfig.EndpointModeStateIPv6
+)
+
+const (
+	disableClientEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+	// Client endpoint options
+	endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
+
+	defaultIPv4Endpoint = "http://169.254.169.254"
+	defaultIPv6Endpoint = "http://[fd00:ec2::254]"
+)
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	options.HTTPClient = resolveHTTPClient(options.HTTPClient)
+
+	if options.Retryer == nil {
+		options.Retryer = retry.NewStandard()
+	}
+	options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second)
+
+	if options.ClientEnableState == ClientDefaultEnableState {
+		if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") {
+			options.ClientEnableState = ClientDisabled
+		}
+	}
+
+	if len(options.Endpoint) == 0 {
+		if v := os.Getenv(endpointEnvVar); len(v) != 0 {
+			options.Endpoint = v
+		}
+	}
+
+	client := &Client{
+		options: options,
+	}
+
+	if client.options.tokenProvider == nil && !client.options.disableAPIToken {
+		client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL)
+	}
+
+	return client
+}
+
+// NewFromConfig returns an initialized Client based on the AWS SDK config, and
+// functional options. Provide additional functional options to further
+// configure the behavior of the client, such as changing the client's endpoint
+// or adding custom middleware behavior.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+	opts := Options{
+		APIOptions:    append([]func(*middleware.Stack) error{}, cfg.APIOptions...),
+		HTTPClient:    cfg.HTTPClient,
+		ClientLogMode: cfg.ClientLogMode,
+		Logger:        cfg.Logger,
+	}
+
+	if cfg.Retryer != nil {
+		opts.Retryer = cfg.Retryer()
+	}
+
+	resolveClientEnableState(cfg, &opts)
+	resolveEndpointConfig(cfg, &opts)
+	resolveEndpointModeConfig(cfg, &opts)
+
+	return New(opts, optFns...)
+}
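A minimal sketch of constructing the client from an AWS config and forcing the IPv6 endpoint mode (GetRegion is one of the operations defined later in this package):

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/aws/aws-sdk-go-v2/config"
		"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
	)

	func main() {
		cfg, err := config.LoadDefaultConfig(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		client := imds.NewFromConfig(cfg, func(o *imds.Options) {
			o.EndpointMode = imds.EndpointModeStateIPv6 // force the IPv6 endpoint
		})
		out, err := client.GetRegion(context.TODO(), nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("region:", out.Region)
	}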
+
+// Options provides the fields for configuring the API client's behavior.
+type Options struct {
+	// Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on operation
+	// call to modify this list for per operation behavior.
+	APIOptions []func(*middleware.Stack) error
+
+	// The endpoint the client will use to retrieve EC2 instance metadata.
+	//
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If specified,
+	// it overrides EndpointMode.
+	//
+	// If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT
+	// has a value the client will use the value of the environment variable as
+	// the endpoint for operation calls.
+	//
+	//	AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+	Endpoint string
+
+	// The endpoint selection mode the client will use if no explicit endpoint is provided using the Endpoint field.
+	//
+	// Setting EndpointMode to EndpointModeStateIPv4 will configure the client to use the default EC2 IPv4 endpoint.
+	// Setting EndpointMode to EndpointModeStateIPv6 will configure the client to use the default EC2 IPv6 endpoint.
+	//
+	// If EndpointMode is not set (EndpointModeStateUnset), the client defaults to EndpointModeStateIPv4.
+	EndpointMode EndpointModeState
+
+	// The HTTP client to invoke API calls with. Defaults to client's default
+	// HTTP implementation if nil.
+	HTTPClient HTTPClient
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	Retryer aws.Retryer
+
+	// Controls whether the EC2 Instance Metadata client is enabled. The client
+	// defaults to enabled if not set to ClientDisabled. When the client is
+	// disabled it will return an error for all operation calls.
+	//
+	// If ClientEnableState value is ClientDefaultEnableState (default value),
+	// and the environment variable "AWS_EC2_METADATA_DISABLED" is set to
+	// "true", the client will be disabled.
+	//
+	//	AWS_EC2_METADATA_DISABLED=true
+	ClientEnableState ClientEnableState
+
+	// Configures the events that will be sent to the configured logger.
+	ClientLogMode aws.ClientLogMode
+
+	// The logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// Configure IMDSv1 fallback behavior. By default, the client will attempt
+	// to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary]
+	// the client will return any errors encountered from attempting to fetch a token
+	// instead of silently using the insecure data flow of IMDSv1.
+	//
+	// See [configuring IMDS] for more information.
+	//
+	// [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+	EnableFallback aws.Ternary
+
+	// provides the caching of API tokens used for operation calls. If unset,
+	// the API token will not be retrieved for the operation.
+	tokenProvider *tokenProvider
+
+	// option to disable the API token provider for testing.
+	disableAPIToken bool
+}
+
+// HTTPClient provides the interface for a client making HTTP requests with the
+// API.
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...)
+	return to
+}
+
+// WithAPIOptions wraps the API middleware functions, as a functional option
+// for the API Client Options. Use this helper to add additional functional
+// options to the API client, or operation calls.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, optFns...)
+ } +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), + stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + if options.ClientEnableState == ClientDisabled { + return nil, metadata, &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: fmt.Errorf( + "access disabled to EC2 IMDS via client option, or %q environment variable", + disableClientEnvVar), + } + } + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + return nil, metadata, &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + return result, metadata, err +} + +const ( + // HTTP client constants + defaultDialerTimeout = 250 * time.Millisecond + defaultResponseHeaderTimeout = 500 * time.Millisecond +) + +func resolveHTTPClient(client HTTPClient) HTTPClient { + if client == nil { + client = awshttp.NewBuildableClient() + } + + if c, ok := client.(*awshttp.BuildableClient); ok { + client = c. + WithDialerOptions(func(d *net.Dialer) { + // Use a custom Dial timeout for the EC2 Metadata service to account + // for the possibility the application might not be running in an + // environment with the service present. The client should fail fast in + // this case. + d.Timeout = defaultDialerTimeout + }). + WithTransportOptions(func(tr *http.Transport) { + // Use a custom Transport timeout for the EC2 Metadata service to + // account for the possibility that the application might be running in + // a container, and EC2Metadata service drops the connection after a + // single IP Hop. The client should fail fast in this case. 
+				tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout
+			})
+	}
+
+	return client
+}
+
+func resolveClientEnableState(cfg aws.Config, options *Options) error {
+	if options.ClientEnableState != ClientDefaultEnableState {
+		return nil
+	}
+	value, found, err := internalconfig.ResolveClientEnableState(cfg.ConfigSources)
+	if err != nil || !found {
+		return err
+	}
+	options.ClientEnableState = value
+	return nil
+}
+
+func resolveEndpointModeConfig(cfg aws.Config, options *Options) error {
+	if options.EndpointMode != EndpointModeStateUnset {
+		return nil
+	}
+	value, found, err := internalconfig.ResolveEndpointModeConfig(cfg.ConfigSources)
+	if err != nil || !found {
+		return err
+	}
+	options.EndpointMode = value
+	return nil
+}
+
+func resolveEndpointConfig(cfg aws.Config, options *Options) error {
+	if len(options.Endpoint) != 0 {
+		return nil
+	}
+	value, found, err := internalconfig.ResolveEndpointConfig(cfg.ConfigSources)
+	if err != nil || !found {
+		return err
+	}
+	options.Endpoint = value
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
new file mode 100644
index 000000000..9e3bdb0e6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
@@ -0,0 +1,76 @@
+package imds
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getDynamicDataPath = "/latest/dynamic"
+
+// GetDynamicData uses the path provided to request dynamic data from the EC2
+// instance metadata service. The content is returned as an io.ReadCloser, or
+// an error if the request failed.
+func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) {
+	if params == nil {
+		params = &GetDynamicDataInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns,
+		addGetDynamicDataMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetDynamicDataOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
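Reusing the client from the earlier sketch, dynamic data can be fetched and streamed like so ("instance-identity/document" is one of the documented dynamic data paths; Content must be closed by the caller):

	out, err := client.GetDynamicData(context.TODO(), &imds.GetDynamicDataInput{
		Path: "instance-identity/document",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Content.Close()
	doc, err := io.ReadAll(out.Content) // Content is a stream, not a string
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(doc))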
+
+// GetDynamicDataInput provides the input parameters for the GetDynamicData
+// operation.
+type GetDynamicDataInput struct {
+	// The relative dynamic data path to retrieve. Can be an empty string to
+	// retrieve a response containing a newline-separated list of dynamic data
+	// resources available.
+	//
+	// Must not include the dynamic data base path.
+	//
+	// May include a leading slash. If Path includes a trailing slash, the
+	// trailing slash will be included in the request for the resource.
+	Path string
+}
+
+// GetDynamicDataOutput provides the output parameters for the GetDynamicData
+// operation.
+type GetDynamicDataOutput struct {
+	Content io.ReadCloser
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		buildGetDynamicDataPath,
+		buildGetDynamicDataOutput)
+}
+
+func buildGetDynamicDataPath(params interface{}) (string, error) {
+	p, ok := params.(*GetDynamicDataInput)
+	if !ok {
+		return "", fmt.Errorf("unknown parameter type %T", params)
+	}
+
+	return appendURIPath(getDynamicDataPath, p.Path), nil
+}
+
+func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+	return &GetDynamicDataOutput{
+		Content: resp.Body,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
new file mode 100644
index 000000000..24845dccd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
@@ -0,0 +1,102 @@
+package imds
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/aws/smithy-go"
+	smithyio "github.com/aws/smithy-go/io"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getIAMInfoPath = getMetadataPath + "/iam/info"
+
+// GetIAMInfo retrieves the IAM information associated with the instance. An
+// error is returned if the request fails or the response cannot be parsed.
+func (c *Client) GetIAMInfo(
+	ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options),
+) (
+	*GetIAMInfoOutput, error,
+) {
+	if params == nil {
+		params = &GetIAMInfoInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns,
+		addGetIAMInfoMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetIAMInfoOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetIAMInfoInput provides the input parameters for GetIAMInfo operation.
+type GetIAMInfoInput struct{}
+
+// GetIAMInfoOutput provides the output parameters for GetIAMInfo operation.
+type GetIAMInfoOutput struct {
+	IAMInfo
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		buildGetIAMInfoPath,
+		buildGetIAMInfoOutput,
+	)
+}
+
+func buildGetIAMInfoPath(params interface{}) (string, error) {
+	return getIAMInfoPath, nil
+}
+
+func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+	defer func() {
+		closeErr := resp.Body.Close()
+		if err == nil {
+			err = closeErr
+		} else if closeErr != nil {
+			err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+		}
+	}()
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(resp.Body, ringBuffer)
+
+	imdsResult := &GetIAMInfoOutput{}
+	if err = json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil {
+		return nil, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode IAM info, %w", err),
+			Snapshot: ringBuffer.Bytes(),
+		}
+	}
+	// Any code other than success is an error
+	if !strings.EqualFold(imdsResult.Code, "success") {
+		return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s",
+			imdsResult.Code)
+	}
+
+	return imdsResult, nil
+}
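Continuing the same sketch, IAM info is a one-call lookup on the client:

	info, err := client.GetIAMInfo(context.TODO(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("instance profile:", info.InstanceProfileArn)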
+
+// IAMInfo provides the shape for unmarshaling IAM info from the metadata
+// API.
+type IAMInfo struct {
+	Code               string
+	LastUpdated        time.Time
+	InstanceProfileArn string
+	InstanceProfileID  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
new file mode 100644
index 000000000..a87758ed3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
@@ -0,0 +1,109 @@
+package imds
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/aws/smithy-go"
+	smithyio "github.com/aws/smithy-go/io"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document"
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. An error is returned if the request fails or the response cannot
+// be parsed.
+func (c *Client) GetInstanceIdentityDocument(
+	ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options),
+) (
+	*GetInstanceIdentityDocumentOutput, error,
+) {
+	if params == nil {
+		params = &GetInstanceIdentityDocumentInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns,
+		addGetInstanceIdentityDocumentMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetInstanceIdentityDocumentOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetInstanceIdentityDocumentInput provides the input parameters for
+// GetInstanceIdentityDocument operation.
+type GetInstanceIdentityDocumentInput struct{}
+
+// GetInstanceIdentityDocumentOutput provides the output parameters for
+// GetInstanceIdentityDocument operation.
+type GetInstanceIdentityDocumentOutput struct { + InstanceIdentityDocument + + ResultMetadata middleware.Metadata +} + +func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + buildGetInstanceIdentityDocumentPath, + buildGetInstanceIdentityDocumentOutput, + ) +} + +func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) { + return getInstanceIdentityDocumentPath, nil +} + +func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) { + defer func() { + closeErr := resp.Body.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) + } + }() + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(resp.Body, ringBuffer) + + output := &GetInstanceIdentityDocumentOutput{} + if err = json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil { + return nil, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode instance identity document, %w", err), + Snapshot: ringBuffer.Bytes(), + } + } + + return output, nil +} + +// InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go new file mode 100644 index 000000000..cb0ce4c00 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go @@ -0,0 +1,76 @@ +package imds + +import ( + "context" + "fmt" + "io" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getMetadataPath = "/latest/meta-data" + +// GetMetadata uses the path provided to request information from the Amazon +// EC2 Instance Metadata Service. The content will be returned as a string, or +// error if the request failed. +func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) { + if params == nil { + params = &GetMetadataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns, + addGetMetadataMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetMetadataOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetMetadataInput provides the input parameters for the GetMetadata +// operation. +type GetMetadataInput struct { + // The relative metadata path to retrieve. Can be empty string to retrieve + // a response containing a new line separated list of metadata resources + // available. + // + // Must not include the metadata base path. 
+	//
+	// May include a leading slash. If Path includes a trailing slash, the trailing
+	// slash will be included in the request for the resource.
+	Path string
+}
+
+// GetMetadataOutput provides the output parameters for the GetMetadata
+// operation.
+type GetMetadataOutput struct {
+	Content io.ReadCloser
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		buildGetMetadataPath,
+		buildGetMetadataOutput)
+}
+
+func buildGetMetadataPath(params interface{}) (string, error) {
+	p, ok := params.(*GetMetadataInput)
+	if !ok {
+		return "", fmt.Errorf("unknown parameter type %T", params)
+	}
+
+	return appendURIPath(getMetadataPath, p.Path), nil
+}
+
+func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) {
+	return &GetMetadataOutput{
+		Content: resp.Body,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
new file mode 100644
index 000000000..7b9b48912
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
@@ -0,0 +1,72 @@
+package imds
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// GetRegion retrieves the region the instance is running in, as reported by
+// the instance identity document. An error is returned if the request fails
+// or the response cannot be parsed.
+func (c *Client) GetRegion(
+	ctx context.Context, params *GetRegionInput, optFns ...func(*Options),
+) (
+	*GetRegionOutput, error,
+) {
+	if params == nil {
+		params = &GetRegionInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns,
+		addGetRegionMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetRegionOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetRegionInput provides the input parameters for GetRegion operation.
+type GetRegionInput struct{}
+
+// GetRegionOutput provides the output parameters for GetRegion operation.
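Continuing the same sketch, individual metadata values are fetched by relative path ("instance-id" shown here; the trailing-slash rules above apply to the Path value):

	out, err := client.GetMetadata(context.TODO(), &imds.GetMetadataInput{Path: "instance-id"})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Content.Close()
	id, err := io.ReadAll(out.Content)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("instance-id:", string(id))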
+type GetRegionOutput struct { + Region string + + ResultMetadata middleware.Metadata +} + +func addGetRegionMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + buildGetInstanceIdentityDocumentPath, + buildGetRegionOutput, + ) +} + +func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) { + out, err := buildGetInstanceIdentityDocumentOutput(resp) + if err != nil { + return nil, err + } + + result, ok := out.(*GetInstanceIdentityDocumentOutput) + if !ok { + return nil, fmt.Errorf("unexpected instance identity document type, %T", out) + } + + region := result.Region + if len(region) == 0 { + return "", fmt.Errorf("instance metadata did not return a region value") + } + + return &GetRegionOutput{ + Region: region, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go new file mode 100644 index 000000000..841f802c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go @@ -0,0 +1,118 @@ +package imds + +import ( + "context" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getTokenPath = "/latest/api/token" +const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds" + +// getToken uses the duration to return a token for EC2 IMDS, or an error if +// the request failed. +func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) { + if params == nil { + params = &getTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns, + addGetTokenMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*getTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type getTokenInput struct { + TokenTTL time.Duration +} + +type getTokenOutput struct { + Token string + TokenTTL time.Duration + + ResultMetadata middleware.Metadata +} + +func addGetTokenMiddleware(stack *middleware.Stack, options Options) error { + err := addRequestMiddleware(stack, + options, + "PUT", + buildGetTokenPath, + buildGetTokenOutput) + if err != nil { + return err + } + + err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After) + if err != nil { + return err + } + + return nil +} + +func buildGetTokenPath(interface{}) (string, error) { + return getTokenPath, nil +} + +func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) { + defer func() { + closeErr := resp.Body.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) + } + }() + + ttlHeader := resp.Header.Get(tokenTTLHeader) + tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse API token, %w", err) + } + + var token strings.Builder + if _, err = io.Copy(&token, resp.Body); err != nil { + return nil, fmt.Errorf("unable to read API token, %w", err) + } + + return &getTokenOutput{ + Token: token.String(), + TokenTTL: time.Duration(tokenTTL) * time.Second, + }, nil +} + +type tokenTTLRequestHeader struct{} + +func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" } +func (*tokenTTLRequestHeader) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) 
(
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request)
+	}
+
+	input, ok := in.Parameters.(*getTokenInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters)
+	}
+
+	req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second)))
+
+	return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
new file mode 100644
index 000000000..88aa61e9a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
@@ -0,0 +1,60 @@
+package imds
+
+import (
+	"context"
+	"io"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getUserDataPath = "/latest/user-data"
+
+// GetUserData retrieves the user data configured for the instance. The
+// content is returned as an io.ReadCloser, or an error if the request failed.
+func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) {
+	if params == nil {
+		params = &GetUserDataInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns,
+		addGetUserDataMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetUserDataOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetUserDataInput provides the input parameters for the GetUserData
+// operation.
+type GetUserDataInput struct{}
+
+// GetUserDataOutput provides the output parameters for the GetUserData
+// operation.
+type GetUserDataOutput struct {
+	Content io.ReadCloser
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		buildGetUserDataPath,
+		buildGetUserDataOutput)
+}
+
+func buildGetUserDataPath(params interface{}) (string, error) {
+	return getUserDataPath, nil
+}
+
+func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+	return &GetUserDataOutput{
+		Content: resp.Body,
+	}, nil
+}
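Continuing the same sketch, user data is read as a single stream; note that IMDS is expected to answer with an error status when no user data is configured, so that error path can be routine:

	out, err := client.GetUserData(context.TODO(), nil)
	if err != nil {
		log.Printf("no user data available: %v", err)
		return
	}
	defer out.Content.Close()
	data, err := io.ReadAll(out.Content)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("user data: %s\n", data)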
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
new file mode 100644
index 000000000..bacdb5d21
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
@@ -0,0 +1,11 @@
+// Package imds provides the API client for interacting with the Amazon EC2
+// Instance Metadata Service.
+//
+// All Client operation calls have a default timeout. If the operation is not
+// completed before this timeout expires, the operation will be canceled. This
+// timeout can be overridden by providing Context with a timeout or deadline
+// when calling the client's operations.
+//
+// See the EC2 IMDS user guide for more information on using the API.
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+package imds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
new file mode 100644
index 000000000..6fab6e917
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package imds
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.13.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
new file mode 100644
index 000000000..d72fcb562
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
@@ -0,0 +1,98 @@
+package config
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ClientEnableState provides an enumeration of whether the client is enabled,
+// disabled, or uses the default behavior.
+type ClientEnableState uint
+
+// Enumeration values for ClientEnableState
+const (
+	ClientDefaultEnableState ClientEnableState = iota
+	ClientDisabled
+	ClientEnabled
+)
+
+// EndpointModeState is the EC2 IMDS Endpoint Configuration Mode
+type EndpointModeState uint
+
+// Enumeration values for EndpointModeState
+const (
+	EndpointModeStateUnset EndpointModeState = iota
+	EndpointModeStateIPv4
+	EndpointModeStateIPv6
+)
+
+// SetFromString sets the EndpointModeState based on the provided string value.
+// An empty value sets EndpointModeStateUnset; unrecognized values return an error.
+func (e *EndpointModeState) SetFromString(v string) error {
+	v = strings.TrimSpace(v)
+
+	switch {
+	case len(v) == 0:
+		*e = EndpointModeStateUnset
+	case strings.EqualFold(v, "IPv6"):
+		*e = EndpointModeStateIPv6
+	case strings.EqualFold(v, "IPv4"):
+		*e = EndpointModeStateIPv4
+	default:
+		return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
+	}
+	return nil
+}
+
+// ClientEnableStateResolver is a config resolver interface for retrieving whether the IMDS client is disabled.
+type ClientEnableStateResolver interface {
+	GetEC2IMDSClientEnableState() (ClientEnableState, bool, error)
+}
+
+// EndpointModeResolver is a config resolver interface for retrieving the EndpointModeState configuration.
+type EndpointModeResolver interface {
+	GetEC2IMDSEndpointMode() (EndpointModeState, bool, error)
+}
+
+// EndpointResolver is a config resolver interface for retrieving the endpoint.
+type EndpointResolver interface {
+	GetEC2IMDSEndpoint() (string, bool, error)
+}
+
+// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources.
+func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) {
+	for _, source := range sources {
+		if resolver, ok := source.(ClientEnableStateResolver); ok {
+			value, found, err = resolver.GetEC2IMDSClientEnableState()
+			if err != nil || found {
+				return value, found, err
+			}
+		}
+	}
+	return value, found, err
+}
+
+// ResolveEndpointModeConfig resolves the EndpointModeState from a list of configuration sources.
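Because the imds package exports these types as aliases, SetFromString is reachable from client code. A sketch of parsing a mode string (the environment variable name here is an assumption based on the config module's conventions):

	var mode imds.EndpointModeState // alias of the internal type, so SetFromString is reachable
	if err := mode.SetFromString(os.Getenv("AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE")); err != nil {
		log.Fatal(err)
	}
	client := imds.New(imds.Options{EndpointMode: mode})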
+func ResolveEndpointModeConfig(sources []interface{}) (value EndpointModeState, found bool, err error) { + for _, source := range sources { + if resolver, ok := source.(EndpointModeResolver); ok { + value, found, err = resolver.GetEC2IMDSEndpointMode() + if err != nil || found { + return value, found, err + } + } + } + return value, found, err +} + +// ResolveEndpointConfig resolves the endpoint from a list of configuration sources. +func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err error) { + for _, source := range sources { + if resolver, ok := source.(EndpointResolver); ok { + value, found, err = resolver.GetEC2IMDSEndpoint() + if err != nil || found { + return value, found, err + } + } + } + return value, found, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go new file mode 100644 index 000000000..c8abd6491 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go @@ -0,0 +1,285 @@ +package imds + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "net/url" + "path" + "time" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func addAPIRequestMiddleware(stack *middleware.Stack, + options Options, + getPath func(interface{}) (string, error), + getOutput func(*smithyhttp.Response) (interface{}, error), +) (err error) { + err = addRequestMiddleware(stack, options, "GET", getPath, getOutput) + if err != nil { + return err + } + + // Token Serializer build and state management. + if !options.disableAPIToken { + err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After) + if err != nil { + return err + } + + err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before) + if err != nil { + return err + } + } + + return nil +} + +func addRequestMiddleware(stack *middleware.Stack, + options Options, + method string, + getPath func(interface{}) (string, error), + getOutput func(*smithyhttp.Response) (interface{}, error), +) (err error) { + err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack) + if err != nil { + return err + } + + // Operation timeout + err = stack.Initialize.Add(&operationTimeout{ + DefaultTimeout: defaultOperationTimeout, + }, middleware.Before) + if err != nil { + return err + } + + // Operation Serializer + err = stack.Serialize.Add(&serializeRequest{ + GetPath: getPath, + Method: method, + }, middleware.After) + if err != nil { + return err + } + + // Operation endpoint resolver + err = stack.Serialize.Insert(&resolveEndpoint{ + Endpoint: options.Endpoint, + EndpointMode: options.EndpointMode, + }, "OperationSerializer", middleware.Before) + if err != nil { + return err + } + + // Operation Deserializer + err = stack.Deserialize.Add(&deserializeResponse{ + GetOutput: getOutput, + }, middleware.After) + if err != nil { + return err + } + + err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: options.ClientLogMode.IsRequest(), + LogRequestWithBody: options.ClientLogMode.IsRequestWithBody(), + LogResponse: options.ClientLogMode.IsResponse(), + LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(), + }, middleware.After) + if err != nil { + return err + } + + err = addSetLoggerMiddleware(stack, 
options) + if err != nil { + return err + } + + // Retry support + return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{ + Retryer: options.Retryer, + LogRetryAttempts: options.ClientLogMode.IsRetries(), + }) +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +type serializeRequest struct { + GetPath func(interface{}) (string, error) + Method string +} + +func (*serializeRequest) ID() string { + return "OperationSerializer" +} + +func (m *serializeRequest) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + reqPath, err := m.GetPath(in.Parameters) + if err != nil { + return out, metadata, fmt.Errorf("unable to get request URL path, %w", err) + } + + request.Request.URL.Path = reqPath + request.Request.Method = m.Method + + return next.HandleSerialize(ctx, in) +} + +type deserializeResponse struct { + GetOutput func(*smithyhttp.Response) (interface{}, error) +} + +func (*deserializeResponse) ID() string { + return "OperationDeserializer" +} + +func (m *deserializeResponse) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, fmt.Errorf( + "unexpected transport response type, %T, want %T", out.RawResponse, resp) + } + defer resp.Body.Close() + + // read the full body so that any operation timeouts cleanup will not race + // the body being read. 
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return out, metadata, fmt.Errorf("read response body failed, %w", err)
+	}
+	resp.Body = ioutil.NopCloser(bytes.NewReader(body))
+
+	// Anything outside the 2xx range (200 <= status < 300) is an error.
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return out, metadata, &smithyhttp.ResponseError{
+			Response: resp,
+			Err:      fmt.Errorf("request to EC2 IMDS failed"),
+		}
+	}
+
+	result, err := m.GetOutput(resp)
+	if err != nil {
+		return out, metadata, fmt.Errorf(
+			"unable to get deserialized result for response, %w", err,
+		)
+	}
+	out.Result = result
+
+	return out, metadata, err
+}
+
+type resolveEndpoint struct {
+	Endpoint     string
+	EndpointMode EndpointModeState
+}
+
+func (*resolveEndpoint) ID() string {
+	return "ResolveEndpoint"
+}
+
+func (m *resolveEndpoint) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	var endpoint string
+	if len(m.Endpoint) > 0 {
+		endpoint = m.Endpoint
+	} else {
+		switch m.EndpointMode {
+		case EndpointModeStateIPv6:
+			endpoint = defaultIPv6Endpoint
+		case EndpointModeStateIPv4:
+			fallthrough
+		case EndpointModeStateUnset:
+			endpoint = defaultIPv4Endpoint
+		default:
+			return out, metadata, fmt.Errorf("unsupported IMDS endpoint mode")
+		}
+	}
+
+	req.URL, err = url.Parse(endpoint)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	return next.HandleSerialize(ctx, in)
+}
+
+const (
+	defaultOperationTimeout = 5 * time.Second
+)
+
+// operationTimeout adds a timeout on the middleware stack if the Context the
+// stack was called with does not have a deadline. The next middleware must
+// complete before the timeout, or the context will be canceled.
+//
+// If DefaultTimeout is zero, no default timeout will be used if the Context
+// does not have a timeout.
+//
+// The next middleware must also ensure that any resources that are also
+// canceled by the stack's context are completely consumed before returning.
+// Otherwise the timeout cleanup will race the resource being consumed
+// upstream.
+type operationTimeout struct {
+	DefaultTimeout time.Duration
+}
+
+func (*operationTimeout) ID() string { return "OperationTimeout" }
+
+func (m *operationTimeout) HandleInitialize(
+	ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	output middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 {
+		var cancelFn func()
+		ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout)
+		defer cancelFn()
+	}
+
+	return next.HandleInitialize(ctx, input)
+}
+
+// appendURIPath joins a URI path component to the existing path with `/`
+// separators between the path components. If the path being added ends with a
+// trailing `/` that slash will be maintained.
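+//
+// For example (illustrative): appendURIPath("/latest/meta-data", "iam/")
+// yields "/latest/meta-data/iam/", while appendURIPath("/latest", "api/token")
+// yields "/latest/api/token".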
+func appendURIPath(base, add string) string {
+	reqPath := path.Join(base, add)
+	if len(add) != 0 && add[len(add)-1] == '/' {
+		reqPath += "/"
+	}
+	return reqPath
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
new file mode 100644
index 000000000..5703c6e16
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
@@ -0,0 +1,261 @@
+package imds
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"net/http"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+	// Headers for Token and TTL
+	tokenHeader     = "x-aws-ec2-metadata-token"
+	defaultTokenTTL = 5 * time.Minute
+)
+
+type tokenProvider struct {
+	client   *Client
+	tokenTTL time.Duration
+
+	token    *apiToken
+	tokenMux sync.RWMutex
+
+	disabled uint32 // Atomically updated
+}
+
+func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider {
+	return &tokenProvider{
+		client:   client,
+		tokenTTL: ttl,
+	}
+}
+
+// apiToken provides the API token used by all operation calls for the EC2
+// Instance metadata service.
+type apiToken struct {
+	token   string
+	expires time.Time
+}
+
+var timeNow = time.Now
+
+// Expired returns if the token is expired.
+func (t *apiToken) Expired() bool {
+	// Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
+	// time is always based on reported wall-clock time.
+	return timeNow().Round(0).After(t.expires)
+}
+
+func (t *tokenProvider) ID() string { return "APITokenProvider" }
+
+// HandleFinalize is the finalize stack middleware, that if the token provider is
+// enabled, will attempt to add the cached API token to the request. If the API
+// token is not cached, it will be retrieved in a separate API call, getToken.
+//
+// For retry attempts, handler must be added after attempt retryer.
+//
+// If the request for getToken fails the token provider may be disabled from
+// future requests, depending on the response status code.
+func (t *tokenProvider) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if t.fallbackEnabled() && !t.enabled() {
+		// short-circuits to insecure data flow if token provider is disabled.
+		return next.HandleFinalize(ctx, input)
+	}
+
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request)
+	}
+
+	tok, err := t.getToken(ctx)
+	if err != nil {
+		// If the error allows the token to downgrade to insecure flow allow that.
+		var bypassErr *bypassTokenRetrievalError
+		if errors.As(err, &bypassErr) {
+			return next.HandleFinalize(ctx, input)
+		}
+
+		return out, metadata, fmt.Errorf("failed to get API token, %w", err)
+	}
+
+	req.Header.Set(tokenHeader, tok.token)
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// HandleDeserialize is the deserialize stack middleware for determining if the
+// operation the token provider is decorating failed because of a 401
+// unauthorized status code. If the operation failed for that reason the token
+// provider needs to be re-enabled so that it can start adding the API token to
+// operation calls.
+func (t *tokenProvider) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, input)
+	if err == nil {
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse)
+	}
+
+	if resp.StatusCode == http.StatusUnauthorized { // unauthorized
+		t.enable()
+		err = &retryableError{Err: err, isRetryable: true}
+	}
+
+	return out, metadata, err
+}
+
+func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
+	if t.fallbackEnabled() && !t.enabled() {
+		return nil, &bypassTokenRetrievalError{
+			Err: fmt.Errorf("cannot get API token, provider disabled"),
+		}
+	}
+
+	t.tokenMux.RLock()
+	tok = t.token
+	t.tokenMux.RUnlock()
+
+	if tok != nil && !tok.Expired() {
+		return tok, nil
+	}
+
+	tok, err = t.updateToken(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return tok, nil
+}
+
+func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
+	t.tokenMux.Lock()
+	defer t.tokenMux.Unlock()
+
+	// Prevent multiple concurrent requests from re-retrieving the token.
+	if t.token != nil && !t.token.Expired() {
+		tok := t.token
+		return tok, nil
+	}
+
+	result, err := t.client.getToken(ctx, &getTokenInput{
+		TokenTTL: t.tokenTTL,
+	})
+	if err != nil {
+		var statusErr interface{ HTTPStatusCode() int }
+		if errors.As(err, &statusErr) {
+			switch statusErr.HTTPStatusCode() {
+			// Disable future get token if failed because of 403, 404, or 405
+			case http.StatusForbidden,
+				http.StatusNotFound,
+				http.StatusMethodNotAllowed:
+
+				if t.fallbackEnabled() {
+					logger := middleware.GetLogger(ctx)
+					logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err)
+					t.disable()
+				}
+
+			// 400 errors are terminal, and need to be upstreamed
+			case http.StatusBadRequest:
+				return nil, err
+			}
+		}
+
+		// Disable if request send failed or timed out getting response
+		var re *smithyhttp.RequestSendError
+		var ce *smithy.CanceledError
+		if errors.As(err, &re) || errors.As(err, &ce) {
+			atomic.StoreUint32(&t.disabled, 1)
+		}
+
+		if !t.fallbackEnabled() {
+			// NOTE: getToken() is an implementation detail of some outer operation
+			// (e.g. GetMetadata). It has its own retries that have already been exhausted.
+			// Mark the underlying error as a terminal error.
+			err = &retryableError{Err: err, isRetryable: false}
+			return nil, err
+		}
+
+		// Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request
+		// and allow the request to proceed. Future requests _may_ re-attempt fetching a
+		// token if not disabled.
+		return nil, &bypassTokenRetrievalError{Err: err}
+	}
+
+	tok := &apiToken{
+		token:   result.Token,
+		expires: timeNow().Add(result.TokenTTL),
+	}
+	t.token = tok
+
+	return tok, nil
+}
+
+// enabled returns if the token provider is currently enabled or not.
+func (t *tokenProvider) enabled() bool {
+	return atomic.LoadUint32(&t.disabled) == 0
+}
+
+// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise
+func (t *tokenProvider) fallbackEnabled() bool {
+	switch t.client.options.EnableFallback {
+	case aws.FalseTernary:
+		return false
+	default:
+		return true
+	}
+}
+
+// disable disables the token provider so that it will no longer attempt to
+// inject the token, nor request updates.
+func (t *tokenProvider) disable() {
+	atomic.StoreUint32(&t.disabled, 1)
+}
+
+// enable enables the token provider to start refreshing tokens, and adding them
+// to the pending request.
+func (t *tokenProvider) enable() {
+	t.tokenMux.Lock()
+	t.token = nil
+	t.tokenMux.Unlock()
+	atomic.StoreUint32(&t.disabled, 0)
+}
+
+type bypassTokenRetrievalError struct {
+	Err error
+}
+
+func (e *bypassTokenRetrievalError) Error() string {
+	return fmt.Sprintf("bypass token retrieval, %v", e.Err)
+}
+
+func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
+
+type retryableError struct {
+	Err         error
+	isRetryable bool
+}
+
+func (e *retryableError) RetryableError() bool { return e.isRetryable }
+
+func (e *retryableError) Error() string { return e.Err.Error() }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
new file mode 100644
index 000000000..99808e849
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
@@ -0,0 +1,249 @@
+# v1.11.33 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.32 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.31 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.30 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.29 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.28 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.27 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.26 (2022-08-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.25 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.24 (2022-08-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.23 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.22 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.21 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.20 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.19 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.18 (2022-07-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.17 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.16 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.15 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.14 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.13 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.12 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.11 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.10 (2022-05-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.9 (2022-05-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.8 (2022-05-03)
+
+* **Dependency
Update**: Updated to the latest SDK module versions
+
+# v1.11.7 (2022-04-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.6 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.5 (2022-04-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2022-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.1 (2022-01-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.5 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.4 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.4 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.3 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.2 (2021-09-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2021-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-08-04)
+
+* **Feature**: adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-06-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-05-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go new file mode 100644 index 000000000..4059f9851 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go @@ -0,0 +1,37 @@ +package manager + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/s3" +) + +// DeleteObjectsAPIClient is an S3 API client that can invoke the DeleteObjects operation. +type DeleteObjectsAPIClient interface { + DeleteObjects(context.Context, *s3.DeleteObjectsInput, ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error) +} + +// DownloadAPIClient is an S3 API client that can invoke the GetObject operation. +type DownloadAPIClient interface { + GetObject(context.Context, *s3.GetObjectInput, ...func(*s3.Options)) (*s3.GetObjectOutput, error) +} + +// HeadBucketAPIClient is an S3 API client that can invoke the HeadBucket operation. 
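+// A concrete *s3.Client (for example from s3.NewFromConfig) is expected to
+// satisfy this interface, as it is for the other *APIClient interfaces in this file.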
+type HeadBucketAPIClient interface {
+	HeadBucket(context.Context, *s3.HeadBucketInput, ...func(*s3.Options)) (*s3.HeadBucketOutput, error)
+}
+
+// ListObjectsV2APIClient is an S3 API client that can invoke the ListObjectsV2 operation.
+type ListObjectsV2APIClient interface {
+	ListObjectsV2(context.Context, *s3.ListObjectsV2Input, ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)
+}
+
+// UploadAPIClient is an S3 API client that can invoke PutObject, UploadPart, CreateMultipartUpload,
+// CompleteMultipartUpload, and AbortMultipartUpload operations.
+type UploadAPIClient interface {
+	PutObject(context.Context, *s3.PutObjectInput, ...func(*s3.Options)) (*s3.PutObjectOutput, error)
+	UploadPart(context.Context, *s3.UploadPartInput, ...func(*s3.Options)) (*s3.UploadPartOutput, error)
+	CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput, ...func(*s3.Options)) (*s3.CreateMultipartUploadOutput, error)
+	CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput, ...func(*s3.Options)) (*s3.CompleteMultipartUploadOutput, error)
+	AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput, ...func(*s3.Options)) (*s3.AbortMultipartUploadOutput, error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go
new file mode 100644
index 000000000..d3b828979
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go
@@ -0,0 +1,23 @@
+package manager
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+func validateSupportedARNType(bucket string) error {
+	if !arn.IsARN(bucket) {
+		return nil
+	}
+
+	parsedARN, err := arn.Parse(bucket)
+	if err != nil {
+		return err
+	}
+
+	if parsedARN.Service == "s3-object-lambda" {
+		return fmt.Errorf("manager does not support s3-object-lambda service ARNs")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go
new file mode 100644
index 000000000..2d8bd7e00
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go
@@ -0,0 +1,139 @@
+package manager
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const bucketRegionHeader = "X-Amz-Bucket-Region"
+
+// GetBucketRegion will attempt to get the region for a bucket using the
+// client's configured region to determine which AWS partition to perform the query on.
+//
+// The request will not be signed, and will not use your AWS credentials.
+//
+// A BucketNotFound error will be returned if the bucket does not exist in the
+// AWS partition the client region belongs to.
+//
+// For example, to get the region of a bucket which exists in "eu-central-1"
+// you could provide a region hint of "us-west-2".
+// +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// log.Println("error:", err) +// return +// } +// +// bucket := "my-bucket" +// region, err := manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket) +// if err != nil { +// var bnf manager.BucketNotFound +// if errors.As(err, &bnf) { +// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket) +// } +// return +// } +// fmt.Printf("Bucket %s is in %s region\n", bucket, region) +// +// By default the request will be made to the Amazon S3 endpoint using the virtual-hosted-style addressing. +// +// bucketname.s3.us-west-2.amazonaws.com/ +// +// To configure the GetBucketRegion to make a request via the Amazon +// S3 FIPS endpoints directly when a FIPS region name is not available, (e.g. +// fips-us-gov-west-1) set the EndpointResolver on the config or client the +// utility is called with. +// +// cfg, err := config.LoadDefaultConfig(context.TODO(), +// config.WithEndpointResolver( +// aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { +// return aws.Endpoint{URL: "https://s3-fips.us-west-2.amazonaws.com"}, nil +// }), +// ) +// if err != nil { +// panic(err) +// } +func GetBucketRegion(ctx context.Context, client HeadBucketAPIClient, bucket string, optFns ...func(*s3.Options)) (string, error) { + var captureBucketRegion deserializeBucketRegion + + clientOptionFns := make([]func(*s3.Options), len(optFns)+1) + clientOptionFns[0] = func(options *s3.Options) { + options.Credentials = aws.AnonymousCredentials{} + options.APIOptions = append(options.APIOptions, captureBucketRegion.RegisterMiddleware) + } + copy(clientOptionFns[1:], optFns) + + _, err := client.HeadBucket(ctx, &s3.HeadBucketInput{ + Bucket: aws.String(bucket), + }, clientOptionFns...) + if len(captureBucketRegion.BucketRegion) == 0 && err != nil { + var httpStatusErr interface { + HTTPStatusCode() int + } + if !errors.As(err, &httpStatusErr) { + return "", err + } + + if httpStatusErr.HTTPStatusCode() == http.StatusNotFound { + return "", &bucketNotFound{} + } + + return "", err + } + + return captureBucketRegion.BucketRegion, nil +} + +type deserializeBucketRegion struct { + BucketRegion string +} + +func (d *deserializeBucketRegion) RegisterMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Add(d, middleware.After) +} + +func (d *deserializeBucketRegion) ID() string { + return "DeserializeBucketRegion" +} + +func (d *deserializeBucketRegion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) + } + + d.BucketRegion = resp.Header.Get(bucketRegionHeader) + + return out, metadata, err +} + +// BucketNotFound indicates the bucket was not found in the partition when calling GetBucketRegion. 
+type BucketNotFound interface {
+	error
+
+	isBucketNotFound()
+}
+
+type bucketNotFound struct{}
+
+func (b *bucketNotFound) Error() string {
+	return "bucket not found"
+}
+
+func (b *bucketNotFound) isBucketNotFound() {}
+
+var _ BucketNotFound = (*bucketNotFound)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go
new file mode 100644
index 000000000..e781aef61
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go
@@ -0,0 +1,79 @@
+package manager
+
+import (
+	"io"
+)
+
+// BufferedReadSeeker is a buffered io.ReadSeeker
+type BufferedReadSeeker struct {
+	r                 io.ReadSeeker
+	buffer            []byte
+	readIdx, writeIdx int
+}
+
+// NewBufferedReadSeeker returns a new BufferedReadSeeker.
+// If len(b) == 0 then the buffer will be initialized to 64 KiB.
+func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
+	if len(b) == 0 {
+		b = make([]byte, 64*1024)
+	}
+	return &BufferedReadSeeker{r: r, buffer: b}
+}
+
+func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
+	b.r = r
+	b.readIdx, b.writeIdx = 0, 0
+}
+
+// Read will read up to len(p) bytes into p and will return
+// the number of bytes read and any error that occurred.
+// If the len(p) > the buffer size then a single read request
+// will be issued to the underlying io.ReadSeeker for len(p) bytes.
+// A Read request will at most perform a single Read to the underlying
+// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
+func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return n, err
+	}
+
+	if b.readIdx == b.writeIdx {
+		if len(p) >= len(b.buffer) {
+			n, err = b.r.Read(p)
+			return n, err
+		}
+		b.readIdx, b.writeIdx = 0, 0
+
+		n, err = b.r.Read(b.buffer)
+		if n == 0 {
+			return n, err
+		}
+
+		b.writeIdx += n
+	}
+
+	n = copy(p, b.buffer[b.readIdx:b.writeIdx])
+	b.readIdx += n
+
+	return n, err
+}
+
+// Seek will position the underlying io.ReadSeeker to the given offset
+// and will clear the buffer.
+func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	n, err := b.r.Seek(offset, whence)
+
+	b.reset(b.r)
+
+	return n, err
+}
+
+// ReadAt will read up to len(p) bytes at the given file offset.
+// This will result in the buffer being cleared.
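+// ReadAt is implemented as a Seek to the offset followed by a Read, so it
+// inherits Seek's buffer reset and is not safe for concurrent use.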
+func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) { + _, err := b.Seek(off, io.SeekStart) + if err != nil { + return 0, err + } + + return b.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go new file mode 100644 index 000000000..e2ab143b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go @@ -0,0 +1,8 @@ +//go:build !windows +// +build !windows + +package manager + +func defaultUploadBufferProvider() ReadSeekerWriteToProvider { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go new file mode 100644 index 000000000..1ae881c10 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go @@ -0,0 +1,5 @@ +package manager + +func defaultUploadBufferProvider() ReadSeekerWriteToProvider { + return NewBufferedReadSeekerWriteToPool(1024 * 1024) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go new file mode 100644 index 000000000..179fe10f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go @@ -0,0 +1,8 @@ +//go:build !windows +// +build !windows + +package manager + +func defaultDownloadBufferProvider() WriterReadFromProvider { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go new file mode 100644 index 000000000..88887ff58 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go @@ -0,0 +1,5 @@ +package manager + +func defaultDownloadBufferProvider() WriterReadFromProvider { + return NewPooledBufferedWriterReadFromProvider(1024 * 1024) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go new file mode 100644 index 000000000..31171a698 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go @@ -0,0 +1,3 @@ +// Package manager provides utilities to upload and download objects from +// S3 concurrently. Helpful for when working with large objects. +package manager diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go new file mode 100644 index 000000000..2ebcea585 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go @@ -0,0 +1,525 @@ +package manager + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/smithy-go/logging" +) + +const userAgentKey = "s3-transfer" + +// DefaultDownloadPartSize is the default range of bytes to get at a time when +// using Download(). 
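+// That is 5 MiB (5 * 1024 * 1024 bytes) per ranged GET request.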
+const DefaultDownloadPartSize = 1024 * 1024 * 5
+
+// DefaultDownloadConcurrency is the default number of goroutines to spin up
+// when using Download().
+const DefaultDownloadConcurrency = 5
+
+// DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download.
+const DefaultPartBodyMaxRetries = 3
+
+type errReadingBody struct {
+	err error
+}
+
+func (e *errReadingBody) Error() string {
+	return fmt.Sprintf("failed to read part body: %v", e.err)
+}
+
+func (e *errReadingBody) Unwrap() error {
+	return e.err
+}
+
+// Downloader is the structure that calls Download(). It is safe to call Download()
+// on this structure for multiple objects and across concurrent goroutines.
+// Mutating the Downloader's properties is not safe to be done concurrently.
+type Downloader struct {
+	// The size (in bytes) to request from S3 for each part.
+	// The minimum allowed part size is 5MB, and if this value is set to zero,
+	// the DefaultDownloadPartSize value will be used.
+	//
+	// PartSize is ignored if the Range input parameter is provided.
+	PartSize int64
+
+	// PartBodyMaxRetries is the number of retry attempts to make for failed part downloads.
+	PartBodyMaxRetries int
+
+	// Logger to send logging messages to
+	Logger logging.Logger
+
+	// Enable Logging of part download retry attempts
+	LogInterruptedDownloads bool
+
+	// The number of goroutines to spin up in parallel when sending parts.
+	// If this is set to zero, the DefaultDownloadConcurrency value will be used.
+	//
+	// Concurrency of 1 will download the parts sequentially.
+	//
+	// Concurrency is ignored if the Range input parameter is provided.
+	Concurrency int
+
+	// An S3 client to use when performing downloads.
+	S3 DownloadAPIClient
+
+	// List of client options that will be passed down to individual API
+	// operation requests made by the downloader.
+	ClientOptions []func(*s3.Options)
+
+	// Defines the buffer strategy used when downloading a part.
+	//
+	// If a WriterReadFromProvider is given the Download manager
+	// will pass the io.WriterAt of the Download request to the provider
+	// and will use the returned WriterReadFrom from the provider as the
+	// destination writer when copying from http response body.
+	BufferProvider WriterReadFromProvider
+}
+
+// WithDownloaderClientOptions appends to the Downloader's API request options.
+func WithDownloaderClientOptions(opts ...func(*s3.Options)) func(*Downloader) {
+	return func(d *Downloader) {
+		d.ClientOptions = append(d.ClientOptions, opts...)
+	}
+}
+
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior. Requires a DownloadAPIClient, such as the
+// *s3.Client returned by s3.NewFromConfig, to make the S3 API calls.
+//
+// Example:
+//
+//	// Load AWS Config
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create an S3 client using the loaded configuration
+//	s3.NewFromConfig(cfg)
+//
+//	// Create a downloader passing it the S3 client
+//	downloader := manager.NewDownloader(s3.NewFromConfig(cfg))
+//
+//	// Create a downloader with the client and custom downloader options
+//	downloader := manager.NewDownloader(client, func(d *manager.Downloader) {
+//		d.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
+func NewDownloader(c DownloadAPIClient, options ...func(*Downloader)) *Downloader {
+	d := &Downloader{
+		S3:                 c,
+		PartSize:           DefaultDownloadPartSize,
+		PartBodyMaxRetries: DefaultPartBodyMaxRetries,
+		Concurrency:        DefaultDownloadConcurrency,
+		BufferProvider:     defaultDownloadBufferProvider(),
+	}
+	for _, option := range options {
+		option(d)
+	}
+
+	return d
+}
+
+// Download downloads an object in S3 and writes the payload into w
+// using concurrent GET requests. The n int64 returned is the size of the object downloaded
+// in bytes.
+//
+// The Context must not be nil. A nil Context will cause a panic. Use the
+// Context to add deadlining, timeouts, etc. Download may create sub-contexts
+// for individual underlying requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader
+// instance. Use the WithDownloaderClientOptions helper function to pass in request
+// options that will be applied to all API operations made with this downloader.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or by an in-memory []byte wrapper using manager.WriteAtBuffer.
+// In case you download files into memory, do not forget to pre-allocate memory
+// to avoid additional allocations and GC runs.
+//
+// Example:
+//
+//	// pre-allocate in memory buffer, where headObject type is *s3.HeadObjectOutput
+//	buf := make([]byte, int(headObject.ContentLength))
+//	// wrap with manager.WriteAtBuffer
+//	w := manager.NewWriteAtBuffer(buf)
+//	// download file into the memory
+//	numBytesDownloaded, err := downloader.Download(ctx, w, &s3.GetObjectInput{
+//		Bucket: aws.String(bucket),
+//		Key:    aws.String(item),
+//	})
+//
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// If the GetObjectInput's Range value is provided that will cause the downloader
+// to perform a single GetObject request for that object's range. This will
+// cause the part size and concurrency configurations to be ignored.
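+//
+// For example (illustrative), providing a Range forces a single ranged GET:
+//
+//	n, err := downloader.Download(ctx, w, &s3.GetObjectInput{
+//		Bucket: aws.String(bucket),
+//		Key:    aws.String(item),
+//		Range:  aws.String("bytes=0-1048575"), // first 1 MiB only
+//	})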
+func (d Downloader) Download(ctx context.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+	if err := validateSupportedARNType(aws.ToString(input.Bucket)); err != nil {
+		return 0, err
+	}
+
+	impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
+
+	// Copy ClientOptions
+	clientOptions := make([]func(*s3.Options), 0, len(impl.cfg.ClientOptions)+1)
+	clientOptions = append(clientOptions, func(o *s3.Options) {
+		o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey))
+	})
+	clientOptions = append(clientOptions, impl.cfg.ClientOptions...)
+	impl.cfg.ClientOptions = clientOptions
+
+	for _, option := range options {
+		option(&impl.cfg)
+	}
+
+	// Ensures we don't need nil checks later on
+	impl.cfg.Logger = logging.WithContext(ctx, impl.cfg.Logger)
+
+	impl.partBodyMaxRetries = d.PartBodyMaxRetries
+
+	impl.totalBytes = -1
+	if impl.cfg.Concurrency == 0 {
+		impl.cfg.Concurrency = DefaultDownloadConcurrency
+	}
+
+	if impl.cfg.PartSize == 0 {
+		impl.cfg.PartSize = DefaultDownloadPartSize
+	}
+
+	return impl.download()
+}
+
+// downloader is the implementation structure used internally by Downloader.
+type downloader struct {
+	ctx context.Context
+	cfg Downloader
+
+	in *s3.GetObjectInput
+	w  io.WriterAt
+
+	wg sync.WaitGroup
+	m  sync.Mutex
+
+	pos        int64
+	totalBytes int64
+	written    int64
+	err        error
+
+	partBodyMaxRetries int
+}
+
+// download performs the implementation of the object download across ranged
+// GETs.
+func (d *downloader) download() (n int64, err error) {
+	// If a range is specified, fall back to a single download of that range;
+	// this enables the functionality of ranged gets with the downloader but
+	// at the cost of no multipart downloads.
+	if rng := aws.ToString(d.in.Range); len(rng) > 0 {
+		d.downloadRange(rng)
+		return d.written, d.err
+	}
+
+	// Spin off first worker to check additional header information
+	d.getChunk()
+
+	if total := d.getTotalBytes(); total >= 0 {
+		// Spin up workers
+		ch := make(chan dlchunk, d.cfg.Concurrency)
+
+		for i := 0; i < d.cfg.Concurrency; i++ {
+			d.wg.Add(1)
+			go d.downloadPart(ch)
+		}
+
+		// Assign work
+		for d.getErr() == nil {
+			if d.pos >= total {
+				break // We're finished queuing chunks
+			}
+
+			// Queue the next range of bytes to read.
+			ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+			d.pos += d.cfg.PartSize
+		}
+
+		// Wait for completion
+		close(ch)
+		d.wg.Wait()
+	} else {
+		// Checking if we read anything new
+		for d.err == nil {
+			d.getChunk()
+		}
+
+		// We expect a 416 error letting us know we are done downloading the
+		// total bytes. Since we do not know the content's length, this will
+		// keep grabbing chunks of data until the range of bytes specified in
+		// the request is out of range of the content. Once this happens, a
+		// 416 should occur.
+		var responseError interface {
+			HTTPStatusCode() int
+		}
+		if errors.As(d.err, &responseError) {
+			if responseError.HTTPStatusCode() == http.StatusRequestedRangeNotSatisfiable {
+				d.err = nil
+			}
+		}
+	}
+
+	// Return error
+	return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+//
+// If this is the first worker, this operation also resolves the total number
+// of bytes to be read so that the worker manager knows when it is finished.
+func (d *downloader) downloadPart(ch chan dlchunk) {
+	defer d.wg.Done()
+	for {
+		chunk, ok := <-ch
+		if !ok {
+			break
+		}
+		if d.getErr() != nil {
+			// Drain the channel if there is an error, to prevent deadlocking
+			// of download producer.
+			continue
+		}
+
+		if err := d.downloadChunk(chunk); err != nil {
+			d.setErr(err)
+		}
+	}
+}
+
+// getChunk grabs a chunk of data from the body.
+// Not thread safe. Should only be used when grabbing data on a single thread.
+func (d *downloader) getChunk() {
+	if d.getErr() != nil {
+		return
+	}
+
+	chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+	d.pos += d.cfg.PartSize
+
+	if err := d.downloadChunk(chunk); err != nil {
+		d.setErr(err)
+	}
+}
+
+// downloadRange downloads an Object given the passed in Byte-Range value.
+// The chunk used to download the range will be configured for that range.
+func (d *downloader) downloadRange(rng string) {
+	if d.getErr() != nil {
+		return
+	}
+
+	chunk := dlchunk{w: d.w, start: d.pos}
+	// Ranges specified will short circuit the multipart download
+	chunk.withRange = rng
+
+	if err := d.downloadChunk(chunk); err != nil {
+		d.setErr(err)
+	}
+
+	// Update the position based on the amount of data received.
+	d.pos = d.written
+}
+
+// downloadChunk downloads the chunk from s3
+func (d *downloader) downloadChunk(chunk dlchunk) error {
+	var params s3.GetObjectInput
+	awsutil.Copy(&params, d.in)
+
+	// Get the next byte range of data
+	params.Range = aws.String(chunk.ByteRange())
+
+	var n int64
+	var err error
+	for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
+		n, err = d.tryDownloadChunk(&params, &chunk)
+		if err == nil {
+			break
+		}
+		// Check if the returned error is an errReadingBody.
+		// If err is errReadingBody this indicates that an error
+		// occurred while copying the http response body.
+		// If this occurs we unwrap the err to set the underlying error
+		// and attempt any remaining retries.
+		if bodyErr, ok := err.(*errReadingBody); ok {
+			err = bodyErr.Unwrap()
+		} else {
+			return err
+		}
+
+		chunk.cur = 0
+
+		d.cfg.Logger.Logf(logging.Debug,
+			"object part body download interrupted %s, err, %v, retrying attempt %d",
+			aws.ToString(params.Key), err, retry)
+	}
+
+	d.incrWritten(n)
+
+	return err
+}
+
+func (d *downloader) tryDownloadChunk(params *s3.GetObjectInput, w io.Writer) (int64, error) {
+	cleanup := func() {}
+	if d.cfg.BufferProvider != nil {
+		w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
+	}
+	defer cleanup()
+
+	resp, err := d.cfg.S3.GetObject(d.ctx, params, d.cfg.ClientOptions...)
+	if err != nil {
+		return 0, err
+	}
+	d.setTotalBytes(resp) // Set total if not yet set.
+
+	var src io.Reader = resp.Body
+	if d.cfg.BufferProvider != nil {
+		src = &suppressWriterAt{suppressed: src}
+	}
+	n, err := io.Copy(w, src)
+	resp.Body.Close()
+	if err != nil {
+		return n, &errReadingBody{err: err}
+	}
+
+	return n, nil
+}
+
+// getTotalBytes is a thread-safe getter for retrieving the total byte status.
+func (d *downloader) getTotalBytes() int64 {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	return d.totalBytes
+}
+
+// setTotalBytes is a thread-safe setter for setting the total byte status.
+// Will extract the object's total bytes from the Content-Range if the file
+// will be chunked, or from Content-Length. Content-Length is used when the
+// response does not include a Content-Range, meaning the object was not
+// chunked. This occurs when the full file fits within the PartSize directive.
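+//
+// For example (illustrative): a response of
+// "Content-Range: bytes 0-5242879/12582912" yields a total of 12582912 bytes,
+// while "Content-Range: bytes 0-5242879/*" leaves the total unknown (-1).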
+func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	if d.totalBytes >= 0 {
+		return
+	}
+
+	if resp.ContentRange == nil {
+		// ContentRange is nil when the full file contents are provided, and
+		// the response is not chunked. Use ContentLength instead.
+		if resp.ContentLength > 0 {
+			d.totalBytes = resp.ContentLength
+			return
+		}
+	} else {
+		parts := strings.Split(*resp.ContentRange, "/")
+
+		total := int64(-1)
+		var err error
+		// Checking for whether or not a numbered total exists
+		// If one does not exist, we will assume the total to be -1, undefined,
+		// and sequentially download each chunk until hitting a 416 error
+		totalStr := parts[len(parts)-1]
+		if totalStr != "*" {
+			total, err = strconv.ParseInt(totalStr, 10, 64)
+			if err != nil {
+				d.err = err
+				return
+			}
+		}
+
+		d.totalBytes = total
+	}
+}
+
+func (d *downloader) incrWritten(n int64) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	d.written += n
+}
+
+// getErr is a thread-safe getter for the error object
+func (d *downloader) getErr() error {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	return d.err
+}
+
+// setErr is a thread-safe setter for the error object
+func (d *downloader) setErr(e error) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	d.err = e
+}
+
+// dlchunk represents a single chunk of data to write by the worker routine.
+// This structure also implements an io.SectionReader style interface for
+// io.WriterAt, effectively making it an io.SectionWriter (which does not
+// exist).
+type dlchunk struct {
+	w     io.WriterAt
+	start int64
+	size  int64
+	cur   int64
+
+	// specifies the byte range the chunk should be downloaded with.
+	withRange string
+}
+
+// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
+// position to its end (or EOF).
+//
+// If a range is specified on the dlchunk the size will be ignored when writing,
+// as the total size may not be known ahead of time.
+func (c *dlchunk) Write(p []byte) (n int, err error) {
+	if c.cur >= c.size && len(c.withRange) == 0 {
+		return 0, io.EOF
+	}
+
+	n, err = c.w.WriteAt(p, c.start+c.cur)
+	c.cur += int64(n)
+
+	return
+}
+
+// ByteRange returns an HTTP Byte-Range header value that should be used by the
+// client to request the chunk's range.
+func (c *dlchunk) ByteRange() string {
+	if len(c.withRange) != 0 {
+		return c.withRange
+	}
+
+	return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
new file mode 100644
index 000000000..6f51b8d76
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+ +package manager + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.11.33" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go new file mode 100644 index 000000000..6b93a3bc4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go @@ -0,0 +1,251 @@ +package manager + +import ( + "context" + "fmt" + "sync" +) + +type byteSlicePool interface { + Get(context.Context) (*[]byte, error) + Put(*[]byte) + ModifyCapacity(int) + SliceSize() int64 + Close() +} + +type maxSlicePool struct { + // allocator is defined as a function pointer to allow + // for test cases to instrument custom tracers when allocations + // occur. + allocator sliceAllocator + + slices chan *[]byte + allocations chan struct{} + capacityChange chan struct{} + + max int + sliceSize int64 + + mtx sync.RWMutex +} + +func newMaxSlicePool(sliceSize int64) *maxSlicePool { + p := &maxSlicePool{sliceSize: sliceSize} + p.allocator = p.newSlice + + return p +} + +var errZeroCapacity = fmt.Errorf("get called on zero capacity pool") + +func (p *maxSlicePool) Get(ctx context.Context) (*[]byte, error) { + // check if context is canceled before attempting to get a slice + // this ensures priority is given to the cancel case first + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + p.mtx.RLock() + + for { + select { + case bs, ok := <-p.slices: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return bs, nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // pass + } + + select { + case _, ok := <-p.allocations: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return p.allocator(), nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // In the event that there are no slices or allocations available + // This prevents some deadlock situations that can occur around sync.RWMutex + // When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock. + // By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where + // Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock, + // and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity. + + // Short-circuit if the pool capacity is zero. + if p.max == 0 { + p.mtx.RUnlock() + return nil, errZeroCapacity + } + + // Since we will be releasing the read-lock we need to take the reference to the channel. + // Since channels are references we will still get notified if slices are added, or if + // the channel is closed due to a capacity modification. This specifically avoids a data race condition + // where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock. + c := p.capacityChange + + p.mtx.RUnlock() + + select { + case _ = <-c: + p.mtx.RLock() + case <-ctx.Done(): + return nil, ctx.Err() + } + } + } +} + +func (p *maxSlicePool) Put(bs *[]byte) { + p.mtx.RLock() + defer p.mtx.RUnlock() + + if p.max == 0 { + return + } + + select { + case p.slices <- bs: + p.notifyCapacity() + default: + // If the new channel when attempting to add the slice then we drop the slice. + // The logic here is to prevent a deadlock situation if channel is already at max capacity. 
+ // Allows us to reap allocations that are returned and are no longer needed. + } +} + +func (p *maxSlicePool) ModifyCapacity(delta int) { + if delta == 0 { + return + } + + p.mtx.Lock() + defer p.mtx.Unlock() + + p.max += delta + + if p.max == 0 { + p.empty() + return + } + + if p.capacityChange != nil { + close(p.capacityChange) + } + p.capacityChange = make(chan struct{}, p.max) + + origAllocations := p.allocations + p.allocations = make(chan struct{}, p.max) + + newAllocs := len(origAllocations) + delta + for i := 0; i < newAllocs; i++ { + p.allocations <- struct{}{} + } + + if origAllocations != nil { + close(origAllocations) + } + + origSlices := p.slices + p.slices = make(chan *[]byte, p.max) + if origSlices == nil { + return + } + + close(origSlices) + for bs := range origSlices { + select { + case p.slices <- bs: + default: + // If the new channel blocks while adding slices from the old channel + // then we drop the slice. The logic here is to prevent a deadlock situation + // if the new channel has a smaller capacity then the old. + } + } +} + +func (p *maxSlicePool) notifyCapacity() { + select { + case p.capacityChange <- struct{}{}: + default: + // This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized + // on capacity modifications. This is just a safety to ensure that a blocking situation can't occur. + } +} + +func (p *maxSlicePool) SliceSize() int64 { + return p.sliceSize +} + +func (p *maxSlicePool) Close() { + p.mtx.Lock() + defer p.mtx.Unlock() + p.empty() +} + +func (p *maxSlicePool) empty() { + p.max = 0 + + if p.capacityChange != nil { + close(p.capacityChange) + p.capacityChange = nil + } + + if p.allocations != nil { + close(p.allocations) + for range p.allocations { + // drain channel + } + p.allocations = nil + } + + if p.slices != nil { + close(p.slices) + for range p.slices { + // drain channel + } + p.slices = nil + } +} + +func (p *maxSlicePool) newSlice() *[]byte { + bs := make([]byte, p.sliceSize) + return &bs +} + +type returnCapacityPoolCloser struct { + byteSlicePool + returnCapacity int +} + +func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) { + if delta > 0 { + n.returnCapacity = -1 * delta + } + n.byteSlicePool.ModifyCapacity(delta) +} + +func (n *returnCapacityPoolCloser) Close() { + if n.returnCapacity < 0 { + n.byteSlicePool.ModifyCapacity(n.returnCapacity) + } +} + +type sliceAllocator func() *[]byte + +var newByteSlicePool = func(sliceSize int64) byteSlicePool { + return newMaxSlicePool(sliceSize) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go new file mode 100644 index 000000000..ce117c32a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go @@ -0,0 +1,65 @@ +package manager + +import ( + "io" + "sync" +) + +// ReadSeekerWriteTo defines an interface implementing io.WriteTo and io.ReadSeeker +type ReadSeekerWriteTo interface { + io.ReadSeeker + io.WriterTo +} + +// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriteAt +// implementation. +type BufferedReadSeekerWriteTo struct { + *BufferedReadSeeker +} + +// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or +// an error occurs. Returns the number of bytes written and any error encountered during the write. 
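The capacity-bounded pool above is easiest to see end to end in a test-style sketch. This is illustrative only: it assumes the code compiles inside package manager (the pool types are unexported) and it is not part of the vendored file.

package manager

import (
	"context"
	"fmt"
)

func ExampleMaxSlicePool() {
	pool := newMaxSlicePool(1024) // hands out 1 KiB slices
	pool.ModifyCapacity(2)        // at most two slices outstanding at once
	defer pool.Close()

	bs, err := pool.Get(context.TODO())
	if err != nil {
		fmt.Println(err)
		return
	}
	copy(*bs, "payload")
	pool.Put(bs) // returned slices are reused by later Get calls

	fmt.Println(pool.SliceSize())
	// Output: 1024
}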
+func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) { + return io.Copy(writer, b.BufferedReadSeeker) +} + +// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker +type ReadSeekerWriteToProvider interface { + GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) +} + +// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse +// []byte slices for buffering parts in memory +type BufferedReadSeekerWriteToPool struct { + pool sync.Pool +} + +// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create +// a pool of reusable buffers . If size is less then < 64 KiB then the buffer +// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom +// respectively will default to copying 32 KiB. +func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool { + if size < 65536 { + size = 65536 + } + + return &BufferedReadSeekerWriteToPool{ + pool: sync.Pool{New: func() interface{} { + return make([]byte, size) + }}, + } +} + +// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo. +// The provided cleanup must be called after operations have been completed on the +// returned io.ReadSeekerWriteTo in order to signal the return of resources to the pool. +func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) { + buffer := p.pool.Get().([]byte) + + r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)} + cleanup = func() { + p.pool.Put(buffer) + } + + return r, cleanup +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go new file mode 100644 index 000000000..968f90732 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go @@ -0,0 +1,187 @@ +package manager + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. +// +// A readSeekCloser wrapping an nonseekable io.Reader used in an API operation's +// input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if yhe operation +// requires payload signing. +// +// Note: If using with S3 PutObject to stream an object upload. The SDK's S3 +// Upload Manager(s3manager.Uploader) provides support for streaming +// with the ability to retry network errors. +func ReadSeekCloser(r io.Reader) *ReaderSeekerCloser { + return &ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// seekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func seekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := s.(type) { + case *ReaderSeekerCloser: + return v.GetLen() + } + + return computeSeekerLength(s) +} + +// GetLen returns the length of the bytes remaining in the underlying reader. 
+// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r *ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return computeSeekerLength(s) + } + + return -1, nil +} + +func computeSeekerLength(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, io.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, io.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. +func (r *ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. +// +// Performs the same functionality as io.Reader Read +func (r *ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r *ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r *ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r *ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. 
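WriteAtBuffer, defined just above, is the usual in-memory sink for the concurrent downloader, since each worker writes its chunk at a distinct offset. An illustrative sketch (not part of the vendored file):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
)

func main() {
	buf := manager.NewWriteAtBuffer(nil)
	buf.GrowthCoeff = 1.5 // over-allocate on growth to reduce reallocations

	// Out-of-order writes at fixed offsets, as download workers produce them.
	buf.WriteAt([]byte("world"), 7)
	buf.WriteAt([]byte("hello, "), 0)

	fmt.Printf("%s\n", buf.Bytes()) // prints "hello, world"
}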
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go new file mode 100644 index 000000000..d68246c2b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go @@ -0,0 +1,808 @@ +package manager + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "sort" + "sync" + + "github.com/aws/aws-sdk-go-v2/aws" + + "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" +) + +// MaxUploadParts is the maximum allowed number of parts in a multi-part upload +// on Amazon S3. +const MaxUploadParts int32 = 10000 + +// MinUploadPartSize is the minimum allowed part size when uploading a part to +// Amazon S3. +const MinUploadPartSize int64 = 1024 * 1024 * 5 + +// DefaultUploadPartSize is the default part size to buffer chunks of a +// payload into. +const DefaultUploadPartSize = MinUploadPartSize + +// DefaultUploadConcurrency is the default number of goroutines to spin up when +// using Upload(). +const DefaultUploadConcurrency = 5 + +// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned +// will satisfy this interface when a multi part upload failed to upload all +// chucks to S3. In the case of a failure the UploadID is needed to operate on +// the chunks, if any, which were uploaded. +// +// Example: +// +// u := manager.NewUploader(client) +// output, err := u.upload(context.Background(), input) +// if err != nil { +// var multierr manager.MultiUploadFailure +// if errors.As(err, &multierr) { +// fmt.Printf("upload failure UploadID=%s, %s\n", multierr.UploadID(), multierr.Error()) +// } else { +// fmt.Printf("upload failure, %s\n", err.Error()) +// } +// } +type MultiUploadFailure interface { + error + + // UploadID returns the upload id for the S3 multipart upload that failed. + UploadID() string +} + +// A multiUploadError wraps the upload ID of a failed s3 multipart upload. +// Composed of BaseError for code, message, and original error +// +// Should be used for an error that occurred failing a S3 multipart upload, +// and a upload ID is available. If an uploadID is not available a more relevant +type multiUploadError struct { + err error + + // ID for multipart upload which failed. + uploadID string +} + +// batchItemError returns the string representation of the error. +// +// # See apierr.BaseError ErrorWithExtra for output format +// +// Satisfies the error interface. 
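Callers usually detect a failed multipart upload through the MultiUploadFailure interface so orphaned parts can be aborted or resumed. A hedged sketch of that branching (not part of the vendored file):

package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
)

// handleUploadErr shows how a caller can branch on a multipart failure.
func handleUploadErr(err error) {
	var multiErr manager.MultiUploadFailure
	switch {
	case err == nil:
		return
	case errors.As(err, &multiErr):
		// The upload ID identifies parts left behind (e.g. with
		// LeavePartsOnError) for a manual abort or a resume.
		log.Printf("multipart upload %s failed: %v", multiErr.UploadID(), multiErr)
	default:
		log.Printf("upload failed: %v", err)
	}
}

func main() { handleUploadErr(nil) }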
+func (m *multiUploadError) Error() string { + var extra string + if m.err != nil { + extra = fmt.Sprintf(", cause: %s", m.err.Error()) + } + return fmt.Sprintf("upload multipart failed, upload id: %s%s", m.uploadID, extra) +} + +// Unwrap returns the underlying error that cause the upload failure +func (m *multiUploadError) Unwrap() error { + return m.err +} + +// UploadID returns the id of the S3 upload which failed. +func (m *multiUploadError) UploadID() string { + return m.uploadID +} + +// UploadOutput represents a response from the Upload() call. +type UploadOutput struct { + // The URL where the object was uploaded to. + Location string + + // The ID for a multipart upload to S3. In the case of an error the error + // can be cast to the MultiUploadFailure interface to extract the upload ID. + // Will be empty string if multipart upload was not used, and the object + // was uploaded as a single PutObject call. + UploadID string + + // The list of parts that were uploaded and their checksums. Will be empty + // if multipart upload was not used, and the object was uploaded as a + // single PutObject call. + CompletedParts []types.CompletedPart + + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // The base64-encoded, 32-bit CRC32 checksum of the object. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. + ChecksumSHA256 *string + + // Entity tag for the uploaded object. + ETag *string + + // If the object expiration is configured, this will contain the expiration date + // (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string + + // The object key of the newly created object. + Key *string + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) + // that was used for the object. + SSEKMSKeyId *string + + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an Amazon Web Services KMS customer master key (CMK) in your + // initiate multipart upload request, the response includes this header. It + // confirms the encryption algorithm that Amazon S3 used to encrypt the object. + ServerSideEncryption types.ServerSideEncryption + + // The version of the object that was uploaded. Will only be populated if + // the S3 Bucket is versioned. If the bucket is not versioned this field + // will not be set. + VersionID *string +} + +// WithUploaderRequestOptions appends to the Uploader's API client options. +func WithUploaderRequestOptions(opts ...func(*s3.Options)) func(*Uploader) { + return func(u *Uploader) { + u.ClientOptions = append(u.ClientOptions, opts...) + } +} + +// The Uploader structure that calls Upload(). It is safe to call Upload() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Uploader's properties is not safe to be done concurrently. +// +// # Pre-computed Checksums +// +// Care must be taken when using pre-computed checksums the transfer upload +// manager. 
The format and value of the checksum differs based on if the upload +// will preformed as a single or multipart upload. +// +// Uploads that are smaller than the Uploader's PartSize will be uploaded using +// the PutObject API operation. Pre-computed checksum of the uploaded object's +// content are valid for these single part uploads. If the checksum provided +// does not match the uploaded content the upload will fail. +// +// Uploads that are larger than the Uploader's PartSize will be uploaded using +// multi-part upload. The Pre-computed checksums for these uploads are a +// checksum of checksums of each part. Not a checksum of the full uploaded +// bytes. With the format of "-", (e.g. +// "DUoRhQ==-3"). If a pre-computed checksum is provided that does not match +// this format, as matches the content uploaded, the upload will fail. +// +// ContentMD5 for multipart upload is explicitly ignored for multipart upload, +// and its value is suppressed. +// +// # Automatically Computed Checksums +// +// When the ChecksumAlgorithm member of Upload's input parameter PutObjectInput +// is set to a valid value, the SDK will automatically compute the checksum of +// the individual uploaded parts. The UploadOutput result from Upload will +// include the checksum of part checksums provided by S3 +// CompleteMultipartUpload API call. +type Uploader struct { + // The buffer size (in bytes) to use when buffering data into chunks and + // sending them as parts to S3. The minimum allowed part size is 5MB, and + // if this value is set to zero, the DefaultUploadPartSize value will be used. + PartSize int64 + + // The number of goroutines to spin up in parallel per call to Upload when + // sending parts. If this is set to zero, the DefaultUploadConcurrency value + // will be used. + // + // The concurrency pool is not shared between calls to Upload. + Concurrency int + + // Setting this value to true will cause the SDK to avoid calling + // AbortMultipartUpload on a failure, leaving all successfully uploaded + // parts on S3 for manual recovery. + // + // Note that storing parts of an incomplete multipart upload counts towards + // space usage on S3 and will add additional costs if not cleaned up. + LeavePartsOnError bool + + // MaxUploadParts is the max number of parts which will be uploaded to S3. + // Will be used to calculate the partsize of the object to be uploaded. + // E.g: 5GB file, with MaxUploadParts set to 100, will upload the file + // as 100, 50MB parts. With a limited of s3.MaxUploadParts (10,000 parts). + // + // MaxUploadParts must not be used to limit the total number of bytes uploaded. + // Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader) + // instead. An io.LimitReader is helpful when uploading an unbounded reader + // to S3, and you know its maximum size. Otherwise the reader's io.EOF returned + // error must be used to signal end of stream. + // + // Defaults to package const's MaxUploadParts value. + MaxUploadParts int32 + + // The client to use when uploading to S3. + S3 UploadAPIClient + + // List of request options that will be passed down to individual API + // operation requests made by the uploader. + ClientOptions []func(*s3.Options) + + // Defines the buffer strategy used when uploading a part + BufferProvider ReadSeekerWriteToProvider + + // partPool allows for the re-usage of streaming payload part buffers between upload calls + partPool byteSlicePool +} + +// NewUploader creates a new Uploader instance to upload objects to S3. 
Pass In +// additional functional options to customize the uploader's behavior. Requires a +// client.ConfigProvider in order to create a S3 service client. The session.Session +// satisfies the client.ConfigProvider interface. +// +// Example: +// +// // Load AWS Config +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// panic(err) +// } +// +// // Create an S3 Client with the config +// client := s3.NewFromConfig(cfg) +// +// // Create an uploader passing it the client +// uploader := manager.NewUploader(client) +// +// // Create an uploader with the client and custom options +// uploader := manager.NewUploader(client, func(u *manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploader(client UploadAPIClient, options ...func(*Uploader)) *Uploader { + u := &Uploader{ + S3: client, + PartSize: DefaultUploadPartSize, + Concurrency: DefaultUploadConcurrency, + LeavePartsOnError: false, + MaxUploadParts: MaxUploadParts, + BufferProvider: defaultUploadBufferProvider(), + } + + for _, option := range options { + option(u) + } + + u.partPool = newByteSlicePool(u.PartSize) + + return u +} + +// Upload uploads an object to S3, intelligently buffering large +// files into smaller chunks and sending them in parallel across multiple +// goroutines. You can configure the buffer size and concurrency through the +// Uploader parameters. +// +// Additional functional options can be provided to configure the individual +// upload. These options are copies of the Uploader instance Upload is called from. +// Modifying the options will not impact the original Uploader instance. +// +// Use the WithUploaderRequestOptions helper function to pass in request +// options that will be applied to all API operations made with this uploader. +// +// It is safe to call this method concurrently across goroutines. +func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*Uploader)) ( + *UploadOutput, error, +) { + i := uploader{in: input, cfg: u, ctx: ctx} + + // Copy ClientOptions + clientOptions := make([]func(*s3.Options), 0, len(i.cfg.ClientOptions)+1) + clientOptions = append(clientOptions, func(o *s3.Options) { + o.APIOptions = append(o.APIOptions, + middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey), + ) + }) + clientOptions = append(clientOptions, i.cfg.ClientOptions...) + i.cfg.ClientOptions = clientOptions + + for _, opt := range opts { + opt(&i.cfg) + } + + return i.upload() +} + +// internal structure to manage an upload to S3. +type uploader struct { + ctx context.Context + cfg Uploader + + in *s3.PutObjectInput + + readerPos int64 // current reader position + totalSize int64 // set to -1 if the size is not known +} + +// internal logic for deciding whether to upload a single part or use a +// multipart upload. 
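The decision below is driven by a single read-ahead: if the first read already hits io.EOF the object goes out as one PutObject, otherwise a multipart upload starts. A usage sketch for the streaming, unknown-size case (not part of the vendored file; bucket, key, and sizes are placeholders):

package main

import (
	"context"
	"io"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	pr, pw := io.Pipe() // non-seekable body: parts are buffered via partPool
	go func() {
		defer pw.Close()
		for i := 0; i < 1024; i++ {
			pw.Write(make([]byte, 64*1024)) // 64 MiB total, forcing multipart
		}
	}()

	uploader := manager.NewUploader(s3.NewFromConfig(cfg), func(u *manager.Uploader) {
		u.PartSize = 16 * 1024 * 1024 // 16 MiB parts
	})
	out, err := uploader.Upload(context.TODO(), &s3.PutObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		Body:   pr,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded to %s (upload id %q)", out.Location, out.UploadID)
}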
+func (u *uploader) upload() (*UploadOutput, error) { + if err := u.init(); err != nil { + return nil, fmt.Errorf("unable to initialize upload: %w", err) + } + defer u.cfg.partPool.Close() + + if u.cfg.PartSize < MinUploadPartSize { + return nil, fmt.Errorf("part size must be at least %d bytes", MinUploadPartSize) + } + + // Do one read to determine if we have more than one part + reader, _, cleanup, err := u.nextReader() + if err == io.EOF { // single part + return u.singlePart(reader, cleanup) + } else if err != nil { + cleanup() + return nil, fmt.Errorf("read upload data failed: %w", err) + } + + mu := multiuploader{uploader: u} + return mu.upload(reader, cleanup) +} + +// init will initialize all default options. +func (u *uploader) init() error { + if err := validateSupportedARNType(aws.ToString(u.in.Bucket)); err != nil { + return err + } + + if u.cfg.Concurrency == 0 { + u.cfg.Concurrency = DefaultUploadConcurrency + } + if u.cfg.PartSize == 0 { + u.cfg.PartSize = DefaultUploadPartSize + } + if u.cfg.MaxUploadParts == 0 { + u.cfg.MaxUploadParts = MaxUploadParts + } + + // Try to get the total size for some optimizations + if err := u.initSize(); err != nil { + return err + } + + // If PartSize was changed or partPool was never setup then we need to allocated a new pool + // so that we return []byte slices of the correct size + poolCap := u.cfg.Concurrency + 1 + if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize { + u.cfg.partPool = newByteSlicePool(u.cfg.PartSize) + u.cfg.partPool.ModifyCapacity(poolCap) + } else { + u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool} + u.cfg.partPool.ModifyCapacity(poolCap) + } + + return nil +} + +// initSize tries to detect the total stream size, setting u.totalSize. If +// the size is not known, totalSize is set to -1. +func (u *uploader) initSize() error { + u.totalSize = -1 + + switch r := u.in.Body.(type) { + case io.Seeker: + n, err := seekerLen(r) + if err != nil { + return err + } + u.totalSize = n + + // Try to adjust partSize if it is too small and account for + // integer division truncation. + if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) { + // Add one to the part size to account for remainders + // during the size calculation. e.g odd number of bytes. + u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1 + } + } + + return nil +} + +// nextReader returns a seekable reader representing the next packet of data. +// This operation increases the shared u.readerPos counter, but note that it +// does not need to be wrapped in a mutex because nextReader is only called +// from the main thread. 
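The MaxUploadParts guard in initSize grows the part size until the part count fits under the ceiling. A small worked sketch of that arithmetic (not part of the vendored file):

package main

import "fmt"

func main() {
	const maxUploadParts = int64(10000)
	totalSize := int64(100) * 1024 * 1024 * 1024 // 100 GiB object
	partSize := int64(5 * 1024 * 1024)           // 5 MiB default

	// Same adjustment as initSize: +1 absorbs the integer-division remainder.
	if totalSize/partSize >= maxUploadParts {
		partSize = totalSize/maxUploadParts + 1
	}
	fmt.Printf("part size %d bytes -> %d parts\n",
		partSize, (totalSize+partSize-1)/partSize)
}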
+func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
+	switch r := u.in.Body.(type) {
+	case readerAtSeeker:
+		var err error
+
+		n := u.cfg.PartSize
+		if u.totalSize >= 0 {
+			bytesLeft := u.totalSize - u.readerPos
+
+			if bytesLeft <= u.cfg.PartSize {
+				err = io.EOF
+				n = bytesLeft
+			}
+		}
+
+		var (
+			reader  io.ReadSeeker
+			cleanup func()
+		)
+
+		reader = io.NewSectionReader(r, u.readerPos, n)
+		if u.cfg.BufferProvider != nil {
+			reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
+		} else {
+			cleanup = func() {}
+		}
+
+		u.readerPos += n
+
+		return reader, int(n), cleanup, err
+
+	default:
+		part, err := u.cfg.partPool.Get(u.ctx)
+		if err != nil {
+			return nil, 0, func() {}, err
+		}
+
+		n, err := readFillBuf(r, *part)
+		u.readerPos += int64(n)
+
+		cleanup := func() {
+			u.cfg.partPool.Put(part)
+		}
+
+		return bytes.NewReader((*part)[0:n]), n, cleanup, err
+	}
+}
+
+func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
+	for offset < len(b) && err == nil {
+		var n int
+		n, err = r.Read(b[offset:])
+		offset += n
+	}
+
+	return offset, err
+}
+
+// singlePart contains upload logic for uploading a single chunk via
+// a regular PutObject request. Multipart requests require at least two
+// parts, or at least 5MB of data.
+func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+	defer cleanup()
+
+	var params s3.PutObjectInput
+	awsutil.Copy(&params, u.in)
+	params.Body = r
+
+	// Need to use request form because URL generated in request is
+	// used in return.
+
+	var locationRecorder recordLocationClient
+	out, err := u.cfg.S3.PutObject(u.ctx, &params,
+		append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &UploadOutput{
+		Location: locationRecorder.location,
+
+		BucketKeyEnabled:     out.BucketKeyEnabled,
+		ChecksumCRC32:        out.ChecksumCRC32,
+		ChecksumCRC32C:       out.ChecksumCRC32C,
+		ChecksumSHA1:         out.ChecksumSHA1,
+		ChecksumSHA256:       out.ChecksumSHA256,
+		ETag:                 out.ETag,
+		Expiration:           out.Expiration,
+		Key:                  params.Key,
+		RequestCharged:       out.RequestCharged,
+		SSEKMSKeyId:          out.SSEKMSKeyId,
+		ServerSideEncryption: out.ServerSideEncryption,
+		VersionID:            out.VersionId,
+	}, nil
+}
+
+type httpClient interface {
+	Do(r *http.Request) (*http.Response, error)
+}
+
+type recordLocationClient struct {
+	httpClient
+	location string
+}
+
+func (c *recordLocationClient) WrapClient() func(o *s3.Options) {
+	return func(o *s3.Options) {
+		c.httpClient = o.HTTPClient
+		o.HTTPClient = c
+	}
+}
+
+func (c *recordLocationClient) Do(r *http.Request) (resp *http.Response, err error) {
+	resp, err = c.httpClient.Do(r)
+	if err != nil {
+		return resp, err
+	}
+
+	if resp.Request != nil && resp.Request.URL != nil {
+		url := *resp.Request.URL
+		url.RawQuery = ""
+		c.location = url.String()
+	}
+
+	return resp, err
+}
+
+// internal structure to manage a specific multipart upload to S3.
+type multiuploader struct {
+	*uploader
+	wg       sync.WaitGroup
+	m        sync.Mutex
+	err      error
+	uploadID string
+	parts    completedParts
+}
+
+// keeps track of a single chunk of data being sent to S3.
+type chunk struct {
+	buf     io.ReadSeeker
+	num     int32
+	cleanup func()
+}
+
+// completedParts is a wrapper to make parts sortable by their part number,
+// since S3 requires this list to be sent in sorted order.
+type completedParts []types.CompletedPart
+
+func (a completedParts) Len() int           { return len(a) }
+func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// upload will perform a multipart upload using the firstBuf buffer containing
+// the first chunk of data.
+func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+	var params s3.CreateMultipartUploadInput
+	awsutil.Copy(&params, u.in)
+
+	// Create the multipart
+	var locationRecorder recordLocationClient
+	resp, err := u.cfg.S3.CreateMultipartUpload(u.ctx, &params,
+		append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
+	if err != nil {
+		cleanup()
+		return nil, err
+	}
+	u.uploadID = *resp.UploadId
+
+	// Create the workers
+	ch := make(chan chunk, u.cfg.Concurrency)
+	for i := 0; i < u.cfg.Concurrency; i++ {
+		u.wg.Add(1)
+		go u.readChunk(ch)
+	}
+
+	// Send part 1 to the workers
+	var num int32 = 1
+	ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
+
+	// Read and queue the rest of the parts
+	for u.geterr() == nil && err == nil {
+		var (
+			reader       io.ReadSeeker
+			nextChunkLen int
+			ok           bool
+		)
+
+		reader, nextChunkLen, cleanup, err = u.nextReader()
+		ok, err = u.shouldContinue(num, nextChunkLen, err)
+		if !ok {
+			cleanup()
+			if err != nil {
+				u.seterr(err)
+			}
+			break
+		}
+
+		num++
+
+		ch <- chunk{buf: reader, num: num, cleanup: cleanup}
+	}
+
+	// Close the channel, wait for workers, and complete upload
+	close(ch)
+	u.wg.Wait()
+	completeOut := u.complete()
+
+	if err := u.geterr(); err != nil {
+		return nil, &multiUploadError{
+			err:      err,
+			uploadID: u.uploadID,
+		}
+	}
+
+	return &UploadOutput{
+		Location:       locationRecorder.location,
+		UploadID:       u.uploadID,
+		CompletedParts: u.parts,
+
+		BucketKeyEnabled:     completeOut.BucketKeyEnabled,
+		ChecksumCRC32:        completeOut.ChecksumCRC32,
+		ChecksumCRC32C:       completeOut.ChecksumCRC32C,
+		ChecksumSHA1:         completeOut.ChecksumSHA1,
+		ChecksumSHA256:       completeOut.ChecksumSHA256,
+		ETag:                 completeOut.ETag,
+		Expiration:           completeOut.Expiration,
+		Key:                  completeOut.Key,
+		RequestCharged:       completeOut.RequestCharged,
+		SSEKMSKeyId:          completeOut.SSEKMSKeyId,
+		ServerSideEncryption: completeOut.ServerSideEncryption,
+		VersionID:            completeOut.VersionId,
+	}, nil
+}
+
+func (u *multiuploader) shouldContinue(part int32, nextChunkLen int, err error) (bool, error) {
+	if err != nil && err != io.EOF {
+		return false, fmt.Errorf("read multipart upload data failed, %w", err)
+	}
+
+	if nextChunkLen == 0 {
+		// No need to upload an empty part. If the file was empty to start
+		// with, an empty single part would have been created and a multipart
+		// upload never started.
+		return false, nil
+	}
+
+	part++
+	// This upload exceeded maximum number of supported parts, error now.
+	if part > u.cfg.MaxUploadParts || part > MaxUploadParts {
+		var msg string
+		if part > u.cfg.MaxUploadParts {
+			msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+				u.cfg.MaxUploadParts)
+		} else {
+			msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+				MaxUploadParts)
+		}
+		return false, fmt.Errorf(msg)
+	}
+
+	return true, err
+}
+
+// readChunk runs in worker goroutines to pull chunks off of the ch channel
+// and send() them as UploadPart requests.
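readChunk, documented above, is the worker half of a classic bounded worker pool: the producer queues chunks on a channel sized to Concurrency and a WaitGroup joins the workers. A stripped-down standalone sketch of the same pattern (not part of the vendored file):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 3
	ch := make(chan int, workers) // bounded queue, like chan chunk above
	var wg sync.WaitGroup

	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for part := range ch { // exits when the channel is closed
				fmt.Printf("worker %d handled part %d\n", id, part)
			}
		}(i)
	}

	for part := 1; part <= 10; part++ {
		ch <- part // blocks when all workers are busy
	}
	close(ch) // signals workers to drain and exit
	wg.Wait()
}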
+func (u *multiuploader) readChunk(ch chan chunk) {
+	defer u.wg.Done()
+	for {
+		data, ok := <-ch
+
+		if !ok {
+			break
+		}
+
+		if u.geterr() == nil {
+			if err := u.send(data); err != nil {
+				u.seterr(err)
+			}
+		}
+
+		data.cleanup()
+	}
+}
+
+// send performs an UploadPart request and keeps track of the completed
+// part information.
+func (u *multiuploader) send(c chunk) error {
+	params := &s3.UploadPartInput{
+		Bucket:               u.in.Bucket,
+		Key:                  u.in.Key,
+		Body:                 c.buf,
+		SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
+		SSECustomerKey:       u.in.SSECustomerKey,
+		SSECustomerKeyMD5:    u.in.SSECustomerKeyMD5,
+		ExpectedBucketOwner:  u.in.ExpectedBucketOwner,
+		RequestPayer:         u.in.RequestPayer,
+
+		ChecksumAlgorithm: u.in.ChecksumAlgorithm,
+		// Invalid to set any of the individual ChecksumXXX members from
+		// PutObject as they are never valid for individual parts of a
+		// multipart upload.
+
+		PartNumber: c.num,
+		UploadId:   &u.uploadID,
+	}
+	// TODO should do copy then clear?
+
+	resp, err := u.cfg.S3.UploadPart(u.ctx, params, u.cfg.ClientOptions...)
+	if err != nil {
+		return err
+	}
+
+	var completed types.CompletedPart
+	awsutil.Copy(&completed, resp)
+	completed.PartNumber = c.num
+
+	u.m.Lock()
+	u.parts = append(u.parts, completed)
+	u.m.Unlock()
+
+	return nil
+}
+
+// geterr is a thread-safe getter for the error object
+func (u *multiuploader) geterr() error {
+	u.m.Lock()
+	defer u.m.Unlock()
+
+	return u.err
+}
+
+// seterr is a thread-safe setter for the error object
+func (u *multiuploader) seterr(e error) {
+	u.m.Lock()
+	defer u.m.Unlock()
+
+	u.err = e
+}
+
+// fail will abort the multipart unless LeavePartsOnError is set to true.
+func (u *multiuploader) fail() {
+	if u.cfg.LeavePartsOnError {
+		return
+	}
+
+	params := &s3.AbortMultipartUploadInput{
+		Bucket:   u.in.Bucket,
+		Key:      u.in.Key,
+		UploadId: &u.uploadID,
+	}
+	_, err := u.cfg.S3.AbortMultipartUpload(u.ctx, params, u.cfg.ClientOptions...)
+	if err != nil {
+		// TODO: Add logging
+		//logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
+		_ = err
+	}
+}
+
+// complete successfully completes a multipart upload and returns the response.
+func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
+	if u.geterr() != nil {
+		u.fail()
+		return nil
+	}
+
+	// Parts must be sorted in PartNumber order.
+	sort.Sort(u.parts)
+
+	var params s3.CompleteMultipartUploadInput
+	awsutil.Copy(&params, u.in)
+	params.UploadId = &u.uploadID
+	params.MultipartUpload = &types.CompletedMultipartUpload{Parts: u.parts}
+
+	resp, err := u.cfg.S3.CompleteMultipartUpload(u.ctx, &params, u.cfg.ClientOptions...)
+ if err != nil { + u.seterr(err) + u.fail() + } + + return resp +} + +type readerAtSeeker interface { + io.ReaderAt + io.ReadSeeker +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go new file mode 100644 index 000000000..fb10ec309 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go @@ -0,0 +1,83 @@ +package manager + +import ( + "bufio" + "io" + "sync" + + "github.com/aws/aws-sdk-go-v2/internal/sdkio" +) + +// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom +type WriterReadFrom interface { + io.Writer + io.ReaderFrom +} + +// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer +type WriterReadFromProvider interface { + GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func()) +} + +type bufferedWriter interface { + WriterReadFrom + Flush() error + Reset(io.Writer) +} + +type bufferedReadFrom struct { + bufferedWriter +} + +func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) { + n, err := b.bufferedWriter.ReadFrom(r) + if flushErr := b.Flush(); flushErr != nil && err == nil { + err = flushErr + } + return n, err +} + +// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool +// to manage allocation and reuse of *bufio.Writer structures. +type PooledBufferedReadFromProvider struct { + pool sync.Pool +} + +// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider +// Size is used to control the size of the underlying *bufio.Writer created for +// calls to GetReadFrom. +func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider { + if size < int(32*sdkio.KibiByte) { + size = int(64 * sdkio.KibiByte) + } + + return &PooledBufferedReadFromProvider{ + pool: sync.Pool{ + New: func() interface{} { + return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)} + }, + }, + } +} + +// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom +// interface/ Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom +// has been completed in order to allow the reuse of the *bufio.Writer +func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) { + buffer := p.pool.Get().(*bufferedReadFrom) + buffer.Reset(writer) + r = buffer + cleanup = func() { + buffer.Reset(nil) // Reset to nil writer to release reference + p.pool.Put(buffer) + } + return r, cleanup +} + +type suppressWriterAt struct { + suppressed io.Reader +} + +func (s *suppressWriterAt) Read(p []byte) (n int, err error) { + return s.suppressed.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go new file mode 100644 index 000000000..938cd14c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go @@ -0,0 +1,112 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. 
+func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + if dst.Kind() == reflect.String { + dst.SetString(e.String()) + } else { + dst.Set(reflect.New(e)) + } + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if dst.Kind() != reflect.String && src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go new file mode 100644 index 000000000..bcfe51a2b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go @@ -0,0 +1,33 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. 
+func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + // Special casing for strings as typed enumerations are string aliases + // but are not deep equal. + if ra.Kind() == reflect.String && rb.Kind() == reflect.String { + return ra.String() == rb.String() + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go new file mode 100644 index 000000000..58ef438a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go @@ -0,0 +1,225 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. +func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.EqualFold(name, c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. 
+ value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. +func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + if dstVal.Kind() == reflect.String { + dstVal.SetString(srcVal.String()) + } else { + dstVal.Set(srcVal) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go new file mode 100644 index 000000000..1adecae6b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go @@ -0,0 +1,131 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. 
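Before the pretty-printers, a test-style sketch of the path helpers above, again assuming it compiles inside the awsutil package (not part of the vendored file; the struct shape is made up for illustration):

package awsutil

import "fmt"

type inner struct{ Name *string }
type outer struct{ Items []*inner }

func ExamplePathHelpers() {
	o := &outer{Items: []*inner{{}, {}}}

	// SetValueAtPath creates and sets along a case-insensitive path;
	// "[]" fans out across every index of the slice.
	SetValueAtPath(o, "items[].name", "part")

	// ValuesAtPath evaluates a JMESPath expression against the structure.
	vals, err := ValuesAtPath(o, "Items[0].Name")
	fmt.Println(err, len(vals), *o.Items[0].Name)
	// Output: <nil> 1 part
}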
+func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. +func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + isPtr := false + for v.Kind() == reflect.Ptr { + isPtr = true + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + if isPtr { + buf.WriteRune('&') + } + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + strtype := v.Type().String() + if strtype == "[]uint8" { + fmt.Fprintf(buf, " len %d", v.Len()) + break + } + + nl, id, id2 := "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + if isPtr { + buf.WriteRune('&') + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + if isPtr { + buf.WriteRune('&') + } + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "") + return + } + + for v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + + if v.Kind() == reflect.Ptr || v.Kind() == reflect.Struct || v.Kind() == reflect.Map || v.Kind() == reflect.Slice { + prettify(v, indent, buf) + return + } + + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go new file mode 100644 index 000000000..645df2450 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go @@ -0,0 +1,88 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. 
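Prettify and StringValue differ mainly in that StringValue masks struct fields tagged sensitive:"true" with a placeholder. An illustrative test-style sketch under the same in-package assumption (not part of the vendored file):

package awsutil

import "fmt"

type credentials struct {
	User   *string
	Secret *string `sensitive:"true"`
}

func ExampleStringValue() {
	user, secret := "alice", "hunter2"
	c := credentials{User: &user, Secret: &secret}

	// User is printed quoted; Secret is replaced by the masking placeholder.
	fmt.Println(StringValue(c))
}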
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + for i := 0; i < v.Type().NumField(); i++ { + ft := v.Type().Field(i) + fv := v.Field(i) + + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { + continue // ignore unexported fields + } + if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { + continue // ignore unset fields + } + + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(ft.Name + ": ") + + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("") + } else { + stringValue(fv, indent+2, buf) + } + + buf.WriteString(",\n") + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md new file mode 100644 index 000000000..297f1e42c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -0,0 +1,174 @@ +# v1.1.34 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.33 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.32 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.31 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.30 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.29 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.28 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.27 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.26 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.25 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.24 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.23 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.22 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.21 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.20 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.19 
(2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.18 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.17 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.16 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.15 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.14 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.13 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.12 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.11 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.10 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.9 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.8 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.7 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.6 (2022-03-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.5 (2022-02-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.4 (2022-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.3 (2022-01-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.7 (2021-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.6 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.5 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.4 (2021-08-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.2 (2021-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.1 (2021-07-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-06-25) + +* **Release**: Release new modules +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
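A note on the two awsutil helpers vendored above: Prettify and stringValue perform the same reflection walk, but stringValue additionally consults the struct tag sensitive:"true" and prints the placeholder <sensitive> instead of the field's value, so secrets stay out of logged output. Below is a minimal standalone sketch of that redaction technique; the Credentials type, its fields, and the exact output format are illustrative assumptions, not part of the vendored module.

package main

import (
	"bytes"
	"fmt"
	"reflect"
	"strings"
)

// Credentials is a hypothetical type; the `sensitive` tag follows the same
// convention the vendored stringValue honors.
type Credentials struct {
	AccessKeyID string
	SecretKey   string `sensitive:"true"`
	Scopes      []string
}

// redact walks v the way the vendored stringValue does, emitting
// "<sensitive>" in place of any field tagged sensitive:"true".
func redact(v reflect.Value, indent int, buf *bytes.Buffer) {
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	switch v.Kind() {
	case reflect.Struct:
		buf.WriteString("{\n")
		for i := 0; i < v.NumField(); i++ {
			ft := v.Type().Field(i)
			buf.WriteString(strings.Repeat(" ", indent+2) + ft.Name + ": ")
			if ft.Tag.Get("sensitive") == "true" {
				buf.WriteString("<sensitive>")
			} else {
				redact(v.Field(i), indent+2, buf)
			}
			buf.WriteString(",\n")
		}
		buf.WriteString(strings.Repeat(" ", indent) + "}")
	case reflect.Slice:
		buf.WriteString("[")
		for i := 0; i < v.Len(); i++ {
			if i > 0 {
				buf.WriteString(", ")
			}
			redact(v.Index(i), indent, buf)
		}
		buf.WriteString("]")
	default:
		fmt.Fprintf(buf, "%v", v.Interface())
	}
}

func main() {
	var buf bytes.Buffer
	redact(reflect.ValueOf(Credentials{
		AccessKeyID: "AKIDEXAMPLE",
		SecretKey:   "super-secret",
		Scopes:      []string{"read", "write"},
	}), 0, &buf)
	fmt.Println(buf.String()) // SecretKey is printed as <sensitive>
}

Because the tag is read via reflect.StructField.Tag at runtime, redaction needs no per-type code; any struct that adopts the tag gets it for free.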
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go new file mode 100644 index 000000000..cd4d19b89 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go @@ -0,0 +1,65 @@ +package configsources + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" +) + +// EnableEndpointDiscoveryProvider is an interface for retrieving external configuration value +// for Enable Endpoint Discovery +type EnableEndpointDiscoveryProvider interface { + GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) +} + +// ResolveEnableEndpointDiscovery extracts the first instance of a EnableEndpointDiscoveryProvider from the config slice. +// Additionally returns a aws.EndpointDiscoveryEnableState to indicate if the value was found in provided configs, +// and error if one is encountered. +func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok { + value, found, err = p.GetEnableEndpointDiscovery(ctx) + if err != nil || found { + break + } + } + } + return +} + +// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint +type UseDualStackEndpointProvider interface { + GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error) +} + +// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpoint from the config slice. +// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. +func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(UseDualStackEndpointProvider); ok { + value, found, err = p.GetUseDualStackEndpoint(ctx) + if err != nil || found { + break + } + } + } + return +} + +// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint +type UseFIPSEndpointProvider interface { + GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error) +} + +// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice. +// Additionally, returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. +func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(UseFIPSEndpointProvider); ok { + value, found, err = p.GetUseFIPSEndpoint(ctx) + if err != nil || found { + break + } + } + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go new file mode 100644 index 000000000..2948b3154 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+ +package configsources + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.1.34" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md new file mode 100644 index 000000000..0ffc56c39 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -0,0 +1,147 @@ +# v2.4.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.19 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.3.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.2.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency 
Update**: Updated to the latest SDK module versions + +# v2.1.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.0 (2021-11-06) + +* **Release**: Endpoint Variant Model Support +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go new file mode 100644 index 000000000..32251a7e3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go @@ -0,0 +1,302 @@ +package endpoints + +import ( + "fmt" + "github.com/aws/smithy-go/logging" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// DefaultKey is a compound map key of a variant and other values. +type DefaultKey struct { + Variant EndpointVariant + ServiceVariant ServiceVariant +} + +// EndpointKey is a compound map key of a region and associated variant value. +type EndpointKey struct { + Region string + Variant EndpointVariant + ServiceVariant ServiceVariant +} + +// EndpointVariant is a bit field to describe the endpoints attributes. +type EndpointVariant uint64 + +const ( + // FIPSVariant indicates that the endpoint is FIPS capable. 
+ FIPSVariant EndpointVariant = 1 << (64 - 1 - iota) + + // DualStackVariant indicates that the endpoint is DualStack capable. + DualStackVariant +) + +// ServiceVariant is a bit field to describe the service endpoint attributes. +type ServiceVariant uint64 + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "s3v4"} +) + +// Options provide configuration needed to direct how endpoints are resolved. +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the provided logger. + LogDeprecated bool + + // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority + // over the region name passed to the ResolveEndpoint call. + ResolvedRegion string + + // Disable usage of HTTPS (TLS / SSL) + DisableHTTPS bool + + // Instruct the resolver to use a service endpoint that supports dual-stack. + // If a service does not have a dual-stack endpoint an error will be returned by the resolver. + UseDualStackEndpoint aws.DualStackEndpointState + + // Instruct the resolver to use a service endpoint that supports FIPS. + // If a service does not have a FIPS endpoint an error will be returned by the resolver. + UseFIPSEndpoint aws.FIPSEndpointState + + // ServiceVariant is a bitfield of service specified endpoint variant data. + ServiceVariant ServiceVariant +} + +// GetEndpointVariant returns the EndpointVariant for the variant associated options. +func (o Options) GetEndpointVariant() (v EndpointVariant) { + if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + v |= DualStackVariant + } + if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { + v |= FIPSVariant + } + return v +} + +// Partitions is a slice of partition +type Partitions []Partition + +// ResolveEndpoint resolves a service endpoint for the given region and options. +func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { + if len(ps) == 0 { + return aws.Endpoint{}, fmt.Errorf("no partitions found") + } + + if opts.Logger == nil { + opts.Logger = logging.Nop{} + } + + if len(opts.ResolvedRegion) > 0 { + region = opts.ResolvedRegion + } + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(region, opts) { + continue + } + + return ps[i].ResolveEndpoint(region, opts) + } + + // fallback to first partition format to use when resolving the endpoint. + return ps[0].ResolveEndpoint(region, opts) +} + +// Partition is an AWS partition description for a service and its' region endpoints. +type Partition struct { + ID string + RegionRegex *regexp.Regexp + PartitionEndpoint string + IsRegionalized bool + Defaults map[DefaultKey]Endpoint + Endpoints Endpoints +} + +func (p Partition) canResolveEndpoint(region string, opts Options) bool { + _, ok := p.Endpoints[EndpointKey{ + Region: region, + Variant: opts.GetEndpointVariant(), + }] + return ok || p.RegionRegex.MatchString(region) +} + +// ResolveEndpoint resolves and service endpoint for the given region and options. 
+func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { + if len(region) == 0 && len(p.PartitionEndpoint) != 0 { + region = p.PartitionEndpoint + } + + endpoints := p.Endpoints + + variant := options.GetEndpointVariant() + serviceVariant := options.ServiceVariant + + defaults := p.Defaults[DefaultKey{ + Variant: variant, + ServiceVariant: serviceVariant, + }] + + return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options) +} + +func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint { + key := EndpointKey{ + Region: region, + Variant: variant, + } + + if e, ok := endpoints[key]; ok { + return e + } + + if !p.IsRegionalized { + return endpoints[EndpointKey{ + Region: p.PartitionEndpoint, + Variant: variant, + ServiceVariant: serviceVariant, + }] + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return Endpoint{} +} + +// Endpoints is a map of service config regions to endpoints +type Endpoints map[EndpointKey]Endpoint + +// CredentialScope is the credential scope of a region and service +type CredentialScope struct { + Region string + Service string +} + +// Endpoint is a service endpoint description +type Endpoint struct { + // True if the endpoint cannot be resolved for this partition/region/service + Unresolveable aws.Ternary + + Hostname string + Protocols []string + + CredentialScope CredentialScope + + SignatureVersions []string + + // Indicates that this endpoint is deprecated. + Deprecated aws.Ternary +} + +// IsZero returns whether the endpoint structure is an empty (zero) value. +func (e Endpoint) IsZero() bool { + switch { + case e.Unresolveable != aws.UnknownTernary: + return false + case len(e.Hostname) != 0: + return false + case len(e.Protocols) != 0: + return false + case e.CredentialScope != (CredentialScope{}): + return false + case len(e.SignatureVersions) != 0: + return false + } + return true +} + +func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) { + var merged Endpoint + merged.mergeIn(def) + merged.mergeIn(e) + e = merged + + if e.IsZero() { + return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region) + } + + var u string + if e.Unresolveable != aws.TrueTernary { + // Only attempt to resolve the endpoint if it can be resolved. 
+ hostname := strings.Replace(e.Hostname, "{region}", region, 1) + + scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) + u = scheme + "://" + hostname + } + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + + if e.Deprecated == aws.TrueTernary && options.LogDeprecated { + options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u) + } + + return aws.Endpoint{ + URL: u, + PartitionID: partition, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func (e *Endpoint) mergeIn(other Endpoint) { + if other.Unresolveable != aws.UnknownTernary { + e.Unresolveable = other.Unresolveable + } + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if other.Deprecated != aws.UnknownTernary { + e.Deprecated = other.Deprecated + } +} + +func getEndpointScheme(protocols []string, disableHTTPS bool) string { + if disableHTTPS { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go new file mode 100644 index 000000000..0a8b6901f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+ +package endpoints + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "2.4.28" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md new file mode 100644 index 000000000..494c4dbaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -0,0 +1,188 @@ +# v1.3.35 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.34 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.33 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.32 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.31 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.30 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.29 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.28 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.27 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.26 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.25 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.24 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.23 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.22 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.21 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2022-05-17) + +* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2022-03-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2022-02-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2022-01-28) + +* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. + +# v1.3.4 (2022-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2022-01-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.5 (2021-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.4 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-08-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-08-04) + +* **Feature**: adds error handling for defered close calls +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-07-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-07-01) + +* **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values. + +# v1.0.1 (2021-06-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-05-20) + +* **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
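Before the ini parser sources, one observation about the endpoints resolver vendored above: its Defaults and Endpoints maps are keyed by an EndpointVariant bit field in which FIPS and dual-stack are independent bits, so a single map covers all four capability combinations and Options.GetEndpointVariant simply ORs in whichever states are enabled. A self-contained sketch of that keying scheme follows; the service name, URLs, and table contents are made-up examples, not SDK data.

package main

import "fmt"

// EndpointVariant mirrors the vendored bit-field approach: one bit per
// capability, allocated from the top of a uint64, composed with bitwise OR.
type EndpointVariant uint64

const (
	FIPSVariant EndpointVariant = 1 << (64 - 1 - iota)
	DualStackVariant
)

// EndpointKey pairs a region with the variant bits, like the vendored type.
type EndpointKey struct {
	Region  string
	Variant EndpointVariant
}

func main() {
	// Hypothetical endpoint table for one region.
	endpoints := map[EndpointKey]string{
		{Region: "us-east-1"}:                                          "https://svc.us-east-1.example.com",
		{Region: "us-east-1", Variant: FIPSVariant}:                    "https://svc-fips.us-east-1.example.com",
		{Region: "us-east-1", Variant: FIPSVariant | DualStackVariant}: "https://svc-fips.us-east-1.api.example.com",
	}

	// Build the lookup key the way Options.GetEndpointVariant does:
	// start at zero and OR in each enabled capability.
	var v EndpointVariant
	v |= FIPSVariant
	v |= DualStackVariant

	if url, ok := endpoints[EndpointKey{Region: "us-east-1", Variant: v}]; ok {
		fmt.Println(url) // https://svc-fips.us-east-1.api.example.com
	}
}

The same shape explains the resolver's fallback behavior: when the exact (region, variant) key is absent, Partition.endpointForRegion falls back to the partition endpoint or a zero Endpoint, which resolve then merges with the per-variant defaults.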
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go new file mode 100644 index 000000000..e83a99886 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
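For orientation while reading ast.go above: an AST stores its root either as Children[0] or, when RootToken is set, as a dedicated Token, and GetRoot/GetChildren hide that difference. A minimal sketch, written as in-package code since ini is an internal package (the demo function name is hypothetical):

package ini

import "fmt"

// astLayoutDemo contrasts the two AST layouts described above.
func astLayoutDemo() {
	// Token-rooted: Root holds the token, Children holds only children.
	expr := newASTWithRootToken(ASTKindExpr, newToken(TokenLit, []rune("key"), StringType))

	// AST-rooted: the root is stored as Children[0].
	stmt := newAST(ASTKindStatement, expr)

	fmt.Println(len(stmt.GetChildren()))            // 0: Children[0] is the root, not a child
	fmt.Println(stmt.GetRoot().Kind == ASTKindExpr) // true
}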
+var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go new file mode 100644 index 000000000..0895d53cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go new file mode 100644 index 000000000..0b76999ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go new file mode 100644 index 000000000..f5ebe52e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go @@ -0,0 +1,6 @@ +package ini + +import ( + // internal/ini module was carved out of this module + _ "github.com/aws/aws-sdk-go-v2" +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go new file mode 100644 index 000000000..fdd5321b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go @@ -0,0 +1,43 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// +// Grammar: +// stmt -> section | stmt' +// stmt' -> epsilon | expr +// expr -> value (stmt)* | equal_expr (stmt)* +// equal_expr -> value ( ':' | '=' ) equal_expr' +// equal_expr' -> number | string | quoted_string +// quoted_string -> " quoted_string' +// quoted_string' -> string quoted_string_end +// quoted_string_end -> " +// +// section -> [ section' +// section' -> section_value section_close +// section_value -> number | string_subset | boolean | quoted_string_subset +// quoted_string_subset -> " quoted_string_subset' +// quoted_string_subset' -> string_subset quoted_string_end +// quoted_string_subset -> " +// section_close -> ] +// +// value -> number | string_subset | boolean +// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ? +// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ? 
+// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go new file mode 100644 index 000000000..04345a54c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go new file mode 100644 index 000000000..0f278d55e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go @@ -0,0 +1,22 @@ +package ini + +import "fmt" + +// UnableToReadFile is an error indicating that a ini file could not be read +type UnableToReadFile struct { + Err error +} + +// Error returns an error message and the underlying error message if present +func (e *UnableToReadFile) Error() string { + base := "unable to read file" + if e.Err == nil { + return base + } + return fmt.Sprintf("%s: %v", base, e.Err) +} + +// Unwrap returns the underlying error +func (e *UnableToReadFile) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go new file mode 100644 index 000000000..91ba2a59d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. +// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go new file mode 100644 index 000000000..479343626 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package ini + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.3.35" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go new file mode 100644 index 000000000..f74062313 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go @@ -0,0 +1,58 @@ +package ini + +import ( + "fmt" + "io" + "os" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (sections Sections, err error) { + f, oerr := os.Open(path) + if oerr != nil { + return Sections{}, &UnableToReadFile{Err: oerr} + } + + defer func() { + closeErr := f.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("close error: %v, original error: %w", closeErr, err) + } + }() + + return Parse(f, path) +} + +// Parse will parse the given file using the shared config +// visitor. 
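UnableToReadFile implements Unwrap, so the standard errors helpers can reach the underlying os.Open failure. A small in-package sketch under that assumption (openOrExplain is a hypothetical name):

package ini

import (
	"errors"
	"fmt"
	"io/fs"
)

func openOrExplain(path string) {
	if _, err := OpenFile(path); err != nil {
		var readErr *UnableToReadFile
		// errors.As matches the wrapper; errors.Is reaches the os.Open cause.
		fmt.Println(errors.As(err, &readErr), errors.Is(err, fs.ErrNotExist))
	}
}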
+func Parse(f io.Reader, path string) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor(path) + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor("") + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go new file mode 100644 index 000000000..abf1fb036 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go @@ -0,0 +1,157 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. +func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, &UnableToReadFile{Err: err} + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. 
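The lexer runs over the input twice, once in countTokens to pre-size the token slice and once in tokenize to fill it. A hypothetical in-package sketch of the token stream it produces for one assignment:

package ini

import "fmt"

func dumpTokens() error {
	l := iniLexer{}
	toks, err := l.tokenize([]byte("key = value\n"))
	if err != nil {
		return err
	}

	for _, t := range toks {
		fmt.Printf("%v %q\n", t.Type(), string(t.Raw()))
	}
	// Prints: literal "key", ws " ", op "=", ws " ", literal "value", newline "\n".
	return nil
}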
+type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go new file mode 100644 index 000000000..12fc7d5aa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go @@ -0,0 +1,349 @@ +package ini + +import ( + "fmt" + "io" +) + +// ParseState represents the current state of the parser. +type ParseState uint + +// State enums for the parse table +const ( + InvalidState ParseState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. +var parseTable = map[ASTKind]map[TokenType]ParseState{ + ASTKindStart: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: { + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: { + TokenLit: ValueState, + TokenSep: ValueState, + TokenOp: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: { + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: { + TokenLit: ValueState, + TokenSep: ValueState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: { + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: { + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: 
TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + // if should skip is true, we skip the tokens until should skip is set to false. + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. + // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + switch k.Kind { + case ASTKindEqualExpr: + // assigning a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
+ + } + + children[len(children)-1] = rhs + root.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k.Kind, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which exludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. 
+func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go new file mode 100644 index 000000000..eca42d1b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go @@ -0,0 +1,336 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isCaselessLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency. +func isCaselessLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != unicode.ToLower(have[i]) { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. 
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = isCaselessLitValue(runesTrue, v.raw) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// NewStringValue returns a Value type generated using a string input. +func NewStringValue(str string) (Value, error) { + return newValue(StringType, 10, []rune(str)) +} + +// NewIntValue returns a Value type generated using an int64 input. 
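isNumberValue accepts plain integers, exponent notation, and 0b/0o/0x prefixed literals while rejecting anything with non-numeric characters. A hypothetical in-package probe:

package ini

import "fmt"

func probeNumbers() {
	for _, s := range []string{"123", "1e-4", "0x1F", "0b101", "abc"} {
		fmt.Printf("%-6s %v\n", s, isNumberValue([]rune(s)))
	}
	// The first four report true; "abc" reports false.
}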
+func NewIntValue(i int64) (Value, error) { + v := strconv.FormatInt(i, 10) + return newValue(IntegerType, 10, []rune(v)) +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go new file mode 100644 index 000000000..e52ac399f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go new file mode 100644 index 000000000..a45c0bc56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + 
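The ErrRange fallback above (the "issue 2253" note) means an integer literal too large for int64 degrades to a plain string value rather than failing the parse. A hedged in-package sketch (valueFallbackDemo is a hypothetical name):

package ini

import "fmt"

func valueFallbackDemo() {
	v, _ := NewIntValue(42)
	fmt.Println(v.IntValue()) // 42

	// 2^63 overflows int64, so the value is kept as StringType instead.
	big, err := newValue(IntegerType, 10, []rune("9223372036854775808"))
	fmt.Println(err, big.Type == StringType, big.StringValue()) // <nil> true 9223372036854775808
}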
+import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go new file mode 100644 index 000000000..8a84c7cbe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } 
+ return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go new file mode 100644 index 000000000..30ae0b8f2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go @@ -0,0 +1,19 @@ +package ini + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +func (err *ParseError) Error() string { + return err.msg +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go new file mode 100644 index 000000000..7f01cf7c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. +func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go new file mode 100644 index 000000000..f82095ba2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go new file mode 100644 index 000000000..07e90876a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. 
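ParseStack is fixed-capacity by construction: Push and Pop track in-flight ASTs while MarkComplete collects finished statements into the result list. A small hypothetical in-package sketch:

package ini

import "fmt"

func stackDemo() {
	s := newParseStack(3, 4) // room for 3 in-flight ASTs, 4 completed statements

	s.Push(newStatement())
	s.MarkComplete(s.Pop())

	fmt.Println(s.Len(), len(s.List())) // 0 1
}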
+// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + s.Continue() + return false + } + + s.prevTok = tok + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true +} + +func (s *skipper) Continue() { + s.shouldSkip = false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go new file mode 100644 index 000000000..ba0af01b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. +func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini defintion. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go new file mode 100644 index 000000000..b5480fdeb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isCaselessLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. 
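getNumericalValue returns the detected base alongside the number of runes consumed; newLitToken feeds both into newValue for strconv parsing. A hypothetical in-package check:

package ini

import "fmt"

func numericDemo() {
	// "0x" selects base 16; the trailing newline terminates the literal.
	base, n, err := getNumericalValue([]rune("0x2A\n"))
	fmt.Println(base, n, err) // 16 4 <nil>
}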
+func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. +func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go new file mode 100644 index 000000000..a07a63738 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go @@ -0,0 +1,269 @@ +package ini + +import ( + "fmt" + "sort" + "strings" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + + // scope is the profile which is being visited + scope string + + // path is the file path which the visitor is visiting + path string + + // Sections defines list of the profile section + Sections Sections +} + +// NewDefaultVisitor returns a DefaultVisitor. It takes in a filepath +// which points to the file it is visiting. +func NewDefaultVisitor(filepath string) *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + path: filepath, + } +} + +// VisitExpr visits expressions... +func (v *DefaultVisitor) VisitExpr(expr AST) error { + t := v.Sections.container[v.scope] + if t.values == nil { + t.values = values{} + } + if t.SourceFile == nil { + t.SourceFile = make(map[string]string, 0) + } + + switch expr.Kind { + case ASTKindExprStatement: + opExpr := expr.GetRoot() + switch opExpr.Kind { + case ASTKindEqualExpr: + children := opExpr.GetChildren() + if len(children) <= 1 { + return NewParseError("unexpected token type") + } + + rhs := children[1] + + // The right-hand value side the equality expression is allowed to contain '[', ']', ':', '=' in the values. + // If the token is not either a literal or one of the token types that identifies those four additional + // tokens then error. 
+ if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) { + return NewParseError("unexpected token type") + } + + key := EqualExprKey(opExpr) + val, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) + if err != nil { + return err + } + + // lower case key to standardize + k := strings.ToLower(key) + + // identify if the section already had this key, append log on section + if t.Has(k) { + t.Logs = append(t.Logs, + fmt.Sprintf("For profile: %v, overriding %v value, "+ + "with a %v value found in a duplicate profile defined later in the same file %v. \n", + t.Name, k, k, v.path)) + } + + // assign the value + t.values[k] = val + // update the source file path for region + t.SourceFile[k] = v.path + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + + v.Sections.container[v.scope] = t + return nil +} + +// VisitStatement visits statements... +func (v *DefaultVisitor) VisitStatement(stmt AST) error { + switch stmt.Kind { + case ASTKindCompletedSectionStatement: + child := stmt.GetRoot() + if child.Kind != ASTKindSectionStatement { + return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) + } + + name := string(child.Root.Raw()) + + // trim start and end space + name = strings.TrimSpace(name) + + // if has prefix "profile " + [ws+] + "profile-name", + // we standardize by removing the [ws+] between prefix and profile-name. + if strings.HasPrefix(name, "profile ") { + names := strings.SplitN(name, " ", 2) + name = names[0] + " " + strings.TrimLeft(names[1], " ") + } + + // attach profile name on section + if !v.Sections.HasSection(name) { + v.Sections.container[name] = NewSection(name) + } + v.scope = name + default: + return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) + } + + return nil +} + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// NewSections returns empty ini Sections +func NewSections() Sections { + return Sections{ + container: make(map[string]Section, 0), + } +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// HasSection denotes if Sections consist of a section with +// provided name. +func (t Sections) HasSection(p string) bool { + _, ok := t.container[p] + return ok +} + +// SetSection sets a section value for provided section name. +func (t Sections) SetSection(p string, v Section) Sections { + t.container[p] = v + return t +} + +// DeleteSection deletes a section entry/value for provided section name./ +func (t Sections) DeleteSection(p string) { + delete(t.container, p) +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. 
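The escape helpers in value_util.go collapse two-rune escape sequences in place, which is how quoted string values lose their backslashes in StringValue. A hypothetical in-package check:

package ini

import "fmt"

func unescapeDemo() {
	in := []rune(`say \"hi\"\n`)
	fmt.Printf("%q\n", string(removeEscapedCharacters(in))) // "say \"hi\"\n"
}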
+type Section struct { + // Name is the Section profile name + Name string + + // values are the values within parsed profile + values values + + // Errors is the list of errors + Errors []error + + // Logs is the list of logs + Logs []string + + // SourceFile is the INI Source file from where this section + // was retrieved. They key is the property, value is the + // source file the property was retrieved from. + SourceFile map[string]string +} + +// NewSection returns an initialize section for the name +func NewSection(name string) Section { + return Section{ + Name: name, + values: values{}, + SourceFile: map[string]string{}, + } +} + +// UpdateSourceFile updates source file for a property to provided filepath. +func (t Section) UpdateSourceFile(property string, filepath string) { + t.SourceFile[property] = filepath +} + +// UpdateValue updates value for a provided key with provided value +func (t Section) UpdateValue(k string, v Value) error { + t.values[k] = v + return nil +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will returned what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) bool { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) int64 { + return t.values[k].IntValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) float64 { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go new file mode 100644 index 000000000..99915f7f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go @@ -0,0 +1,25 @@ +package ini + +// Walk will traverse the AST using the v, the Visitor. +func Walk(tree []AST, v Visitor) error { + for _, node := range tree { + switch node.Kind { + case ASTKindExpr, + ASTKindExprStatement: + + if err := v.VisitExpr(node); err != nil { + return err + } + case ASTKindStatement, + ASTKindCompletedSectionStatement, + ASTKindNestedSectionStatement, + ASTKindCompletedNestedSectionStatement: + + if err := v.VisitStatement(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go new file mode 100644 index 000000000..7ffb4ae06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go @@ -0,0 +1,24 @@ +package ini + +import ( + "unicode" +) + +// isWhitespace will return whether or not the character is +// a whitespace character. +// +// Whitespace is defined as a space or tab. 
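Putting lexer, parser, and visitor together: when a key repeats inside one profile, the later value wins and VisitExpr records an override note on the Section, per the Has check above. A hedged end-to-end sketch (duplicateKeyDemo is a hypothetical name; the package is internal, so this would live in-package):

package ini

import "fmt"

func duplicateKeyDemo() error {
	src := []byte("[default]\nregion = us-east-1\nregion = eu-central-1\n")

	sections, err := ParseBytes(src)
	if err != nil {
		return err
	}

	s, _ := sections.GetSection("default")
	fmt.Println(s.String("region")) // eu-central-1: the later value wins
	fmt.Println(len(s.Logs))        // 1 override note recorded by VisitExpr
	return nil
}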
+func isWhitespace(c rune) bool { + return unicode.IsSpace(c) && c != '\n' && c != '\r' +} + +func newWSToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if !isWhitespace(b[i]) { + break + } + } + + return newToken(TokenWS, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go new file mode 100644 index 000000000..c8484dcd7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go @@ -0,0 +1,33 @@ +package rand + +import ( + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func init() { + Reader = rand.Reader +} + +// Reader provides a random reader that can reset during testing. +var Reader io.Reader + +var floatMaxBigInt = big.NewInt(1 << 53) + +// Float64 returns a float64 read from an io.Reader source. The returned float will be between [0.0, 1.0). +func Float64(reader io.Reader) (float64, error) { + bi, err := rand.Int(reader, floatMaxBigInt) + if err != nil { + return 0, fmt.Errorf("failed to read random value, %v", err) + } + + return float64(bi.Int64()) / (1 << 53), nil +} + +// CryptoRandFloat64 returns a random float64 obtained from the crypto rand +// source. +func CryptoRandFloat64() (float64, error) { + return Float64(Reader) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go new file mode 100644 index 000000000..2b42cbe64 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go @@ -0,0 +1,9 @@ +package sdk + +// Invalidator provides access to a type's invalidate method to make it +// invalidate it cache. +// +// e.g aws.SafeCredentialsProvider's Invalidate method. +type Invalidator interface { + Invalidate() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go new file mode 100644 index 000000000..8e8dabad5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go @@ -0,0 +1,74 @@ +package sdk + +import ( + "context" + "time" +) + +func init() { + NowTime = time.Now + Sleep = time.Sleep + SleepWithContext = sleepWithContext +} + +// NowTime is a value for getting the current time. This value can be overridden +// for testing mocking out current time. +var NowTime func() time.Time + +// Sleep is a value for sleeping for a duration. This value can be overridden +// for testing and mocking out sleep duration. +var Sleep func(time.Duration) + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// This value can be overridden for testing and mocking out sleep duration. +var SleepWithContext func(context.Context, time.Duration) error + +// sleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the +// Context's error will be returned. +func sleepWithContext(ctx context.Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} + +// noOpSleepWithContext does nothing, returns immediately. 
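rand.Float64 draws an integer below 2^53 from the reader and scales it into [0.0, 1.0); Reader is a package variable so tests can substitute a deterministic source. A minimal sketch:

package rand

import "fmt"

func floatDemo() {
	f, err := CryptoRandFloat64()
	fmt.Println(err == nil && f >= 0 && f < 1) // true: results lie in [0.0, 1.0)
}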
+func noOpSleepWithContext(context.Context, time.Duration) error { + return nil +} + +func noOpSleep(time.Duration) {} + +// TestingUseNopSleep is a utility for disabling sleep across the SDK for +// testing. +func TestingUseNopSleep() func() { + SleepWithContext = noOpSleepWithContext + Sleep = noOpSleep + + return func() { + SleepWithContext = sleepWithContext + Sleep = time.Sleep + } +} + +// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time +// for testing purposes. +func TestingUseReferenceTime(referenceTime time.Time) func() { + NowTime = func() time.Time { + return referenceTime + } + return func() { + NowTime = time.Now + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go new file mode 100644 index 000000000..6c443988b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go new file mode 100644 index 000000000..c96b717e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go @@ -0,0 +1,47 @@ +package shareddefaults + +import ( + "os" + "os/user" + "path/filepath" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + // Ignore errors since we only care about Windows and *nix. + home, _ := os.UserHomeDir() + + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir + } + + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go new file mode 100644 index 000000000..d008ae27c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. 
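The sdk package routes all time access through package variables so tests can pin the clock; TestingUseReferenceTime hands back the restore function. A hypothetical test-style sketch:

package sdk

import (
	"fmt"
	"time"
)

func frozenClockDemo() {
	ref := time.Date(2023, 9, 21, 12, 0, 0, 0, time.UTC)

	restore := TestingUseReferenceTime(ref)
	defer restore() // reinstates time.Now

	fmt.Println(NowTime().Equal(ref)) // true while the override is active
}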
+func HasPrefixFold(s, prefix string) bool {
+	return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE
new file mode 100644
index 000000000..fe6a62006
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go
new file mode 100644
index 000000000..cb70616e8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go
@@ -0,0 +1,7 @@
+// Package singleflight provides a duplicate function call suppression
+// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight
+// package. The package is forked because it is part of the unstable and
+// unversioned golang.org/x/sync module.
+//
+// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight
+package singleflight
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go
new file mode 100644
index 000000000..e8a1b17d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package singleflight
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"sync"
+)
+
+// errGoexit indicates the runtime.Goexit was called in
+// the user given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of the given function.
+type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. 
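+//
+// Typical use of Group.Do, as an illustrative sketch (fetchUser is an
+// assumed application helper, not part of this package):
+//
+//	var g singleflight.Group
+//	v, err, shared := g.Do(userID, func() (interface{}, error) {
+//		return fetchUser(userID) // concurrent callers for userID share one call
+//	})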
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+	normalReturn := false
+	recovered := false
+
+	// use double-defer to distinguish panic from runtime.Goexit;
+	// for more details see https://golang.org/cl/134395
+	defer func() {
+		// the given function invoked runtime.Goexit
+		if !normalReturn && !recovered {
+			c.err = errGoexit
+		}
+
+		c.wg.Done()
+		g.mu.Lock()
+		defer g.mu.Unlock()
+		if !c.forgotten {
+			delete(g.m, key)
+		}
+
+		if e, ok := c.err.(*panicError); ok {
+			// In order to prevent the waiting channels from being blocked forever,
+			// we need to ensure that this panic cannot be recovered.
+			if len(c.chans) > 0 {
+				go panic(e)
+				select {} // Keep this goroutine around so that it will appear in the crash dump.
+			} else {
+				panic(e)
+			}
+		} else if c.err == errGoexit {
+			// Already in the process of goexit, no need to call again
+		} else {
+			// Normal return
+			for _, ch := range c.chans {
+				ch <- Result{c.val, c.err, c.dups > 0}
+			}
+		}
+	}()
+
+	func() {
+		defer func() {
+			if !normalReturn {
+				// Ideally, we would wait to take a stack trace until we've determined
+				// whether this is a panic or a runtime.Goexit.
+				//
+				// Unfortunately, the only way we can distinguish the two is to see
+				// whether the recover stopped the goroutine from terminating, and by
+				// the time we know that, the part of the stack trace relevant to the
+				// panic has been discarded.
+				if r := recover(); r != nil {
+					c.err = newPanicError(r)
+				}
+			}
+		}()
+
+		c.val, c.err = fn()
+		normalReturn = true
+	}()
+
+	if !normalReturn {
+		recovered = true
+	}
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+	g.mu.Lock()
+	if c, ok := g.m[key]; ok {
+		c.forgotten = true
+	}
+	delete(g.m, key)
+	g.mu.Unlock()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go
new file mode 100644
index 000000000..5d69db5f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go
@@ -0,0 +1,13 @@
+package timeconv
+
+import "time"
+
+// FloatSecondsDur converts fractional seconds to a duration.
+func FloatSecondsDur(v float64) time.Duration {
+	return time.Duration(v * float64(time.Second))
+}
+
+// DurSecondsFloat converts a duration into fractional seconds.
+func DurSecondsFloat(d time.Duration) float64 {
+	return float64(d) / float64(time.Second)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
new file mode 100644
index 000000000..2979f8b93
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
@@ -0,0 +1,61 @@
+# v1.0.14 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.13 (2022-09-14)
+
+* **Bug Fix**: Fixes an issue where an error from an underlying SigV4 credential provider would not be surfaced from the SigV4a credential provider. Contribution by [sakthipriyan-aqfer](https://github.com/sakthipriyan-aqfer).
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.12 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.11 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.10 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.9 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.8 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.7 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.6 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.5 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.4 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.3 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.2 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.1 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2022-04-07) + +* **Release**: New internal v4a signing module location. + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go new file mode 100644 index 000000000..3ae3a019e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go @@ -0,0 +1,141 @@ +package v4a + +import ( + "context" + "crypto/ecdsa" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" +) + +// Credentials is Context, ECDSA, and Optional Session Token that can be used +// to sign requests using SigV4a +type Credentials struct { + Context string + PrivateKey *ecdsa.PrivateKey + SessionToken string + + // Time the credentials will expire. + CanExpire bool + Expires time.Time +} + +// Expired returns if the credentials have expired. +func (v Credentials) Expired() bool { + if v.CanExpire { + return !v.Expires.After(sdk.NowTime()) + } + + return false +} + +// HasKeys returns if the credentials keys are set. 
+func (v Credentials) HasKeys() bool {
+	return len(v.Context) > 0 && v.PrivateKey != nil
+}
+
+// SymmetricCredentialAdaptor wraps a SigV4 AccessKey/SecretKey provider and adapts the credentials
+// to an ECDSA private key for signing with SigV4a
+type SymmetricCredentialAdaptor struct {
+	SymmetricProvider aws.CredentialsProvider
+
+	asymmetric atomic.Value
+	m          sync.Mutex
+}
+
+// Retrieve retrieves symmetric credentials from the underlying provider.
+func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	symCreds, err := s.retrieveFromSymmetricProvider(ctx)
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	if asymCreds := s.getCreds(); asymCreds == nil {
+		return symCreds, nil
+	}
+
+	s.m.Lock()
+	defer s.m.Unlock()
+
+	asymCreds := s.getCreds()
+	if asymCreds == nil {
+		return symCreds, nil
+	}
+
+	// if the context does not match the access key ID, clear it
+	if asymCreds.Context != symCreds.AccessKeyID {
+		s.asymmetric.Store((*Credentials)(nil))
+	}
+
+	return symCreds, nil
+}
+
+// RetrievePrivateKey returns credentials suitable for SigV4a signing
+func (s *SymmetricCredentialAdaptor) RetrievePrivateKey(ctx context.Context) (Credentials, error) {
+	if asymCreds := s.getCreds(); asymCreds != nil {
+		return *asymCreds, nil
+	}
+
+	s.m.Lock()
+	defer s.m.Unlock()
+
+	if asymCreds := s.getCreds(); asymCreds != nil {
+		return *asymCreds, nil
+	}
+
+	symmetricCreds, err := s.retrieveFromSymmetricProvider(ctx)
+	if err != nil {
+		return Credentials{}, fmt.Errorf("failed to retrieve symmetric credentials: %v", err)
+	}
+
+	privateKey, err := deriveKeyFromAccessKeyPair(symmetricCreds.AccessKeyID, symmetricCreds.SecretAccessKey)
+	if err != nil {
+		return Credentials{}, fmt.Errorf("failed to derive asymmetric key from credentials")
+	}
+
+	creds := Credentials{
+		Context:      symmetricCreds.AccessKeyID,
+		PrivateKey:   privateKey,
+		SessionToken: symmetricCreds.SessionToken,
+		CanExpire:    symmetricCreds.CanExpire,
+		Expires:      symmetricCreds.Expires,
+	}
+
+	s.asymmetric.Store(&creds)
+
+	return creds, nil
+}
+
+func (s *SymmetricCredentialAdaptor) getCreds() *Credentials {
+	v := s.asymmetric.Load()
+
+	if v == nil {
+		return nil
+	}
+
+	c := v.(*Credentials)
+	if c != nil && c.HasKeys() && !c.Expired() {
+		return c
+	}
+
+	return nil
+}
+
+func (s *SymmetricCredentialAdaptor) retrieveFromSymmetricProvider(ctx context.Context) (aws.Credentials, error) {
+	credentials, err := s.SymmetricProvider.Retrieve(ctx)
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	return credentials, nil
+}
+
+// CredentialsProvider is the interface for a provider to retrieve credentials
+// to sign requests with.
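+//
+// A minimal sketch of satisfying it with the adaptor above (illustrative
+// only; credentials.NewStaticCredentialsProvider is assumed from the
+// github.com/aws/aws-sdk-go-v2/credentials module):
+//
+//	provider := &v4a.SymmetricCredentialAdaptor{
+//		SymmetricProvider: credentials.NewStaticCredentialsProvider("AKID", "SECRET", ""),
+//	}
+//	creds, err := provider.RetrievePrivateKey(ctx)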
+type CredentialsProvider interface {
+	RetrievePrivateKey(context.Context) (Credentials, error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go
new file mode 100644
index 000000000..380d17427
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go
@@ -0,0 +1,17 @@
+package v4a
+
+import "fmt"
+
+// SigningError indicates an error condition occurred while performing SigV4a signing
+type SigningError struct {
+	Err error
+}
+
+func (e *SigningError) Error() string {
+	return fmt.Sprintf("failed to sign request: %v", e.Err)
+}
+
+// Unwrap returns the underlying error cause
+func (e *SigningError) Unwrap() error {
+	return e.Err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
new file mode 100644
index 000000000..d4c420fbd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package v4a
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.0.14"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go
new file mode 100644
index 000000000..1d0f25f8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go
@@ -0,0 +1,30 @@
+package crypto
+
+import "fmt"
+
+// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
+// of the two byte slices, assuming they represent a big-endian number.
+//
+//	error if len(x) != len(y)
+//	-1 if x < y
+//	0 if x == y
+//	+1 if x > y
+func ConstantTimeByteCompare(x, y []byte) (int, error) {
+	if len(x) != len(y) {
+		return 0, fmt.Errorf("slice lengths do not match")
+	}
+
+	xLarger, yLarger := 0, 0
+
+	for i := 0; i < len(x); i++ {
+		xByte, yByte := int(x[i]), int(y[i])
+
+		x := ((yByte - xByte) >> 8) & 1
+		y := ((xByte - yByte) >> 8) & 1
+
+		xLarger |= x &^ yLarger
+		yLarger |= y &^ xLarger
+	}
+
+	return xLarger - yLarger, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go
new file mode 100644
index 000000000..758c73fcb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go
@@ -0,0 +1,113 @@
+package crypto
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/hmac"
+	"encoding/asn1"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"math"
+	"math/big"
+)
+
+type ecdsaSignature struct {
+	R, S *big.Int
+}
+
+// ECDSAKey takes the given elliptic curve and private key (d) byte slice
+// and returns the private ECDSA key.
+func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey {
+	return ECDSAKeyFromPoint(curve, (&big.Int{}).SetBytes(d))
+}
+
+// ECDSAKeyFromPoint takes the given elliptic curve and point and returns the
+// private and public keypair
+func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey {
+	pX, pY := curve.ScalarBaseMult(d.Bytes())
+
+	privKey := &ecdsa.PrivateKey{
+		PublicKey: ecdsa.PublicKey{
+			Curve: curve,
+			X:     pX,
+			Y:     pY,
+		},
+		D: d,
+	}
+
+	return privKey
+}
+
+// ECDSAPublicKey takes the provided curve and (x, y) coordinates and returns
+// *ecdsa.PublicKey. Returns an error if the given points are not on the curve.
+func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) {
+	xPoint := (&big.Int{}).SetBytes(x)
+	yPoint := (&big.Int{}).SetBytes(y)
+
+	if !curve.IsOnCurve(xPoint, yPoint) {
+		return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String())
+	}
+
+	return &ecdsa.PublicKey{
+		Curve: curve,
+		X:     xPoint,
+		Y:     yPoint,
+	}, nil
+}
+
+// VerifySignature takes the provided public key, hash, and asn1 encoded signature and returns
+// whether the given signature is valid.
+func VerifySignature(key *ecdsa.PublicKey, hash []byte, signature []byte) (bool, error) {
+	var ecdsaSignature ecdsaSignature
+
+	_, err := asn1.Unmarshal(signature, &ecdsaSignature)
+	if err != nil {
+		return false, err
+	}
+
+	return ecdsa.Verify(key, hash, ecdsaSignature.R, ecdsaSignature.S), nil
+}
+
+// HMACKeyDerivation provides an implementation of a NIST-800-108 KDF (Key Derivation Function) in counter mode.
+// For the purposes of this implementation, HMAC is used as the PRF (pseudorandom function), where the value of
+// `r` is defined as a 4 byte counter.
+func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, context []byte) ([]byte, error) {
+	// verify that we won't overflow the counter
+	n := int64(math.Ceil((float64(bitLen) / 8) / float64(hash().Size())))
+	if n > 0x7FFFFFFF {
+		return nil, fmt.Errorf("unable to derive key of size %d using 32-bit counter", bitLen)
+	}
+
+	// verify the requested bit length is not larger than the length encoding size
+	if int64(bitLen) > 0x7FFFFFFF {
+		return nil, fmt.Errorf("bitLen is greater than 32-bits")
+	}
+
+	fixedInput := bytes.NewBuffer(nil)
+	fixedInput.Write(label)
+	fixedInput.WriteByte(0x00)
+	fixedInput.Write(context)
+	if err := binary.Write(fixedInput, binary.BigEndian, int32(bitLen)); err != nil {
+		return nil, fmt.Errorf("failed to write bit length to fixed input string: %v", err)
+	}
+
+	var output []byte
+
+	h := hmac.New(hash, key)
+
+	for i := int64(1); i <= n; i++ {
+		h.Reset()
+		if err := binary.Write(h, binary.BigEndian, int32(i)); err != nil {
+			return nil, err
+		}
+		_, err := h.Write(fixedInput.Bytes())
+		if err != nil {
+			return nil, err
+		}
+		output = append(output, h.Sum(nil)...)
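+		// Each iteration appends one hash-sized block: PRF(key, counter || label || 0x00 || context || bitLen).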
+	}
+
+	return output[:bitLen/8], nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go
new file mode 100644
index 000000000..89a76e2ea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go
@@ -0,0 +1,36 @@
+package v4
+
+const (
+	// EmptyStringSHA256 is the hex encoded sha256 value of an empty string
+	EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+
+	// UnsignedPayload indicates that the request payload body is unsigned
+	UnsignedPayload = "UNSIGNED-PAYLOAD"
+
+	// AmzAlgorithmKey indicates the signing algorithm
+	AmzAlgorithmKey = "X-Amz-Algorithm"
+
+	// AmzSecurityTokenKey indicates the security token to be used with temporary credentials
+	AmzSecurityTokenKey = "X-Amz-Security-Token"
+
+	// AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
+	AmzDateKey = "X-Amz-Date"
+
+	// AmzCredentialKey is the access key ID and credential scope
+	AmzCredentialKey = "X-Amz-Credential"
+
+	// AmzSignedHeadersKey is the set of headers signed for the request
+	AmzSignedHeadersKey = "X-Amz-SignedHeaders"
+
+	// AmzSignatureKey is the query parameter to store the SigV4 signature
+	AmzSignatureKey = "X-Amz-Signature"
+
+	// TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
+	TimeFormat = "20060102T150405Z"
+
+	// ShortTimeFormat is the shortened time format used in the credential scope
+	ShortTimeFormat = "20060102"
+
+	// ContentSHAKey is the SHA-256 of the request body
+	ContentSHAKey = "X-Amz-Content-Sha256"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go
new file mode 100644
index 000000000..a15177e8f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
+// Rules houses a set of Rule needed for validation of a
+// string value
+type Rules []Rule
+
+// Rule interface allows for more flexible rules; a Rule simply
+// checks whether or not a value adheres to it
+type Rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid iterates through all rules and reports whether any rule
+// applies to the value; nested rules are supported
+func (r Rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapRule is a generic Rule for maps
+type MapRule map[string]struct{}
+
+// IsValid for MapRule checks whether the value exists in the map
+func (m MapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// AllowList is a generic Rule for whitelisting
+type AllowList struct {
+	Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList
+func (w AllowList) IsValid(value string) bool {
+	return w.Rule.IsValid(value)
+}
+
+// DenyList is a generic Rule for blacklisting
+type DenyList struct {
+	Rule
+}
+
+// IsValid for DenyList checks that the value is not within the DenyList
+func (b DenyList) IsValid(value string) bool {
+	return !b.Rule.IsValid(value)
+}
+
+// Patterns is a list of strings to match against
+type Patterns []string
+
+// IsValid for Patterns checks each pattern and returns whether a match has
+// been found
+func (p Patterns) IsValid(value string) bool {
+	for _, pattern :=
range p { + if sdkstrings.HasPrefixFold(value, pattern) { + return true + } + } + return false +} + +// InclusiveRules rules allow for rules to depend on one another +type InclusiveRules []Rule + +// IsValid will return true if all rules are true +func (r InclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go new file mode 100644 index 000000000..3487dc335 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go @@ -0,0 +1,67 @@ +package v4 + +// IgnoredHeaders is a list of headers that are ignored during signing +var IgnoredHeaders = Rules{ + DenyList{ + MapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// RequiredSignedHeaders is a whitelist for Build canonical headers. +var RequiredSignedHeaders = Rules{ + AllowList{ + MapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + "X-Amz-Tagging": struct{}{}, + }, + }, + Patterns{"X-Amz-Meta-"}, +} + +// AllowedQueryHoisting is a whitelist for Build query headers. The boolean value +// represents whether or not it is a pattern. +var AllowedQueryHoisting = InclusiveRules{ + DenyList{RequiredSignedHeaders}, + Patterns{"X-Amz-"}, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go new file mode 100644 index 000000000..e7fa7a1b1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go @@ -0,0 +1,13 @@ +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" +) + +// HMACSHA256 computes a HMAC-SHA256 of data given the provided key. 
+func HMACSHA256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go new file mode 100644 index 000000000..bf93659a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go @@ -0,0 +1,75 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go new file mode 100644 index 000000000..1de06a765 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go @@ -0,0 +1,36 @@ +package v4 + +import "time" + +// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. +type SigningTime struct { + time.Time + timeFormat string + shortTimeFormat string +} + +// NewSigningTime creates a new SigningTime given a time.Time +func NewSigningTime(t time.Time) SigningTime { + return SigningTime{ + Time: t, + } +} + +// TimeFormat provides a time formatted in the X-Amz-Date format. +func (m *SigningTime) TimeFormat() string { + return m.format(&m.timeFormat, TimeFormat) +} + +// ShortTimeFormat provides a time formatted of 20060102. 
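+// For example, a signing time of 2015-03-07T12:45:00Z is rendered as "20150307".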
+func (m *SigningTime) ShortTimeFormat() string {
+	return m.format(&m.shortTimeFormat, ShortTimeFormat)
+}
+
+func (m *SigningTime) format(target *string, format string) string {
+	if len(*target) > 0 {
+		return *target
+	}
+	v := m.Time.Format(format)
+	*target = v
+	return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go
new file mode 100644
index 000000000..741019b5f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go
@@ -0,0 +1,64 @@
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+const doubleSpace = "  "
+
+// StripExcessSpaces rewrites the passed-in string so that it does not
+// contain multiple side-by-side spaces.
+func StripExcessSpaces(str string) string {
+	var j, k, l, m, spaces int
+	// Trim trailing spaces
+	for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+	}
+
+	// Trim leading spaces
+	for k = 0; k < j && str[k] == ' '; k++ {
+	}
+	str = str[k : j+1]
+
+	// Strip multiple spaces.
+	j = strings.Index(str, doubleSpace)
+	if j < 0 {
+		return str
+	}
+
+	buf := []byte(str)
+	for k, m, l = j, j, len(buf); k < l; k++ {
+		if buf[k] == ' ' {
+			if spaces == 0 {
+				// First space.
+				buf[m] = buf[k]
+				m++
+			}
+			spaces++
+		} else {
+			// End of multiple spaces.
+			spaces = 0
+			buf[m] = buf[k]
+			m++
+		}
+	}
+
+	return string(buf[:m])
+}
+
+// GetURIPath returns the escaped URI component from the provided URL
+func GetURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go
new file mode 100644
index 000000000..55d5e8abd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go
@@ -0,0 +1,105 @@
+package v4a
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPSigner is a SigV4a HTTP signer implementation
+type HTTPSigner interface {
+	SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optfns ...func(*SignerOptions)) error
+}
+
+// SignHTTPRequestMiddlewareOptions is the middleware options for constructing a SignHTTPRequestMiddleware.
+type SignHTTPRequestMiddlewareOptions struct {
+	Credentials CredentialsProvider
+	Signer      HTTPSigner
+	LogSigning  bool
+}
+
+// SignHTTPRequestMiddleware is a middleware for signing an HTTP request using SigV4a.
+type SignHTTPRequestMiddleware struct {
+	credentials CredentialsProvider
+	signer      HTTPSigner
+	logSigning  bool
+}
+
+// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given SignHTTPRequestMiddlewareOptions.
+func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
+	return &SignHTTPRequestMiddleware{
+		credentials: options.Credentials,
+		signer:      options.Signer,
+		logSigning:  options.LogSigning,
+	}
+}
+
+// ID is the middleware identifier.
+func (s *SignHTTPRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+// HandleFinalize signs an HTTP request using SigV4a.
+func (s *SignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if !hasCredentialProvider(s.credentials) {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unexpected request middleware type %T", in.Request)
+	}
+
+	signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx)
+	payloadHash := v4.GetPayloadHash(ctx)
+	if len(payloadHash) == 0 {
+		return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")}
+	}
+
+	credentials, err := s.credentials.RetrievePrivateKey(ctx)
+	if err != nil {
+		return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
+	}
+
+	err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, []string{signingRegion}, time.Now().UTC(), func(o *SignerOptions) {
+		o.Logger = middleware.GetLogger(ctx)
+		o.LogSigning = s.logSigning
+	})
+	if err != nil {
+		return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
+	}
+
+	return next.HandleFinalize(ctx, in)
+}
+
+func hasCredentialProvider(p CredentialsProvider) bool {
+	if p == nil {
+		return false
+	}
+
+	return true
+}
+
+// RegisterSigningMiddleware registers the SigV4a signing middleware to the stack. If a signing middleware is already
+// present, the provided middleware will be swapped in. Otherwise, the middleware will be added at the tail of the
+// finalize step.
+func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) {
+	const signedID = "Signing"
+	_, present := stack.Finalize.Get(signedID)
+	if present {
+		_, err = stack.Finalize.Swap(signedID, signingMiddleware)
+	} else {
+		err = stack.Finalize.Add(signingMiddleware, middleware.After)
+	}
+	return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go
new file mode 100644
index 000000000..951fc415d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go
@@ -0,0 +1,117 @@
+package v4a
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/middleware"
+	smithyHTTP "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPPresigner is an interface to a SigV4a signer that can sign and create a
+// presigned URL for an HTTP request.
+type HTTPPresigner interface {
+	PresignHTTP(
+		ctx context.Context, credentials Credentials, r *http.Request,
+		payloadHash string, service string, regionSet []string, signingTime time.Time,
+		optFns ...func(*SignerOptions),
+	) (url string, signedHeader http.Header, err error)
+}
+
+// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+	CredentialsProvider CredentialsProvider
+	Presigner           HTTPPresigner
+	LogSigning          bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// It will short-circuit the middleware stack and not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+	credentialsProvider CredentialsProvider
+	presigner           HTTPPresigner
+	logSigning          bool
+}
+
+// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware
+// initialized with the presigner.
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+	return &PresignHTTPRequestMiddleware{
+		credentialsProvider: options.CredentialsProvider,
+		presigner:           options.Presigner,
+		logSigning:          options.LogSigning,
+	}
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned URL for
+// the HTTP request using the SigV4a presign authentication scheme.
+func (s *PresignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyHTTP.Request)
+	if !ok {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+		}
+	}
+
+	httpReq := req.Build(ctx)
+	if !hasCredentialProvider(s.credentialsProvider) {
+		out.Result = &v4.PresignedHTTPRequest{
+			URL:          httpReq.URL.String(),
+			Method:       httpReq.Method,
+			SignedHeader: http.Header{},
+		}
+
+		return out, metadata, nil
+	}
+
+	signingName := awsmiddleware.GetSigningName(ctx)
+	signingRegion := awsmiddleware.GetSigningRegion(ctx)
+	payloadHash := v4.GetPayloadHash(ctx)
+	if len(payloadHash) == 0 {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("computed payload hash missing from context"),
+		}
+	}
+
+	credentials, err := s.credentialsProvider.RetrievePrivateKey(ctx)
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+		}
+	}
+
+	u, h, err := s.presigner.PresignHTTP(ctx, credentials,
+		httpReq, payloadHash, signingName, []string{signingRegion}, sdk.NowTime(),
+		func(o *SignerOptions) {
+			o.Logger = middleware.GetLogger(ctx)
+			o.LogSigning = s.logSigning
+		})
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to sign http request, %w", err),
+		}
+	}
+
+	out.Result = &v4.PresignedHTTPRequest{
+		URL:          u,
+		Method:       httpReq.Method,
+		SignedHeader: h,
+	}
+
+	return out, metadata, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go
new file mode 100644
index 000000000..f1f6ecc37
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go
@@ -0,0 +1,520 @@
+package v4a
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"math/big"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	signerCrypto "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto"
+	v4Internal "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	"github.com/aws/smithy-go/logging"
+)
+
+const (
+	// AmzRegionSetKey represents the region set header used for sigv4a
+	AmzRegionSetKey     = "X-Amz-Region-Set"
+	amzAlgorithmKey     = v4Internal.AmzAlgorithmKey
+	amzSecurityTokenKey = v4Internal.AmzSecurityTokenKey
+	amzDateKey          = v4Internal.AmzDateKey
+	amzCredentialKey    = v4Internal.AmzCredentialKey
+	amzSignedHeadersKey = v4Internal.AmzSignedHeadersKey
+	authorizationHeader = "Authorization"
+
+	signingAlgorithm = "AWS4-ECDSA-P256-SHA256"
+
+	timeFormat      = "20060102T150405Z"
+	shortTimeFormat = "20060102"
+
+	// EmptyStringSHA256 is a hex encoded SHA-256 hash of an empty string
+	EmptyStringSHA256 = v4Internal.EmptyStringSHA256
+
+	// Version of signing v4a
+	Version = "SigV4A"
+)
+
+var (
+	p256          elliptic.Curve
+	nMinusTwoP256 *big.Int
+
+	one = new(big.Int).SetInt64(1)
+)
+
+func init() {
+	// Ensure the elliptic curve parameters are initialized on package import rather than on first use
+	p256 = elliptic.P256()
+
+	nMinusTwoP256 = new(big.Int).SetBytes(p256.Params().N.Bytes())
+	nMinusTwoP256 = nMinusTwoP256.Sub(nMinusTwoP256, new(big.Int).SetInt64(2))
+}
+
+// SignerOptions is the SigV4a signing options for constructing a Signer.
+type SignerOptions struct {
+	Logger     logging.Logger
+	LogSigning bool
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
+	// escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+}
+
+// Signer is a SigV4a HTTP signing implementation
+type Signer struct {
+	options SignerOptions
+}
+
+// NewSigner constructs a SigV4a Signer.
+func NewSigner(optFns ...func(*SignerOptions)) *Signer {
+	options := SignerOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &Signer{options: options}
+}
+
+// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given
+// IAM AccessKey and SecretKey pair.
+//
+// Based on FIPS.186-4 Appendix B.4.2
+func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) {
+	params := p256.Params()
+	bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits
+	counter := 0x01
+
+	buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey)
+	kdfContext := bytes.NewBuffer(buffer)
+
+	inputKey := append([]byte("AWS4A"), []byte(secretKey)...)
+
+	d := new(big.Int)
+	for {
+		kdfContext.Reset()
+		kdfContext.WriteString(accessKey)
+		kdfContext.WriteByte(byte(counter))
+
+		key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes())
+		if err != nil {
+			return nil, err
+		}
+
+		// Check the key before calling SetBytes, to verify it is in fact a valid candidate.
+		// This ensures the byte slice is the correct length (32 bytes) to compare in constant time
+		cmp, err := signerCrypto.ConstantTimeByteCompare(key, nMinusTwoP256.Bytes())
+		if err != nil {
+			return nil, err
+		}
+		if cmp == -1 {
+			d.SetBytes(key)
+			break
+		}
+
+		counter++
+		if counter > 0xFF {
+			return nil, fmt.Errorf("exhausted single byte external counter")
+		}
+	}
+	d = d.Add(d, one)
+
+	priv := new(ecdsa.PrivateKey)
+	priv.PublicKey.Curve = p256
+	priv.D = d
+	priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes())
+
+	return priv, nil
+}
+
+type httpSigner struct {
+	Request     *http.Request
+	ServiceName string
+	RegionSet   []string
+	Time        time.Time
+	Credentials Credentials
+	IsPreSign   bool
+
+	Logger logging.Logger
+	Debug  bool
+
+	// PayloadHash is the hex encoded SHA-256 hash of the request payload.
+	// If len(PayloadHash) == 0 the signer will attempt to send the request
+	// as an unsigned payload. Note: Unsigned payloads only work for a subset of services.
+	PayloadHash string
+
+	DisableHeaderHoisting  bool
+	DisableURIPathEscaping bool
+}
+
+// SignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and signs using SigV4a.
+// The passed in request will be modified in place.
+func (s *Signer) SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) error {
+	options := s.options
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	signer := &httpSigner{
+		Request:                r,
+		PayloadHash:            payloadHash,
+		ServiceName:            service,
+		RegionSet:              regionSet,
+		Credentials:            credentials,
+		Time:                   signingTime.UTC(),
+		DisableHeaderHoisting:  options.DisableHeaderHoisting,
+		DisableURIPathEscaping: options.DisableURIPathEscaping,
+	}
+
+	signedRequest, err := signer.Build()
+	if err != nil {
+		return err
+	}
+
+	logHTTPSigningInfo(ctx, options, signedRequest)
+
+	return nil
+}
+
+// PresignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and presigns using SigV4a.
+// Returns the presigned URL along with the headers that were signed with the request.
+//
+// PresignHTTP will not set the expires time of the presigned request
+// automatically. To specify the expire duration for a request, add the
+// "X-Amz-Expires" query parameter on the request with the value as the
+// duration in seconds the presigned URL should be considered valid for. This
+// parameter is not used by all AWS services, and is most notably used by
+// Amazon S3 APIs.
+func (s *Signer) PresignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) (signedURI string, signedHeaders http.Header, err error) {
+	options := s.options
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	signer := &httpSigner{
+		Request:                r,
+		PayloadHash:            payloadHash,
+		ServiceName:            service,
+		RegionSet:              regionSet,
+		Credentials:            credentials,
+		Time:                   signingTime.UTC(),
+		IsPreSign:              true,
+		DisableHeaderHoisting:  options.DisableHeaderHoisting,
+		DisableURIPathEscaping: options.DisableURIPathEscaping,
+	}
+
+	signedRequest, err := signer.Build()
+	if err != nil {
+		return "", nil, err
+	}
+
+	logHTTPSigningInfo(ctx, options, signedRequest)
+
+	signedHeaders = make(http.Header)
+
+	// For the signed headers we canonicalize the header keys in the returned map.
+	// This avoids situations where the standard library can double headers like the Host header.
For example the standard + // library will set the Host header, even if it is present in lower-case form. + for k, v := range signedRequest.SignedHeaders { + key := textproto.CanonicalMIMEHeaderKey(k) + signedHeaders[key] = append(signedHeaders[key], v...) + } + + return signedRequest.Request.URL.String(), signedHeaders, nil +} + +func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { + amzDate := s.Time.Format(timeFormat) + + if s.IsPreSign { + query.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ",")) + query.Set(amzDateKey, amzDate) + query.Set(amzAlgorithmKey, signingAlgorithm) + if len(s.Credentials.SessionToken) > 0 { + query.Set(amzSecurityTokenKey, s.Credentials.SessionToken) + } + return + } + + headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ",")) + headers.Set(amzDateKey, amzDate) + if len(s.Credentials.SessionToken) > 0 { + headers.Set(amzSecurityTokenKey, s.Credentials.SessionToken) + } +} + +func (s *httpSigner) Build() (signedRequest, error) { + req := s.Request + + query := req.URL.Query() + headers := req.Header + + s.setRequiredSigningFields(headers, query) + + // Sort Each Query Key's Values + for key := range query { + sort.Strings(query[key]) + } + + v4Internal.SanitizeHostForHeader(req) + + credentialScope := s.buildCredentialScope() + credentialStr := s.Credentials.Context + "/" + credentialScope + if s.IsPreSign { + query.Set(amzCredentialKey, credentialStr) + } + + unsignedHeaders := headers + if s.IsPreSign && !s.DisableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, unsignedHeaders) + for k := range urlValues { + query[k] = urlValues[k] + } + } + + host := req.URL.Host + if len(req.Host) > 0 { + host = req.Host + } + + signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) + + if s.IsPreSign { + query.Set(amzSignedHeadersKey, signedHeadersStr) + } + + rawQuery := strings.Replace(query.Encode(), "+", "%20", -1) + + canonicalURI := v4Internal.GetURIPath(req.URL) + if !s.DisableURIPathEscaping { + canonicalURI = httpbinding.EscapePath(canonicalURI, false) + } + + canonicalString := s.buildCanonicalString( + req.Method, + canonicalURI, + rawQuery, + signedHeadersStr, + canonicalHeaderStr, + ) + + strToSign := s.buildStringToSign(credentialScope, canonicalString) + signingSignature, err := s.buildSignature(strToSign) + if err != nil { + return signedRequest{}, err + } + + if s.IsPreSign { + rawQuery += "&X-Amz-Signature=" + signingSignature + } else { + headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) + } + + req.URL.RawQuery = rawQuery + + return signedRequest{ + Request: req, + SignedHeaders: signedHeaders, + CanonicalString: canonicalString, + StringToSign: strToSign, + PreSigned: s.IsPreSign, + }, nil +} + +func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { + const credential = "Credential=" + const signedHeaders = "SignedHeaders=" + const signature = "Signature=" + const commaSpace = ", " + + var parts strings.Builder + parts.Grow(len(signingAlgorithm) + 1 + + len(credential) + len(credentialStr) + len(commaSpace) + + len(signedHeaders) + len(signedHeadersStr) + len(commaSpace) + + len(signature) + len(signingSignature), + ) + parts.WriteString(signingAlgorithm) + parts.WriteRune(' ') + 
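+	// Illustrative note: the pieces written below assemble into a header of
+	// the form
+	//
+	//	<algorithm> Credential=<context>/<yyyyMMdd>/<service>/aws4_request, SignedHeaders=<h1;h2>, Signature=<hex>
+	//
+	// Unlike plain SigV4 the credential scope carries no region (see
+	// buildCredentialScope); the region set travels separately in the
+	// X-Amz-Region-Set header or query parameter (see setRequiredSigningFields).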
parts.WriteString(credential) + parts.WriteString(credentialStr) + parts.WriteString(commaSpace) + parts.WriteString(signedHeaders) + parts.WriteString(signedHeadersStr) + parts.WriteString(commaSpace) + parts.WriteString(signature) + parts.WriteString(signingSignature) + return parts.String() +} + +func (s *httpSigner) buildCredentialScope() string { + return strings.Join([]string{ + s.Time.Format(shortTimeFormat), + s.ServiceName, + "aws4_request", + }, "/") + +} + +func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} + +func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { + signed = make(http.Header) + + var headers []string + const hostHeader = "host" + headers = append(headers, hostHeader) + signed[hostHeader] = append(signed[hostHeader], host) + + if length > 0 { + const contentLengthHeader = "content-length" + headers = append(headers, contentLengthHeader) + signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) + } + + for k, v := range header { + if !rule.IsValid(k) { + continue // ignored header + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := signed[lowerCaseKey]; ok { + // include additional values + signed[lowerCaseKey] = append(signed[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + signed[lowerCaseKey] = v + } + sort.Strings(headers) + + signedHeaders = strings.Join(headers, ";") + + var canonicalHeaders strings.Builder + n := len(headers) + const colon = ':' + for i := 0; i < n; i++ { + if headers[i] == hostHeader { + canonicalHeaders.WriteString(hostHeader) + canonicalHeaders.WriteRune(colon) + canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) + } else { + canonicalHeaders.WriteString(headers[i]) + canonicalHeaders.WriteRune(colon) + // Trim out leading, trailing, and dedup inner spaces from signed header values. 
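+			// Illustrative note: a raw value of "  foo   bar " is emitted as
+			// "foo bar", and multiple values for one key are joined with commas,
+			// yielding one canonical "key:value1,value2" line per header.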
+ values := signed[headers[i]] + for j, v := range values { + cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) + canonicalHeaders.WriteString(cleanedValue) + if j < len(values)-1 { + canonicalHeaders.WriteRune(',') + } + } + } + canonicalHeaders.WriteRune('\n') + } + canonicalHeadersStr = canonicalHeaders.String() + + return signed, signedHeaders, canonicalHeadersStr +} + +func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { + return strings.Join([]string{ + method, + uri, + query, + canonicalHeaders, + signedHeaders, + s.PayloadHash, + }, "\n") +} + +func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { + return strings.Join([]string{ + signingAlgorithm, + s.Time.Format(timeFormat), + credentialScope, + hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), + }, "\n") +} + +func makeHash(hash hash.Hash, b []byte) []byte { + hash.Reset() + hash.Write(b) + return hash.Sum(nil) +} + +func (s *httpSigner) buildSignature(strToSign string) (string, error) { + sig, err := s.Credentials.PrivateKey.Sign(rand.Reader, makeHash(sha256.New(), []byte(strToSign)), crypto.SHA256) + if err != nil { + return "", err + } + return hex.EncodeToString(sig), nil +} + +const logSignInfoMsg = `Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func logHTTPSigningInfo(ctx context.Context, options SignerOptions, r signedRequest) { + if !options.LogSigning { + return + } + signedURLMsg := "" + if r.PreSigned { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, r.Request.URL.String()) + } + logger := logging.WithContext(ctx, options.Logger) + logger.Logf(logging.Debug, logSignInfoMsg, r.CanonicalString, r.StringToSign, signedURLMsg) +} + +type signedRequest struct { + Request *http.Request + SignedHeaders http.Header + CanonicalString string + StringToSign string + PreSigned bool +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh b/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh new file mode 100644 index 000000000..81a836127 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +PROJECT_DIR="" +SDK_SOURCE_DIR=$(cd `dirname $0` && pwd) + +usage() { + echo "Usage: $0 [-s SDK_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2 + exit 1 +} + +while getopts "hs:d:" options; do + case "${options}" in + s) + SDK_SOURCE_DIR=${OPTARG} + if [ "$SDK_SOURCE_DIR" == "" ]; then + echo "path to SDK source directory is required" || exit + usage + fi + ;; + d) + PROJECT_DIR=${OPTARG} + ;; + h) + usage + ;; + *) + usage + ;; + esac +done + +if [ "$PROJECT_DIR" != "" ]; then + cd "$PROJECT_DIR" || exit +fi + +go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/aws-sdk-go-v2" | while read x; do + repPath=${x/github.com\/aws\/aws-sdk-go-v2/${SDK_SOURCE_DIR}} + echo -replace $x=$repPath +done | xargs go mod edit diff --git a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml new file mode 100644 index 000000000..b6d07cdd6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml @@ -0,0 +1,78 @@ + +[dependencies] + "github.com/aws/aws-sdk-go" = "v1.44.28" + "github.com/aws/smithy-go" = "v1.13.5" + 
"github.com/google/go-cmp" = "v0.5.8" + "github.com/jmespath/go-jmespath" = "v0.4.0" + "golang.org/x/net" = "v0.1.0" + +[modules] + + [modules."."] + metadata_package = "aws" + + [modules.codegen] + no_tag = true + + [modules."example/service/dynamodb/createTable"] + no_tag = true + + [modules."example/service/dynamodb/scanItems"] + no_tag = true + + [modules."example/service/s3/listObjects"] + no_tag = true + + [modules."example/service/s3/usingPrivateLink"] + no_tag = true + + [modules."feature/ec2/imds/internal/configtesting"] + no_tag = true + + [modules."internal/codegen"] + no_tag = true + + [modules."internal/configsources/configtesting"] + no_tag = true + + [modules."internal/protocoltest/awsrestjson"] + no_tag = true + + [modules."internal/protocoltest/ec2query"] + no_tag = true + + [modules."internal/protocoltest/jsonrpc"] + no_tag = true + + [modules."internal/protocoltest/jsonrpc10"] + no_tag = true + + [modules."internal/protocoltest/query"] + no_tag = true + + [modules."internal/protocoltest/restxml"] + no_tag = true + + [modules."internal/protocoltest/restxmlwithnamespace"] + no_tag = true + + [modules."internal/repotools"] + no_tag = true + + [modules."internal/repotools/changes"] + no_tag = true + + [modules."service/internal/benchmark"] + no_tag = true + + [modules."service/internal/integrationtest"] + no_tag = true + + [modules."service/kinesis/internal/testing"] + no_tag = true + + [modules."service/s3/internal/configtesting"] + no_tag = true + + [modules."service/transcribestreaming/internal/testing"] + no_tag = true diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md new file mode 100644 index 000000000..c5699ec02 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/CHANGELOG.md @@ -0,0 +1,242 @@ +# v1.18.7 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.18.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.18.2 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2023-01-23) + +* No change notes available for this release. + +# v1.18.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.17.25 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.24 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.23 (2022-11-22) + +* No change notes available for this release. + +# v1.17.22 (2022-11-16) + +* No change notes available for this release. + +# v1.17.21 (2022-11-10) + +* No change notes available for this release. 
+
+# v1.17.20 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.19 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.18 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.17 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.16 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.15 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.14 (2022-08-30)
+
+* No change notes available for this release.
+
+# v1.17.13 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.12 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2022-03-21)
+
+* **Feature**: This release includes a fix in the DescribeImageScanFindings paginated output.
+
+# v1.16.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Feature**: Updated service client model to latest release.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adds a new retry mode, `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2021-12-21)
+
+* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
+* **Feature**: Updated to latest service endpoints + +# v1.11.1 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-11-30) + +* **Feature**: API client updated + +# v1.10.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. +* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature. + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-09-24) + +* **Feature**: API client updated + +# v1.6.0 (2021-09-17) + +* **Feature**: Updated API client and endpoints to latest revision. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
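The generated `api_client.go` that follows exposes its configuration through the exported `Options` struct and functional options. A minimal construction sketch, assuming shared AWS configuration is available; the region and retry settings are arbitrary examples:

```go
package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

func newECRClient(ctx context.Context) *ecr.Client {
	// Shared configuration (environment, ~/.aws/config) feeds NewFromConfig.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("eu-west-1")) // example region
	if err != nil {
		log.Fatal(err)
	}

	// Per-client overrides go through functional options, mirroring the
	// Options fields defined below (Retryer, RetryMode, RetryMaxAttempts, ...).
	return ecr.NewFromConfig(cfg, func(o *ecr.Options) {
		o.RetryMaxAttempts = 5
		o.RetryMode = aws.RetryModeAdaptive
	})
}
```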
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go new file mode 100644 index 000000000..c51da7830 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_client.go @@ -0,0 +1,434 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "ECR" +const ServiceAPIVersion = "2015-09-21" + +// Client provides the API client to make operations call for Amazon EC2 Container +// Registry. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. 
If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ecr", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go new file mode 100644 index 000000000..cea7521f6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchCheckLayerAvailability.go @@ -0,0 +1,140 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Checks the availability of one or more image layers in a repository. When an +// image is pushed to a repository, each image layer is checked to verify if it has +// been uploaded before. If it has been uploaded, then the image layer is skipped. +// This operation is used by the Amazon ECR proxy and is not generally used by +// customers for pulling and pushing images. In most cases, you should use the +// docker CLI to pull, tag, and push images. 
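+//
+// Illustrative caller-side sketch (hypothetical client, repository name, and
+// digest; aws.String is from github.com/aws/aws-sdk-go-v2/aws):
+//
+//	out, err := client.BatchCheckLayerAvailability(ctx, &ecr.BatchCheckLayerAvailabilityInput{
+//		RepositoryName: aws.String("my-repo"),
+//		LayerDigests:   []string{"sha256:<digest>"},
+//	})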
+func (c *Client) BatchCheckLayerAvailability(ctx context.Context, params *BatchCheckLayerAvailabilityInput, optFns ...func(*Options)) (*BatchCheckLayerAvailabilityOutput, error) { + if params == nil { + params = &BatchCheckLayerAvailabilityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchCheckLayerAvailability", params, optFns, c.addOperationBatchCheckLayerAvailabilityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchCheckLayerAvailabilityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type BatchCheckLayerAvailabilityInput struct { + + // The digests of the image layers to check. + // + // This member is required. + LayerDigests []string + + // The name of the repository that is associated with the image layers to check. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the image layers to check. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type BatchCheckLayerAvailabilityOutput struct { + + // Any failures associated with the call. + Failures []types.LayerFailure + + // A list of image layer objects corresponding to the image layer references in the + // request. + Layers []types.Layer + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchCheckLayerAvailability{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchCheckLayerAvailability{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpBatchCheckLayerAvailabilityValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchCheckLayerAvailability(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opBatchCheckLayerAvailability(region string) *awsmiddleware.RegisterServiceMetadata { + 
return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "BatchCheckLayerAvailability", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go new file mode 100644 index 000000000..80c9d5b5c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchDeleteImage.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a list of specified images within a repository. Images are specified +// with either an imageTag or imageDigest. You can remove a tag from an image by +// specifying the image's tag in your request. When you remove the last tag from an +// image, the image is deleted from your repository. You can completely delete an +// image (and all of its tags) by specifying the image's digest in your request. +func (c *Client) BatchDeleteImage(ctx context.Context, params *BatchDeleteImageInput, optFns ...func(*Options)) (*BatchDeleteImageOutput, error) { + if params == nil { + params = &BatchDeleteImageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchDeleteImage", params, optFns, c.addOperationBatchDeleteImageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchDeleteImageOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Deletes specified images within a specified repository. Images are specified +// with either the imageTag or imageDigest. +type BatchDeleteImageInput struct { + + // A list of image ID references that correspond to images to delete. The format of + // the imageIds reference is imageTag=tag or imageDigest=digest. + // + // This member is required. + ImageIds []types.ImageIdentifier + + // The repository that contains the image to delete. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the image to delete. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type BatchDeleteImageOutput struct { + + // Any failures associated with the call. + Failures []types.ImageFailure + + // The image IDs of the deleted images. + ImageIds []types.ImageIdentifier + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchDeleteImage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchDeleteImage{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpBatchDeleteImageValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchDeleteImage(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opBatchDeleteImage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "BatchDeleteImage", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go new file mode 100644 index 000000000..b2724f89a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetImage.go @@ -0,0 +1,143 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets detailed information for an image. Images are specified with either an +// imageTag or imageDigest. When an image is pulled, the BatchGetImage API is +// called once to retrieve the image manifest. 
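+//
+// Illustrative caller-side sketch (hypothetical client and names; types is
+// github.com/aws/aws-sdk-go-v2/service/ecr/types):
+//
+//	out, err := client.BatchGetImage(ctx, &ecr.BatchGetImageInput{
+//		RepositoryName: aws.String("my-repo"),
+//		ImageIds:       []types.ImageIdentifier{{ImageTag: aws.String("latest")}},
+//	})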
+func (c *Client) BatchGetImage(ctx context.Context, params *BatchGetImageInput, optFns ...func(*Options)) (*BatchGetImageOutput, error) { + if params == nil { + params = &BatchGetImageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchGetImage", params, optFns, c.addOperationBatchGetImageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchGetImageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type BatchGetImageInput struct { + + // A list of image ID references that correspond to images to describe. The format + // of the imageIds reference is imageTag=tag or imageDigest=digest. + // + // This member is required. + ImageIds []types.ImageIdentifier + + // The repository that contains the images to describe. + // + // This member is required. + RepositoryName *string + + // The accepted media types for the request. Valid values: + // application/vnd.docker.distribution.manifest.v1+json | + // application/vnd.docker.distribution.manifest.v2+json | + // application/vnd.oci.image.manifest.v1+json + AcceptedMediaTypes []string + + // The Amazon Web Services account ID associated with the registry that contains + // the images to describe. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type BatchGetImageOutput struct { + + // Any failures associated with the call. + Failures []types.ImageFailure + + // A list of image objects corresponding to the image references in the request. + Images []types.Image + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchGetImageMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchGetImage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchGetImage{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpBatchGetImageValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetImage(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + 
return nil +} + +func newServiceMetadataMiddleware_opBatchGetImage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "BatchGetImage", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go new file mode 100644 index 000000000..42adcbbc4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_BatchGetRepositoryScanningConfiguration.go @@ -0,0 +1,124 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the scanning configuration for one or more repositories. +func (c *Client) BatchGetRepositoryScanningConfiguration(ctx context.Context, params *BatchGetRepositoryScanningConfigurationInput, optFns ...func(*Options)) (*BatchGetRepositoryScanningConfigurationOutput, error) { + if params == nil { + params = &BatchGetRepositoryScanningConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchGetRepositoryScanningConfiguration", params, optFns, c.addOperationBatchGetRepositoryScanningConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchGetRepositoryScanningConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type BatchGetRepositoryScanningConfigurationInput struct { + + // One or more repository names to get the scanning configuration for. + // + // This member is required. + RepositoryNames []string + + noSmithyDocumentSerde +} + +type BatchGetRepositoryScanningConfigurationOutput struct { + + // Any failures associated with the call. + Failures []types.RepositoryScanningConfigurationFailure + + // The scanning configuration for the requested repositories. + ScanningConfigurations []types.RepositoryScanningConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchGetRepositoryScanningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchGetRepositoryScanningConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchGetRepositoryScanningConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpBatchGetRepositoryScanningConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetRepositoryScanningConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opBatchGetRepositoryScanningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "BatchGetRepositoryScanningConfiguration", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go new file mode 100644 index 000000000..ac1fe8a37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CompleteLayerUpload.go @@ -0,0 +1,151 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Informs Amazon ECR that the image layer upload has completed for a specified +// registry, repository name, and upload ID. You can optionally provide a sha256 +// digest of the image layer for data validation purposes. When an image is pushed, +// the CompleteLayerUpload API is called once per each new image layer to verify +// that the upload has completed. This operation is used by the Amazon ECR proxy +// and is not generally used by customers for pulling and pushing images. 
In most +// cases, you should use the docker CLI to pull, tag, and push images. +func (c *Client) CompleteLayerUpload(ctx context.Context, params *CompleteLayerUploadInput, optFns ...func(*Options)) (*CompleteLayerUploadOutput, error) { + if params == nil { + params = &CompleteLayerUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CompleteLayerUpload", params, optFns, c.addOperationCompleteLayerUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CompleteLayerUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CompleteLayerUploadInput struct { + + // The sha256 digest of the image layer. + // + // This member is required. + LayerDigests []string + + // The name of the repository to associate with the image layer. + // + // This member is required. + RepositoryName *string + + // The upload ID from a previous InitiateLayerUpload operation to associate with + // the image layer. + // + // This member is required. + UploadId *string + + // The Amazon Web Services account ID associated with the registry to which to + // upload layers. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type CompleteLayerUploadOutput struct { + + // The sha256 digest of the image layer. + LayerDigest *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // The upload ID associated with the layer. + UploadId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCompleteLayerUpload{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCompleteLayerUpload{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCompleteLayerUploadValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteLayerUpload(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, 
options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCompleteLayerUpload(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "CompleteLayerUpload", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go new file mode 100644 index 000000000..6a67466e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreatePullThroughCacheRule.go @@ -0,0 +1,144 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Creates a pull through cache rule. A pull through cache rule provides a way to +// cache images from an external public registry in your Amazon ECR private +// registry. +func (c *Client) CreatePullThroughCacheRule(ctx context.Context, params *CreatePullThroughCacheRuleInput, optFns ...func(*Options)) (*CreatePullThroughCacheRuleOutput, error) { + if params == nil { + params = &CreatePullThroughCacheRuleInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreatePullThroughCacheRule", params, optFns, c.addOperationCreatePullThroughCacheRuleMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreatePullThroughCacheRuleOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreatePullThroughCacheRuleInput struct { + + // The repository name prefix to use when caching images from the source registry. + // + // This member is required. + EcrRepositoryPrefix *string + + // The registry URL of the upstream public registry to use as the source for the + // pull through cache rule. + // + // This member is required. + UpstreamRegistryUrl *string + + // The Amazon Web Services account ID associated with the registry to create the + // pull through cache rule for. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type CreatePullThroughCacheRuleOutput struct { + + // The date and time, in JavaScript date format, when the pull through cache rule + // was created. + CreatedAt *time.Time + + // The Amazon ECR repository prefix associated with the pull through cache rule. + EcrRepositoryPrefix *string + + // The registry ID associated with the request. + RegistryId *string + + // The upstream registry URL associated with the pull through cache rule. + UpstreamRegistryUrl *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreatePullThroughCacheRuleMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreatePullThroughCacheRule{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreatePullThroughCacheRule{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreatePullThroughCacheRuleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreatePullThroughCacheRule(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreatePullThroughCacheRule(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "CreatePullThroughCacheRule", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go new file mode 100644 index 000000000..1f22e5c82 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_CreateRepository.go @@ -0,0 +1,150 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a repository. For more information, see Amazon ECR repositories +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) in +// the Amazon Elastic Container Registry User Guide. 
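+//
+// Illustrative usage (editor's sketch, not part of the generated code; the
+// namespaced repository name and the IMMUTABLE tag setting are placeholders
+// echoing the field documentation below):
+//
+//	out, err := client.CreateRepository(ctx, &ecr.CreateRepositoryInput{
+//		RepositoryName:     aws.String("project-a/nginx-web-app"),
+//		ImageTagMutability: types.ImageTagMutabilityImmutable,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(aws.ToString(out.Repository.RepositoryUri))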
+func (c *Client) CreateRepository(ctx context.Context, params *CreateRepositoryInput, optFns ...func(*Options)) (*CreateRepositoryOutput, error) { + if params == nil { + params = &CreateRepositoryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateRepository", params, optFns, c.addOperationCreateRepositoryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateRepositoryOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateRepositoryInput struct { + + // The name to use for the repository. The repository name may be specified on its + // own (such as nginx-web-app) or it can be prepended with a namespace to group the + // repository into a category (such as project-a/nginx-web-app). + // + // This member is required. + RepositoryName *string + + // The encryption configuration for the repository. This determines how the + // contents of your repository are encrypted at rest. + EncryptionConfiguration *types.EncryptionConfiguration + + // The image scanning configuration for the repository. This determines whether + // images are scanned for known vulnerabilities after being pushed to the + // repository. + ImageScanningConfiguration *types.ImageScanningConfiguration + + // The tag mutability setting for the repository. If this parameter is omitted, the + // default setting of MUTABLE will be used which will allow image tags to be + // overwritten. If IMMUTABLE is specified, all image tags within the repository + // will be immutable which will prevent them from being overwritten. + ImageTagMutability types.ImageTagMutability + + // The Amazon Web Services account ID associated with the registry to create the + // repository. If you do not specify a registry, the default registry is assumed. + RegistryId *string + + // The metadata that you apply to the repository to help you categorize and + // organize them. Each tag consists of a key and an optional value, both of which + // you define. Tag keys can have a maximum character length of 128 characters, and + // tag values can have a maximum length of 256 characters. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateRepositoryOutput struct { + + // The repository that was created. + Repository *types.Repository + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateRepository{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateRepository{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateRepositoryValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateRepository(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateRepository(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "CreateRepository", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go new file mode 100644 index 000000000..1f215085f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteLifecyclePolicy.go @@ -0,0 +1,135 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Deletes the lifecycle policy associated with the specified repository. +func (c *Client) DeleteLifecyclePolicy(ctx context.Context, params *DeleteLifecyclePolicyInput, optFns ...func(*Options)) (*DeleteLifecyclePolicyOutput, error) { + if params == nil { + params = &DeleteLifecyclePolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteLifecyclePolicy", params, optFns, c.addOperationDeleteLifecyclePolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteLifecyclePolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteLifecyclePolicyInput struct { + + // The name of the repository. 
+ // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DeleteLifecyclePolicyOutput struct { + + // The time stamp of the last time that the lifecycle policy was run. + LastEvaluatedAt *time.Time + + // The JSON lifecycle policy text. + LifecyclePolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteLifecyclePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteLifecyclePolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteLifecyclePolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteLifecyclePolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteLifecyclePolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteLifecyclePolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DeleteLifecyclePolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go new file mode 100644 index 000000000..1f4bd9701 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeletePullThroughCacheRule.go @@ -0,0 +1,136 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Deletes a pull through cache rule. +func (c *Client) DeletePullThroughCacheRule(ctx context.Context, params *DeletePullThroughCacheRuleInput, optFns ...func(*Options)) (*DeletePullThroughCacheRuleOutput, error) { + if params == nil { + params = &DeletePullThroughCacheRuleInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeletePullThroughCacheRule", params, optFns, c.addOperationDeletePullThroughCacheRuleMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeletePullThroughCacheRuleOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeletePullThroughCacheRuleInput struct { + + // The Amazon ECR repository prefix associated with the pull through cache rule to + // delete. + // + // This member is required. + EcrRepositoryPrefix *string + + // The Amazon Web Services account ID associated with the registry that contains + // the pull through cache rule. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DeletePullThroughCacheRuleOutput struct { + + // The timestamp associated with the pull through cache rule. + CreatedAt *time.Time + + // The Amazon ECR repository prefix associated with the request. + EcrRepositoryPrefix *string + + // The registry ID associated with the request. + RegistryId *string + + // The upstream registry URL associated with the pull through cache rule. + UpstreamRegistryUrl *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeletePullThroughCacheRuleMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeletePullThroughCacheRule{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeletePullThroughCacheRule{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeletePullThroughCacheRuleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePullThroughCacheRule(options.Region), middleware.Before); err != nil { + return err + } + if err = 
addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeletePullThroughCacheRule(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DeletePullThroughCacheRule", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go new file mode 100644 index 000000000..a9230f956 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRegistryPolicy.go @@ -0,0 +1,114 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the registry permissions policy. +func (c *Client) DeleteRegistryPolicy(ctx context.Context, params *DeleteRegistryPolicyInput, optFns ...func(*Options)) (*DeleteRegistryPolicyOutput, error) { + if params == nil { + params = &DeleteRegistryPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteRegistryPolicy", params, optFns, c.addOperationDeleteRegistryPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteRegistryPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteRegistryPolicyInput struct { + noSmithyDocumentSerde +} + +type DeleteRegistryPolicyOutput struct { + + // The contents of the registry permissions policy that was deleted. + PolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteRegistryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteRegistryPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteRegistryPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRegistryPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteRegistryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DeleteRegistryPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go new file mode 100644 index 000000000..525834b91 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepository.go @@ -0,0 +1,130 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a repository. If the repository contains images, you must either delete +// all images in the repository or use the force option to delete the repository. 
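+//
+// Illustrative usage (editor's sketch, not part of the generated code; Force
+// is set only to demonstrate the option described above, and the repository
+// name is a placeholder):
+//
+//	out, err := client.DeleteRepository(ctx, &ecr.DeleteRepositoryInput{
+//		RepositoryName: aws.String("my-repo"),
+//		Force:          true,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(aws.ToString(out.Repository.RepositoryName))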
+func (c *Client) DeleteRepository(ctx context.Context, params *DeleteRepositoryInput, optFns ...func(*Options)) (*DeleteRepositoryOutput, error) { + if params == nil { + params = &DeleteRepositoryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteRepository", params, optFns, c.addOperationDeleteRepositoryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteRepositoryOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteRepositoryInput struct { + + // The name of the repository to delete. + // + // This member is required. + RepositoryName *string + + // If a repository contains images, forces the deletion. + Force bool + + // The Amazon Web Services account ID associated with the registry that contains + // the repository to delete. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DeleteRepositoryOutput struct { + + // The repository that was deleted. + Repository *types.Repository + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteRepository{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteRepository{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteRepositoryValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepository(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteRepository(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DeleteRepository", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go new file mode 100644 index 
000000000..dd23305f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DeleteRepositoryPolicy.go @@ -0,0 +1,132 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the repository policy associated with the specified repository. +func (c *Client) DeleteRepositoryPolicy(ctx context.Context, params *DeleteRepositoryPolicyInput, optFns ...func(*Options)) (*DeleteRepositoryPolicyOutput, error) { + if params == nil { + params = &DeleteRepositoryPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteRepositoryPolicy", params, optFns, c.addOperationDeleteRepositoryPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteRepositoryPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteRepositoryPolicyInput struct { + + // The name of the repository that is associated with the repository policy to + // delete. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository policy to delete. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DeleteRepositoryPolicyOutput struct { + + // The JSON repository policy that was deleted from the repository. + PolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteRepositoryPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepositoryPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DeleteRepositoryPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go new file mode 100644 index 000000000..0edd166a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageReplicationStatus.go @@ -0,0 +1,136 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the replication status for a specified image. 
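+//
+// Illustrative usage (editor's sketch, not part of the generated code; the
+// repository name and digest are placeholders):
+//
+//	out, err := client.DescribeImageReplicationStatus(ctx, &ecr.DescribeImageReplicationStatusInput{
+//		RepositoryName: aws.String("my-repo"),
+//		ImageId:        &types.ImageIdentifier{ImageDigest: aws.String("sha256:...")},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, s := range out.ReplicationStatuses {
+//		fmt.Println(aws.ToString(s.Region), s.Status)
+//	}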
+func (c *Client) DescribeImageReplicationStatus(ctx context.Context, params *DescribeImageReplicationStatusInput, optFns ...func(*Options)) (*DescribeImageReplicationStatusOutput, error) { + if params == nil { + params = &DescribeImageReplicationStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeImageReplicationStatus", params, optFns, c.addOperationDescribeImageReplicationStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeImageReplicationStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeImageReplicationStatusInput struct { + + // An object with identifying information for an image in an Amazon ECR repository. + // + // This member is required. + ImageId *types.ImageIdentifier + + // The name of the repository that the image is in. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry. If you do not + // specify a registry, the default registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DescribeImageReplicationStatusOutput struct { + + // An object with identifying information for an image in an Amazon ECR repository. + ImageId *types.ImageIdentifier + + // The replication status details for the images in the specified repository. + ReplicationStatuses []types.ImageReplicationStatus + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeImageReplicationStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImageReplicationStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImageReplicationStatus{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeImageReplicationStatusValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageReplicationStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err 
+ } + return nil +} + +func newServiceMetadataMiddleware_opDescribeImageReplicationStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DescribeImageReplicationStatus", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go new file mode 100644 index 000000000..5908d7ed0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImageScanFindings.go @@ -0,0 +1,447 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "github.com/jmespath/go-jmespath" + "time" +) + +// Returns the scan findings for the specified image. +func (c *Client) DescribeImageScanFindings(ctx context.Context, params *DescribeImageScanFindingsInput, optFns ...func(*Options)) (*DescribeImageScanFindingsOutput, error) { + if params == nil { + params = &DescribeImageScanFindingsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeImageScanFindings", params, optFns, c.addOperationDescribeImageScanFindingsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeImageScanFindingsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeImageScanFindingsInput struct { + + // An object with identifying information for an image in an Amazon ECR repository. + // + // This member is required. + ImageId *types.ImageIdentifier + + // The repository for the image for which to describe the scan findings. + // + // This member is required. + RepositoryName *string + + // The maximum number of image scan results returned by DescribeImageScanFindings + // in paginated output. When this parameter is used, DescribeImageScanFindings only + // returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeImageScanFindings request with the returned nextToken value. + // This value can be between 1 and 1000. If this parameter is not used, then + // DescribeImageScanFindings returns up to 100 results and a nextToken value, if + // applicable. + MaxResults *int32 + + // The nextToken value returned from a previous paginated DescribeImageScanFindings + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to describe the image scan findings for. If you do not + // specify a registry, the default registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DescribeImageScanFindingsOutput struct { + + // An object with identifying information for an image in an Amazon ECR repository. 
+ ImageId *types.ImageIdentifier + + // The information contained in the image scan findings. + ImageScanFindings *types.ImageScanFindings + + // The current state of the scan. + ImageScanStatus *types.ImageScanStatus + + // The nextToken value to include in a future DescribeImageScanFindings request. + // When the results of a DescribeImageScanFindings request exceed maxResults, this + // value can be used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeImageScanFindingsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImageScanFindings{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImageScanFindings{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeImageScanFindingsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageScanFindings(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// DescribeImageScanFindingsAPIClient is a client that implements the +// DescribeImageScanFindings operation. +type DescribeImageScanFindingsAPIClient interface { + DescribeImageScanFindings(context.Context, *DescribeImageScanFindingsInput, ...func(*Options)) (*DescribeImageScanFindingsOutput, error) +} + +var _ DescribeImageScanFindingsAPIClient = (*Client)(nil) + +// DescribeImageScanFindingsPaginatorOptions is the paginator options for +// DescribeImageScanFindings +type DescribeImageScanFindingsPaginatorOptions struct { + // The maximum number of image scan results returned by DescribeImageScanFindings + // in paginated output. When this parameter is used, DescribeImageScanFindings only + // returns maxResults results in a single page along with a nextToken response + // element. 
The remaining results of the initial request can be seen by sending + // another DescribeImageScanFindings request with the returned nextToken value. + // This value can be between 1 and 1000. If this parameter is not used, then + // DescribeImageScanFindings returns up to 100 results and a nextToken value, if + // applicable. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// DescribeImageScanFindingsPaginator is a paginator for DescribeImageScanFindings +type DescribeImageScanFindingsPaginator struct { + options DescribeImageScanFindingsPaginatorOptions + client DescribeImageScanFindingsAPIClient + params *DescribeImageScanFindingsInput + nextToken *string + firstPage bool +} + +// NewDescribeImageScanFindingsPaginator returns a new +// DescribeImageScanFindingsPaginator +func NewDescribeImageScanFindingsPaginator(client DescribeImageScanFindingsAPIClient, params *DescribeImageScanFindingsInput, optFns ...func(*DescribeImageScanFindingsPaginatorOptions)) *DescribeImageScanFindingsPaginator { + if params == nil { + params = &DescribeImageScanFindingsInput{} + } + + options := DescribeImageScanFindingsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeImageScanFindingsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeImageScanFindingsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeImageScanFindings page. +func (p *DescribeImageScanFindingsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImageScanFindingsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.DescribeImageScanFindings(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ImageScanCompleteWaiterOptions are waiter options for ImageScanCompleteWaiter +type ImageScanCompleteWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // ImageScanCompleteWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, ImageScanCompleteWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. 
+ MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *DescribeImageScanFindingsInput, *DescribeImageScanFindingsOutput, error) (bool, error) +} + +// ImageScanCompleteWaiter defines the waiters for ImageScanComplete +type ImageScanCompleteWaiter struct { + client DescribeImageScanFindingsAPIClient + + options ImageScanCompleteWaiterOptions +} + +// NewImageScanCompleteWaiter constructs a ImageScanCompleteWaiter. +func NewImageScanCompleteWaiter(client DescribeImageScanFindingsAPIClient, optFns ...func(*ImageScanCompleteWaiterOptions)) *ImageScanCompleteWaiter { + options := ImageScanCompleteWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = imageScanCompleteStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &ImageScanCompleteWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for ImageScanComplete waiter. The maxWaitDur is +// the maximum wait duration the waiter will wait. The maxWaitDur is required and +// must be greater than zero. +func (w *ImageScanCompleteWaiter) Wait(ctx context.Context, params *DescribeImageScanFindingsInput, maxWaitDur time.Duration, optFns ...func(*ImageScanCompleteWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for ImageScanComplete waiter and returns +// the output of the successful operation. The maxWaitDur is the maximum wait +// duration the waiter will wait. The maxWaitDur is required and must be greater +// than zero. +func (w *ImageScanCompleteWaiter) WaitForOutput(ctx context.Context, params *DescribeImageScanFindingsInput, maxWaitDur time.Duration, optFns ...func(*ImageScanCompleteWaiterOptions)) (*DescribeImageScanFindingsOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) 
+ apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeImageScanFindings(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for ImageScanComplete waiter") +} + +func imageScanCompleteStateRetryable(ctx context.Context, input *DescribeImageScanFindingsInput, output *DescribeImageScanFindingsOutput, err error) (bool, error) { + + if err == nil { + pathValue, err := jmespath.Search("imageScanStatus.status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "COMPLETE" + value, ok := pathValue.(types.ScanStatus) + if !ok { + return false, fmt.Errorf("waiter comparator expected types.ScanStatus value, got %T", pathValue) + } + + if string(value) == expectedValue { + return false, nil + } + } + + if err == nil { + pathValue, err := jmespath.Search("imageScanStatus.status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "FAILED" + value, ok := pathValue.(types.ScanStatus) + if !ok { + return false, fmt.Errorf("waiter comparator expected types.ScanStatus value, got %T", pathValue) + } + + if string(value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + + return true, nil +} + +func newServiceMetadataMiddleware_opDescribeImageScanFindings(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DescribeImageScanFindings", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go new file mode 100644 index 000000000..f4b654290 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeImages.go @@ -0,0 +1,257 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns metadata about the images in a repository. Beginning with Docker version +// 1.9, the Docker client compresses image layers before pushing them to a V2 +// Docker registry. The output of the docker images command shows the uncompressed +// image size, so it may return a larger image size than the image sizes returned +// by DescribeImages. 
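A usage sketch for orientation (illustrative, not part of the vendored file): a one-shot DescribeImages call from consumer code, assuming a configured *ecr.Client, imports of "context", "fmt", and "github.com/aws/aws-sdk-go-v2/aws", and a hypothetical repository name; ImageDigest and ImageSizeInBytes are fields of the vendored types.ImageDetail.

func printImageSizes(ctx context.Context, client *ecr.Client) error {
	out, err := client.DescribeImages(ctx, &ecr.DescribeImagesInput{
		RepositoryName: aws.String("my-repo"), // hypothetical repository
		MaxResults:     aws.Int32(100),        // between 1 and 1000
	})
	if err != nil {
		return err
	}
	for _, d := range out.ImageDetails {
		// Sizes reflect the compressed image layers, per the note above.
		fmt.Printf("%s: %d bytes\n", aws.ToString(d.ImageDigest), aws.ToInt64(d.ImageSizeInBytes))
	}
	return nil
}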
+func (c *Client) DescribeImages(ctx context.Context, params *DescribeImagesInput, optFns ...func(*Options)) (*DescribeImagesOutput, error) { + if params == nil { + params = &DescribeImagesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeImages", params, optFns, c.addOperationDescribeImagesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeImagesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeImagesInput struct { + + // The repository that contains the images to describe. + // + // This member is required. + RepositoryName *string + + // The filter key and value with which to filter your DescribeImages results. + Filter *types.DescribeImagesFilter + + // The list of image IDs for the requested repository. + ImageIds []types.ImageIdentifier + + // The maximum number of repository results returned by DescribeImages in paginated + // output. When this parameter is used, DescribeImages only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeImages + // request with the returned nextToken value. This value can be between 1 and 1000. + // If this parameter is not used, then DescribeImages returns up to 100 results and + // a nextToken value, if applicable. This option cannot be used when you specify + // images with imageIds. + MaxResults *int32 + + // The nextToken value returned from a previous paginated DescribeImages request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + // This option cannot be used when you specify images with imageIds. + NextToken *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to describe images. If you do not specify a registry, + // the default registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DescribeImagesOutput struct { + + // A list of ImageDetail objects that contain data about the image. + ImageDetails []types.ImageDetail + + // The nextToken value to include in a future DescribeImages request. When the + // results of a DescribeImages request exceed maxResults, this value can be used to + // retrieve the next page of results. This value is null when there are no more + // results to return. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImages{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImages{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeImagesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImages(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// DescribeImagesAPIClient is a client that implements the DescribeImages +// operation. +type DescribeImagesAPIClient interface { + DescribeImages(context.Context, *DescribeImagesInput, ...func(*Options)) (*DescribeImagesOutput, error) +} + +var _ DescribeImagesAPIClient = (*Client)(nil) + +// DescribeImagesPaginatorOptions is the paginator options for DescribeImages +type DescribeImagesPaginatorOptions struct { + // The maximum number of repository results returned by DescribeImages in paginated + // output. When this parameter is used, DescribeImages only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeImages + // request with the returned nextToken value. This value can be between 1 and 1000. + // If this parameter is not used, then DescribeImages returns up to 100 results and + // a nextToken value, if applicable. This option cannot be used when you specify + // images with imageIds. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// DescribeImagesPaginator is a paginator for DescribeImages +type DescribeImagesPaginator struct { + options DescribeImagesPaginatorOptions + client DescribeImagesAPIClient + params *DescribeImagesInput + nextToken *string + firstPage bool +} + +// NewDescribeImagesPaginator returns a new DescribeImagesPaginator +func NewDescribeImagesPaginator(client DescribeImagesAPIClient, params *DescribeImagesInput, optFns ...func(*DescribeImagesPaginatorOptions)) *DescribeImagesPaginator { + if params == nil { + params = &DescribeImagesInput{} + } + + options := DescribeImagesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeImagesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeImagesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeImages page. +func (p *DescribeImagesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImagesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.DescribeImages(ctx, &params, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opDescribeImages(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DescribeImages", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go new file mode 100644 index 000000000..2bfefbd7a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribePullThroughCacheRules.go @@ -0,0 +1,249 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the pull through cache rules for a registry.
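A sketch of draining this operation through the DescribePullThroughCacheRulesPaginator generated further below (illustrative, not part of the vendored file; assumes a configured *ecr.Client and imports of "context", "fmt", and "github.com/aws/aws-sdk-go-v2/aws"; EcrRepositoryPrefix is a field of the vendored types.PullThroughCacheRule):

func listCacheRulePrefixes(ctx context.Context, client *ecr.Client) error {
	p := ecr.NewDescribePullThroughCacheRulesPaginator(client, nil,
		func(o *ecr.DescribePullThroughCacheRulesPaginatorOptions) {
			o.Limit = 50                  // page size; zero falls back to the service default
			o.StopOnDuplicateToken = true // stop if the service repeats a nextToken
		})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, rule := range page.PullThroughCacheRules {
			fmt.Println(aws.ToString(rule.EcrRepositoryPrefix))
		}
	}
	return nil
}

The same HasMorePages/NextPage loop works for every paginator in this package, since they are generated from one template.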
+func (c *Client) DescribePullThroughCacheRules(ctx context.Context, params *DescribePullThroughCacheRulesInput, optFns ...func(*Options)) (*DescribePullThroughCacheRulesOutput, error) { + if params == nil { + params = &DescribePullThroughCacheRulesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribePullThroughCacheRules", params, optFns, c.addOperationDescribePullThroughCacheRulesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribePullThroughCacheRulesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribePullThroughCacheRulesInput struct { + + // The Amazon ECR repository prefixes associated with the pull through cache rules + // to return. If no repository prefix value is specified, all pull through cache + // rules are returned. + EcrRepositoryPrefixes []string + + // The maximum number of pull through cache rules returned by + // DescribePullThroughCacheRulesRequest in paginated output. When this parameter is + // used, DescribePullThroughCacheRulesRequest only returns maxResults results in a + // single page along with a nextToken response element. The remaining results of + // the initial request can be seen by sending another + // DescribePullThroughCacheRulesRequest request with the returned nextToken value. + // This value can be between 1 and 1000. If this parameter is not used, then + // DescribePullThroughCacheRulesRequest returns up to 100 results and a nextToken + // value, if applicable. + MaxResults *int32 + + // The nextToken value returned from a previous paginated + // DescribePullThroughCacheRulesRequest request where maxResults was used and the + // results exceeded the value of that parameter. Pagination continues from the end + // of the previous results that returned the nextToken value. This value is null + // when there are no more results to return. + NextToken *string + + // The Amazon Web Services account ID associated with the registry to return the + // pull through cache rules for. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DescribePullThroughCacheRulesOutput struct { + + // The nextToken value to include in a future DescribePullThroughCacheRulesRequest + // request. When the results of a DescribePullThroughCacheRulesRequest request + // exceed maxResults, this value can be used to retrieve the next page of results. + // This value is null when there are no more results to return. + NextToken *string + + // The details of the pull through cache rules. + PullThroughCacheRules []types.PullThroughCacheRule + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribePullThroughCacheRulesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribePullThroughCacheRules{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribePullThroughCacheRules{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribePullThroughCacheRules(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// DescribePullThroughCacheRulesAPIClient is a client that implements the +// DescribePullThroughCacheRules operation. +type DescribePullThroughCacheRulesAPIClient interface { + DescribePullThroughCacheRules(context.Context, *DescribePullThroughCacheRulesInput, ...func(*Options)) (*DescribePullThroughCacheRulesOutput, error) +} + +var _ DescribePullThroughCacheRulesAPIClient = (*Client)(nil) + +// DescribePullThroughCacheRulesPaginatorOptions is the paginator options for +// DescribePullThroughCacheRules +type DescribePullThroughCacheRulesPaginatorOptions struct { + // The maximum number of pull through cache rules returned by + // DescribePullThroughCacheRulesRequest in paginated output. When this parameter is + // used, DescribePullThroughCacheRulesRequest only returns maxResults results in a + // single page along with a nextToken response element. The remaining results of + // the initial request can be seen by sending another + // DescribePullThroughCacheRulesRequest request with the returned nextToken value. + // This value can be between 1 and 1000. If this parameter is not used, then + // DescribePullThroughCacheRulesRequest returns up to 100 results and a nextToken + // value, if applicable. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// DescribePullThroughCacheRulesPaginator is a paginator for +// DescribePullThroughCacheRules +type DescribePullThroughCacheRulesPaginator struct { + options DescribePullThroughCacheRulesPaginatorOptions + client DescribePullThroughCacheRulesAPIClient + params *DescribePullThroughCacheRulesInput + nextToken *string + firstPage bool +} + +// NewDescribePullThroughCacheRulesPaginator returns a new +// DescribePullThroughCacheRulesPaginator +func NewDescribePullThroughCacheRulesPaginator(client DescribePullThroughCacheRulesAPIClient, params *DescribePullThroughCacheRulesInput, optFns ...func(*DescribePullThroughCacheRulesPaginatorOptions)) *DescribePullThroughCacheRulesPaginator { + if params == nil { + params = &DescribePullThroughCacheRulesInput{} + } + + options := DescribePullThroughCacheRulesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribePullThroughCacheRulesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribePullThroughCacheRulesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribePullThroughCacheRules page. +func (p *DescribePullThroughCacheRulesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribePullThroughCacheRulesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.DescribePullThroughCacheRules(ctx, &params, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opDescribePullThroughCacheRules(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DescribePullThroughCacheRules", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go new file mode 100644 index 000000000..8d5f44222 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRegistry.go @@ -0,0 +1,117 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes the settings for a registry. The replication configuration for a +// repository can be created or updated with the PutReplicationConfiguration API +// action.
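A sketch of the simplest call shape (illustrative, not part of the vendored file; assumes a configured *ecr.Client and imports of "context", "fmt", and "github.com/aws/aws-sdk-go-v2/aws"):

func printRegistryID(ctx context.Context, client *ecr.Client) error {
	// The variadic optFns arguments could adjust the client Options for this
	// call only; a basic invocation needs none.
	out, err := client.DescribeRegistry(ctx, &ecr.DescribeRegistryInput{})
	if err != nil {
		return err
	}
	fmt.Println("registry ID:", aws.ToString(out.RegistryId))
	return nil
}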
+func (c *Client) DescribeRegistry(ctx context.Context, params *DescribeRegistryInput, optFns ...func(*Options)) (*DescribeRegistryOutput, error) { + if params == nil { + params = &DescribeRegistryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeRegistry", params, optFns, c.addOperationDescribeRegistryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeRegistryOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeRegistryInput struct { + noSmithyDocumentSerde +} + +type DescribeRegistryOutput struct { + + // The ID of the registry. + RegistryId *string + + // The replication configuration for the registry. + ReplicationConfiguration *types.ReplicationConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeRegistryMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeRegistry{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeRegistry{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRegistry(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeRegistry(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DescribeRegistry", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go new file mode 100644 index 000000000..cef08268c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_DescribeRepositories.go @@ -0,0 +1,247 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes image repositories in a registry. +func (c *Client) DescribeRepositories(ctx context.Context, params *DescribeRepositoriesInput, optFns ...func(*Options)) (*DescribeRepositoriesOutput, error) { + if params == nil { + params = &DescribeRepositoriesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeRepositories", params, optFns, c.addOperationDescribeRepositoriesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeRepositoriesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeRepositoriesInput struct { + + // The maximum number of repository results returned by DescribeRepositories in + // paginated output. When this parameter is used, DescribeRepositories only returns + // maxResults results in a single page along with a nextToken response element. The + // remaining results of the initial request can be seen by sending another + // DescribeRepositories request with the returned nextToken value. This value can + // be between 1 and 1000. If this parameter is not used, then DescribeRepositories + // returns up to 100 results and a nextToken value, if applicable. This option + // cannot be used when you specify repositories with repositoryNames. + MaxResults *int32 + + // The nextToken value returned from a previous paginated DescribeRepositories + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. This option cannot be used when you specify repositories with + // repositoryNames. This token should be treated as an opaque identifier that is + // only used to retrieve the next items in a list and not for other programmatic + // purposes. + NextToken *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repositories to be described. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string + + // A list of repositories to describe. If this parameter is omitted, then all + // repositories in a registry are described. + RepositoryNames []string + + noSmithyDocumentSerde +} + +type DescribeRepositoriesOutput struct { + + // The nextToken value to include in a future DescribeRepositories request. When + // the results of a DescribeRepositories request exceed maxResults, this value can + // be used to retrieve the next page of results. This value is null when there are + // no more results to return. + NextToken *string + + // A list of repository objects corresponding to valid repositories. + Repositories []types.Repository + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeRepositories{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeRepositories{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRepositories(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// DescribeRepositoriesAPIClient is a client that implements the +// DescribeRepositories operation. +type DescribeRepositoriesAPIClient interface { + DescribeRepositories(context.Context, *DescribeRepositoriesInput, ...func(*Options)) (*DescribeRepositoriesOutput, error) +} + +var _ DescribeRepositoriesAPIClient = (*Client)(nil) + +// DescribeRepositoriesPaginatorOptions is the paginator options for +// DescribeRepositories +type DescribeRepositoriesPaginatorOptions struct { + // The maximum number of repository results returned by DescribeRepositories in + // paginated output. When this parameter is used, DescribeRepositories only returns + // maxResults results in a single page along with a nextToken response element. The + // remaining results of the initial request can be seen by sending another + // DescribeRepositories request with the returned nextToken value. This value can + // be between 1 and 1000. If this parameter is not used, then DescribeRepositories + // returns up to 100 results and a nextToken value, if applicable. This option + // cannot be used when you specify repositories with repositoryNames. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// DescribeRepositoriesPaginator is a paginator for DescribeRepositories +type DescribeRepositoriesPaginator struct { + options DescribeRepositoriesPaginatorOptions + client DescribeRepositoriesAPIClient + params *DescribeRepositoriesInput + nextToken *string + firstPage bool +} + +// NewDescribeRepositoriesPaginator returns a new DescribeRepositoriesPaginator +func NewDescribeRepositoriesPaginator(client DescribeRepositoriesAPIClient, params *DescribeRepositoriesInput, optFns ...func(*DescribeRepositoriesPaginatorOptions)) *DescribeRepositoriesPaginator { + if params == nil { + params = &DescribeRepositoriesInput{} + } + + options := DescribeRepositoriesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeRepositoriesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeRepositoriesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeRepositories page. +func (p *DescribeRepositoriesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeRepositoriesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.DescribeRepositories(ctx, &params, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opDescribeRepositories(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "DescribeRepositories", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go new file mode 100644 index 000000000..4d6dfbbd6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetAuthorizationToken.go @@ -0,0 +1,132 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves an authorization token. An authorization token represents your IAM +// authentication credentials and can be used to access any Amazon ECR registry +// that your IAM principal has access to. The authorization token is valid for 12 +// hours. The authorizationToken returned is a base64 encoded string that can be +// decoded and used in a docker login command to authenticate to a registry. The +// CLI offers a get-login-password command that simplifies the login process.
For +// more information, see Registry authentication +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth) +// in the Amazon Elastic Container Registry User Guide. +func (c *Client) GetAuthorizationToken(ctx context.Context, params *GetAuthorizationTokenInput, optFns ...func(*Options)) (*GetAuthorizationTokenOutput, error) { + if params == nil { + params = &GetAuthorizationTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetAuthorizationToken", params, optFns, c.addOperationGetAuthorizationTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetAuthorizationTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetAuthorizationTokenInput struct { + + // A list of Amazon Web Services account IDs that are associated with the + // registries for which to get AuthorizationData objects. If you do not specify a + // registry, the default registry is assumed. + // + // Deprecated: This field is deprecated. The returned authorization token can be + // used to access any Amazon ECR registry that the IAM principal has access to, + // specifying a registry ID doesn't change the permissions scope of the + // authorization token. + RegistryIds []string + + noSmithyDocumentSerde +} + +type GetAuthorizationTokenOutput struct { + + // A list of authorization token data objects that correspond to the registryIds + // values in the request. + AuthorizationData []types.AuthorizationData + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetAuthorizationToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetAuthorizationToken{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAuthorizationToken(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetAuthorizationToken(region string) 
*awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "GetAuthorizationToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go new file mode 100644 index 000000000..82b584fe3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetDownloadUrlForLayer.go @@ -0,0 +1,138 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. +// You can only get URLs for image layers that are referenced in an image. When an +// image is pulled, the GetDownloadUrlForLayer API is called once per image layer +// that is not already cached. This operation is used by the Amazon ECR proxy and +// is not generally used by customers for pulling and pushing images. In most +// cases, you should use the docker CLI to pull, tag, and push images. +func (c *Client) GetDownloadUrlForLayer(ctx context.Context, params *GetDownloadUrlForLayerInput, optFns ...func(*Options)) (*GetDownloadUrlForLayerOutput, error) { + if params == nil { + params = &GetDownloadUrlForLayerInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetDownloadUrlForLayer", params, optFns, c.addOperationGetDownloadUrlForLayerMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetDownloadUrlForLayerOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetDownloadUrlForLayerInput struct { + + // The digest of the image layer to download. + // + // This member is required. + LayerDigest *string + + // The name of the repository that is associated with the image layer to download. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the image layer to download. If you do not specify a registry, the default + // registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type GetDownloadUrlForLayerOutput struct { + + // The pre-signed Amazon S3 download URL for the requested layer. + DownloadUrl *string + + // The digest of the image layer to download. + LayerDigest *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetDownloadUrlForLayerMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetDownloadUrlForLayer{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetDownloadUrlForLayer{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetDownloadUrlForLayerValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetDownloadUrlForLayer(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetDownloadUrlForLayer(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "GetDownloadUrlForLayer", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go new file mode 100644 index 000000000..76e8a061c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicy.go @@ -0,0 +1,135 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Retrieves the lifecycle policy for the specified repository. +func (c *Client) GetLifecyclePolicy(ctx context.Context, params *GetLifecyclePolicyInput, optFns ...func(*Options)) (*GetLifecyclePolicyOutput, error) { + if params == nil { + params = &GetLifecyclePolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetLifecyclePolicy", params, optFns, c.addOperationGetLifecyclePolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetLifecyclePolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetLifecyclePolicyInput struct { + + // The name of the repository. 
+ // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type GetLifecyclePolicyOutput struct { + + // The time stamp of the last time that the lifecycle policy was run. + LastEvaluatedAt *time.Time + + // The JSON lifecycle policy text. + LifecyclePolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetLifecyclePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetLifecyclePolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetLifecyclePolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetLifecyclePolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetLifecyclePolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetLifecyclePolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "GetLifecyclePolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go new file mode 100644 index 000000000..5114fd1b0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetLifecyclePolicyPreview.go @@ -0,0 +1,463 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "github.com/jmespath/go-jmespath" + "time" +) + +// Retrieves the results of the lifecycle policy preview request for the specified +// repository. +func (c *Client) GetLifecyclePolicyPreview(ctx context.Context, params *GetLifecyclePolicyPreviewInput, optFns ...func(*Options)) (*GetLifecyclePolicyPreviewOutput, error) { + if params == nil { + params = &GetLifecyclePolicyPreviewInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetLifecyclePolicyPreview", params, optFns, c.addOperationGetLifecyclePolicyPreviewMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetLifecyclePolicyPreviewOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetLifecyclePolicyPreviewInput struct { + + // The name of the repository. + // + // This member is required. + RepositoryName *string + + // An optional parameter that filters results based on image tag status and all + // tags, if tagged. + Filter *types.LifecyclePolicyPreviewFilter + + // The list of imageIDs to be included. + ImageIds []types.ImageIdentifier + + // The maximum number of repository results returned by + // GetLifecyclePolicyPreviewRequest in
paginated output. When this parameter is + // used, GetLifecyclePolicyPreviewRequest only returns maxResults results in a + // single page along with a nextToken response element. The remaining results of + // the initial request can be seen by sending another + // GetLifecyclePolicyPreviewRequest request with the returned nextToken value. + // This value can be between 1 and 1000. If this parameter is not used, then + // GetLifecyclePolicyPreviewRequest returns up to 100 results and a nextToken + // value, if applicable. This option cannot be used when you specify images with + // imageIds. + MaxResults *int32 + + // The nextToken value returned from a previous paginated + // GetLifecyclePolicyPreviewRequest request where maxResults was used and the + // results exceeded the value of that parameter. Pagination continues from the end + // of the previous results that returned the nextToken value. This value is null + // when there are no more results to return. This option cannot be used when you + // specify images with imageIds. + NextToken *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type GetLifecyclePolicyPreviewOutput struct { + + // The JSON lifecycle policy text. + LifecyclePolicyText *string + + // The nextToken value to include in a future GetLifecyclePolicyPreview request. + // When the results of a GetLifecyclePolicyPreview request exceed maxResults, this + // value can be used to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string + + // The results of the lifecycle policy preview request. + PreviewResults []types.LifecyclePolicyPreviewResult + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // The status of the lifecycle policy preview request. + Status types.LifecyclePolicyPreviewStatus + + // The list of images that is returned as a result of the action. + Summary *types.LifecyclePolicyPreviewSummary + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetLifecyclePolicyPreviewMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetLifecyclePolicyPreview{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetLifecyclePolicyPreview{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetLifecyclePolicyPreviewValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetLifecyclePolicyPreview(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// GetLifecyclePolicyPreviewAPIClient is a client that implements the +// GetLifecyclePolicyPreview operation.
+type GetLifecyclePolicyPreviewAPIClient interface { + GetLifecyclePolicyPreview(context.Context, *GetLifecyclePolicyPreviewInput, ...func(*Options)) (*GetLifecyclePolicyPreviewOutput, error) +} + +var _ GetLifecyclePolicyPreviewAPIClient = (*Client)(nil) + +// GetLifecyclePolicyPreviewPaginatorOptions is the paginator options for +// GetLifecyclePolicyPreview +type GetLifecyclePolicyPreviewPaginatorOptions struct { + // The maximum number of repository results returned by + // GetLifecyclePolicyPreviewRequest in
paginated output. When this parameter is + // used, GetLifecyclePolicyPreviewRequest only returns maxResults results in a + // single page along with a nextToken response element. The remaining results of + // the initial request can be seen by sending another + // GetLifecyclePolicyPreviewRequest request with the returned nextToken value. + // This value can be between 1 and 1000. If this parameter is not used, then + // GetLifecyclePolicyPreviewRequest returns up to 100 results and a nextToken + // value, if applicable. This option cannot be used when you specify images with + // imageIds. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// GetLifecyclePolicyPreviewPaginator is a paginator for GetLifecyclePolicyPreview +type GetLifecyclePolicyPreviewPaginator struct { + options GetLifecyclePolicyPreviewPaginatorOptions + client GetLifecyclePolicyPreviewAPIClient + params *GetLifecyclePolicyPreviewInput + nextToken *string + firstPage bool +} + +// NewGetLifecyclePolicyPreviewPaginator returns a new +// GetLifecyclePolicyPreviewPaginator +func NewGetLifecyclePolicyPreviewPaginator(client GetLifecyclePolicyPreviewAPIClient, params *GetLifecyclePolicyPreviewInput, optFns ...func(*GetLifecyclePolicyPreviewPaginatorOptions)) *GetLifecyclePolicyPreviewPaginator { + if params == nil { + params = &GetLifecyclePolicyPreviewInput{} + } + + options := GetLifecyclePolicyPreviewPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &GetLifecyclePolicyPreviewPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *GetLifecyclePolicyPreviewPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next GetLifecyclePolicyPreview page. +func (p *GetLifecyclePolicyPreviewPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*GetLifecyclePolicyPreviewOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.GetLifecyclePolicyPreview(ctx, &params, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// LifecyclePolicyPreviewCompleteWaiterOptions are waiter options for +// LifecyclePolicyPreviewCompleteWaiter +type LifecyclePolicyPreviewCompleteWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // LifecyclePolicyPreviewCompleteWaiter will use default minimum delay of 5 + // seconds. Note that MinDelay must resolve to a value lesser than or equal to the + // MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, LifecyclePolicyPreviewCompleteWaiter will use default max delay of 120 + // seconds. Note that MaxDelay must resolve to a value greater than or equal to the + // MinDelay.
+ MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *GetLifecyclePolicyPreviewInput, *GetLifecyclePolicyPreviewOutput, error) (bool, error) +} + +// LifecyclePolicyPreviewCompleteWaiter defines the waiters for +// LifecyclePolicyPreviewComplete +type LifecyclePolicyPreviewCompleteWaiter struct { + client GetLifecyclePolicyPreviewAPIClient + + options LifecyclePolicyPreviewCompleteWaiterOptions +} + +// NewLifecyclePolicyPreviewCompleteWaiter constructs a +// LifecyclePolicyPreviewCompleteWaiter. +func NewLifecyclePolicyPreviewCompleteWaiter(client GetLifecyclePolicyPreviewAPIClient, optFns ...func(*LifecyclePolicyPreviewCompleteWaiterOptions)) *LifecyclePolicyPreviewCompleteWaiter { + options := LifecyclePolicyPreviewCompleteWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = lifecyclePolicyPreviewCompleteStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &LifecyclePolicyPreviewCompleteWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for LifecyclePolicyPreviewComplete waiter. The +// maxWaitDur is the maximum wait duration the waiter will wait. The maxWaitDur is +// required and must be greater than zero. +func (w *LifecyclePolicyPreviewCompleteWaiter) Wait(ctx context.Context, params *GetLifecyclePolicyPreviewInput, maxWaitDur time.Duration, optFns ...func(*LifecyclePolicyPreviewCompleteWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for LifecyclePolicyPreviewComplete +// waiter and returns the output of the successful operation. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. 
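The WaitForOutput method that follows drives the polling loop; a short sketch of typical waiter usage (illustrative only; the *ecr.Client value, the repository name, and the helper name are assumptions):

    // assumed imports: "context", "time",
    // "github.com/aws/aws-sdk-go-v2/aws", "github.com/aws/aws-sdk-go-v2/service/ecr"
    func waitForPreview(ctx context.Context, client *ecr.Client) (*ecr.GetLifecyclePolicyPreviewOutput, error) {
        w := ecr.NewLifecyclePolicyPreviewCompleteWaiter(client, func(o *ecr.LifecyclePolicyPreviewCompleteWaiterOptions) {
            o.LogWaitAttempts = true // optional: log each polling attempt
        })
        // Polls until the preview status is COMPLETE, returning an error on
        // FAILED or once the 5-minute budget is exhausted.
        return w.WaitForOutput(ctx, &ecr.GetLifecyclePolicyPreviewInput{
            RepositoryName: aws.String("my-repo"), // hypothetical repository
        }, 5*time.Minute)
    }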
+func (w *LifecyclePolicyPreviewCompleteWaiter) WaitForOutput(ctx context.Context, params *GetLifecyclePolicyPreviewInput, maxWaitDur time.Duration, optFns ...func(*LifecyclePolicyPreviewCompleteWaiterOptions)) (*GetLifecyclePolicyPreviewOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.GetLifecyclePolicyPreview(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for LifecyclePolicyPreviewComplete waiter") +} + +func lifecyclePolicyPreviewCompleteStateRetryable(ctx context.Context, input *GetLifecyclePolicyPreviewInput, output *GetLifecyclePolicyPreviewOutput, err error) (bool, error) { + + if err == nil { + pathValue, err := jmespath.Search("status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "COMPLETE" + value, ok := pathValue.(types.LifecyclePolicyPreviewStatus) + if !ok { + return false, fmt.Errorf("waiter comparator expected types.LifecyclePolicyPreviewStatus value, got %T", pathValue) + } + + if string(value) == expectedValue { + return false, nil + } + } + + if err == nil { + pathValue, err := jmespath.Search("status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "FAILED" + value, ok := pathValue.(types.LifecyclePolicyPreviewStatus) + if !ok { + return false, fmt.Errorf("waiter comparator expected types.LifecyclePolicyPreviewStatus value, got %T", pathValue) + } + + if string(value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + + return true, nil +} + +func newServiceMetadataMiddleware_opGetLifecyclePolicyPreview(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: 
"GetLifecyclePolicyPreview", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go new file mode 100644 index 000000000..b9086f937 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryPolicy.go @@ -0,0 +1,114 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the permissions policy for a registry. +func (c *Client) GetRegistryPolicy(ctx context.Context, params *GetRegistryPolicyInput, optFns ...func(*Options)) (*GetRegistryPolicyOutput, error) { + if params == nil { + params = &GetRegistryPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRegistryPolicy", params, optFns, c.addOperationGetRegistryPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRegistryPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRegistryPolicyInput struct { + noSmithyDocumentSerde +} + +type GetRegistryPolicyOutput struct { + + // The JSON text of the permissions policy for a registry. + PolicyText *string + + // The ID of the registry. + RegistryId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRegistryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRegistryPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRegistryPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRegistryPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRegistryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: 
region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "GetRegistryPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go new file mode 100644 index 000000000..07f5e4dc3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRegistryScanningConfiguration.go @@ -0,0 +1,115 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the scanning configuration for a registry. +func (c *Client) GetRegistryScanningConfiguration(ctx context.Context, params *GetRegistryScanningConfigurationInput, optFns ...func(*Options)) (*GetRegistryScanningConfigurationOutput, error) { + if params == nil { + params = &GetRegistryScanningConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRegistryScanningConfiguration", params, optFns, c.addOperationGetRegistryScanningConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRegistryScanningConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRegistryScanningConfigurationInput struct { + noSmithyDocumentSerde +} + +type GetRegistryScanningConfigurationOutput struct { + + // The ID of the registry. + RegistryId *string + + // The scanning configuration for the registry. + ScanningConfiguration *types.RegistryScanningConfiguration + + // Metadata pertaining to the operation's result. 
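Both registry-level read operations in this diff take empty inputs; a combined sketch (illustrative only; the *ecr.Client value and the helper name are assumptions, and error handling is simplified):

    // assumed imports: "context", "fmt",
    // "github.com/aws/aws-sdk-go-v2/aws", "github.com/aws/aws-sdk-go-v2/service/ecr"
    func dumpRegistrySettings(ctx context.Context, client *ecr.Client) error {
        pol, err := client.GetRegistryPolicy(ctx, &ecr.GetRegistryPolicyInput{})
        if err != nil {
            return err // the service returns an error when no registry policy exists
        }
        fmt.Println("registry policy:", aws.ToString(pol.PolicyText))
        scan, err := client.GetRegistryScanningConfiguration(ctx, &ecr.GetRegistryScanningConfigurationInput{})
        if err != nil {
            return err
        }
        fmt.Println("scan type:", scan.ScanningConfiguration.ScanType)
        return nil
    }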
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRegistryScanningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRegistryScanningConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRegistryScanningConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRegistryScanningConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRegistryScanningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "GetRegistryScanningConfiguration", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go new file mode 100644 index 000000000..15a292ad6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_GetRepositoryPolicy.go @@ -0,0 +1,131 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the repository policy for the specified repository. +func (c *Client) GetRepositoryPolicy(ctx context.Context, params *GetRepositoryPolicyInput, optFns ...func(*Options)) (*GetRepositoryPolicyOutput, error) { + if params == nil { + params = &GetRepositoryPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRepositoryPolicy", params, optFns, c.addOperationGetRepositoryPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRepositoryPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRepositoryPolicyInput struct { + + // The name of the repository with the policy to retrieve. 
+ // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type GetRepositoryPolicyOutput struct { + + // The JSON repository policy text associated with the repository. + PolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetRepositoryPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRepositoryPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "GetRepositoryPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go new file mode 100644 index 000000000..d700d833f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_InitiateLayerUpload.go @@ -0,0 +1,135 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
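For the GetRepositoryPolicy operation added above, a minimal call sketch (illustrative only; the *ecr.Client value, the repository name, and the helper name are assumptions; RegistryId is omitted so the caller's default registry is assumed):

    // assumed imports: "context", "fmt",
    // "github.com/aws/aws-sdk-go-v2/aws", "github.com/aws/aws-sdk-go-v2/service/ecr"
    func showRepoPolicy(ctx context.Context, client *ecr.Client) error {
        out, err := client.GetRepositoryPolicy(ctx, &ecr.GetRepositoryPolicyInput{
            RepositoryName: aws.String("my-repo"), // hypothetical repository
        })
        if err != nil {
            return err
        }
        fmt.Println(aws.ToString(out.PolicyText))
        return nil
    }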
+ +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Notifies Amazon ECR that you intend to upload an image layer. When an image is +// pushed, the InitiateLayerUpload API is called once per image layer that has not +// already been uploaded. Whether or not an image layer has been uploaded is +// determined by the BatchCheckLayerAvailability API action. This operation is used +// by the Amazon ECR proxy and is not generally used by customers for pulling and +// pushing images. In most cases, you should use the docker CLI to pull, tag, and +// push images. +func (c *Client) InitiateLayerUpload(ctx context.Context, params *InitiateLayerUploadInput, optFns ...func(*Options)) (*InitiateLayerUploadOutput, error) { + if params == nil { + params = &InitiateLayerUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "InitiateLayerUpload", params, optFns, c.addOperationInitiateLayerUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*InitiateLayerUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type InitiateLayerUploadInput struct { + + // The name of the repository to which you intend to upload layers. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry to which you + // intend to upload layers. If you do not specify a registry, the default registry + // is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type InitiateLayerUploadOutput struct { + + // The size, in bytes, that Amazon ECR expects future layer part uploads to be. + PartSize *int64 + + // The upload ID for the layer upload. This parameter is passed to further + // UploadLayerPart and CompleteLayerUpload operations. + UploadId *string + + // Metadata pertaining to the operation's result. 
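Per the output field comments above, the UploadId returned by InitiateLayerUpload feeds the subsequent UploadLayerPart and CompleteLayerUpload calls; a sketch of the initiating step (illustrative only; the *ecr.Client value, the repository name, and the helper name are assumptions):

    // assumed imports: "context",
    // "github.com/aws/aws-sdk-go-v2/aws", "github.com/aws/aws-sdk-go-v2/service/ecr"
    func startLayerUpload(ctx context.Context, client *ecr.Client) (uploadID string, partSize int64, err error) {
        out, err := client.InitiateLayerUpload(ctx, &ecr.InitiateLayerUploadInput{
            RepositoryName: aws.String("my-repo"), // hypothetical repository
        })
        if err != nil {
            return "", 0, err
        }
        // UploadId identifies this upload session; PartSize is the chunk size
        // the service expects for the following UploadLayerPart calls.
        return aws.ToString(out.UploadId), aws.ToInt64(out.PartSize), nil
    }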
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpInitiateLayerUpload{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpInitiateLayerUpload{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpInitiateLayerUploadValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opInitiateLayerUpload(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opInitiateLayerUpload(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "InitiateLayerUpload", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go new file mode 100644 index 000000000..b3d096d5b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListImages.go @@ -0,0 +1,253 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all the image IDs for the specified repository. You can filter images +// based on whether or not they are tagged by using the tagStatus filter and +// specifying either TAGGED, UNTAGGED or ANY. For example, you can filter your +// results to return only UNTAGGED images and then pipe that result to a +// BatchDeleteImage operation to delete them. Or, you can filter your results to +// return only TAGGED images to list all of the tags in your repository. 
+func (c *Client) ListImages(ctx context.Context, params *ListImagesInput, optFns ...func(*Options)) (*ListImagesOutput, error) { + if params == nil { + params = &ListImagesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListImages", params, optFns, c.addOperationListImagesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListImagesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListImagesInput struct { + + // The repository with image IDs to be listed. + // + // This member is required. + RepositoryName *string + + // The filter key and value with which to filter your ListImages results. + Filter *types.ListImagesFilter + + // The maximum number of image results returned by ListImages in paginated output. + // When this parameter is used, ListImages only returns maxResults results in a + // single page along with a nextToken response element. The remaining results of + // the initial request can be seen by sending another ListImages request with the + // returned nextToken value. This value can be between 1 and 1000. If this + // parameter is not used, then ListImages returns up to 100 results and a nextToken + // value, if applicable. + MaxResults *int32 + + // The nextToken value returned from a previous paginated ListImages request where + // maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + // This token should be treated as an opaque identifier that is only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to list images. If you do not specify a registry, the + // default registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type ListImagesOutput struct { + + // The list of image IDs for the requested repository. + ImageIds []types.ImageIdentifier + + // The nextToken value to include in a future ListImages request. When the results + // of a ListImages request exceed maxResults, this value can be used to retrieve + // the next page of results. This value is null when there are no more results to + // return. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListImagesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListImages{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListImages{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListImagesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListImages(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListImagesAPIClient is a client that implements the ListImages operation. +type ListImagesAPIClient interface { + ListImages(context.Context, *ListImagesInput, ...func(*Options)) (*ListImagesOutput, error) +} + +var _ ListImagesAPIClient = (*Client)(nil) + +// ListImagesPaginatorOptions is the paginator options for ListImages +type ListImagesPaginatorOptions struct { + // The maximum number of image results returned by ListImages in paginated output. + // When this parameter is used, ListImages only returns maxResults results in a + // single page along with a nextToken response element. The remaining results of + // the initial request can be seen by sending another ListImages request with the + // returned nextToken value. This value can be between 1 and 1000. If this + // parameter is not used, then ListImages returns up to 100 results and a nextToken + // value, if applicable. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListImagesPaginator is a paginator for ListImages +type ListImagesPaginator struct { + options ListImagesPaginatorOptions + client ListImagesAPIClient + params *ListImagesInput + nextToken *string + firstPage bool +} + +// NewListImagesPaginator returns a new ListImagesPaginator +func NewListImagesPaginator(client ListImagesAPIClient, params *ListImagesInput, optFns ...func(*ListImagesPaginatorOptions)) *ListImagesPaginator { + if params == nil { + params = &ListImagesInput{} + } + + options := ListImagesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListImagesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListImagesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListImages page. +func (p *ListImagesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListImagesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListImages(ctx, &params, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListImages(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "ListImages", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go new file mode 100644 index 000000000..91a372fe5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_ListTagsForResource.go @@ -0,0 +1,122 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// List the tags for an Amazon ECR resource. +func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { + if params == nil { + params = &ListTagsForResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListTagsForResource", params, optFns, c.addOperationListTagsForResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListTagsForResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTagsForResourceInput struct { + + // The Amazon Resource Name (ARN) that identifies the resource for which to list + // the tags. Currently, the only supported resource is an Amazon ECR repository. + // + // This member is required.
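A usage sketch for the ListImages paginator defined above, combined with the tagStatus filter mentioned in the operation's doc comment (illustrative only; the *ecr.Client value, the repository name, and the helper name are assumptions):

    // assumed imports: "context", "fmt", "github.com/aws/aws-sdk-go-v2/aws",
    // "github.com/aws/aws-sdk-go-v2/service/ecr", "github.com/aws/aws-sdk-go-v2/service/ecr/types"
    func listUntaggedDigests(ctx context.Context, client *ecr.Client) error {
        p := ecr.NewListImagesPaginator(client, &ecr.ListImagesInput{
            RepositoryName: aws.String("my-repo"), // hypothetical repository
            Filter:         &types.ListImagesFilter{TagStatus: types.TagStatusUntagged},
        }, func(o *ecr.ListImagesPaginatorOptions) {
            o.Limit = 100 // page size; the service allows 1-1000
        })
        for p.HasMorePages() {
            page, err := p.NextPage(ctx)
            if err != nil {
                return err
            }
            for _, id := range page.ImageIds {
                fmt.Println(aws.ToString(id.ImageDigest))
            }
        }
        return nil
    }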
+ ResourceArn *string + + noSmithyDocumentSerde +} + +type ListTagsForResourceOutput struct { + + // The tags for the resource. + Tags []types.Tag + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListTagsForResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "ListTagsForResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go new file mode 100644 index 000000000..631a9b408 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImage.go @@ -0,0 +1,149 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or updates the image manifest and tags associated with an image. When an +// image is pushed and all new image layers have been uploaded, the PutImage API is +// called once to create or update the image manifest and the tags associated with +// the image. This operation is used by the Amazon ECR proxy and is not generally +// used by customers for pulling and pushing images. 
In most cases, you should use +// the docker CLI to pull, tag, and push images. +func (c *Client) PutImage(ctx context.Context, params *PutImageInput, optFns ...func(*Options)) (*PutImageOutput, error) { + if params == nil { + params = &PutImageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutImage", params, optFns, c.addOperationPutImageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutImageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutImageInput struct { + + // The image manifest corresponding to the image to be uploaded. + // + // This member is required. + ImageManifest *string + + // The name of the repository in which to put the image. + // + // This member is required. + RepositoryName *string + + // The image digest of the image manifest corresponding to the image. + ImageDigest *string + + // The media type of the image manifest. If you push an image manifest that does + // not contain the mediaType field, you must specify the imageManifestMediaType in + // the request. + ImageManifestMediaType *string + + // The tag to associate with the image. This parameter is required for images that + // use the Docker Image Manifest V2 Schema 2 or Open Container Initiative (OCI) + // formats. + ImageTag *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to put the image. If you do not specify a registry, the + // default registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type PutImageOutput struct { + + // Details of the image uploaded. + Image *types.Image + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutImage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutImage{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutImageValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImage(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = 
addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutImage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "PutImage", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go new file mode 100644 index 000000000..74d8fe456 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageScanningConfiguration.go @@ -0,0 +1,143 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// The PutImageScanningConfiguration API is being deprecated, in favor of +// specifying the image scanning configuration at the registry level. For more +// information, see PutRegistryScanningConfiguration. Updates the image scanning +// configuration for the specified repository. +func (c *Client) PutImageScanningConfiguration(ctx context.Context, params *PutImageScanningConfigurationInput, optFns ...func(*Options)) (*PutImageScanningConfigurationOutput, error) { + if params == nil { + params = &PutImageScanningConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutImageScanningConfiguration", params, optFns, c.addOperationPutImageScanningConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutImageScanningConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutImageScanningConfigurationInput struct { + + // The image scanning configuration for the repository. This setting determines + // whether images are scanned for known vulnerabilities after being pushed to the + // repository. + // + // This member is required. + ImageScanningConfiguration *types.ImageScanningConfiguration + + // The name of the repository in which to update the image scanning configuration + // setting. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to update the image scanning configuration setting. If + // you do not specify a registry, the default registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type PutImageScanningConfigurationOutput struct { + + // The image scanning configuration setting for the repository. + ImageScanningConfiguration *types.ImageScanningConfiguration + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. 
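A minimal sketch of the PutImage operation defined above (illustrative only; the *ecr.Client value, the repository name, the tag, the helper name, and the manifest value are assumptions; the media type shown is the Docker Image Manifest V2 Schema 2 type):

    // assumed imports: "context",
    // "github.com/aws/aws-sdk-go-v2/aws", "github.com/aws/aws-sdk-go-v2/service/ecr"
    func tagManifest(ctx context.Context, client *ecr.Client, manifestJSON string) error {
        // manifestJSON is a hypothetical, already-serialized image manifest.
        _, err := client.PutImage(ctx, &ecr.PutImageInput{
            RepositoryName:         aws.String("my-repo"), // hypothetical repository
            ImageManifest:          aws.String(manifestJSON),
            ImageManifestMediaType: aws.String("application/vnd.docker.distribution.manifest.v2+json"),
            ImageTag:               aws.String("v1.0.0"), // hypothetical tag
        })
        return err
    }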
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutImageScanningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutImageScanningConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutImageScanningConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutImageScanningConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImageScanningConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutImageScanningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "PutImageScanningConfiguration", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go new file mode 100644 index 000000000..5e42488a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutImageTagMutability.go @@ -0,0 +1,142 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the image tag mutability settings for the specified repository. For more +// information, see Image tag mutability +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html) +// in the Amazon Elastic Container Registry User Guide. 
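Ahead of the PutImageTagMutability implementation that follows, a minimal usage sketch making a repository's tags immutable (illustrative only; the *ecr.Client value, the repository name, and the helper name are assumptions):

    // assumed imports: "context", "github.com/aws/aws-sdk-go-v2/aws",
    // "github.com/aws/aws-sdk-go-v2/service/ecr", "github.com/aws/aws-sdk-go-v2/service/ecr/types"
    func lockTags(ctx context.Context, client *ecr.Client) error {
        _, err := client.PutImageTagMutability(ctx, &ecr.PutImageTagMutabilityInput{
            RepositoryName:     aws.String("my-repo"), // hypothetical repository
            ImageTagMutability: types.ImageTagMutabilityImmutable,
        })
        return err
    }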
+func (c *Client) PutImageTagMutability(ctx context.Context, params *PutImageTagMutabilityInput, optFns ...func(*Options)) (*PutImageTagMutabilityOutput, error) { + if params == nil { + params = &PutImageTagMutabilityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutImageTagMutability", params, optFns, c.addOperationPutImageTagMutabilityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutImageTagMutabilityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutImageTagMutabilityInput struct { + + // The tag mutability setting for the repository. If MUTABLE is specified, image + // tags can be overwritten. If IMMUTABLE is specified, all image tags within the + // repository will be immutable which will prevent them from being overwritten. + // + // This member is required. + ImageTagMutability types.ImageTagMutability + + // The name of the repository in which to update the image tag mutability settings. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to update the image tag mutability settings. If you do + // not specify a registry, the default registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type PutImageTagMutabilityOutput struct { + + // The image tag mutability setting for the repository. + ImageTagMutability types.ImageTagMutability + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutImageTagMutabilityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutImageTagMutability{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutImageTagMutability{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutImageTagMutabilityValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImageTagMutability(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + 
return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutImageTagMutability(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "PutImageTagMutability", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go new file mode 100644 index 000000000..782382eef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutLifecyclePolicy.go @@ -0,0 +1,138 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or updates the lifecycle policy for the specified repository. For more +// information, see Lifecycle policy template +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html). +func (c *Client) PutLifecyclePolicy(ctx context.Context, params *PutLifecyclePolicyInput, optFns ...func(*Options)) (*PutLifecyclePolicyOutput, error) { + if params == nil { + params = &PutLifecyclePolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutLifecyclePolicy", params, optFns, c.addOperationPutLifecyclePolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutLifecyclePolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutLifecyclePolicyInput struct { + + // The JSON repository policy text to apply to the repository. + // + // This member is required. + LifecyclePolicyText *string + + // The name of the repository to receive the policy. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do
 not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type PutLifecyclePolicyOutput struct { + + // The JSON repository policy text. + LifecyclePolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutLifecyclePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutLifecyclePolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutLifecyclePolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutLifecyclePolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutLifecyclePolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutLifecyclePolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "PutLifecyclePolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go new file mode 100644 index 000000000..b253e0e0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryPolicy.go @@ -0,0 +1,131 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or updates the permissions policy for your registry. A registry policy +// is used to specify permissions for another Amazon Web Services account and is +// used when configuring cross-account replication. 
For more information, see +// Registry permissions +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html) +// in the Amazon Elastic Container Registry User Guide. +func (c *Client) PutRegistryPolicy(ctx context.Context, params *PutRegistryPolicyInput, optFns ...func(*Options)) (*PutRegistryPolicyOutput, error) { + if params == nil { + params = &PutRegistryPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutRegistryPolicy", params, optFns, c.addOperationPutRegistryPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutRegistryPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutRegistryPolicyInput struct { + + // The JSON policy text to apply to your registry. The policy text follows the same + // format as IAM policy text. For more information, see Registry permissions + // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html) + // in the Amazon Elastic Container Registry User Guide. + // + // This member is required. + PolicyText *string + + noSmithyDocumentSerde +} + +type PutRegistryPolicyOutput struct { + + // The JSON policy text for your registry. + PolicyText *string + + // The registry ID. + RegistryId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutRegistryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutRegistryPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutRegistryPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutRegistryPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRegistryPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutRegistryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: 
"PutRegistryPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go new file mode 100644 index 000000000..fe20cf376 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutRegistryScanningConfiguration.go @@ -0,0 +1,131 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or updates the scanning configuration for your private registry. +func (c *Client) PutRegistryScanningConfiguration(ctx context.Context, params *PutRegistryScanningConfigurationInput, optFns ...func(*Options)) (*PutRegistryScanningConfigurationOutput, error) { + if params == nil { + params = &PutRegistryScanningConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutRegistryScanningConfiguration", params, optFns, c.addOperationPutRegistryScanningConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutRegistryScanningConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutRegistryScanningConfigurationInput struct { + + // The scanning rules to use for the registry. A scanning rule is used to determine + // which repository filters are used and at what frequency scanning will occur. + Rules []types.RegistryScanningRule + + // The scanning type to set for the registry. When a registry scanning + // configuration is not defined, by default the BASIC scan type is used. When basic + // scanning is used, you may specify filters to determine which individual + // repositories, or all repositories, are scanned when new images are pushed to + // those repositories. Alternatively, you can do manual scans of images with basic + // scanning. When the ENHANCED scan type is set, Amazon Inspector provides + // automated vulnerability scanning. You may choose between continuous scanning or + // scan on push and you may specify filters to determine which individual + // repositories, or all repositories, are scanned. + ScanType types.ScanType + + noSmithyDocumentSerde +} + +type PutRegistryScanningConfigurationOutput struct { + + // The scanning configuration for your registry. + RegistryScanningConfiguration *types.RegistryScanningConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutRegistryScanningConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutRegistryScanningConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutRegistryScanningConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutRegistryScanningConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRegistryScanningConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutRegistryScanningConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "PutRegistryScanningConfiguration", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go new file mode 100644 index 000000000..e8e84df6e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_PutReplicationConfiguration.go @@ -0,0 +1,131 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or updates the replication configuration for a registry. The existing +// replication configuration for a registry can be retrieved with the +// DescribeRegistry API action. The first time the PutReplicationConfiguration API +// is called, a service-linked IAM role is created in your account for the +// replication process.
For more information, see Using service-linked roles for +// Amazon ECR +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/using-service-linked-roles.html) +// in the Amazon Elastic Container Registry User Guide. When configuring +// cross-account replication, the destination account must grant the source account +// permission to replicate. This permission is controlled using a registry +// permissions policy. For more information, see PutRegistryPolicy. +func (c *Client) PutReplicationConfiguration(ctx context.Context, params *PutReplicationConfigurationInput, optFns ...func(*Options)) (*PutReplicationConfigurationOutput, error) { + if params == nil { + params = &PutReplicationConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutReplicationConfiguration", params, optFns, c.addOperationPutReplicationConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutReplicationConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutReplicationConfigurationInput struct { + + // An object representing the replication configuration for a registry. + // + // This member is required. + ReplicationConfiguration *types.ReplicationConfiguration + + noSmithyDocumentSerde +} + +type PutReplicationConfigurationOutput struct { + + // The contents of the replication configuration for the registry. + ReplicationConfiguration *types.ReplicationConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutReplicationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutReplicationConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutReplicationConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutReplicationConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutReplicationConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opPutReplicationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "PutReplicationConfiguration", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go new file mode 100644 index 000000000..1078970c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_SetRepositoryPolicy.go @@ -0,0 +1,148 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Applies a repository policy to the specified repository to control access +// permissions. For more information, see Amazon ECR Repository policies +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) +// in the Amazon Elastic Container Registry User Guide. +func (c *Client) SetRepositoryPolicy(ctx context.Context, params *SetRepositoryPolicyInput, optFns ...func(*Options)) (*SetRepositoryPolicyOutput, error) { + if params == nil { + params = &SetRepositoryPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "SetRepositoryPolicy", params, optFns, c.addOperationSetRepositoryPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*SetRepositoryPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type SetRepositoryPolicyInput struct { + + // The JSON repository policy text to apply to the repository. For more + // information, see Amazon ECR repository policies + // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) + // in the Amazon Elastic Container Registry User Guide. + // + // This member is required. + PolicyText *string + + // The name of the repository to receive the policy. + // + // This member is required. + RepositoryName *string + + // If the policy you are attempting to set on a repository would prevent you + // from setting another policy in the future, you must force the + // SetRepositoryPolicy operation. This is intended to prevent accidental repository + // lockouts. + Force bool + + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type SetRepositoryPolicyOutput struct { + + // The JSON repository policy text applied to the repository. + PolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpSetRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSetRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpSetRepositoryPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSetRepositoryPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSetRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "SetRepositoryPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go new file mode 100644 index 000000000..8a9785f49 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartImageScan.go @@ -0,0 +1,144 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Starts an image vulnerability scan. An image scan can only be started once per +// 24 hours on an individual image. This limit includes scans performed on the +// initial push. For more information, see Image scanning +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html) in +// the Amazon Elastic Container Registry User Guide.
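+// +// A minimal caller sketch (not part of the generated code; the repository +// name and tag are hypothetical, the client is assumed to come from +// ecr.NewFromConfig, and aws.String is the pointer helper from the +// github.com/aws/aws-sdk-go-v2/aws package): +// +// out, err := client.StartImageScan(ctx, &ecr.StartImageScanInput{ +// RepositoryName: aws.String("my-repo"), // hypothetical repository +// ImageId: &types.ImageIdentifier{ImageTag: aws.String("latest")}, +// }) +// if err != nil { +// return err // e.g. the once-per-24-hours scan limit was hit +// } +// fmt.Println(out.ImageScanStatus.Status)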
+func (c *Client) StartImageScan(ctx context.Context, params *StartImageScanInput, optFns ...func(*Options)) (*StartImageScanOutput, error) { + if params == nil { + params = &StartImageScanInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartImageScan", params, optFns, c.addOperationStartImageScanMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartImageScanOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartImageScanInput struct { + + // An object with identifying information for an image in an Amazon ECR repository. + // + // This member is required. + ImageId *types.ImageIdentifier + + // The name of the repository that contains the images to scan. + // + // This member is required. + RepositoryName *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to start an image scan request. If you do not specify a + // registry, the default registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type StartImageScanOutput struct { + + // An object with identifying information for an image in an Amazon ECR repository. + ImageId *types.ImageIdentifier + + // The current state of the scan. + ImageScanStatus *types.ImageScanStatus + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartImageScanMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartImageScan{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartImageScan{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpStartImageScanValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartImageScan(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartImageScan(region string) 
*awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "StartImageScan", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go new file mode 100644 index 000000000..1169ac443 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_StartLifecyclePolicyPreview.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Starts a preview of a lifecycle policy for the specified repository. This allows +// you to see the results before associating the lifecycle policy with the +// repository. +func (c *Client) StartLifecyclePolicyPreview(ctx context.Context, params *StartLifecyclePolicyPreviewInput, optFns ...func(*Options)) (*StartLifecyclePolicyPreviewOutput, error) { + if params == nil { + params = &StartLifecyclePolicyPreviewInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartLifecyclePolicyPreview", params, optFns, c.addOperationStartLifecyclePolicyPreviewMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartLifecyclePolicyPreviewOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartLifecyclePolicyPreviewInput struct { + + // The name of the repository to be evaluated. + // + // This member is required. + RepositoryName *string + + // The policy to be evaluated against. If you do not specify a policy, the current + // policy for the repository is used. + LifecyclePolicyText *string + + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type StartLifecyclePolicyPreviewOutput struct { + + // The JSON lifecycle policy text. + LifecyclePolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // The status of the lifecycle policy preview request. + Status types.LifecyclePolicyPreviewStatus + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartLifecyclePolicyPreviewMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartLifecyclePolicyPreview{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartLifecyclePolicyPreview{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpStartLifecyclePolicyPreviewValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartLifecyclePolicyPreview(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartLifecyclePolicyPreview(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "StartLifecyclePolicyPreview", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go new file mode 100644 index 000000000..d283acf67 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_TagResource.go @@ -0,0 +1,126 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Adds specified tags to a resource with the specified ARN. Existing tags on a +// resource are not changed if they are not specified in the request parameters. 
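+// +// A short usage sketch (the ARN and tag values are hypothetical; assumes an +// existing client from ecr.NewFromConfig and the aws.String helper from +// github.com/aws/aws-sdk-go-v2/aws): +// +// _, err := client.TagResource(ctx, &ecr.TagResourceInput{ +// ResourceArn: aws.String("arn:aws:ecr:eu-west-1:123456789012:repository/my-repo"), // hypothetical ARN +// Tags: []types.Tag{{Key: aws.String("team"), Value: aws.String("platform")}}, +// })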
+func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // The Amazon Resource Name (ARN) of the resource to which to add tags. + // Currently, the only supported resource is an Amazon ECR repository. + // + // This member is required. + ResourceArn *string + + // The tags to add to the resource. A tag is a key-value pair. Tag keys + // can have a maximum length of 128 characters, and tag values can have a + // maximum length of 256 characters. + // + // This member is required. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type TagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "TagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go new file mode 100644 index 000000000..e42275bde --- /dev/null +++
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UntagResource.go @@ -0,0 +1,122 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes specified tags from a resource. +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // The Amazon Resource Name (ARN) of the resource from which to remove tags. + // Currently, the only supported resource is an Amazon ECR repository. + // + // This member is required. + ResourceArn *string + + // The keys of the tags to be removed. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: 
"UntagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go new file mode 100644 index 000000000..4acf7e5b0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/api_op_UploadLayerPart.go @@ -0,0 +1,160 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Uploads an image layer part to Amazon ECR. When an image is pushed, each new +// image layer is uploaded in parts. The maximum size of each image layer part can +// be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once per +// each new image layer part. This operation is used by the Amazon ECR proxy and is +// not generally used by customers for pulling and pushing images. In most cases, +// you should use the docker CLI to pull, tag, and push images. +func (c *Client) UploadLayerPart(ctx context.Context, params *UploadLayerPartInput, optFns ...func(*Options)) (*UploadLayerPartOutput, error) { + if params == nil { + params = &UploadLayerPartInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UploadLayerPart", params, optFns, c.addOperationUploadLayerPartMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UploadLayerPartOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UploadLayerPartInput struct { + + // The base64-encoded layer part payload. + // + // This member is required. + LayerPartBlob []byte + + // The position of the first byte of the layer part witin the overall image layer. + // + // This member is required. + PartFirstByte *int64 + + // The position of the last byte of the layer part within the overall image layer. + // + // This member is required. + PartLastByte *int64 + + // The name of the repository to which you are uploading layer parts. + // + // This member is required. + RepositoryName *string + + // The upload ID from a previous InitiateLayerUpload operation to associate with + // the layer part upload. + // + // This member is required. + UploadId *string + + // The Amazon Web Services account ID associated with the registry to which you are + // uploading layer parts. If you do not specify a registry, the default registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type UploadLayerPartOutput struct { + + // The integer value of the last byte received in the request. + LastByteReceived *int64 + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // The upload ID associated with the request. + UploadId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUploadLayerPart{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUploadLayerPart{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUploadLayerPartValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadLayerPart(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUploadLayerPart(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr", + OperationName: "UploadLayerPart", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go new file mode 100644 index 000000000..79c79b7ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/deserializers.go @@ -0,0 +1,13512 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
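+// +// Every operation deserializer in this file follows the same pattern: the +// response body is tee'd through a fixed 1 KiB ring buffer while it is +// JSON-decoded, so that on a decode failure a snapshot of the most recently +// read bytes can be attached to the smithy.DeserializationError for +// diagnostics. Non-2xx responses are routed to a per-operation error decoder +// that maps the modeled ECR exceptions and falls back to a generic +// smithy.GenericAPIError.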
+ +package ecr + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "math" + "strings" +) + +type awsAwsjson11_deserializeOpBatchCheckLayerAvailability struct { +} + +func (*awsAwsjson11_deserializeOpBatchCheckLayerAvailability) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpBatchCheckLayerAvailability) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response, &metadata) + } + output := &BatchCheckLayerAvailabilityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentBatchCheckLayerAvailabilityOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + 
errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpBatchDeleteImage struct { +} + +func (*awsAwsjson11_deserializeOpBatchDeleteImage) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpBatchDeleteImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorBatchDeleteImage(response, &metadata) + } + output := &BatchDeleteImageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentBatchDeleteImageOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode 
= restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpBatchGetImage struct { +} + +func (*awsAwsjson11_deserializeOpBatchGetImage) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpBatchGetImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorBatchGetImage(response, &metadata) + } + output := &BatchGetImageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentBatchGetImageOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorBatchGetImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if 
len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpBatchGetRepositoryScanningConfiguration struct { +} + +func (*awsAwsjson11_deserializeOpBatchGetRepositoryScanningConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpBatchGetRepositoryScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorBatchGetRepositoryScanningConfiguration(response, &metadata) + } + output := &BatchGetRepositoryScanningConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentBatchGetRepositoryScanningConfigurationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorBatchGetRepositoryScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot 
[... vendored generated code, flattened in extraction, elided for readability: this span of the hunk is the smithy-go generated "awsAwsjson11" OperationDeserializer code for the aws-sdk-go-v2 ECR client, pulled in verbatim by the dependency bump in go.mod. Every ECR operation gets the same generated pair: a HandleDeserialize middleware that asserts the raw response is a *smithyhttp.Response, routes non-2xx status codes to the operation's error mapper, and otherwise decodes the JSON response body (tee'd through a 1 KiB smithyio.RingBuffer so decode failures can snapshot the offending bytes) into the operation's Output struct; and an awsAwsjson11_deserializeOpError<Operation> function that buffers the error body, takes the error code from the X-Amzn-ErrorType header (falling back to the JSON body via restjson.GetErrorInfo, sanitized with restjson.SanitizeErrorCode), and switches on the code with strings.EqualFold to return the matching typed exception, with smithy.GenericAPIError as the fallback. Operations covered in this span: CompleteLayerUpload, CreatePullThroughCacheRule, CreateRepository, DeleteLifecyclePolicy, DeletePullThroughCacheRule, DeleteRegistryPolicy, DeleteRepository, DeleteRepositoryPolicy, DescribeImageReplicationStatus, DescribeImages, DescribeImageScanFindings, DescribePullThroughCacheRules, DescribeRegistry, DescribeRepositories, GetAuthorizationToken, GetDownloadUrlForLayer, GetLifecyclePolicy, GetLifecyclePolicyPreview; the hunk continues past this span.]
awsAwsjson11_deserializeErrorLifecyclePolicyPreviewNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetRegistryPolicy struct { +} + +func (*awsAwsjson11_deserializeOpGetRegistryPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetRegistryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response, &metadata) + } + output := &GetRegistryPolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetRegistryPolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetRegistryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + 
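// when GetErrorInfo recovered a message from the body, it replaces the placeholder that was initialized from the error code above. +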
} + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RegistryPolicyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRegistryPolicyNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetRegistryScanningConfiguration struct { +} + +func (*awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetRegistryScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response, &metadata) + } + output := &GetRegistryScanningConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetRegistryScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetRepositoryPolicy struct { +} + +func (*awsAwsjson11_deserializeOpGetRepositoryPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response, &metadata) + } + output := &GetRepositoryPolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != 
nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryPolicyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpInitiateLayerUpload struct { +} + +func (*awsAwsjson11_deserializeOpInitiateLayerUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpInitiateLayerUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response, &metadata) + } + output := &InitiateLayerUploadOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = 
restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("KmsException", errorCode): + return awsAwsjson11_deserializeErrorKmsException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListImages struct { +} + +func (*awsAwsjson11_deserializeOpListImages) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListImages(response, &metadata) + } + output := &ListImagesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListImagesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + 
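// the error body is fully buffered above so the typed decoders in the switch below can re-read it from the start; the code resolution that follows prefers the X-Amzn-ErrorType header and falls back to the code parsed from the JSON payload. + // illustrative caller-side sketch (variable name hypothetical): var nf *types.RepositoryNotFoundException; errors.As(err, &nf) succeeds when this function returns that typed error. +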
errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListTagsForResource struct { +} + +func (*awsAwsjson11_deserializeOpListTagsForResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListTagsForResource(response, &metadata) + } + output := &ListTagsForResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy 
error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutImage struct { +} + +func (*awsAwsjson11_deserializeOpPutImage) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutImage(response, &metadata) + } + output := &PutImageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutImageOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ImageAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorImageAlreadyExistsException(response, errorBody) + + case strings.EqualFold("ImageDigestDoesNotMatchException", errorCode): + return awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response, errorBody) + + case strings.EqualFold("ImageTagAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("KmsException", errorCode): + return awsAwsjson11_deserializeErrorKmsException(response, errorBody) + + case strings.EqualFold("LayersNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorLayersNotFoundException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ReferencedImagesNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorReferencedImagesNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutImageScanningConfiguration struct { +} + +func (*awsAwsjson11_deserializeOpPutImageScanningConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutImageScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response, 
&metadata) + } + output := &PutImageScanningConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutImageScanningConfigurationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutImageScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutImageTagMutability struct { +} + +func (*awsAwsjson11_deserializeOpPutImageTagMutability) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutImageTagMutability) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + 
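// only HTTP transports are understood here; any other RawResponse type surfaces as a DeserializationError instead of being silently dropped. +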
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutImageTagMutability(response, &metadata) + } + output := &PutImageTagMutabilityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutImageTagMutabilityOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutImageTagMutability(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutLifecyclePolicy struct { +} + +func (*awsAwsjson11_deserializeOpPutLifecyclePolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutLifecyclePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return 
out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response, &metadata) + } + output := &PutLifecyclePolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutLifecyclePolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutLifecyclePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutRegistryPolicy struct { +} + +func (*awsAwsjson11_deserializeOpPutRegistryPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutRegistryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + 
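// the handler chain is invoked first so the request actually goes out before decoding starts; on a 2xx response the body is teed into a 1 KiB ring buffer so that a decode failure can attach a snapshot of the bytes that broke the decoder. +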
out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response, &metadata) + } + output := &PutRegistryPolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutRegistryPolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutRegistryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutRegistryScanningConfiguration struct { +} + +func (*awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutRegistryScanningConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response, &metadata) + } + output := &PutRegistryScanningConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutRegistryScanningConfigurationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutRegistryScanningConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutReplicationConfiguration struct { +} + +func (*awsAwsjson11_deserializeOpPutReplicationConfiguration) ID() string { + return 
"OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutReplicationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response, &metadata) + } + output := &PutReplicationConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutReplicationConfigurationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutReplicationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type 
awsAwsjson11_deserializeOpSetRepositoryPolicy struct { +} + +func (*awsAwsjson11_deserializeOpSetRepositoryPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpSetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response, &metadata) + } + output := &SetRepositoryPolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := 
&smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStartImageScan struct { +} + +func (*awsAwsjson11_deserializeOpStartImageScan) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStartImageScan) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStartImageScan(response, &metadata) + } + output := &StartImageScanOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentStartImageScanOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStartImageScan(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ImageNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return 
awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedImageTypeException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedImageTypeException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStartLifecyclePolicyPreview struct { +} + +func (*awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStartLifecyclePolicyPreview) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response, &metadata) + } + output := &StartLifecyclePolicyPreviewOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentStartLifecyclePolicyPreviewOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStartLifecyclePolicyPreview(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("LifecyclePolicyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorLifecyclePolicyNotFoundException(response, errorBody) + + case strings.EqualFold("LifecyclePolicyPreviewInProgressException", errorCode): + return awsAwsjson11_deserializeErrorLifecyclePolicyPreviewInProgressException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpTagResource struct { +} + +func (*awsAwsjson11_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + 
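+ // The error code can arrive either in the X-Amzn-ErrorType header or in the
+ // JSON body; when both are present the header value wins (the body code is
+ // only consulted further down when len(headerCode) == 0).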
if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("InvalidTagParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("TooManyTagsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUntagResource struct { +} + +func (*awsAwsjson11_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer 
bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("InvalidTagParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("TooManyTagsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUploadLayerPart struct { +} + +func (*awsAwsjson11_deserializeOpUploadLayerPart) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUploadLayerPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUploadLayerPart(response, &metadata) + } + output := &UploadLayerPartOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUploadLayerPartOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + 
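+ // The response body was teed through a fixed 1 KiB ring buffer earlier in
+ // this handler, so the snapshot attached to the DeserializationError is
+ // bounded to the most recently read bytes rather than the whole payload.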
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidLayerPartException", errorCode): + return awsAwsjson11_deserializeErrorInvalidLayerPartException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("KmsException", errorCode): + return awsAwsjson11_deserializeErrorKmsException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UploadNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorUploadNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsjson11_deserializeErrorEmptyUploadException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.EmptyUploadException{} + err := awsAwsjson11_deserializeDocumentEmptyUploadException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorImageAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ImageAlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentImageAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ImageDigestDoesNotMatchException{} + err := awsAwsjson11_deserializeDocumentImageDigestDoesNotMatchException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorImageNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ImageNotFoundException{} + err := awsAwsjson11_deserializeDocumentImageNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != 
nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ImageTagAlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentImageTagAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidLayerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidLayerException{} + err := awsAwsjson11_deserializeDocumentInvalidLayerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidLayerPartException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidLayerPartException{} + err := awsAwsjson11_deserializeDocumentInvalidLayerPartException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidParameterException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidParameterException{} + err := awsAwsjson11_deserializeDocumentInvalidParameterException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + 
Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidTagParameterException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidTagParameterException{} + err := awsAwsjson11_deserializeDocumentInvalidTagParameterException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKmsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KmsException{} + err := awsAwsjson11_deserializeDocumentKmsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLayerAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LayerAlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentLayerAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLayerInaccessibleException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := 
decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LayerInaccessibleException{} + err := awsAwsjson11_deserializeDocumentLayerInaccessibleException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLayerPartTooSmallException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LayerPartTooSmallException{} + err := awsAwsjson11_deserializeDocumentLayerPartTooSmallException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLayersNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LayersNotFoundException{} + err := awsAwsjson11_deserializeDocumentLayersNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLifecyclePolicyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LifecyclePolicyNotFoundException{} + err := awsAwsjson11_deserializeDocumentLifecyclePolicyNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + 
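+ // Even when the JSON decoded cleanly, a failure to map the untyped shape
+ // onto the modeled exception type is still reported as a
+ // DeserializationError carrying the same bounded snapshot.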
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLifecyclePolicyPreviewInProgressException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LifecyclePolicyPreviewInProgressException{} + err := awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewInProgressException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLifecyclePolicyPreviewNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LifecyclePolicyPreviewNotFoundException{} + err := awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.LimitExceededException{} + err := awsAwsjson11_deserializeDocumentLimitExceededException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorPullThroughCacheRuleAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + 
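+ // Every generated error helper in this file follows the same shape: tee the
+ // buffered body through a ring buffer, decode into an untyped shape, map it
+ // onto the modeled exception type, rewind the reader, and return the typed
+ // exception as the error value.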
ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.PullThroughCacheRuleAlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentPullThroughCacheRuleAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorPullThroughCacheRuleNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.PullThroughCacheRuleNotFoundException{} + err := awsAwsjson11_deserializeDocumentPullThroughCacheRuleNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorReferencedImagesNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ReferencedImagesNotFoundException{} + err := awsAwsjson11_deserializeDocumentReferencedImagesNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorRegistryPolicyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response 
body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.RegistryPolicyNotFoundException{} + err := awsAwsjson11_deserializeDocumentRegistryPolicyNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorRepositoryAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.RepositoryAlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorRepositoryNotEmptyException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.RepositoryNotEmptyException{} + err := awsAwsjson11_deserializeDocumentRepositoryNotEmptyException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorRepositoryNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.RepositoryNotFoundException{} + err := awsAwsjson11_deserializeDocumentRepositoryNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + 
errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.RepositoryPolicyNotFoundException{} + err := awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorScanNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ScanNotFoundException{} + err := awsAwsjson11_deserializeDocumentScanNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ServerException{} + err := awsAwsjson11_deserializeDocumentServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorTooManyTagsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + 
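+ // The original decode error is wrapped with %w inside the
+ // DeserializationError's Err field, preserving the underlying cause for
+ // callers that unwrap it.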
err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.TooManyTagsException{} + err := awsAwsjson11_deserializeDocumentTooManyTagsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorUnsupportedImageTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UnsupportedImageTypeException{} + err := awsAwsjson11_deserializeDocumentUnsupportedImageTypeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorUnsupportedUpstreamRegistryException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UnsupportedUpstreamRegistryException{} + err := awsAwsjson11_deserializeDocumentUnsupportedUpstreamRegistryException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorUploadNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UploadNotFoundException{} + err := awsAwsjson11_deserializeDocumentUploadNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", 
err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorValidationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ValidationException{} + err := awsAwsjson11_deserializeDocumentValidationException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeDocumentAttribute(v **types.Attribute, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Attribute + if *v == nil { + sv = &types.Attribute{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "key": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AttributeKey to be of type string, got %T instead", value) + } + sv.Key = ptr.String(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AttributeValue to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAttributeList(v *[]types.Attribute, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Attribute + if *v == nil { + cv = []types.Attribute{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Attribute + destAddr := &col + if err := awsAwsjson11_deserializeDocumentAttribute(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentAuthorizationData(v **types.AuthorizationData, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AuthorizationData + if *v == nil { + sv = &types.AuthorizationData{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authorizationToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Base64 to be of type string, got %T instead", value) + } + sv.AuthorizationToken = ptr.String(jtv) + } + + case "expiresAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + 
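+ // awsjson1.1 encodes timestamps as epoch seconds; decoder.UseNumber()
+ // preserved the value as a json.Number, which is converted here via
+ // Float64 and smithytime.ParseEpochSeconds.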
f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ExpiresAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected ExpirationTimestamp to be a JSON Number, got %T instead", value) + + } + } + + case "proxyEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProxyEndpoint to be of type string, got %T instead", value) + } + sv.ProxyEndpoint = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAuthorizationDataList(v *[]types.AuthorizationData, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AuthorizationData + if *v == nil { + cv = []types.AuthorizationData{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AuthorizationData + destAddr := &col + if err := awsAwsjson11_deserializeDocumentAuthorizationData(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentAwsEcrContainerImageDetails(v **types.AwsEcrContainerImageDetails, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AwsEcrContainerImageDetails + if *v == nil { + sv = &types.AwsEcrContainerImageDetails{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "architecture": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arch to be of type string, got %T instead", value) + } + sv.Architecture = ptr.String(jtv) + } + + case "author": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Author to be of type string, got %T instead", value) + } + sv.Author = ptr.String(jtv) + } + + case "imageHash": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) + } + sv.ImageHash = ptr.String(jtv) + } + + case "imageTags": + if err := awsAwsjson11_deserializeDocumentImageTagsList(&sv.ImageTags, value); err != nil { + return err + } + + case "platform": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Platform to be of type string, got %T instead", value) + } + sv.Platform = ptr.String(jtv) + } + + case "pushedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.PushedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "registry": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.Registry = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + 
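+ // Unrecognized keys fall through the default case above and are ignored,
+ // keeping the deserializer forward-compatible with fields added to the
+ // service model later.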
return nil +} + +func awsAwsjson11_deserializeDocumentCvssScore(v **types.CvssScore, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CvssScore + if *v == nil { + sv = &types.CvssScore{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "baseScore": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.BaseScore = f64 + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = math.Inf(-1) + + default: + return fmt.Errorf("unknown JSON number value: %s", jtv) + + } + sv.BaseScore = f64 + + default: + return fmt.Errorf("expected BaseScore to be a JSON Number, got %T instead", value) + + } + } + + case "scoringVector": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ScoringVector to be of type string, got %T instead", value) + } + sv.ScoringVector = ptr.String(jtv) + } + + case "source": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Source to be of type string, got %T instead", value) + } + sv.Source = ptr.String(jtv) + } + + case "version": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Version to be of type string, got %T instead", value) + } + sv.Version = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCvssScoreAdjustment(v **types.CvssScoreAdjustment, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CvssScoreAdjustment + if *v == nil { + sv = &types.CvssScoreAdjustment{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "metric": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Metric to be of type string, got %T instead", value) + } + sv.Metric = ptr.String(jtv) + } + + case "reason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Reason to be of type string, got %T instead", value) + } + sv.Reason = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCvssScoreAdjustmentList(v *[]types.CvssScoreAdjustment, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CvssScoreAdjustment + if *v == nil { + cv = []types.CvssScoreAdjustment{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CvssScoreAdjustment + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCvssScoreAdjustment(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func 
+func awsAwsjson11_deserializeDocumentCvssScoreDetails(v **types.CvssScoreDetails, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CvssScoreDetails
+	if *v == nil {
+		sv = &types.CvssScoreDetails{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "adjustments":
+			if err := awsAwsjson11_deserializeDocumentCvssScoreAdjustmentList(&sv.Adjustments, value); err != nil {
+				return err
+			}
+
+		case "score":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.Score = f64
+
+				case string:
+					var f64 float64
+					switch {
+					case strings.EqualFold(jtv, "NaN"):
+						f64 = math.NaN()
+
+					case strings.EqualFold(jtv, "Infinity"):
+						f64 = math.Inf(1)
+
+					case strings.EqualFold(jtv, "-Infinity"):
+						f64 = math.Inf(-1)
+
+					default:
+						return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+					}
+					sv.Score = f64
+
+				default:
+					return fmt.Errorf("expected Score to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "scoreSource":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Source to be of type string, got %T instead", value)
+				}
+				sv.ScoreSource = ptr.String(jtv)
+			}
+
+		case "scoringVector":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ScoringVector to be of type string, got %T instead", value)
+				}
+				sv.ScoringVector = ptr.String(jtv)
+			}
+
+		case "version":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Version to be of type string, got %T instead", value)
+				}
+				sv.Version = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCvssScoreList(v *[]types.CvssScore, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.CvssScore
+	if *v == nil {
+		cv = []types.CvssScore{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.CvssScore
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentCvssScore(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentEmptyUploadException(v **types.EmptyUploadException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.EmptyUploadException
+	if *v == nil {
+		sv = &types.EmptyUploadException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentEncryptionConfiguration(v **types.EncryptionConfiguration, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EncryptionConfiguration + if *v == nil { + sv = &types.EncryptionConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "encryptionType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionType to be of type string, got %T instead", value) + } + sv.EncryptionType = types.EncryptionType(jtv) + } + + case "kmsKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KmsKey to be of type string, got %T instead", value) + } + sv.KmsKey = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEnhancedImageScanFinding(v **types.EnhancedImageScanFinding, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EnhancedImageScanFinding + if *v == nil { + sv = &types.EnhancedImageScanFinding{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "awsAccountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.AwsAccountId = ptr.String(jtv) + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FindingDescription to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "findingArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FindingArn to be of type string, got %T instead", value) + } + sv.FindingArn = ptr.String(jtv) + } + + case "firstObservedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.FirstObservedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "lastObservedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastObservedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "packageVulnerabilityDetails": + if err := awsAwsjson11_deserializeDocumentPackageVulnerabilityDetails(&sv.PackageVulnerabilityDetails, value); err != nil { + return err + } + + case "remediation": + if err := awsAwsjson11_deserializeDocumentRemediation(&sv.Remediation, value); err != nil { + return err + } + + case "resources": + if err := awsAwsjson11_deserializeDocumentResourceList(&sv.Resources, value); err != nil { + return err + } + + case "score": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.Score = f64 + + case string: + var f64 float64 + switch { + case strings.EqualFold(jtv, "NaN"): + f64 = math.NaN() + + case strings.EqualFold(jtv, "Infinity"): + f64 = math.Inf(1) + + case strings.EqualFold(jtv, "-Infinity"): + f64 = 
+
+					default:
+						return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+					}
+					sv.Score = f64
+
+				default:
+					return fmt.Errorf("expected Score to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "scoreDetails":
+			if err := awsAwsjson11_deserializeDocumentScoreDetails(&sv.ScoreDetails, value); err != nil {
+				return err
+			}
+
+		case "severity":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Severity to be of type string, got %T instead", value)
+				}
+				sv.Severity = ptr.String(jtv)
+			}
+
+		case "status":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Status to be of type string, got %T instead", value)
+				}
+				sv.Status = ptr.String(jtv)
+			}
+
+		case "title":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Title to be of type string, got %T instead", value)
+				}
+				sv.Title = ptr.String(jtv)
+			}
+
+		case "type":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Type to be of type string, got %T instead", value)
+				}
+				sv.Type = ptr.String(jtv)
+			}
+
+		case "updatedAt":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentEnhancedImageScanFindingList(v *[]types.EnhancedImageScanFinding, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.EnhancedImageScanFinding
+	if *v == nil {
+		cv = []types.EnhancedImageScanFinding{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.EnhancedImageScanFinding
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentEnhancedImageScanFinding(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentFindingSeverityCounts(v *map[string]int32, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var mv map[string]int32
+	if *v == nil {
+		mv = map[string]int32{}
+	} else {
+		mv = *v
+	}
+
+	for key, value := range shape {
+		var parsedVal int32
+		if value != nil {
+			jtv, ok := value.(json.Number)
+			if !ok {
+				return fmt.Errorf("expected SeverityCount to be json.Number, got %T instead", value)
+			}
+			i64, err := jtv.Int64()
+			if err != nil {
+				return err
+			}
+			parsedVal = int32(i64)
+		}
+		mv[key] = parsedVal
+
+	}
+	*v = mv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImage(v **types.Image, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.Image
+	if *v == nil {
+		sv = &types.Image{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
"imageId": + if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { + return err + } + + case "imageManifest": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageManifest to be of type string, got %T instead", value) + } + sv.ImageManifest = ptr.String(jtv) + } + + case "imageManifestMediaType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) + } + sv.ImageManifestMediaType = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentImageAlreadyExistsException(v **types.ImageAlreadyExistsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImageAlreadyExistsException + if *v == nil { + sv = &types.ImageAlreadyExistsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentImageDetail(v **types.ImageDetail, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImageDetail + if *v == nil { + sv = &types.ImageDetail{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "artifactMediaType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) + } + sv.ArtifactMediaType = ptr.String(jtv) + } + + case "imageDigest": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) + } + sv.ImageDigest = ptr.String(jtv) + } + + case "imageManifestMediaType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MediaType to be of type string, got %T instead", value) + } + sv.ImageManifestMediaType = ptr.String(jtv) + } + + case "imagePushedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ImagePushedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected PushTimestamp to be a JSON Number, got %T instead", value) + + } + } + + case "imageScanFindingsSummary": + if err := awsAwsjson11_deserializeDocumentImageScanFindingsSummary(&sv.ImageScanFindingsSummary, value); err != nil { + return 
+			}
+
+		case "imageScanStatus":
+			if err := awsAwsjson11_deserializeDocumentImageScanStatus(&sv.ImageScanStatus, value); err != nil {
+				return err
+			}
+
+		case "imageSizeInBytes":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected ImageSizeInBytes to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.ImageSizeInBytes = ptr.Int64(i64)
+			}
+
+		case "imageTags":
+			if err := awsAwsjson11_deserializeDocumentImageTagList(&sv.ImageTags, value); err != nil {
+				return err
+			}
+
+		case "lastRecordedPullTime":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.LastRecordedPullTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected RecordedPullTimestamp to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		case "repositoryName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value)
+				}
+				sv.RepositoryName = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageDetailList(v *[]types.ImageDetail, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.ImageDetail
+	if *v == nil {
+		cv = []types.ImageDetail{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.ImageDetail
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentImageDetail(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageDigestDoesNotMatchException(v **types.ImageDigestDoesNotMatchException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImageDigestDoesNotMatchException
+	if *v == nil {
+		sv = &types.ImageDigestDoesNotMatchException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageFailure(v **types.ImageFailure, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImageFailure
+	if *v == nil {
+		sv = &types.ImageFailure{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "failureCode":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ImageFailureCode to be of type string, got %T instead", value)
fmt.Errorf("expected ImageFailureCode to be of type string, got %T instead", value) + } + sv.FailureCode = types.ImageFailureCode(jtv) + } + + case "failureReason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageFailureReason to be of type string, got %T instead", value) + } + sv.FailureReason = ptr.String(jtv) + } + + case "imageId": + if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentImageFailureList(v *[]types.ImageFailure, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ImageFailure + if *v == nil { + cv = []types.ImageFailure{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ImageFailure + destAddr := &col + if err := awsAwsjson11_deserializeDocumentImageFailure(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentImageIdentifier(v **types.ImageIdentifier, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImageIdentifier + if *v == nil { + sv = &types.ImageIdentifier{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "imageDigest": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) + } + sv.ImageDigest = ptr.String(jtv) + } + + case "imageTag": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageTag to be of type string, got %T instead", value) + } + sv.ImageTag = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentImageIdentifierList(v *[]types.ImageIdentifier, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ImageIdentifier + if *v == nil { + cv = []types.ImageIdentifier{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ImageIdentifier + destAddr := &col + if err := awsAwsjson11_deserializeDocumentImageIdentifier(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentImageList(v *[]types.Image, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Image + if *v == nil { + cv = []types.Image{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Image + destAddr := &col + if err := awsAwsjson11_deserializeDocumentImage(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = 
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageNotFoundException(v **types.ImageNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImageNotFoundException
+	if *v == nil {
+		sv = &types.ImageNotFoundException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageReplicationStatus(v **types.ImageReplicationStatus, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImageReplicationStatus
+	if *v == nil {
+		sv = &types.ImageReplicationStatus{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "failureCode":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ReplicationError to be of type string, got %T instead", value)
+				}
+				sv.FailureCode = ptr.String(jtv)
+			}
+
+		case "region":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Region to be of type string, got %T instead", value)
+				}
+				sv.Region = ptr.String(jtv)
+			}
+
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		case "status":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ReplicationStatus to be of type string, got %T instead", value)
+				}
+				sv.Status = types.ReplicationStatus(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageReplicationStatusList(v *[]types.ImageReplicationStatus, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.ImageReplicationStatus
+	if *v == nil {
+		cv = []types.ImageReplicationStatus{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.ImageReplicationStatus
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentImageReplicationStatus(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageScanFinding(v **types.ImageScanFinding, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImageScanFinding
+	if *v == nil {
+		sv = &types.ImageScanFinding{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
"attributes": + if err := awsAwsjson11_deserializeDocumentAttributeList(&sv.Attributes, value); err != nil { + return err + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FindingDescription to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FindingName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "severity": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FindingSeverity to be of type string, got %T instead", value) + } + sv.Severity = types.FindingSeverity(jtv) + } + + case "uri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Url to be of type string, got %T instead", value) + } + sv.Uri = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentImageScanFindingList(v *[]types.ImageScanFinding, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ImageScanFinding + if *v == nil { + cv = []types.ImageScanFinding{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ImageScanFinding + destAddr := &col + if err := awsAwsjson11_deserializeDocumentImageScanFinding(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentImageScanFindings(v **types.ImageScanFindings, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImageScanFindings + if *v == nil { + sv = &types.ImageScanFindings{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "enhancedFindings": + if err := awsAwsjson11_deserializeDocumentEnhancedImageScanFindingList(&sv.EnhancedFindings, value); err != nil { + return err + } + + case "findings": + if err := awsAwsjson11_deserializeDocumentImageScanFindingList(&sv.Findings, value); err != nil { + return err + } + + case "findingSeverityCounts": + if err := awsAwsjson11_deserializeDocumentFindingSeverityCounts(&sv.FindingSeverityCounts, value); err != nil { + return err + } + + case "imageScanCompletedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ImageScanCompletedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected ScanTimestamp to be a JSON Number, got %T instead", value) + + } + } + + case "vulnerabilitySourceUpdatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.VulnerabilitySourceUpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected VulnerabilitySourceUpdateTimestamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
+func awsAwsjson11_deserializeDocumentImageScanFindingsSummary(v **types.ImageScanFindingsSummary, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImageScanFindingsSummary
+	if *v == nil {
+		sv = &types.ImageScanFindingsSummary{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "findingSeverityCounts":
+			if err := awsAwsjson11_deserializeDocumentFindingSeverityCounts(&sv.FindingSeverityCounts, value); err != nil {
+				return err
+			}
+
+		case "imageScanCompletedAt":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.ImageScanCompletedAt = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected ScanTimestamp to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "vulnerabilitySourceUpdatedAt":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.VulnerabilitySourceUpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected VulnerabilitySourceUpdateTimestamp to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageScanningConfiguration(v **types.ImageScanningConfiguration, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImageScanningConfiguration
+	if *v == nil {
+		sv = &types.ImageScanningConfiguration{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "scanOnPush":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected ScanOnPushFlag to be of type *bool, got %T instead", value)
+				}
+				sv.ScanOnPush = jtv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageScanStatus(v **types.ImageScanStatus, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImageScanStatus
+	if *v == nil {
+		sv = &types.ImageScanStatus{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ScanStatusDescription to be of type string, got %T instead", value)
+				}
+				sv.Description = ptr.String(jtv)
+			}
+
+		case "status":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ScanStatus to be of type string, got %T instead", value)
+				}
+				sv.Status = types.ScanStatus(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageTagAlreadyExistsException(v **types.ImageTagAlreadyExistsException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ImageTagAlreadyExistsException
+	if *v == nil {
+		sv = &types.ImageTagAlreadyExistsException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageTagList(v *[]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []string
+	if *v == nil {
+		cv = []string{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected ImageTag to be of type string, got %T instead", value)
+			}
+			col = jtv
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentImageTagsList(v *[]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []string
+	if *v == nil {
+		cv = []string{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected ImageTag to be of type string, got %T instead", value)
+			}
+			col = jtv
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidLayerException(v **types.InvalidLayerException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidLayerException
+	if *v == nil {
+		sv = &types.InvalidLayerException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidLayerPartException(v **types.InvalidLayerPartException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidLayerPartException
+	if *v == nil {
+		sv = &types.InvalidLayerPartException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "lastValidByteReceived":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.LastValidByteReceived = ptr.Int64(i64)
+			}
+
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		case "repositoryName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value)
+				}
+				sv.RepositoryName = ptr.String(jtv)
+			}
+
+		case "uploadId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected UploadId to be of type string, got %T instead", value)
+				}
+				sv.UploadId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidParameterException(v **types.InvalidParameterException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidParameterException
+	if *v == nil {
+		sv = &types.InvalidParameterException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidTagParameterException(v **types.InvalidTagParameterException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidTagParameterException
+	if *v == nil {
+		sv = &types.InvalidTagParameterException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentKmsException(v **types.KmsException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.KmsException
+	if *v == nil {
+		sv = &types.KmsException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "kmsError":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KmsError to be of type string, got %T instead", value)
+				}
+				sv.KmsError = ptr.String(jtv)
+			}
+
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLayer(v **types.Layer, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.Layer
+	if *v == nil {
+		sv = &types.Layer{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "layerAvailability":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected LayerAvailability to be of type string, got %T instead", value)
+				}
+				sv.LayerAvailability = types.LayerAvailability(jtv)
+			}
+
+		case "layerDigest":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected LayerDigest to be of type string, got %T instead", value)
+				}
+				sv.LayerDigest = ptr.String(jtv)
+			}
+
+		case "layerSize":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected LayerSizeInBytes to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.LayerSize = ptr.Int64(i64)
+			}
+
+		case "mediaType":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected MediaType to be of type string, got %T instead", value)
+				}
+				sv.MediaType = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLayerAlreadyExistsException(v **types.LayerAlreadyExistsException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.LayerAlreadyExistsException
+	if *v == nil {
+		sv = &types.LayerAlreadyExistsException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLayerFailure(v **types.LayerFailure, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.LayerFailure
+	if *v == nil {
+		sv = &types.LayerFailure{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "failureCode":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected LayerFailureCode to be of type string, got %T instead", value)
+				}
+				sv.FailureCode = types.LayerFailureCode(jtv)
+			}
+
+		case "failureReason":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected LayerFailureReason to be of type string, got %T instead", value)
+				}
+				sv.FailureReason = ptr.String(jtv)
+			}
+
+		case "layerDigest":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected BatchedOperationLayerDigest to be of type string, got %T instead", value)
+				}
+				sv.LayerDigest = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLayerFailureList(v *[]types.LayerFailure, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.LayerFailure
+	if *v == nil {
+		cv = []types.LayerFailure{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.LayerFailure
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentLayerFailure(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLayerInaccessibleException(v **types.LayerInaccessibleException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.LayerInaccessibleException
+	if *v == nil {
+		sv = &types.LayerInaccessibleException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLayerList(v *[]types.Layer, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.Layer
+	if *v == nil {
+		cv = []types.Layer{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.Layer
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentLayer(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLayerPartTooSmallException(v **types.LayerPartTooSmallException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.LayerPartTooSmallException
+	if *v == nil {
+		sv = &types.LayerPartTooSmallException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLayersNotFoundException(v **types.LayersNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.LayersNotFoundException
+	if *v == nil {
+		sv = &types.LayersNotFoundException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLifecyclePolicyNotFoundException(v **types.LifecyclePolicyNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LifecyclePolicyNotFoundException + if *v == nil { + sv = &types.LifecyclePolicyNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewInProgressException(v **types.LifecyclePolicyPreviewInProgressException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LifecyclePolicyPreviewInProgressException + if *v == nil { + sv = &types.LifecyclePolicyPreviewInProgressException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewNotFoundException(v **types.LifecyclePolicyPreviewNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LifecyclePolicyPreviewNotFoundException + if *v == nil { + sv = &types.LifecyclePolicyPreviewNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewResult(v **types.LifecyclePolicyPreviewResult, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LifecyclePolicyPreviewResult + if *v == nil { + sv = &types.LifecyclePolicyPreviewResult{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "action": + if err := awsAwsjson11_deserializeDocumentLifecyclePolicyRuleAction(&sv.Action, value); err != nil { + return err + } + + case "appliedRulePriority": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LifecyclePolicyRulePriority to 
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.AppliedRulePriority = ptr.Int32(int32(i64))
+			}
+
+		case "imageDigest":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value)
+				}
+				sv.ImageDigest = ptr.String(jtv)
+			}
+
+		case "imagePushedAt":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.ImagePushedAt = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected PushTimestamp to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "imageTags":
+			if err := awsAwsjson11_deserializeDocumentImageTagList(&sv.ImageTags, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewResultList(v *[]types.LifecyclePolicyPreviewResult, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.LifecyclePolicyPreviewResult
+	if *v == nil {
+		cv = []types.LifecyclePolicyPreviewResult{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.LifecyclePolicyPreviewResult
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewResult(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewSummary(v **types.LifecyclePolicyPreviewSummary, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.LifecyclePolicyPreviewSummary
+	if *v == nil {
+		sv = &types.LifecyclePolicyPreviewSummary{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "expiringImageTotalCount":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected ImageCount to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.ExpiringImageTotalCount = ptr.Int32(int32(i64))
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLifecyclePolicyRuleAction(v **types.LifecyclePolicyRuleAction, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.LifecyclePolicyRuleAction
+	if *v == nil {
+		sv = &types.LifecyclePolicyRuleAction{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "type":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ImageActionType to be of type string, got %T instead", value)
+				}
+				sv.Type = types.ImageActionType(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.LimitExceededException
+	if *v == nil {
+		sv = &types.LimitExceededException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentPackageVulnerabilityDetails(v **types.PackageVulnerabilityDetails, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.PackageVulnerabilityDetails
+	if *v == nil {
+		sv = &types.PackageVulnerabilityDetails{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "cvss":
+			if err := awsAwsjson11_deserializeDocumentCvssScoreList(&sv.Cvss, value); err != nil {
+				return err
+			}
+
+		case "referenceUrls":
+			if err := awsAwsjson11_deserializeDocumentReferenceUrlsList(&sv.ReferenceUrls, value); err != nil {
+				return err
+			}
+
+		case "relatedVulnerabilities":
+			if err := awsAwsjson11_deserializeDocumentRelatedVulnerabilitiesList(&sv.RelatedVulnerabilities, value); err != nil {
+				return err
+			}
+
+		case "source":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Source to be of type string, got %T instead", value)
+				}
+				sv.Source = ptr.String(jtv)
+			}
+
+		case "sourceUrl":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Url to be of type string, got %T instead", value)
+				}
+				sv.SourceUrl = ptr.String(jtv)
+			}
+
+		case "vendorCreatedAt":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.VendorCreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "vendorSeverity":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Severity to be of type string, got %T instead", value)
+				}
+				sv.VendorSeverity = ptr.String(jtv)
+			}
+
+		case "vendorUpdatedAt":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.VendorUpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "vulnerabilityId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected VulnerabilityId to be of type string, got %T instead", value)
+				}
+				sv.VulnerabilityId = ptr.String(jtv)
+			}
+
+		case "vulnerablePackages":
+			if err := awsAwsjson11_deserializeDocumentVulnerablePackagesList(&sv.VulnerablePackages, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentPullThroughCacheRule(v **types.PullThroughCacheRule, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PullThroughCacheRule + if *v == nil { + sv = &types.PullThroughCacheRule{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) + + } + } + + case "ecrRepositoryPrefix": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PullThroughCacheRuleRepositoryPrefix to be of type string, got %T instead", value) + } + sv.EcrRepositoryPrefix = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "upstreamRegistryUrl": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Url to be of type string, got %T instead", value) + } + sv.UpstreamRegistryUrl = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPullThroughCacheRuleAlreadyExistsException(v **types.PullThroughCacheRuleAlreadyExistsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PullThroughCacheRuleAlreadyExistsException + if *v == nil { + sv = &types.PullThroughCacheRuleAlreadyExistsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPullThroughCacheRuleList(v *[]types.PullThroughCacheRule, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.PullThroughCacheRule + if *v == nil { + cv = []types.PullThroughCacheRule{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.PullThroughCacheRule + destAddr := &col + if err := awsAwsjson11_deserializeDocumentPullThroughCacheRule(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentPullThroughCacheRuleNotFoundException(v **types.PullThroughCacheRuleNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PullThroughCacheRuleNotFoundException + if *v == nil { + sv = 
&types.PullThroughCacheRuleNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRecommendation(v **types.Recommendation, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Recommendation + if *v == nil { + sv = &types.Recommendation{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "text": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RecommendationText to be of type string, got %T instead", value) + } + sv.Text = ptr.String(jtv) + } + + case "url": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Url to be of type string, got %T instead", value) + } + sv.Url = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentReferencedImagesNotFoundException(v **types.ReferencedImagesNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ReferencedImagesNotFoundException + if *v == nil { + sv = &types.ReferencedImagesNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentReferenceUrlsList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Url to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentRegistryPolicyNotFoundException(v **types.RegistryPolicyNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RegistryPolicyNotFoundException + if *v == nil { + sv = &types.RegistryPolicyNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of 
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(v **types.RegistryScanningConfiguration, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.RegistryScanningConfiguration
+	if *v == nil {
+		sv = &types.RegistryScanningConfiguration{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "rules":
+			if err := awsAwsjson11_deserializeDocumentRegistryScanningRuleList(&sv.Rules, value); err != nil {
+				return err
+			}
+
+		case "scanType":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ScanType to be of type string, got %T instead", value)
+				}
+				sv.ScanType = types.ScanType(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRegistryScanningRule(v **types.RegistryScanningRule, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.RegistryScanningRule
+	if *v == nil {
+		sv = &types.RegistryScanningRule{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "repositoryFilters":
+			if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(&sv.RepositoryFilters, value); err != nil {
+				return err
+			}
+
+		case "scanFrequency":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ScanFrequency to be of type string, got %T instead", value)
+				}
+				sv.ScanFrequency = types.ScanFrequency(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRegistryScanningRuleList(v *[]types.RegistryScanningRule, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.RegistryScanningRule
+	if *v == nil {
+		cv = []types.RegistryScanningRule{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.RegistryScanningRule
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentRegistryScanningRule(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRelatedVulnerabilitiesList(v *[]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []string
+	if *v == nil {
+		cv = []string{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected RelatedVulnerability to be of type string, got %T instead", value)
+			}
+			col = jtv
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRemediation(v **types.Remediation, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.Remediation
+	if *v == nil {
+		sv = &types.Remediation{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "recommendation":
+			if err := awsAwsjson11_deserializeDocumentRecommendation(&sv.Recommendation, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ReplicationConfiguration
+	if *v == nil {
+		sv = &types.ReplicationConfiguration{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "rules":
+			if err := awsAwsjson11_deserializeDocumentReplicationRuleList(&sv.Rules, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentReplicationDestination(v **types.ReplicationDestination, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ReplicationDestination
+	if *v == nil {
+		sv = &types.ReplicationDestination{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "region":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Region to be of type string, got %T instead", value)
+				}
+				sv.Region = ptr.String(jtv)
+			}
+
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentReplicationDestinationList(v *[]types.ReplicationDestination, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.ReplicationDestination
+	if *v == nil {
+		cv = []types.ReplicationDestination{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.ReplicationDestination
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentReplicationDestination(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentReplicationRule(v **types.ReplicationRule, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ReplicationRule
+	if *v == nil {
= &types.ReplicationRule{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "destinations": + if err := awsAwsjson11_deserializeDocumentReplicationDestinationList(&sv.Destinations, value); err != nil { + return err + } + + case "repositoryFilters": + if err := awsAwsjson11_deserializeDocumentRepositoryFilterList(&sv.RepositoryFilters, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentReplicationRuleList(v *[]types.ReplicationRule, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ReplicationRule + if *v == nil { + cv = []types.ReplicationRule{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ReplicationRule + destAddr := &col + if err := awsAwsjson11_deserializeDocumentReplicationRule(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentRepository(v **types.Repository, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Repository + if *v == nil { + sv = &types.Repository{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) + + } + } + + case "encryptionConfiguration": + if err := awsAwsjson11_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, value); err != nil { + return err + } + + case "imageScanningConfiguration": + if err := awsAwsjson11_deserializeDocumentImageScanningConfiguration(&sv.ImageScanningConfiguration, value); err != nil { + return err + } + + case "imageTagMutability": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageTagMutability to be of type string, got %T instead", value) + } + sv.ImageTagMutability = types.ImageTagMutability(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.RepositoryArn = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + case "repositoryUri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Url to be of type string, got %T instead", value) + } + sv.RepositoryUri = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
+func awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException(v **types.RepositoryAlreadyExistsException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.RepositoryAlreadyExistsException
+	if *v == nil {
+		sv = &types.RepositoryAlreadyExistsException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRepositoryFilter(v **types.RepositoryFilter, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.RepositoryFilter
+	if *v == nil {
+		sv = &types.RepositoryFilter{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "filter":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryFilterValue to be of type string, got %T instead", value)
+				}
+				sv.Filter = ptr.String(jtv)
+			}
+
+		case "filterType":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryFilterType to be of type string, got %T instead", value)
+				}
+				sv.FilterType = types.RepositoryFilterType(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRepositoryFilterList(v *[]types.RepositoryFilter, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.RepositoryFilter
+	if *v == nil {
+		cv = []types.RepositoryFilter{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.RepositoryFilter
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentRepositoryFilter(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRepositoryList(v *[]types.Repository, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.Repository
+	if *v == nil {
+		cv = []types.Repository{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.Repository
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentRepository(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRepositoryNotEmptyException(v **types.RepositoryNotEmptyException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RepositoryNotEmptyException + if *v == nil { + sv = &types.RepositoryNotEmptyException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRepositoryNotFoundException(v **types.RepositoryNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RepositoryNotFoundException + if *v == nil { + sv = &types.RepositoryNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(v **types.RepositoryPolicyNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RepositoryPolicyNotFoundException + if *v == nil { + sv = &types.RepositoryPolicyNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRepositoryScanningConfiguration(v **types.RepositoryScanningConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RepositoryScanningConfiguration + if *v == nil { + sv = &types.RepositoryScanningConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "appliedScanFilters": + if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(&sv.AppliedScanFilters, value); err != nil { + return err + } + + case "repositoryArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Arn to be of type string, got %T instead", value) + } + sv.RepositoryArn = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + case "scanFrequency": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ScanFrequency to be of type string, got %T instead", value) + } + sv.ScanFrequency = 
+				sv.ScanFrequency = types.ScanFrequency(jtv)
+			}
+
+		case "scanOnPush":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected ScanOnPushFlag to be of type *bool, got %T instead", value)
+				}
+				sv.ScanOnPush = jtv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailure(v **types.RepositoryScanningConfigurationFailure, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.RepositoryScanningConfigurationFailure
+	if *v == nil {
+		sv = &types.RepositoryScanningConfigurationFailure{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "failureCode":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ScanningConfigurationFailureCode to be of type string, got %T instead", value)
+				}
+				sv.FailureCode = types.ScanningConfigurationFailureCode(jtv)
+			}
+
+		case "failureReason":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ScanningConfigurationFailureReason to be of type string, got %T instead", value)
+				}
+				sv.FailureReason = ptr.String(jtv)
+			}
+
+		case "repositoryName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value)
+				}
+				sv.RepositoryName = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailureList(v *[]types.RepositoryScanningConfigurationFailure, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.RepositoryScanningConfigurationFailure
+	if *v == nil {
+		cv = []types.RepositoryScanningConfigurationFailure{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.RepositoryScanningConfigurationFailure
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailure(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationList(v *[]types.RepositoryScanningConfiguration, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.RepositoryScanningConfiguration
+	if *v == nil {
+		cv = []types.RepositoryScanningConfiguration{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.RepositoryScanningConfiguration
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfiguration(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentResource(v **types.Resource, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.Resource
+	if *v == nil {
+		sv = &types.Resource{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "details":
+			if err := awsAwsjson11_deserializeDocumentResourceDetails(&sv.Details, value); err != nil {
+				return err
+			}
+
+		case "id":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ResourceId to be of type string, got %T instead", value)
+				}
+				sv.Id = ptr.String(jtv)
+			}
+
+		case "tags":
+			if err := awsAwsjson11_deserializeDocumentTags(&sv.Tags, value); err != nil {
+				return err
+			}
+
+		case "type":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Type to be of type string, got %T instead", value)
+				}
+				sv.Type = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentResourceDetails(v **types.ResourceDetails, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ResourceDetails
+	if *v == nil {
+		sv = &types.ResourceDetails{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "awsEcrContainerImage":
+			if err := awsAwsjson11_deserializeDocumentAwsEcrContainerImageDetails(&sv.AwsEcrContainerImage, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentResourceList(v *[]types.Resource, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.Resource
+	if *v == nil {
+		cv = []types.Resource{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.Resource
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentResource(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentScanningRepositoryFilter(v **types.ScanningRepositoryFilter, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ScanningRepositoryFilter
+	if *v == nil {
+		sv = &types.ScanningRepositoryFilter{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "filter":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ScanningRepositoryFilterValue to be of type string, got %T instead", value)
+				}
+				sv.Filter = ptr.String(jtv)
+			}
+
+		case "filterType":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ScanningRepositoryFilterType to be of type string, got %T instead", value)
+				}
+				sv.FilterType = types.ScanningRepositoryFilterType(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentScanningRepositoryFilterList(v *[]types.ScanningRepositoryFilter, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.ScanningRepositoryFilter
+	if *v == nil {
+		cv = []types.ScanningRepositoryFilter{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.ScanningRepositoryFilter
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentScanningRepositoryFilter(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentScanNotFoundException(v **types.ScanNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ScanNotFoundException
+	if *v == nil {
+		sv = &types.ScanNotFoundException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentScoreDetails(v **types.ScoreDetails, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ScoreDetails
+	if *v == nil {
+		sv = &types.ScoreDetails{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "cvss":
+			if err := awsAwsjson11_deserializeDocumentCvssScoreDetails(&sv.Cvss, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentServerException(v **types.ServerException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ServerException
+	if *v == nil {
+		sv = &types.ServerException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.Tag
+	if *v == nil {
+		sv = &types.Tag{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Key":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TagKey to be of type string, got %T instead", value)
+				}
+				sv.Key = ptr.String(jtv)
+			}
+
+		case "Value":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TagValue to be of type string, got %T instead", value)
+				}
+				sv.Value = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.Tag
+	if *v == nil {
+		cv = []types.Tag{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.Tag
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentTag(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentTags(v *map[string]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var mv map[string]string
+	if *v == nil {
+		mv = map[string]string{}
+	} else {
+		mv = *v
+	}
+
+	for key, value := range shape {
+		var parsedVal string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected TagValue to be of type string, got %T instead", value)
+			}
+			parsedVal = jtv
+		}
+		mv[key] = parsedVal
+
+	}
+	*v = mv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentTooManyTagsException(v **types.TooManyTagsException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.TooManyTagsException
+	if *v == nil {
+		sv = &types.TooManyTagsException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentUnsupportedImageTypeException(v **types.UnsupportedImageTypeException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.UnsupportedImageTypeException
+	if *v == nil {
+		sv = &types.UnsupportedImageTypeException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentUnsupportedUpstreamRegistryException(v **types.UnsupportedUpstreamRegistryException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.UnsupportedUpstreamRegistryException
+	if *v == nil {
+		sv = &types.UnsupportedUpstreamRegistryException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentUploadNotFoundException(v **types.UploadNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.UploadNotFoundException
+	if *v == nil {
+		sv = &types.UploadNotFoundException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentValidationException(v **types.ValidationException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ValidationException
+	if *v == nil {
+		sv = &types.ValidationException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentVulnerablePackage(v **types.VulnerablePackage, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.VulnerablePackage
+	if *v == nil {
+		sv = &types.VulnerablePackage{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "arch":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Arch to be of type string, got %T instead", value)
+				}
+				sv.Arch = ptr.String(jtv)
+			}
+
+		case "epoch":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected Epoch to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.Epoch = ptr.Int32(int32(i64))
+			}
+
+		case "filePath":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected FilePath to be of type string, got %T instead", value)
+				}
+				sv.FilePath = ptr.String(jtv)
+			}
+
+		case "name":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected VulnerablePackageName to be of type string, got %T instead", value)
+				}
+				sv.Name = ptr.String(jtv)
+			}
+
+		case "packageManager":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
fmt.Errorf("expected PackageManager to be of type string, got %T instead", value) + } + sv.PackageManager = ptr.String(jtv) + } + + case "release": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Release to be of type string, got %T instead", value) + } + sv.Release = ptr.String(jtv) + } + + case "sourceLayerHash": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SourceLayerHash to be of type string, got %T instead", value) + } + sv.SourceLayerHash = ptr.String(jtv) + } + + case "version": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Version to be of type string, got %T instead", value) + } + sv.Version = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentVulnerablePackagesList(v *[]types.VulnerablePackage, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.VulnerablePackage + if *v == nil { + cv = []types.VulnerablePackage{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.VulnerablePackage + destAddr := &col + if err := awsAwsjson11_deserializeDocumentVulnerablePackage(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeOpDocumentBatchCheckLayerAvailabilityOutput(v **BatchCheckLayerAvailabilityOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchCheckLayerAvailabilityOutput + if *v == nil { + sv = &BatchCheckLayerAvailabilityOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentLayerFailureList(&sv.Failures, value); err != nil { + return err + } + + case "layers": + if err := awsAwsjson11_deserializeDocumentLayerList(&sv.Layers, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentBatchDeleteImageOutput(v **BatchDeleteImageOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchDeleteImageOutput + if *v == nil { + sv = &BatchDeleteImageOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentImageFailureList(&sv.Failures, value); err != nil { + return err + } + + case "imageIds": + if err := awsAwsjson11_deserializeDocumentImageIdentifierList(&sv.ImageIds, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentBatchGetImageOutput(v **BatchGetImageOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + 
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *BatchGetImageOutput
+	if *v == nil {
+		sv = &BatchGetImageOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "failures":
+			if err := awsAwsjson11_deserializeDocumentImageFailureList(&sv.Failures, value); err != nil {
+				return err
+			}
+
+		case "images":
+			if err := awsAwsjson11_deserializeDocumentImageList(&sv.Images, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentBatchGetRepositoryScanningConfigurationOutput(v **BatchGetRepositoryScanningConfigurationOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *BatchGetRepositoryScanningConfigurationOutput
+	if *v == nil {
+		sv = &BatchGetRepositoryScanningConfigurationOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "failures":
+			if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationFailureList(&sv.Failures, value); err != nil {
+				return err
+			}
+
+		case "scanningConfigurations":
+			if err := awsAwsjson11_deserializeDocumentRepositoryScanningConfigurationList(&sv.ScanningConfigurations, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentCompleteLayerUploadOutput(v **CompleteLayerUploadOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *CompleteLayerUploadOutput
+	if *v == nil {
+		sv = &CompleteLayerUploadOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "layerDigest":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected LayerDigest to be of type string, got %T instead", value)
+				}
+				sv.LayerDigest = ptr.String(jtv)
+			}
+
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		case "repositoryName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value)
+				}
+				sv.RepositoryName = ptr.String(jtv)
+			}
+
+		case "uploadId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected UploadId to be of type string, got %T instead", value)
+				}
+				sv.UploadId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentCreatePullThroughCacheRuleOutput(v **CreatePullThroughCacheRuleOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *CreatePullThroughCacheRuleOutput
+	if *v == nil {
+		sv = &CreatePullThroughCacheRuleOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "createdAt":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "ecrRepositoryPrefix":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PullThroughCacheRuleRepositoryPrefix to be of type string, got %T instead", value)
+				}
+				sv.EcrRepositoryPrefix = ptr.String(jtv)
+			}
+
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		case "upstreamRegistryUrl":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Url to be of type string, got %T instead", value)
+				}
+				sv.UpstreamRegistryUrl = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentCreateRepositoryOutput(v **CreateRepositoryOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *CreateRepositoryOutput
+	if *v == nil {
+		sv = &CreateRepositoryOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "repository":
+			if err := awsAwsjson11_deserializeDocumentRepository(&sv.Repository, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDeleteLifecyclePolicyOutput(v **DeleteLifecyclePolicyOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DeleteLifecyclePolicyOutput
+	if *v == nil {
+		sv = &DeleteLifecyclePolicyOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "lastEvaluatedAt":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.LastEvaluatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected EvaluationTimestamp to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "lifecyclePolicyText":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected LifecyclePolicyText to be of type string, got %T instead", value)
+				}
+				sv.LifecyclePolicyText = ptr.String(jtv)
+			}
+
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		case "repositoryName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value)
+				}
+				sv.RepositoryName = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDeletePullThroughCacheRuleOutput(v **DeletePullThroughCacheRuleOutput, value interface{}) error {
+	if v == nil {
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeletePullThroughCacheRuleOutput + if *v == nil { + sv = &DeletePullThroughCacheRuleOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "createdAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected CreationTimestamp to be a JSON Number, got %T instead", value) + + } + } + + case "ecrRepositoryPrefix": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PullThroughCacheRuleRepositoryPrefix to be of type string, got %T instead", value) + } + sv.EcrRepositoryPrefix = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "upstreamRegistryUrl": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Url to be of type string, got %T instead", value) + } + sv.UpstreamRegistryUrl = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteRegistryPolicyOutput(v **DeleteRegistryPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteRegistryPolicyOutput + if *v == nil { + sv = &DeleteRegistryPolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "policyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryPolicyText to be of type string, got %T instead", value) + } + sv.PolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteRepositoryOutput(v **DeleteRepositoryOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteRepositoryOutput + if *v == nil { + sv = &DeleteRepositoryOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "repository": + if err := awsAwsjson11_deserializeDocumentRepository(&sv.Repository, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteRepositoryPolicyOutput(v **DeleteRepositoryPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } 
+
+	var sv *DeleteRepositoryPolicyOutput
+	if *v == nil {
+		sv = &DeleteRepositoryPolicyOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "policyText":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value)
+				}
+				sv.PolicyText = ptr.String(jtv)
+			}
+
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		case "repositoryName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value)
+				}
+				sv.RepositoryName = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDescribeImageReplicationStatusOutput(v **DescribeImageReplicationStatusOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DescribeImageReplicationStatusOutput
+	if *v == nil {
+		sv = &DescribeImageReplicationStatusOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "imageId":
+			if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil {
+				return err
+			}
+
+		case "replicationStatuses":
+			if err := awsAwsjson11_deserializeDocumentImageReplicationStatusList(&sv.ReplicationStatuses, value); err != nil {
+				return err
+			}
+
+		case "repositoryName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value)
+				}
+				sv.RepositoryName = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDescribeImageScanFindingsOutput(v **DescribeImageScanFindingsOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DescribeImageScanFindingsOutput
+	if *v == nil {
+		sv = &DescribeImageScanFindingsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "imageId":
+			if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil {
+				return err
+			}
+
+		case "imageScanFindings":
+			if err := awsAwsjson11_deserializeDocumentImageScanFindings(&sv.ImageScanFindings, value); err != nil {
+				return err
+			}
+
+		case "imageScanStatus":
+			if err := awsAwsjson11_deserializeDocumentImageScanStatus(&sv.ImageScanStatus, value); err != nil {
+				return err
+			}
+
+		case "nextToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected NextToken to be of type string, got %T instead", value)
+				}
+				sv.NextToken = ptr.String(jtv)
+			}
+
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		case "repositoryName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value)
+				}
+				sv.RepositoryName = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDescribeImagesOutput(v **DescribeImagesOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DescribeImagesOutput
+	if *v == nil {
+		sv = &DescribeImagesOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "imageDetails":
+			if err := awsAwsjson11_deserializeDocumentImageDetailList(&sv.ImageDetails, value); err != nil {
+				return err
+			}
+
+		case "nextToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected NextToken to be of type string, got %T instead", value)
+				}
+				sv.NextToken = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDescribePullThroughCacheRulesOutput(v **DescribePullThroughCacheRulesOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DescribePullThroughCacheRulesOutput
+	if *v == nil {
+		sv = &DescribePullThroughCacheRulesOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "nextToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected NextToken to be of type string, got %T instead", value)
+				}
+				sv.NextToken = ptr.String(jtv)
+			}
+
+		case "pullThroughCacheRules":
+			if err := awsAwsjson11_deserializeDocumentPullThroughCacheRuleList(&sv.PullThroughCacheRules, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDescribeRegistryOutput(v **DescribeRegistryOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DescribeRegistryOutput
+	if *v == nil {
+		sv = &DescribeRegistryOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		case "replicationConfiguration":
+			if err := awsAwsjson11_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDescribeRepositoriesOutput(v **DescribeRepositoriesOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DescribeRepositoriesOutput
+	if *v == nil {
+		sv = &DescribeRepositoriesOutput{}
+	} else {
+		sv = *v
+	}
+
shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "repositories": + if err := awsAwsjson11_deserializeDocumentRepositoryList(&sv.Repositories, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetAuthorizationTokenOutput(v **GetAuthorizationTokenOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetAuthorizationTokenOutput + if *v == nil { + sv = &GetAuthorizationTokenOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authorizationData": + if err := awsAwsjson11_deserializeDocumentAuthorizationDataList(&sv.AuthorizationData, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetDownloadUrlForLayerOutput(v **GetDownloadUrlForLayerOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetDownloadUrlForLayerOutput + if *v == nil { + sv = &GetDownloadUrlForLayerOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "downloadUrl": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Url to be of type string, got %T instead", value) + } + sv.DownloadUrl = ptr.String(jtv) + } + + case "layerDigest": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LayerDigest to be of type string, got %T instead", value) + } + sv.LayerDigest = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetLifecyclePolicyOutput(v **GetLifecyclePolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetLifecyclePolicyOutput + if *v == nil { + sv = &GetLifecyclePolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "lastEvaluatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastEvaluatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected EvaluationTimestamp to be a JSON Number, got %T instead", value) + + } + } + + case "lifecyclePolicyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LifecyclePolicyText to be of type string, got %T instead", value) + } + sv.LifecyclePolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value 
!= nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetLifecyclePolicyPreviewOutput(v **GetLifecyclePolicyPreviewOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetLifecyclePolicyPreviewOutput + if *v == nil { + sv = &GetLifecyclePolicyPreviewOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "lifecyclePolicyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LifecyclePolicyText to be of type string, got %T instead", value) + } + sv.LifecyclePolicyText = ptr.String(jtv) + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "previewResults": + if err := awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewResultList(&sv.PreviewResults, value); err != nil { + return err + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LifecyclePolicyPreviewStatus to be of type string, got %T instead", value) + } + sv.Status = types.LifecyclePolicyPreviewStatus(jtv) + } + + case "summary": + if err := awsAwsjson11_deserializeDocumentLifecyclePolicyPreviewSummary(&sv.Summary, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetRegistryPolicyOutput(v **GetRegistryPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRegistryPolicyOutput + if *v == nil { + sv = &GetRegistryPolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "policyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryPolicyText to be of type string, got %T instead", value) + } + sv.PolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetRegistryScanningConfigurationOutput(v **GetRegistryScanningConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + 
return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRegistryScanningConfigurationOutput + if *v == nil { + sv = &GetRegistryScanningConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "scanningConfiguration": + if err := awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(&sv.ScanningConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(v **GetRepositoryPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRepositoryPolicyOutput + if *v == nil { + sv = &GetRepositoryPolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "policyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) + } + sv.PolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(v **InitiateLayerUploadOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *InitiateLayerUploadOutput + if *v == nil { + sv = &InitiateLayerUploadOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "partSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.PartSize = ptr.Int64(i64) + } + + case "uploadId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) + } + sv.UploadId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListImagesOutput(v **ListImagesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListImagesOutput + if *v == nil { + sv = &ListImagesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch 
key { + case "imageIds": + if err := awsAwsjson11_deserializeDocumentImageIdentifierList(&sv.ImageIds, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTagsForResourceOutput + if *v == nil { + sv = &ListTagsForResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "tags": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutImageOutput(v **PutImageOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutImageOutput + if *v == nil { + sv = &PutImageOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "image": + if err := awsAwsjson11_deserializeDocumentImage(&sv.Image, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutImageScanningConfigurationOutput(v **PutImageScanningConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutImageScanningConfigurationOutput + if *v == nil { + sv = &PutImageScanningConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "imageScanningConfiguration": + if err := awsAwsjson11_deserializeDocumentImageScanningConfiguration(&sv.ImageScanningConfiguration, value); err != nil { + return err + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutImageTagMutabilityOutput(v **PutImageTagMutabilityOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutImageTagMutabilityOutput + if *v == nil { + sv = &PutImageTagMutabilityOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case 
"imageTagMutability": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageTagMutability to be of type string, got %T instead", value) + } + sv.ImageTagMutability = types.ImageTagMutability(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutLifecyclePolicyOutput(v **PutLifecyclePolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutLifecyclePolicyOutput + if *v == nil { + sv = &PutLifecyclePolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "lifecyclePolicyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LifecyclePolicyText to be of type string, got %T instead", value) + } + sv.LifecyclePolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutRegistryPolicyOutput(v **PutRegistryPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutRegistryPolicyOutput + if *v == nil { + sv = &PutRegistryPolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "policyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryPolicyText to be of type string, got %T instead", value) + } + sv.PolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutRegistryScanningConfigurationOutput(v **PutRegistryScanningConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutRegistryScanningConfigurationOutput + if *v == nil { + sv = &PutRegistryScanningConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range 
shape { + switch key { + case "registryScanningConfiguration": + if err := awsAwsjson11_deserializeDocumentRegistryScanningConfiguration(&sv.RegistryScanningConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutReplicationConfigurationOutput(v **PutReplicationConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutReplicationConfigurationOutput + if *v == nil { + sv = &PutReplicationConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "replicationConfiguration": + if err := awsAwsjson11_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(v **SetRepositoryPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *SetRepositoryPolicyOutput + if *v == nil { + sv = &SetRepositoryPolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "policyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) + } + sv.PolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentStartImageScanOutput(v **StartImageScanOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartImageScanOutput + if *v == nil { + sv = &StartImageScanOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "imageId": + if err := awsAwsjson11_deserializeDocumentImageIdentifier(&sv.ImageId, value); err != nil { + return err + } + + case "imageScanStatus": + if err := awsAwsjson11_deserializeDocumentImageScanStatus(&sv.ImageScanStatus, value); err != nil { + return err + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value 
+ + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentStartLifecyclePolicyPreviewOutput(v **StartLifecyclePolicyPreviewOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartLifecyclePolicyPreviewOutput + if *v == nil { + sv = &StartLifecyclePolicyPreviewOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "lifecyclePolicyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LifecyclePolicyText to be of type string, got %T instead", value) + } + sv.LifecyclePolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + case "status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LifecyclePolicyPreviewStatus to be of type string, got %T instead", value) + } + sv.Status = types.LifecyclePolicyPreviewStatus(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentTagResourceOutput(v **TagResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *TagResourceOutput + if *v == nil { + sv = &TagResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUntagResourceOutput(v **UntagResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UntagResourceOutput + if *v == nil { + sv = &UntagResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUploadLayerPartOutput(v **UploadLayerPartOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UploadLayerPartOutput + if *v == nil { + sv = &UploadLayerPartOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "lastByteReceived": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.LastByteReceived = ptr.Int64(i64) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + 
return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + case "uploadId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) + } + sv.UploadId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/doc.go new file mode 100644 index 000000000..602df615b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/doc.go @@ -0,0 +1,16 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package ecr provides the API client, operations, and parameter types for Amazon +// EC2 Container Registry. +// +// Amazon Elastic Container Registry Amazon Elastic Container Registry (Amazon ECR) +// is a managed container image registry service. Customers can use the familiar +// Docker CLI, or their preferred client, to push, pull, and manage images. Amazon +// ECR provides a secure, scalable, and reliable registry for your Docker or Open +// Container Initiative (OCI) images. Amazon ECR supports private repositories with +// resource-based permissions using IAM so that specific users or Amazon EC2 +// instances can access repositories and images. Amazon ECR has service endpoints +// in each supported Region. For more information, see Amazon ECR endpoints +// (https://docs.aws.amazon.com/general/latest/gr/ecr.html) in the Amazon Web +// Services General Reference. +package ecr diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/endpoints.go new file mode 100644 index 000000000..e255bc9d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecr + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. 
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint. +func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "ecr" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + 
+ if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. +// If awsResolver returns aws.EndpointNotFoundError error, the resolver will use the the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/generated.json new file mode 100644 index 000000000..631a65bba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/generated.json @@ -0,0 +1,69 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0", + "github.com/jmespath/go-jmespath": "v0.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_BatchCheckLayerAvailability.go", + "api_op_BatchDeleteImage.go", + "api_op_BatchGetImage.go", + "api_op_BatchGetRepositoryScanningConfiguration.go", + "api_op_CompleteLayerUpload.go", + "api_op_CreatePullThroughCacheRule.go", + "api_op_CreateRepository.go", + "api_op_DeleteLifecyclePolicy.go", + "api_op_DeletePullThroughCacheRule.go", + "api_op_DeleteRegistryPolicy.go", + "api_op_DeleteRepository.go", + "api_op_DeleteRepositoryPolicy.go", + "api_op_DescribeImageReplicationStatus.go", + "api_op_DescribeImageScanFindings.go", + "api_op_DescribeImages.go", + "api_op_DescribePullThroughCacheRules.go", + "api_op_DescribeRegistry.go", + "api_op_DescribeRepositories.go", + "api_op_GetAuthorizationToken.go", + "api_op_GetDownloadUrlForLayer.go", + 
"api_op_GetLifecyclePolicy.go", + "api_op_GetLifecyclePolicyPreview.go", + "api_op_GetRegistryPolicy.go", + "api_op_GetRegistryScanningConfiguration.go", + "api_op_GetRepositoryPolicy.go", + "api_op_InitiateLayerUpload.go", + "api_op_ListImages.go", + "api_op_ListTagsForResource.go", + "api_op_PutImage.go", + "api_op_PutImageScanningConfiguration.go", + "api_op_PutImageTagMutability.go", + "api_op_PutLifecyclePolicy.go", + "api_op_PutRegistryPolicy.go", + "api_op_PutRegistryScanningConfiguration.go", + "api_op_PutReplicationConfiguration.go", + "api_op_SetRepositoryPolicy.go", + "api_op_StartImageScan.go", + "api_op_StartLifecyclePolicyPreview.go", + "api_op_TagResource.go", + "api_op_UntagResource.go", + "api_op_UploadLayerPart.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/ecr", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go new file mode 100644 index 000000000..15f5d82d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package ecr + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.18.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go new file mode 100644 index 000000000..53aaf4cf2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/internal/endpoints/endpoints.go @@ -0,0 +1,802 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver ECR endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "api.ecr.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: 
"ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ap-southeast-4.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-4", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "dkr-us-east-1", + }: endpoints.Endpoint{ + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "dkr-us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "dkr-us-east-2", + }: endpoints.Endpoint{ + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "dkr-us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "dkr-us-west-1", + }: endpoints.Endpoint{ + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "dkr-us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "dkr-us-west-2", + }: endpoints.Endpoint{ + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "dkr-us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: 
endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "fips-dkr-us-east-1", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-dkr-us-east-2", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-dkr-us-west-1", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-dkr-us-west-2", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-1", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-2", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-1", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-2", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.me-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + 
Region: "me-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "api.ecr.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "api.ecr-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + 
}: { + Hostname: "api.ecr-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-west-1", + }, + }, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "api.ecr-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "api.ecr.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "dkr-us-gov-east-1", + }: endpoints.Endpoint{ + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "dkr-us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "dkr-us-gov-west-1", + }: endpoints.Endpoint{ + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "dkr-us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-dkr-us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + 
endpoints.EndpointKey{ + Region: "fips-dkr-us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/serializers.go new file mode 100644 index 000000000..e513d0312 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/serializers.go @@ -0,0 +1,3570 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
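Editor's note before the generated serializers: every operation serializer in this file builds the same awsjson1.1 request, namely a POST to the service root with a Content-Type of application/x-amz-json-1.1 and an X-Amz-Target header of the form AmazonEC2ContainerRegistry_V20150921.<Operation>, with the operation input JSON-encoded into the body (the BatchCheckLayerAvailability serializer just below shows the full middleware version). Stripped of the middleware plumbing, the request for DescribeRepositories would look roughly like this sketch; the region, repository name, and the omitted SigV4 signature are placeholders:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Hand-built equivalent of what the generated DescribeRepositories
	// serializer produces before signing.
	body := strings.NewReader(`{"repositoryNames":["my-repo"]}`)
	req, err := http.NewRequest(http.MethodPost, "https://api.ecr.us-east-1.amazonaws.com/", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-amz-json-1.1")
	req.Header.Set("X-Amz-Target", "AmazonEC2ContainerRegistry_V20150921.DescribeRepositories")
	// A real call would still need SigV4 signing, which the SDK's middleware stack adds.
	fmt.Println(req.Method, req.URL, req.Header.Get("X-Amz-Target"))
}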
+ +package ecr + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsjson11_serializeOpBatchCheckLayerAvailability struct { +} + +func (*awsAwsjson11_serializeOpBatchCheckLayerAvailability) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpBatchCheckLayerAvailability) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchCheckLayerAvailabilityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.BatchCheckLayerAvailability") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentBatchCheckLayerAvailabilityInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpBatchDeleteImage struct { +} + +func (*awsAwsjson11_serializeOpBatchDeleteImage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpBatchDeleteImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchDeleteImageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.BatchDeleteImage") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentBatchDeleteImageInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpBatchGetImage struct { +} + +func (*awsAwsjson11_serializeOpBatchGetImage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpBatchGetImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchGetImageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.BatchGetImage") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentBatchGetImageInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpBatchGetRepositoryScanningConfiguration struct { +} + +func (*awsAwsjson11_serializeOpBatchGetRepositoryScanningConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpBatchGetRepositoryScanningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next 
middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchGetRepositoryScanningConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.BatchGetRepositoryScanningConfiguration") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentBatchGetRepositoryScanningConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCompleteLayerUpload struct { +} + +func (*awsAwsjson11_serializeOpCompleteLayerUpload) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCompleteLayerUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CompleteLayerUploadInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.CompleteLayerUpload") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCompleteLayerUploadInput(input, 
jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreatePullThroughCacheRule struct { +} + +func (*awsAwsjson11_serializeOpCreatePullThroughCacheRule) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreatePullThroughCacheRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreatePullThroughCacheRuleInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.CreatePullThroughCacheRule") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreatePullThroughCacheRuleInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateRepository struct { +} + +func (*awsAwsjson11_serializeOpCreateRepository) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateRepository) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateRepositoryInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + 
request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.CreateRepository") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateRepositoryInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteLifecyclePolicy struct { +} + +func (*awsAwsjson11_serializeOpDeleteLifecyclePolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteLifecyclePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteLifecyclePolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeleteLifecyclePolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteLifecyclePolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeletePullThroughCacheRule struct { +} + +func (*awsAwsjson11_serializeOpDeletePullThroughCacheRule) ID() string { + return "OperationSerializer" +} + +func (m 
*awsAwsjson11_serializeOpDeletePullThroughCacheRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeletePullThroughCacheRuleInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeletePullThroughCacheRule") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeletePullThroughCacheRuleInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteRegistryPolicy struct { +} + +func (*awsAwsjson11_serializeOpDeleteRegistryPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteRegistryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteRegistryPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeleteRegistryPolicy") + + jsonEncoder := 
smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteRegistryPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteRepository struct { +} + +func (*awsAwsjson11_serializeOpDeleteRepository) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteRepository) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteRepositoryInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeleteRepository") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteRepositoryInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteRepositoryPolicy struct { +} + +func (*awsAwsjson11_serializeOpDeleteRepositoryPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteRepositoryPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + 
request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DeleteRepositoryPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteRepositoryPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeImageReplicationStatus struct { +} + +func (*awsAwsjson11_serializeOpDescribeImageReplicationStatus) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeImageReplicationStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeImageReplicationStatusInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeImageReplicationStatus") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeImageReplicationStatusInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeImages struct { +} + +func 
(*awsAwsjson11_serializeOpDescribeImages) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeImages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeImagesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeImages") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeImagesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeImageScanFindings struct { +} + +func (*awsAwsjson11_serializeOpDescribeImageScanFindings) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeImageScanFindings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeImageScanFindingsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeImageScanFindings") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeImageScanFindingsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribePullThroughCacheRules struct { +} + +func (*awsAwsjson11_serializeOpDescribePullThroughCacheRules) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribePullThroughCacheRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribePullThroughCacheRulesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribePullThroughCacheRules") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribePullThroughCacheRulesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeRegistry struct { +} + +func (*awsAwsjson11_serializeOpDescribeRegistry) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeRegistry) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeRegistryInput) + _ = input + if 
!ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeRegistry") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeRegistryInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeRepositories struct { +} + +func (*awsAwsjson11_serializeOpDescribeRepositories) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeRepositories) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeRepositoriesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.DescribeRepositories") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeRepositoriesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = 
request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetAuthorizationToken struct { +} + +func (*awsAwsjson11_serializeOpGetAuthorizationToken) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetAuthorizationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetAuthorizationTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetAuthorizationToken") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetAuthorizationTokenInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetDownloadUrlForLayer struct { +} + +func (*awsAwsjson11_serializeOpGetDownloadUrlForLayer) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetDownloadUrlForLayer) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetDownloadUrlForLayerInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetDownloadUrlForLayer") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetDownloadUrlForLayerInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetLifecyclePolicy struct { +} + +func (*awsAwsjson11_serializeOpGetLifecyclePolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetLifecyclePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetLifecyclePolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetLifecyclePolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetLifecyclePolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetLifecyclePolicyPreview struct { +} + +func (*awsAwsjson11_serializeOpGetLifecyclePolicyPreview) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetLifecyclePolicyPreview) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := 
in.Parameters.(*GetLifecyclePolicyPreviewInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetLifecyclePolicyPreview") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetLifecyclePolicyPreviewInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetRegistryPolicy struct { +} + +func (*awsAwsjson11_serializeOpGetRegistryPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetRegistryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRegistryPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetRegistryPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetRegistryPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, 
metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetRegistryScanningConfiguration struct { +} + +func (*awsAwsjson11_serializeOpGetRegistryScanningConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetRegistryScanningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRegistryScanningConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetRegistryScanningConfiguration") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetRegistryScanningConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetRepositoryPolicy struct { +} + +func (*awsAwsjson11_serializeOpGetRepositoryPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRepositoryPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, 
request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.GetRepositoryPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetRepositoryPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpInitiateLayerUpload struct { +} + +func (*awsAwsjson11_serializeOpInitiateLayerUpload) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpInitiateLayerUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*InitiateLayerUploadInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.InitiateLayerUpload") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListImages struct { +} + +func (*awsAwsjson11_serializeOpListImages) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListImages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListImagesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ListImages") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListImagesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListTagsForResource struct { +} + +func (*awsAwsjson11_serializeOpListTagsForResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTagsForResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.ListTagsForResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutImage struct { +} + +func (*awsAwsjson11_serializeOpPutImage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutImageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImage") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutImageInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutImageScanningConfiguration struct { +} + +func (*awsAwsjson11_serializeOpPutImageScanningConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutImageScanningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutImageScanningConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if 
err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImageScanningConfiguration") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutImageScanningConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutImageTagMutability struct { +} + +func (*awsAwsjson11_serializeOpPutImageTagMutability) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutImageTagMutability) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutImageTagMutabilityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutImageTagMutability") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutImageTagMutabilityInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutLifecyclePolicy struct { +} + +func (*awsAwsjson11_serializeOpPutLifecyclePolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutLifecyclePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutLifecyclePolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutLifecyclePolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutLifecyclePolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutRegistryPolicy struct { +} + +func (*awsAwsjson11_serializeOpPutRegistryPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutRegistryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutRegistryPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutRegistryPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutRegistryPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutRegistryScanningConfiguration struct { +} + +func (*awsAwsjson11_serializeOpPutRegistryScanningConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutRegistryScanningConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutRegistryScanningConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutRegistryScanningConfiguration") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutRegistryScanningConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutReplicationConfiguration struct { +} + +func (*awsAwsjson11_serializeOpPutReplicationConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutReplicationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutReplicationConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + 
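// All operation serializers in this file share the awsjson1.1 wire shape: the request is POSTed to "/" (the path.Join above only preserves a base path set on a custom endpoint), the operation is selected via the X-Amz-Target header rather than the URL, and the input struct is marshaled into the body as a JSON document. +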
request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.PutReplicationConfiguration") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutReplicationConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpSetRepositoryPolicy struct { +} + +func (*awsAwsjson11_serializeOpSetRepositoryPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpSetRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SetRepositoryPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.SetRepositoryPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentSetRepositoryPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpStartImageScan struct { +} + +func (*awsAwsjson11_serializeOpStartImageScan) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpStartImageScan) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + 
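// HandleSerialize runs in the serialize step of the smithy middleware stack: it takes the typed input from in.Parameters, encodes it onto the outgoing smithyhttp.Request, and delegates to the next handler; marshaling failures are wrapped in *smithy.SerializationError so they stay distinguishable from transport errors. +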
request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartImageScanInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.StartImageScan") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentStartImageScanInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpStartLifecyclePolicyPreview struct { +} + +func (*awsAwsjson11_serializeOpStartLifecyclePolicyPreview) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpStartLifecyclePolicyPreview) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartLifecyclePolicyPreviewInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.StartLifecyclePolicyPreview") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentStartLifecyclePolicyPreviewInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpTagResource struct { +} + +func (*awsAwsjson11_serializeOpTagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.TagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUntagResource struct { +} + +func (*awsAwsjson11_serializeOpUntagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UntagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = 
"POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.UntagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUploadLayerPart struct { +} + +func (*awsAwsjson11_serializeOpUploadLayerPart) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUploadLayerPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UploadLayerPartInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonEC2ContainerRegistry_V20150921.UploadLayerPart") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUploadLayerPartInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsAwsjson11_serializeDocumentBatchedOperationLayerDigestList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentDescribeImagesFilter(v *types.DescribeImagesFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.TagStatus) > 0 { + ok := 
object.Key("tagStatus") + ok.String(string(v.TagStatus)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentEncryptionConfiguration(v *types.EncryptionConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.EncryptionType) > 0 { + ok := object.Key("encryptionType") + ok.String(string(v.EncryptionType)) + } + + if v.KmsKey != nil { + ok := object.Key("kmsKey") + ok.String(*v.KmsKey) + } + + return nil +} + +func awsAwsjson11_serializeDocumentGetAuthorizationTokenRegistryIdList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentImageIdentifier(v *types.ImageIdentifier, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageDigest != nil { + ok := object.Key("imageDigest") + ok.String(*v.ImageDigest) + } + + if v.ImageTag != nil { + ok := object.Key("imageTag") + ok.String(*v.ImageTag) + } + + return nil +} + +func awsAwsjson11_serializeDocumentImageIdentifierList(v []types.ImageIdentifier, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentImageIdentifier(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentImageScanningConfiguration(v *types.ImageScanningConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ScanOnPush { + ok := object.Key("scanOnPush") + ok.Boolean(v.ScanOnPush) + } + + return nil +} + +func awsAwsjson11_serializeDocumentLayerDigestList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentLifecyclePolicyPreviewFilter(v *types.LifecyclePolicyPreviewFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.TagStatus) > 0 { + ok := object.Key("tagStatus") + ok.String(string(v.TagStatus)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentListImagesFilter(v *types.ListImagesFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.TagStatus) > 0 { + ok := object.Key("tagStatus") + ok.String(string(v.TagStatus)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentMediaTypeList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentPullThroughCacheRuleRepositoryPrefixList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentRegistryScanningRule(v *types.RegistryScanningRule, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RepositoryFilters != nil { + ok := object.Key("repositoryFilters") + if err := awsAwsjson11_serializeDocumentScanningRepositoryFilterList(v.RepositoryFilters, ok); err != nil { + return err + } + } + + if len(v.ScanFrequency) > 0 { + ok := object.Key("scanFrequency") + ok.String(string(v.ScanFrequency)) + } + + return nil +} + +func 
awsAwsjson11_serializeDocumentRegistryScanningRuleList(v []types.RegistryScanningRule, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentRegistryScanningRule(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentReplicationConfiguration(v *types.ReplicationConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Rules != nil { + ok := object.Key("rules") + if err := awsAwsjson11_serializeDocumentReplicationRuleList(v.Rules, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentReplicationDestination(v *types.ReplicationDestination, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Region != nil { + ok := object.Key("region") + ok.String(*v.Region) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + return nil +} + +func awsAwsjson11_serializeDocumentReplicationDestinationList(v []types.ReplicationDestination, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentReplicationDestination(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentReplicationRule(v *types.ReplicationRule, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Destinations != nil { + ok := object.Key("destinations") + if err := awsAwsjson11_serializeDocumentReplicationDestinationList(v.Destinations, ok); err != nil { + return err + } + } + + if v.RepositoryFilters != nil { + ok := object.Key("repositoryFilters") + if err := awsAwsjson11_serializeDocumentRepositoryFilterList(v.RepositoryFilters, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentReplicationRuleList(v []types.ReplicationRule, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentReplicationRule(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentRepositoryFilter(v *types.RepositoryFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filter != nil { + ok := object.Key("filter") + ok.String(*v.Filter) + } + + if len(v.FilterType) > 0 { + ok := object.Key("filterType") + ok.String(string(v.FilterType)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentRepositoryFilterList(v []types.RepositoryFilter, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentRepositoryFilter(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentRepositoryNameList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentScanningConfigurationRepositoryNameList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func 
awsAwsjson11_serializeDocumentScanningRepositoryFilter(v *types.ScanningRepositoryFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filter != nil { + ok := object.Key("filter") + ok.String(*v.Filter) + } + + if len(v.FilterType) > 0 { + ok := object.Key("filterType") + ok.String(string(v.FilterType)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentScanningRepositoryFilterList(v []types.ScanningRepositoryFilter, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentScanningRepositoryFilter(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Key != nil { + ok := object.Key("Key") + ok.String(*v.Key) + } + + if v.Value != nil { + ok := object.Key("Value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeOpDocumentBatchCheckLayerAvailabilityInput(v *BatchCheckLayerAvailabilityInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LayerDigests != nil { + ok := object.Key("layerDigests") + if err := awsAwsjson11_serializeDocumentBatchedOperationLayerDigestList(v.LayerDigests, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentBatchDeleteImageInput(v *BatchDeleteImageInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageIds != nil { + ok := object.Key("imageIds") + if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentBatchGetImageInput(v *BatchGetImageInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AcceptedMediaTypes != nil { + ok := object.Key("acceptedMediaTypes") + if err := awsAwsjson11_serializeDocumentMediaTypeList(v.AcceptedMediaTypes, ok); err != nil { + return err + } + } + + if v.ImageIds != nil { + ok := object.Key("imageIds") + if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentBatchGetRepositoryScanningConfigurationInput(v 
*BatchGetRepositoryScanningConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RepositoryNames != nil { + ok := object.Key("repositoryNames") + if err := awsAwsjson11_serializeDocumentScanningConfigurationRepositoryNameList(v.RepositoryNames, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCompleteLayerUploadInput(v *CompleteLayerUploadInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LayerDigests != nil { + ok := object.Key("layerDigests") + if err := awsAwsjson11_serializeDocumentLayerDigestList(v.LayerDigests, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + if v.UploadId != nil { + ok := object.Key("uploadId") + ok.String(*v.UploadId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreatePullThroughCacheRuleInput(v *CreatePullThroughCacheRuleInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EcrRepositoryPrefix != nil { + ok := object.Key("ecrRepositoryPrefix") + ok.String(*v.EcrRepositoryPrefix) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.UpstreamRegistryUrl != nil { + ok := object.Key("upstreamRegistryUrl") + ok.String(*v.UpstreamRegistryUrl) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateRepositoryInput(v *CreateRepositoryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionConfiguration != nil { + ok := object.Key("encryptionConfiguration") + if err := awsAwsjson11_serializeDocumentEncryptionConfiguration(v.EncryptionConfiguration, ok); err != nil { + return err + } + } + + if v.ImageScanningConfiguration != nil { + ok := object.Key("imageScanningConfiguration") + if err := awsAwsjson11_serializeDocumentImageScanningConfiguration(v.ImageScanningConfiguration, ok); err != nil { + return err + } + } + + if len(v.ImageTagMutability) > 0 { + ok := object.Key("imageTagMutability") + ok.String(string(v.ImageTagMutability)) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteLifecyclePolicyInput(v *DeleteLifecyclePolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeletePullThroughCacheRuleInput(v *DeletePullThroughCacheRuleInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EcrRepositoryPrefix != nil { + ok := object.Key("ecrRepositoryPrefix") + ok.String(*v.EcrRepositoryPrefix) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteRegistryPolicyInput(v 
*DeleteRegistryPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteRepositoryInput(v *DeleteRepositoryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Force { + ok := object.Key("force") + ok.Boolean(v.Force) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteRepositoryPolicyInput(v *DeleteRepositoryPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeImageReplicationStatusInput(v *DescribeImageReplicationStatusInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageId != nil { + ok := object.Key("imageId") + if err := awsAwsjson11_serializeDocumentImageIdentifier(v.ImageId, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeImageScanFindingsInput(v *DescribeImageScanFindingsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageId != nil { + ok := object.Key("imageId") + if err := awsAwsjson11_serializeDocumentImageIdentifier(v.ImageId, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeImagesInput(v *DescribeImagesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filter != nil { + ok := object.Key("filter") + if err := awsAwsjson11_serializeDocumentDescribeImagesFilter(v.Filter, ok); err != nil { + return err + } + } + + if v.ImageIds != nil { + ok := object.Key("imageIds") + if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribePullThroughCacheRulesInput(v *DescribePullThroughCacheRulesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EcrRepositoryPrefixes != nil { + ok := object.Key("ecrRepositoryPrefixes") + if err := 
awsAwsjson11_serializeDocumentPullThroughCacheRuleRepositoryPrefixList(v.EcrRepositoryPrefixes, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeRegistryInput(v *DescribeRegistryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeRepositoriesInput(v *DescribeRepositoriesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryNames != nil { + ok := object.Key("repositoryNames") + if err := awsAwsjson11_serializeDocumentRepositoryNameList(v.RepositoryNames, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetAuthorizationTokenInput(v *GetAuthorizationTokenInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegistryIds != nil { + ok := object.Key("registryIds") + if err := awsAwsjson11_serializeDocumentGetAuthorizationTokenRegistryIdList(v.RegistryIds, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetDownloadUrlForLayerInput(v *GetDownloadUrlForLayerInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LayerDigest != nil { + ok := object.Key("layerDigest") + ok.String(*v.LayerDigest) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetLifecyclePolicyInput(v *GetLifecyclePolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetLifecyclePolicyPreviewInput(v *GetLifecyclePolicyPreviewInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filter != nil { + ok := object.Key("filter") + if err := awsAwsjson11_serializeDocumentLifecyclePolicyPreviewFilter(v.Filter, ok); err != nil { + return err + } + } + + if v.ImageIds != nil { + ok := object.Key("imageIds") + if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func 
awsAwsjson11_serializeOpDocumentGetRegistryPolicyInput(v *GetRegistryPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetRegistryScanningConfigurationInput(v *GetRegistryScanningConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetRepositoryPolicyInput(v *GetRepositoryPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(v *InitiateLayerUploadInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListImagesInput(v *ListImagesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filter != nil { + ok := object.Key("filter") + if err := awsAwsjson11_serializeDocumentListImagesFilter(v.Filter, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListTagsForResourceInput(v *ListTagsForResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("resourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutImageInput(v *PutImageInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageDigest != nil { + ok := object.Key("imageDigest") + ok.String(*v.ImageDigest) + } + + if v.ImageManifest != nil { + ok := object.Key("imageManifest") + ok.String(*v.ImageManifest) + } + + if v.ImageManifestMediaType != nil { + ok := object.Key("imageManifestMediaType") + ok.String(*v.ImageManifestMediaType) + } + + if v.ImageTag != nil { + ok := object.Key("imageTag") + ok.String(*v.ImageTag) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutImageScanningConfigurationInput(v *PutImageScanningConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageScanningConfiguration != nil { + ok := object.Key("imageScanningConfiguration") + if err := awsAwsjson11_serializeDocumentImageScanningConfiguration(v.ImageScanningConfiguration, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := 
object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutImageTagMutabilityInput(v *PutImageTagMutabilityInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ImageTagMutability) > 0 { + ok := object.Key("imageTagMutability") + ok.String(string(v.ImageTagMutability)) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutLifecyclePolicyInput(v *PutLifecyclePolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LifecyclePolicyText != nil { + ok := object.Key("lifecyclePolicyText") + ok.String(*v.LifecyclePolicyText) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutRegistryPolicyInput(v *PutRegistryPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.PolicyText != nil { + ok := object.Key("policyText") + ok.String(*v.PolicyText) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutRegistryScanningConfigurationInput(v *PutRegistryScanningConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Rules != nil { + ok := object.Key("rules") + if err := awsAwsjson11_serializeDocumentRegistryScanningRuleList(v.Rules, ok); err != nil { + return err + } + } + + if len(v.ScanType) > 0 { + ok := object.Key("scanType") + ok.String(string(v.ScanType)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutReplicationConfigurationInput(v *PutReplicationConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ReplicationConfiguration != nil { + ok := object.Key("replicationConfiguration") + if err := awsAwsjson11_serializeDocumentReplicationConfiguration(v.ReplicationConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentSetRepositoryPolicyInput(v *SetRepositoryPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Force { + ok := object.Key("force") + ok.Boolean(v.Force) + } + + if v.PolicyText != nil { + ok := object.Key("policyText") + ok.String(*v.PolicyText) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentStartImageScanInput(v *StartImageScanInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageId != nil { + ok := object.Key("imageId") + if err := awsAwsjson11_serializeDocumentImageIdentifier(v.ImageId, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentStartLifecyclePolicyPreviewInput(v *StartLifecyclePolicyPreviewInput, value smithyjson.Value) 
error { + object := value.Object() + defer object.Close() + + if v.LifecyclePolicyText != nil { + ok := object.Key("lifecyclePolicyText") + ok.String(*v.LifecyclePolicyText) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("resourceArn") + ok.String(*v.ResourceArn) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("resourceArn") + ok.String(*v.ResourceArn) + } + + if v.TagKeys != nil { + ok := object.Key("tagKeys") + if err := awsAwsjson11_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUploadLayerPartInput(v *UploadLayerPartInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LayerPartBlob != nil { + ok := object.Key("layerPartBlob") + ok.Base64EncodeBytes(v.LayerPartBlob) + } + + if v.PartFirstByte != nil { + ok := object.Key("partFirstByte") + ok.Long(*v.PartFirstByte) + } + + if v.PartLastByte != nil { + ok := object.Key("partLastByte") + ok.Long(*v.PartLastByte) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + if v.UploadId != nil { + ok := object.Key("uploadId") + ok.String(*v.UploadId) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go new file mode 100644 index 000000000..9356aabf1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go @@ -0,0 +1,324 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type EncryptionType string + +// Enum values for EncryptionType +const ( + EncryptionTypeAes256 EncryptionType = "AES256" + EncryptionTypeKms EncryptionType = "KMS" +) + +// Values returns all known values for EncryptionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (EncryptionType) Values() []EncryptionType { + return []EncryptionType{ + "AES256", + "KMS", + } +} + +type FindingSeverity string + +// Enum values for FindingSeverity +const ( + FindingSeverityInformational FindingSeverity = "INFORMATIONAL" + FindingSeverityLow FindingSeverity = "LOW" + FindingSeverityMedium FindingSeverity = "MEDIUM" + FindingSeverityHigh FindingSeverity = "HIGH" + FindingSeverityCritical FindingSeverity = "CRITICAL" + FindingSeverityUndefined FindingSeverity = "UNDEFINED" +) + +// Values returns all known values for FindingSeverity. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
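+// A caller can use Values to check a raw string against the known set before +// converting it, e.g. (a minimal illustrative sketch, not generated code): +// +// func isKnownSeverity(s string) bool { +// for _, v := range FindingSeverity("").Values() { +// if string(v) == s { +// return true +// } +// } +// return false +// }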
+func (FindingSeverity) Values() []FindingSeverity { + return []FindingSeverity{ + "INFORMATIONAL", + "LOW", + "MEDIUM", + "HIGH", + "CRITICAL", + "UNDEFINED", + } +} + +type ImageActionType string + +// Enum values for ImageActionType +const ( + ImageActionTypeExpire ImageActionType = "EXPIRE" +) + +// Values returns all known values for ImageActionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ImageActionType) Values() []ImageActionType { + return []ImageActionType{ + "EXPIRE", + } +} + +type ImageFailureCode string + +// Enum values for ImageFailureCode +const ( + ImageFailureCodeInvalidImageDigest ImageFailureCode = "InvalidImageDigest" + ImageFailureCodeInvalidImageTag ImageFailureCode = "InvalidImageTag" + ImageFailureCodeImageTagDoesNotMatchDigest ImageFailureCode = "ImageTagDoesNotMatchDigest" + ImageFailureCodeImageNotFound ImageFailureCode = "ImageNotFound" + ImageFailureCodeMissingDigestAndTag ImageFailureCode = "MissingDigestAndTag" + ImageFailureCodeImageReferencedByManifestList ImageFailureCode = "ImageReferencedByManifestList" + ImageFailureCodeKmsError ImageFailureCode = "KmsError" +) + +// Values returns all known values for ImageFailureCode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ImageFailureCode) Values() []ImageFailureCode { + return []ImageFailureCode{ + "InvalidImageDigest", + "InvalidImageTag", + "ImageTagDoesNotMatchDigest", + "ImageNotFound", + "MissingDigestAndTag", + "ImageReferencedByManifestList", + "KmsError", + } +} + +type ImageTagMutability string + +// Enum values for ImageTagMutability +const ( + ImageTagMutabilityMutable ImageTagMutability = "MUTABLE" + ImageTagMutabilityImmutable ImageTagMutability = "IMMUTABLE" +) + +// Values returns all known values for ImageTagMutability. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ImageTagMutability) Values() []ImageTagMutability { + return []ImageTagMutability{ + "MUTABLE", + "IMMUTABLE", + } +} + +type LayerAvailability string + +// Enum values for LayerAvailability +const ( + LayerAvailabilityAvailable LayerAvailability = "AVAILABLE" + LayerAvailabilityUnavailable LayerAvailability = "UNAVAILABLE" +) + +// Values returns all known values for LayerAvailability. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (LayerAvailability) Values() []LayerAvailability { + return []LayerAvailability{ + "AVAILABLE", + "UNAVAILABLE", + } +} + +type LayerFailureCode string + +// Enum values for LayerFailureCode +const ( + LayerFailureCodeInvalidLayerDigest LayerFailureCode = "InvalidLayerDigest" + LayerFailureCodeMissingLayerDigest LayerFailureCode = "MissingLayerDigest" +) + +// Values returns all known values for LayerFailureCode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
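// Editor's illustrative sketch (not part of the vendored files): the generated
// enums above are plain string types, and each Values() helper returns only the
// values this client version knows about, so unknown strings from the service
// are not necessarily invalid. A minimal, hedged example of forward-compatible
// validation; the import path assumes the vendored module layout.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// knownMutability reports whether s is an ImageTagMutability value this client
// version lists; per the generated docs, newer servers may return values that
// an older client's Values() slice does not contain.
func knownMutability(s string) bool {
	for _, v := range types.ImageTagMutabilityMutable.Values() {
		if string(v) == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(knownMutability("IMMUTABLE")) // true
	fmt.Println(knownMutability("FROZEN"))    // false: unknown to this client version
}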
+func (LayerFailureCode) Values() []LayerFailureCode { + return []LayerFailureCode{ + "InvalidLayerDigest", + "MissingLayerDigest", + } +} + +type LifecyclePolicyPreviewStatus string + +// Enum values for LifecyclePolicyPreviewStatus +const ( + LifecyclePolicyPreviewStatusInProgress LifecyclePolicyPreviewStatus = "IN_PROGRESS" + LifecyclePolicyPreviewStatusComplete LifecyclePolicyPreviewStatus = "COMPLETE" + LifecyclePolicyPreviewStatusExpired LifecyclePolicyPreviewStatus = "EXPIRED" + LifecyclePolicyPreviewStatusFailed LifecyclePolicyPreviewStatus = "FAILED" +) + +// Values returns all known values for LifecyclePolicyPreviewStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (LifecyclePolicyPreviewStatus) Values() []LifecyclePolicyPreviewStatus { + return []LifecyclePolicyPreviewStatus{ + "IN_PROGRESS", + "COMPLETE", + "EXPIRED", + "FAILED", + } +} + +type ReplicationStatus string + +// Enum values for ReplicationStatus +const ( + ReplicationStatusInProgress ReplicationStatus = "IN_PROGRESS" + ReplicationStatusComplete ReplicationStatus = "COMPLETE" + ReplicationStatusFailed ReplicationStatus = "FAILED" +) + +// Values returns all known values for ReplicationStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ReplicationStatus) Values() []ReplicationStatus { + return []ReplicationStatus{ + "IN_PROGRESS", + "COMPLETE", + "FAILED", + } +} + +type RepositoryFilterType string + +// Enum values for RepositoryFilterType +const ( + RepositoryFilterTypePrefixMatch RepositoryFilterType = "PREFIX_MATCH" +) + +// Values returns all known values for RepositoryFilterType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (RepositoryFilterType) Values() []RepositoryFilterType { + return []RepositoryFilterType{ + "PREFIX_MATCH", + } +} + +type ScanFrequency string + +// Enum values for ScanFrequency +const ( + ScanFrequencyScanOnPush ScanFrequency = "SCAN_ON_PUSH" + ScanFrequencyContinuousScan ScanFrequency = "CONTINUOUS_SCAN" + ScanFrequencyManual ScanFrequency = "MANUAL" +) + +// Values returns all known values for ScanFrequency. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ScanFrequency) Values() []ScanFrequency { + return []ScanFrequency{ + "SCAN_ON_PUSH", + "CONTINUOUS_SCAN", + "MANUAL", + } +} + +type ScanningConfigurationFailureCode string + +// Enum values for ScanningConfigurationFailureCode +const ( + ScanningConfigurationFailureCodeRepositoryNotFound ScanningConfigurationFailureCode = "REPOSITORY_NOT_FOUND" +) + +// Values returns all known values for ScanningConfigurationFailureCode. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. 
+func (ScanningConfigurationFailureCode) Values() []ScanningConfigurationFailureCode { + return []ScanningConfigurationFailureCode{ + "REPOSITORY_NOT_FOUND", + } +} + +type ScanningRepositoryFilterType string + +// Enum values for ScanningRepositoryFilterType +const ( + ScanningRepositoryFilterTypeWildcard ScanningRepositoryFilterType = "WILDCARD" +) + +// Values returns all known values for ScanningRepositoryFilterType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ScanningRepositoryFilterType) Values() []ScanningRepositoryFilterType { + return []ScanningRepositoryFilterType{ + "WILDCARD", + } +} + +type ScanStatus string + +// Enum values for ScanStatus +const ( + ScanStatusInProgress ScanStatus = "IN_PROGRESS" + ScanStatusComplete ScanStatus = "COMPLETE" + ScanStatusFailed ScanStatus = "FAILED" + ScanStatusUnsupportedImage ScanStatus = "UNSUPPORTED_IMAGE" + ScanStatusActive ScanStatus = "ACTIVE" + ScanStatusPending ScanStatus = "PENDING" + ScanStatusScanEligibilityExpired ScanStatus = "SCAN_ELIGIBILITY_EXPIRED" + ScanStatusFindingsUnavailable ScanStatus = "FINDINGS_UNAVAILABLE" +) + +// Values returns all known values for ScanStatus. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (ScanStatus) Values() []ScanStatus { + return []ScanStatus{ + "IN_PROGRESS", + "COMPLETE", + "FAILED", + "UNSUPPORTED_IMAGE", + "ACTIVE", + "PENDING", + "SCAN_ELIGIBILITY_EXPIRED", + "FINDINGS_UNAVAILABLE", + } +} + +type ScanType string + +// Enum values for ScanType +const ( + ScanTypeBasic ScanType = "BASIC" + ScanTypeEnhanced ScanType = "ENHANCED" +) + +// Values returns all known values for ScanType. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (ScanType) Values() []ScanType { + return []ScanType{ + "BASIC", + "ENHANCED", + } +} + +type TagStatus string + +// Enum values for TagStatus +const ( + TagStatusTagged TagStatus = "TAGGED" + TagStatusUntagged TagStatus = "UNTAGGED" + TagStatusAny TagStatus = "ANY" +) + +// Values returns all known values for TagStatus. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (TagStatus) Values() []TagStatus { + return []TagStatus{ + "TAGGED", + "UNTAGGED", + "ANY", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go new file mode 100644 index 000000000..3f72008bd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go @@ -0,0 +1,906 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The specified layer upload does not contain any layer parts. 
+type EmptyUploadException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *EmptyUploadException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *EmptyUploadException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *EmptyUploadException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "EmptyUploadException" + } + return *e.ErrorCodeOverride +} +func (e *EmptyUploadException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified image has already been pushed, and there were no changes to the +// manifest or image tag after the last push. +type ImageAlreadyExistsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ImageAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImageAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImageAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageAlreadyExistsException" + } + return *e.ErrorCodeOverride +} +func (e *ImageAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified image digest does not match the digest that Amazon ECR calculated +// for the image. +type ImageDigestDoesNotMatchException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ImageDigestDoesNotMatchException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImageDigestDoesNotMatchException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImageDigestDoesNotMatchException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageDigestDoesNotMatchException" + } + return *e.ErrorCodeOverride +} +func (e *ImageDigestDoesNotMatchException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The image requested does not exist in the specified repository. +type ImageNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ImageNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImageNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImageNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ImageNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified image is tagged with a tag that already exists. The repository is +// configured for tag immutability. 
+type ImageTagAlreadyExistsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ImageTagAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImageTagAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImageTagAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ImageTagAlreadyExistsException" + } + return *e.ErrorCodeOverride +} +func (e *ImageTagAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The layer digest calculation performed by Amazon ECR upon receipt of the image +// layer does not match the digest specified. +type InvalidLayerException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidLayerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidLayerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidLayerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidLayerException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidLayerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The layer part size is not valid, or the first byte specified is not consecutive +// to the last byte of a previous layer part upload. +type InvalidLayerPartException struct { + Message *string + + ErrorCodeOverride *string + + RegistryId *string + RepositoryName *string + UploadId *string + LastValidByteReceived *int64 + + noSmithyDocumentSerde +} + +func (e *InvalidLayerPartException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidLayerPartException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidLayerPartException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidLayerPartException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidLayerPartException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified parameter is invalid. Review the available parameters for the API +// request. +type InvalidParameterException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidParameterException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidParameterException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidParameterException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// An invalid parameter has been specified. Tag keys can have a maximum character +// length of 128 characters, and tag values can have a maximum length of 256 +// characters. 
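// Editor's hedged sketch (not part of the vendored files): the
// InvalidLayerPartException above carries LastValidByteReceived, which lets a
// chunked layer upload resume from the last byte the service accepted instead
// of restarting. Function and variable names here are illustrative; the caller
// is assumed to have set PartFirstByte/PartLastByte before the first attempt.
package ecrupload

import (
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// uploadPartWithResume sends one layer part and, if ECR reports that only a
// prefix of the part arrived, retries once from the first unreceived byte.
// blob is the full layer content; the Part*Byte fields are absolute offsets.
func uploadPartWithResume(ctx context.Context, c *ecr.Client, in *ecr.UploadLayerPartInput, blob []byte) error {
	_, err := c.UploadLayerPart(ctx, in)
	var ilp *types.InvalidLayerPartException
	if errors.As(err, &ilp) && ilp.LastValidByteReceived != nil {
		resume := *ilp.LastValidByteReceived + 1
		in.PartFirstByte = aws.Int64(resume)
		// Re-slice the payload so it matches the new first byte.
		in.LayerPartBlob = blob[resume : *in.PartLastByte+1]
		_, err = c.UploadLayerPart(ctx, in)
	}
	return err
}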
+type InvalidTagParameterException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidTagParameterException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidTagParameterException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidTagParameterException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidTagParameterException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidTagParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The operation failed due to a KMS exception. +type KmsException struct { + Message *string + + ErrorCodeOverride *string + + KmsError *string + + noSmithyDocumentSerde +} + +func (e *KmsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KmsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KmsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "KmsException" + } + return *e.ErrorCodeOverride +} +func (e *KmsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The image layer already exists in the associated repository. +type LayerAlreadyExistsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LayerAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LayerAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LayerAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayerAlreadyExistsException" + } + return *e.ErrorCodeOverride +} +func (e *LayerAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified layer is not available because it is not associated with an image. +// Unassociated image layers may be cleaned up at any time. +type LayerInaccessibleException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LayerInaccessibleException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LayerInaccessibleException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LayerInaccessibleException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayerInaccessibleException" + } + return *e.ErrorCodeOverride +} +func (e *LayerInaccessibleException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Layer parts must be at least 5 MiB in size. 
+type LayerPartTooSmallException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LayerPartTooSmallException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LayerPartTooSmallException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LayerPartTooSmallException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayerPartTooSmallException" + } + return *e.ErrorCodeOverride +} +func (e *LayerPartTooSmallException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified layers could not be found, or the specified layer is not valid for +// this repository. +type LayersNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LayersNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LayersNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LayersNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LayersNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *LayersNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The lifecycle policy could not be found, and no policy is set to the repository. +type LifecyclePolicyNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LifecyclePolicyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LifecyclePolicyNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LifecyclePolicyNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LifecyclePolicyNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *LifecyclePolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The previous lifecycle policy preview request has not completed. Wait and try +// again. +type LifecyclePolicyPreviewInProgressException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LifecyclePolicyPreviewInProgressException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LifecyclePolicyPreviewInProgressException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LifecyclePolicyPreviewInProgressException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LifecyclePolicyPreviewInProgressException" + } + return *e.ErrorCodeOverride +} +func (e *LifecyclePolicyPreviewInProgressException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// There is no dry run for this repository. 
+type LifecyclePolicyPreviewNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LifecyclePolicyPreviewNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LifecyclePolicyPreviewNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LifecyclePolicyPreviewNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LifecyclePolicyPreviewNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *LifecyclePolicyPreviewNotFoundException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The operation did not succeed because it would have exceeded a service limit for +// your account. For more information, see Amazon ECR service quotas +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) in +// the Amazon Elastic Container Registry User Guide. +type LimitExceededException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LimitExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LimitExceededException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "LimitExceededException" + } + return *e.ErrorCodeOverride +} +func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// A pull through cache rule with these settings already exists for the private +// registry. +type PullThroughCacheRuleAlreadyExistsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PullThroughCacheRuleAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PullThroughCacheRuleAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PullThroughCacheRuleAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PullThroughCacheRuleAlreadyExistsException" + } + return *e.ErrorCodeOverride +} +func (e *PullThroughCacheRuleAlreadyExistsException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The pull through cache rule was not found. Specify a valid pull through cache +// rule and try again. +type PullThroughCacheRuleNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PullThroughCacheRuleNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PullThroughCacheRuleNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PullThroughCacheRuleNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PullThroughCacheRuleNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *PullThroughCacheRuleNotFoundException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The manifest list is referencing an image that does not exist. 
+type ReferencedImagesNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ReferencedImagesNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ReferencedImagesNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ReferencedImagesNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ReferencedImagesNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ReferencedImagesNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The registry doesn't have an associated registry policy. +type RegistryPolicyNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *RegistryPolicyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RegistryPolicyNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RegistryPolicyNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RegistryPolicyNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *RegistryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified repository already exists in the specified registry. +type RepositoryAlreadyExistsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *RepositoryAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RepositoryAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RepositoryAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryAlreadyExistsException" + } + return *e.ErrorCodeOverride +} +func (e *RepositoryAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified repository contains images. To delete a repository that contains +// images, you must force the deletion with the force parameter. +type RepositoryNotEmptyException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *RepositoryNotEmptyException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RepositoryNotEmptyException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RepositoryNotEmptyException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryNotEmptyException" + } + return *e.ErrorCodeOverride +} +func (e *RepositoryNotEmptyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct +// registry. 
+type RepositoryNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *RepositoryNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RepositoryNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RepositoryNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *RepositoryNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified repository and registry combination does not have an associated +// repository policy. +type RepositoryPolicyNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *RepositoryPolicyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RepositoryPolicyNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RepositoryPolicyNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RepositoryPolicyNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *RepositoryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified image scan could not be found. Ensure that image scanning is +// enabled on the repository and try again. +type ScanNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ScanNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ScanNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ScanNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ScanNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ScanNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// These errors are usually caused by a server-side issue. +type ServerException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ServerException" + } + return *e.ErrorCodeOverride +} +func (e *ServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The list of tags on the repository is over the limit. The maximum number of tags +// that can be applied to a repository is 50. 
+type TooManyTagsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TooManyTagsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyTagsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyTagsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyTagsException" + } + return *e.ErrorCodeOverride +} +func (e *TooManyTagsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The image is of a type that cannot be scanned. +type UnsupportedImageTypeException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedImageTypeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedImageTypeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedImageTypeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedImageTypeException" + } + return *e.ErrorCodeOverride +} +func (e *UnsupportedImageTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified upstream registry isn't supported. +type UnsupportedUpstreamRegistryException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedUpstreamRegistryException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedUpstreamRegistryException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedUpstreamRegistryException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedUpstreamRegistryException" + } + return *e.ErrorCodeOverride +} +func (e *UnsupportedUpstreamRegistryException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The upload could not be found, or the specified upload ID is not valid for this +// repository. +type UploadNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UploadNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UploadNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UploadNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UploadNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *UploadNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// There was an exception validating this request. 
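// Editor's hedged sketch (not part of the vendored files): every generated
// exception in this file implements smithy's APIError interface via the
// ErrorCode/ErrorMessage/ErrorFault methods above, so callers can branch on a
// concrete type with errors.As or fall back to the generic interface. Assumes
// default AWS credentials; the repository name is a placeholder.
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
	smithy "github.com/aws/smithy-go"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)
	_, err = client.DescribeRepositories(context.TODO(), &ecr.DescribeRepositoriesInput{
		RepositoryNames: []string{"example-repo"}, // placeholder repository name
	})
	// Specific path: match one generated exception type.
	var notFound *types.RepositoryNotFoundException
	if errors.As(err, &notFound) {
		fmt.Println("repository does not exist:", notFound.ErrorMessage())
		return
	}
	// Generic path: any service error surfaces as smithy.APIError.
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		fmt.Println(apiErr.ErrorCode(), apiErr.ErrorMessage(), apiErr.ErrorFault())
	}
}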
+type ValidationException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ValidationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ValidationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ValidationException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ValidationException" + } + return *e.ErrorCodeOverride +} +func (e *ValidationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go new file mode 100644 index 000000000..22afaab80 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go @@ -0,0 +1,881 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// This data type is used in the ImageScanFinding data type. +type Attribute struct { + + // The attribute key. + // + // This member is required. + Key *string + + // The value assigned to the attribute key. + Value *string + + noSmithyDocumentSerde +} + +// An object representing authorization data for an Amazon ECR registry. +type AuthorizationData struct { + + // A base64-encoded string that contains authorization data for the specified + // Amazon ECR registry. When the string is decoded, it is presented in the format + // user:password for private registry authentication using docker login. + AuthorizationToken *string + + // The Unix time in seconds and milliseconds when the authorization token expires. + // Authorization tokens are valid for 12 hours. + ExpiresAt *time.Time + + // The registry URL to use for this authorization token in a docker login command. + // The Amazon ECR registry URL format is + // https://aws_account_id.dkr.ecr.region.amazonaws.com. For example, + // https://012345678910.dkr.ecr.us-east-1.amazonaws.com. + ProxyEndpoint *string + + noSmithyDocumentSerde +} + +// The image details of the Amazon ECR container image. +type AwsEcrContainerImageDetails struct { + + // The architecture of the Amazon ECR container image. + Architecture *string + + // The image author of the Amazon ECR container image. + Author *string + + // The image hash of the Amazon ECR container image. + ImageHash *string + + // The image tags attached to the Amazon ECR container image. + ImageTags []string + + // The platform of the Amazon ECR container image. + Platform *string + + // The date and time the Amazon ECR container image was pushed. + PushedAt *time.Time + + // The registry the Amazon ECR container image belongs to. + Registry *string + + // The name of the repository the Amazon ECR container image resides in. + RepositoryName *string + + noSmithyDocumentSerde +} + +// The CVSS score for a finding. +type CvssScore struct { + + // The base CVSS score used for the finding. + BaseScore float64 + + // The vector string of the CVSS score. + ScoringVector *string + + // The source of the CVSS score. + Source *string + + // The version of CVSS used for the score. + Version *string + + noSmithyDocumentSerde +} + +// Details on adjustments Amazon Inspector made to the CVSS score for a finding. +type CvssScoreAdjustment struct { + + // The metric used to adjust the CVSS score. + Metric *string + + // The reason the CVSS score has been adjusted.
+ Reason *string + + noSmithyDocumentSerde +} + +// Information about the CVSS score. +type CvssScoreDetails struct { + + // An object that contains details about adjustments Amazon Inspector made to the + // CVSS score. + Adjustments []CvssScoreAdjustment + + // The CVSS score. + Score float64 + + // The source for the CVSS score. + ScoreSource *string + + // The vector for the CVSS score. + ScoringVector *string + + // The CVSS version used in scoring. + Version *string + + noSmithyDocumentSerde +} + +// An object representing a filter on a DescribeImages operation. +type DescribeImagesFilter struct { + + // The tag status with which to filter your DescribeImages results. You can filter + // results based on whether they are TAGGED or UNTAGGED. + TagStatus TagStatus + + noSmithyDocumentSerde +} + +// The encryption configuration for the repository. This determines how the +// contents of your repository are encrypted at rest. By default, when no +// encryption configuration is set or the AES256 encryption type is used, Amazon +// ECR uses server-side encryption with Amazon S3-managed encryption keys, which +// encrypt your data at rest using an AES-256 encryption algorithm. This does not +// require any action on your part. For more control over the encryption of the +// contents of your repository, you can use server-side encryption with a Key +// Management Service (KMS) key to encrypt your +// images. For more information, see Amazon ECR encryption at rest +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) +// in the Amazon Elastic Container Registry User Guide. +type EncryptionConfiguration struct { + + // The encryption type to use. If you use the KMS encryption type, the contents of + // the repository will be encrypted using server-side encryption with a Key + // Management Service key stored in KMS. When you use KMS to encrypt your data, you + // can either use the default Amazon Web Services managed KMS key for Amazon ECR, + // or specify your own KMS key, which you already created. For more information, + // see Protecting data using server-side encryption with a KMS key stored in Key + // Management Service (SSE-KMS) + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the + // Amazon Simple Storage Service Console Developer Guide. If you use the AES256 + // encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed + // encryption keys, which encrypt the images in the repository using an AES-256 + // encryption algorithm. For more information, see Protecting data using + // server-side encryption with Amazon S3-managed encryption keys (SSE-S3) + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon Simple Storage Service Console Developer Guide. + // + // This member is required. + EncryptionType EncryptionType + + // If you use the KMS encryption type, specify the KMS key to use for encryption. + // The alias, key ID, or full ARN of the KMS key can be specified. The key must + // exist in the same Region as the repository. If no key is specified, the default + // Amazon Web Services managed KMS key for Amazon ECR will be used. + KmsKey *string + + noSmithyDocumentSerde +} + +// The details of an enhanced image scan. This is returned when enhanced scanning +// is enabled for your private registry. +type EnhancedImageScanFinding struct { + + // The Amazon Web Services account ID associated with the image.
+ AwsAccountId *string + + // The description of the finding. + Description *string + + // The Amazon Resource Name (ARN) of the finding. + FindingArn *string + + // The date and time that the finding was first observed. + FirstObservedAt *time.Time + + // The date and time that the finding was last observed. + LastObservedAt *time.Time + + // An object that contains the details of a package vulnerability finding. + PackageVulnerabilityDetails *PackageVulnerabilityDetails + + // An object that contains the details about how to remediate a finding. + Remediation *Remediation + + // Contains information on the resources involved in a finding. + Resources []Resource + + // The Amazon Inspector score given to the finding. + Score float64 + + // An object that contains details of the Amazon Inspector score. + ScoreDetails *ScoreDetails + + // The severity of the finding. + Severity *string + + // The status of the finding. + Status *string + + // The title of the finding. + Title *string + + // The type of the finding. + Type *string + + // The date and time the finding was last updated. + UpdatedAt *time.Time + + noSmithyDocumentSerde +} + +// An object representing an Amazon ECR image. +type Image struct { + + // An object containing the image tag and image digest associated with an image. + ImageId *ImageIdentifier + + // The image manifest associated with the image. + ImageManifest *string + + // The manifest media type of the image. + ImageManifestMediaType *string + + // The Amazon Web Services account ID associated with the registry containing the + // image. + RegistryId *string + + // The name of the repository associated with the image. + RepositoryName *string + + noSmithyDocumentSerde +} + +// An object that describes an image returned by a DescribeImages operation. +type ImageDetail struct { + + // The artifact media type of the image. + ArtifactMediaType *string + + // The sha256 digest of the image manifest. + ImageDigest *string + + // The media type of the image manifest. + ImageManifestMediaType *string + + // The date and time, expressed in standard JavaScript date format, at which the + // current image was pushed to the repository. + ImagePushedAt *time.Time + + // A summary of the last completed image scan. + ImageScanFindingsSummary *ImageScanFindingsSummary + + // The current state of the scan. + ImageScanStatus *ImageScanStatus + + // The size, in bytes, of the image in the repository. If the image is a manifest + // list, this will be the max size of all manifests in the list. Beginning with + // Docker version 1.9, the Docker client compresses image layers before pushing + // them to a V2 Docker registry. The output of the docker images command shows the + // uncompressed image size, so it may return a larger image size than the image + // sizes returned by DescribeImages. + ImageSizeInBytes *int64 + + // The list of tags associated with this image. + ImageTags []string + + // The date and time, expressed in standard JavaScript date format, when Amazon ECR + // recorded the last image pull. Amazon ECR refreshes the last image pull timestamp + // at least once every 24 hours. For example, if you pull an image once a day then + // the lastRecordedPullTime timestamp will indicate the exact time that the image + // was last pulled. However, if you pull an image once an hour, because Amazon ECR + // refreshes the lastRecordedPullTime timestamp at least once every 24 hours, the + // result may not be the exact time that the image was last pulled.
+ LastRecordedPullTime *time.Time + + // The Amazon Web Services account ID associated with the registry to which this + // image belongs. + RegistryId *string + + // The name of the repository to which this image belongs. + RepositoryName *string + + noSmithyDocumentSerde +} + +// An object representing an Amazon ECR image failure. +type ImageFailure struct { + + // The code associated with the failure. + FailureCode ImageFailureCode + + // The reason for the failure. + FailureReason *string + + // The image ID associated with the failure. + ImageId *ImageIdentifier + + noSmithyDocumentSerde +} + +// An object with identifying information for an image in an Amazon ECR repository. +type ImageIdentifier struct { + + // The sha256 digest of the image manifest. + ImageDigest *string + + // The tag used for the image. + ImageTag *string + + noSmithyDocumentSerde +} + +// The status of the replication process for an image. +type ImageReplicationStatus struct { + + // The failure code for a replication that has failed. + FailureCode *string + + // The destination Region for the image replication. + Region *string + + // The Amazon Web Services account ID associated with the registry to which the + // image belongs. + RegistryId *string + + // The image replication status. + Status ReplicationStatus + + noSmithyDocumentSerde +} + +// Contains information about an image scan finding. +type ImageScanFinding struct { + + // A collection of attributes of the host from which the finding is generated. + Attributes []Attribute + + // The description of the finding. + Description *string + + // The name associated with the finding, usually a CVE number. + Name *string + + // The finding severity. + Severity FindingSeverity + + // A link containing additional details about the security vulnerability. + Uri *string + + noSmithyDocumentSerde +} + +// The details of an image scan. +type ImageScanFindings struct { + + // Details about the enhanced scan findings from Amazon Inspector. + EnhancedFindings []EnhancedImageScanFinding + + // The image vulnerability counts, sorted by severity. + FindingSeverityCounts map[string]int32 + + // The findings from the image scan. + Findings []ImageScanFinding + + // The time of the last completed image scan. + ImageScanCompletedAt *time.Time + + // The time when the vulnerability data was last scanned. + VulnerabilitySourceUpdatedAt *time.Time + + noSmithyDocumentSerde +} + +// A summary of the last completed image scan. +type ImageScanFindingsSummary struct { + + // The image vulnerability counts, sorted by severity. + FindingSeverityCounts map[string]int32 + + // The time of the last completed image scan. + ImageScanCompletedAt *time.Time + + // The time when the vulnerability data was last scanned. + VulnerabilitySourceUpdatedAt *time.Time + + noSmithyDocumentSerde +} + +// The image scanning configuration for a repository. +type ImageScanningConfiguration struct { + + // The setting that determines whether images are scanned after being pushed to a + // repository. If set to true, images will be scanned after being pushed. If this + // parameter is not specified, it will default to false and images will not be + // scanned unless a scan is manually started with the API_StartImageScan + // (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_StartImageScan.html) + // API. + ScanOnPush bool + + noSmithyDocumentSerde +} + +// The current status of an image scan. +type ImageScanStatus struct { + + // The description of the image scan status. 
+ Description *string + + // The current state of an image scan. + Status ScanStatus + + noSmithyDocumentSerde +} + +// An object representing an Amazon ECR image layer. +type Layer struct { + + // The availability status of the image layer. + LayerAvailability LayerAvailability + + // The sha256 digest of the image layer. + LayerDigest *string + + // The size, in bytes, of the image layer. + LayerSize *int64 + + // The media type of the layer, such as + // application/vnd.docker.image.rootfs.diff.tar.gzip or + // application/vnd.oci.image.layer.v1.tar+gzip. + MediaType *string + + noSmithyDocumentSerde +} + +// An object representing an Amazon ECR image layer failure. +type LayerFailure struct { + + // The failure code associated with the failure. + FailureCode LayerFailureCode + + // The reason for the failure. + FailureReason *string + + // The layer digest associated with the failure. + LayerDigest *string + + noSmithyDocumentSerde +} + +// The filter for the lifecycle policy preview. +type LifecyclePolicyPreviewFilter struct { + + // The tag status of the image. + TagStatus TagStatus + + noSmithyDocumentSerde +} + +// The result of the lifecycle policy preview. +type LifecyclePolicyPreviewResult struct { + + // The type of action to be taken. + Action *LifecyclePolicyRuleAction + + // The priority of the applied rule. + AppliedRulePriority *int32 + + // The sha256 digest of the image manifest. + ImageDigest *string + + // The date and time, expressed in standard JavaScript date format, at which the + // current image was pushed to the repository. + ImagePushedAt *time.Time + + // The list of tags associated with this image. + ImageTags []string + + noSmithyDocumentSerde +} + +// The summary of the lifecycle policy preview request. +type LifecyclePolicyPreviewSummary struct { + + // The number of expiring images. + ExpiringImageTotalCount *int32 + + noSmithyDocumentSerde +} + +// The type of action to be taken. +type LifecyclePolicyRuleAction struct { + + // The type of action to be taken. + Type ImageActionType + + noSmithyDocumentSerde +} + +// An object representing a filter on a ListImages operation. +type ListImagesFilter struct { + + // The tag status with which to filter your ListImages results. You can filter + // results based on whether they are TAGGED or UNTAGGED. + TagStatus TagStatus + + noSmithyDocumentSerde +} + +// Information about a package vulnerability finding. +type PackageVulnerabilityDetails struct { + + // An object that contains details about the CVSS score of a finding. + Cvss []CvssScore + + // One or more URLs that contain details about this vulnerability type. + ReferenceUrls []string + + // One or more vulnerabilities related to the one identified in this finding. + RelatedVulnerabilities []string + + // The source of the vulnerability information. + Source *string + + // A URL to the source of the vulnerability information. + SourceUrl *string + + // The date and time that this vulnerability was first added to the vendor's + // database. + VendorCreatedAt *time.Time + + // The severity the vendor has given to this vulnerability type. + VendorSeverity *string + + // The date and time the vendor last updated this vulnerability in their database. + VendorUpdatedAt *time.Time + + // The ID given to this vulnerability. + VulnerabilityId *string + + // The packages impacted by this vulnerability. + VulnerablePackages []VulnerablePackage + + noSmithyDocumentSerde +} + +// The details of a pull through cache rule. 
+type PullThroughCacheRule struct { + + // The date and time the pull through cache was created. + CreatedAt *time.Time + + // The Amazon ECR repository prefix associated with the pull through cache rule. + EcrRepositoryPrefix *string + + // The Amazon Web Services account ID associated with the registry the pull through + // cache rule is associated with. + RegistryId *string + + // The upstream registry URL associated with the pull through cache rule. + UpstreamRegistryUrl *string + + noSmithyDocumentSerde +} + +// Details about the recommended course of action to remediate the finding. +type Recommendation struct { + + // The recommended course of action to remediate the finding. + Text *string + + // The URL address to the CVE remediation recommendations. + Url *string + + noSmithyDocumentSerde +} + +// The scanning configuration for a private registry. +type RegistryScanningConfiguration struct { + + // The scanning rules associated with the registry. + Rules []RegistryScanningRule + + // The type of scanning configured for the registry. + ScanType ScanType + + noSmithyDocumentSerde +} + +// The details of a scanning rule for a private registry. +type RegistryScanningRule struct { + + // The repository filters associated with the scanning configuration for a private + // registry. + // + // This member is required. + RepositoryFilters []ScanningRepositoryFilter + + // The frequency that scans are performed at for a private registry. When the + // ENHANCED scan type is specified, the supported scan frequencies are + // CONTINUOUS_SCAN and SCAN_ON_PUSH. When the BASIC scan type is specified, the + // SCAN_ON_PUSH and MANUAL scan frequencies are supported. + // + // This member is required. + ScanFrequency ScanFrequency + + noSmithyDocumentSerde +} + +// Information on how to remediate a finding. +type Remediation struct { + + // An object that contains information about the recommended course of action to + // remediate the finding. + Recommendation *Recommendation + + noSmithyDocumentSerde +} + +// The replication configuration for a registry. +type ReplicationConfiguration struct { + + // An array of objects representing the replication destinations and repository + // filters for a replication configuration. + // + // This member is required. + Rules []ReplicationRule + + noSmithyDocumentSerde +} + +// An array of objects representing the destination for a replication rule. +type ReplicationDestination struct { + + // The Region to replicate to. + // + // This member is required. + Region *string + + // The Amazon Web Services account ID of the Amazon ECR private registry to + // replicate to. When configuring cross-Region replication within your own + // registry, specify your own account ID. + // + // This member is required. + RegistryId *string + + noSmithyDocumentSerde +} + +// An array of objects representing the replication destinations and repository +// filters for a replication configuration. +type ReplicationRule struct { + + // An array of objects representing the destination for a replication rule. + // + // This member is required. + Destinations []ReplicationDestination + + // An array of objects representing the filters for a replication rule. Specifying + // a repository filter for a replication rule provides a method for controlling + // which repositories in a private registry are replicated. + RepositoryFilters []RepositoryFilter + + noSmithyDocumentSerde +} + +// An object representing a repository. 
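// Editor's hedged sketch (not part of the vendored files) of how the scanning
// types above fit together: a RegistryScanningRule pairs a ScanFrequency with
// ScanningRepositoryFilters (the filter type is defined further below), and is
// applied through PutRegistryScanningConfiguration, whose serializer appears
// earlier in this diff. Assumes default AWS credentials; values are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)
	// Scan every repository on push with the BASIC scan type; per the generated
	// docs, BASIC supports the SCAN_ON_PUSH and MANUAL frequencies, and the
	// WILDCARD filter "*" matches all repository names.
	_, err = client.PutRegistryScanningConfiguration(context.TODO(), &ecr.PutRegistryScanningConfigurationInput{
		ScanType: types.ScanTypeBasic,
		Rules: []types.RegistryScanningRule{{
			ScanFrequency: types.ScanFrequencyScanOnPush,
			RepositoryFilters: []types.ScanningRepositoryFilter{{
				Filter:     aws.String("*"),
				FilterType: types.ScanningRepositoryFilterTypeWildcard,
			}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}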
+type Repository struct { + + // The date and time, in JavaScript date format, when the repository was created. + CreatedAt *time.Time + + // The encryption configuration for the repository. This determines how the + // contents of your repository are encrypted at rest. + EncryptionConfiguration *EncryptionConfiguration + + // The image scanning configuration for a repository. + ImageScanningConfiguration *ImageScanningConfiguration + + // The tag mutability setting for the repository. + ImageTagMutability ImageTagMutability + + // The Amazon Web Services account ID associated with the registry that contains + // the repository. + RegistryId *string + + // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains + // the arn:aws:ecr namespace, followed by the region of the repository, Amazon Web + // Services account ID of the repository owner, repository namespace, and + // repository name. For example, arn:aws:ecr:region:012345678910:repository/test. + RepositoryArn *string + + // The name of the repository. + RepositoryName *string + + // The URI for the repository. You can use this URI for container image push and + // pull operations. + RepositoryUri *string + + noSmithyDocumentSerde +} + +// The filter settings used with image replication. Specifying a repository filter +// to a replication rule provides a method for controlling which repositories in a +// private registry are replicated. If no repository filter is specified, all +// images in the repository are replicated. +type RepositoryFilter struct { + + // The repository filter details. When the PREFIX_MATCH filter type is specified, + // this value is required and should be the repository name prefix to configure + // replication for. + // + // This member is required. + Filter *string + + // The repository filter type. The only supported value is PREFIX_MATCH, which is a + // repository name prefix specified with the filter parameter. + // + // This member is required. + FilterType RepositoryFilterType + + noSmithyDocumentSerde +} + +// The details of the scanning configuration for a repository. +type RepositoryScanningConfiguration struct { + + // The scan filters applied to the repository. + AppliedScanFilters []ScanningRepositoryFilter + + // The ARN of the repository. + RepositoryArn *string + + // The name of the repository. + RepositoryName *string + + // The scan frequency for the repository. + ScanFrequency ScanFrequency + + // Whether or not scan on push is configured for the repository. + ScanOnPush bool + + noSmithyDocumentSerde +} + +// The details about any failures associated with the scanning configuration of a +// repository. +type RepositoryScanningConfigurationFailure struct { + + // The failure code. + FailureCode ScanningConfigurationFailureCode + + // The reason for the failure. + FailureReason *string + + // The name of the repository. + RepositoryName *string + + noSmithyDocumentSerde +} + +// Details about the resource involved in a finding. +type Resource struct { + + // An object that contains details about the resource involved in a finding. + Details *ResourceDetails + + // The ID of the resource. + Id *string + + // The tags attached to the resource. + Tags map[string]string + + // The type of resource. + Type *string + + noSmithyDocumentSerde +} + +// Contains details about the resource involved in the finding. +type ResourceDetails struct { + + // An object that contains details about the Amazon ECR container image involved in + // the finding. 
+ AwsEcrContainerImage *AwsEcrContainerImageDetails + + noSmithyDocumentSerde +} + +// The details of a scanning repository filter. For more information on how to use +// filters, see Using filters +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#image-scanning-filters) +// in the Amazon Elastic Container Registry User Guide. +type ScanningRepositoryFilter struct { + + // The filter to use when scanning. + // + // This member is required. + Filter *string + + // The type associated with the filter. + // + // This member is required. + FilterType ScanningRepositoryFilterType + + noSmithyDocumentSerde +} + +// Information about the Amazon Inspector score given to a finding. +type ScoreDetails struct { + + // An object that contains details about the CVSS score given to a finding. + Cvss *CvssScoreDetails + + noSmithyDocumentSerde +} + +// The metadata to apply to a resource to help you categorize and organize them. +// Each tag consists of a key and a value, both of which you define. Tag keys can +// have a maximum character length of 128 characters, and tag values can have a +// maximum length of 256 characters. +type Tag struct { + + // One part of a key-value pair that make up a tag. A key is a general label that + // acts like a category for more specific tag values. + Key *string + + // A value acts as a descriptor within a tag category (key). + Value *string + + noSmithyDocumentSerde +} + +// Information on the vulnerable package identified by a finding. +type VulnerablePackage struct { + + // The architecture of the vulnerable package. + Arch *string + + // The epoch of the vulnerable package. + Epoch *int32 + + // The file path of the vulnerable package. + FilePath *string + + // The name of the vulnerable package. + Name *string + + // The package manager of the vulnerable package. + PackageManager *string + + // The release of the vulnerable package. + Release *string + + // The source layer hash of the vulnerable package. + SourceLayerHash *string + + // The version of the vulnerable package. + Version *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/validators.go new file mode 100644 index 000000000..9baf8c8cf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/validators.go @@ -0,0 +1,1630 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
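+ // A minimal usage sketch of the validators below, assuming a configured
+ // ecr.Client named client and a context.Context named ctx (both are
+ // assumptions, not part of this file):
+ //
+ //	_, err := client.CreateRepository(ctx, &ecr.CreateRepositoryInput{})
+ //	// err is a smithy.InvalidParamsError: RepositoryName is required.
+ //
+ // addOpCreateRepositoryValidationMiddleware registers validateOpCreateRepository
+ // at the Initialize step of the middleware stack, so malformed input is
+ // rejected before any request is signed or sent.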
+ +package ecr + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpBatchCheckLayerAvailability struct { +} + +func (*validateOpBatchCheckLayerAvailability) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchCheckLayerAvailability) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchCheckLayerAvailabilityInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchCheckLayerAvailabilityInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpBatchDeleteImage struct { +} + +func (*validateOpBatchDeleteImage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchDeleteImage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchDeleteImageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchDeleteImageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpBatchGetImage struct { +} + +func (*validateOpBatchGetImage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchGetImage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchGetImageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchGetImageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpBatchGetRepositoryScanningConfiguration struct { +} + +func (*validateOpBatchGetRepositoryScanningConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchGetRepositoryScanningConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchGetRepositoryScanningConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchGetRepositoryScanningConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCompleteLayerUpload struct { +} + +func (*validateOpCompleteLayerUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCompleteLayerUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CompleteLayerUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := 
validateOpCompleteLayerUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreatePullThroughCacheRule struct { +} + +func (*validateOpCreatePullThroughCacheRule) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreatePullThroughCacheRule) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreatePullThroughCacheRuleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreatePullThroughCacheRuleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateRepository struct { +} + +func (*validateOpCreateRepository) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateRepository) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateRepositoryInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateRepositoryInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteLifecyclePolicy struct { +} + +func (*validateOpDeleteLifecyclePolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteLifecyclePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteLifecyclePolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteLifecyclePolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeletePullThroughCacheRule struct { +} + +func (*validateOpDeletePullThroughCacheRule) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeletePullThroughCacheRule) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeletePullThroughCacheRuleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeletePullThroughCacheRuleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteRepository struct { +} + +func (*validateOpDeleteRepository) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteRepository) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteRepositoryInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteRepositoryInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) 
+} + +type validateOpDeleteRepositoryPolicy struct { +} + +func (*validateOpDeleteRepositoryPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteRepositoryPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteRepositoryPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeImageReplicationStatus struct { +} + +func (*validateOpDescribeImageReplicationStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeImageReplicationStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeImageReplicationStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeImageReplicationStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeImageScanFindings struct { +} + +func (*validateOpDescribeImageScanFindings) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeImageScanFindings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeImageScanFindingsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeImageScanFindingsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeImages struct { +} + +func (*validateOpDescribeImages) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeImages) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeImagesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeImagesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetDownloadUrlForLayer struct { +} + +func (*validateOpGetDownloadUrlForLayer) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetDownloadUrlForLayer) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetDownloadUrlForLayerInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetDownloadUrlForLayerInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetLifecyclePolicy struct { +} + +func 
(*validateOpGetLifecyclePolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetLifecyclePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetLifecyclePolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetLifecyclePolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetLifecyclePolicyPreview struct { +} + +func (*validateOpGetLifecyclePolicyPreview) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetLifecyclePolicyPreview) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetLifecyclePolicyPreviewInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetLifecyclePolicyPreviewInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetRepositoryPolicy struct { +} + +func (*validateOpGetRepositoryPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetRepositoryPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetRepositoryPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpInitiateLayerUpload struct { +} + +func (*validateOpInitiateLayerUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpInitiateLayerUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*InitiateLayerUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpInitiateLayerUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListImages struct { +} + +func (*validateOpListImages) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListImages) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListImagesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListImagesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTagsForResource struct { +} + +func (*validateOpListTagsForResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, 
next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTagsForResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTagsForResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutImage struct { +} + +func (*validateOpPutImage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutImage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutImageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutImageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutImageScanningConfiguration struct { +} + +func (*validateOpPutImageScanningConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutImageScanningConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutImageScanningConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutImageScanningConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutImageTagMutability struct { +} + +func (*validateOpPutImageTagMutability) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutImageTagMutability) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutImageTagMutabilityInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutImageTagMutabilityInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutLifecyclePolicy struct { +} + +func (*validateOpPutLifecyclePolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutLifecyclePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutLifecyclePolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutLifecyclePolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutRegistryPolicy struct { +} + +func (*validateOpPutRegistryPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutRegistryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutRegistryPolicyInput) + if !ok { + 
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutRegistryPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutRegistryScanningConfiguration struct { +} + +func (*validateOpPutRegistryScanningConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutRegistryScanningConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutRegistryScanningConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutRegistryScanningConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutReplicationConfiguration struct { +} + +func (*validateOpPutReplicationConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutReplicationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutReplicationConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutReplicationConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpSetRepositoryPolicy struct { +} + +func (*validateOpSetRepositoryPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSetRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SetRepositoryPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSetRepositoryPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartImageScan struct { +} + +func (*validateOpStartImageScan) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartImageScan) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartImageScanInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartImageScanInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartLifecyclePolicyPreview struct { +} + +func (*validateOpStartLifecyclePolicyPreview) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartLifecyclePolicyPreview) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartLifecyclePolicyPreviewInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", 
in.Parameters) + } + if err := validateOpStartLifecyclePolicyPreviewInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUploadLayerPart struct { +} + +func (*validateOpUploadLayerPart) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUploadLayerPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UploadLayerPartInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUploadLayerPartInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpBatchCheckLayerAvailabilityValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchCheckLayerAvailability{}, middleware.After) +} + +func addOpBatchDeleteImageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchDeleteImage{}, middleware.After) +} + +func addOpBatchGetImageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchGetImage{}, middleware.After) +} + +func addOpBatchGetRepositoryScanningConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchGetRepositoryScanningConfiguration{}, middleware.After) +} + +func addOpCompleteLayerUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCompleteLayerUpload{}, middleware.After) +} + +func addOpCreatePullThroughCacheRuleValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreatePullThroughCacheRule{}, middleware.After) +} + +func addOpCreateRepositoryValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateRepository{}, middleware.After) +} + +func addOpDeleteLifecyclePolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteLifecyclePolicy{}, middleware.After) +} + +func 
addOpDeletePullThroughCacheRuleValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeletePullThroughCacheRule{}, middleware.After) +} + +func addOpDeleteRepositoryValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteRepository{}, middleware.After) +} + +func addOpDeleteRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteRepositoryPolicy{}, middleware.After) +} + +func addOpDescribeImageReplicationStatusValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeImageReplicationStatus{}, middleware.After) +} + +func addOpDescribeImageScanFindingsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeImageScanFindings{}, middleware.After) +} + +func addOpDescribeImagesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeImages{}, middleware.After) +} + +func addOpGetDownloadUrlForLayerValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetDownloadUrlForLayer{}, middleware.After) +} + +func addOpGetLifecyclePolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetLifecyclePolicy{}, middleware.After) +} + +func addOpGetLifecyclePolicyPreviewValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetLifecyclePolicyPreview{}, middleware.After) +} + +func addOpGetRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetRepositoryPolicy{}, middleware.After) +} + +func addOpInitiateLayerUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpInitiateLayerUpload{}, middleware.After) +} + +func addOpListImagesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListImages{}, middleware.After) +} + +func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) +} + +func addOpPutImageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutImage{}, middleware.After) +} + +func addOpPutImageScanningConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutImageScanningConfiguration{}, middleware.After) +} + +func addOpPutImageTagMutabilityValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutImageTagMutability{}, middleware.After) +} + +func addOpPutLifecyclePolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutLifecyclePolicy{}, middleware.After) +} + +func addOpPutRegistryPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutRegistryPolicy{}, middleware.After) +} + +func addOpPutRegistryScanningConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutRegistryScanningConfiguration{}, middleware.After) +} + +func addOpPutReplicationConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutReplicationConfiguration{}, middleware.After) +} + +func addOpSetRepositoryPolicyValidationMiddleware(stack 
*middleware.Stack) error { + return stack.Initialize.Add(&validateOpSetRepositoryPolicy{}, middleware.After) +} + +func addOpStartImageScanValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStartImageScan{}, middleware.After) +} + +func addOpStartLifecyclePolicyPreviewValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStartLifecyclePolicyPreview{}, middleware.After) +} + +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func addOpUploadLayerPartValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUploadLayerPart{}, middleware.After) +} + +func validateEncryptionConfiguration(v *types.EncryptionConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EncryptionConfiguration"} + if len(v.EncryptionType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("EncryptionType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRegistryScanningRule(v *types.RegistryScanningRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RegistryScanningRule"} + if len(v.ScanFrequency) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ScanFrequency")) + } + if v.RepositoryFilters == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryFilters")) + } else if v.RepositoryFilters != nil { + if err := validateScanningRepositoryFilterList(v.RepositoryFilters); err != nil { + invalidParams.AddNested("RepositoryFilters", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRegistryScanningRuleList(v []types.RegistryScanningRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RegistryScanningRuleList"} + for i := range v { + if err := validateRegistryScanningRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationConfiguration(v *types.ReplicationConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationConfiguration"} + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateReplicationRuleList(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationDestination(v *types.ReplicationDestination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationDestination"} + if v.Region == nil { + invalidParams.Add(smithy.NewErrParamRequired("Region")) + } + if v.RegistryId == nil { + invalidParams.Add(smithy.NewErrParamRequired("RegistryId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationDestinationList(v []types.ReplicationDestination) error { + if v == nil { 
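+ // A nil slice means the caller never set the field; whether the list was
+ // required at all is enforced by the enclosing validator (for example,
+ // validateReplicationRule requires Destinations), so nil is not an error here.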
+ return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationDestinationList"} + for i := range v { + if err := validateReplicationDestination(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationRule(v *types.ReplicationRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationRule"} + if v.Destinations == nil { + invalidParams.Add(smithy.NewErrParamRequired("Destinations")) + } else if v.Destinations != nil { + if err := validateReplicationDestinationList(v.Destinations); err != nil { + invalidParams.AddNested("Destinations", err.(smithy.InvalidParamsError)) + } + } + if v.RepositoryFilters != nil { + if err := validateRepositoryFilterList(v.RepositoryFilters); err != nil { + invalidParams.AddNested("RepositoryFilters", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationRuleList(v []types.ReplicationRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleList"} + for i := range v { + if err := validateReplicationRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRepositoryFilter(v *types.RepositoryFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RepositoryFilter"} + if v.Filter == nil { + invalidParams.Add(smithy.NewErrParamRequired("Filter")) + } + if len(v.FilterType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("FilterType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRepositoryFilterList(v []types.RepositoryFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RepositoryFilterList"} + for i := range v { + if err := validateRepositoryFilter(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateScanningRepositoryFilter(v *types.ScanningRepositoryFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ScanningRepositoryFilter"} + if v.Filter == nil { + invalidParams.Add(smithy.NewErrParamRequired("Filter")) + } + if len(v.FilterType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("FilterType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateScanningRepositoryFilterList(v []types.ScanningRepositoryFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ScanningRepositoryFilterList"} + for i := range v { + if err := validateScanningRepositoryFilter(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchCheckLayerAvailabilityInput(v *BatchCheckLayerAvailabilityInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: 
"BatchCheckLayerAvailabilityInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.LayerDigests == nil { + invalidParams.Add(smithy.NewErrParamRequired("LayerDigests")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchDeleteImageInput(v *BatchDeleteImageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchDeleteImageInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageIds == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageIds")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchGetImageInput(v *BatchGetImageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchGetImageInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageIds == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageIds")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchGetRepositoryScanningConfigurationInput(v *BatchGetRepositoryScanningConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchGetRepositoryScanningConfigurationInput"} + if v.RepositoryNames == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryNames")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCompleteLayerUploadInput(v *CompleteLayerUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CompleteLayerUploadInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if v.LayerDigests == nil { + invalidParams.Add(smithy.NewErrParamRequired("LayerDigests")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreatePullThroughCacheRuleInput(v *CreatePullThroughCacheRuleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreatePullThroughCacheRuleInput"} + if v.EcrRepositoryPrefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("EcrRepositoryPrefix")) + } + if v.UpstreamRegistryUrl == nil { + invalidParams.Add(smithy.NewErrParamRequired("UpstreamRegistryUrl")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateRepositoryInput(v *CreateRepositoryInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateRepositoryInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.EncryptionConfiguration != nil { + if err := validateEncryptionConfiguration(v.EncryptionConfiguration); err != nil { + invalidParams.AddNested("EncryptionConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteLifecyclePolicyInput(v *DeleteLifecyclePolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: 
"DeleteLifecyclePolicyInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeletePullThroughCacheRuleInput(v *DeletePullThroughCacheRuleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeletePullThroughCacheRuleInput"} + if v.EcrRepositoryPrefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("EcrRepositoryPrefix")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteRepositoryInput(v *DeleteRepositoryInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteRepositoryInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteRepositoryPolicyInput(v *DeleteRepositoryPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteRepositoryPolicyInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeImageReplicationStatusInput(v *DescribeImageReplicationStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeImageReplicationStatusInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeImageScanFindingsInput(v *DescribeImageScanFindingsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeImageScanFindingsInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeImagesInput(v *DescribeImagesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeImagesInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetDownloadUrlForLayerInput(v *GetDownloadUrlForLayerInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetDownloadUrlForLayerInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.LayerDigest == nil { + invalidParams.Add(smithy.NewErrParamRequired("LayerDigest")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetLifecyclePolicyInput(v *GetLifecyclePolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetLifecyclePolicyInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + 
return invalidParams + } else { + return nil + } +} + +func validateOpGetLifecyclePolicyPreviewInput(v *GetLifecyclePolicyPreviewInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetLifecyclePolicyPreviewInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetRepositoryPolicyInput(v *GetRepositoryPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetRepositoryPolicyInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpInitiateLayerUploadInput(v *InitiateLayerUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InitiateLayerUploadInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListImagesInput(v *ListImagesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListImagesInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutImageInput(v *PutImageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutImageInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageManifest == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageManifest")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutImageScanningConfigurationInput(v *PutImageScanningConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutImageScanningConfigurationInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageScanningConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageScanningConfiguration")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutImageTagMutabilityInput(v *PutImageTagMutabilityInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutImageTagMutabilityInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if len(v.ImageTagMutability) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ImageTagMutability")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutLifecyclePolicyInput(v *PutLifecyclePolicyInput) error { + if v == nil { + return nil + } + invalidParams := 
smithy.InvalidParamsError{Context: "PutLifecyclePolicyInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.LifecyclePolicyText == nil { + invalidParams.Add(smithy.NewErrParamRequired("LifecyclePolicyText")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutRegistryPolicyInput(v *PutRegistryPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutRegistryPolicyInput"} + if v.PolicyText == nil { + invalidParams.Add(smithy.NewErrParamRequired("PolicyText")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutRegistryScanningConfigurationInput(v *PutRegistryScanningConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutRegistryScanningConfigurationInput"} + if v.Rules != nil { + if err := validateRegistryScanningRuleList(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutReplicationConfigurationInput(v *PutReplicationConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutReplicationConfigurationInput"} + if v.ReplicationConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReplicationConfiguration")) + } else if v.ReplicationConfiguration != nil { + if err := validateReplicationConfiguration(v.ReplicationConfiguration); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSetRepositoryPolicyInput(v *SetRepositoryPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SetRepositoryPolicyInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.PolicyText == nil { + invalidParams.Add(smithy.NewErrParamRequired("PolicyText")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartImageScanInput(v *StartImageScanInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartImageScanInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartLifecyclePolicyPreviewInput(v *StartLifecyclePolicyPreviewInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartLifecyclePolicyPreviewInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUploadLayerPartInput(v *UploadLayerPartInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UploadLayerPartInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if v.PartFirstByte == nil { + invalidParams.Add(smithy.NewErrParamRequired("PartFirstByte")) + } + if v.PartLastByte == nil { + invalidParams.Add(smithy.NewErrParamRequired("PartLastByte")) + } + if v.LayerPartBlob == nil { + invalidParams.Add(smithy.NewErrParamRequired("LayerPartBlob")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md new file mode 100644 index 000000000..b6ad151e8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/CHANGELOG.md @@ -0,0 +1,87 @@ +# v1.12.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. This adds a new retry mode, `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2021-12-21) + +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. + +# v1.8.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. + +# v1.7.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-08-27) + +* **Feature**: Updated API model to latest revision. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go new file mode 100644 index 000000000..dfd012e2f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_client.go @@ -0,0 +1,434 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
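+//
+// Editor's note (not part of the vendored SDK source): a minimal construction
+// sketch for this client, tying together NewFromConfig below and the
+// retry-related Options fields documented further down. It assumes the
+// companion github.com/aws/aws-sdk-go-v2/config module; the region is a
+// placeholder.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-east-1"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client := ecrpublic.NewFromConfig(cfg, func(o *ecrpublic.Options) {
+//		o.RetryMaxAttempts = 5              // caps attempts made by the default retryer
+//		o.RetryMode = aws.RetryModeAdaptive // experimental client-side rate limiting
+//	})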
+ +package ecrpublic + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "ECR PUBLIC" +const ServiceAPIVersion = "2020-10-30" + +// Client provides the API client to make operation calls for Amazon Elastic +// Container Registry Public. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // client's initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number of attempts an API client will make + // when calling an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client's default retryer, or modify + // per operation call's retry max attempts. When creating a new API client, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified.
When creating a new API client, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently this does not support per-operation call + // overrides, but it may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil, the API client will use a default retryer. The kind of + // default retryer created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto, this will store what the resolved + // value was at that point in time. Currently this does not support per-operation call + // overrides, but it may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to the client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ecrpublic", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go new file mode 100644 index 000000000..682599a03 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchCheckLayerAvailability.go @@ -0,0 +1,140 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Checks the availability of one or more image layers within a repository in a +// public registry. When an image is pushed to a repository, each image layer is +// checked to verify if it has been uploaded before. If it has been uploaded, then +// the image layer is skipped. This operation is used by the Amazon ECR proxy and +// is not generally used by customers for pulling and pushing images. In most +// cases, you should use the docker CLI to pull, tag, and push images. 
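+//
+// Editor's note (not part of the vendored SDK source): a minimal call sketch,
+// assuming a *Client built via NewFromConfig; the repository name and layer
+// digest are placeholders and error handling is kept short.
+//
+//	out, err := client.BatchCheckLayerAvailability(context.TODO(),
+//		&ecrpublic.BatchCheckLayerAvailabilityInput{
+//			RepositoryName: aws.String("my-repo"),
+//			LayerDigests:   []string{"sha256:0123abcd..."},
+//		})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = out.Layers   // layer objects for digests that are available
+//	_ = out.Failures // per-digest failures, if any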
+func (c *Client) BatchCheckLayerAvailability(ctx context.Context, params *BatchCheckLayerAvailabilityInput, optFns ...func(*Options)) (*BatchCheckLayerAvailabilityOutput, error) { + if params == nil { + params = &BatchCheckLayerAvailabilityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchCheckLayerAvailability", params, optFns, c.addOperationBatchCheckLayerAvailabilityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchCheckLayerAvailabilityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type BatchCheckLayerAvailabilityInput struct { + + // The digests of the image layers to check. + // + // This member is required. + LayerDigests []string + + // The name of the repository that is associated with the image layers to check. + // + // This member is required. + RepositoryName *string + + // The AWS account ID associated with the public registry that contains the image + // layers to check. If you do not specify a registry, the default public registry + // is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type BatchCheckLayerAvailabilityOutput struct { + + // Any failures associated with the call. + Failures []types.LayerFailure + + // A list of image layer objects corresponding to the image layer references in the + // request. + Layers []types.Layer + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchCheckLayerAvailabilityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchCheckLayerAvailability{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchCheckLayerAvailability{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpBatchCheckLayerAvailabilityValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchCheckLayerAvailability(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opBatchCheckLayerAvailability(region string) *awsmiddleware.RegisterServiceMetadata { + return 
&awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "BatchCheckLayerAvailability", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go new file mode 100644 index 000000000..b208c9520 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_BatchDeleteImage.go @@ -0,0 +1,140 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a list of specified images within a repository in a public registry. +// Images are specified with either an imageTag or imageDigest. You can remove a +// tag from an image by specifying the image's tag in your request. When you remove +// the last tag from an image, the image is deleted from your repository. You can +// completely delete an image (and all of its tags) by specifying the image's +// digest in your request. +func (c *Client) BatchDeleteImage(ctx context.Context, params *BatchDeleteImageInput, optFns ...func(*Options)) (*BatchDeleteImageOutput, error) { + if params == nil { + params = &BatchDeleteImageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchDeleteImage", params, optFns, c.addOperationBatchDeleteImageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchDeleteImageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type BatchDeleteImageInput struct { + + // A list of image ID references that correspond to images to delete. The format of + // the imageIds reference is imageTag=tag or imageDigest=digest. + // + // This member is required. + ImageIds []types.ImageIdentifier + + // The repository in a public registry that contains the image to delete. + // + // This member is required. + RepositoryName *string + + // The AWS account ID associated with the registry that contains the image to + // delete. If you do not specify a registry, the default public registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type BatchDeleteImageOutput struct { + + // Any failures associated with the call. + Failures []types.ImageFailure + + // The image IDs of the deleted images. + ImageIds []types.ImageIdentifier + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchDeleteImageMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchDeleteImage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchDeleteImage{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpBatchDeleteImageValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchDeleteImage(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opBatchDeleteImage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "BatchDeleteImage", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go new file mode 100644 index 000000000..206e55f52 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CompleteLayerUpload.go @@ -0,0 +1,151 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Informs Amazon ECR that the image layer upload has completed for a specified +// public registry, repository name, and upload ID. You can optionally provide a +// sha256 digest of the image layer for data validation purposes. When an image is +// pushed, the CompleteLayerUpload API is called once per each new image layer to +// verify that the upload has completed. This operation is used by the Amazon ECR +// proxy and is not generally used by customers for pulling and pushing images. In +// most cases, you should use the docker CLI to pull, tag, and push images. 
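+//
+// Editor's note (not part of the vendored SDK source): a minimal call sketch;
+// uploadID would come from a prior InitiateLayerUpload call, and the repository
+// name and digest are placeholders.
+//
+//	out, err := client.CompleteLayerUpload(context.TODO(),
+//		&ecrpublic.CompleteLayerUploadInput{
+//			RepositoryName: aws.String("my-repo"),
+//			UploadId:       uploadID,
+//			LayerDigests:   []string{"sha256:0123abcd..."},
+//		})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = out.LayerDigest // digest confirmed by the registry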
+func (c *Client) CompleteLayerUpload(ctx context.Context, params *CompleteLayerUploadInput, optFns ...func(*Options)) (*CompleteLayerUploadOutput, error) { + if params == nil { + params = &CompleteLayerUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CompleteLayerUpload", params, optFns, c.addOperationCompleteLayerUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CompleteLayerUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CompleteLayerUploadInput struct { + + // The sha256 digest of the image layer. + // + // This member is required. + LayerDigests []string + + // The name of the repository in a public registry to associate with the image + // layer. + // + // This member is required. + RepositoryName *string + + // The upload ID from a previous InitiateLayerUpload operation to associate with + // the image layer. + // + // This member is required. + UploadId *string + + // The AWS account ID associated with the registry to which to upload layers. If + // you do not specify a registry, the default public registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type CompleteLayerUploadOutput struct { + + // The sha256 digest of the image layer. + LayerDigest *string + + // The public registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // The upload ID associated with the layer. + UploadId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCompleteLayerUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCompleteLayerUpload{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCompleteLayerUpload{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCompleteLayerUploadValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteLayerUpload(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opCompleteLayerUpload(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "CompleteLayerUpload", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go new file mode 100644 index 000000000..82a2a7e5f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_CreateRepository.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a repository in a public registry. For more information, see Amazon ECR +// repositories +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) in +// the Amazon Elastic Container Registry User Guide. +func (c *Client) CreateRepository(ctx context.Context, params *CreateRepositoryInput, optFns ...func(*Options)) (*CreateRepositoryOutput, error) { + if params == nil { + params = &CreateRepositoryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateRepository", params, optFns, c.addOperationCreateRepositoryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateRepositoryOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateRepositoryInput struct { + + // The name to use for the repository. This appears publicly in the Amazon ECR + // Public Gallery. The repository name may be specified on its own (such as + // nginx-web-app) or it can be prepended with a namespace to group the repository + // into a category (such as project-a/nginx-web-app). + // + // This member is required. + RepositoryName *string + + // The details about the repository that are publicly visible in the Amazon ECR + // Public Gallery. + CatalogData *types.RepositoryCatalogDataInput + + // The metadata that you apply to the repository to help you categorize and + // organize them. Each tag consists of a key and an optional value, both of which + // you define. Tag keys can have a maximum character length of 128 characters, and + // tag values can have a maximum length of 256 characters. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateRepositoryOutput struct { + + // The catalog data for a repository. This data is publicly visible in the Amazon + // ECR Public Gallery. + CatalogData *types.RepositoryCatalogData + + // The repository that was created. + Repository *types.Repository + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateRepositoryMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateRepository{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateRepository{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateRepositoryValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateRepository(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateRepository(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "CreateRepository", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go new file mode 100644 index 000000000..30503d3b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepository.go @@ -0,0 +1,131 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a repository in a public registry. If the repository contains images, +// you must either delete all images in the repository or use the force option +// which deletes all images on your behalf before deleting the repository. 
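+//
+// Editor's note (not part of the vendored SDK source): a minimal call sketch;
+// setting Force deletes any remaining images on your behalf, per the doc above.
+// The repository name is a placeholder.
+//
+//	out, err := client.DeleteRepository(context.TODO(),
+//		&ecrpublic.DeleteRepositoryInput{
+//			RepositoryName: aws.String("my-repo"),
+//			Force:          true,
+//		})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = out.Repository // details of the deleted repository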
+func (c *Client) DeleteRepository(ctx context.Context, params *DeleteRepositoryInput, optFns ...func(*Options)) (*DeleteRepositoryOutput, error) { + if params == nil { + params = &DeleteRepositoryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteRepository", params, optFns, c.addOperationDeleteRepositoryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteRepositoryOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteRepositoryInput struct { + + // The name of the repository to delete. + // + // This member is required. + RepositoryName *string + + // If a repository contains images, forces the deletion. + Force bool + + // The AWS account ID associated with the public registry that contains the + // repository to delete. If you do not specify a registry, the default public + // registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DeleteRepositoryOutput struct { + + // The repository that was deleted. + Repository *types.Repository + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteRepositoryMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteRepository{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteRepository{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteRepositoryValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepository(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteRepository(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "DeleteRepository", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go new file mode 100644 index 
000000000..b6ce507b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DeleteRepositoryPolicy.go @@ -0,0 +1,132 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the repository policy associated with the specified repository. +func (c *Client) DeleteRepositoryPolicy(ctx context.Context, params *DeleteRepositoryPolicyInput, optFns ...func(*Options)) (*DeleteRepositoryPolicyOutput, error) { + if params == nil { + params = &DeleteRepositoryPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteRepositoryPolicy", params, optFns, c.addOperationDeleteRepositoryPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteRepositoryPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteRepositoryPolicyInput struct { + + // The name of the repository that is associated with the repository policy to + // delete. + // + // This member is required. + RepositoryName *string + + // The AWS account ID associated with the public registry that contains the + // repository policy to delete. If you do not specify a registry, the default + // public registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DeleteRepositoryPolicyOutput struct { + + // The JSON repository policy that was deleted from the repository. + PolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteRepositoryPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteRepositoryPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "DeleteRepositoryPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go new file mode 100644 index 000000000..e59efd5ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImageTags.go @@ -0,0 +1,247 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the image tag details for a repository in a public registry. 
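+//
+// Editor's note (not part of the vendored SDK source): a paging sketch using
+// the DescribeImageTagsPaginator defined later in this file; the repository
+// name is a placeholder.
+//
+//	p := ecrpublic.NewDescribeImageTagsPaginator(client,
+//		&ecrpublic.DescribeImageTagsInput{RepositoryName: aws.String("my-repo")})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(context.TODO())
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, detail := range page.ImageTagDetails {
+//			_ = detail // one types.ImageTagDetail per tag
+//		}
+//	}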
+func (c *Client) DescribeImageTags(ctx context.Context, params *DescribeImageTagsInput, optFns ...func(*Options)) (*DescribeImageTagsOutput, error) { + if params == nil { + params = &DescribeImageTagsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeImageTags", params, optFns, c.addOperationDescribeImageTagsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeImageTagsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeImageTagsInput struct { + + // The name of the repository that contains the image tag details to describe. + // + // This member is required. + RepositoryName *string + + // The maximum number of repository results returned by DescribeImageTags in + // paginated output. When this parameter is used, DescribeImageTags only returns + // maxResults results in a single page along with a nextToken response element. The + // remaining results of the initial request can be seen by sending another + // DescribeImageTags request with the returned nextToken value. This value can be + // between 1 and 1000. If this parameter is not used, then DescribeImageTags + // returns up to 100 results and a nextToken value, if applicable. This option + // cannot be used when you specify images with imageIds. + MaxResults *int32 + + // The nextToken value returned from a previous paginated DescribeImageTags request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + // This option cannot be used when you specify images with imageIds. + NextToken *string + + // The AWS account ID associated with the public registry that contains the + // repository in which to describe images. If you do not specify a registry, the + // default public registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DescribeImageTagsOutput struct { + + // The image tag details for the images in the requested repository. + ImageTagDetails []types.ImageTagDetail + + // The nextToken value to include in a future DescribeImageTags request. When the + // results of a DescribeImageTags request exceed maxResults, this value can be used + // to retrieve the next page of results. This value is null when there are no more + // results to return. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeImageTagsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImageTags{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImageTags{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeImageTagsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageTags(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// DescribeImageTagsAPIClient is a client that implements the DescribeImageTags +// operation. +type DescribeImageTagsAPIClient interface { + DescribeImageTags(context.Context, *DescribeImageTagsInput, ...func(*Options)) (*DescribeImageTagsOutput, error) +} + +var _ DescribeImageTagsAPIClient = (*Client)(nil) + +// DescribeImageTagsPaginatorOptions is the paginator options for DescribeImageTags +type DescribeImageTagsPaginatorOptions struct { + // The maximum number of repository results returned by DescribeImageTags in + // paginated output. When this parameter is used, DescribeImageTags only returns + // maxResults results in a single page along with a nextToken response element. The + // remaining results of the initial request can be seen by sending another + // DescribeImageTags request with the returned nextToken value. This value can be + // between 1 and 1000. If this parameter is not used, then DescribeImageTags + // returns up to 100 results and a nextToken value, if applicable. This option + // cannot be used when you specify images with imageIds. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+	StopOnDuplicateToken bool
+}
+
+// DescribeImageTagsPaginator is a paginator for DescribeImageTags
+type DescribeImageTagsPaginator struct {
+	options   DescribeImageTagsPaginatorOptions
+	client    DescribeImageTagsAPIClient
+	params    *DescribeImageTagsInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewDescribeImageTagsPaginator returns a new DescribeImageTagsPaginator
+func NewDescribeImageTagsPaginator(client DescribeImageTagsAPIClient, params *DescribeImageTagsInput, optFns ...func(*DescribeImageTagsPaginatorOptions)) *DescribeImageTagsPaginator {
+	if params == nil {
+		params = &DescribeImageTagsInput{}
+	}
+
+	options := DescribeImageTagsPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &DescribeImageTagsPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *DescribeImageTagsPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next DescribeImageTags page.
+func (p *DescribeImageTagsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImageTagsOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	result, err := p.client.DescribeImageTags(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeImageTags(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "ecr-public",
+		OperationName: "DescribeImageTags",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go
new file mode 100644
index 000000000..6d6df97a2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeImages.go
@@ -0,0 +1,254 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ecrpublic
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/ecrpublic/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns metadata about the images in a repository in a public registry.
+// Beginning with Docker version 1.9, the Docker client compresses image layers
+// before pushing them to a V2 Docker registry. The output of the docker images
+// command shows the uncompressed image size, so it may return a larger image size
+// than the image sizes returned by DescribeImages.
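+//
+// Usage sketch (illustrative, not part of the generated source): assuming a
+// configured *Client named svc and the pointer helpers from the
+// github.com/aws/aws-sdk-go-v2/aws package; "my-repo" is a hypothetical
+// repository name.
+//
+//	out, err := svc.DescribeImages(ctx, &DescribeImagesInput{
+//		RepositoryName: aws.String("my-repo"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, detail := range out.ImageDetails {
+//		fmt.Println(aws.ToString(detail.ImageDigest))
+//	}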
+func (c *Client) DescribeImages(ctx context.Context, params *DescribeImagesInput, optFns ...func(*Options)) (*DescribeImagesOutput, error) { + if params == nil { + params = &DescribeImagesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeImages", params, optFns, c.addOperationDescribeImagesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeImagesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeImagesInput struct { + + // The repository that contains the images to describe. + // + // This member is required. + RepositoryName *string + + // The list of image IDs for the requested repository. + ImageIds []types.ImageIdentifier + + // The maximum number of repository results returned by DescribeImages in paginated + // output. When this parameter is used, DescribeImages only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeImages + // request with the returned nextToken value. This value can be between 1 and 1000. + // If this parameter is not used, then DescribeImages returns up to 100 results and + // a nextToken value, if applicable. This option cannot be used when you specify + // images with imageIds. + MaxResults *int32 + + // The nextToken value returned from a previous paginated DescribeImages request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. This value is null when there are no more results to return. + // This option cannot be used when you specify images with imageIds. + NextToken *string + + // The AWS account ID associated with the public registry that contains the + // repository in which to describe images. If you do not specify a registry, the + // default public registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type DescribeImagesOutput struct { + + // A list of ImageDetail objects that contain data about the image. + ImageDetails []types.ImageDetail + + // The nextToken value to include in a future DescribeImages request. When the + // results of a DescribeImages request exceed maxResults, this value can be used to + // retrieve the next page of results. This value is null when there are no more + // results to return. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeImages{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeImages{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeImagesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImages(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// DescribeImagesAPIClient is a client that implements the DescribeImages +// operation. +type DescribeImagesAPIClient interface { + DescribeImages(context.Context, *DescribeImagesInput, ...func(*Options)) (*DescribeImagesOutput, error) +} + +var _ DescribeImagesAPIClient = (*Client)(nil) + +// DescribeImagesPaginatorOptions is the paginator options for DescribeImages +type DescribeImagesPaginatorOptions struct { + // The maximum number of repository results returned by DescribeImages in paginated + // output. When this parameter is used, DescribeImages only returns maxResults + // results in a single page along with a nextToken response element. The remaining + // results of the initial request can be seen by sending another DescribeImages + // request with the returned nextToken value. This value can be between 1 and 1000. + // If this parameter is not used, then DescribeImages returns up to 100 results and + // a nextToken value, if applicable. This option cannot be used when you specify + // images with imageIds. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+	StopOnDuplicateToken bool
+}
+
+// DescribeImagesPaginator is a paginator for DescribeImages
+type DescribeImagesPaginator struct {
+	options   DescribeImagesPaginatorOptions
+	client    DescribeImagesAPIClient
+	params    *DescribeImagesInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewDescribeImagesPaginator returns a new DescribeImagesPaginator
+func NewDescribeImagesPaginator(client DescribeImagesAPIClient, params *DescribeImagesInput, optFns ...func(*DescribeImagesPaginatorOptions)) *DescribeImagesPaginator {
+	if params == nil {
+		params = &DescribeImagesInput{}
+	}
+
+	options := DescribeImagesPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &DescribeImagesPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *DescribeImagesPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next DescribeImages page.
+func (p *DescribeImagesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImagesOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	result, err := p.client.DescribeImages(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeImages(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "ecr-public",
+		OperationName: "DescribeImages",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go
new file mode 100644
index 000000000..e9297dfe9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRegistries.go
@@ -0,0 +1,237 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ecrpublic
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/ecrpublic/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns details for a public registry.
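+//
+// Usage sketch (illustrative, not part of the generated source): assuming a
+// configured *Client named svc and the github.com/aws/aws-sdk-go-v2/aws helper
+// package, the paginator defined below drives the nextToken handling.
+//
+//	p := NewDescribeRegistriesPaginator(svc, &DescribeRegistriesInput{})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, registry := range page.Registries {
+//			fmt.Println(aws.ToString(registry.RegistryUri))
+//		}
+//	}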
+func (c *Client) DescribeRegistries(ctx context.Context, params *DescribeRegistriesInput, optFns ...func(*Options)) (*DescribeRegistriesOutput, error) { + if params == nil { + params = &DescribeRegistriesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeRegistries", params, optFns, c.addOperationDescribeRegistriesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeRegistriesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeRegistriesInput struct { + + // The maximum number of repository results returned by DescribeRegistries in + // paginated output. When this parameter is used, DescribeRegistries only returns + // maxResults results in a single page along with a nextToken response element. The + // remaining results of the initial request can be seen by sending another + // DescribeRegistries request with the returned nextToken value. This value can be + // between 1 and 1000. If this parameter is not used, then DescribeRegistries + // returns up to 100 results and a nextToken value, if applicable. + MaxResults *int32 + + // The nextToken value returned from a previous paginated DescribeRegistries + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. This token should be treated as an opaque identifier that is only + // used to retrieve the next items in a list and not for other programmatic + // purposes. + NextToken *string + + noSmithyDocumentSerde +} + +type DescribeRegistriesOutput struct { + + // An object containing the details for a public registry. + // + // This member is required. + Registries []types.Registry + + // The nextToken value to include in a future DescribeRepositories request. When + // the results of a DescribeRepositories request exceed maxResults, this value can + // be used to retrieve the next page of results. This value is null when there are + // no more results to return. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeRegistriesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeRegistries{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeRegistries{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRegistries(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// DescribeRegistriesAPIClient is a client that implements the DescribeRegistries +// operation. +type DescribeRegistriesAPIClient interface { + DescribeRegistries(context.Context, *DescribeRegistriesInput, ...func(*Options)) (*DescribeRegistriesOutput, error) +} + +var _ DescribeRegistriesAPIClient = (*Client)(nil) + +// DescribeRegistriesPaginatorOptions is the paginator options for +// DescribeRegistries +type DescribeRegistriesPaginatorOptions struct { + // The maximum number of repository results returned by DescribeRegistries in + // paginated output. When this parameter is used, DescribeRegistries only returns + // maxResults results in a single page along with a nextToken response element. The + // remaining results of the initial request can be seen by sending another + // DescribeRegistries request with the returned nextToken value. This value can be + // between 1 and 1000. If this parameter is not used, then DescribeRegistries + // returns up to 100 results and a nextToken value, if applicable. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+	StopOnDuplicateToken bool
+}
+
+// DescribeRegistriesPaginator is a paginator for DescribeRegistries
+type DescribeRegistriesPaginator struct {
+	options   DescribeRegistriesPaginatorOptions
+	client    DescribeRegistriesAPIClient
+	params    *DescribeRegistriesInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewDescribeRegistriesPaginator returns a new DescribeRegistriesPaginator
+func NewDescribeRegistriesPaginator(client DescribeRegistriesAPIClient, params *DescribeRegistriesInput, optFns ...func(*DescribeRegistriesPaginatorOptions)) *DescribeRegistriesPaginator {
+	if params == nil {
+		params = &DescribeRegistriesInput{}
+	}
+
+	options := DescribeRegistriesPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &DescribeRegistriesPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *DescribeRegistriesPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next DescribeRegistries page.
+func (p *DescribeRegistriesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeRegistriesOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	result, err := p.client.DescribeRegistries(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeRegistries(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "ecr-public",
+		OperationName: "DescribeRegistries",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go
new file mode 100644
index 000000000..6f446f513
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_DescribeRepositories.go
@@ -0,0 +1,247 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ecrpublic
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/ecrpublic/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Describes repositories in a public registry.
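+//
+// Usage sketch (illustrative, not part of the generated source): assuming a
+// configured *Client named svc; MaxResults caps the page size (the service
+// allows 1-1000) and the paginator defined below follows nextToken
+// automatically.
+//
+//	p := NewDescribeRepositoriesPaginator(svc, &DescribeRepositoriesInput{
+//		MaxResults: aws.Int32(100),
+//	})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, repo := range page.Repositories {
+//			fmt.Println(aws.ToString(repo.RepositoryName))
+//		}
+//	}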
+func (c *Client) DescribeRepositories(ctx context.Context, params *DescribeRepositoriesInput, optFns ...func(*Options)) (*DescribeRepositoriesOutput, error) { + if params == nil { + params = &DescribeRepositoriesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeRepositories", params, optFns, c.addOperationDescribeRepositoriesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeRepositoriesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeRepositoriesInput struct { + + // The maximum number of repository results returned by DescribeRepositories in + // paginated output. When this parameter is used, DescribeRepositories only returns + // maxResults results in a single page along with a nextToken response element. The + // remaining results of the initial request can be seen by sending another + // DescribeRepositories request with the returned nextToken value. This value can + // be between 1 and 1000. If this parameter is not used, then DescribeRepositories + // returns up to 100 results and a nextToken value, if applicable. This option + // cannot be used when you specify repositories with repositoryNames. + MaxResults *int32 + + // The nextToken value returned from a previous paginated DescribeRepositories + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. This option cannot be used when you specify repositories with + // repositoryNames. This token should be treated as an opaque identifier that is + // only used to retrieve the next items in a list and not for other programmatic + // purposes. + NextToken *string + + // The AWS account ID associated with the registry that contains the repositories + // to be described. If you do not specify a registry, the default public registry + // is assumed. + RegistryId *string + + // A list of repositories to describe. If this parameter is omitted, then all + // repositories in a registry are described. + RepositoryNames []string + + noSmithyDocumentSerde +} + +type DescribeRepositoriesOutput struct { + + // The nextToken value to include in a future DescribeRepositories request. When + // the results of a DescribeRepositories request exceed maxResults, this value can + // be used to retrieve the next page of results. This value is null when there are + // no more results to return. + NextToken *string + + // A list of repository objects corresponding to valid repositories. + Repositories []types.Repository + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeRepositoriesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeRepositories{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeRepositories{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRepositories(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// DescribeRepositoriesAPIClient is a client that implements the +// DescribeRepositories operation. +type DescribeRepositoriesAPIClient interface { + DescribeRepositories(context.Context, *DescribeRepositoriesInput, ...func(*Options)) (*DescribeRepositoriesOutput, error) +} + +var _ DescribeRepositoriesAPIClient = (*Client)(nil) + +// DescribeRepositoriesPaginatorOptions is the paginator options for +// DescribeRepositories +type DescribeRepositoriesPaginatorOptions struct { + // The maximum number of repository results returned by DescribeRepositories in + // paginated output. When this parameter is used, DescribeRepositories only returns + // maxResults results in a single page along with a nextToken response element. The + // remaining results of the initial request can be seen by sending another + // DescribeRepositories request with the returned nextToken value. This value can + // be between 1 and 1000. If this parameter is not used, then DescribeRepositories + // returns up to 100 results and a nextToken value, if applicable. This option + // cannot be used when you specify repositories with repositoryNames. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+	StopOnDuplicateToken bool
+}
+
+// DescribeRepositoriesPaginator is a paginator for DescribeRepositories
+type DescribeRepositoriesPaginator struct {
+	options   DescribeRepositoriesPaginatorOptions
+	client    DescribeRepositoriesAPIClient
+	params    *DescribeRepositoriesInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewDescribeRepositoriesPaginator returns a new DescribeRepositoriesPaginator
+func NewDescribeRepositoriesPaginator(client DescribeRepositoriesAPIClient, params *DescribeRepositoriesInput, optFns ...func(*DescribeRepositoriesPaginatorOptions)) *DescribeRepositoriesPaginator {
+	if params == nil {
+		params = &DescribeRepositoriesInput{}
+	}
+
+	options := DescribeRepositoriesPaginatorOptions{}
+	if params.MaxResults != nil {
+		options.Limit = *params.MaxResults
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &DescribeRepositoriesPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.NextToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *DescribeRepositoriesPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next DescribeRepositories page.
+func (p *DescribeRepositoriesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeRepositoriesOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	result, err := p.client.DescribeRepositories(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeRepositories(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "ecr-public",
+		OperationName: "DescribeRepositories",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go
new file mode 100644
index 000000000..adbe13a0c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetAuthorizationToken.go
@@ -0,0 +1,116 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ecrpublic
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/ecrpublic/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Retrieves an authorization token. An authorization token represents your IAM
+// authentication credentials and can be used to access any Amazon ECR registry
+// that your IAM principal has access to. The authorization token is valid for 12
+// hours. This API requires the ecr-public:GetAuthorizationToken and
+// sts:GetServiceBearerToken permissions.
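+//
+// Usage sketch (illustrative, not part of the generated source): assuming a
+// configured *Client named svc; the returned token is base64-encoded and, once
+// decoded, serves as the password for a docker login with username AWS.
+//
+//	out, err := svc.GetAuthorizationToken(ctx, &GetAuthorizationTokenInput{})
+//	if err != nil {
+//		return err
+//	}
+//	token := aws.ToString(out.AuthorizationData.AuthorizationToken)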
+func (c *Client) GetAuthorizationToken(ctx context.Context, params *GetAuthorizationTokenInput, optFns ...func(*Options)) (*GetAuthorizationTokenOutput, error) { + if params == nil { + params = &GetAuthorizationTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetAuthorizationToken", params, optFns, c.addOperationGetAuthorizationTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetAuthorizationTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetAuthorizationTokenInput struct { + noSmithyDocumentSerde +} + +type GetAuthorizationTokenOutput struct { + + // An authorization token data object that corresponds to a public registry. + AuthorizationData *types.AuthorizationData + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetAuthorizationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetAuthorizationToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetAuthorizationToken{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAuthorizationToken(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetAuthorizationToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "GetAuthorizationToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go new file mode 100644 index 000000000..88cd5b4bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRegistryCatalogData.go @@ -0,0 +1,114 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves catalog metadata for a public registry. +func (c *Client) GetRegistryCatalogData(ctx context.Context, params *GetRegistryCatalogDataInput, optFns ...func(*Options)) (*GetRegistryCatalogDataOutput, error) { + if params == nil { + params = &GetRegistryCatalogDataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRegistryCatalogData", params, optFns, c.addOperationGetRegistryCatalogDataMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRegistryCatalogDataOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRegistryCatalogDataInput struct { + noSmithyDocumentSerde +} + +type GetRegistryCatalogDataOutput struct { + + // The catalog metadata for the public registry. + // + // This member is required. + RegistryCatalogData *types.RegistryCatalogData + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRegistryCatalogDataMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRegistryCatalogData{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRegistryCatalogData{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRegistryCatalogData(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRegistryCatalogData(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "GetRegistryCatalogData", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go new file mode 100644 index 000000000..5c7173fe3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryCatalogData.go @@ -0,0 +1,127 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieve catalog metadata for a repository in a public registry. This metadata +// is displayed publicly in the Amazon ECR Public Gallery. +func (c *Client) GetRepositoryCatalogData(ctx context.Context, params *GetRepositoryCatalogDataInput, optFns ...func(*Options)) (*GetRepositoryCatalogDataOutput, error) { + if params == nil { + params = &GetRepositoryCatalogDataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRepositoryCatalogData", params, optFns, c.addOperationGetRepositoryCatalogDataMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRepositoryCatalogDataOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRepositoryCatalogDataInput struct { + + // The name of the repository to retrieve the catalog metadata for. + // + // This member is required. + RepositoryName *string + + // The AWS account ID associated with the registry that contains the repositories + // to be described. If you do not specify a registry, the default public registry + // is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type GetRepositoryCatalogDataOutput struct { + + // The catalog metadata for the repository. + CatalogData *types.RepositoryCatalogData + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRepositoryCatalogDataMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRepositoryCatalogData{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRepositoryCatalogData{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetRepositoryCatalogDataValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRepositoryCatalogData(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRepositoryCatalogData(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "GetRepositoryCatalogData", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go new file mode 100644 index 000000000..80584f924 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_GetRepositoryPolicy.go @@ -0,0 +1,132 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the repository policy for the specified repository. 
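+//
+// Usage sketch (illustrative, not part of the generated source): assuming a
+// configured *Client named svc and a hypothetical repository name.
+//
+//	out, err := svc.GetRepositoryPolicy(ctx, &GetRepositoryPolicyInput{
+//		RepositoryName: aws.String("my-repo"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(aws.ToString(out.PolicyText)) // policy document as JSON text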
+func (c *Client) GetRepositoryPolicy(ctx context.Context, params *GetRepositoryPolicyInput, optFns ...func(*Options)) (*GetRepositoryPolicyOutput, error) { + if params == nil { + params = &GetRepositoryPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRepositoryPolicy", params, optFns, c.addOperationGetRepositoryPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRepositoryPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRepositoryPolicyInput struct { + + // The name of the repository with the policy to retrieve. + // + // This member is required. + RepositoryName *string + + // The AWS account ID associated with the public registry that contains the + // repository. If you do not specify a registry, the default public registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type GetRepositoryPolicyOutput struct { + + // The repository policy text associated with the repository. The policy text will + // be in JSON format. + PolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetRepositoryPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRepositoryPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "GetRepositoryPolicy", + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go new file mode 100644 index 000000000..1b4576d78 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_InitiateLayerUpload.go @@ -0,0 +1,135 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Notifies Amazon ECR that you intend to upload an image layer. When an image is +// pushed, the InitiateLayerUpload API is called once per image layer that has not +// already been uploaded. Whether or not an image layer has been uploaded is +// determined by the BatchCheckLayerAvailability API action. This operation is used +// by the Amazon ECR proxy and is not generally used by customers for pulling and +// pushing images. In most cases, you should use the docker CLI to pull, tag, and +// push images. +func (c *Client) InitiateLayerUpload(ctx context.Context, params *InitiateLayerUploadInput, optFns ...func(*Options)) (*InitiateLayerUploadOutput, error) { + if params == nil { + params = &InitiateLayerUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "InitiateLayerUpload", params, optFns, c.addOperationInitiateLayerUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*InitiateLayerUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type InitiateLayerUploadInput struct { + + // The name of the repository to which you intend to upload layers. + // + // This member is required. + RepositoryName *string + + // The AWS account ID associated with the registry to which you intend to upload + // layers. If you do not specify a registry, the default public registry is + // assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type InitiateLayerUploadOutput struct { + + // The size, in bytes, that Amazon ECR expects future layer part uploads to be. + PartSize *int64 + + // The upload ID for the layer upload. This parameter is passed to further + // UploadLayerPart and CompleteLayerUpload operations. + UploadId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationInitiateLayerUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpInitiateLayerUpload{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpInitiateLayerUpload{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpInitiateLayerUploadValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opInitiateLayerUpload(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opInitiateLayerUpload(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "InitiateLayerUpload", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go new file mode 100644 index 000000000..6db006279 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_ListTagsForResource.go @@ -0,0 +1,122 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// List the tags for an Amazon ECR Public resource. 
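+//
+// Usage sketch (illustrative, not part of the generated source): assuming a
+// configured *Client named svc; the ARN is a hypothetical example.
+//
+//	out, err := svc.ListTagsForResource(ctx, &ListTagsForResourceInput{
+//		ResourceArn: aws.String("arn:aws:ecr-public::123456789012:repository/my-repo"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, tag := range out.Tags {
+//		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
+//	}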
+func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { + if params == nil { + params = &ListTagsForResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListTagsForResource", params, optFns, c.addOperationListTagsForResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListTagsForResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTagsForResourceInput struct { + + // The Amazon Resource Name (ARN) that identifies the resource for which to list + // the tags. Currently, the supported resource is an Amazon ECR Public repository. + // + // This member is required. + ResourceArn *string + + noSmithyDocumentSerde +} + +type ListTagsForResourceOutput struct { + + // The tags for the resource. + Tags []types.Tag + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListTagsForResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "ListTagsForResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go new file mode 100644 index 000000000..3a2cbdbd8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutImage.go @@ -0,0 +1,149 @@ +// Code generated by 
smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or updates the image manifest and tags associated with an image. When an +// image is pushed and all new image layers have been uploaded, the PutImage API is +// called once to create or update the image manifest and the tags associated with +// the image. This operation is used by the Amazon ECR proxy and is not generally +// used by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. +func (c *Client) PutImage(ctx context.Context, params *PutImageInput, optFns ...func(*Options)) (*PutImageOutput, error) { + if params == nil { + params = &PutImageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutImage", params, optFns, c.addOperationPutImageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutImageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutImageInput struct { + + // The image manifest corresponding to the image to be uploaded. + // + // This member is required. + ImageManifest *string + + // The name of the repository in which to put the image. + // + // This member is required. + RepositoryName *string + + // The image digest of the image manifest corresponding to the image. + ImageDigest *string + + // The media type of the image manifest. If you push an image manifest that does + // not contain the mediaType field, you must specify the imageManifestMediaType in + // the request. + ImageManifestMediaType *string + + // The tag to associate with the image. This parameter is required for images that + // use the Docker Image Manifest V2 Schema 2 or Open Container Initiative (OCI) + // formats. + ImageTag *string + + // The AWS account ID associated with the public registry that contains the + // repository in which to put the image. If you do not specify a registry, the + // default public registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type PutImageOutput struct { + + // Details of the image uploaded. + Image *types.Image + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutImageMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutImage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutImage{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutImageValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutImage(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutImage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "PutImage", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go new file mode 100644 index 000000000..994364a53 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRegistryCatalogData.go @@ -0,0 +1,120 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Create or updates the catalog data for a public registry. 
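+//
+// Usage sketch (illustrative, not part of the generated source): assuming a
+// configured *Client named svc; the display name is a hypothetical example and
+// is only shown publicly for verified accounts.
+//
+//	_, err := svc.PutRegistryCatalogData(ctx, &PutRegistryCatalogDataInput{
+//		DisplayName: aws.String("Example Org"),
+//	})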
+func (c *Client) PutRegistryCatalogData(ctx context.Context, params *PutRegistryCatalogDataInput, optFns ...func(*Options)) (*PutRegistryCatalogDataOutput, error) { + if params == nil { + params = &PutRegistryCatalogDataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutRegistryCatalogData", params, optFns, c.addOperationPutRegistryCatalogDataMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutRegistryCatalogDataOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutRegistryCatalogDataInput struct { + + // The display name for a public registry. The display name is shown as the + // repository author in the Amazon ECR Public Gallery. The registry display name is + // only publicly visible in the Amazon ECR Public Gallery for verified accounts. + DisplayName *string + + noSmithyDocumentSerde +} + +type PutRegistryCatalogDataOutput struct { + + // The catalog data for the public registry. + // + // This member is required. + RegistryCatalogData *types.RegistryCatalogData + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutRegistryCatalogDataMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutRegistryCatalogData{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutRegistryCatalogData{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRegistryCatalogData(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutRegistryCatalogData(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "PutRegistryCatalogData", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go new file mode 100644 index 000000000..4510287f3 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_PutRepositoryCatalogData.go @@ -0,0 +1,131 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or updates the catalog data for a repository in a public registry. +func (c *Client) PutRepositoryCatalogData(ctx context.Context, params *PutRepositoryCatalogDataInput, optFns ...func(*Options)) (*PutRepositoryCatalogDataOutput, error) { + if params == nil { + params = &PutRepositoryCatalogDataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutRepositoryCatalogData", params, optFns, c.addOperationPutRepositoryCatalogDataMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutRepositoryCatalogDataOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutRepositoryCatalogDataInput struct { + + // An object containing the catalog data for a repository. This data is publicly + // visible in the Amazon ECR Public Gallery. + // + // This member is required. + CatalogData *types.RepositoryCatalogDataInput + + // The name of the repository to create or update the catalog data for. + // + // This member is required. + RepositoryName *string + + // The AWS account ID associated with the public registry the repository is in. If + // you do not specify a registry, the default public registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type PutRepositoryCatalogDataOutput struct { + + // The catalog data for the repository. + CatalogData *types.RepositoryCatalogData + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutRepositoryCatalogDataMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutRepositoryCatalogData{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutRepositoryCatalogData{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutRepositoryCatalogDataValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutRepositoryCatalogData(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutRepositoryCatalogData(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "PutRepositoryCatalogData", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go new file mode 100644 index 000000000..46fdd68ad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_SetRepositoryPolicy.go @@ -0,0 +1,147 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Applies a repository policy to the specified public repository to control access +// permissions. For more information, see Amazon ECR Repository Policies +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) +// in the Amazon Elastic Container Registry User Guide. 
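+//
+// An illustrative usage sketch (editorial addition, not generated code):
+// "c" and "ctx" are assumed to exist as above, the repository name is a
+// placeholder, and the policy document is a minimal made-up example:
+//
+//	policyText := `{"Version":"2008-10-17","Statement":[]}`
+//	out, err := c.SetRepositoryPolicy(ctx, &ecrpublic.SetRepositoryPolicyInput{
+//		RepositoryName: aws.String("example-repo"),
+//		PolicyText:     aws.String(policyText),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = out.PolicyText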
+func (c *Client) SetRepositoryPolicy(ctx context.Context, params *SetRepositoryPolicyInput, optFns ...func(*Options)) (*SetRepositoryPolicyOutput, error) { + if params == nil { + params = &SetRepositoryPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "SetRepositoryPolicy", params, optFns, c.addOperationSetRepositoryPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*SetRepositoryPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type SetRepositoryPolicyInput struct { + + // The JSON repository policy text to apply to the repository. For more + // information, see Amazon ECR Repository Policies + // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) + // in the Amazon Elastic Container Registry User Guide. + // + // This member is required. + PolicyText *string + + // The name of the repository to receive the policy. + // + // This member is required. + RepositoryName *string + + // If the policy you are attempting to set on a repository would prevent you + // from setting another policy in the future, you must force the + // SetRepositoryPolicy operation. This is intended to prevent accidental repository + // lockouts. + Force bool + + // The AWS account ID associated with the registry that contains the repository. If + // you do not specify a registry, the default public registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type SetRepositoryPolicyOutput struct { + + // The JSON repository policy text applied to the repository. + PolicyText *string + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationSetRepositoryPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpSetRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSetRepositoryPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpSetRepositoryPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSetRepositoryPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSetRepositoryPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "SetRepositoryPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go new file mode 100644 index 000000000..0e7044006 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_TagResource.go @@ -0,0 +1,128 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Associates the specified tags to a resource with the specified resourceArn. If +// existing tags on a resource are not specified in the request parameters, they +// are not changed. When a resource is deleted, the tags associated with that +// resource are deleted as well. 
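+//
+// An illustrative usage sketch (editorial addition, not generated code):
+// "c" and "ctx" are assumed as above, the ARN and tag are placeholder
+// values, and types refers to
+// github.com/aws/aws-sdk-go-v2/service/ecrpublic/types:
+//
+//	_, err := c.TagResource(ctx, &ecrpublic.TagResourceInput{
+//		ResourceArn: aws.String("arn:aws:ecr-public::123456789012:repository/example-repo"),
+//		Tags: []types.Tag{
+//			{Key: aws.String("team"), Value: aws.String("example")},
+//		},
+//	})
+//	if err != nil {
+//		// handle the error
+//	}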
+func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, + // the supported resource is an Amazon ECR Public repository. + // + // This member is required. + ResourceArn *string + + // The tags to add to the resource. A tag is an array of key-value pairs. Tag keys + // can have a maximum character length of 128 characters, and tag values can have a + // maximum length of 256 characters. + // + // This member is required. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type TagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "TagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go new file mode 100644 index 000000000..8f042d4c8 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UntagResource.go @@ -0,0 +1,122 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes specified tags from a resource. +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // The Amazon Resource Name (ARN) of the resource from which to delete tags. + // Currently, the supported resource is an Amazon ECR Public repository. + // + // This member is required. + ResourceArn *string + + // The keys of the tags to be removed. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + 
OperationName: "UntagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go new file mode 100644 index 000000000..ce3fa0a4a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/api_op_UploadLayerPart.go @@ -0,0 +1,159 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Uploads an image layer part to Amazon ECR. When an image is pushed, each new +// image layer is uploaded in parts. The maximum size of each image layer part can +// be 20971520 bytes (or about 20MB). The UploadLayerPart API is called once per +// each new image layer part. This operation is used by the Amazon ECR proxy and is +// not generally used by customers for pulling and pushing images. In most cases, +// you should use the docker CLI to pull, tag, and push images. +func (c *Client) UploadLayerPart(ctx context.Context, params *UploadLayerPartInput, optFns ...func(*Options)) (*UploadLayerPartOutput, error) { + if params == nil { + params = &UploadLayerPartInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UploadLayerPart", params, optFns, c.addOperationUploadLayerPartMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UploadLayerPartOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UploadLayerPartInput struct { + + // The base64-encoded layer part payload. + // + // This member is required. + LayerPartBlob []byte + + // The position of the first byte of the layer part witin the overall image layer. + // + // This member is required. + PartFirstByte *int64 + + // The position of the last byte of the layer part within the overall image layer. + // + // This member is required. + PartLastByte *int64 + + // The name of the repository to which you are uploading layer parts. + // + // This member is required. + RepositoryName *string + + // The upload ID from a previous InitiateLayerUpload operation to associate with + // the layer part upload. + // + // This member is required. + UploadId *string + + // The AWS account ID associated with the registry to which you are uploading layer + // parts. If you do not specify a registry, the default public registry is assumed. + RegistryId *string + + noSmithyDocumentSerde +} + +type UploadLayerPartOutput struct { + + // The integer value of the last byte received in the request. + LastByteReceived *int64 + + // The registry ID associated with the request. + RegistryId *string + + // The repository name associated with the request. + RepositoryName *string + + // The upload ID associated with the request. + UploadId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUploadLayerPartMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUploadLayerPart{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUploadLayerPart{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUploadLayerPartValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadLayerPart(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUploadLayerPart(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "ecr-public", + OperationName: "UploadLayerPart", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go new file mode 100644 index 000000000..2a9ac6204 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/deserializers.go @@ -0,0 +1,7053 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ecrpublic + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strings" +) + +type awsAwsjson11_deserializeOpBatchCheckLayerAvailability struct { +} + +func (*awsAwsjson11_deserializeOpBatchCheckLayerAvailability) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpBatchCheckLayerAvailability) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response, &metadata) + } + output := &BatchCheckLayerAvailabilityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentBatchCheckLayerAvailabilityOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorBatchCheckLayerAvailability(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case 
strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RegistryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRegistryNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpBatchDeleteImage struct { +} + +func (*awsAwsjson11_deserializeOpBatchDeleteImage) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpBatchDeleteImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorBatchDeleteImage(response, &metadata) + } + output := &BatchDeleteImageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentBatchDeleteImageOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorBatchDeleteImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + 
errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCompleteLayerUpload struct { +} + +func (*awsAwsjson11_deserializeOpCompleteLayerUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCompleteLayerUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response, &metadata) + } + output := &CompleteLayerUploadOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCompleteLayerUploadOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCompleteLayerUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("EmptyUploadException", errorCode): + return awsAwsjson11_deserializeErrorEmptyUploadException(response, errorBody) + + case strings.EqualFold("InvalidLayerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidLayerException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("LayerAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorLayerAlreadyExistsException(response, errorBody) + + case strings.EqualFold("LayerPartTooSmallException", errorCode): + return awsAwsjson11_deserializeErrorLayerPartTooSmallException(response, errorBody) + + case strings.EqualFold("RegistryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRegistryNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + + case strings.EqualFold("UploadNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorUploadNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateRepository struct { +} + +func (*awsAwsjson11_deserializeOpCreateRepository) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateRepository) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateRepository(response, &metadata) + } + output := &CreateRepositoryOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateRepositoryOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateRepository(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("InvalidTagParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("RepositoryAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryAlreadyExistsException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("TooManyTagsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteRepository struct { +} + +func (*awsAwsjson11_deserializeOpDeleteRepository) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteRepository) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteRepository(response, &metadata) + } + output := &DeleteRepositoryOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot 
bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteRepositoryOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteRepository(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotEmptyException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotEmptyException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteRepositoryPolicy struct { +} + +func (*awsAwsjson11_deserializeOpDeleteRepositoryPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response, &metadata) + } + output := &DeleteRepositoryPolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteRepositoryPolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryPolicyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeImages struct { +} + +func (*awsAwsjson11_deserializeOpDescribeImages) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
awsAwsjson11_deserializeOpErrorDescribeImages(response, &metadata) + } + output := &DescribeImagesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeImagesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ImageNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorImageNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeImageTags struct { +} + +func (*awsAwsjson11_deserializeOpDescribeImageTags) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeImageTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeImageTags(response, &metadata) + } + output := &DescribeImageTagsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeImageTagsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeImageTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeRegistries struct { +} + +func (*awsAwsjson11_deserializeOpDescribeRegistries) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeRegistries) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok 
{ + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRegistries(response, &metadata) + } + output := &DescribeRegistriesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeRegistriesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeRegistries(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeRepositories struct { +} + +func (*awsAwsjson11_deserializeOpDescribeRepositories) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeRepositories) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := 
out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeRepositories(response, &metadata) + } + output := &DescribeRepositoriesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeRepositoriesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeRepositories(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetAuthorizationToken struct { +} + +func (*awsAwsjson11_deserializeOpGetAuthorizationToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetAuthorizationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + 
return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response, &metadata) + } + output := &GetAuthorizationTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetAuthorizationTokenOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetAuthorizationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetRegistryCatalogData struct { +} + +func (*awsAwsjson11_deserializeOpGetRegistryCatalogData) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetRegistryCatalogData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + 
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetRegistryCatalogData(response, &metadata) + } + output := &GetRegistryCatalogDataOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetRegistryCatalogDataOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetRegistryCatalogData(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetRepositoryCatalogData struct { +} + +func (*awsAwsjson11_deserializeOpGetRepositoryCatalogData) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetRepositoryCatalogData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport 
type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetRepositoryCatalogData(response, &metadata) + } + output := &GetRepositoryCatalogDataOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetRepositoryCatalogDataOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetRepositoryCatalogData(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetRepositoryPolicy struct { +} + +func (*awsAwsjson11_deserializeOpGetRepositoryPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response, &metadata) + } + output := &GetRepositoryPolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryPolicyNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpInitiateLayerUpload struct { +} + +func (*awsAwsjson11_deserializeOpInitiateLayerUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpInitiateLayerUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err 
error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response, &metadata) + } + output := &InitiateLayerUploadOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorInitiateLayerUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RegistryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRegistryNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListTagsForResource struct { 
+} + +func (*awsAwsjson11_deserializeOpListTagsForResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListTagsForResource(response, &metadata) + } + output := &ListTagsForResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type 
awsAwsjson11_deserializeOpPutImage struct { +} + +func (*awsAwsjson11_deserializeOpPutImage) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutImage(response, &metadata) + } + output := &PutImageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutImageOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ImageAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorImageAlreadyExistsException(response, errorBody) + + case strings.EqualFold("ImageDigestDoesNotMatchException", errorCode): + return awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response, errorBody) + + case strings.EqualFold("ImageTagAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return 
awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("LayersNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorLayersNotFoundException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ReferencedImagesNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorReferencedImagesNotFoundException(response, errorBody) + + case strings.EqualFold("RegistryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRegistryNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutRegistryCatalogData struct { +} + +func (*awsAwsjson11_deserializeOpPutRegistryCatalogData) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutRegistryCatalogData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutRegistryCatalogData(response, &metadata) + } + output := &PutRegistryCatalogDataOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutRegistryCatalogDataOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutRegistryCatalogData(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + 
code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutRepositoryCatalogData struct { +} + +func (*awsAwsjson11_deserializeOpPutRepositoryCatalogData) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutRepositoryCatalogData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutRepositoryCatalogData(response, &metadata) + } + output := &PutRepositoryCatalogDataOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutRepositoryCatalogDataOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutRepositoryCatalogData(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpSetRepositoryPolicy struct { +} + +func (*awsAwsjson11_deserializeOpSetRepositoryPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpSetRepositoryPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response, &metadata) + } + output := &SetRepositoryPolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorSetRepositoryPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response 
body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpTagResource struct { +} + +func (*awsAwsjson11_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + 
} + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("InvalidTagParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("TooManyTagsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUntagResource struct { +} + +func (*awsAwsjson11_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func 
awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("InvalidTagParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidTagParameterException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("TooManyTagsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyTagsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUploadLayerPart struct { +} + +func (*awsAwsjson11_deserializeOpUploadLayerPart) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUploadLayerPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUploadLayerPart(response, &metadata) + } + output := &UploadLayerPartOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = 
awsAwsjson11_deserializeOpDocumentUploadLayerPartOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUploadLayerPart(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidLayerPartException", errorCode): + return awsAwsjson11_deserializeErrorInvalidLayerPartException(response, errorBody) + + case strings.EqualFold("InvalidParameterException", errorCode): + return awsAwsjson11_deserializeErrorInvalidParameterException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("RegistryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRegistryNotFoundException(response, errorBody) + + case strings.EqualFold("RepositoryNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorRepositoryNotFoundException(response, errorBody) + + case strings.EqualFold("ServerException", errorCode): + return awsAwsjson11_deserializeErrorServerException(response, errorBody) + + case strings.EqualFold("UnsupportedCommandException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedCommandException(response, errorBody) + + case strings.EqualFold("UploadNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorUploadNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsjson11_deserializeErrorEmptyUploadException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.EmptyUploadException{} + err := 
awsAwsjson11_deserializeDocumentEmptyUploadException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorImageAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ImageAlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentImageAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorImageDigestDoesNotMatchException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ImageDigestDoesNotMatchException{} + err := awsAwsjson11_deserializeDocumentImageDigestDoesNotMatchException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorImageNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ImageNotFoundException{} + err := awsAwsjson11_deserializeDocumentImageNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorImageTagAlreadyExistsException(response *smithyhttp.Response, errorBody 
[Further generated error deserializers, each identical to awsAwsjson11_deserializeErrorEmptyUploadException above except for the concrete exception type decoded and returned:
awsAwsjson11_deserializeErrorImageTagAlreadyExistsException, awsAwsjson11_deserializeErrorInvalidLayerException,
awsAwsjson11_deserializeErrorInvalidLayerPartException, awsAwsjson11_deserializeErrorInvalidParameterException,
awsAwsjson11_deserializeErrorInvalidTagParameterException, awsAwsjson11_deserializeErrorLayerAlreadyExistsException,
awsAwsjson11_deserializeErrorLayerPartTooSmallException, awsAwsjson11_deserializeErrorLayersNotFoundException,
awsAwsjson11_deserializeErrorLimitExceededException, awsAwsjson11_deserializeErrorReferencedImagesNotFoundException,
awsAwsjson11_deserializeErrorRegistryNotFoundException, awsAwsjson11_deserializeErrorRepositoryAlreadyExistsException,
awsAwsjson11_deserializeErrorRepositoryNotEmptyException, awsAwsjson11_deserializeErrorRepositoryNotFoundException,
awsAwsjson11_deserializeErrorRepositoryPolicyNotFoundException, awsAwsjson11_deserializeErrorServerException,
awsAwsjson11_deserializeErrorTooManyTagsException, awsAwsjson11_deserializeErrorUnsupportedCommandException,
awsAwsjson11_deserializeErrorUploadNotFoundException]

+func awsAwsjson11_deserializeDocumentArchitectureList(v *[]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []string
+	if *v == nil {
+		cv = []string{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected Architecture to be of type string, got %T instead", value)
+			}
+			col = jtv
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentAuthorizationData(v **types.AuthorizationData, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.AuthorizationData
+	if *v == nil {
+		sv = &types.AuthorizationData{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "authorizationToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected Base64 to be of type string, got %T instead", value)
+				}
+				sv.AuthorizationToken = ptr.String(jtv)
+			}
+
+		case "expiresAt":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.ExpiresAt = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected ExpirationTimestamp to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
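The expiresAt branch above is the reason every decoder in this file calls decoder.UseNumber(): epoch-seconds timestamps arrive as JSON numbers, and json.Number defers numeric conversion until the field is actually parsed instead of forcing an early float64. A stdlib-only sketch of the same conversion; the time.Unix arithmetic here mirrors what the vendored smithytime.ParseEpochSeconds produces:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	dec := json.NewDecoder(bytes.NewReader([]byte(`{"expiresAt": 1695302400.5}`)))
	dec.UseNumber() // keep numbers as json.Number instead of float64

	var doc map[string]interface{}
	if err := dec.Decode(&doc); err != nil {
		panic(err)
	}

	num := doc["expiresAt"].(json.Number)
	f64, err := num.Float64()
	if err != nil {
		panic(err)
	}

	// Split the epoch float into whole seconds and nanoseconds.
	sec := int64(f64)
	nsec := int64((f64 - float64(sec)) * 1e9)
	fmt.Println(time.Unix(sec, nsec).UTC()) // 2023-09-21 13:20:00.5 +0000 UTC
}
```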
+func awsAwsjson11_deserializeDocumentEmptyUploadException(v **types.EmptyUploadException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.EmptyUploadException
+	if *v == nil {
+		sv = &types.EmptyUploadException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}

[Further generated document deserializers; message-only exception shapes mirror awsAwsjson11_deserializeDocumentEmptyUploadException above, struct shapes mirror awsAwsjson11_deserializeDocumentAuthorizationData, list shapes mirror awsAwsjson11_deserializeDocumentArchitectureList. Fields handled per function:
awsAwsjson11_deserializeDocumentImage: imageId (ImageIdentifier), imageManifest, imageManifestMediaType, registryId, repositoryName
awsAwsjson11_deserializeDocumentImageAlreadyExistsException: message
awsAwsjson11_deserializeDocumentImageDetail: artifactMediaType, imageDigest, imageManifestMediaType, imagePushedAt (epoch seconds), imageSizeInBytes (int64), imageTags (ImageTagList), registryId, repositoryName
awsAwsjson11_deserializeDocumentImageDetailList: []types.ImageDetail
awsAwsjson11_deserializeDocumentImageDigestDoesNotMatchException: message
awsAwsjson11_deserializeDocumentImageFailure: failureCode (types.ImageFailureCode), failureReason, imageId (ImageIdentifier)
awsAwsjson11_deserializeDocumentImageFailureList: []types.ImageFailure
awsAwsjson11_deserializeDocumentImageIdentifier: imageDigest, imageTag
awsAwsjson11_deserializeDocumentImageIdentifierList: []types.ImageIdentifier
awsAwsjson11_deserializeDocumentImageNotFoundException: message
awsAwsjson11_deserializeDocumentImageTagAlreadyExistsException: message
awsAwsjson11_deserializeDocumentImageTagDetail: createdAt (epoch seconds), imageDetail (ReferencedImageDetail), imageTag
awsAwsjson11_deserializeDocumentImageTagDetailList: []types.ImageTagDetail
awsAwsjson11_deserializeDocumentImageTagList: []string
awsAwsjson11_deserializeDocumentInvalidLayerException: message
awsAwsjson11_deserializeDocumentInvalidLayerPartException: lastValidByteReceived (int64), message, registryId, repositoryName, uploadId
awsAwsjson11_deserializeDocumentInvalidParameterException: message
awsAwsjson11_deserializeDocumentInvalidTagParameterException: message
awsAwsjson11_deserializeDocumentLayer: layerAvailability (types.LayerAvailability), layerDigest, layerSize (int64), mediaType
awsAwsjson11_deserializeDocumentLayerAlreadyExistsException: message]
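All of the struct-shaped document deserializers here share the **T calling convention: the caller passes the address of a possibly-nil pointer, and the callee allocates the target only when a JSON value is actually present, leaving absent fields untouched. A self-contained sketch of that convention, using a hypothetical widget type rather than an SDK shape:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type widget struct{ Name string }

// deserializeWidget follows the generated **T convention: allocate the
// target only if the caller handed in a nil *widget and a value exists.
func deserializeWidget(v **widget, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil // absent field: leave *v untouched
	}
	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}
	sv := *v
	if sv == nil {
		sv = &widget{}
	}
	if name, ok := shape["name"].(string); ok {
		sv.Name = name
	}
	*v = sv
	return nil
}

func main() {
	var raw interface{}
	_ = json.Unmarshal([]byte(`{"name":"demo"}`), &raw)

	var w *widget // nil: deserializeWidget allocates it in place
	if err := deserializeWidget(&w, raw); err != nil {
		panic(err)
	}
	fmt.Println(w.Name) // demo
}
```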
[Remaining generated document deserializers, following the same three patterns. Fields handled per function:
awsAwsjson11_deserializeDocumentLayerFailure: failureCode (types.LayerFailureCode), failureReason, layerDigest
awsAwsjson11_deserializeDocumentLayerFailureList: []types.LayerFailure
awsAwsjson11_deserializeDocumentLayerList: []types.Layer
awsAwsjson11_deserializeDocumentLayerPartTooSmallException: message
awsAwsjson11_deserializeDocumentLayersNotFoundException: message
awsAwsjson11_deserializeDocumentLimitExceededException: message
awsAwsjson11_deserializeDocumentOperatingSystemList: []string
awsAwsjson11_deserializeDocumentReferencedImageDetail: artifactMediaType, imageDigest, imageManifestMediaType, imagePushedAt (epoch seconds), imageSizeInBytes (int64)
awsAwsjson11_deserializeDocumentReferencedImagesNotFoundException: message
awsAwsjson11_deserializeDocumentRegistry: aliases (RegistryAliasList), registryArn, registryId, registryUri, verified (bool)
awsAwsjson11_deserializeDocumentRegistryAlias: defaultRegistryAlias (bool), name, primaryRegistryAlias (bool), status (types.RegistryAliasStatus)
awsAwsjson11_deserializeDocumentRegistryAliasList: []types.RegistryAlias
awsAwsjson11_deserializeDocumentRegistryCatalogData: displayName
awsAwsjson11_deserializeDocumentRegistryList: []types.Registry
awsAwsjson11_deserializeDocumentRegistryNotFoundException: message
awsAwsjson11_deserializeDocumentRepository: createdAt (epoch seconds), registryId, repositoryArn, repositoryName, repositoryUri
awsAwsjson11_deserializeDocumentRepositoryAlreadyExistsException: message
awsAwsjson11_deserializeDocumentRepositoryCatalogData: aboutText, architectures (ArchitectureList), description, logoUrl, marketplaceCertified (bool), operatingSystems (OperatingSystemList), usageText
awsAwsjson11_deserializeDocumentRepositoryList: []types.Repository
awsAwsjson11_deserializeDocumentRepositoryNotEmptyException: message
awsAwsjson11_deserializeDocumentRepositoryNotFoundException: message]

+func awsAwsjson11_deserializeDocumentRepositoryPolicyNotFoundException(v **types.RepositoryPolicyNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected
nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RepositoryPolicyNotFoundException + if *v == nil { + sv = &types.RepositoryPolicyNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentServerException(v **types.ServerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServerException + if *v == nil { + sv = &types.ServerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Tag + if *v == nil { + sv = &types.Tag{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Key": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagKey to be of type string, got %T instead", value) + } + sv.Key = ptr.String(jtv) + } + + case "Value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Tag + if *v == nil { + cv = []types.Tag{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Tag + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTag(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTooManyTagsException(v **types.TooManyTagsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TooManyTagsException + if *v == nil { + sv = &types.TooManyTagsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if 
!ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUnsupportedCommandException(v **types.UnsupportedCommandException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnsupportedCommandException + if *v == nil { + sv = &types.UnsupportedCommandException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUploadNotFoundException(v **types.UploadNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UploadNotFoundException + if *v == nil { + sv = &types.UploadNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExceptionMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentBatchCheckLayerAvailabilityOutput(v **BatchCheckLayerAvailabilityOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchCheckLayerAvailabilityOutput + if *v == nil { + sv = &BatchCheckLayerAvailabilityOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentLayerFailureList(&sv.Failures, value); err != nil { + return err + } + + case "layers": + if err := awsAwsjson11_deserializeDocumentLayerList(&sv.Layers, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentBatchDeleteImageOutput(v **BatchDeleteImageOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchDeleteImageOutput + if *v == nil { + sv = &BatchDeleteImageOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "failures": + if err := awsAwsjson11_deserializeDocumentImageFailureList(&sv.Failures, value); err != nil { + return err + } + + case "imageIds": + if err := awsAwsjson11_deserializeDocumentImageIdentifierList(&sv.ImageIds, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + 
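+// The document deserializers in this file share one calling convention: the
+// operation deserializer decodes the HTTP response body into untyped JSON with
+// json.Number preserved, then hands the result to the matching function along
+// with the address of the output shape. A simplified sketch of that convention
+// (variable names here are illustrative, not part of the generated API):
+//
+//	decoder := json.NewDecoder(response.Body)
+//	decoder.UseNumber() // keep numbers as json.Number, as the type switches expect
+//	var shape interface{}
+//	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+//		return err
+//	}
+//	output := &BatchDeleteImageOutput{}
+//	if err := awsAwsjson11_deserializeOpDocumentBatchDeleteImageOutput(&output, shape); err != nil {
+//		return err
+//	}
+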
+func awsAwsjson11_deserializeOpDocumentCompleteLayerUploadOutput(v **CompleteLayerUploadOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CompleteLayerUploadOutput + if *v == nil { + sv = &CompleteLayerUploadOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "layerDigest": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LayerDigest to be of type string, got %T instead", value) + } + sv.LayerDigest = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + case "uploadId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) + } + sv.UploadId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateRepositoryOutput(v **CreateRepositoryOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateRepositoryOutput + if *v == nil { + sv = &CreateRepositoryOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "catalogData": + if err := awsAwsjson11_deserializeDocumentRepositoryCatalogData(&sv.CatalogData, value); err != nil { + return err + } + + case "repository": + if err := awsAwsjson11_deserializeDocumentRepository(&sv.Repository, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteRepositoryOutput(v **DeleteRepositoryOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteRepositoryOutput + if *v == nil { + sv = &DeleteRepositoryOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "repository": + if err := awsAwsjson11_deserializeDocumentRepository(&sv.Repository, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteRepositoryPolicyOutput(v **DeleteRepositoryPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteRepositoryPolicyOutput + if *v == nil { + sv = &DeleteRepositoryPolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "policyText": + if 
value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) + } + sv.PolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeImagesOutput(v **DescribeImagesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeImagesOutput + if *v == nil { + sv = &DescribeImagesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "imageDetails": + if err := awsAwsjson11_deserializeDocumentImageDetailList(&sv.ImageDetails, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeImageTagsOutput(v **DescribeImageTagsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeImageTagsOutput + if *v == nil { + sv = &DescribeImageTagsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "imageTagDetails": + if err := awsAwsjson11_deserializeDocumentImageTagDetailList(&sv.ImageTagDetails, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeRegistriesOutput(v **DescribeRegistriesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeRegistriesOutput + if *v == nil { + sv = &DescribeRegistriesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "registries": + if err := awsAwsjson11_deserializeDocumentRegistryList(&sv.Registries, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDescribeRepositoriesOutput(v 
**DescribeRepositoriesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeRepositoriesOutput + if *v == nil { + sv = &DescribeRepositoriesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "repositories": + if err := awsAwsjson11_deserializeDocumentRepositoryList(&sv.Repositories, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetAuthorizationTokenOutput(v **GetAuthorizationTokenOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetAuthorizationTokenOutput + if *v == nil { + sv = &GetAuthorizationTokenOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authorizationData": + if err := awsAwsjson11_deserializeDocumentAuthorizationData(&sv.AuthorizationData, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetRegistryCatalogDataOutput(v **GetRegistryCatalogDataOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRegistryCatalogDataOutput + if *v == nil { + sv = &GetRegistryCatalogDataOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "registryCatalogData": + if err := awsAwsjson11_deserializeDocumentRegistryCatalogData(&sv.RegistryCatalogData, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetRepositoryCatalogDataOutput(v **GetRepositoryCatalogDataOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRepositoryCatalogDataOutput + if *v == nil { + sv = &GetRepositoryCatalogDataOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "catalogData": + if err := awsAwsjson11_deserializeDocumentRepositoryCatalogData(&sv.CatalogData, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetRepositoryPolicyOutput(v **GetRepositoryPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRepositoryPolicyOutput + if *v == nil { + sv = 
&GetRepositoryPolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "policyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) + } + sv.PolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentInitiateLayerUploadOutput(v **InitiateLayerUploadOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *InitiateLayerUploadOutput + if *v == nil { + sv = &InitiateLayerUploadOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "partSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.PartSize = ptr.Int64(i64) + } + + case "uploadId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UploadId to be of type string, got %T instead", value) + } + sv.UploadId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTagsForResourceOutput + if *v == nil { + sv = &ListTagsForResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "tags": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutImageOutput(v **PutImageOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutImageOutput + if *v == nil { + sv = &PutImageOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "image": + if err := awsAwsjson11_deserializeDocumentImage(&sv.Image, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutRegistryCatalogDataOutput(v **PutRegistryCatalogDataOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + 
return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutRegistryCatalogDataOutput + if *v == nil { + sv = &PutRegistryCatalogDataOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "registryCatalogData": + if err := awsAwsjson11_deserializeDocumentRegistryCatalogData(&sv.RegistryCatalogData, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutRepositoryCatalogDataOutput(v **PutRepositoryCatalogDataOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutRepositoryCatalogDataOutput + if *v == nil { + sv = &PutRepositoryCatalogDataOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "catalogData": + if err := awsAwsjson11_deserializeDocumentRepositoryCatalogData(&sv.CatalogData, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentSetRepositoryPolicyOutput(v **SetRepositoryPolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *SetRepositoryPolicyOutput + if *v == nil { + sv = &SetRepositoryPolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "policyText": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryPolicyText to be of type string, got %T instead", value) + } + sv.PolicyText = ptr.String(jtv) + } + + case "registryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value) + } + sv.RegistryId = ptr.String(jtv) + } + + case "repositoryName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value) + } + sv.RepositoryName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentTagResourceOutput(v **TagResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *TagResourceOutput + if *v == nil { + sv = &TagResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUntagResourceOutput(v **UntagResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UntagResourceOutput + if *v == nil { + sv = &UntagResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + 
return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentUploadLayerPartOutput(v **UploadLayerPartOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *UploadLayerPartOutput
+	if *v == nil {
+		sv = &UploadLayerPartOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "lastByteReceived":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected PartSize to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.LastByteReceived = ptr.Int64(i64)
+			}
+
+		case "registryId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RegistryId to be of type string, got %T instead", value)
+				}
+				sv.RegistryId = ptr.String(jtv)
+			}
+
+		case "repositoryName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected RepositoryName to be of type string, got %T instead", value)
+				}
+				sv.RepositoryName = ptr.String(jtv)
+			}
+
+		case "uploadId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected UploadId to be of type string, got %T instead", value)
+				}
+				sv.UploadId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/doc.go
new file mode 100644
index 000000000..a17297a09
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/doc.go
@@ -0,0 +1,15 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package ecrpublic provides the API client, operations, and parameter types for
+// Amazon Elastic Container Registry Public.
+//
+// Amazon Elastic Container Registry (Amazon ECR) is a managed container image
+// registry service. Amazon ECR provides both public and private registries to
+// host your container images. You can use the familiar Docker CLI, or your
+// preferred client, to push, pull, and manage images. Amazon ECR provides a
+// secure, scalable, and reliable registry for your Docker or Open Container
+// Initiative (OCI) images. Amazon ECR supports public repositories with this
+// API. For information about the Amazon ECR API for private repositories, see
+// Amazon Elastic Container Registry API Reference
+// (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/Welcome.html).
+package ecrpublic
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/endpoints.go
new file mode 100644
index 000000000..071eca69c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/endpoints.go
@@ -0,0 +1,200 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ecrpublic
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalendpoints "github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/url"
+	"strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+	ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+	return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	return fn(region, options)
+}
+
+func resolveDefaultEndpointConfiguration(o *Options) {
+	if o.EndpointResolver != nil {
+		return
+	}
+	o.EndpointResolver = NewDefaultEndpointResolver()
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint URL. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom. You can provide functional options to configure endpoint
+// values for the resolved endpoint.
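+//
+// A minimal usage sketch (illustrative only; the region and URL here are
+// assumptions, not defaults of this package):
+//
+//	client := New(Options{
+//		Region:           "us-east-1",
+//		EndpointResolver: EndpointResolverFromURL("https://my-custom-endpoint.example"),
+//	})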
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "ecr-public" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns an aws.EndpointNotFoundError, the resolver will use the provided
+// fallbackResolver for resolution.
+//
+// fallbackResolver must not be nil
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver {
+	var resolver aws.EndpointResolverWithOptions
+
+	if awsResolverWithOptions != nil {
+		resolver = awsResolverWithOptions
+	} else if awsResolver != nil {
+		resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+	}
+
+	return &wrappedEndpointResolver{
+		awsResolver: resolver,
+		resolver:    fallbackResolver,
+	}
+}
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+	options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+	if len(options.EndpointOptions.ResolvedRegion) == 0 {
+		const fipsInfix = "-fips-"
+		const fipsPrefix = "fips-"
+		const fipsSuffix = "-fips"
+
+		if strings.Contains(options.Region, fipsInfix) ||
+			strings.Contains(options.Region, fipsPrefix) ||
+			strings.Contains(options.Region, fipsSuffix) {
+			options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+				options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+			options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+		}
+	}
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/generated.json
new file mode 100644
index 000000000..a60655a86
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/generated.json
@@ -0,0 +1,50 @@
+{
+  "dependencies": {
+    "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+    "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+    "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+    "github.com/aws/smithy-go": "v1.4.0"
+  },
+  "files": [
+    "api_client.go",
+    "api_client_test.go",
+    "api_op_BatchCheckLayerAvailability.go",
+    "api_op_BatchDeleteImage.go",
+    "api_op_CompleteLayerUpload.go",
+    "api_op_CreateRepository.go",
+    "api_op_DeleteRepository.go",
+    "api_op_DeleteRepositoryPolicy.go",
+    "api_op_DescribeImageTags.go",
+    "api_op_DescribeImages.go",
+    "api_op_DescribeRegistries.go",
+    "api_op_DescribeRepositories.go",
+    "api_op_GetAuthorizationToken.go",
+    "api_op_GetRegistryCatalogData.go",
+    "api_op_GetRepositoryCatalogData.go",
+    "api_op_GetRepositoryPolicy.go",
+    "api_op_InitiateLayerUpload.go",
+    "api_op_ListTagsForResource.go",
+    "api_op_PutImage.go",
+    "api_op_PutRegistryCatalogData.go",
+    "api_op_PutRepositoryCatalogData.go",
+    "api_op_SetRepositoryPolicy.go",
+    "api_op_TagResource.go",
+    "api_op_UntagResource.go",
+    "api_op_UploadLayerPart.go",
+    "deserializers.go",
+    "doc.go",
+    "endpoints.go",
+    "generated.json",
+    "internal/endpoints/endpoints.go",
+    "internal/endpoints/endpoints_test.go",
+    "protocol_test.go",
+    "serializers.go",
+    "types/enums.go",
+    "types/errors.go",
+    "types/types.go",
+    "validators.go"
+  ],
+  "go": "1.15",
+  "module": "github.com/aws/aws-sdk-go-v2/service/ecrpublic",
+  "unstable": false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go
new file mode 100644
index 000000000..45b1d3dac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package ecrpublic
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.12.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..356bda290
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/internal/endpoints/endpoints.go
@@ -0,0 +1,250 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+	// SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+	// name. You must not set this value directly in your application.
+	ResolvedRegion string
+
+	// DisableHTTPS informs the resolver to return an endpoint that does not use the
+	// HTTPS scheme.
+	DisableHTTPS bool
+
+	// UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+	UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+	return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+	return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+	return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+	return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+	return endpoints.Options{
+		Logger:               options.Logger,
+		LogDeprecated:        options.LogDeprecated,
+		ResolvedRegion:       options.ResolvedRegion,
+		DisableHTTPS:         options.DisableHTTPS,
+		UseDualStackEndpoint: options.UseDualStackEndpoint,
+		UseFIPSEndpoint:      options.UseFIPSEndpoint,
+	}
+}
+
+// Resolver ECR PUBLIC endpoint resolver
+type Resolver struct {
+	partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+	if len(region) == 0 {
+		return endpoint, &aws.MissingRegionError{}
+	}
+
+	opt := transformToSharedOptions(options)
+	return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+	return &Resolver{
+		partitions: defaultPartitions,
+	}
+}
+
+var partitionRegexp = struct {
+	Aws      *regexp.Regexp
+	AwsCn    *regexp.Regexp
+	AwsIso   *regexp.Regexp
+	AwsIsoB  *regexp.Regexp
+	AwsUsGov *regexp.Regexp
+}{
+
+	Aws:      regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"),
+	AwsCn:    regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+	AwsIso:   regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+	AwsIsoB:  regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+	AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
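+// For illustration: the resolver selects the first partition in defaultPartitions
+// below whose RegionRegex matches the requested region, so a region such as
+// "us-east-1" resolves through the "aws" partition and "cn-north-1" through
+// "aws-cn":
+//
+//	partitionRegexp.Aws.MatchString("us-east-1")    // true
+//	partitionRegexp.AwsCn.MatchString("cn-north-1") // true
+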
+var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-public.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "api.ecr-public-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-public-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr-public.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-public.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "api.ecr-public-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-public-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr-public.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "api.ecr-public-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr-public.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "api.ecr-public-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr-public.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-public.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "api.ecr-public-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "api.ecr-public-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "api.ecr-public.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: 
partitionRegexp.AwsUsGov, + IsRegionalized: true, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/serializers.go new file mode 100644 index 000000000..4fd9a7dc3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/serializers.go @@ -0,0 +1,1948 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ecrpublic + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsjson11_serializeOpBatchCheckLayerAvailability struct { +} + +func (*awsAwsjson11_serializeOpBatchCheckLayerAvailability) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpBatchCheckLayerAvailability) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchCheckLayerAvailabilityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.BatchCheckLayerAvailability") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentBatchCheckLayerAvailabilityInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpBatchDeleteImage struct { +} + +func (*awsAwsjson11_serializeOpBatchDeleteImage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpBatchDeleteImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := 
in.Parameters.(*BatchDeleteImageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.BatchDeleteImage") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentBatchDeleteImageInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCompleteLayerUpload struct { +} + +func (*awsAwsjson11_serializeOpCompleteLayerUpload) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCompleteLayerUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CompleteLayerUploadInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.CompleteLayerUpload") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCompleteLayerUploadInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} 
+ } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateRepository struct { +} + +func (*awsAwsjson11_serializeOpCreateRepository) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateRepository) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateRepositoryInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.CreateRepository") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateRepositoryInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteRepository struct { +} + +func (*awsAwsjson11_serializeOpDeleteRepository) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteRepository) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteRepositoryInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DeleteRepository") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteRepositoryInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteRepositoryPolicy struct { +} + +func (*awsAwsjson11_serializeOpDeleteRepositoryPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteRepositoryPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DeleteRepositoryPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteRepositoryPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeImages struct { +} + +func (*awsAwsjson11_serializeOpDescribeImages) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeImages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeImagesInput) + _ = input + if !ok { + return 
out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DescribeImages") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeImagesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeImageTags struct { +} + +func (*awsAwsjson11_serializeOpDescribeImageTags) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeImageTags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeImageTagsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DescribeImageTags") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeImageTagsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type 
awsAwsjson11_serializeOpDescribeRegistries struct { +} + +func (*awsAwsjson11_serializeOpDescribeRegistries) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeRegistries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeRegistriesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DescribeRegistries") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeRegistriesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeRepositories struct { +} + +func (*awsAwsjson11_serializeOpDescribeRepositories) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeRepositories) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeRepositoriesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.DescribeRepositories") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeRepositoriesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetAuthorizationToken struct { +} + +func (*awsAwsjson11_serializeOpGetAuthorizationToken) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetAuthorizationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetAuthorizationTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.GetAuthorizationToken") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetAuthorizationTokenInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetRegistryCatalogData struct { +} + +func (*awsAwsjson11_serializeOpGetRegistryCatalogData) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetRegistryCatalogData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRegistryCatalogDataInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.GetRegistryCatalogData") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetRegistryCatalogDataInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetRepositoryCatalogData struct { +} + +func (*awsAwsjson11_serializeOpGetRepositoryCatalogData) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetRepositoryCatalogData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRepositoryCatalogDataInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.GetRepositoryCatalogData") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetRepositoryCatalogDataInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + 
+type awsAwsjson11_serializeOpGetRepositoryPolicy struct { +} + +func (*awsAwsjson11_serializeOpGetRepositoryPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRepositoryPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.GetRepositoryPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetRepositoryPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpInitiateLayerUpload struct { +} + +func (*awsAwsjson11_serializeOpInitiateLayerUpload) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpInitiateLayerUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*InitiateLayerUploadInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.InitiateLayerUpload") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListTagsForResource struct { +} + +func (*awsAwsjson11_serializeOpListTagsForResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTagsForResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.ListTagsForResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutImage struct { +} + +func (*awsAwsjson11_serializeOpPutImage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutImageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + 
operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.PutImage") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutImageInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutRegistryCatalogData struct { +} + +func (*awsAwsjson11_serializeOpPutRegistryCatalogData) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutRegistryCatalogData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutRegistryCatalogDataInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.PutRegistryCatalogData") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutRegistryCatalogDataInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutRepositoryCatalogData struct { +} + +func 
(*awsAwsjson11_serializeOpPutRepositoryCatalogData) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutRepositoryCatalogData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutRepositoryCatalogDataInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.PutRepositoryCatalogData") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutRepositoryCatalogDataInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpSetRepositoryPolicy struct { +} + +func (*awsAwsjson11_serializeOpSetRepositoryPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpSetRepositoryPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SetRepositoryPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.SetRepositoryPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentSetRepositoryPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpTagResource struct { +} + +func (*awsAwsjson11_serializeOpTagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.TagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUntagResource struct { +} + +func (*awsAwsjson11_serializeOpUntagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UntagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if 
len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.UntagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUploadLayerPart struct { +} + +func (*awsAwsjson11_serializeOpUploadLayerPart) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUploadLayerPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UploadLayerPartInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SpencerFrontendService.UploadLayerPart") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUploadLayerPartInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsAwsjson11_serializeDocumentArchitectureList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := 
array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentBatchedOperationLayerDigestList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentImageIdentifier(v *types.ImageIdentifier, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageDigest != nil { + ok := object.Key("imageDigest") + ok.String(*v.ImageDigest) + } + + if v.ImageTag != nil { + ok := object.Key("imageTag") + ok.String(*v.ImageTag) + } + + return nil +} + +func awsAwsjson11_serializeDocumentImageIdentifierList(v []types.ImageIdentifier, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentImageIdentifier(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentLayerDigestList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentOperatingSystemList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentRepositoryCatalogDataInput(v *types.RepositoryCatalogDataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AboutText != nil { + ok := object.Key("aboutText") + ok.String(*v.AboutText) + } + + if v.Architectures != nil { + ok := object.Key("architectures") + if err := awsAwsjson11_serializeDocumentArchitectureList(v.Architectures, ok); err != nil { + return err + } + } + + if v.Description != nil { + ok := object.Key("description") + ok.String(*v.Description) + } + + if v.LogoImageBlob != nil { + ok := object.Key("logoImageBlob") + ok.Base64EncodeBytes(v.LogoImageBlob) + } + + if v.OperatingSystems != nil { + ok := object.Key("operatingSystems") + if err := awsAwsjson11_serializeDocumentOperatingSystemList(v.OperatingSystems, ok); err != nil { + return err + } + } + + if v.UsageText != nil { + ok := object.Key("usageText") + ok.String(*v.UsageText) + } + + return nil +} + +func awsAwsjson11_serializeDocumentRepositoryNameList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Key != nil { + ok := object.Key("Key") + ok.String(*v.Key) + } + + if v.Value != nil { + ok := object.Key("Value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeOpDocumentBatchCheckLayerAvailabilityInput(v 
*BatchCheckLayerAvailabilityInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LayerDigests != nil { + ok := object.Key("layerDigests") + if err := awsAwsjson11_serializeDocumentBatchedOperationLayerDigestList(v.LayerDigests, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentBatchDeleteImageInput(v *BatchDeleteImageInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageIds != nil { + ok := object.Key("imageIds") + if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCompleteLayerUploadInput(v *CompleteLayerUploadInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LayerDigests != nil { + ok := object.Key("layerDigests") + if err := awsAwsjson11_serializeDocumentLayerDigestList(v.LayerDigests, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + if v.UploadId != nil { + ok := object.Key("uploadId") + ok.String(*v.UploadId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateRepositoryInput(v *CreateRepositoryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CatalogData != nil { + ok := object.Key("catalogData") + if err := awsAwsjson11_serializeDocumentRepositoryCatalogDataInput(v.CatalogData, ok); err != nil { + return err + } + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteRepositoryInput(v *DeleteRepositoryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Force { + ok := object.Key("force") + ok.Boolean(v.Force) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteRepositoryPolicyInput(v *DeleteRepositoryPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeImagesInput(v *DescribeImagesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageIds != nil { + ok := object.Key("imageIds") + if err := awsAwsjson11_serializeDocumentImageIdentifierList(v.ImageIds, ok); err != nil { + return err 
+ } + } + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeImageTagsInput(v *DescribeImageTagsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeRegistriesInput(v *DescribeRegistriesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeRepositoriesInput(v *DescribeRepositoriesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("maxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryNames != nil { + ok := object.Key("repositoryNames") + if err := awsAwsjson11_serializeDocumentRepositoryNameList(v.RepositoryNames, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetAuthorizationTokenInput(v *GetAuthorizationTokenInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetRegistryCatalogDataInput(v *GetRegistryCatalogDataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetRepositoryCatalogDataInput(v *GetRepositoryCatalogDataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetRepositoryPolicyInput(v *GetRepositoryPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentInitiateLayerUploadInput(v *InitiateLayerUploadInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + 
ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListTagsForResourceInput(v *ListTagsForResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("resourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutImageInput(v *PutImageInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ImageDigest != nil { + ok := object.Key("imageDigest") + ok.String(*v.ImageDigest) + } + + if v.ImageManifest != nil { + ok := object.Key("imageManifest") + ok.String(*v.ImageManifest) + } + + if v.ImageManifestMediaType != nil { + ok := object.Key("imageManifestMediaType") + ok.String(*v.ImageManifestMediaType) + } + + if v.ImageTag != nil { + ok := object.Key("imageTag") + ok.String(*v.ImageTag) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutRegistryCatalogDataInput(v *PutRegistryCatalogDataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DisplayName != nil { + ok := object.Key("displayName") + ok.String(*v.DisplayName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutRepositoryCatalogDataInput(v *PutRepositoryCatalogDataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CatalogData != nil { + ok := object.Key("catalogData") + if err := awsAwsjson11_serializeDocumentRepositoryCatalogDataInput(v.CatalogData, ok); err != nil { + return err + } + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentSetRepositoryPolicyInput(v *SetRepositoryPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Force { + ok := object.Key("force") + ok.Boolean(v.Force) + } + + if v.PolicyText != nil { + ok := object.Key("policyText") + ok.String(*v.PolicyText) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("resourceArn") + ok.String(*v.ResourceArn) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("resourceArn") + ok.String(*v.ResourceArn) + } + + if v.TagKeys != nil { + ok := object.Key("tagKeys") + if err := awsAwsjson11_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUploadLayerPartInput(v *UploadLayerPartInput, value smithyjson.Value) error { + object := 
value.Object() + defer object.Close() + + if v.LayerPartBlob != nil { + ok := object.Key("layerPartBlob") + ok.Base64EncodeBytes(v.LayerPartBlob) + } + + if v.PartFirstByte != nil { + ok := object.Key("partFirstByte") + ok.Long(*v.PartFirstByte) + } + + if v.PartLastByte != nil { + ok := object.Key("partLastByte") + ok.Long(*v.PartLastByte) + } + + if v.RegistryId != nil { + ok := object.Key("registryId") + ok.String(*v.RegistryId) + } + + if v.RepositoryName != nil { + ok := object.Key("repositoryName") + ok.String(*v.RepositoryName) + } + + if v.UploadId != nil { + ok := object.Key("uploadId") + ok.String(*v.UploadId) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/enums.go new file mode 100644 index 000000000..d80a05244 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/enums.go @@ -0,0 +1,87 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type ImageFailureCode string + +// Enum values for ImageFailureCode +const ( + ImageFailureCodeInvalidImageDigest ImageFailureCode = "InvalidImageDigest" + ImageFailureCodeInvalidImageTag ImageFailureCode = "InvalidImageTag" + ImageFailureCodeImageTagDoesNotMatchDigest ImageFailureCode = "ImageTagDoesNotMatchDigest" + ImageFailureCodeImageNotFound ImageFailureCode = "ImageNotFound" + ImageFailureCodeMissingDigestAndTag ImageFailureCode = "MissingDigestAndTag" + ImageFailureCodeImageReferencedByManifestList ImageFailureCode = "ImageReferencedByManifestList" + ImageFailureCodeKmsError ImageFailureCode = "KmsError" +) + +// Values returns all known values for ImageFailureCode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ImageFailureCode) Values() []ImageFailureCode { + return []ImageFailureCode{ + "InvalidImageDigest", + "InvalidImageTag", + "ImageTagDoesNotMatchDigest", + "ImageNotFound", + "MissingDigestAndTag", + "ImageReferencedByManifestList", + "KmsError", + } +} + +type LayerAvailability string + +// Enum values for LayerAvailability +const ( + LayerAvailabilityAvailable LayerAvailability = "AVAILABLE" + LayerAvailabilityUnavailable LayerAvailability = "UNAVAILABLE" +) + +// Values returns all known values for LayerAvailability. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (LayerAvailability) Values() []LayerAvailability { + return []LayerAvailability{ + "AVAILABLE", + "UNAVAILABLE", + } +} + +type LayerFailureCode string + +// Enum values for LayerFailureCode +const ( + LayerFailureCodeInvalidLayerDigest LayerFailureCode = "InvalidLayerDigest" + LayerFailureCodeMissingLayerDigest LayerFailureCode = "MissingLayerDigest" +) + +// Values returns all known values for LayerFailureCode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (LayerFailureCode) Values() []LayerFailureCode { + return []LayerFailureCode{ + "InvalidLayerDigest", + "MissingLayerDigest", + } +} + +type RegistryAliasStatus string + +// Enum values for RegistryAliasStatus +const ( + RegistryAliasStatusActive RegistryAliasStatus = "ACTIVE" + RegistryAliasStatusPending RegistryAliasStatus = "PENDING" + RegistryAliasStatusRejected RegistryAliasStatus = "REJECTED" +) + +// Values returns all known values for RegistryAliasStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (RegistryAliasStatus) Values() []RegistryAliasStatus { + return []RegistryAliasStatus{ + "ACTIVE", + "PENDING", + "REJECTED", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/errors.go new file mode 100644 index 000000000..485ac0c1d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/errors.go @@ -0,0 +1,476 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The specified layer upload does not contain any layer parts. +type EmptyUploadException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *EmptyUploadException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *EmptyUploadException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *EmptyUploadException) ErrorCode() string { return "EmptyUploadException" } +func (e *EmptyUploadException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified image has already been pushed, and there were no changes to the +// manifest or image tag after the last push. +type ImageAlreadyExistsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ImageAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImageAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImageAlreadyExistsException) ErrorCode() string { return "ImageAlreadyExistsException" } +func (e *ImageAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified image digest does not match the digest that Amazon ECR calculated +// for the image. +type ImageDigestDoesNotMatchException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ImageDigestDoesNotMatchException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImageDigestDoesNotMatchException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImageDigestDoesNotMatchException) ErrorCode() string { + return "ImageDigestDoesNotMatchException" +} +func (e *ImageDigestDoesNotMatchException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The image requested does not exist in the specified repository. 
+type ImageNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ImageNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImageNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImageNotFoundException) ErrorCode() string { return "ImageNotFoundException" } +func (e *ImageNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified image is tagged with a tag that already exists. The repository is +// configured for tag immutability. +type ImageTagAlreadyExistsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ImageTagAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ImageTagAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ImageTagAlreadyExistsException) ErrorCode() string { return "ImageTagAlreadyExistsException" } +func (e *ImageTagAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The layer digest calculation performed by Amazon ECR upon receipt of the image +// layer does not match the digest specified. +type InvalidLayerException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidLayerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidLayerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidLayerException) ErrorCode() string { return "InvalidLayerException" } +func (e *InvalidLayerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The layer part size is not valid, or the first byte specified is not consecutive +// to the last byte of a previous layer part upload. +type InvalidLayerPartException struct { + Message *string + + RegistryId *string + RepositoryName *string + UploadId *string + LastValidByteReceived *int64 + + noSmithyDocumentSerde +} + +func (e *InvalidLayerPartException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidLayerPartException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidLayerPartException) ErrorCode() string { return "InvalidLayerPartException" } +func (e *InvalidLayerPartException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified parameter is invalid. Review the available parameters for the API +// request. +type InvalidParameterException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidParameterException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidParameterException) ErrorCode() string { return "InvalidParameterException" } +func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// An invalid parameter has been specified. Tag keys can have a maximum character +// length of 128 characters, and tag values can have a maximum length of 256 +// characters. 
+type InvalidTagParameterException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidTagParameterException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidTagParameterException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidTagParameterException) ErrorCode() string { return "InvalidTagParameterException" } +func (e *InvalidTagParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The image layer already exists in the associated repository. +type LayerAlreadyExistsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *LayerAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LayerAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LayerAlreadyExistsException) ErrorCode() string { return "LayerAlreadyExistsException" } +func (e *LayerAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Layer parts must be at least 5 MiB in size. +type LayerPartTooSmallException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *LayerPartTooSmallException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LayerPartTooSmallException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LayerPartTooSmallException) ErrorCode() string { return "LayerPartTooSmallException" } +func (e *LayerPartTooSmallException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified layers could not be found, or the specified layer is not valid for +// this repository. +type LayersNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *LayersNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LayersNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LayersNotFoundException) ErrorCode() string { return "LayersNotFoundException" } +func (e *LayersNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The operation did not succeed because it would have exceeded a service limit for +// your account. For more information, see Amazon ECR Service Quotas +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) in +// the Amazon Elastic Container Registry User Guide. +type LimitExceededException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LimitExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LimitExceededException) ErrorCode() string { return "LimitExceededException" } +func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The manifest list is referencing an image that does not exist. 
+type ReferencedImagesNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ReferencedImagesNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ReferencedImagesNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ReferencedImagesNotFoundException) ErrorCode() string { + return "ReferencedImagesNotFoundException" +} +func (e *ReferencedImagesNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The registry does not exist. +type RegistryNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *RegistryNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RegistryNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RegistryNotFoundException) ErrorCode() string { return "RegistryNotFoundException" } +func (e *RegistryNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified repository already exists in the specified registry. +type RepositoryAlreadyExistsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *RepositoryAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RepositoryAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RepositoryAlreadyExistsException) ErrorCode() string { + return "RepositoryAlreadyExistsException" +} +func (e *RepositoryAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified repository contains images. To delete a repository that contains +// images, you must force the deletion with the force parameter. +type RepositoryNotEmptyException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *RepositoryNotEmptyException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RepositoryNotEmptyException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RepositoryNotEmptyException) ErrorCode() string { return "RepositoryNotEmptyException" } +func (e *RepositoryNotEmptyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct +// registry. +type RepositoryNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *RepositoryNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RepositoryNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RepositoryNotFoundException) ErrorCode() string { return "RepositoryNotFoundException" } +func (e *RepositoryNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified repository and registry combination does not have an associated +// repository policy. 
+type RepositoryPolicyNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *RepositoryPolicyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RepositoryPolicyNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RepositoryPolicyNotFoundException) ErrorCode() string { + return "RepositoryPolicyNotFoundException" +} +func (e *RepositoryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// These errors are usually caused by a server-side issue. +type ServerException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServerException) ErrorCode() string { return "ServerException" } +func (e *ServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The list of tags on the repository is over the limit. The maximum number of tags +// that can be applied to a repository is 50. +type TooManyTagsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *TooManyTagsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyTagsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyTagsException) ErrorCode() string { return "TooManyTagsException" } +func (e *TooManyTagsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The action is not supported in this Region. +type UnsupportedCommandException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedCommandException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedCommandException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedCommandException) ErrorCode() string { return "UnsupportedCommandException" } +func (e *UnsupportedCommandException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The upload could not be found, or the specified upload ID is not valid for this +// repository. +type UploadNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *UploadNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UploadNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UploadNotFoundException) ErrorCode() string { return "UploadNotFoundException" } +func (e *UploadNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/types.go new file mode 100644 index 000000000..731ea603d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/types/types.go @@ -0,0 +1,405 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// An authorization token data object that corresponds to a public registry. 
+type AuthorizationData struct { + + // A base64-encoded string that contains authorization data for a public Amazon ECR + // registry. When the string is decoded, it is presented in the format + // user:password for public registry authentication using docker login. + AuthorizationToken *string + + // The Unix time in seconds and milliseconds when the authorization token expires. + // Authorization tokens are valid for 12 hours. + ExpiresAt *time.Time + + noSmithyDocumentSerde +} + +// An object representing an Amazon ECR image. +type Image struct { + + // An object containing the image tag and image digest associated with an image. + ImageId *ImageIdentifier + + // The image manifest associated with the image. + ImageManifest *string + + // The manifest media type of the image. + ImageManifestMediaType *string + + // The AWS account ID associated with the registry containing the image. + RegistryId *string + + // The name of the repository associated with the image. + RepositoryName *string + + noSmithyDocumentSerde +} + +// An object that describes an image returned by a DescribeImages operation. +type ImageDetail struct { + + // The artifact media type of the image. + ArtifactMediaType *string + + // The sha256 digest of the image manifest. + ImageDigest *string + + // The media type of the image manifest. + ImageManifestMediaType *string + + // The date and time, expressed in standard JavaScript date format, at which the + // current image was pushed to the repository. + ImagePushedAt *time.Time + + // The size, in bytes, of the image in the repository. If the image is a manifest + // list, this will be the max size of all manifests in the list. Beginning with + // Docker version 1.9, the Docker client compresses image layers before pushing + // them to a V2 Docker registry. The output of the docker images command shows the + // uncompressed image size, so it may return a larger image size than the image + // sizes returned by DescribeImages. + ImageSizeInBytes *int64 + + // The list of tags associated with this image. + ImageTags []string + + // The AWS account ID associated with the public registry to which this image + // belongs. + RegistryId *string + + // The name of the repository to which this image belongs. + RepositoryName *string + + noSmithyDocumentSerde +} + +// An object representing an Amazon ECR image failure. +type ImageFailure struct { + + // The code associated with the failure. + FailureCode ImageFailureCode + + // The reason for the failure. + FailureReason *string + + // The image ID associated with the failure. + ImageId *ImageIdentifier + + noSmithyDocumentSerde +} + +// An object with identifying information for an Amazon ECR image. +type ImageIdentifier struct { + + // The sha256 digest of the image manifest. + ImageDigest *string + + // The tag used for the image. + ImageTag *string + + noSmithyDocumentSerde +} + +// An object representing the image tag details for an image. +type ImageTagDetail struct { + + // The time stamp indicating when the image tag was created. + CreatedAt *time.Time + + // An object that describes the details of an image. + ImageDetail *ReferencedImageDetail + + // The tag associated with the image. + ImageTag *string + + noSmithyDocumentSerde +} + +// An object representing an Amazon ECR image layer. +type Layer struct { + + // The availability status of the image layer. + LayerAvailability LayerAvailability + + // The sha256 digest of the image layer. + LayerDigest *string + + // The size, in bytes, of the image layer. 
+ LayerSize *int64 + + // The media type of the layer, such as + // application/vnd.docker.image.rootfs.diff.tar.gzip or + // application/vnd.oci.image.layer.v1.tar+gzip. + MediaType *string + + noSmithyDocumentSerde +} + +// An object representing an Amazon ECR image layer failure. +type LayerFailure struct { + + // The failure code associated with the failure. + FailureCode LayerFailureCode + + // The reason for the failure. + FailureReason *string + + // The layer digest associated with the failure. + LayerDigest *string + + noSmithyDocumentSerde +} + +// An object that describes the image tag details returned by a DescribeImageTags +// action. +type ReferencedImageDetail struct { + + // The artifact media type of the image. + ArtifactMediaType *string + + // The sha256 digest of the image manifest. + ImageDigest *string + + // The media type of the image manifest. + ImageManifestMediaType *string + + // The date and time, expressed in standard JavaScript date format, at which the + // current image tag was pushed to the repository. + ImagePushedAt *time.Time + + // The size, in bytes, of the image in the repository. If the image is a manifest + // list, this will be the max size of all manifests in the list. Beginning with + // Docker version 1.9, the Docker client compresses image layers before pushing + // them to a V2 Docker registry. The output of the docker images command shows the + // uncompressed image size, so it may return a larger image size than the image + // sizes returned by DescribeImages. + ImageSizeInBytes *int64 + + noSmithyDocumentSerde +} + +// The details of a public registry. +type Registry struct { + + // An array of objects representing the aliases for a public registry. + // + // This member is required. + Aliases []RegistryAlias + + // The Amazon Resource Name (ARN) of the public registry. + // + // This member is required. + RegistryArn *string + + // The AWS account ID associated with the registry. If you do not specify a + // registry, the default public registry is assumed. + // + // This member is required. + RegistryId *string + + // The URI of a public registry. The URI contains a universal prefix and the + // registry alias. + // + // This member is required. + RegistryUri *string + + // Whether the account is verified. This indicates whether the account is an AWS + // Marketplace vendor. If an account is verified, each public repository will + // receive a verified account badge on the Amazon ECR Public Gallery. + // + // This member is required. + Verified *bool + + noSmithyDocumentSerde +} + +// An object representing the aliases for a public registry. A public registry is +// given an alias upon creation but a custom alias can be set using the Amazon ECR +// console. For more information, see Registries +// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html) in the +// Amazon Elastic Container Registry User Guide. +type RegistryAlias struct { + + // Whether or not the registry alias is the default alias for the registry. When + // the first public repository is created, your public registry is assigned a + // default registry alias. + // + // This member is required. + DefaultRegistryAlias bool + + // The name of the registry alias. + // + // This member is required. + Name *string + + // Whether or not the registry alias is the primary alias for the registry.
If + // true, the alias is the primary registry alias and is displayed in both the + // repository URL and the image URI used in the docker pull commands on the Amazon + // ECR Public Gallery. A registry alias that is not the primary registry alias can + // be used in the repository URI in a docker pull command. + // + // This member is required. + PrimaryRegistryAlias bool + + // The status of the registry alias. + // + // This member is required. + Status RegistryAliasStatus + + noSmithyDocumentSerde +} + +// The metadata for a public registry. +type RegistryCatalogData struct { + + // The display name for a public registry. This appears on the Amazon ECR Public + // Gallery. Only accounts that have the verified account badge can have a registry + // display name. + DisplayName *string + + noSmithyDocumentSerde +} + +// An object representing a repository. +type Repository struct { + + // The date and time, in JavaScript date format, when the repository was created. + CreatedAt *time.Time + + // The AWS account ID associated with the public registry that contains the + // repository. + RegistryId *string + + // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains + // the arn:aws:ecr namespace, followed by the region of the repository, AWS account + // ID of the repository owner, repository namespace, and repository name. For + // example, arn:aws:ecr:region:012345678910:repository/test. + RepositoryArn *string + + // The name of the repository. + RepositoryName *string + + // The URI for the repository. You can use this URI for container image push and + // pull operations. + RepositoryUri *string + + noSmithyDocumentSerde +} + +// The catalog data for a repository. This data is publicly visible in the Amazon +// ECR Public Gallery. +type RepositoryCatalogData struct { + + // The longform description of the contents of the repository. This text appears in + // the repository details on the Amazon ECR Public Gallery. + AboutText *string + + // The architecture tags that are associated with the repository. Only supported + // architecture tags appear publicly in the Amazon ECR Public Gallery. For more + // information, see RepositoryCatalogDataInput. + Architectures []string + + // The short description of the repository. + Description *string + + // The URL containing the logo associated with the repository. + LogoUrl *string + + // Whether or not the repository is certified by AWS Marketplace. + MarketplaceCertified *bool + + // The operating system tags that are associated with the repository. Only + // supported operating system tags appear publicly in the Amazon ECR Public + // Gallery. For more information, see RepositoryCatalogDataInput. + OperatingSystems []string + + // The longform usage details of the contents of the repository. The usage text + // provides context for users of the repository. + UsageText *string + + noSmithyDocumentSerde +} + +// An object containing the catalog data for a repository. This data is publicly +// visible in the Amazon ECR Public Gallery. +type RepositoryCatalogDataInput struct { + + // A detailed description of the contents of the repository. It is publicly visible + // in the Amazon ECR Public Gallery. The text must be in markdown format. + AboutText *string + + // The system architecture that the images in the repository are compatible with. + // On the Amazon ECR Public Gallery, the following supported architectures will + // appear as badges on the repository and are used as search filters.
+ // + // * ARM + // + // * ARM 64 + // + // * x86 + // + // * x86-64 + // + // If an unsupported tag is added to your repository catalog data, it will + // be associated with the repository and can be retrieved using the API but will + // not be discoverable in the Amazon ECR Public Gallery. + Architectures []string + + // A short description of the contents of the repository. This text appears in both + // the image details and also when searching for repositories on the Amazon ECR + // Public Gallery. + Description *string + + // The base64-encoded repository logo payload. The repository logo is only publicly + // visible in the Amazon ECR Public Gallery for verified accounts. + LogoImageBlob []byte + + // The operating systems that the images in the repository are compatible with. On + // the Amazon ECR Public Gallery, the following supported operating systems will + // appear as badges on the repository and are used as search filters. + // + // * Linux + // + // * Windows + // + // If an unsupported tag is added to your repository catalog + // data, it will be associated with the repository and can be retrieved using the + // API but will not be discoverable in the Amazon ECR Public Gallery. + OperatingSystems []string + + // Detailed information on how to use the contents of the repository. It is + // publicly visible in the Amazon ECR Public Gallery. The usage text provides + // context, support information, and additional usage details for users of the + // repository. The text must be in markdown format. + UsageText *string + + noSmithyDocumentSerde +} + +// The metadata that you apply to a resource to help you categorize and organize +// it. Each tag consists of a key and an optional value, both of which you +// define. Tag keys can have a maximum character length of 128 characters, and tag +// values can have a maximum length of 256 characters. +type Tag struct { + + // One part of a key-value pair that makes up a tag. A key is a general label that + // acts like a category for more specific tag values. + Key *string + + // The optional part of a key-value pair that makes up a tag. A value acts as a + // descriptor within a tag category (key). + Value *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/validators.go new file mode 100644 index 000000000..76d60ad71 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecrpublic/validators.go @@ -0,0 +1,751 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
+ +package ecrpublic + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpBatchCheckLayerAvailability struct { +} + +func (*validateOpBatchCheckLayerAvailability) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchCheckLayerAvailability) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchCheckLayerAvailabilityInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchCheckLayerAvailabilityInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpBatchDeleteImage struct { +} + +func (*validateOpBatchDeleteImage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchDeleteImage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchDeleteImageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchDeleteImageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCompleteLayerUpload struct { +} + +func (*validateOpCompleteLayerUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCompleteLayerUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CompleteLayerUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCompleteLayerUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateRepository struct { +} + +func (*validateOpCreateRepository) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateRepository) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateRepositoryInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateRepositoryInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteRepository struct { +} + +func (*validateOpDeleteRepository) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteRepository) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteRepositoryInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteRepositoryInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteRepositoryPolicy struct { +} 
+ +func (*validateOpDeleteRepositoryPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteRepositoryPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteRepositoryPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeImages struct { +} + +func (*validateOpDescribeImages) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeImages) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeImagesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeImagesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeImageTags struct { +} + +func (*validateOpDescribeImageTags) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeImageTags) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeImageTagsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeImageTagsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetRepositoryCatalogData struct { +} + +func (*validateOpGetRepositoryCatalogData) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetRepositoryCatalogData) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetRepositoryCatalogDataInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetRepositoryCatalogDataInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetRepositoryPolicy struct { +} + +func (*validateOpGetRepositoryPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetRepositoryPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetRepositoryPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpInitiateLayerUpload struct { +} + +func (*validateOpInitiateLayerUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpInitiateLayerUpload) HandleInitialize(ctx context.Context, in 
middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*InitiateLayerUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpInitiateLayerUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTagsForResource struct { +} + +func (*validateOpListTagsForResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTagsForResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTagsForResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutImage struct { +} + +func (*validateOpPutImage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutImage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutImageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutImageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutRepositoryCatalogData struct { +} + +func (*validateOpPutRepositoryCatalogData) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutRepositoryCatalogData) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutRepositoryCatalogDataInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutRepositoryCatalogDataInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpSetRepositoryPolicy struct { +} + +func (*validateOpSetRepositoryPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSetRepositoryPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SetRepositoryPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSetRepositoryPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUploadLayerPart struct { +} + +func (*validateOpUploadLayerPart) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUploadLayerPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UploadLayerPartInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUploadLayerPartInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpBatchCheckLayerAvailabilityValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchCheckLayerAvailability{}, middleware.After) +} + +func addOpBatchDeleteImageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpBatchDeleteImage{}, middleware.After) +} + +func addOpCompleteLayerUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCompleteLayerUpload{}, middleware.After) +} + +func addOpCreateRepositoryValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateRepository{}, middleware.After) +} + +func addOpDeleteRepositoryValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteRepository{}, middleware.After) +} + +func addOpDeleteRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteRepositoryPolicy{}, middleware.After) +} + +func addOpDescribeImagesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeImages{}, middleware.After) +} + +func addOpDescribeImageTagsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeImageTags{}, middleware.After) +} + +func addOpGetRepositoryCatalogDataValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetRepositoryCatalogData{}, middleware.After) +} + +func addOpGetRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetRepositoryPolicy{}, middleware.After) +} + +func addOpInitiateLayerUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpInitiateLayerUpload{}, middleware.After) +} + +func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) +} + 
+func addOpPutImageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutImage{}, middleware.After) +} + +func addOpPutRepositoryCatalogDataValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutRepositoryCatalogData{}, middleware.After) +} + +func addOpSetRepositoryPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpSetRepositoryPolicy{}, middleware.After) +} + +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func addOpUploadLayerPartValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUploadLayerPart{}, middleware.After) +} + +func validateOpBatchCheckLayerAvailabilityInput(v *BatchCheckLayerAvailabilityInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchCheckLayerAvailabilityInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.LayerDigests == nil { + invalidParams.Add(smithy.NewErrParamRequired("LayerDigests")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchDeleteImageInput(v *BatchDeleteImageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchDeleteImageInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageIds == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageIds")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCompleteLayerUploadInput(v *CompleteLayerUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CompleteLayerUploadInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if v.LayerDigests == nil { + invalidParams.Add(smithy.NewErrParamRequired("LayerDigests")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateRepositoryInput(v *CreateRepositoryInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateRepositoryInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteRepositoryInput(v *DeleteRepositoryInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteRepositoryInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteRepositoryPolicyInput(v *DeleteRepositoryPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteRepositoryPolicyInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } 
+ if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeImagesInput(v *DescribeImagesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeImagesInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeImageTagsInput(v *DescribeImageTagsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeImageTagsInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetRepositoryCatalogDataInput(v *GetRepositoryCatalogDataInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetRepositoryCatalogDataInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetRepositoryPolicyInput(v *GetRepositoryPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetRepositoryPolicyInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpInitiateLayerUploadInput(v *InitiateLayerUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InitiateLayerUploadInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutImageInput(v *PutImageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutImageInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.ImageManifest == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageManifest")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutRepositoryCatalogDataInput(v *PutRepositoryCatalogDataInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutRepositoryCatalogDataInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.CatalogData == nil { + invalidParams.Add(smithy.NewErrParamRequired("CatalogData")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSetRepositoryPolicyInput(v *SetRepositoryPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SetRepositoryPolicyInput"} + if v.RepositoryName == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.PolicyText == nil { + invalidParams.Add(smithy.NewErrParamRequired("PolicyText")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUploadLayerPartInput(v *UploadLayerPartInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UploadLayerPartInput"} + if v.RepositoryName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RepositoryName")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if v.PartFirstByte == nil { + invalidParams.Add(smithy.NewErrParamRequired("PartFirstByte")) + } + if v.PartLastByte == nil { + invalidParams.Add(smithy.NewErrParamRequired("PartLastByte")) + } + if v.LayerPartBlob == nil { + invalidParams.Add(smithy.NewErrParamRequired("LayerPartBlob")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md new file mode 100644 index 000000000..dec755d42 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -0,0 +1,80 @@ +# v1.9.9 (2022-09-14) + +* No change notes available for this release. + +# v1.9.8 (2022-09-02) + +* No change notes available for this release. + +# v1.9.7 (2022-08-31) + +* No change notes available for this release. + +# v1.9.6 (2022-08-29) + +* No change notes available for this release. + +# v1.9.5 (2022-08-11) + +* No change notes available for this release. + +# v1.9.4 (2022-08-09) + +* No change notes available for this release. + +# v1.9.3 (2022-06-29) + +* No change notes available for this release. + +# v1.9.2 (2022-06-07) + +* No change notes available for this release. + +# v1.9.1 (2022-03-24) + +* No change notes available for this release. 
+ +# v1.9.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.8.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.7.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.6.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.5.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.4.0 (2021-10-21) + +* **Feature**: Updated to latest version + +# v1.3.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.2.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. + +# v1.2.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go new file mode 100644 index 000000000..3f451fc9b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go @@ -0,0 +1,176 @@ +package acceptencoding + +import ( + "compress/gzip" + "context" + "fmt" + "io" + + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const acceptEncodingHeaderKey = "Accept-Encoding" +const contentEncodingHeaderKey = "Content-Encoding" + +// AddAcceptEncodingGzipOptions provides the options for the +// AddAcceptEncodingGzip middleware setup. +type AddAcceptEncodingGzipOptions struct { + Enable bool +} + +// AddAcceptEncodingGzip explicitly adds handling for accept-encoding GZIP +// middleware to the operation stack. 
This allows checksums to be correctly
+// computed without disabling GZIP support.
+func AddAcceptEncodingGzip(stack *middleware.Stack, options AddAcceptEncodingGzipOptions) error {
+	if options.Enable {
+		if err := stack.Finalize.Add(&EnableGzip{}, middleware.Before); err != nil {
+			return err
+		}
+		if err := stack.Deserialize.Insert(&DecompressGzip{}, "OperationDeserializer", middleware.After); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	return stack.Finalize.Add(&DisableGzip{}, middleware.Before)
+}
+
+// DisableGzip provides the middleware that disables the underlying HTTP
+// client's automatic gzip decompression of content-encoding responses.
+type DisableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DisableGzip) ID() string {
+	return "DisableAcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*DisableGzip) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return output, metadata, &smithy.SerializationError{
+			Err: fmt.Errorf("unknown request type %T", input.Request),
+		}
+	}
+
+	// Explicitly request the identity encoding; this prevents the http client
+	// from automatically decompressing gzipped content.
+	req.Header.Set(acceptEncodingHeaderKey, "identity")
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// EnableGzip provides a middleware to enable support for
+// gzip responses, with manual decompression. This prevents the underlying HTTP
+// client from performing the gzip decompression automatically.
+type EnableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*EnableGzip) ID() string {
+	return "AcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*EnableGzip) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return output, metadata, &smithy.SerializationError{
+			Err: fmt.Errorf("unknown request type %T", input.Request),
+		}
+	}
+
+	// Explicitly enable gzip support; this prevents the http client from
+	// automatically decompressing the gzipped content.
+	req.Header.Set(acceptEncodingHeaderKey, "gzip")
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// DecompressGzip provides the middleware for decompressing a gzip
+// response from the service.
+type DecompressGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DecompressGzip) ID() string {
+	return "DecompressGzip"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface.
+func (*DecompressGzip) HandleDeserialize( + ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + output middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + output, metadata, err = next.HandleDeserialize(ctx, input) + if err != nil { + return output, metadata, err + } + + resp, ok := output.RawResponse.(*smithyhttp.Response) + if !ok { + return output, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("unknown response type %T", output.RawResponse), + } + } + if v := resp.Header.Get(contentEncodingHeaderKey); v != "gzip" { + return output, metadata, err + } + + // Clear content length since it will no longer be valid once the response + // body is decompressed. + resp.Header.Del("Content-Length") + resp.ContentLength = -1 + + resp.Body = wrapGzipReader(resp.Body) + + return output, metadata, err +} + +type gzipReader struct { + reader io.ReadCloser + gzip *gzip.Reader +} + +func wrapGzipReader(reader io.ReadCloser) *gzipReader { + return &gzipReader{ + reader: reader, + } +} + +// Read wraps the gzip reader around the underlying io.Reader to extract the +// response bytes on the fly. +func (g *gzipReader) Read(b []byte) (n int, err error) { + if g.gzip == nil { + g.gzip, err = gzip.NewReader(g.reader) + if err != nil { + g.gzip = nil // ensure uninitialized gzip value isn't used in close. + return 0, fmt.Errorf("failed to decompress gzip response, %w", err) + } + } + + return g.gzip.Read(b) +} + +func (g *gzipReader) Close() error { + if g.gzip == nil { + return nil + } + + if err := g.gzip.Close(); err != nil { + g.reader.Close() + return fmt.Errorf("failed to decompress gzip response, %w", err) + } + + return g.reader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go new file mode 100644 index 000000000..7056d9bf6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go @@ -0,0 +1,22 @@ +/* +Package acceptencoding provides customizations associated with Accept Encoding Header. + +# Accept encoding gzip + +The Go HTTP client automatically supports accept-encoding and content-encoding +gzip by default. This default behavior is not desired by the SDK, and prevents +validating the response body's checksum. To prevent this the SDK must manually +control usage of content-encoding gzip. + +To control content-encoding, the SDK must always set the `Accept-Encoding` +header to a value. This prevents the HTTP client from using gzip automatically. +When gzip is enabled on the API client, the SDK's customization will control +decompressing the gzip data in order to not break the checksum validation. When +gzip is disabled, the API client will disable gzip, preventing the HTTP +client's default behavior. + +An `EnableAcceptEncodingGzip` option may or may not be present depending on the client using +the below middleware. The option if present can be used to enable auto decompressing +gzip by the SDK. 
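+
+As a hedged usage sketch (an editor's illustration, not upstream
+documentation; the stack value is assumed to come from the operation's
+middleware setup):
+
+	var stack *middleware.Stack // assumed: provided by the API client's operation setup
+	err := AddAcceptEncodingGzip(stack, AddAcceptEncodingGzipOptions{
+		Enable: true, // the SDK decompresses gzip itself, keeping checksums verifiable
+	})
+	if err != nil {
+		// handle middleware registration failure
+	}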
+*/ +package acceptencoding diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go new file mode 100644 index 000000000..04ff9fb79 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package acceptencoding + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.9.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md new file mode 100644 index 000000000..90efb0987 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md @@ -0,0 +1,84 @@ +# v1.1.18 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.17 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.16 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.15 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.14 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.13 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.12 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.11 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.10 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.6 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.5 (2022-04-27) + +* **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors. + +# v1.1.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2022-03-08) + +* **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always preforming output response payload checksum validation, not respecting the output validation model option. 
Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606) +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2022-02-24) + +* **Release**: New module for computing checksums +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go new file mode 100644 index 000000000..a17041c35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go @@ -0,0 +1,323 @@ +package checksum + +import ( + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "hash/crc32" + "io" + "strings" + "sync" +) + +// Algorithm represents the checksum algorithms supported +type Algorithm string + +// Enumeration values for supported checksum Algorithms. +const ( + // AlgorithmCRC32C represents CRC32C hash algorithm + AlgorithmCRC32C Algorithm = "CRC32C" + + // AlgorithmCRC32 represents CRC32 hash algorithm + AlgorithmCRC32 Algorithm = "CRC32" + + // AlgorithmSHA1 represents SHA1 hash algorithm + AlgorithmSHA1 Algorithm = "SHA1" + + // AlgorithmSHA256 represents SHA256 hash algorithm + AlgorithmSHA256 Algorithm = "SHA256" +) + +var supportedAlgorithms = []Algorithm{ + AlgorithmCRC32C, + AlgorithmCRC32, + AlgorithmSHA1, + AlgorithmSHA256, +} + +func (a Algorithm) String() string { return string(a) } + +// ParseAlgorithm attempts to parse the provided value into a checksum +// algorithm, matching without case. Returns the algorithm matched, or an error +// if the algorithm wasn't matched. +func ParseAlgorithm(v string) (Algorithm, error) { + for _, a := range supportedAlgorithms { + if strings.EqualFold(string(a), v) { + return a, nil + } + } + return "", fmt.Errorf("unknown checksum algorithm, %v", v) +} + +// FilterSupportedAlgorithms filters the set of algorithms, returning a slice +// of algorithms that are supported. +func FilterSupportedAlgorithms(vs []string) []Algorithm { + found := map[Algorithm]struct{}{} + + supported := make([]Algorithm, 0, len(supportedAlgorithms)) + for _, v := range vs { + for _, a := range supportedAlgorithms { + // Only consider algorithms that are supported + if !strings.EqualFold(v, string(a)) { + continue + } + // Ignore duplicate algorithms in list. + if _, ok := found[a]; ok { + continue + } + + supported = append(supported, a) + found[a] = struct{}{} + } + } + return supported +} + +// NewAlgorithmHash returns a hash.Hash for the checksum algorithm. Error is +// returned if the algorithm is unknown. 
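+//
+// A minimal sketch (editor's illustration, not upstream documentation) of
+// hashing a payload with this helper; the payload literal is an assumption:
+//
+//	h, err := NewAlgorithmHash(AlgorithmCRC32)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(h, strings.NewReader("Hello world")); err != nil {
+//		return err
+//	}
+//	checksum := base64.StdEncoding.EncodeToString(h.Sum(nil))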
+func NewAlgorithmHash(v Algorithm) (hash.Hash, error) {
+	switch v {
+	case AlgorithmSHA1:
+		return sha1.New(), nil
+	case AlgorithmSHA256:
+		return sha256.New(), nil
+	case AlgorithmCRC32:
+		return crc32.NewIEEE(), nil
+	case AlgorithmCRC32C:
+		return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil
+	default:
+		return nil, fmt.Errorf("unknown checksum algorithm, %v", v)
+	}
+}
+
+// AlgorithmChecksumLength returns the length of the algorithm's checksum in
+// bytes. If the algorithm is not known, an error is returned.
+func AlgorithmChecksumLength(v Algorithm) (int, error) {
+	switch v {
+	case AlgorithmSHA1:
+		return sha1.Size, nil
+	case AlgorithmSHA256:
+		return sha256.Size, nil
+	case AlgorithmCRC32:
+		return crc32.Size, nil
+	case AlgorithmCRC32C:
+		return crc32.Size, nil
+	default:
+		return 0, fmt.Errorf("unknown checksum algorithm, %v", v)
+	}
+}
+
+const awsChecksumHeaderPrefix = "x-amz-checksum-"
+
+// AlgorithmHTTPHeader returns the HTTP header for the algorithm's hash.
+func AlgorithmHTTPHeader(v Algorithm) string {
+	return awsChecksumHeaderPrefix + strings.ToLower(string(v))
+}
+
+// base64EncodeHashSum computes the base64 encoded checksum of a given running
+// hash. The running hash must already have content written to it. Returns the
+// byte slice of the encoded checksum.
+func base64EncodeHashSum(h hash.Hash) []byte {
+	sum := h.Sum(nil)
+	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+	base64.StdEncoding.Encode(sum64, sum)
+	return sum64
+}
+
+// hexEncodeHashSum computes the hex encoded checksum of a given running hash.
+// The running hash must already have content written to it. Returns the byte
+// slice of the encoded checksum.
+func hexEncodeHashSum(h hash.Hash) []byte {
+	sum := h.Sum(nil)
+	sumHex := make([]byte, hex.EncodedLen(len(sum)))
+	hex.Encode(sumHex, sum)
+	return sumHex
+}
+
+// computeMD5Checksum computes the base64 MD5 checksum of an io.Reader's
+// contents. Returns the byte slice of the MD5 checksum and an error.
+func computeMD5Checksum(r io.Reader) ([]byte, error) {
+	h := md5.New()
+
+	// Copy errors may be assumed to be from the body.
+	if _, err := io.Copy(h, r); err != nil {
+		return nil, fmt.Errorf("failed to compute MD5 hash of reader, %w", err)
+	}
+
+	// Encode the MD5 checksum in base64.
+	return base64EncodeHashSum(h), nil
+}
+
+// computeChecksumReader provides a reader wrapping an underlying io.Reader to
+// compute the checksum of the stream's bytes.
+type computeChecksumReader struct {
+	stream            io.Reader
+	algorithm         Algorithm
+	hasher            hash.Hash
+	base64ChecksumLen int
+
+	mux            sync.RWMutex
+	lockedChecksum string
+	lockedErr      error
+}
+
+// newComputeChecksumReader returns a computeChecksumReader for the stream and
+// algorithm specified. Returns an error if the reader cannot be created, or
+// the algorithm is unknown.
+func newComputeChecksumReader(stream io.Reader, algorithm Algorithm) (*computeChecksumReader, error) {
+	hasher, err := NewAlgorithmHash(algorithm)
+	if err != nil {
+		return nil, err
+	}
+
+	checksumLength, err := AlgorithmChecksumLength(algorithm)
+	if err != nil {
+		return nil, err
+	}
+
+	return &computeChecksumReader{
+		stream:            io.TeeReader(stream, hasher),
+		algorithm:         algorithm,
+		hasher:            hasher,
+		base64ChecksumLen: base64.StdEncoding.EncodedLen(checksumLength),
+	}, nil
+}
+
+// Read wraps the underlying reader. When the underlying reader returns EOF,
+// the checksum of the reader will be computed, and can be retrieved with
+// Base64Checksum.
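+//
+// A rough usage sketch (editor's illustration, not upstream documentation;
+// body is an assumed io.Reader):
+//
+//	r, err := newComputeChecksumReader(body, AlgorithmSHA256)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(io.Discard, r); err != nil { // drain to EOF
+//		return err
+//	}
+//	sum, err := r.Base64Checksum() // only available after EOF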
+func (r *computeChecksumReader) Read(p []byte) (int, error) { + n, err := r.stream.Read(p) + if err == nil { + return n, nil + } else if err != io.EOF { + r.mux.Lock() + defer r.mux.Unlock() + + r.lockedErr = err + return n, err + } + + b := base64EncodeHashSum(r.hasher) + + r.mux.Lock() + defer r.mux.Unlock() + + r.lockedChecksum = string(b) + + return n, err +} + +func (r *computeChecksumReader) Algorithm() Algorithm { + return r.algorithm +} + +// Base64ChecksumLength returns the base64 encoded length of the checksum for +// algorithm. +func (r *computeChecksumReader) Base64ChecksumLength() int { + return r.base64ChecksumLen +} + +// Base64Checksum returns the base64 checksum for the algorithm, or error if +// the underlying reader returned a non-EOF error. +// +// Safe to be called concurrently, but will return an error until after the +// underlying reader is returns EOF. +func (r *computeChecksumReader) Base64Checksum() (string, error) { + r.mux.RLock() + defer r.mux.RUnlock() + + if r.lockedErr != nil { + return "", r.lockedErr + } + + if r.lockedChecksum == "" { + return "", fmt.Errorf( + "checksum not available yet, called before reader returns EOF", + ) + } + + return r.lockedChecksum, nil +} + +// validateChecksumReader implements io.ReadCloser interface. The wrapper +// performs checksum validation when the underlying reader has been fully read. +type validateChecksumReader struct { + originalBody io.ReadCloser + body io.Reader + hasher hash.Hash + algorithm Algorithm + expectChecksum string +} + +// newValidateChecksumReader returns a configured io.ReadCloser that performs +// checksum validation when the underlying reader has been fully read. +func newValidateChecksumReader( + body io.ReadCloser, + algorithm Algorithm, + expectChecksum string, +) (*validateChecksumReader, error) { + hasher, err := NewAlgorithmHash(algorithm) + if err != nil { + return nil, err + } + + return &validateChecksumReader{ + originalBody: body, + body: io.TeeReader(body, hasher), + hasher: hasher, + algorithm: algorithm, + expectChecksum: expectChecksum, + }, nil +} + +// Read attempts to read from the underlying stream while also updating the +// running hash. If the underlying stream returns with an EOF error, the +// checksum of the stream will be collected, and compared against the expected +// checksum. If the checksums do not match, an error will be returned. +// +// If a non-EOF error occurs when reading the underlying stream, that error +// will be returned and the checksum for the stream will be discarded. +func (c *validateChecksumReader) Read(p []byte) (n int, err error) { + n, err = c.body.Read(p) + if err == io.EOF { + if checksumErr := c.validateChecksum(); checksumErr != nil { + return n, checksumErr + } + } + + return n, err +} + +// Close closes the underlying reader, returning any error that occurred in the +// underlying reader. +func (c *validateChecksumReader) Close() (err error) { + return c.originalBody.Close() +} + +func (c *validateChecksumReader) validateChecksum() error { + // Compute base64 encoded checksum hash of the payload's read bytes. 
+ v := base64EncodeHashSum(c.hasher) + if e, a := c.expectChecksum, string(v); !strings.EqualFold(e, a) { + return validationError{ + Algorithm: c.algorithm, Expect: e, Actual: a, + } + } + + return nil +} + +type validationError struct { + Algorithm Algorithm + Expect string + Actual string +} + +func (v validationError) Error() string { + return fmt.Sprintf("checksum did not match: algorithm %v, expect %v, actual %v", + v.Algorithm, v.Expect, v.Actual) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go new file mode 100644 index 000000000..3bd320c43 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go @@ -0,0 +1,389 @@ +package checksum + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +const ( + crlf = "\r\n" + + // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html + defaultChunkLength = 1024 * 64 + + awsTrailerHeaderName = "x-amz-trailer" + decodedContentLengthHeaderName = "x-amz-decoded-content-length" + + contentEncodingHeaderName = "content-encoding" + awsChunkedContentEncodingHeaderValue = "aws-chunked" + + trailerKeyValueSeparator = ":" +) + +var ( + crlfBytes = []byte(crlf) + finalChunkBytes = []byte("0" + crlf) +) + +type awsChunkedEncodingOptions struct { + // The total size of the stream. For unsigned encoding this implies that + // there will only be a single chunk containing the underlying payload, + // unless ChunkLength is also specified. + StreamLength int64 + + // Set of trailer key:value pairs that will be appended to the end of the + // payload after the end chunk has been written. + Trailers map[string]awsChunkedTrailerValue + + // The maximum size of each chunk to be sent. Default value of -1, signals + // that optimal chunk length will be used automatically. ChunkSize must be + // at least 8KB. + // + // If ChunkLength and StreamLength are both specified, the stream will be + // broken up into ChunkLength chunks. The encoded length of the aws-chunked + // encoding can still be determined as long as all trailers, if any, have a + // fixed length. + ChunkLength int +} + +type awsChunkedTrailerValue struct { + // Function to retrieve the value of the trailer. Will only be called after + // the underlying stream returns EOF error. + Get func() (string, error) + + // If the length of the value can be pre-determined, and is constant + // specify the length. A value of -1 means the length is unknown, or + // cannot be pre-determined. + Length int +} + +// awsChunkedEncoding provides a reader that wraps the payload such that +// payload is read as a single aws-chunk payload. This reader can only be used +// if the content length of payload is known. Content-Length is used as size of +// the single payload chunk. The final chunk and trailing checksum is appended +// at the end. 
+// +// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition +// +// Here is the aws-chunked payload stream as read from the awsChunkedEncoding +// if original request stream is "Hello world", and checksum hash used is SHA256 +// +// \r\n +// Hello world\r\n +// 0\r\n +// x-amz-checksum-sha256:ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=\r\n +// \r\n +type awsChunkedEncoding struct { + options awsChunkedEncodingOptions + + encodedStream io.Reader + trailerEncodedLength int +} + +// newUnsignedAWSChunkedEncoding returns a new awsChunkedEncoding configured +// for unsigned aws-chunked content encoding. Any additional trailers that need +// to be appended after the end chunk must be included as via Trailer +// callbacks. +func newUnsignedAWSChunkedEncoding( + stream io.Reader, + optFns ...func(*awsChunkedEncodingOptions), +) *awsChunkedEncoding { + options := awsChunkedEncodingOptions{ + Trailers: map[string]awsChunkedTrailerValue{}, + StreamLength: -1, + ChunkLength: -1, + } + for _, fn := range optFns { + fn(&options) + } + + var chunkReader io.Reader + if options.ChunkLength != -1 || options.StreamLength == -1 { + if options.ChunkLength == -1 { + options.ChunkLength = defaultChunkLength + } + chunkReader = newBufferedAWSChunkReader(stream, options.ChunkLength) + } else { + chunkReader = newUnsignedChunkReader(stream, options.StreamLength) + } + + trailerReader := newAWSChunkedTrailerReader(options.Trailers) + + return &awsChunkedEncoding{ + options: options, + encodedStream: io.MultiReader(chunkReader, + trailerReader, + bytes.NewBuffer(crlfBytes), + ), + trailerEncodedLength: trailerReader.EncodedLength(), + } +} + +// EncodedLength returns the final length of the aws-chunked content encoded +// stream if it can be determined without reading the underlying stream or lazy +// header values, otherwise -1 is returned. +func (e *awsChunkedEncoding) EncodedLength() int64 { + var length int64 + if e.options.StreamLength == -1 || e.trailerEncodedLength == -1 { + return -1 + } + + if e.options.StreamLength != 0 { + // If the stream length is known, and there is no chunk length specified, + // only a single chunk will be used. Otherwise the stream length needs to + // include the multiple chunk padding content. + if e.options.ChunkLength == -1 { + length += getUnsignedChunkBytesLength(e.options.StreamLength) + + } else { + // Compute chunk header and payload length + numChunks := e.options.StreamLength / int64(e.options.ChunkLength) + length += numChunks * getUnsignedChunkBytesLength(int64(e.options.ChunkLength)) + if remainder := e.options.StreamLength % int64(e.options.ChunkLength); remainder != 0 { + length += getUnsignedChunkBytesLength(remainder) + } + } + } + + // End chunk + length += int64(len(finalChunkBytes)) + + // Trailers + length += int64(e.trailerEncodedLength) + + // Encoding terminator + length += int64(len(crlf)) + + return length +} + +func getUnsignedChunkBytesLength(payloadLength int64) int64 { + payloadLengthStr := strconv.FormatInt(payloadLength, 16) + return int64(len(payloadLengthStr)) + int64(len(crlf)) + payloadLength + int64(len(crlf)) +} + +// HTTPHeaders returns the set of headers that must be included the request for +// aws-chunked to work. This includes the content-encoding: aws-chunked header. +// +// If there are multiple layered content encoding, the aws-chunked encoding +// must be appended to the previous layers the stream's encoding. 
The best way
+// to do this is to append all header values returned to the HTTP request's set
+// of headers.
+func (e *awsChunkedEncoding) HTTPHeaders() map[string][]string {
+	headers := map[string][]string{
+		contentEncodingHeaderName: {
+			awsChunkedContentEncodingHeaderValue,
+		},
+	}
+
+	if len(e.options.Trailers) != 0 {
+		trailers := make([]string, 0, len(e.options.Trailers))
+		for name := range e.options.Trailers {
+			trailers = append(trailers, strings.ToLower(name))
+		}
+		headers[awsTrailerHeaderName] = trailers
+	}
+
+	return headers
+}
+
+func (e *awsChunkedEncoding) Read(b []byte) (n int, err error) {
+	return e.encodedStream.Read(b)
+}
+
+// awsChunkedTrailerReader provides a lazy reader for reading of aws-chunked
+// content encoded trailers. The trailer values will not be retrieved until the
+// reader is read from.
+type awsChunkedTrailerReader struct {
+	reader               *bytes.Buffer
+	trailers             map[string]awsChunkedTrailerValue
+	trailerEncodedLength int
+}
+
+// newAWSChunkedTrailerReader returns an initialized awsChunkedTrailerReader
+// for lazily reading aws-chunked content encoded trailers.
+func newAWSChunkedTrailerReader(trailers map[string]awsChunkedTrailerValue) *awsChunkedTrailerReader {
+	return &awsChunkedTrailerReader{
+		trailers:             trailers,
+		trailerEncodedLength: trailerEncodedLength(trailers),
+	}
+}
+
+func trailerEncodedLength(trailers map[string]awsChunkedTrailerValue) (length int) {
+	for name, trailer := range trailers {
+		length += len(name) + len(trailerKeyValueSeparator)
+		l := trailer.Length
+		if l == -1 {
+			return -1
+		}
+		length += l + len(crlf)
+	}
+
+	return length
+}
+
+// EncodedLength returns the length of the encoded trailers if the length could
+// be determined without retrieving the header values. Returns -1 if length is
+// unknown.
+func (r *awsChunkedTrailerReader) EncodedLength() (length int) {
+	return r.trailerEncodedLength
+}
+
+// Read populates the passed in byte slice with bytes from the encoded
+// trailers. Will lazily read the header values the first time Read is called.
+func (r *awsChunkedTrailerReader) Read(p []byte) (int, error) {
+	if r.trailerEncodedLength == 0 {
+		return 0, io.EOF
+	}
+
+	if r.reader == nil {
+		trailerLen := r.trailerEncodedLength
+		if r.trailerEncodedLength == -1 {
+			trailerLen = 0
+		}
+		r.reader = bytes.NewBuffer(make([]byte, 0, trailerLen))
+		for name, trailer := range r.trailers {
+			r.reader.WriteString(name)
+			r.reader.WriteString(trailerKeyValueSeparator)
+			v, err := trailer.Get()
+			if err != nil {
+				return 0, fmt.Errorf("failed to get trailer value, %w", err)
+			}
+			r.reader.WriteString(v)
+			r.reader.WriteString(crlf)
+		}
+	}
+
+	return r.reader.Read(p)
+}
+
+// newUnsignedChunkReader returns an io.Reader encoding the underlying reader
+// as unsigned aws-chunked chunks. The returned reader will also include the
+// end chunk, but not the aws-chunked final `crlf` segment so trailers can be
+// added.
+//
+// If the payload size is -1 (unknown length), the content will be buffered in
+// defaultChunkLength chunks before being wrapped in aws-chunked chunk encoding.
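+//
+// For illustration (editor's note derived from the code below, not upstream
+// documentation), a known-length payload "Hello world" (11 bytes, 0xb) is
+// encoded by this reader as:
+//
+//	b\r\n
+//	Hello world\r\n
+//	0\r\n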
+func newUnsignedChunkReader(reader io.Reader, payloadSize int64) io.Reader { + if payloadSize == -1 { + return newBufferedAWSChunkReader(reader, defaultChunkLength) + } + + var endChunk bytes.Buffer + if payloadSize == 0 { + endChunk.Write(finalChunkBytes) + return &endChunk + } + + endChunk.WriteString(crlf) + endChunk.Write(finalChunkBytes) + + var header bytes.Buffer + header.WriteString(strconv.FormatInt(payloadSize, 16)) + header.WriteString(crlf) + return io.MultiReader( + &header, + reader, + &endChunk, + ) +} + +// Provides a buffered aws-chunked chunk encoder of an underlying io.Reader. +// Will include end chunk, but not the aws-chunked final `crlf` segment so +// trailers can be added. +// +// Note does not implement support for chunk extensions, e.g. chunk signing. +type bufferedAWSChunkReader struct { + reader io.Reader + chunkSize int + chunkSizeStr string + + headerBuffer *bytes.Buffer + chunkBuffer *bytes.Buffer + + multiReader io.Reader + multiReaderLen int + endChunkDone bool +} + +// newBufferedAWSChunkReader returns an bufferedAWSChunkReader for reading +// aws-chunked encoded chunks. +func newBufferedAWSChunkReader(reader io.Reader, chunkSize int) *bufferedAWSChunkReader { + return &bufferedAWSChunkReader{ + reader: reader, + chunkSize: chunkSize, + chunkSizeStr: strconv.FormatInt(int64(chunkSize), 16), + + headerBuffer: bytes.NewBuffer(make([]byte, 0, 64)), + chunkBuffer: bytes.NewBuffer(make([]byte, 0, chunkSize+len(crlf))), + } +} + +// Read attempts to read from the underlying io.Reader writing aws-chunked +// chunk encoded bytes to p. When the underlying io.Reader has been completed +// read the end chunk will be available. Once the end chunk is read, the reader +// will return EOF. +func (r *bufferedAWSChunkReader) Read(p []byte) (n int, err error) { + if r.multiReaderLen == 0 && r.endChunkDone { + return 0, io.EOF + } + if r.multiReader == nil || r.multiReaderLen == 0 { + r.multiReader, r.multiReaderLen, err = r.newMultiReader() + if err != nil { + return 0, err + } + } + + n, err = r.multiReader.Read(p) + r.multiReaderLen -= n + + if err == io.EOF && !r.endChunkDone { + // Edge case handling when the multi-reader has been completely read, + // and returned an EOF, make sure that EOF only gets returned if the + // end chunk was included in the multi-reader. Otherwise, the next call + // to read will initialize the next chunk's multi-reader. + err = nil + } + return n, err +} + +// newMultiReader returns a new io.Reader for wrapping the next chunk. Will +// return an error if the underlying reader can not be read from. Will never +// return io.EOF. +func (r *bufferedAWSChunkReader) newMultiReader() (io.Reader, int, error) { + // io.Copy eats the io.EOF returned by io.LimitReader. Any error that + // occurs here is due to an actual read error. + n, err := io.Copy(r.chunkBuffer, io.LimitReader(r.reader, int64(r.chunkSize))) + if err != nil { + return nil, 0, err + } + if n == 0 { + // Early exit writing out only the end chunk. This does not include + // aws-chunk's final `crlf` so that trailers can still be added by + // upstream reader. 
+ r.headerBuffer.Reset() + r.headerBuffer.WriteString("0") + r.headerBuffer.WriteString(crlf) + r.endChunkDone = true + + return r.headerBuffer, r.headerBuffer.Len(), nil + } + r.chunkBuffer.WriteString(crlf) + + chunkSizeStr := r.chunkSizeStr + if int(n) != r.chunkSize { + chunkSizeStr = strconv.FormatInt(n, 16) + } + + r.headerBuffer.Reset() + r.headerBuffer.WriteString(chunkSizeStr) + r.headerBuffer.WriteString(crlf) + + return io.MultiReader( + r.headerBuffer, + r.chunkBuffer, + ), r.headerBuffer.Len() + r.chunkBuffer.Len(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go new file mode 100644 index 000000000..c95af9a11 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package checksum + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.1.18" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go new file mode 100644 index 000000000..3e17d2216 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go @@ -0,0 +1,185 @@ +package checksum + +import ( + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// InputMiddlewareOptions provides the options for the request +// checksum middleware setup. +type InputMiddlewareOptions struct { + // GetAlgorithm is a function to get the checksum algorithm of the + // input payload from the input parameters. + // + // Given the input parameter value, the function must return the algorithm + // and true, or false if no algorithm is specified. + GetAlgorithm func(interface{}) (string, bool) + + // Forces the middleware to compute the input payload's checksum. The + // request will fail if the algorithm is not specified or unable to compute + // the checksum. + RequireChecksum bool + + // Enables support for wrapping the serialized input payload with a + // content-encoding: aws-check wrapper, and including a trailer for the + // algorithm's checksum value. + // + // The checksum will not be computed, nor added as trailing checksum, if + // the Algorithm's header is already set on the request. + EnableTrailingChecksum bool + + // Enables support for computing the SHA256 checksum of input payloads + // along with the algorithm specified checksum. Prevents downstream + // middleware handlers (computePayloadSHA256) re-reading the payload. + // + // The SHA256 payload checksum will only be used for computed for requests + // that are not TLS, or do not enable trailing checksums. + // + // The SHA256 payload hash will not be computed, if the Algorithm's header + // is already set on the request. + EnableComputeSHA256PayloadHash bool + + // Enables support for setting the aws-chunked decoded content length + // header for the decoded length of the underlying stream. Will only be set + // when used with trailing checksums, and aws-chunked content-encoding. + EnableDecodedContentLengthHeader bool +} + +// AddInputMiddleware adds the middleware for performing checksum computing +// of request payloads, and checksum validation of response payloads. 
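+//
+// A hypothetical wiring sketch (editor's illustration, not upstream
+// documentation; getAlgorithmFromInput is an assumed accessor):
+//
+//	err := AddInputMiddleware(stack, InputMiddlewareOptions{
+//		GetAlgorithm:                     getAlgorithmFromInput,
+//		RequireChecksum:                  false,
+//		EnableTrailingChecksum:           true,
+//		EnableComputeSHA256PayloadHash:   true,
+//		EnableDecodedContentLengthHeader: true,
+//	})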
+func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) { + // TODO ensure this works correctly with presigned URLs + + // Middleware stack: + // * (OK)(Initialize) --none-- + // * (OK)(Serialize) EndpointResolver + // * (OK)(Build) ComputeContentLength + // * (AD)(Build) Header ComputeInputPayloadChecksum + // * SIGNED Payload - If HTTP && not support trailing checksum + // * UNSIGNED Payload - If HTTPS && not support trailing checksum + // * (RM)(Build) ContentChecksum - OK to remove + // * (OK)(Build) ComputePayloadHash + // * v4.dynamicPayloadSigningMiddleware + // * v4.computePayloadSHA256 + // * v4.unsignedPayload + // (OK)(Build) Set computedPayloadHash header + // * (OK)(Finalize) Retry + // * (AD)(Finalize) Trailer ComputeInputPayloadChecksum, + // * Requires HTTPS && support trailing checksum + // * UNSIGNED Payload + // * Finalize run if HTTPS && support trailing checksum + // * (OK)(Finalize) Signing + // * (OK)(Deserialize) --none-- + + // Initial checksum configuration look up middleware + err = stack.Initialize.Add(&setupInputContext{ + GetAlgorithm: options.GetAlgorithm, + }, middleware.Before) + if err != nil { + return err + } + + stack.Build.Remove("ContentChecksum") + + // Create the compute checksum middleware that will be added as both a + // build and finalize handler. + inputChecksum := &computeInputPayloadChecksum{ + RequireChecksum: options.RequireChecksum, + EnableTrailingChecksum: options.EnableTrailingChecksum, + EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, + EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, + } + + // Insert header checksum after ComputeContentLength middleware, must also + // be before the computePayloadHash middleware handlers. + err = stack.Build.Insert(inputChecksum, + (*smithyhttp.ComputeContentLength)(nil).ID(), + middleware.After) + if err != nil { + return err + } + + // If trailing checksum is not supported no need for finalize handler to be added. + if options.EnableTrailingChecksum { + err = stack.Finalize.Insert(inputChecksum, "Retry", middleware.After) + if err != nil { + return err + } + } + + return nil +} + +// RemoveInputMiddleware Removes the compute input payload checksum middleware +// handlers from the stack. +func RemoveInputMiddleware(stack *middleware.Stack) { + id := (*setupInputContext)(nil).ID() + stack.Initialize.Remove(id) + + id = (*computeInputPayloadChecksum)(nil).ID() + stack.Build.Remove(id) + stack.Finalize.Remove(id) +} + +// OutputMiddlewareOptions provides options for configuring output checksum +// validation middleware. +type OutputMiddlewareOptions struct { + // GetValidationMode is a function to get the checksum validation + // mode of the output payload from the input parameters. + // + // Given the input parameter value, the function must return the validation + // mode and true, or false if no mode is specified. + GetValidationMode func(interface{}) (string, bool) + + // The set of checksum algorithms that should be used for response payload + // checksum validation. The algorithm(s) used will be a union of the + // output's returned algorithms and this set. + // + // Only the first algorithm in the union is currently used. + ValidationAlgorithms []string + + // If set the middleware will ignore output multipart checksums. Otherwise + // an checksum format error will be returned by the middleware. 
+ IgnoreMultipartValidation bool + + // When set the middleware will log when output does not have checksum or + // algorithm to validate. + LogValidationSkipped bool + + // When set the middleware will log when the output contains a multipart + // checksum that was, skipped and not validated. + LogMultipartValidationSkipped bool +} + +// AddOutputMiddleware adds the middleware for validating response payload's +// checksum. +func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error { + err := stack.Initialize.Add(&setupOutputContext{ + GetValidationMode: options.GetValidationMode, + }, middleware.Before) + if err != nil { + return err + } + + // Resolve a supported priority order list of algorithms to validate. + algorithms := FilterSupportedAlgorithms(options.ValidationAlgorithms) + + m := &validateOutputPayloadChecksum{ + Algorithms: algorithms, + IgnoreMultipartValidation: options.IgnoreMultipartValidation, + LogMultipartValidationSkipped: options.LogMultipartValidationSkipped, + LogValidationSkipped: options.LogValidationSkipped, + } + + return stack.Deserialize.Add(m, middleware.After) +} + +// RemoveOutputMiddleware Removes the compute input payload checksum middleware +// handlers from the stack. +func RemoveOutputMiddleware(stack *middleware.Stack) { + id := (*setupOutputContext)(nil).ID() + stack.Initialize.Remove(id) + + id = (*validateOutputPayloadChecksum)(nil).ID() + stack.Deserialize.Remove(id) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go new file mode 100644 index 000000000..0b3c48912 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go @@ -0,0 +1,479 @@ +package checksum + +import ( + "context" + "crypto/sha256" + "fmt" + "hash" + "io" + "strconv" + + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const ( + contentMD5Header = "Content-Md5" + streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" +) + +// computedInputChecksumsKey is the metadata key for recording the algorithm the +// checksum was computed for and the checksum value. +type computedInputChecksumsKey struct{} + +// GetComputedInputChecksums returns the map of checksum algorithm to their +// computed value stored in the middleware Metadata. Returns false if no values +// were stored in the Metadata. +func GetComputedInputChecksums(m middleware.Metadata) (map[string]string, bool) { + vs, ok := m.Get(computedInputChecksumsKey{}).(map[string]string) + return vs, ok +} + +// SetComputedInputChecksums stores the map of checksum algorithm to their +// computed value in the middleware Metadata. Overwrites any values that +// currently exist in the metadata. +func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) { + m.Set(computedInputChecksumsKey{}, vs) +} + +// computeInputPayloadChecksum middleware computes payload checksum +type computeInputPayloadChecksum struct { + // Enables support for wrapping the serialized input payload with a + // content-encoding: aws-check wrapper, and including a trailer for the + // algorithm's checksum value. + // + // The checksum will not be computed, nor added as trailing checksum, if + // the Algorithm's header is already set on the request. 
+ EnableTrailingChecksum bool + + // States that a checksum is required to be included for the operation. If + // Input does not specify a checksum, fallback to built in MD5 checksum is + // used. + // + // Replaces smithy-go's ContentChecksum middleware. + RequireChecksum bool + + // Enables support for computing the SHA256 checksum of input payloads + // along with the algorithm specified checksum. Prevents downstream + // middleware handlers (computePayloadSHA256) re-reading the payload. + // + // The SHA256 payload hash will only be used for computed for requests + // that are not TLS, or do not enable trailing checksums. + // + // The SHA256 payload hash will not be computed, if the Algorithm's header + // is already set on the request. + EnableComputePayloadHash bool + + // Enables support for setting the aws-chunked decoded content length + // header for the decoded length of the underlying stream. Will only be set + // when used with trailing checksums, and aws-chunked content-encoding. + EnableDecodedContentLengthHeader bool + + buildHandlerRun bool + deferToFinalizeHandler bool +} + +// ID provides the middleware's identifier. +func (m *computeInputPayloadChecksum) ID() string { + return "AWSChecksum:ComputeInputPayloadChecksum" +} + +type computeInputHeaderChecksumError struct { + Msg string + Err error +} + +func (e computeInputHeaderChecksumError) Error() string { + const intro = "compute input header checksum failed" + + if e.Err != nil { + return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err) + } + + return fmt.Sprintf("%s, %s", intro, e.Msg) +} +func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err } + +// HandleBuild handles computing the payload's checksum, in the following cases: +// - Is HTTP, not HTTPS +// - RequireChecksum is true, and no checksums were specified via the Input +// - Trailing checksums are not supported +// +// The build handler must be inserted in the stack before ContentPayloadHash +// and after ComputeContentLength. +func (m *computeInputPayloadChecksum) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + m.buildHandlerRun = true + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, computeInputHeaderChecksumError{ + Msg: fmt.Sprintf("unknown request type %T", req), + } + } + + var algorithm Algorithm + var checksum string + defer func() { + if algorithm == "" || checksum == "" || err != nil { + return + } + + // Record the checksum and algorithm that was computed + SetComputedInputChecksums(&metadata, map[string]string{ + string(algorithm): checksum, + }) + }() + + // If no algorithm was specified, and the operation requires a checksum, + // fallback to the legacy content MD5 checksum. + algorithm, ok, err = getInputAlgorithm(ctx) + if err != nil { + return out, metadata, err + } else if !ok { + if m.RequireChecksum { + checksum, err = setMD5Checksum(ctx, req) + if err != nil { + return out, metadata, computeInputHeaderChecksumError{ + Msg: "failed to compute stream's MD5 checksum", + Err: err, + } + } + algorithm = Algorithm("MD5") + } + return next.HandleBuild(ctx, in) + } + + // If the checksum header is already set nothing to do. 
+	checksumHeader := AlgorithmHTTPHeader(algorithm)
+	if checksum = req.Header.Get(checksumHeader); checksum != "" {
+		return next.HandleBuild(ctx, in)
+	}
+
+	computePayloadHash := m.EnableComputePayloadHash
+	if v := v4.GetPayloadHash(ctx); v != "" {
+		computePayloadHash = false
+	}
+
+	stream := req.GetStream()
+	streamLength, err := getRequestStreamLength(req)
+	if err != nil {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "failed to determine stream length",
+			Err: err,
+		}
+	}
+
+	// If trailing checksums are supported, the request is HTTPS, and the
+	// stream is not nil or empty, there is nothing to do in the build stage.
+	// The checksum will be added to the request as a trailing checksum in the
+	// finalize handler.
+	//
+	// Nil and empty streams will always be handled as a request header,
+	// regardless of whether the operation supports trailing checksums or not.
+	if req.IsHTTPS() {
+		if stream != nil && streamLength != 0 && m.EnableTrailingChecksum {
+			if m.EnableComputePayloadHash {
+				// payload hash is set as header in Build middleware handler,
+				// ContentSHA256Header.
+				ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash)
+			}
+
+			m.deferToFinalizeHandler = true
+			return next.HandleBuild(ctx, in)
+		}
+
+		// If trailing checksums are not enabled but the protocol is still
+		// HTTPS, disable computing the payload hash. The downstream middleware
+		// handler (ComputePayloadHash) will set the payload hash to unsigned
+		// payload if signing is used.
+		computePayloadHash = false
+	}
+
+	// Only seekable streams are supported for non-trailing checksums, because
+	// the stream needs to be rewound before the handler can continue.
+	if stream != nil && !req.IsStreamSeekable() {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "unseekable stream is not supported without TLS and trailing checksum",
+		}
+	}
+
+	var sha256Checksum string
+	checksum, sha256Checksum, err = computeStreamChecksum(
+		algorithm, stream, computePayloadHash)
+	if err != nil {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "failed to compute stream checksum",
+			Err: err,
+		}
+	}
+
+	if err := req.RewindStream(); err != nil {
+		return out, metadata, computeInputHeaderChecksumError{
+			Msg: "failed to rewind stream",
+			Err: err,
+		}
+	}
+
+	req.Header.Set(checksumHeader, checksum)
+
+	if computePayloadHash {
+		ctx = v4.SetPayloadHash(ctx, sha256Checksum)
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+type computeInputTrailingChecksumError struct {
+	Msg string
+	Err error
+}
+
+func (e computeInputTrailingChecksumError) Error() string {
+	const intro = "compute input trailing checksum failed"
+
+	if e.Err != nil {
+		return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
+	}
+
+	return fmt.Sprintf("%s, %s", intro, e.Msg)
+}
+func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err }
+
+// HandleFinalize handles computing the payload's checksum, in the following cases:
+// - Is HTTPS, not HTTP
+// - A checksum was specified via the Input
+// - Trailing checksums are supported.
+//
+// The finalize handler must be inserted in the stack before Signing, and after Retry.
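+//
+// A hedged wiring sketch; the step positions are assumptions and not
+// verified against this package:
+//
+//	m := &computeInputPayloadChecksum{EnableTrailingChecksum: true}
+//	if err := stack.Build.Add(m, middleware.After); err != nil {
+//		return err
+//	}
+//	if err := stack.Finalize.Add(m, middleware.Before); err != nil {
+//		return err
+//	}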
+func (m *computeInputPayloadChecksum) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if !m.deferToFinalizeHandler {
+		if !m.buildHandlerRun {
+			return out, metadata, computeInputTrailingChecksumError{
+				Msg: "build handler was removed without also removing finalize handler",
+			}
+		}
+		return next.HandleFinalize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: fmt.Sprintf("unknown request type %T", req),
+		}
+	}
+
+	// Trailing checksums are only supported when TLS is enabled.
+	if !req.IsHTTPS() {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "HTTPS required",
+		}
+	}
+
+	// If no algorithm was specified, there is nothing to do.
+	algorithm, ok, err := getInputAlgorithm(ctx)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed to get algorithm",
+			Err: err,
+		}
+	} else if !ok {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "no algorithm specified",
+		}
+	}
+
+	// If the checksum header is already set before finalize could run, there
+	// is nothing to do.
+	checksumHeader := AlgorithmHTTPHeader(algorithm)
+	if req.Header.Get(checksumHeader) != "" {
+		return next.HandleFinalize(ctx, in)
+	}
+
+	stream := req.GetStream()
+	streamLength, err := getRequestStreamLength(req)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed to determine stream length",
+			Err: err,
+		}
+	}
+
+	if stream == nil || streamLength == 0 {
+		// Nil and empty streams are handled by the Build handler. They are not
+		// supported by the trailing checksums finalize handler. There is no
+		// benefit to sending them as trailers compared to headers.
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "nil or empty streams are not supported",
+		}
+	}
+
+	checksumReader, err := newComputeChecksumReader(stream, algorithm)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed to create checksum reader",
+			Err: err,
+		}
+	}
+
+	awsChunkedReader := newUnsignedAWSChunkedEncoding(checksumReader,
+		func(o *awsChunkedEncodingOptions) {
+			o.Trailers[AlgorithmHTTPHeader(checksumReader.Algorithm())] = awsChunkedTrailerValue{
+				Get: checksumReader.Base64Checksum,
+				Length: checksumReader.Base64ChecksumLength(),
+			}
+			o.StreamLength = streamLength
+		})
+
+	for key, values := range awsChunkedReader.HTTPHeaders() {
+		for _, value := range values {
+			req.Header.Add(key, value)
+		}
+	}
+
+	// Setting the stream on the request will create a copy. The content length
+	// is not updated until after the request is copied to prevent impacting
+	// upstream middleware.
+	req, err = req.SetStream(awsChunkedReader)
+	if err != nil {
+		return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed updating request to trailing checksum wrapped stream",
+			Err: err,
+		}
+	}
+	req.ContentLength = awsChunkedReader.EncodedLength()
+	in.Request = req
+
+	// Add the decoded content length header if the original stream's content
+	// length is known.
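+	// (decodedContentLengthHeaderName is assumed to resolve to the
+	// "X-Amz-Decoded-Content-Length" header used with aws-chunked bodies.)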
+	if streamLength != -1 && m.EnableDecodedContentLengthHeader {
+		req.Header.Set(decodedContentLengthHeaderName, strconv.FormatInt(streamLength, 10))
+	}
+
+	out, metadata, err = next.HandleFinalize(ctx, in)
+	if err == nil {
+		checksum, err := checksumReader.Base64Checksum()
+		if err != nil {
+			return out, metadata, fmt.Errorf("failed to get computed checksum, %w", err)
+		}
+
+		// Record the checksum and algorithm that was computed
+		SetComputedInputChecksums(&metadata, map[string]string{
+			string(algorithm): checksum,
+		})
+	}
+
+	return out, metadata, err
+}
+
+func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) {
+	ctxAlgorithm := getContextInputAlgorithm(ctx)
+	if ctxAlgorithm == "" {
+		return "", false, nil
+	}
+
+	algorithm, err := ParseAlgorithm(ctxAlgorithm)
+	if err != nil {
+		return "", false, fmt.Errorf(
+			"failed to parse algorithm, %w", err)
+	}
+
+	return algorithm, true, nil
+}
+
+func computeStreamChecksum(algorithm Algorithm, stream io.Reader, computePayloadHash bool) (
+	checksum string, sha256Checksum string, err error,
+) {
+	hasher, err := NewAlgorithmHash(algorithm)
+	if err != nil {
+		return "", "", fmt.Errorf(
+			"failed to get hasher for checksum algorithm, %w", err)
+	}
+
+	var sha256Hasher hash.Hash
+	var batchHasher io.Writer = hasher
+
+	// Compute the payload hash for the protocol. To prevent another handler
+	// (computePayloadSHA256) from re-reading the body, also compute the
+	// SHA256 for request signing. If the configured checksum algorithm is
+	// SHA256, don't double wrap the stream with another SHA256 hasher.
+	if computePayloadHash && algorithm != AlgorithmSHA256 {
+		sha256Hasher = sha256.New()
+		batchHasher = io.MultiWriter(hasher, sha256Hasher)
+	}
+
+	if stream != nil {
+		if _, err = io.Copy(batchHasher, stream); err != nil {
+			return "", "", fmt.Errorf(
+				"failed to read stream to compute hash, %w", err)
+		}
+	}
+
+	checksum = string(base64EncodeHashSum(hasher))
+	if computePayloadHash {
+		if algorithm != AlgorithmSHA256 {
+			sha256Checksum = string(hexEncodeHashSum(sha256Hasher))
+		} else {
+			sha256Checksum = string(hexEncodeHashSum(hasher))
+		}
+	}
+
+	return checksum, sha256Checksum, nil
+}
+
+func getRequestStreamLength(req *smithyhttp.Request) (int64, error) {
+	if v := req.ContentLength; v > 0 {
+		return v, nil
+	}
+
+	if length, ok, err := req.StreamLength(); err != nil {
+		return 0, fmt.Errorf("failed getting request stream's length, %w", err)
+	} else if ok {
+		return length, nil
+	}
+
+	return -1, nil
+}
+
+// setMD5Checksum computes the MD5 of the request payload and sets it to the
+// Content-MD5 header. It returns the base64 encoded MD5 string, or an error.
+//
+// If the MD5 is already set as the Content-MD5 header, that value will be
+// returned, and nothing else will be done.
+//
+// If the payload is empty, no MD5 will be computed. No error will be returned.
+// Empty payloads do not have an MD5 value.
+//
+// Replaces the smithy-go middleware for the httpChecksum trait.
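+//
+// A hedged usage sketch, assuming req already carries a seekable stream:
+//
+//	sum, err := setMD5Checksum(ctx, req)
+//	if err != nil {
+//		return err
+//	}
+//	if sum != "" {
+//		// req now carries the base64 encoded Content-Md5 header.
+//	}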
+func setMD5Checksum(ctx context.Context, req *smithyhttp.Request) (string, error) {
+	if v := req.Header.Get(contentMD5Header); len(v) != 0 {
+		return v, nil
+	}
+	stream := req.GetStream()
+	if stream == nil {
+		return "", nil
+	}
+
+	if !req.IsStreamSeekable() {
+		return "", fmt.Errorf(
+			"unseekable stream is not supported for computing md5 checksum")
+	}
+
+	v, err := computeMD5Checksum(stream)
+	if err != nil {
+		return "", err
+	}
+	if err := req.RewindStream(); err != nil {
+		return "", fmt.Errorf("failed to rewind stream after computing MD5 checksum, %w", err)
+	}
+	// set the 'Content-MD5' header
+	req.Header.Set(contentMD5Header, string(v))
+	return string(v), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go
new file mode 100644
index 000000000..f72952549
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go
@@ -0,0 +1,117 @@
+package checksum
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// setupInputContext is the initial middleware that looks up the input
+// used to configure checksum behavior. This middleware must be executed before
+// the input validation step or any other checksum middleware.
+type setupInputContext struct {
+	// GetAlgorithm is a function to get the checksum algorithm of the
+	// input payload from the input parameters.
+	//
+	// Given the input parameter value, the function must return the algorithm
+	// and true, or false if no algorithm is specified.
+	GetAlgorithm func(interface{}) (string, bool)
+}
+
+// ID for the middleware
+func (m *setupInputContext) ID() string {
+	return "AWSChecksum:SetupInputContext"
+}
+
+// HandleInitialize is an initialization middleware that sets up the checksum
+// context based on the input parameters provided in the stack.
+func (m *setupInputContext) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// Check if validation algorithm is specified.
+	if m.GetAlgorithm != nil {
+		// Check if the input resource has a checksum algorithm.
+		algorithm, ok := m.GetAlgorithm(in.Parameters)
+		if ok && len(algorithm) != 0 {
+			ctx = setContextInputAlgorithm(ctx, algorithm)
+		}
+	}
+
+	return next.HandleInitialize(ctx, in)
+}
+
+// inputAlgorithmKey is the context key used to store and retrieve the request
+// checksum algorithm, if present on the context.
+type inputAlgorithmKey struct{}
+
+// setContextInputAlgorithm sets the request checksum algorithm on the context.
+//
+// Scoped to stack values.
+func setContextInputAlgorithm(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, inputAlgorithmKey{}, value)
+}
+
+// getContextInputAlgorithm returns the checksum algorithm from the context if
+// one was specified. Empty string is returned if one is not specified.
+//
+// Scoped to stack values.
+func getContextInputAlgorithm(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, inputAlgorithmKey{}).(string)
+	return v
+}
+
+type setupOutputContext struct {
+	// GetValidationMode is a function to get the checksum validation
+	// mode of the output payload from the input parameters.
+	//
+	// Given the input parameter value, the function must return the validation
+	// mode and true, or false if no mode is specified.
+	GetValidationMode func(interface{}) (string, bool)
+}
+
+// ID for the middleware
+func (m *setupOutputContext) ID() string {
+	return "AWSChecksum:SetupOutputContext"
+}
+
+// HandleInitialize is an initialization middleware that sets up the checksum
+// context based on the input parameters provided in the stack.
+func (m *setupOutputContext) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// Check if validation mode is specified.
+	if m.GetValidationMode != nil {
+		// Check if the input resource has a validation mode.
+		mode, ok := m.GetValidationMode(in.Parameters)
+		if ok && len(mode) != 0 {
+			ctx = setContextOutputValidationMode(ctx, mode)
+		}
+	}
+
+	return next.HandleInitialize(ctx, in)
+}
+
+// outputValidationModeKey is the key set on context used to identify if
+// output checksum validation is enabled.
+type outputValidationModeKey struct{}
+
+// setContextOutputValidationMode sets the response checksum validation
+// mode on the context.
+//
+// Scoped to stack values.
+func setContextOutputValidationMode(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, outputValidationModeKey{}, value)
+}
+
+// getContextOutputValidationMode returns the response checksum validation
+// state, if one was specified. Empty string is returned if one is not
+// specified.
+//
+// Scoped to stack values.
+func getContextOutputValidationMode(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, outputValidationModeKey{}).(string)
+	return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go
new file mode 100644
index 000000000..9fde12d86
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go
@@ -0,0 +1,131 @@
+package checksum
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// outputValidationAlgorithmsUsedKey is the metadata key for indexing the
+// algorithms that were used by the middleware's validation.
+type outputValidationAlgorithmsUsedKey struct{}
+
+// GetOutputValidationAlgorithmsUsed returns the checksum algorithms used, as
+// stored in the middleware Metadata. Returns false if no algorithms were
+// stored in the Metadata.
+func GetOutputValidationAlgorithmsUsed(m middleware.Metadata) ([]string, bool) {
+	vs, ok := m.Get(outputValidationAlgorithmsUsedKey{}).([]string)
+	return vs, ok
+}
+
+// SetOutputValidationAlgorithmsUsed stores the checksum algorithms used in the
+// middleware Metadata.
+func SetOutputValidationAlgorithmsUsed(m *middleware.Metadata, vs []string) {
+	m.Set(outputValidationAlgorithmsUsedKey{}, vs)
+}
+
+// validateOutputPayloadChecksum middleware computes the payload checksum of
+// the received response and validates it against the checksum returned by the
+// service.
+type validateOutputPayloadChecksum struct {
+	// Algorithms represents a priority-ordered list of valid checksum
+	// algorithms that should be validated when present in HTTP response
+	// headers.
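+	//
+	// For example (algorithm constant names other than AlgorithmSHA256 are
+	// assumptions):
+	//
+	//	Algorithms: []Algorithm{AlgorithmCRC32, AlgorithmSHA256}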
+	Algorithms []Algorithm
+
+	// IgnoreMultipartValidation indicates multipart checksums ending with "-#"
+	// will be ignored.
+	IgnoreMultipartValidation bool
+
+	// When set the middleware will log when output does not have checksum or
+	// algorithm to validate.
+	LogValidationSkipped bool
+
+	// When set the middleware will log when the output contains a multipart
+	// checksum that was skipped and not validated.
+	LogMultipartValidationSkipped bool
+}
+
+func (m *validateOutputPayloadChecksum) ID() string {
+	return "AWSChecksum:ValidateOutputPayloadChecksum"
+}
+
+// HandleDeserialize is a Deserialize middleware that wraps the HTTP response
+// body with an io.ReadCloser that will validate its checksum.
+func (m *validateOutputPayloadChecksum) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	// If there is no validation mode specified, there is nothing to validate.
+	if mode := getContextOutputValidationMode(ctx); mode != "ENABLED" {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("unknown transport type %T", out.RawResponse),
+		}
+	}
+
+	var expectedChecksum string
+	var algorithmToUse Algorithm
+	for _, algorithm := range m.Algorithms {
+		value := response.Header.Get(AlgorithmHTTPHeader(algorithm))
+		if len(value) == 0 {
+			continue
+		}
+
+		expectedChecksum = value
+		algorithmToUse = algorithm
+	}
+
+	// TODO this must validate the validation mode is set to enabled.
+
+	logger := middleware.GetLogger(ctx)
+
+	// Skip validation if no checksum algorithm or checksum is available.
+	if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 {
+		if m.LogValidationSkipped {
+			// TODO this probably should have more information about the
+			// operation output that won't be validated.
+			logger.Logf(logging.Warn,
+				"Response has no supported checksum. Not validating response payload.")
+		}
+		return out, metadata, nil
+	}
+
+	// Ignore multipart validation
+	if m.IgnoreMultipartValidation && strings.Contains(expectedChecksum, "-") {
+		if m.LogMultipartValidationSkipped {
+			// TODO this probably should have more information about the
+			// operation output that won't be validated.
+			logger.Logf(logging.Warn, "Skipped validation of multipart checksum.")
+		}
+		return out, metadata, nil
+	}
+
+	body, err := newValidateChecksumReader(response.Body, algorithmToUse, expectedChecksum)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to create checksum validation reader, %w", err)
+	}
+	response.Body = body
+
+	// Update the metadata to include the set of the checksum algorithms that
+	// will be validated.
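+	// Callers can read this back once the response is handled, for example:
+	//
+	//	if used, ok := GetOutputValidationAlgorithmsUsed(metadata); ok {
+	//		// used lists the algorithms validated for this response.
+	//	}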
+ SetOutputValidationAlgorithmsUsed(&metadata, []string{ + string(algorithmToUse), + }) + + return out, metadata, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md new file mode 100644 index 000000000..c60858a10 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -0,0 +1,191 @@ +# v1.9.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.19 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest 
version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go new file mode 100644 index 000000000..cc919701a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go @@ -0,0 +1,48 @@ +package presignedurl + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +// WithIsPresigning adds the isPresigning sentinel value to a context to signal +// that the middleware stack is using the presign flow. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func WithIsPresigning(ctx context.Context) context.Context { + return middleware.WithStackValue(ctx, isPresigningKey{}, true) +} + +// GetIsPresigning returns if the context contains the isPresigning sentinel +// value for presigning flows. +// +// Scoped to stack values. 
Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetIsPresigning(ctx context.Context) bool {
+	v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool)
+	return v
+}
+
+type isPresigningKey struct{}
+
+// AddAsIsPresigingMiddleware adds a middleware to the head of the stack that
+// will update the stack's context to be flagged as being invoked for the
+// purpose of presigning.
+func AddAsIsPresigingMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before)
+}
+
+type asIsPresigningMiddleware struct{}
+
+func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" }
+
+func (asIsPresigningMiddleware) HandleInitialize(
+	ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	ctx = WithIsPresigning(ctx)
+	return next.HandleInitialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go
new file mode 100644
index 000000000..1b85375cf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go
@@ -0,0 +1,3 @@
+// Package presignedurl provides the customizations for API clients to fill in
+// presigned URLs into input parameters.
+package presignedurl
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
new file mode 100644
index 000000000..ea2621e47
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package presignedurl
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.9.28"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
new file mode 100644
index 000000000..1e2f5c812
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
@@ -0,0 +1,110 @@
+package presignedurl
+
+import (
+	"context"
+	"fmt"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// URLPresigner provides the interface to presign the input parameters into a
+// presigned URL.
+type URLPresigner interface {
+	// PresignURL presigns a URL.
+	PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error)
+}
+
+// ParameterAccessor provides a collection of accessors for retrieving and
+// setting the values needed for presigned URL generation.
+type ParameterAccessor struct {
+	// GetPresignedURL accessor points to a function that retrieves a presigned URL, if present
+	GetPresignedURL func(interface{}) (string, bool, error)
+
+	// GetSourceRegion accessor points to a function that retrieves the source region for the presigned URL
+	GetSourceRegion func(interface{}) (string, bool, error)
+
+	// CopyInput accessor points to a function that takes an input and returns a copy.
+	CopyInput func(interface{}) (interface{}, error)
+
+	// SetDestinationRegion accessor points to a function that sets the destination region on the API input struct
+	SetDestinationRegion func(interface{}, string) error
+
+	// SetPresignedURL accessor points to a function that sets the presigned URL on the API input struct
+	SetPresignedURL func(interface{}, string) error
+}
+
+// Options provides the set of options needed by the presigned URL middleware.
+type Options struct {
+	// Accessor holds the parameter accessors used by this middleware
+	Accessor ParameterAccessor
+
+	// Presigner is the URLPresigner used by the middleware
+	Presigner URLPresigner
+}
+
+// AddMiddleware adds the Presign URL middleware to the middleware stack.
+func AddMiddleware(stack *middleware.Stack, opts Options) error {
+	return stack.Initialize.Add(&presign{options: opts}, middleware.Before)
+}
+
+// RemoveMiddleware removes the Presign URL middleware from the stack.
+func RemoveMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Initialize.Remove((*presign)(nil).ID())
+	return err
+}
+
+type presign struct {
+	options Options
+}
+
+func (m *presign) ID() string { return "Presign" }
+
+func (m *presign) HandleInitialize(
+	ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// If PresignedURL is already set, skip this middleware.
+	if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	} else if ok {
+		return next.HandleInitialize(ctx, input)
+	}
+
+	// If the source region is not set, skip this middleware.
+	srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters)
+	if err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	} else if !ok || len(srcRegion) == 0 {
+		return next.HandleInitialize(ctx, input)
+	}
+
+	// Create a copy of the original input so the destination region value can
+	// be added. This ensures that value does not leak into the original
+	// request parameters.
+	paramCpy, err := m.options.Accessor.CopyInput(input.Parameters)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+	}
+
+	// Destination region is the API client's configured region.
+	dstRegion := awsmiddleware.GetRegion(ctx)
+	if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	}
+
+	presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+	}
+
+	// Update the original input with the presigned URL value.
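+	// (For example, for EC2 CopySnapshot this is assumed to populate the
+	// input's PresignedUrl member; the concrete member is generated per
+	// service and operation.)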
+ if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil { + return out, metadata, fmt.Errorf("presign middleware failed, %w", err) + } + + return next.HandleInitialize(ctx, input) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md new file mode 100644 index 000000000..ccc3c7479 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md @@ -0,0 +1,156 @@ +# v1.13.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. 
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-09-02) + +* **Feature**: Add support for S3 Multi-Region Access Point ARNs. + +# v1.6.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-04) + +* **Feature**: The handling of AccessPoint and Outpost ARNs have been updated. + +# v1.3.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go new file mode 100644 index 000000000..ec290b213 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go @@ -0,0 +1,53 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go-v2/aws/arn" +) + +// AccessPointARN provides representation +type AccessPointARN struct { + arn.ARN + AccessPointName string +} + +// GetARN returns the base ARN for the Access Point resource +func (a AccessPointARN) GetARN() arn.ARN { + return a.ARN +} + +// ParseAccessPointResource attempts to parse the ARN's resource as an +// AccessPoint resource. 
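+// The resParts argument is assumed to be the remainder of the ARN's resource
+// component after splitting on the "/" and ":" delimiters (see SplitResource)
+// and dropping the leading "accesspoint" label.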
+// +// Supported Access point resource format: +// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName} +// - example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint +func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { + if isFIPS(a.Region) { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} + } + if len(a.AccountID) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + if len(resParts) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} + } + if len(resParts) > 1 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"} + } + + resID := resParts[0] + if len(strings.TrimSpace(resID)) == 0 { + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} + } + + return AccessPointARN{ + ARN: a, + AccessPointName: resID, + }, nil +} + +func isFIPS(region string) bool { + return strings.HasPrefix(region, "fips-") || strings.HasSuffix(region, "-fips") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go new file mode 100644 index 000000000..06e1a3add --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go @@ -0,0 +1,85 @@ +package arn + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws/arn" +) + +var supportedServiceARN = []string{ + "s3", + "s3-outposts", + "s3-object-lambda", +} + +func isSupportedServiceARN(service string) bool { + for _, name := range supportedServiceARN { + if name == service { + return true + } + } + return false +} + +// Resource provides the interfaces abstracting ARNs of specific resource +// types. +type Resource interface { + GetARN() arn.ARN + String() string +} + +// ResourceParser provides the function for parsing an ARN's resource +// component into a typed resource. +type ResourceParser func(arn.ARN) (Resource, error) + +// ParseResource parses an AWS ARN into a typed resource for the S3 API. +func ParseResource(a arn.ARN, resParser ResourceParser) (resARN Resource, err error) { + if len(a.Partition) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "partition not set"} + } + + if !isSupportedServiceARN(a.Service) { + return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} + } + + if len(a.Resource) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. 
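+//
+// For example (a rough usage sketch): feeding ParseResource an ARN for an
+// unsupported service surfaces this error type:
+//
+//	a, _ := arn.Parse("arn:aws:sqs:us-west-2:012345678901:myqueue")
+//	_, err := ParseResource(a, func(arn.ARN) (Resource, error) { return nil, nil })
+//	// err.Error() reads roughly:
+//	// invalid Amazon sqs ARN, service is not supported, arn:aws:sqs:us-west-2:012345678901:myqueue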
+type InvalidARNError struct {
+	ARN    arn.ARN
+	Reason string
+}
+
+// Error returns a string describing the InvalidARNError
+func (e InvalidARNError) Error() string {
+	return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go
new file mode 100644
index 000000000..e06a30285
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go
@@ -0,0 +1,128 @@
+package arn
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// OutpostARN is the interface that should be satisfied by outpost ARNs
+type OutpostARN interface {
+	Resource
+	GetOutpostID() string
+}
+
+// ParseOutpostARNResource will parse a provided ARN's resource using the appropriate ARN format
+// and return a specific OutpostARN type
+//
+// Currently supported outpost ARN formats:
+// * Outpost AccessPoint ARN format:
+//   - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
+//   - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
+//
+// * Outpost Bucket ARN format:
+//   - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName}
+//   - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket
+//
+// Other outpost ARN formats may be supported and added in the future.
+func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) {
+	if len(a.Region) == 0 {
+		return nil, InvalidARNError{ARN: a, Reason: "region not set"}
+	}
+
+	if isFIPS(a.Region) {
+		return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"}
+	}
+
+	if len(a.AccountID) == 0 {
+		return nil, InvalidARNError{ARN: a, Reason: "account-id not set"}
+	}
+
+	// verify if outpost id is present and valid
+	if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 {
+		return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+	}
+
+	// verify possible resource type exists
+	if len(resParts) < 3 {
+		return nil, InvalidARNError{
+			ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present",
+		}
+	}
+
+	// Since we know this is an OutpostARN, fetch the outpostID
+	outpostID := strings.TrimSpace(resParts[0])
+
+	switch resParts[1] {
+	case "accesspoint":
+		accesspointARN, err := ParseAccessPointResource(a, resParts[2:])
+		if err != nil {
+			return OutpostAccessPointARN{}, err
+		}
+		return OutpostAccessPointARN{
+			AccessPointARN: accesspointARN,
+			OutpostID:      outpostID,
+		}, nil
+
+	case "bucket":
+		bucketName, err := parseBucketResource(a, resParts[2:])
+		if err != nil {
+			return nil, err
+		}
+		return OutpostBucketARN{
+			ARN:        a,
+			BucketName: bucketName,
+			OutpostID:  outpostID,
+		}, nil
+
+	default:
+		return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"}
+	}
+}
+
+// OutpostAccessPointARN represents outpost access point ARN.
+type OutpostAccessPointARN struct {
+	AccessPointARN
+	OutpostID string
+}
+
+// GetOutpostID returns the outpost id of outpost access point arn
+func (o OutpostAccessPointARN) GetOutpostID() string {
+	return o.OutpostID
+}
+
+// OutpostBucketARN represents the outpost bucket ARN.
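+//
+// As a rough sketch of how an OutpostBucketARN is produced (assuming the
+// resource portion has already been split and the leading "outpost" part dropped):
+//
+//	a, _ := arn.Parse("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket")
+//	res, _ := ParseOutpostARNResource(a, SplitResource(a.Resource)[1:])
+//	// res is an OutpostBucketARN with OutpostID "op-1234567890123456"
+//	// and BucketName "mybucket".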
+type OutpostBucketARN struct {
+	arn.ARN
+	BucketName string
+	OutpostID  string
+}
+
+// GetOutpostID returns the outpost id of outpost bucket arn
+func (o OutpostBucketARN) GetOutpostID() string {
+	return o.OutpostID
+}
+
+// GetARN retrieves the base ARN from outpost bucket ARN resource
+func (o OutpostBucketARN) GetARN() arn.ARN {
+	return o.ARN
+}
+
+// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the
+// bucket resource id.
+//
+// parseBucketResource only parses the bucket resource id.
+func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) {
+	if len(resParts) == 0 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+	}
+	if len(resParts) > 1 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+	}
+
+	bucketName = strings.TrimSpace(resParts[0])
+	if len(bucketName) == 0 {
+		return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+	}
+	return bucketName, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go
new file mode 100644
index 000000000..513154cc0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go
@@ -0,0 +1,15 @@
+package arn
+
+// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service
+type S3ObjectLambdaARN interface {
+	Resource
+
+	isS3ObjectLambdasARN()
+}
+
+// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type
+type S3ObjectLambdaAccessPointARN struct {
+	AccessPointARN
+}
+
+func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go
new file mode 100644
index 000000000..b51532085
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go
@@ -0,0 +1,73 @@
+package s3shared
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+
+	"github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// ARNLookup is the initial middleware that looks up if an arn is provided.
+// This middleware is responsible for fetching the ARN from an arnable field, and registering the ARN on
+// the middleware context. This middleware must be executed before the input validation step or any other
+// arn processing middleware.
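+//
+// A hypothetical wiring sketch (the input type here is invented for
+// illustration; real operation inputs expose their ARN-able member the same way):
+//
+//	lookup := &ARNLookup{
+//		GetARNValue: func(in interface{}) (*string, bool) {
+//			v, ok := in.(*hypotheticalGetObjectInput)
+//			if !ok || v == nil {
+//				return nil, false
+//			}
+//			return v.Bucket, v.Bucket != nil
+//		},
+//	}
+//	// lookup would then be registered on the stack's Initialize step.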
+type ARNLookup struct {
+
+	// GetARNValue takes in an input interface and returns a ptr to string and a bool
+	GetARNValue func(interface{}) (*string, bool)
+}
+
+// ID for the middleware
+func (m *ARNLookup) ID() string {
+	return "S3Shared:ARNLookup"
+}
+
+// HandleInitialize handles the behavior of this initialize step
+func (m *ARNLookup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// check if GetARNValue is supported
+	if m.GetARNValue == nil {
+		return next.HandleInitialize(ctx, in)
+	}
+
+	// check if the input resource is an ARN; if not go to next
+	v, ok := m.GetARNValue(in.Parameters)
+	if !ok || v == nil || !arn.IsARN(*v) {
+		return next.HandleInitialize(ctx, in)
+	}
+
+	// if ARN, process ResourceRequest and put it on ctx
+	av, err := arn.Parse(*v)
+	if err != nil {
+		return out, metadata, fmt.Errorf("error parsing arn: %w", err)
+	}
+	// set parsed arn on context
+	ctx = setARNResourceOnContext(ctx, av)
+
+	return next.HandleInitialize(ctx, in)
+}
+
+// arnResourceKey is the key set on context used to identify and retrieve an ARN resource
+// if present on the context.
+type arnResourceKey struct{}
+
+// setARNResourceOnContext sets the S3 ARN on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setARNResourceOnContext(ctx context.Context, value arn.ARN) context.Context {
+	return middleware.WithStackValue(ctx, arnResourceKey{}, value)
+}
+
+// GetARNResourceFromContext returns an ARN from context and a bool indicating
+// presence of ARN on ctx.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetARNResourceFromContext(ctx context.Context) (arn.ARN, bool) {
+	v, ok := middleware.GetStackValue(ctx, arnResourceKey{}).(arn.ARN)
+	return v, ok
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go
new file mode 100644
index 000000000..8926e5970
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go
@@ -0,0 +1,22 @@
+package config
+
+import "context"
+
+// UseARNRegionProvider is an interface for retrieving external configuration value for UseARNRegion
+type UseARNRegionProvider interface {
+	GetS3UseARNRegion(ctx context.Context) (value bool, found bool, err error)
+}
+
+// ResolveUseARNRegion extracts the first instance of a UseARNRegion from the config slice.
+// Additionally returns a boolean to indicate if the value was found in provided configs, and an error if one is encountered.
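+//
+// A small sketch (the provider type below is hypothetical; any config value
+// implementing UseARNRegionProvider participates in resolution):
+//
+//	type staticUseARNRegion bool
+//
+//	func (s staticUseARNRegion) GetS3UseARNRegion(context.Context) (bool, bool, error) {
+//		return bool(s), true, nil
+//	}
+//
+//	// ResolveUseARNRegion(ctx, []interface{}{staticUseARNRegion(true)}) yields (true, true, nil).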
+func ResolveUseARNRegion(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(UseARNRegionProvider); ok {
+			value, found, err = p.GetS3UseARNRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go
new file mode 100644
index 000000000..aa0c3714e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go
@@ -0,0 +1,183 @@
+package s3shared
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// TODO: fix these error statements to be relevant to v2 sdk
+
+const (
+	invalidARNErrorErrCode    = "InvalidARNError"
+	configurationErrorErrCode = "ConfigurationError"
+)
+
+// InvalidARNError denotes the error for an invalid ARN
+type InvalidARNError struct {
+	message  string
+	resource arn.Resource
+	origErr  error
+}
+
+// Error returns the InvalidARN error string
+func (e InvalidARNError) Error() string {
+	var extra string
+	if e.resource != nil {
+		extra = "ARN: " + e.resource.String()
+	}
+	msg := invalidARNErrorErrCode + " : " + e.message
+	if extra != "" {
+		msg = msg + "\n\t" + extra
+	}
+
+	return msg
+}
+
+// Unwrap returns the original error wrapped by InvalidARNError
+func (e InvalidARNError) Unwrap() error {
+	return e.origErr
+}
+
+// NewInvalidARNError denotes an invalid arn error
+func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "invalid ARN",
+		origErr:  err,
+		resource: resource,
+	}
+}
+
+// NewInvalidARNWithUnsupportedPartitionError denotes an ARN not supported for the target partition
+func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "resource ARN not supported for the target ARN partition",
+		origErr:  err,
+		resource: resource,
+	}
+}
+
+// NewInvalidARNWithFIPSError denotes an ARN not supported for a FIPS region
+//
+// Deprecated: FIPS will not appear in the ARN region component.
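+//
+// For context, a hedged note: the legacy FIPS pseudo-regions this refers to
+// are region strings shaped like "fips-us-gov-west-1" or "us-gov-west-1-fips";
+// newer SDK releases model FIPS through endpoint options rather than encoding
+// it in the region string.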
+func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError {
+	return InvalidARNError{
+		message:  "resource ARN not supported for FIPS region",
+		resource: resource,
+		origErr:  err,
+	}
+}
+
+// ConfigurationError is used to denote a client configuration error
+type ConfigurationError struct {
+	message           string
+	resource          arn.Resource
+	clientPartitionID string
+	clientRegion      string
+	origErr           error
+}
+
+// Error returns the Configuration error string
+func (e ConfigurationError) Error() string {
+	extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
+		e.resource, e.clientPartitionID, e.clientRegion)
+
+	msg := configurationErrorErrCode + " : " + e.message
+	if extra != "" {
+		msg = msg + "\n\t" + extra
+	}
+	return msg
+}
+
+// Unwrap returns the original error wrapped by ConfigurationError
+func (e ConfigurationError) Unwrap() error {
+	return e.origErr
+}
+
+// NewClientPartitionMismatchError denotes a client partition mismatch error
+func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client partition does not match provided ARN partition",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientRegionMismatchError denotes a cross region access error
+func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client region does not match provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewFailedToResolveEndpointError denotes an endpoint resolving error
+func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "endpoint resolver failed to find an endpoint for the provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForFIPSError denotes a client config error for unsupported cross region FIPS access
+func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for fips but cross-region resource ARN provided",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS
+func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "use of ARN is not supported when client or request is configured for FIPS",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForAccelerateError denotes a client config error for unsupported S3 accelerate
+func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for S3 Accelerate but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForCrossRegionFIPSError denotes a client config error for an unsupported cross region FIPS request
+func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for FIPS with cross-region enabled but is not supported with cross-region resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+// NewClientConfiguredForDualStackError denotes a client config error for unsupported S3 Dual-stack
+func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+	return ConfigurationError{
+		message:           "client configured for S3 Dual-stack but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
new file mode 100644
index 000000000..3ebafc356
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package s3shared
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.13.17"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go
new file mode 100644
index 000000000..85b60d2a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go
@@ -0,0 +1,29 @@
+package s3shared
+
+import (
+	"github.com/aws/smithy-go/middleware"
+)
+
+// hostID is used to retrieve the host id from response metadata
+type hostID struct {
+}
+
+// SetHostIDMetadata sets the provided host id over middleware metadata
+func SetHostIDMetadata(metadata *middleware.Metadata, id string) {
+	metadata.Set(hostID{}, id)
+}
+
+// GetHostIDMetadata retrieves the host id from middleware metadata and
+// returns the host id as a string along with a boolean indicating presence of
+// hostId on middleware metadata.
+func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) {
+	if !metadata.Has(hostID{}) {
+		return "", false
+	}
+
+	v, ok := metadata.Get(hostID{}).(string)
+	if !ok {
+		return "", true
+	}
+	return v, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go
new file mode 100644
index 000000000..f02604cb6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go
@@ -0,0 +1,28 @@
+package s3shared
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// clonedInputKey used to denote if request input was cloned.
+type clonedInputKey struct{}
+
+// SetClonedInputKey sets a key on context to denote input was cloned previously.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetClonedInputKey(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, clonedInputKey{}, value)
+}
+
+// IsClonedInput retrieves if the context key for cloned input was set.
+// If set, we can infer that the request input was cloned previously.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func IsClonedInput(ctx context.Context) bool {
+	v, _ := middleware.GetStackValue(ctx, clonedInputKey{}).(bool)
+	return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go
new file mode 100644
index 000000000..f52f2f11e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go
@@ -0,0 +1,52 @@
+package s3shared
+
+import (
+	"context"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const metadataRetrieverID = "S3MetadataRetriever"
+
+// AddMetadataRetrieverMiddleware adds the request id and host id retriever middleware
+func AddMetadataRetrieverMiddleware(stack *middleware.Stack) error {
+	// add metadata retriever middleware before operation deserializers so that it can retrieve metadata such as
+	// host id, request id from response header returned by operation deserializers
+	return stack.Deserialize.Insert(&metadataRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+type metadataRetriever struct {
+}
+
+// ID returns the middleware identifier
+func (m *metadataRetriever) ID() string {
+	return metadataRetrieverID
+}
+
+func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+		return out, metadata, err
+	}
+
+	// check for header for Request id
+	if v := resp.Header.Get("X-Amz-Request-Id"); len(v) != 0 {
+		// set reqID on metadata for successful responses.
+		awsmiddleware.SetRequestIDMetadata(&metadata, v)
+	}
+
+	// look up host-id
+	if v := resp.Header.Get("X-Amz-Id-2"); len(v) != 0 {
+		// set hostID on metadata for successful responses.
+		SetHostIDMetadata(&metadata, v)
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go
new file mode 100644
index 000000000..bee8da3fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go
@@ -0,0 +1,77 @@
+package s3shared
+
+import (
+	"fmt"
+	"strings"
+
+	awsarn "github.com/aws/aws-sdk-go-v2/aws/arn"
+	"github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// ResourceRequest represents an ARN resource and api request metadata
+type ResourceRequest struct {
+	Resource arn.Resource
+	// RequestRegion is the region configured on the request config
+	RequestRegion string
+
+	// SigningRegion is the signing region resolved for the request
+	SigningRegion string
+
+	// PartitionID is the resolved partition id for the provided request region
+	PartitionID string
+
+	// UseARNRegion indicates if the client should use the region provided in an ARN resource
+	UseARNRegion bool
+
+	// UseFIPS indicates if the client is configured for FIPS
+	UseFIPS bool
+}
+
+// ARN returns the resource ARN
+func (r ResourceRequest) ARN() awsarn.ARN {
+	return r.Resource.GetARN()
+}
+
+// ResourceConfiguredForFIPS returns true if the resource ARN's region is FIPS
+//
+// Deprecated: FIPS will not be present in the ARN region
+func (r ResourceRequest) ResourceConfiguredForFIPS() bool {
+	return IsFIPS(r.ARN().Region)
+}
+
+// AllowCrossRegion returns a bool value to denote if the S3UseARNRegion flag is set
+func (r ResourceRequest) AllowCrossRegion() bool {
+	return r.UseARNRegion
+}
+
+// IsCrossPartition returns true if the request is configured for a region of another partition than
+// the one the resource ARN region resolves to. IsCrossPartition will not return an error
+// if the request is not configured with a specific partition id. This might happen if the customer
+// provides a custom endpoint url, but does not associate a partition id with it.
+func (r ResourceRequest) IsCrossPartition() (bool, error) {
+	rv := r.PartitionID
+	if len(rv) == 0 {
+		return false, nil
+	}
+
+	av := r.Resource.GetARN().Partition
+	if len(av) == 0 {
+		return false, fmt.Errorf("no partition id for provided ARN")
+	}
+
+	return !strings.EqualFold(rv, av), nil
+}
+
+// IsCrossRegion returns true if the request signing region is not the same as the arn region
+func (r ResourceRequest) IsCrossRegion() bool {
+	v := r.SigningRegion
+	return !strings.EqualFold(v, r.Resource.GetARN().Region)
+}
+
+// IsFIPS returns true if the region is a fips pseudo-region
+//
+// Deprecated: FIPS should be specified via EndpointOptions.
+func IsFIPS(region string) bool {
+	return strings.HasPrefix(region, "fips-") ||
+		strings.HasSuffix(region, "-fips")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go
new file mode 100644
index 000000000..857336243
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go
@@ -0,0 +1,33 @@
+package s3shared
+
+import (
+	"errors"
+	"fmt"
+
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
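+//
+// A typical retrieval sketch: callers unwrap to this type with errors.As to
+// get at the S3-specific HostID alongside the usual request id:
+//
+//	var re *ResponseError
+//	if errors.As(err, &re) {
+//		// re.Response.StatusCode, re.RequestID, and re.HostID are available here.
+//	}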
+type ResponseError struct {
+	*awshttp.ResponseError
+
+	// HostID associated with response error
+	HostID string
+}
+
+// ServiceHostID returns the host id associated with the Response Error
+func (e *ResponseError) ServiceHostID() string { return e.HostID }
+
+// Error returns the formatted error
+func (e *ResponseError) Error() string {
+	return fmt.Sprintf(
+		"https response error StatusCode: %d, RequestID: %s, HostID: %s, %v",
+		e.Response.StatusCode, e.RequestID, e.HostID, e.Err)
+}
+
+// As populates target and returns true if the type of target is an error type that
+// the ResponseError embeds (e.g. the S3 HTTP ResponseError)
+func (e *ResponseError) As(target interface{}) bool {
+	return errors.As(e.ResponseError, target)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go
new file mode 100644
index 000000000..543576245
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go
@@ -0,0 +1,60 @@
+package s3shared
+
+import (
+	"context"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddResponseErrorMiddleware adds the response error wrapper middleware
+func AddResponseErrorMiddleware(stack *middleware.Stack) error {
+	// add error wrapper middleware before request id retriever middleware so that it can wrap the error response
+	// returned by operation deserializers
+	return stack.Deserialize.Insert(&errorWrapper{}, metadataRetrieverID, middleware.Before)
+}
+
+type errorWrapper struct {
+}
+
+// ID returns the middleware identifier
+func (m *errorWrapper) ID() string {
+	return "ResponseErrorWrapper"
+}
+
+func (m *errorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err == nil {
+		// Nothing to do when there is no error.
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		// No raw response to wrap with.
+ return out, metadata, err + } + + // look for request id in metadata + reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata) + // look for host id in metadata + hostID, _ := GetHostIDMetadata(metadata) + + // Wrap the returned smithy error with the request id retrieved from the metadata + err = &ResponseError{ + ResponseError: &awshttp.ResponseError{ + ResponseError: &smithyhttp.ResponseError{ + Response: resp, + Err: err, + }, + RequestID: reqID, + }, + HostID: hostID, + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go new file mode 100644 index 000000000..22773199f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go @@ -0,0 +1,78 @@ +package s3shared + +import ( + "context" + "fmt" + "strings" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + + awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" +) + +// EnableDualstack represents middleware struct for enabling dualstack support +// +// Deprecated: See EndpointResolverOptions' UseDualStackEndpoint support +type EnableDualstack struct { + // UseDualstack indicates if dualstack endpoint resolving is to be enabled + UseDualstack bool + + // DefaultServiceID is the service id prefix used in endpoint resolving + // by default service-id is 's3' and 's3-control' for service s3, s3control. + DefaultServiceID string +} + +// ID returns the middleware ID. +func (*EnableDualstack) ID() string { + return "EnableDualstack" +} + +// HandleSerialize handles serializer middleware behavior when middleware is executed +func (u *EnableDualstack) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + + // check for host name immutable property + if smithyhttp.GetHostnameImmutable(ctx) { + return next.HandleSerialize(ctx, in) + } + + serviceID := awsmiddle.GetServiceID(ctx) + + // s3-control may be represented as `S3 Control` as in model + if serviceID == "S3 Control" { + serviceID = "s3-control" + } + + if len(serviceID) == 0 { + // default service id + serviceID = u.DefaultServiceID + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + if u.UseDualstack { + parts := strings.Split(req.URL.Host, ".") + if len(parts) < 3 { + return out, metadata, fmt.Errorf("unable to update endpoint host for dualstack, hostname invalid, %s", req.URL.Host) + } + + for i := 0; i+1 < len(parts); i++ { + if strings.EqualFold(parts[i], serviceID) { + parts[i] = parts[i] + ".dualstack" + break + } + } + + // construct the url host + req.URL.Host = strings.Join(parts, ".") + } + + return next.HandleSerialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go new file mode 100644 index 000000000..65fd07e00 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go @@ -0,0 +1,89 @@ +package s3shared + +import ( + "encoding/xml" + "fmt" + "io" + "net/http" + "strings" +) + +// ErrorComponents represents the error response fields +// that will be deserialized from an xml error response body +type ErrorComponents struct { + Code string 
`xml:"Code"` + Message string `xml:"Message"` + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` +} + +// GetUnwrappedErrorResponseComponents returns the error fields from an xml error response body +func GetUnwrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) { + var errComponents ErrorComponents + if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF { + return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err) + } + return errComponents, nil +} + +// GetWrappedErrorResponseComponents returns the error fields from an xml error response body +// in which error code, and message are wrapped by a tag +func GetWrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) { + var errComponents struct { + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` + } + + if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF { + return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err) + } + + return ErrorComponents{ + Code: errComponents.Code, + Message: errComponents.Message, + RequestID: errComponents.RequestID, + HostID: errComponents.HostID, + }, nil +} + +// GetErrorResponseComponents retrieves error components according to passed in options +func GetErrorResponseComponents(r io.Reader, options ErrorResponseDeserializerOptions) (ErrorComponents, error) { + var errComponents ErrorComponents + var err error + + if options.IsWrappedWithErrorTag { + errComponents, err = GetWrappedErrorResponseComponents(r) + } else { + errComponents, err = GetUnwrappedErrorResponseComponents(r) + } + + if err != nil { + return ErrorComponents{}, err + } + + // If an error code or message is not retrieved, it is derived from the http status code + // eg, for S3 service, we derive err code and message, if none is found + if options.UseStatusCode && len(errComponents.Code) == 0 && + len(errComponents.Message) == 0 { + // derive code and message from status code + statusText := http.StatusText(options.StatusCode) + errComponents.Code = strings.Replace(statusText, " ", "", -1) + errComponents.Message = statusText + } + return errComponents, nil +} + +// ErrorResponseDeserializerOptions represents error response deserializer options for s3 and s3-control service +type ErrorResponseDeserializerOptions struct { + // UseStatusCode denotes if status code should be used to retrieve error code, msg + UseStatusCode bool + + // StatusCode is status code of error response + StatusCode int + + //IsWrappedWithErrorTag represents if error response's code, msg is wrapped within an + // additional tag + IsWrappedWithErrorTag bool +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md new file mode 100644 index 000000000..d81bbdb88 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md @@ -0,0 +1,228 @@ +# v1.27.11 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.10 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.9 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.8 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.7 (2022-08-30) + +* No change notes available for this release. 
+
+# v1.27.6 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.5 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.4 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.3 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.2 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.0 (2022-07-01)
+
+* **Feature**: Add presign support for HeadBucket, DeleteObject, and DeleteBucket. Fixes [#1076](https://github.com/aws/aws-sdk-go-v2/issues/1076).
+
+# v1.26.12 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.11 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.10 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.9 (2022-05-06)
+
+* No change notes available for this release.
+
+# v1.26.8 (2022-05-03)
+
+* **Documentation**: Documentation only update for doc bug fixes for the S3 API docs.
+
+# v1.26.7 (2022-04-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.5 (2022-04-12)
+
+* **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
+
+# v1.26.4 (2022-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. This adds a new retry mode, `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.1 (2022-01-28)
+
+* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR.
+ +# v1.24.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Documentation**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2021-12-21) + +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. +* **Feature**: Updated to latest service endpoints + +# v1.21.0 (2021-12-02) + +* **Feature**: API client updated +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2021-11-30) + +* **Feature**: API client updated + +# v1.19.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2021-11-12) + +* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature. + +# v1.18.0 (2021-11-06) + +* **Feature**: Support has been added for the SelectObjectContent API. +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Feature**: Updates S3 streaming operations - PutObject, UploadPart, WriteGetObjectResponse to use unsigned payload signing auth when TLS is enabled. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2021-09-17) + +* **Feature**: Updated API client and endpoints to latest revision. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2021-09-10) + +* No change notes available for this release. + +# v1.15.0 (2021-09-02) + +* **Feature**: API client updated +* **Feature**: Add support for S3 Multi-Region Access Point ARNs. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2021-08-27) + +* **Feature**: Updated API model to latest revision. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2021-08-19) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2021-08-04) + +* **Feature**: Add `HeadObject` presign support. ([#1346](https://github.com/aws/aws-sdk-go-v2/pull/1346)) +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-06-04) + +* **Feature**: The handling of AccessPoint and Outpost ARNs have been updated. +* **Feature**: Updated service client to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2021-05-25) + +* **Feature**: API client updated + +# v1.8.0 (2021-05-20) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Feature**: Updated to latest service API model. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go new file mode 100644 index 000000000..790cc4380 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go @@ -0,0 +1,772 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/v4a" + acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" + s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "S3" +const ServiceAPIVersion = "2006-03-01" + +// Client provides the API client to make operations call for Amazon Simple Storage +// Service. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + resolveHTTPSignerV4a(&options) + + for _, fn := range optFns { + fn(&options) + } + + resolveCredentialProvider(&options) + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // Allows you to disable S3 Multi-Region access points feature. + DisableMultiRegionAccessPoints bool + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. 
A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // Allows you to enable arn region support for the service. + UseARNRegion bool + + // Allows you to enable S3 Accelerate feature. All operations compatible with S3 + // Accelerate will use the accelerate endpoint for requests. Requests not + // compatible will fall back to normal S3 requests. The bucket must be enabled for + // accelerate to be used with S3 client with accelerate enabled. If the bucket is + // not enabled for accelerate an error will be returned. The bucket name must be + // DNS compatible to work with accelerate. + UseAccelerate bool + + // Allows you to enable dual-stack endpoint support for the service. + // + // Deprecated: Set dual-stack by setting UseDualStackEndpoint on + // EndpointResolverOptions. When EndpointResolverOptions' UseDualStackEndpoint + // field is set it overrides this field value. + UseDualstack bool + + // Allows you to enable the client to use path-style addressing, i.e., + // https://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will use virtual + // hosted bucket addressing when possible(https://BUCKET.s3.amazonaws.com/KEY). + UsePathStyle bool + + // Signature Version 4a (SigV4a) Signer + httpSignerV4a httpSignerV4a + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) 
+ } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + setSafeEventStreamClientLogMode(&options, opID) + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + resolveCredentialProvider(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseARNRegion(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + so.DisableURIPathEscaping = true + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves UseARNRegion S3 configuration +func resolveUseARNRegion(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := s3sharedconfig.ResolveUseARNRegion(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.UseARNRegion = value + } + return nil +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func resolveCredentialProvider(o *Options) { + if o.Credentials == nil { + return + } + + if _, ok := o.Credentials.(v4a.CredentialsProvider); ok { + return + } + + switch o.Credentials.(type) { + case aws.AnonymousCredentials, *aws.AnonymousCredentials: + return + } + + o.Credentials = &v4a.SymmetricCredentialAdaptor{SymmetricProvider: o.Credentials} +} + +func swapWithCustomHTTPSignerMiddleware(stack *middleware.Stack, o Options) error { + mw := s3cust.NewSignHTTPRequestMiddleware(s3cust.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + V4Signer: o.HTTPSignerV4, + V4aSigner: o.httpSignerV4a, + LogSigning: o.ClientLogMode.IsSigning(), + }) + + return s3cust.RegisterSigningMiddleware(stack, mw) +} + +type httpSignerV4a interface { + SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash, + service string, regionSet []string, signingTime time.Time, + optFns ...func(*v4a.SignerOptions)) error +} + +func resolveHTTPSignerV4a(o *Options) { + if o.httpSignerV4a != nil { + return + } + o.httpSignerV4a = newDefaultV4aSigner(*o) +} + +func newDefaultV4aSigner(o Options) *v4a.Signer { + return v4a.NewSigner(func(so *v4a.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + so.DisableURIPathEscaping = true + }) +} + +func addMetadataRetrieverMiddleware(stack *middleware.Stack) error { + return s3shared.AddMetadataRetrieverMiddleware(stack) +} + +// ComputedInputChecksumsMetadata provides information about the algorithms used to +// compute the checksum(s) of the input payload. +type ComputedInputChecksumsMetadata struct { + // ComputedChecksums is a map of algorithm name to checksum value of the computed + // input payload's checksums. + ComputedChecksums map[string]string +} + +// GetComputedInputChecksumsMetadata retrieves from the result metadata the map of +// algorithms and input payload checksums values. 
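
A sketch of how the accessor declared next is typically used: request a checksum on upload, then read back which checksums the client computed for the payload. Bucket, key, and body are placeholders; PutObject, PutObjectInput, and the types package are generated in other files of this same vendored module:

import (
	"context"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func showComputedChecksums(ctx context.Context, client *s3.Client) error {
	// Ask the client to compute a SHA-256 checksum for the request payload.
	out, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:            aws.String("example-bucket"), // placeholder
		Key:               aws.String("example-key"),    // placeholder
		Body:              strings.NewReader("hello world"),
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
	})
	if err != nil {
		return err
	}

	// The middleware stack records the computed checksums in the result metadata.
	if md, ok := s3.GetComputedInputChecksumsMetadata(out.ResultMetadata); ok {
		for algorithm, checksum := range md.ComputedChecksums {
			fmt.Printf("computed %s checksum: %s\n", algorithm, checksum)
		}
	}
	return nil
}
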
+func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChecksumsMetadata, bool) { + values, ok := internalChecksum.GetComputedInputChecksums(m) + if !ok { + return ComputedInputChecksumsMetadata{}, false + } + return ComputedInputChecksumsMetadata{ + ComputedChecksums: values, + }, true + +} + +// ChecksumValidationMetadata contains metadata such as the checksum algorithm used +// for data integrity validation. +type ChecksumValidationMetadata struct { + // AlgorithmsUsed is the set of the checksum algorithms used to validate the + // response payload. The response payload must be completely read in order for the + // checksum validation to be performed. An error is returned by the operation + // output's response io.ReadCloser if the computed checksums are invalid. + AlgorithmsUsed []string +} + +// GetChecksumValidationMetadata returns the set of algorithms that will be used to +// validate the response payload with. The response payload must be completely read +// in order for the checksum validation to be performed. An error is returned by +// the operation output's response io.ReadCloser if the computed checksums are +// invalid. Returns false if no checksum algorithm used metadata was found. +func GetChecksumValidationMetadata(m middleware.Metadata) (ChecksumValidationMetadata, bool) { + values, ok := internalChecksum.GetOutputValidationAlgorithmsUsed(m) + if !ok { + return ChecksumValidationMetadata{}, false + } + return ChecksumValidationMetadata{ + AlgorithmsUsed: append(make([]string, 0, len(values)), values...), + }, true + +} + +// nopGetBucketAccessor is no-op accessor for operation that don't support bucket +// member as input +func nopGetBucketAccessor(input interface{}) (*string, bool) { + return nil, false +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return s3shared.AddResponseErrorMiddleware(stack) +} + +func disableAcceptEncodingGzip(stack *middleware.Stack) error { + return acceptencodingcust.AddAcceptEncodingGzip(stack, acceptencodingcust.AddAcceptEncodingGzipOptions{}) +} + +// ResponseError provides the HTTP centric error type wrapping the underlying error +// with the HTTP response value and the deserialized RequestID. +type ResponseError interface { + error + + ServiceHostID() string + ServiceRequestID() string +} + +var _ ResponseError = (*s3shared.ResponseError)(nil) + +// GetHostIDMetadata retrieves the host id from middleware metadata returns host id +// as string along with a boolean indicating presence of hostId on middleware +// metadata. 
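
Further down, this file declares a presign client that reuses the API client's options and converts the middleware stack to sign requests without sending them. A sketch of producing a time-limited GetObject URL with it (bucket and key are placeholders; PresignGetObject and GetObjectInput are generated in api_op_GetObject.go within this same module):

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func presignedGetURL(ctx context.Context, client *s3.Client) (string, error) {
	// Without WithPresignExpires, the URL defaults to a 900-second lifetime.
	presigner := s3.NewPresignClient(client, s3.WithPresignExpires(15*time.Minute))

	req, err := presigner.PresignGetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		return "", err
	}

	// req.URL can be handed to any HTTP client until it expires.
	return req.URL, nil
}
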
+func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) { + return s3shared.GetHostIDMetadata(metadata) +} + +// HTTPPresignerV4 represents presigner interface used by presign url client +type HTTPPresignerV4 interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*v4.SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// httpPresignerV4a represents sigv4a presigner interface used by presign url +// client +type httpPresignerV4a interface { + PresignHTTP( + ctx context.Context, credentials v4a.Credentials, r *http.Request, + payloadHash string, service string, regionSet []string, signingTime time.Time, + optFns ...func(*v4a.SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignOptions represents the presign client options +type PresignOptions struct { + + // ClientOptions are list of functional options to mutate client options used by + // the presign client. + ClientOptions []func(*Options) + + // Presigner is the presigner used by the presign url client + Presigner HTTPPresignerV4 + + // Expires sets the expiration duration for the generated presign url. This should + // be the duration in seconds the presigned URL should be considered valid for. If + // not set or set to zero, presign url would default to expire after 900 seconds. + Expires time.Duration + + // presignerV4a is the presigner used by the presign url client + presignerV4a httpPresignerV4a +} + +func (o PresignOptions) copy() PresignOptions { + clientOptions := make([]func(*Options), len(o.ClientOptions)) + copy(clientOptions, o.ClientOptions) + o.ClientOptions = clientOptions + return o +} + +// WithPresignClientFromClientOptions is a helper utility to retrieve a function +// that takes PresignOption as input +func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { + return withPresignClientFromClientOptions(optFns).options +} + +type withPresignClientFromClientOptions []func(*Options) + +func (w withPresignClientFromClientOptions) options(o *PresignOptions) { + o.ClientOptions = append(o.ClientOptions, w...) +} + +// WithPresignExpires is a helper utility to append Expires value on presign +// options optional function +func WithPresignExpires(dur time.Duration) func(*PresignOptions) { + return withPresignExpires(dur).options +} + +type withPresignExpires time.Duration + +func (w withPresignExpires) options(o *PresignOptions) { + o.Expires = time.Duration(w) +} + +// PresignClient represents the presign url client +type PresignClient struct { + client *Client + options PresignOptions +} + +// NewPresignClient generates a presign client using provided API Client and +// presign options +func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { + var options PresignOptions + for _, fn := range optFns { + fn(&options) + } + if len(options.ClientOptions) != 0 { + c = New(c.options, options.ClientOptions...) 
+ } + + if options.Presigner == nil { + options.Presigner = newDefaultV4Signer(c.options) + } + + if options.presignerV4a == nil { + options.presignerV4a = newDefaultV4aSigner(c.options) + } + + return &PresignClient{ + client: c, + options: options, + } +} + +func withNopHTTPClientAPIOption(o *Options) { + o.HTTPClient = smithyhttp.NopClient{} +} + +type presignConverter PresignOptions + +func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + stack.Finalize.Clear() + stack.Deserialize.Clear() + stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) + stack.Build.Remove("UserAgent") + pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.Presigner, + LogSigning: options.ClientLogMode.IsSigning(), + }) + err = stack.Finalize.Add(pmw, middleware.After) + if err != nil { + return err + } + if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { + return err + } + + // add multi-region access point presigner + signermv := s3cust.NewPresignHTTPRequestMiddleware(s3cust.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + V4Presigner: c.Presigner, + V4aPresigner: c.presignerV4a, + LogSigning: options.ClientLogMode.IsSigning(), + }) + err = s3cust.RegisterPreSigningMiddleware(stack, signermv) + if err != nil { + return err + } + + if c.Expires < 0 { + return fmt.Errorf("presign URL duration must be 0 or greater, %v", c.Expires) + } + // add middleware to set expiration for s3 presigned url, if expiration is set to + // 0, this middleware sets a default expiration of 900 seconds + err = stack.Build.Add(&s3cust.AddExpiresOnPresignedURL{Expires: c.Expires}, middleware.After) + if err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigingMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go new file mode 100644 index 000000000..042e848a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go @@ -0,0 +1,232 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This action aborts a multipart upload. After a multipart upload is aborted, no +// additional parts can be uploaded using that upload ID. The storage consumed by +// any previously uploaded parts will be freed. However, if any part uploads are +// currently in progress, those part uploads might or might not succeed. As a +// result, it might be necessary to abort a given multipart upload multiple times +// in order to completely free all storage consumed by all parts. 
To verify that +// all parts have been removed, so you don't get charged for the part storage, you +// should call the ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) action and +// ensure that the parts list is empty. For information about permissions required +// to use the multipart upload, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The +// following operations are related to AbortMultipartUpload: +// +// * +// CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * +// CompleteMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * +// ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * +// ListMultipartUploads +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { + if params == nil { + params = &AbortMultipartUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AbortMultipartUpload", params, optFns, c.addOperationAbortMultipartUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AbortMultipartUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AbortMultipartUploadInput struct { + + // The bucket name to which the upload was taking place. When using this action + // with an access point, you must direct requests to the access point hostname. The + // access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key of the object for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // Upload ID that identifies the multipart upload. + // + // This member is required. + UploadId *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +type AbortMultipartUploadOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpAbortMultipartUpload{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpAbortMultipartUpload{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAbortMultipartUpload(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addAbortMultipartUploadUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAbortMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "AbortMultipartUpload", + } +} + +// getAbortMultipartUploadBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getAbortMultipartUploadBucketMember(input interface{}) (*string, bool) { + in := input.(*AbortMultipartUploadInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func 
addAbortMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getAbortMultipartUploadBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go new file mode 100644 index 000000000..95ff64968 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go @@ -0,0 +1,436 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Completes a multipart upload by assembling previously uploaded parts. You first +// initiate the multipart upload and then upload all parts using the UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation. +// After successfully uploading all relevant parts of an upload, you call this +// action to complete the upload. Upon receiving this request, Amazon S3 +// concatenates all the parts in ascending order by part number to create a new +// object. In the Complete Multipart Upload request, you must provide the parts +// list. You must ensure that the parts list is complete. This action concatenates +// the parts that you provide in the list. For each part in the list, you must +// provide the part number and the ETag value, returned after that part was +// uploaded. Processing of a Complete Multipart Upload request could take several +// minutes to complete. After Amazon S3 begins processing the request, it sends an +// HTTP response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. Because a request could fail after the initial 200 +// OK response has been sent, it is important that you check the response body to +// determine whether the request succeeded. Note that if CompleteMultipartUpload +// fails, applications should be prepared to retry the failed requests. For more +// information, see Amazon S3 Error Best Practices +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). You +// cannot use Content-Type: application/x-www-form-urlencoded with Complete +// Multipart Upload requests. Also, if you do not provide a Content-Type header, +// CompleteMultipartUpload returns a 200 OK response. For more information about +// multipart uploads, see Uploading Objects Using Multipart Upload +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). 
For +// information about permissions required to use the multipart upload API, see +// Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// CompleteMultipartUpload has the following special errors: +// +// * Error code: +// EntityTooSmall +// +// * Description: Your proposed upload is smaller than the minimum +// allowed object size. Each part must be at least 5 MB in size, except the last +// part. +// +// * 400 Bad Request +// +// * Error code: InvalidPart +// +// * Description: One or more +// of the specified parts could not be found. The part might not have been +// uploaded, or the specified entity tag might not have matched the part's entity +// tag. +// +// * 400 Bad Request +// +// * Error code: InvalidPartOrder +// +// * Description: The list +// of parts was not in ascending order. The parts list must be specified in order +// by part number. +// +// * 400 Bad Request +// +// * Error code: NoSuchUpload +// +// * Description: +// The specified multipart upload does not exist. The upload ID might be invalid, +// or the multipart upload might have been aborted or completed. +// +// * 404 Not +// Found +// +// The following operations are related to CompleteMultipartUpload: +// +// * +// CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * +// AbortMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * +// ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * +// ListMultipartUploads +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { + if params == nil { + params = &CompleteMultipartUploadInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CompleteMultipartUpload", params, optFns, c.addOperationCompleteMultipartUploadMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CompleteMultipartUploadOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CompleteMultipartUploadInput struct { + + // Name of the bucket to which the multipart upload was initiated. When using this + // action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. 
For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // ID for the initiated multipart upload. + // + // This member is required. + UploadId *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The container for the multipart upload request information. + MultipartUpload *types.CompletedMultipartUpload + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string + + // The server-side encryption (SSE) customer managed key. This parameter is needed + // only when the object was created using a checksum algorithm. 
For more + // information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKey *string + + // The MD5 server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string + + noSmithyDocumentSerde +} + +type CompleteMultipartUploadOutput struct { + + // The name of the bucket that contains the newly created object. Does not return + // the access point ARN or access point alias if used. When using this action with + // an access point, you must direct requests to the access point hostname. The + // access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + Bucket *string + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. 
For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is an + // opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will contain + // one or more nonhexadecimal characters and/or will consist of less than 32 or + // more than 32 hexadecimal digits. For more information about how the entity tag + // is calculated, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ETag *string + + // If the object expiration is configured, this will contain the expiration date + // (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. + Expiration *string + + // The object key of the newly created object. + Key *string + + // The URI that identifies the newly created object. + Location *string + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + SSEKMSKeyId *string + + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an Amazon Web Services KMS key in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. + ServerSideEncryption types.ServerSideEncryption + + // Version ID of the newly created object, in case the bucket has versioning turned + // on. + VersionId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpCompleteMultipartUpload{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpCompleteMultipartUpload{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteMultipartUpload(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addCompleteMultipartUploadUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCompleteMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "CompleteMultipartUpload", + } +} + +// getCompleteMultipartUploadBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getCompleteMultipartUploadBucketMember(input interface{}) (*string, bool) { + in := input.(*CompleteMultipartUploadInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addCompleteMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCompleteMultipartUploadBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: 
options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go new file mode 100644 index 000000000..477900778 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go @@ -0,0 +1,574 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Creates a copy of an object that is already stored in Amazon S3. You can store +// individual objects of up to 5 TB in Amazon S3. You create a copy of your object +// up to 5 GB in size in a single atomic action using this API. However, to copy an +// object greater than 5 GB, you must use the multipart upload Upload Part - Copy +// (UploadPartCopy) API. For more information, see Copy Object Using the REST +// Multipart Upload API +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// All copy requests must be authenticated. Additionally, you must have read access +// to the source object and write access to the destination bucket. For more +// information, see REST Authentication +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). Both +// the Region that you want to copy the object from and the Region that you want to +// copy the object to must be enabled for your account. A copy request might return +// an error when Amazon S3 receives the copy request or while Amazon S3 is copying +// the files. If the error occurs before the copy action starts, you receive a +// standard Amazon S3 error. If the error occurs during the copy operation, the +// error response is embedded in the 200 OK response. This means that a 200 OK +// response can contain either a success or an error. Design your application to +// parse the contents of the response and handle it appropriately. If the copy is +// successful, you receive a response with information about the copied object. If +// the request is an HTTP 1.1 request, the response is chunk encoded. If it were +// not, it would not contain the content-length, and you would need to read the +// entire body. The copy request charge is based on the storage class and Region +// that you specify for the destination object. For pricing information, see Amazon +// S3 pricing (http://aws.amazon.com/s3/pricing/). Amazon S3 transfer acceleration +// does not support cross-Region copies. If you request a cross-Region copy using a +// transfer acceleration endpoint, you get a 400 Bad Request error. For more +// information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// Metadata When copying an object, you can preserve all metadata (default) or +// specify new metadata. However, the ACL is not preserved and is set to private +// for the user making the request. To override the default ACL setting, specify a +// new ACL when generating a copy request. 
For more information, see Using ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). To +// specify whether you want the object metadata copied from the source object or +// replaced with metadata provided in the request, you can optionally add the +// x-amz-metadata-directive header. When you grant permissions, you can use the +// s3:x-amz-metadata-directive condition key to enforce certain metadata behavior +// when objects are uploaded. For more information, see Specifying Conditions in a +// Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) in +// the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition +// keys, see Actions, Resources, and Condition Keys for Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). +// x-amz-copy-source-if Headers To only copy an object under certain conditions, +// such as whether the Etag matches or whether the object was modified before or +// after a specified date, use the following request parameters: +// +// * +// x-amz-copy-source-if-match +// +// * x-amz-copy-source-if-none-match +// +// * +// x-amz-copy-source-if-unmodified-since +// +// * x-amz-copy-source-if-modified-since +// +// If +// both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// 200 OK and copies the data: +// +// * x-amz-copy-source-if-match condition evaluates to +// true +// +// * x-amz-copy-source-if-unmodified-since condition evaluates to false +// +// If +// both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// the 412 Precondition Failed response code: +// +// * x-amz-copy-source-if-none-match +// condition evaluates to false +// +// * x-amz-copy-source-if-modified-since condition +// evaluates to true +// +// All headers with the x-amz- prefix, including +// x-amz-copy-source, must be signed. Server-side encryption When you perform a +// CopyObject operation, you can optionally use the appropriate encryption-related +// headers to encrypt the object using server-side encryption with Amazon Web +// Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided +// encryption key. With server-side encryption, Amazon S3 encrypts your data as it +// writes it to disks in its data centers and decrypts the data when you access it. +// For more information about server-side encryption, see Using Server-Side +// Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). If +// a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. +// For more information, see Amazon S3 Bucket Keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon +// S3 User Guide. Access Control List (ACL)-Specific Request Headers When copying +// an object, you can optionally use headers to grant ACL-based permissions. By +// default, all objects are private. Only the owner has full access control. When +// adding a new object, you can grant permissions to individual Amazon Web Services +// accounts or to predefined groups defined by Amazon S3. These permissions are +// then added to the ACL on the object. 
For more information, see Access Control +// List (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing +// ACLs Using the REST API +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). If +// the bucket that you're copying objects to uses the bucket owner enforced setting +// for S3 Object Ownership, ACLs are disabled and no longer affect permissions. +// Buckets that use this setting only accept PUT requests that don't specify an ACL +// or PUT requests that specify bucket owner full control ACLs, such as the +// bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed +// in the XML format. For more information, see Controlling ownership of objects +// and disabling ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced +// setting for Object Ownership, all objects written to the bucket by any account +// will be owned by the bucket owner. Checksums When copying an object, if it has a +// checksum, that checksum will be copied to the new object by default. When you +// copy the object over, you may optionally specify a different checksum algorithm +// to use with the x-amz-checksum-algorithm header. Storage Class Options You can +// use the CopyObject action to change the storage class of an object that is +// already stored in Amazon S3 using the StorageClass parameter. For more +// information, see Storage Classes +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in +// the Amazon S3 User Guide. Versioning By default, x-amz-copy-source identifies +// the current version of an object to copy. If the current version is a delete +// marker, Amazon S3 behaves as if the object was deleted. To copy a different +// version, use the versionId subresource. If you enable versioning on the target +// bucket, Amazon S3 generates a unique version ID for the object being copied. +// This version ID is different from the version ID of the source object. Amazon S3 +// returns the version ID of the copied object in the x-amz-version-id response +// header in the response. If you do not enable versioning or suspend it on the +// target bucket, the version ID that Amazon S3 generates is always null. If the +// source object's storage class is GLACIER, you must restore a copy of this object +// before you can use it as a source object for the copy operation. For more +// information, see RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). The +// following operations are related to CopyObject: +// +// * PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// For more +// information, see Copying Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { + if params == nil { + params = &CopyObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CopyObject", params, optFns, c.addOperationCopyObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CopyObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CopyObjectInput struct { + + // The name of the destination bucket. 
When using this action with an access point, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Specifies the source object for the copy operation. You specify the value in one + // of two formats, depending on whether you want to access the source object + // through an access point + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + // + // * + // For objects not accessed through an access point, specify the name of the source + // bucket and the key of the source object, separated by a slash (/). For example, + // to copy the object reports/january.pdf from the bucket awsexamplebucket, use + // awsexamplebucket/reports/january.pdf. The value must be URL-encoded. + // + // * For + // objects accessed through access points, specify the Amazon Resource Name (ARN) + // of the object as accessed through the access point, in the format + // arn:aws:s3:::accesspoint//object/. For example, to copy the object + // reports/january.pdf through access point my-access-point owned by account + // 123456789012 in Region us-west-2, use the URL encoding of + // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. + // The value must be URL encoded. Amazon S3 supports copy operations using access + // points only when the source and destination buckets are in the same Amazon Web + // Services Region. Alternatively, for objects accessed through Amazon S3 on + // Outposts, specify the ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/. For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 in + // Region us-west-2, use the URL encoding of + // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. + // The value must be URL-encoded. + // + // To copy a specific version of an object, append + // ?versionId= to the value (for example, + // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If you don't specify a version ID, Amazon S3 copies the latest version of the + // source object. + // + // This member is required. + CopySource *string + + // The key of the destination object. + // + // This member is required. + Key *string + + // The canned ACL to apply to the object. This action is not supported by Amazon S3 + // on Outposts. 
+ ACL types.ObjectCannedACL + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true + // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. + // Specifying this header with a COPY action doesn’t affect bucket-level settings + // for S3 Bucket Key. + BucketKeyEnabled bool + + // Specifies caching behavior along the request/reply chain. + CacheControl *string + + // Indicates the algorithm you want Amazon S3 to use to create the checksum for the + // object. For more information, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Specifies presentational information for the object. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // A standard MIME type describing the format of the object data. + ContentType *string + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time + + // Copies the object if its entity tag (ETag) is different than the specified ETag. + CopySourceIfNoneMatch *string + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). + CopySourceSSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one that + // was used when the source object was created. + CopySourceSSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string + + // The account ID of the expected destination bucket owner. If the destination + // bucket is owned by a different account, the request fails with the HTTP status + // code 403 Forbidden (access denied). + ExpectedBucketOwner *string + + // The account ID of the expected source bucket owner. If the source bucket is + // owned by a different account, the request fails with the HTTP status code 403 + // Forbidden (access denied). + ExpectedSourceBucketOwner *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This + // action is not supported by Amazon S3 on Outposts. + GrantFullControl *string + + // Allows grantee to read the object data and its metadata. This action is not + // supported by Amazon S3 on Outposts. + GrantRead *string + + // Allows grantee to read the object ACL. This action is not supported by Amazon S3 + // on Outposts. + GrantReadACP *string + + // Allows grantee to write the ACL for the applicable object. This action is not + // supported by Amazon S3 on Outposts. 
+ GrantWriteACP *string
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Specifies whether the metadata is copied from the source object or replaced with
+ // metadata provided in the request.
+ MetadataDirective types.MetadataDirective
+
+ // Specifies whether you want to apply a legal hold to the copied object.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // The Object Lock mode that you want to apply to the copied object.
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when you want the copied object's Object Lock to expire.
+ ObjectLockRetainUntilDate *time.Time
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+
+ // Specifies the Amazon Web Services KMS Encryption Context to use for object
+ // encryption. The value of this header is a base64-encoded UTF-8 string holding
+ // JSON with the encryption context key-value pairs.
+ SSEKMSEncryptionContext *string
+
+ // Specifies the Amazon Web Services KMS key ID to use for object encryption. All
+ // GET and PUT requests for an object protected by Amazon Web Services KMS will
+ // fail if not made via SSL or using SigV4. For information about configuring using
+ // any of the officially supported Amazon Web Services SDKs and Amazon Web Services
+ // CLI, see Specifying the Signature Version in Request Authentication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // in the Amazon S3 User Guide.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high
+ // availability. Depending on performance needs, you can specify a different
+ // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
+ // more information, see Storage Classes
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in
+ // the Amazon S3 User Guide.
+ StorageClass types.StorageClass
+
+ // The tag-set for the destination object; this value must be used in
+ // conjunction with the TaggingDirective. The tag-set must be encoded as URL Query
+ // parameters.
+ Tagging *string
+
+ // Specifies whether the object tag-set is copied from the source object or
+ // replaced with the tag-set provided in the request.
+ TaggingDirective types.TaggingDirective
+
+ // If the bucket is configured as a website, redirects requests for this object to
+ // another object in the same bucket or to an external URL. Amazon S3 stores the
+ // value of this header in the object metadata.
+ WebsiteRedirectLocation *string
+
+ noSmithyDocumentSerde
+}
+
+type CopyObjectOutput struct {
+
+ // Indicates whether the copied object uses an S3 Bucket Key for server-side
+ // encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Container for all response elements.
+ CopyObjectResult *types.CopyObjectResult
+
+ // Version of the copied object in the destination bucket.
+ CopySourceVersionId *string
+
+ // If the object expiration is configured, the response includes this header.
+ Expiration *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the Amazon Web Services KMS Encryption Context to use for
+ // object encryption. The value of this header is a base64-encoded UTF-8 string
+ // holding JSON with the encryption context key-value pairs.
+ SSEKMSEncryptionContext *string
+
+ // If present, specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed key that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Version ID of the newly created copy.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCopyObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCopyObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCopyObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCopyObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addCopyObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opCopyObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "CopyObject",
+ }
+}
+
+// getCopyObjectBucketMember returns a pointer to string denoting a provided bucket
+// member value and a boolean indicating if the input has a modeled bucket name.
+func getCopyObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CopyObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCopyObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCopyObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints:
options.DisableMultiRegionAccessPoints,
+ })
+} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go new file mode 100644 index 000000000..27322a2c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go @@ -0,0 +1,318 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a new S3 bucket. To create a bucket, you must register with Amazon S3
+// and have a valid Amazon Web Services Access Key ID to authenticate requests.
+// Anonymous requests are never allowed to create buckets. By creating the bucket,
+// you become the bucket owner. Not every string is an acceptable bucket name. For
+// information about bucket naming restrictions, see Bucket naming rules
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
+// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html).
+// By default, the bucket is created in the US East (N. Virginia) Region. You can
+// optionally specify a Region in the request body. You might choose a Region to
+// optimize latency, minimize costs, or address regulatory requirements. For
+// example, if you reside in Europe, you will probably find it advantageous to
+// create buckets in the Europe (Ireland) Region. For more information, see
+// Accessing a bucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
+// If you send your create bucket request to the s3.amazonaws.com endpoint, the
+// request goes to the us-east-1 Region. Accordingly, the signature calculations in
+// Signature Version 4 must use us-east-1 as the Region, even if the location
+// constraint in the request specifies another Region where the bucket is to be
+// created. If you create a bucket in a Region other than US East (N. Virginia),
+// your application must be able to handle a 307 redirect. For more information,
+// see Virtual hosting of buckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). Access
+// control lists (ACLs) When creating a bucket using this operation, you can
+// optionally configure the bucket ACL to specify the accounts or groups that
+// should be granted specific permissions on the bucket. If your CreateBucket
+// request sets bucket owner enforced for S3 Object Ownership and specifies a
+// bucket ACL that provides access to an external Amazon Web Services account, your
+// request fails with a 400 error and returns the
+// InvalidBucketAclWithObjectOwnership error code. For more information, see
+// Controlling object ownership
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// in the Amazon S3 User Guide. There are two ways to grant the appropriate
+// permissions using the request headers.
+//
+// * Specify a canned ACL using the
+// x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as
+// canned ACLs. Each canned ACL has a predefined set of grantees and permissions.
+// For more information, see Canned ACL
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// *
+// Specify access permissions explicitly using the x-amz-grant-read,
+// x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and
+// x-amz-grant-full-control headers. These headers map to the set of permissions
+// Amazon S3 supports in an ACL. For more information, see Access control list
+// (ACL) overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html). You
+// specify each grantee as a type=value pair, where the type is one of the
+// following:
+//
+// * id – if the value specified is the canonical user ID of an Amazon
+// Web Services account
+//
+// * uri – if you are granting permissions to a predefined
+// group
+//
+// * emailAddress – if the value specified is the email address of an Amazon
+// Web Services account Using email addresses to specify a grantee is only
+// supported in the following Amazon Web Services Regions:
+//
+// * US East (N.
+// Virginia)
+//
+// * US West (N. California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific
+// (Singapore)
+//
+// * Asia Pacific (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe
+// (Ireland)
+//
+// * South America (São Paulo)
+//
+// For a list of all the Amazon S3
+// supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
+// Amazon Web Services General Reference.
+//
+// For example, the following
+// x-amz-grant-read header grants the Amazon Web Services accounts identified by
+// account IDs permissions to read object data and its metadata: x-amz-grant-read:
+// id="11112222333", id="444455556666"
+//
+// You can use either a canned ACL or specify
+// access permissions explicitly. You cannot do both. Permissions In addition to
+// s3:CreateBucket, the following permissions are required when your CreateBucket
+// request includes specific headers:
+//
+// * ACLs - If your CreateBucket request specifies ACL
+// permissions and the ACL is public-read, public-read-write, authenticated-read,
+// or if you specify access permissions explicitly through any other ACL, both
+// s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the
+// CreateBucket request is private or doesn't specify any ACLs, only
+// s3:CreateBucket permission is needed.
+//
+// * Object Lock - If
+// ObjectLockEnabledForBucket is set to true in your CreateBucket request,
+// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are
+// required.
+//
+// * S3 Object Ownership - If your CreateBucket request includes the
+// x-amz-object-ownership header, s3:PutBucketOwnershipControls permission is
+// required.
+// +// The following operations are related to CreateBucket: +// +// * PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// DeleteBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { + if params == nil { + params = &CreateBucketInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateBucket", params, optFns, c.addOperationCreateBucketMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateBucketOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateBucketInput struct { + + // The name of the bucket to create. + // + // This member is required. + Bucket *string + + // The canned ACL to apply to the bucket. + ACL types.BucketCannedACL + + // The configuration information for the bucket. + CreateBucketConfiguration *types.CreateBucketConfiguration + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string + + // Allows grantee to list the objects in the bucket. + GrantRead *string + + // Allows grantee to read the bucket ACL. + GrantReadACP *string + + // Allows grantee to create new objects in the bucket. For the bucket and object + // owners of existing objects, also allows deletions and overwrites of those + // objects. + GrantWrite *string + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string + + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + ObjectLockEnabledForBucket bool + + // The container element for object ownership for a bucket's ownership controls. + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the + // bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. ObjectWriter - The uploading account will own the object if the + // object is uploaded with the bucket-owner-full-control canned ACL. + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that don't + // specify an ACL or bucket owner full control ACLs, such as the + // bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed + // in the XML format. + ObjectOwnership types.ObjectOwnership + + noSmithyDocumentSerde +} + +type CreateBucketOutput struct { + + // A forward slash followed by the name of the bucket. + Location *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateBucketValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucket(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addCreateBucketUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opCreateBucket(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "CreateBucket",
+ }
+}
+
+// getCreateBucketBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getCreateBucketBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CreateBucketInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCreateBucketUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCreateBucketBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: false,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+} diff --git
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go new file mode 100644 index 000000000..825feebd2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go @@ -0,0 +1,604 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// This action initiates a multipart upload and returns an upload ID. This upload
+// ID is used to associate all of the parts in the specific multipart upload. You
+// specify this upload ID in each of your subsequent upload part requests (see
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). You also
+// include this upload ID in the final request to either complete or abort the
+// multipart upload request. For more information about multipart uploads, see
+// Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). If you have
+// configured a lifecycle rule to abort incomplete multipart uploads, the upload
+// must complete within the number of days specified in the bucket lifecycle
+// configuration. Otherwise, the incomplete multipart upload becomes eligible for
+// an abort action and Amazon S3 aborts the multipart upload. For more information,
+// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+// For information about the permissions required to use the multipart upload API,
+// see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). For
+// request signing, multipart upload is just a series of regular requests. You
+// initiate a multipart upload, send one or more requests to upload parts, and then
+// complete the multipart upload process. You sign each request individually. There
+// is nothing special about signing multipart upload requests. For more information
+// about signing, see Authenticating Requests (Amazon Web Services Signature
+// Version 4)
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
+// After you initiate a multipart upload and upload one or more parts, to stop
+// being charged for storing the uploaded parts, you must either complete or abort
+// the multipart upload. Amazon S3 frees up the space used to store the parts and
+// stops charging you for storing them only after you either complete or abort a
+// multipart upload. You can optionally request server-side encryption. For
+// server-side encryption, Amazon S3 encrypts your data as it writes it to disks in
+// its data centers and decrypts it when you access it. You can provide your own
+// encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed
+// encryption keys.
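As a rough sketch of the initiate/upload/complete flow described above, assuming a client built from this package; the bucket and key names are illustrative placeholders, and a production version would split the data into parts of at least 5 MiB (except the last) and abort the upload on failure:

	package main

	import (
		"bytes"
		"context"
		"log"

		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/config"
		"github.com/aws/aws-sdk-go-v2/service/s3"
		"github.com/aws/aws-sdk-go-v2/service/s3/types"
	)

	func main() {
		ctx := context.TODO()
		cfg, err := config.LoadDefaultConfig(ctx)
		if err != nil {
			log.Fatal(err)
		}
		client := s3.NewFromConfig(cfg)
		bucket, key := aws.String("my-bucket"), aws.String("big-object") // placeholders

		// Initiate: S3 returns the upload ID that ties all the parts together.
		up, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
			Bucket: bucket,
			Key:    key,
		})
		if err != nil {
			log.Fatal(err)
		}

		// Upload a single part under that upload ID; each part returns an ETag.
		part, err := client.UploadPart(ctx, &s3.UploadPartInput{
			Bucket:     bucket,
			Key:        key,
			UploadId:   up.UploadId,
			PartNumber: 1,
			Body:       bytes.NewReader([]byte("example payload")),
		})
		if err != nil {
			log.Fatal(err)
		}

		// Complete: list every part number and ETag so S3 can assemble the object.
		_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
			Bucket:   bucket,
			Key:      key,
			UploadId: up.UploadId,
			MultipartUpload: &types.CompletedMultipartUpload{
				Parts: []types.CompletedPart{{ETag: part.ETag, PartNumber: 1}},
			},
		})
		if err != nil {
			log.Fatal(err)
		}
	}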
If you choose to provide your own encryption key, the request
+// headers you provide in UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and
+// UploadPartCopy
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// requests must match the headers you used in the request to initiate the upload
+// by using CreateMultipartUpload. To perform a multipart upload with encryption
+// using an Amazon Web Services KMS key, the requester must have permission to the
+// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are
+// required because Amazon S3 must decrypt and read data from the encrypted file
+// parts before it completes the multipart upload. For more information, see
+// Multipart upload API and permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
+// in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user
+// or role is in the same Amazon Web Services account as the KMS key, then you must
+// have these permissions on the key policy. If your IAM user or role belongs to a
+// different account than the key, then you must have the permissions on both the
+// key policy and your IAM user or role. For more information, see Protecting Data
+// Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+// Access Permissions When copying an object, you can optionally specify the
+// accounts or groups that should be granted specific permissions on the new
+// object. There are two ways to grant the permissions using the request
+// headers:
+//
+// * Specify a canned ACL with the x-amz-acl request header. For more
+// information, see Canned ACL
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// *
+// Specify access permissions explicitly with the x-amz-grant-read,
+// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control
+// headers. These parameters map to the set of permissions that Amazon S3 supports
+// in an ACL. For more information, see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+//
+// You can
+// use either a canned ACL or specify access permissions explicitly. You cannot do
+// both. Server-Side-Encryption-Specific Request Headers You can optionally tell
+// Amazon S3 to encrypt data at rest using server-side encryption. Server-side
+// encryption is for data encryption at rest. Amazon S3 encrypts your data as it
+// writes it to disks in its data centers and decrypts it when you access it. The
+// option you use depends on whether you want to use Amazon Web Services managed
+// encryption keys or provide your own encryption key.
+//
+// * Use encryption keys
+// managed by Amazon S3 or customer managed key stored in Amazon Web Services Key
+// Management Service (Amazon Web Services KMS) – If you want Amazon Web Services
+// to manage the keys used to encrypt data, specify the following headers in the
+// request.
+//
+// * x-amz-server-side-encryption
+//
+// *
+// x-amz-server-side-encryption-aws-kms-key-id
+//
+// *
+// x-amz-server-side-encryption-context
+//
+// If you specify
+// x-amz-server-side-encryption:aws:kms, but don't provide
+// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web
+// Services managed key in Amazon Web Services KMS to protect the data.
All GET and +// PUT requests for an object protected by Amazon Web Services KMS fail if you +// don't make them with SSL or by using SigV4. For more information about +// server-side encryption with KMS key (SSE-KMS), see Protecting Data Using +// Server-Side Encryption with KMS keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * +// Use customer-provided encryption keys – If you want to manage your own +// encryption keys, provide all the following headers in the request. +// +// * +// x-amz-server-side-encryption-customer-algorithm +// +// * +// x-amz-server-side-encryption-customer-key +// +// * +// x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about +// server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using +// Server-Side Encryption with KMS keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List +// (ACL)-Specific Request Headers You also can use the following access +// control–related headers with this operation. By default, all objects are +// private. Only the owner has full access control. When adding a new object, you +// can grant permissions to individual Amazon Web Services accounts or to +// predefined groups defined by Amazon S3. These permissions are then added to the +// access control list (ACL) on the object. For more information, see Using ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). With +// this operation, you can grant access permissions using one of the following two +// methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of +// predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of +// grantees and permissions. For more information, see Canned ACL +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * +// Specify access permissions explicitly — To explicitly grant access permissions +// to specific Amazon Web Services accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an ACL. For +// more information, see Access Control List (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). In the +// header, you specify a list of grantees who get the specific permission. To grant +// permissions explicitly, use: +// +// * x-amz-grant-read +// +// * x-amz-grant-write +// +// * +// x-amz-grant-read-acp +// +// * x-amz-grant-write-acp +// +// * x-amz-grant-full-control +// +// You +// specify each grantee as a type=value pair, where the type is one of the +// following: +// +// * id – if the value specified is the canonical user ID of an Amazon +// Web Services account +// +// * uri – if you are granting permissions to a predefined +// group +// +// * emailAddress – if the value specified is the email address of an Amazon +// Web Services account Using email addresses to specify a grantee is only +// supported in the following Amazon Web Services Regions: +// +// * US East (N. +// Virginia) +// +// * US West (N. 
California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific
+// (Singapore)
+//
+// * Asia Pacific (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe
+// (Ireland)
+//
+// * South America (São Paulo)
+//
+// For a list of all the Amazon S3
+// supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
+// Amazon Web Services General Reference.
+//
+// For example, the following
+// x-amz-grant-read header grants the Amazon Web Services accounts identified by
+// account IDs permissions to read object data and its metadata: x-amz-grant-read:
+// id="11112222333", id="444455556666"
+//
+// The following operations are related to
+// CreateMultipartUpload:
+//
+// * UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// ListMultipartUploads
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) {
+ if params == nil {
+ params = &CreateMultipartUploadInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateMultipartUpload", params, optFns, c.addOperationCreateMultipartUploadMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateMultipartUploadOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateMultipartUploadInput struct {
+
+ // The name of the bucket to which to initiate the upload. When using this action
+ // with an access point, you must direct requests to the access point hostname. The
+ // access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using access points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts,
+ // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
+ // hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this action with S3 on Outposts through the Amazon Web Services SDKs, you
+ // provide the Outposts bucket ARN in place of the bucket name. For more
+ // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
+ // Amazon S3 User Guide.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the multipart upload is to be initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // The canned ACL to apply to the object. This action is not supported by Amazon S3
+ // on Outposts.
+ ACL types.ObjectCannedACL
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+ // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
+ // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
+ // Specifying this header with an object action doesn’t affect bucket-level
+ // settings for S3 Bucket Key.
+ BucketKeyEnabled bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // Indicates the algorithm you want Amazon S3 to use to create the checksum for the
+ // object. For more information, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This
+ // action is not supported by Amazon S3 on Outposts.
+ GrantFullControl *string
+
+ // Allows grantee to read the object data and its metadata. This action is not
+ // supported by Amazon S3 on Outposts.
+ GrantRead *string
+
+ // Allows grantee to read the object ACL. This action is not supported by Amazon S3
+ // on Outposts.
+ GrantReadACP *string
+
+ // Allows grantee to write the ACL for the applicable object. This action is not
+ // supported by Amazon S3 on Outposts.
+ GrantWriteACP *string
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Specifies whether you want to apply a legal hold to the uploaded object.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // Specifies the Object Lock mode that you want to apply to the uploaded object.
+ ObjectLockMode types.ObjectLockMode
+
+ // Specifies the date and time when you want the Object Lock to expire.
+ ObjectLockRetainUntilDate *time.Time
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. + SSEKMSEncryptionContext *string + + // Specifies the ID of the symmetric customer managed key to use for object + // encryption. All GET and PUT requests for an object protected by Amazon Web + // Services KMS will fail if not made via SSL or using SigV4. For information about + // configuring using any of the officially supported Amazon Web Services SDKs and + // Amazon Web Services CLI, see Specifying the Signature Version in Request + // Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 User Guide. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high + // availability. Depending on performance needs, you can specify a different + // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For + // more information, see Storage Classes + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in + // the Amazon S3 User Guide. + StorageClass types.StorageClass + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + Tagging *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. + WebsiteRedirectLocation *string + + noSmithyDocumentSerde +} + +type CreateMultipartUploadOutput struct { + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object name + // in the request, the response includes this header. The header indicates when the + // initiated multipart upload becomes eligible for an abort operation. For more + // information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle + // Policy + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // The response also includes the x-amz-abort-rule-id header that provides the ID + // of the lifecycle configuration rule that defines this action. + AbortDate *time.Time + + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + AbortRuleId *string + + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. When using this + // action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + Bucket *string + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Object key for which the multipart upload was initiated. + Key *string + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string + + // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. + SSEKMSEncryptionContext *string + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // ID for the initiated multipart upload. + UploadId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCreateMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMultipartUpload(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addCreateMultipartUploadUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opCreateMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "CreateMultipartUpload",
+ }
+}
+
+// getCreateMultipartUploadBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getCreateMultipartUploadBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CreateMultipartUploadInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCreateMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCreateMultipartUploadBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go new file mode 100644 index 000000000..6bfb43c22 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the S3 bucket. All objects (including all object versions and delete +// markers) in the bucket must be deleted before the bucket itself can be deleted. +// Related Resources +// +// * CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { + if params == nil { + params = &DeleteBucketInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucket", params, optFns, c.addOperationDeleteBucketMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketInput struct { + + // Specifies the bucket being deleted. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucket{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucket{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucket(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucket(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucket", + } +} + +// getDeleteBucketBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getDeleteBucketBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: false, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignDeleteBucket is used to 
generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignDeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &DeleteBucketInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "DeleteBucket", params, clientOptFns, + c.client.addOperationDeleteBucketMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addDeleteBucketPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addDeleteBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go new file mode 100644 index 000000000..e016d9763 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -0,0 +1,189 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). To use this operation, you must have permissions to perform +// the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – +// Storage Class Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). 
+// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// * +// GetBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * +// ListBucketAnalyticsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * +// PutBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketAnalyticsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketAnalyticsConfiguration", params, optFns, c.addOperationDeleteBucketAnalyticsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketAnalyticsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketAnalyticsConfigurationInput struct { + + // The name of the bucket from which an analytics configuration is deleted. + // + // This member is required. + Bucket *string + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketAnalyticsConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketAnalyticsConfiguration", + } +} + +// getDeleteBucketAnalyticsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getDeleteBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketAnalyticsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketAnalyticsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go new file mode 100644 index 000000000..79045abe2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go @@ -0,0 +1,172 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the cors configuration information set for the bucket. To use this +// operation, you must have permission to perform the s3:PutBucketCORS action. The +// bucket owner has this permission by default and can grant this permission to +// others. For information about cors, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 +// User Guide. 
Related Resources: +// +// * PutBucketCors +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * +// RESTOPTIONSobject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { + if params == nil { + params = &DeleteBucketCorsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketCors", params, optFns, c.addOperationDeleteBucketCorsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketCorsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketCorsInput struct { + + // Specifies the bucket whose cors configuration is being deleted. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketCorsOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketCors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketCors{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketCors(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketCorsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opDeleteBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketCors", + } +} + +// getDeleteBucketCorsBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getDeleteBucketCorsBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketCorsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketCorsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go new file mode 100644 index 000000000..9c3201f6d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go @@ -0,0 +1,180 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the DELETE action removes default encryption from the +// bucket. For information about the Amazon S3 default encryption feature, see +// Amazon S3 Default Bucket Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the +// Amazon S3 User Guide. To use this operation, you must have permissions to +// perform the s3:PutEncryptionConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. 
Related Resources +// +// * PutBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * +// GetBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { + if params == nil { + params = &DeleteBucketEncryptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketEncryption", params, optFns, c.addOperationDeleteBucketEncryptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketEncryptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketEncryptionInput struct { + + // The name of the bucket containing the server-side encryption configuration to + // delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketEncryptionOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketEncryption{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketEncryption{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketEncryption(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketEncryptionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = 
addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketEncryption", + } +} + +// getDeleteBucketEncryptionBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketEncryptionBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketEncryptionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketEncryptionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go new file mode 100644 index 000000000..139dd78a0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. The +// S3 Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). 
+// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// * +// GetBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * +// PutBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * +// ListBucketIntelligentTieringConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketIntelligentTieringConfiguration", params, optFns, c.addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketIntelligentTieringConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketIntelligentTieringConfigurationInput struct { + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // This member is required. + Id *string + + noSmithyDocumentSerde +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketIntelligentTieringConfiguration", + } +} + +// getDeleteBucketIntelligentTieringConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member valueand a boolean indicating if the +// input has a modeled bucket name, +func getDeleteBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketIntelligentTieringConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketIntelligentTieringConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go new file mode 100644 index 000000000..32fe81f12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes an inventory configuration (identified by the inventory ID) from the +// bucket. To use this operation, you must have permissions to perform the +// s3:PutInventoryConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. 
For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// Operations related to DeleteBucketInventoryConfiguration include: +// +// * +// GetBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * +// PutBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// * +// ListBucketInventoryConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketInventoryConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketInventoryConfiguration", params, optFns, c.addOperationDeleteBucketInventoryConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketInventoryConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketInventoryConfigurationInput struct { + + // The name of the bucket containing the inventory configuration to delete. + // + // This member is required. + Bucket *string + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketInventoryConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketInventoryConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketInventoryConfiguration", + } +} + +// getDeleteBucketInventoryConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getDeleteBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketInventoryConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketInventoryConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + 
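+ // SupportsAccelerate records whether this operation may use Transfer
+ // Acceleration endpoints at all; TargetS3ObjectLambda stays false because this
+ // is a plain bucket-subresource operation rather than an S3 Object Lambda call.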
TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go new file mode 100644 index 000000000..c110bfb44 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go @@ -0,0 +1,178 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes +// all the lifecycle configuration rules in the lifecycle subresource associated +// with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the deleted +// lifecycle configuration. To use this operation, you must have permission to +// perform the s3:PutLifecycleConfiguration action. By default, the bucket owner +// has this permission and the bucket owner can grant this permission to others. +// There is usually some time lag before lifecycle configuration deletion is fully +// propagated to all the Amazon S3 systems. For more information about the object +// expiration, see Elements to Describe Lifecycle Actions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// Related actions include: +// +// * PutBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * +// GetBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { + if params == nil { + params = &DeleteBucketLifecycleInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketLifecycle", params, optFns, c.addOperationDeleteBucketLifecycleMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketLifecycleOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketLifecycleInput struct { + + // The bucket name of the lifecycle to delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketLifecycleOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketLifecycle{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketLifecycle{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketLifecycle(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketLifecycleUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketLifecycle(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketLifecycle", + } +} + +// getDeleteBucketLifecycleBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getDeleteBucketLifecycleBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketLifecycleInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketLifecycleUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketLifecycleBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + 
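+ // When DisableMultiRegionAccessPoints is set, requests that supply a
+ // multi-Region access point ARN as the bucket are rejected instead of being
+ // routed to the multi-Region endpoint.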
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go new file mode 100644 index 000000000..cc08f04b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -0,0 +1,194 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. To use this operation, you must have +// permissions to perform the s3:PutMetricsConfiguration action. The bucket owner +// has this permission by default. The bucket owner can grant this permission to +// others. For more information about permissions, see Permissions Related to +// Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * +// GetBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * +// PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * +// ListBucketMetricsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * +// Monitoring Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketMetricsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetricsConfiguration", params, optFns, c.addOperationDeleteBucketMetricsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketMetricsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketMetricsConfigurationInput struct { + + // The name of the bucket containing the metrics configuration to delete. + // + // This member is required. + Bucket *string + + // The ID used to identify the metrics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
+ ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketMetricsConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketMetricsConfiguration", + } +} + +// getDeleteBucketMetricsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getDeleteBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketMetricsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: 
getDeleteBucketMetricsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go new file mode 100644 index 000000000..6186db5e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go @@ -0,0 +1,171 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// The following operations are related to DeleteBucketOwnershipControls: +// +// * +// GetBucketOwnershipControls +// +// * PutBucketOwnershipControls +func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { + if params == nil { + params = &DeleteBucketOwnershipControlsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketOwnershipControls", params, optFns, c.addOperationDeleteBucketOwnershipControlsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketOwnershipControlsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketOwnershipControlsInput struct { + + // The Amazon S3 bucket whose OwnershipControls you want to delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketOwnershipControlsOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketOwnershipControls", + } +} + +// getDeleteBucketOwnershipControlsBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getDeleteBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketOwnershipControlsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketOwnershipControlsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: 
options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go new file mode 100644 index 000000000..618d9bede --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the DELETE action uses the policy subresource to delete +// the policy of a specified bucket. If you are using an identity other than the +// root user of the Amazon Web Services account that owns the bucket, the calling +// identity must have the DeleteBucketPolicy permissions on the specified bucket +// and belong to the bucket owner's account to use this operation. If you don't +// have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied +// error. If you have the correct permissions, but you're not using an identity +// that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not +// Allowed error. As a security precaution, the root user of the Amazon Web +// Services account that owns a bucket can always use this operation, even if the +// policy explicitly denies the root user the ability to perform this action. For +// more information about bucket policies, see Using Bucket Policies and +// User Policies +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The +// following operations are related to DeleteBucketPolicy: +// +// * CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { + if params == nil { + params = &DeleteBucketPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketPolicy", params, optFns, c.addOperationDeleteBucketPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketPolicyInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketPolicyOutput struct { + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketPolicyUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketPolicy", + } +} + +// getDeleteBucketPolicyBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getDeleteBucketPolicyBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketPolicyInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketPolicyBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints:
options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go new file mode 100644 index 000000000..ad2d772d4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the replication configuration from the bucket. To use this operation, +// you must have permissions to perform the s3:PutReplicationConfiguration action. +// The bucket owner has these permissions by default and can grant them to others. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// It can take a while for the deletion of a replication configuration to fully +// propagate. For information about replication configuration, see Replication +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon +// S3 User Guide. The following operations are related to +// DeleteBucketReplication: +// +// * PutBucketReplication +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// +// * +// GetBucketReplication +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { + if params == nil { + params = &DeleteBucketReplicationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketReplication", params, optFns, c.addOperationDeleteBucketReplicationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketReplicationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketReplicationInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketReplicationOutput struct { + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketReplication{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketReplication{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketReplication(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketReplicationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketReplication", + } +} + +// getDeleteBucketReplicationBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getDeleteBucketReplicationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketReplicationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketReplicationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion:
options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go new file mode 100644 index 000000000..063f0bc59 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go @@ -0,0 +1,170 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the tags from the bucket. To use this operation, you must have +// permission to perform the s3:PutBucketTagging action. By default, the bucket +// owner has this permission and can grant this permission to others. The following +// operations are related to DeleteBucketTagging: +// +// * GetBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * +// PutBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { + if params == nil { + params = &DeleteBucketTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketTagging", params, optFns, c.addOperationDeleteBucketTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketTaggingInput struct { + + // The bucket that has the tag set to be removed. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketTaggingOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketTagging", + } +} + +// getDeleteBucketTaggingBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getDeleteBucketTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketTaggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, +
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go new file mode 100644 index 000000000..7eb72b86a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This action removes the website configuration for a bucket. Amazon S3 returns a +// 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 +// response if the bucket specified in the request does not exist. This DELETE +// action requires the S3:DeleteBucketWebsite permission. By default, only the +// bucket owner can delete the website configuration attached to a bucket. However, +// bucket owners can grant other users permission to delete the website +// configuration by writing a bucket policy granting them the +// S3:DeleteBucketWebsite permission. For more information about hosting websites, +// see Hosting Websites on Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). The +// following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// +// * +// PutBucketWebsite +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { + if params == nil { + params = &DeleteBucketWebsiteInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketWebsite", params, optFns, c.addOperationDeleteBucketWebsiteMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketWebsiteOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketWebsiteInput struct { + + // The bucket name for which you want to remove the website configuration. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeleteBucketWebsiteOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketWebsite{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketWebsite{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketWebsite(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteBucketWebsiteUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteBucketWebsite", + } +} + +// getDeleteBucketWebsiteBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getDeleteBucketWebsiteBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketWebsiteInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketWebsiteBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, +
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go new file mode 100644 index 000000000..4b4f12127 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go @@ -0,0 +1,270 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a null +// version, Amazon S3 does not remove any objects but will still respond that the +// command was successful. To remove a specific version, you must be the bucket +// owner and you must use the version Id subresource. Using this subresource +// permanently deletes the version. If the object deleted is a delete marker, +// Amazon S3 sets the response header, x-amz-delete-marker, to true. If the object +// you want to delete is in a bucket where the bucket versioning configuration is +// MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE +// versionId request. Requests that include x-amz-mfa must use HTTPS. For more +// information about MFA Delete, see Using MFA Delete +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). To see +// sample requests that use versioning, see Sample Request +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +// You can delete objects by explicitly calling DELETE Object or configure its +// lifecycle (PutBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// to enable Amazon S3 to remove them for you. If you want to block users or +// accounts from removing or deleting objects from your bucket, you must deny them +// the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration +// actions. The following action is related to DeleteObject: +// +// * PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { + if params == nil { + params = &DeleteObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteObject", params, optFns, c.addOperationDeleteObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteObjectInput struct { + + // The bucket name of the bucket containing the object. When using this action with + // an access point, you must direct requests to the access point hostname. The + // access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key name of the object to delete. + // + // This member is required. + Key *string + + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to + // process this operation. To use this header, you must have the + // s3:BypassGovernanceRetention permission. + BypassGovernanceRetention bool + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The concatenation of the authentication device's serial number, a space, and the + // value that is displayed on your authentication device. Required to permanently + // delete a versioned object if versioning is configured with MFA delete enabled. + MFA *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type DeleteObjectOutput struct { + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker bool + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteObject", + } +} + +// getDeleteObjectBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getDeleteObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignDeleteObject is used to
generate a presigned HTTP Request which contains the +// presigned URL, signed headers, and HTTP method used. +func (c *PresignClient) PresignDeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &DeleteObjectInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "DeleteObject", params, clientOptFns, + c.client.addOperationDeleteObjectMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addDeleteObjectPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addDeleteObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go new file mode 100644 index 000000000..0d1bb7399 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go @@ -0,0 +1,202 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the entire tag set from the specified object. For more information about +// managing object tags, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). To use +// this operation, you must have permission to perform the s3:DeleteObjectTagging +// action. To delete tags of a specific object version, add the versionId query +// parameter in the request. You will need permission for the +// s3:DeleteObjectVersionTagging action. The following operations are related to +// DeleteObjectTagging: +// +// * PutObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// +// * +// GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { + if params == nil { + params = &DeleteObjectTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteObjectTagging", params, optFns, c.addOperationDeleteObjectTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteObjectTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteObjectTaggingInput struct { + + // The bucket name containing the objects from which to remove the tags. When using + // this action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name.
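// As a hedged usage sketch for the PresignDeleteObject helper in the preceding
// api_op_DeleteObject.go hunk (the client, bucket, key, and expiry below are
// illustrative assumptions, not part of the generated code):
//
//	presigner := s3.NewPresignClient(client)
//	req, err := presigner.PresignDeleteObject(ctx, &s3.DeleteObjectInput{
//		Bucket: aws.String("example-bucket"),
//		Key:    aws.String("stale-object.txt"),
//	}, s3.WithPresignExpires(15*time.Minute))
//	// On success, req.URL, req.Method, and req.SignedHeader describe a
//	// ready-to-send DELETE request that needs no further credentials.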
For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key that identifies the object in the bucket from which to remove all tags. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The versionId of the object that the tag-set will be removed from. + VersionId *string + + noSmithyDocumentSerde +} + +type DeleteObjectTaggingOutput struct { + + // The versionId of the object the tag-set was removed from. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjectTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjectTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjectTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteObjectTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } 
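// The generated addOperation*Middlewares functions throughout this diff assemble
// the same stack: serialize/deserialize steps, endpoint resolution, SigV4 signing,
// retries, input validation, and logging. Callers can adjust per-call behavior via
// the variadic option functions; a minimal sketch (values are illustrative):
//
//	_, err := client.DeleteObjectTagging(ctx, params, func(o *s3.Options) {
//		o.UsePathStyle = true // e.g., for S3-compatible endpoints without virtual-hosted buckets
//	})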
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteObjectTagging", + } +} + +// getDeleteObjectTaggingBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getDeleteObjectTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteObjectTaggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteObjectTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go new file mode 100644 index 000000000..50078da53 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go @@ -0,0 +1,294 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This action enables you to delete multiple objects from a bucket using a single +// HTTP request. If you know the object keys that you want to delete, then this +// action provides a suitable alternative to sending individual delete requests, +// reducing per-request overhead. The request contains a list of up to 1000 keys +// that you want to delete. In the XML, you provide the object key names, and +// optionally, version IDs if you want to delete a specific version of the object +// from a versioning-enabled bucket. For each key, Amazon S3 performs a delete +// action and returns the result of that delete, success, or failure, in the +// response. Note that if the object specified in the request is not found, Amazon +// S3 returns the result as deleted. The action supports two modes for the +// response: verbose and quiet. By default, the action uses verbose mode in which +// the response includes the result of deletion of each key in your request. In +// quiet mode the response includes only keys where the delete action encountered +// an error. For a successful deletion, the action does not return any information +// about the delete in the response body.
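// To make quiet mode concrete, a hedged sketch of a batch delete (the bucket and
// keys are assumptions for illustration; aws.String is the usual pointer helper):
//
//	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
//		Bucket: aws.String("example-bucket"),
//		Delete: &types.Delete{
//			Objects: []types.ObjectIdentifier{
//				{Key: aws.String("a.txt")},
//				{Key: aws.String("b.txt")},
//			},
//			Quiet: true, // only failed deletions are reported back in out.Errors
//		},
//	})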
When performing this action on an MFA +// Delete enabled bucket and attempting to delete any versioned objects, you must +// include an MFA token. If you do not provide one, the entire request will fail, +// even if there are non-versioned objects you are trying to delete. If you provide +// an invalid token, whether there are versioned keys in the request or not, the +// entire Multi-Object Delete request will fail. For information about MFA Delete, +// see MFA Delete +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). +// Finally, the Content-MD5 header is required for all Multi-Object Delete +// requests. Amazon S3 uses the header value to ensure that your request body has +// not been altered in transit. The following operations are related to +// DeleteObjects: +// +// * CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * +// CompleteMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * +// ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * +// AbortMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { + if params == nil { + params = &DeleteObjectsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteObjects", params, optFns, c.addOperationDeleteObjectsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteObjectsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteObjectsInput struct { + + // The bucket name containing the objects to delete. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Container for the request. + // + // This member is required. + Delete *types.Delete + + // Specifies whether you want to delete this object even if it has a + // Governance-type Object Lock in place. To use this header, you must have the + // s3:BypassGovernanceRetention permission.
+ BypassGovernanceRetention bool + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must + // be the same for all parts and it must match the checksum value supplied in the + // CreateMultipartUpload request. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The concatenation of the authentication device's serial number, a space, and the + // value that is displayed on your authentication device. Required to permanently + // delete a versioned object if versioning is configured with MFA delete enabled. + MFA *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +type DeleteObjectsOutput struct { + + // Container element for a successful delete. It identifies the object that was + // successfully deleted. + Deleted []types.DeletedObject + + // Container for a failed delete action that describes the object that Amazon S3 + // attempted to delete and the error it encountered. + Errors []types.Error + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjects{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjects{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjects(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addDeleteObjectsInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addDeleteObjectsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteObjects(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "DeleteObjects", + } +} + +// getDeleteObjectsRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. 
+func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*DeleteObjectsInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addDeleteObjectsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getDeleteObjectsRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getDeleteObjectsBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getDeleteObjectsBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteObjectsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteObjectsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go new file mode 100644 index 000000000..3defd538c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go @@ -0,0 +1,183 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketPublicAccessBlock permission. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
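// A minimal usage sketch for this operation (identifiers are assumptions;
// ExpectedBucketOwner is optional and shown only to illustrate the ownership guard):
//
//	_, err := client.DeletePublicAccessBlock(ctx, &s3.DeletePublicAccessBlockInput{
//		Bucket:              aws.String("example-bucket"),
//		ExpectedBucketOwner: aws.String("111122223333"),
//	})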
+// The following operations are related to DeletePublicAccessBlock: +// +// * Using Amazon +// S3 Block Public Access +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * +// GetPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * +// PutPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * +// GetBucketPolicyStatus +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { + if params == nil { + params = &DeletePublicAccessBlockInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeletePublicAccessBlock", params, optFns, c.addOperationDeletePublicAccessBlockMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeletePublicAccessBlockOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeletePublicAccessBlockInput struct { + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type DeletePublicAccessBlockOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpDeletePublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeletePublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePublicAccessBlock(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = 
addDeletePublicAccessBlockUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeletePublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "DeletePublicAccessBlock",
+ }
+}
+
+// getDeletePublicAccessBlockBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeletePublicAccessBlockBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeletePublicAccessBlockInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeletePublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeletePublicAccessBlockBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go
new file mode 100644
index 000000000..d1690f3ea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go
@@ -0,0 +1,188 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This implementation of the GET action uses the accelerate subresource to return
+// the Transfer Acceleration state of a bucket, which is either Enabled or
+// Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that
+// enables you to perform faster data transfers to and from Amazon S3. To use this
+// operation, you must have permission to perform the s3:GetAccelerateConfiguration
+// action. The bucket owner has this permission by default. The bucket owner can
+// grant this permission to others. For more information about permissions, see
+// Permissions Related to Bucket Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide. 
You set the Transfer Acceleration state of an +// existing bucket to Enabled or Suspended by using the +// PutBucketAccelerateConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// operation. A GET accelerate request does not return a state value for a bucket +// that has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. For more information about +// transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) in +// the Amazon S3 User Guide. Related Resources +// +// * PutBucketAccelerateConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { + if params == nil { + params = &GetBucketAccelerateConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketAccelerateConfiguration", params, optFns, c.addOperationGetBucketAccelerateConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketAccelerateConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketAccelerateConfigurationInput struct { + + // The name of the bucket for which the accelerate configuration is retrieved. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketAccelerateConfigurationOutput struct { + + // The accelerate configuration of the bucket. + Status types.BucketAccelerateStatus + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAccelerateConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAccelerateConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketAccelerateConfiguration",
+ }
+}
+
+// getGetBucketAccelerateConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketAccelerateConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketAccelerateConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: 
false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go new file mode 100644 index 000000000..610762883 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the GET action uses the acl subresource to return the +// access control list (ACL) of a bucket. To use GET to return the ACL of the +// bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is +// granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. If your bucket uses the bucket owner enforced +// setting for S3 Object Ownership, requests to read ACLs are still supported and +// return the bucket-owner-full-control ACL with the owner being the account that +// created the bucket. For more information, see Controlling object ownership and +// disabling ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. Related Resources +// +// * ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { + if params == nil { + params = &GetBucketAclInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketAcl", params, optFns, c.addOperationGetBucketAclMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketAclOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketAclInput struct { + + // Specifies the S3 bucket whose ACL is being requested. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketAclOutput struct { + + // A list of grants. + Grants []types.Grant + + // Container for the bucket owner's display name and ID. + Owner *types.Owner + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketAclValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAcl(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketAclUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketAcl",
+ }
+}
+
+// getGetBucketAclBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetBucketAclBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketAclInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketAclBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go new file mode 100644 index 000000000..bf2c3be67 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -0,0 +1,194 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the GET action returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. To use this +// operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner can +// grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. For information about Amazon S3 analytics feature, +// see Amazon S3 Analytics – Storage Class Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon S3 User Guide. Related Resources +// +// * +// DeleteBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * +// ListBucketAnalyticsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// * +// PutBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { + if params == nil { + params = &GetBucketAnalyticsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketAnalyticsConfiguration", params, optFns, c.addOperationGetBucketAnalyticsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketAnalyticsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketAnalyticsConfigurationInput struct { + + // The name of the bucket from which an analytics configuration is retrieved. + // + // This member is required. + Bucket *string + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketAnalyticsConfigurationOutput struct { + + // The configuration and any analyses for the analytics filter. 
+ AnalyticsConfiguration *types.AnalyticsConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketAnalyticsConfiguration",
+ }
+}
+
+// getGetBucketAnalyticsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketAnalyticsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketAnalyticsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ 
UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go new file mode 100644 index 000000000..0ed61273a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the Cross-Origin Resource Sharing (CORS) configuration information set +// for the bucket. To use this operation, you must have permission to perform the +// s3:GetBucketCORS action. By default, the bucket owner has this permission and +// can grant it to others. For more information about CORS, see Enabling +// Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). The following +// operations are related to GetBucketCors: +// +// * PutBucketCors +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// * +// DeleteBucketCors +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { + if params == nil { + params = &GetBucketCorsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketCors", params, optFns, c.addOperationGetBucketCorsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketCorsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketCorsInput struct { + + // The bucket name for which to get the cors configuration. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketCorsOutput struct { + + // A set of origins and methods (cross-origin access that you want to allow). You + // can add up to 100 rules to the configuration. + CORSRules []types.CORSRule + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketCors(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketCorsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketCors(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketCors",
+ }
+}
+
+// getGetBucketCorsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetBucketCorsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketCorsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketCorsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go new file mode 100644 index 000000000..7fa92fc5f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the default encryption configuration for an Amazon S3 bucket. If the +// bucket does not have a default encryption configuration, GetBucketEncryption +// returns ServerSideEncryptionConfigurationNotFoundError. For information about +// the Amazon S3 default encryption feature, see Amazon S3 Default Bucket +// Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). To use +// this operation, you must have permission to perform the +// s3:GetEncryptionConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// The following operations are related to GetBucketEncryption: +// +// * +// PutBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// * +// DeleteBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { + if params == nil { + params = &GetBucketEncryptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketEncryption", params, optFns, c.addOperationGetBucketEncryptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketEncryptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketEncryptionInput struct { + + // The name of the bucket from which the server-side encryption configuration is + // retrieved. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketEncryptionOutput struct { + + // Specifies the default server-side-encryption configuration. + ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketEncryption(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketEncryptionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketEncryption",
+ }
+}
+
+// getGetBucketEncryptionBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketEncryptionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketEncryptionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketEncryptionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ 
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go new file mode 100644 index 000000000..70bbb9dfb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go @@ -0,0 +1,193 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// * +// DeleteBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * +// PutBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// * +// ListBucketIntelligentTieringConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) { + if params == nil { + params = &GetBucketIntelligentTieringConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketIntelligentTieringConfiguration", params, optFns, c.addOperationGetBucketIntelligentTieringConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketIntelligentTieringConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketIntelligentTieringConfigurationInput struct { + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // This member is required. 
+ Bucket *string
+
+ // The ID used to identify the S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ Id *string
+
+ noSmithyDocumentSerde
+}
+
+type GetBucketIntelligentTieringConfigurationOutput struct {
+
+ // Container for S3 Intelligent-Tiering configuration.
+ IntelligentTieringConfiguration *types.IntelligentTieringConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketIntelligentTieringConfiguration",
+ }
+}
+
+// getGetBucketIntelligentTieringConfigurationBucketMember returns a pointer to
+// string denoting a provided bucket member value and a boolean indicating if the
+// input has a modeled bucket name.
+func getGetBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketIntelligentTieringConfigurationInput)
+ if 
in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketIntelligentTieringConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go new file mode 100644 index 000000000..f35a4606c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -0,0 +1,192 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. To use this operation, you must have permissions to perform +// the s3:GetInventoryConfiguration action. The bucket owner has this permission by +// default and can grant this permission to others. For more information about +// permissions, see Permissions Related to Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). 
The +// following operations are related to GetBucketInventoryConfiguration: +// +// * +// DeleteBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * +// ListBucketInventoryConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// * +// PutBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { + if params == nil { + params = &GetBucketInventoryConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketInventoryConfiguration", params, optFns, c.addOperationGetBucketInventoryConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketInventoryConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketInventoryConfigurationInput struct { + + // The name of the bucket containing the inventory configuration to retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketInventoryConfigurationOutput struct { + + // Specifies the inventory configuration. + InventoryConfiguration *types.InventoryConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "GetBucketInventoryConfiguration",
+ }
+}
+
+// getGetBucketInventoryConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketInventoryConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketInventoryConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ 
EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go new file mode 100644 index 000000000..5d72d2ebc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -0,0 +1,209 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Bucket lifecycle configuration now supports specifying a lifecycle rule using an +// object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The response describes the +// new filter element that you can use to specify a filter to select a subset of +// objects to which the rule applies. If you are using a previous version of the +// lifecycle configuration, it still works. For the earlier action, see +// GetBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// Returns the lifecycle configuration information set on the bucket. For +// information about lifecycle configuration, see Object Lifecycle Management +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). To +// use this operation, you must have permission to perform the +// s3:GetLifecycleConfiguration action. The bucket owner has this permission, by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// GetBucketLifecycleConfiguration has the following special error: +// +// * Error code: +// NoSuchLifecycleConfiguration +// +// * Description: The lifecycle configuration does +// not exist. 
+// +// * HTTP Status Code: 404 Not Found +// +// * SOAP Fault Code Prefix: +// Client +// +// The following operations are related to +// GetBucketLifecycleConfiguration: +// +// * GetBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// +// * +// PutBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// * +// DeleteBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { + if params == nil { + params = &GetBucketLifecycleConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketLifecycleConfiguration", params, optFns, c.addOperationGetBucketLifecycleConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketLifecycleConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketLifecycleConfigurationInput struct { + + // The name of the bucket for which to get the lifecycle information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketLifecycleConfigurationOutput struct { + + // Container for a lifecycle rule. + Rules []types.LifecycleRule + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil 
{ + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketLifecycleConfiguration", + } +} + +// getGetBucketLifecycleConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getGetBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketLifecycleConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketLifecycleConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go new file mode 100644 index 000000000..fb8ff30fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go @@ -0,0 +1,248 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" +) + +// Returns the Region the bucket resides in. You set the bucket's Region using the +// LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). To use +// this implementation of the operation, you must be the bucket owner. To use this +// API against an access point, provide the alias of the access point in place of +// the bucket name. 
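// [Editorial sketch, not part of the vendored diff] The
// GetBucketLifecycleConfiguration operation added above follows the standard
// generated call shape. A minimal caller, assuming an initialized *s3.Client
// named client and the placeholder bucket name "example-bucket":
//
//	out, err := client.GetBucketLifecycleConfiguration(ctx,
//		&s3.GetBucketLifecycleConfigurationInput{Bucket: aws.String("example-bucket")})
//	if err != nil {
//		// A bucket with no lifecycle rules returns the 404 documented above
//		// (error code NoSuchLifecycleConfiguration).
//		log.Fatal(err)
//	}
//	for _, rule := range out.Rules {
//		fmt.Println(aws.ToString(rule.ID), rule.Status) // one line per lifecycle rule
//	}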
The following operations are related to GetBucketLocation: +// +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { + if params == nil { + params = &GetBucketLocationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketLocation", params, optFns, c.addOperationGetBucketLocationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketLocationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketLocationInput struct { + + // The name of the bucket for which to get the location. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketLocationOutput struct { + + // Specifies the Region where the bucket resides. For a list of all the Amazon S3 + // supported location constraints by Region, see Regions and Endpoints + // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). Buckets in + // Region us-east-1 have a LocationConstraint of null. + LocationConstraint types.BucketLocationConstraint + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLocation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLocation{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapDeserializerHelper(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLocation(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = 
addGetBucketLocationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +type awsRestxml_deserializeOpGetBucketLocation_custom struct { +} + +func (*awsRestxml_deserializeOpGetBucketLocation_custom) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketLocation_custom) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata) + } + output := &GetBucketLocationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + decoder := smithyxml.WrapNodeDecoder(rootDecoder, xml.StartElement{}) + err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder) + if err == io.EOF { + err = nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +// Helper to swap in a custom deserializer +func swapDeserializerHelper(stack *middleware.Stack) error { + _, err := stack.Deserialize.Swap("OperationDeserializer", &awsRestxml_deserializeOpGetBucketLocation_custom{}) + if err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketLocation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketLocation", + } +} + +// getGetBucketLocationBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketLocationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketLocationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketLocationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketLocationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go new file mode 100644 index 000000000..ca115886d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go @@ -0,0 +1,177 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the logging status of a bucket and the permissions users have to view +// and modify that status. To use GET, you must be the bucket owner. The following +// operations are related to GetBucketLogging: +// +// * CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// PutBucketLogging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { + if params == nil { + params = &GetBucketLoggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketLogging", params, optFns, c.addOperationGetBucketLoggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketLoggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketLoggingInput struct { + + // The bucket name for which to get the logging information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketLoggingOutput struct { + + // Describes where logs are stored and the prefix that Amazon S3 assigns to all log + // object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in + // the Amazon S3 API Reference. + LoggingEnabled *types.LoggingEnabled + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLogging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLogging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLogging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketLoggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketLogging", + } +} + +// getGetBucketLoggingBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketLoggingBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketLoggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketLoggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: 
options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go new file mode 100644 index 000000000..22cf389cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -0,0 +1,199 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a metrics configuration (specified by the metrics configuration ID) from +// the bucket. Note that this doesn't include the daily storage metrics. To use +// this operation, you must have permissions to perform the +// s3:GetMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// The following operations are related to GetBucketMetricsConfiguration: +// +// * +// PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * +// DeleteBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// * +// ListBucketMetricsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// * +// Monitoring Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { + if params == nil { + params = &GetBucketMetricsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketMetricsConfiguration", params, optFns, c.addOperationGetBucketMetricsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketMetricsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketMetricsConfigurationInput struct { + + // The name of the bucket containing the metrics configuration to retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the metrics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
+ ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketMetricsConfigurationOutput struct { + + // Specifies the metrics configuration. + MetricsConfiguration *types.MetricsConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetricsConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketMetricsConfiguration", + } +} + +// getGetBucketMetricsConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getGetBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketMetricsConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: 
s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketMetricsConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go new file mode 100644 index 000000000..cbf103a7f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -0,0 +1,193 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the notification configuration of a bucket. If notifications are not +// enabled on the bucket, the action returns an empty NotificationConfiguration +// element. By default, you must be the bucket owner to read the notification +// configuration of a bucket. However, the bucket owner can use a bucket policy to +// grant permission to other users to read this configuration with the +// s3:GetBucketNotification permission. For more information about setting and +// reading the notification configuration on a bucket, see Setting Up Notification +// of Bucket Events +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). For +// more information about bucket policies, see Using Bucket Policies +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The +// following action is related to GetBucketNotification: +// +// * PutBucketNotification +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { + if params == nil { + params = &GetBucketNotificationConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketNotificationConfiguration", params, optFns, c.addOperationGetBucketNotificationConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketNotificationConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketNotificationConfigurationInput struct { + + // The name of the bucket for which to get the notification configuration. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +// A container for specifying the notification configuration of the bucket. If this +// element is empty, notifications are turned off for the bucket. 
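// [Editorial sketch, not part of the vendored diff] Consuming the output type
// defined just below; client is an initialized *s3.Client and "example-bucket"
// is a placeholder:
//
//	out, err := client.GetBucketNotificationConfiguration(ctx,
//		&s3.GetBucketNotificationConfigurationInput{Bucket: aws.String("example-bucket")})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, q := range out.QueueConfigurations {
//		fmt.Println(aws.ToString(q.QueueArn), q.Events) // SQS targets and their subscribed events
//	}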
+type GetBucketNotificationConfigurationOutput struct { + + // Enables delivery of events to Amazon EventBridge. + EventBridgeConfiguration *types.EventBridgeConfiguration + + // Describes the Lambda functions to invoke and the events for which to invoke + // them. + LambdaFunctionConfigurations []types.LambdaFunctionConfiguration + + // The Amazon Simple Queue Service queues to publish messages to and the events for + // which to publish messages. + QueueConfigurations []types.QueueConfiguration + + // The topic to which notifications are sent and the events for which notifications + // are generated. + TopicConfigurations []types.TopicConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketNotificationConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketNotificationConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketNotificationConfiguration", + } +} + +// getGetBucketNotificationConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member 
valueand a boolean indicating if the input has +// a modeled bucket name, +func getGetBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketNotificationConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketNotificationConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go new file mode 100644 index 000000000..571c9566c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go @@ -0,0 +1,177 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying permissions in a policy +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html). +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html). +// The following operations are related to GetBucketOwnershipControls: +// +// * +// PutBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) { + if params == nil { + params = &GetBucketOwnershipControlsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketOwnershipControls", params, optFns, c.addOperationGetBucketOwnershipControlsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketOwnershipControlsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketOwnershipControlsInput struct { + + // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). 
+ ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketOwnershipControlsOutput struct { + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or + // ObjectWriter) currently in effect for this Amazon S3 bucket. + OwnershipControls *types.OwnershipControls + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketOwnershipControls{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketOwnershipControls(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketOwnershipControls", + } +} + +// getGetBucketOwnershipControlsBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketOwnershipControlsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, 
s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketOwnershipControlsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go new file mode 100644 index 000000000..f16c84cdd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go @@ -0,0 +1,180 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the policy of a specified bucket. If you are using an identity other +// than the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must have the GetBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. As a security precaution, the root user of the Amazon +// Web Services account that owns a bucket can always use this operation, even if +// the policy explicitly denies the root user the ability to perform this action. +// For more information about bucket policies, see Using Bucket Policies and User +// Policies +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The +// following action is related to GetBucketPolicy: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { + if params == nil { + params = &GetBucketPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicy", params, optFns, c.addOperationGetBucketPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketPolicyInput struct { + + // The bucket name for which to get the bucket policy. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketPolicyOutput struct { + + // The bucket policy as a JSON document. + Policy *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketPolicyUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketPolicy", + } +} + +// getGetBucketPolicyBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketPolicyBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketPolicyInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketPolicyBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} 
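Editorial note: every api_op_GetBucket*.go file vendored in this range follows the same generated pattern — a nil-params guard, a call through Client.invokeOperation with an operation-specific middleware stack, and a typed output carrying ResultMetadata. A minimal end-to-end caller for the GetBucketPolicy operation above, as a sketch assuming default-chain credentials and the placeholder bucket name "example-bucket" (neither is part of the diff):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials from the default chain
	// (environment variables, shared config files, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder; caller needs s3:GetBucketPolicy on it
	})
	if err != nil {
		// Per the doc comment above: 403 Access Denied without the permission,
		// 405 Method Not Allowed from outside the bucket owner's account.
		log.Fatalf("GetBucketPolicy: %v", err)
	}
	fmt.Println(aws.ToString(out.Policy)) // the bucket policy as a JSON document
}

The variadic optFns ...func(*Options) parameter on each generated operation allows per-call overrides (for example UsePathStyle or a different endpoint resolver) without rebuilding the client.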
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go new file mode 100644 index 000000000..570f60faa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go @@ -0,0 +1,189 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the +// s3:GetBucketPolicyStatus permission. For more information about Amazon S3 +// permissions, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// For more information about when Amazon S3 considers a bucket public, see The +// Meaning of "Public" +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// The following operations are related to GetBucketPolicyStatus: +// +// * Using Amazon +// S3 Block Public Access +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * +// GetPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * +// PutPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * +// DeletePublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { + if params == nil { + params = &GetBucketPolicyStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicyStatus", params, optFns, c.addOperationGetBucketPolicyStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketPolicyStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketPolicyStatusInput struct { + + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketPolicyStatusOutput struct { + + // The policy status for the specified bucket. + PolicyStatus *types.PolicyStatus + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicyStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicyStatus{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicyStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketPolicyStatusUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketPolicyStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketPolicyStatus", + } +} + +// getGetBucketPolicyStatusBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketPolicyStatusBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketPolicyStatusInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketPolicyStatusUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketPolicyStatusBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + 
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go new file mode 100644 index 000000000..5d7f3115b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the replication configuration of a bucket. It can take a while to +// propagate the put or delete a replication configuration to all Amazon S3 +// systems. Therefore, a get request soon after put or delete can return a wrong +// result. For information about replication configuration, see Replication +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon +// S3 User Guide. This action requires permissions for the +// s3:GetReplicationConfiguration action. For more information about permissions, +// see Using Bucket Policies and User Policies +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). If +// you include the Filter element in a replication configuration, you must also +// include the DeleteMarkerReplication and Priority elements. The response also +// returns those elements. For information about GetBucketReplication errors, see +// List of replication-related error codes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// The following operations are related to GetBucketReplication: +// +// * +// PutBucketReplication +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// +// * +// DeleteBucketReplication +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) { + if params == nil { + params = &GetBucketReplicationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketReplication", params, optFns, c.addOperationGetBucketReplicationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketReplicationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketReplicationInput struct { + + // The bucket name for which to get the replication information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketReplicationOutput struct { + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + ReplicationConfiguration *types.ReplicationConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketReplication{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketReplication{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketReplication(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketReplicationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketReplication", + } +} + +// getGetBucketReplicationBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketReplicationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketReplicationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketReplicationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + 
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go new file mode 100644 index 000000000..45f985b95 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go @@ -0,0 +1,172 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the request payment configuration of a bucket. To use this version of +// the operation, you must be the bucket owner. For more information, see Requester +// Pays Buckets +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). The +// following operations are related to GetBucketRequestPayment: +// +// * ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { + if params == nil { + params = &GetBucketRequestPaymentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketRequestPayment", params, optFns, c.addOperationGetBucketRequestPaymentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketRequestPaymentOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketRequestPaymentInput struct { + + // The name of the bucket for which to get the payment request configuration + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketRequestPaymentOutput struct { + + // Specifies who pays for the download and request fees. + Payer types.Payer + + // Metadata pertaining to the operation's result. 
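+//
+// Illustrative sketch (not part of the upstream generated file; client and
+// bucket name are placeholders): checking whether the requester pays.
+//
+//	out, err := client.GetBucketRequestPayment(ctx, &s3.GetBucketRequestPaymentInput{
+//	    Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && out.Payer == types.PayerRequester {
+//	    // downloads from this bucket are billed to the requester
+//	}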
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketRequestPayment{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketRequestPayment{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketRequestPayment(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketRequestPayment", + } +} + +// getGetBucketRequestPaymentBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketRequestPaymentBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketRequestPaymentInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketRequestPaymentBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: 
options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go new file mode 100644 index 000000000..816d1b3e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go @@ -0,0 +1,185 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the tag set associated with the bucket. To use this operation, you must +// have permission to perform the s3:GetBucketTagging action. By default, the +// bucket owner has this permission and can grant this permission to others. +// GetBucketTagging has the following special error: +// +// * Error code: NoSuchTagSet +// +// * +// Description: There is no tag set associated with the bucket. +// +// The following +// operations are related to GetBucketTagging: +// +// * PutBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// * +// DeleteBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { + if params == nil { + params = &GetBucketTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketTagging", params, optFns, c.addOperationGetBucketTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketTaggingInput struct { + + // The name of the bucket for which to get the tagging information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketTaggingOutput struct { + + // Contains the tag set. + // + // This member is required. + TagSet []types.Tag + + // Metadata pertaining to the operation's result. 
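+//
+// Illustrative sketch (not part of the upstream generated file; client and
+// bucket name are placeholders): the NoSuchTagSet error code documented above
+// can be detected through the generic smithy.APIError interface from
+// github.com/aws/smithy-go.
+//
+//	_, err := client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
+//	    Bucket: aws.String("example-bucket"),
+//	})
+//	var ae smithy.APIError
+//	if errors.As(err, &ae) && ae.ErrorCode() == "NoSuchTagSet" {
+//	    // the bucket has no tag set
+//	}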
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketTagging", + } +} + +// getGetBucketTaggingBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketTaggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: 
options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go new file mode 100644 index 000000000..3657bd1ca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go @@ -0,0 +1,186 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the versioning state of a bucket. To retrieve the versioning state of a +// bucket, you must be the bucket owner. This implementation also returns the MFA +// Delete status of the versioning state. If the MFA Delete status is enabled, the +// bucket owner must use an authentication device to change the versioning state of +// the bucket. The following operations are related to GetBucketVersioning: +// +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { + if params == nil { + params = &GetBucketVersioningInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketVersioning", params, optFns, c.addOperationGetBucketVersioningMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketVersioningOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketVersioningInput struct { + + // The name of the bucket for which to get the versioning information. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetBucketVersioningOutput struct { + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA delete. + // If the bucket has never been so configured, this element is not returned. + MFADelete types.MFADeleteStatus + + // The versioning state of the bucket. + Status types.BucketVersioningStatus + + // Metadata pertaining to the operation's result. 
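+//
+// Illustrative sketch (not part of the upstream generated file; client and
+// bucket name are placeholders): reading the versioning state from the output.
+//
+//	out, err := client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
+//	    Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && out.Status == types.BucketVersioningStatusEnabled {
+//	    // versioning is enabled; check out.MFADelete for the MFA Delete state
+//	}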
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketVersioning{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketVersioning{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketVersioning(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketVersioningUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketVersioning", + } +} + +// getGetBucketVersioningBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetBucketVersioningBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketVersioningInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketVersioningBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + 
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go
new file mode 100644
index 000000000..aa866b301
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go
@@ -0,0 +1,190 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the website configuration for a bucket. To host a website on Amazon
+// S3, you can configure a bucket as a website by adding a website configuration.
+// For more information about hosting websites, see Hosting Websites on Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). This GET
+// action requires the S3:GetBucketWebsite permission. By default, only the bucket
+// owner can read the bucket website configuration. However, bucket owners can
+// allow other users to read the website configuration by writing a bucket policy
+// granting them the S3:GetBucketWebsite permission. The following operations are
+// related to GetBucketWebsite:
+//
+// * DeleteBucketWebsite
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html)
+//
+// *
+// PutBucketWebsite
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
+func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) {
+ if params == nil {
+ params = &GetBucketWebsiteInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketWebsite", params, optFns, c.addOperationGetBucketWebsiteMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketWebsiteOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketWebsiteInput struct {
+
+ // The bucket name for which to get the website configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+type GetBucketWebsiteOutput struct {
+
+ // The object key name of the website error document to use for 4XX class errors.
+ ErrorDocument *types.ErrorDocument
+
+ // The name of the index document for the website (for example index.html).
+ IndexDocument *types.IndexDocument
+
+ // Specifies the redirect behavior of all requests to a website endpoint of an
+ // Amazon S3 bucket.
+ RedirectAllRequestsTo *types.RedirectAllRequestsTo
+
+ // Rules that define when a redirect is applied and the redirect behavior.
+ RoutingRules []types.RoutingRule
+
+ // Metadata pertaining to the operation's result.
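+//
+// Illustrative sketch (not part of the upstream generated file; client and
+// bucket name are placeholders): reading the index document suffix from the
+// output.
+//
+//	out, err := client.GetBucketWebsite(ctx, &s3.GetBucketWebsiteInput{
+//	    Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && out.IndexDocument != nil && out.IndexDocument.Suffix != nil {
+//	    fmt.Println(*out.IndexDocument.Suffix)
+//	}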
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketWebsite{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketWebsite{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketWebsite(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetBucketWebsiteUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetBucketWebsite", + } +} + +// getGetBucketWebsiteBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetBucketWebsiteBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketWebsiteInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketWebsiteBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: 
options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go new file mode 100644 index 000000000..92c38bed8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go @@ -0,0 +1,593 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "time" +) + +// Retrieves objects from Amazon S3. To use GET, you must have READ access to the +// object. If you grant READ access to the anonymous user, you can return the +// object without using an authorization header. An Amazon S3 bucket has no +// directory hierarchy such as you would find in a typical computer file system. +// You can, however, create a logical hierarchy by using object key names that +// imply a folder structure. For example, instead of naming an object sample.jpg, +// you can name it photos/2006/February/sample.jpg. To get an object from such a +// logical hierarchy, specify the full key name for the object in the GET +// operation. For a virtual hosted-style request example, if you have the object +// photos/2006/February/sample.jpg, specify the resource as +// /photos/2006/February/sample.jpg. For a path-style request example, if you have +// the object photos/2006/February/sample.jpg in the bucket named examplebucket, +// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more +// information about request types, see HTTP Host Header Bucket Specification +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). +// For more information about returning the ACL of an object, see GetObjectAcl +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). If the +// object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive +// storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep +// Archive tiers, before you can retrieve the object you must first restore a copy +// using RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this action returns an InvalidObjectStateError error. For information +// about restoring archived objects, see Restoring Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). +// Encryption request headers, like x-amz-server-side-encryption, should not be +// sent for GET requests if your object uses server-side encryption with KMS keys +// (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys +// (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 +// BadRequest error. 
If you encrypt an object by using server-side encryption with +// customer-provided encryption keys (SSE-C) when you store the object in Amazon +// S3, then when you GET the object, you must use the following headers: +// +// * +// x-amz-server-side-encryption-customer-algorithm +// +// * +// x-amz-server-side-encryption-customer-key +// +// * +// x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, +// see Server-Side Encryption (Using Customer-Provided Encryption Keys) +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// Assuming you have the relevant permission to read object tags, the response also +// returns the x-amz-tagging-count header that provides the count of number of tags +// associated with the object. You can use GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) to +// retrieve the tag set associated with an object. Permissions You need the +// relevant read object (or version) permission for this operation. For more +// information, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If +// the object you request does not exist, the error Amazon S3 returns depends on +// whether you also have the s3:ListBucket permission. +// +// * If you have the +// s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status +// code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket +// permission, Amazon S3 will return an HTTP status code 403 ("access denied") +// error. +// +// Versioning By default, the GET action returns the current version of an +// object. To return a different version, use the versionId subresource. +// +// * If you +// supply a versionId, you need the s3:GetObjectVersion permission to access a +// specific version of an object. If you request a specific version, you do not +// need to have the s3:GetObject permission. +// +// * If the current version of the +// object is a delete marker, Amazon S3 behaves as if the object was deleted and +// includes x-amz-delete-marker: true in the response. +// +// For more information about +// versioning, see PutBucketVersioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). +// Overriding Response Header Values There are times when you want to override +// certain response header values in a GET response. For example, you might +// override the Content-Disposition response header value in your GET request. You +// can override values for a set of response headers using the following query +// parameters. These response header values are sent only on a successful request, +// that is, when status code 200 OK is returned. The set of headers you can +// override using these parameters is a subset of the headers that Amazon S3 +// accepts when you create an object. The response headers that you can override +// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, +// Content-Disposition, and Content-Encoding. To override these header values in +// the GET response, you use the following request parameters. You must sign the +// request, either using an Authorization header or a presigned URL, when using +// these parameters. They cannot be used with an unsigned (anonymous) request. 
+// +// * +// response-content-type +// +// * response-content-language +// +// * response-expires +// +// * +// response-cache-control +// +// * response-content-disposition +// +// * +// response-content-encoding +// +// Additional Considerations about Request Headers If +// both of the If-Match and If-Unmodified-Since headers are present in the request +// as follows: If-Match condition evaluates to true, and; If-Unmodified-Since +// condition evaluates to false; then, S3 returns 200 OK and the data requested. If +// both of the If-None-Match and If-Modified-Since headers are present in the +// request as follows: If-None-Match condition evaluates to false, and; +// If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified +// response code. For more information about conditional requests, see RFC 7232 +// (https://tools.ietf.org/html/rfc7232). The following operations are related to +// GetObject: +// +// * ListBuckets +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// * +// GetObjectAcl +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { + if params == nil { + params = &GetObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObject", params, optFns, c.addOperationGetObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectInput struct { + + // The bucket name containing the object. When using this action with an access + // point, you must direct requests to the access point hostname. The access point + // hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using an Object Lambda access point the + // hostname takes the form + // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. When using this + // action with Amazon S3 on Outposts, you must direct requests to the S3 on + // Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key of the object to get. + // + // This member is required. + Key *string + + // To retrieve the checksum, this mode must be enabled. + ChecksumMode types.ChecksumMode + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Return the object only if its entity tag (ETag) is the same as the one + // specified; otherwise, return a 412 (precondition failed) error. 
+ IfMatch *string + + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. + IfModifiedSince *time.Time + + // Return the object only if its entity tag (ETag) is different from the one + // specified; otherwise, return a 304 (not modified) error. + IfNoneMatch *string + + // Return the object only if it has not been modified since the specified time; + // otherwise, return a 412 (precondition failed) error. + IfUnmodifiedSince *time.Time + + // Part number of the object being read. This is a positive integer between 1 and + // 10,000. Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. + PartNumber int32 + + // Downloads the specified range bytes of an object. For more information about the + // HTTP Range header, see + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). Amazon S3 + // doesn't support retrieving multiple ranges of data per GET request. + Range *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Sets the Cache-Control header of the response. + ResponseCacheControl *string + + // Sets the Content-Disposition header of the response + ResponseContentDisposition *string + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string + + // Sets the Content-Type header of the response. + ResponseContentType *string + + // Sets the Expires header of the response. + ResponseExpires *time.Time + + // Specifies the algorithm to use to when decrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 used to encrypt the + // data. This value is used to decrypt the object when recovering it and must match + // the one used when storing the data. The key must be appropriate for use with the + // algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectOutput struct { + + // Indicates that a range of bytes was specified. + AcceptRanges *string + + // Object data. + Body io.ReadCloser + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // Specifies caching behavior along the request/reply chain. + CacheControl *string + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. 
For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Specifies presentational information for the object. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // Size of the body in bytes. + ContentLength int64 + + // The portion of the object returned in the response. + ContentRange *string + + // A standard MIME type describing the format of the object data. + ContentType *string + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker bool + + // An entity tag (ETag) is an opaque identifier assigned by a web server to a + // specific version of a resource found at a URL. + ETag *string + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is + // URL-encoded. + Expiration *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Creation date of the object. + LastModified *time.Time + + // A map of metadata to store with the object in S3. + // + // Map keys will be normalized to lower-case. + Metadata map[string]string + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. 
For example, using SOAP, you + // can create metadata whose values are not legal HTTP headers. + MissingMeta int32 + + // Indicates whether this object has an active legal hold. This field is only + // returned if you have permission to view an object's legal hold status. + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // The Object Lock mode currently in place for this object. + ObjectLockMode types.ObjectLockMode + + // The date and time when this object's Object Lock will expire. + ObjectLockRetainUntilDate *time.Time + + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. + PartsCount int32 + + // Amazon S3 can return this if your request involves a bucket that is either a + // source or destination in a replication rule. + ReplicationStatus types.ReplicationStatus + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Provides information about object restoration action and expiration time of the + // restored object copy. + Restore *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // Provides storage class information of the object. Amazon S3 returns this header + // for all objects except for S3 Standard storage class objects. + StorageClass types.StorageClass + + // The number of tags, if any, on the object. + TagCount int32 + + // Version of the object. + VersionId *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. + WebsiteRedirectLocation *string + + // Metadata pertaining to the operation's result. 
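+//
+// Illustrative sketch (not part of the upstream generated file; cfg is a
+// placeholder aws.Config, e.g. from config.LoadDefaultConfig): a basic download,
+// plus a presigned URL via the PresignClient defined later in this file.
+//
+//	client := s3.NewFromConfig(cfg)
+//	out, err := client.GetObject(ctx, &s3.GetObjectInput{
+//	    Bucket: aws.String("examplebucket"),
+//	    Key:    aws.String("photos/2006/February/sample.jpg"),
+//	})
+//	if err != nil {
+//	    return err
+//	}
+//	defer out.Body.Close()
+//	data, err := io.ReadAll(out.Body)
+//
+//	presigner := s3.NewPresignClient(client)
+//	req, err := presigner.PresignGetObject(ctx, &s3.GetObjectInput{
+//	    Bucket: aws.String("examplebucket"),
+//	    Key:    aws.String("photos/2006/February/sample.jpg"),
+//	}, s3.WithPresignExpires(15*time.Minute))
+//	// req.URL now holds a time-limited URL usable without credentials.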
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectOutputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addGetObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObject", + } +} + +// getGetObjectRequestValidationModeMember gets the request checksum validation +// mode provided as input. 
+func getGetObjectRequestValidationModeMember(input interface{}) (string, bool) { + in := input.(*GetObjectInput) + if len(in.ChecksumMode) == 0 { + return "", false + } + return string(in.ChecksumMode), true +} + +func addGetObjectOutputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddOutputMiddleware(stack, internalChecksum.OutputMiddlewareOptions{ + GetValidationMode: getGetObjectRequestValidationModeMember, + ValidationAlgorithms: []string{"CRC32", "CRC32C", "SHA256", "SHA1"}, + IgnoreMultipartValidation: true, + LogValidationSkipped: true, + LogMultipartValidationSkipped: true, + }) +} + +// getGetObjectBucketMember returns a pointer to string denoting a provided bucket +// member valueand a boolean indicating if the input has a modeled bucket name, +func getGetObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignGetObject is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignGetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &GetObjectInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "GetObject", params, clientOptFns, + c.client.addOperationGetObjectMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addGetObjectPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addGetObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go new file mode 100644 index 000000000..709e62ff7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go @@ -0,0 +1,224 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the access control list (ACL) of an object. 
To use this operation, you +// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more +// information, see Mapping of ACL permissions and access policy permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) +// in the Amazon S3 User Guide This action is not supported by Amazon S3 on +// Outposts. Versioning By default, GET returns ACL information about the current +// version of an object. To return ACL information about a different version, use +// the versionId subresource. If your bucket uses the bucket owner enforced setting +// for S3 Object Ownership, requests to read ACLs are still supported and return +// the bucket-owner-full-control ACL with the owner being the account that created +// the bucket. For more information, see Controlling object ownership and +// disabling ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. The following operations are related to +// GetObjectAcl: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// * +// PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { + if params == nil { + params = &GetObjectAclInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectAcl", params, optFns, c.addOperationGetObjectAclMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectAclOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectAclInput struct { + + // The bucket name that contains the object for which to get the ACL information. + // When using this action with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key of the object for which to get the ACL information. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // VersionId used to reference a specific version of the object. 
+ VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectAclOutput struct { + + // A list of grants. + Grants []types.Grant + + // Container for the bucket owner's display name and ID. + Owner *types.Owner + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAcl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAcl{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectAclValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAcl(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectAclUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectAcl", + } +} + +// getGetObjectAclBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetObjectAclBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectAclInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: 
getGetObjectAclBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go new file mode 100644 index 000000000..fb1683e7d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go @@ -0,0 +1,363 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Retrieves all the metadata from an object without returning the object itself. +// This action is useful if you're interested only in an object's metadata. To use +// GetObjectAttributes, you must have READ access to the object. +// GetObjectAttributes combines the functionality of GetObjectAcl, +// GetObjectLegalHold, GetObjectLockConfiguration, GetObjectRetention, +// GetObjectTagging, HeadObject, and ListParts. All of the data returned with each +// of those individual calls can be returned with a single call to +// GetObjectAttributes. If you encrypt an object by using server-side encryption +// with customer-provided encryption keys (SSE-C) when you store the object in +// Amazon S3, then when you retrieve the metadata from the object, you must use the +// following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * +// x-amz-server-side-encryption-customer-key +// +// * +// x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, +// see Server-Side Encryption (Using Customer-Provided Encryption Keys) +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. +// +// * Encryption request headers, such as +// x-amz-server-side-encryption, should not be sent for GET requests if your object +// uses server-side encryption with Amazon Web Services KMS keys stored in Amazon +// Web Services Key Management Service (SSE-KMS) or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). If your object does use these types +// of keys, you'll get an HTTP 400 Bad Request error. +// +// * The last modified property +// in this case is the creation date of the object. +// +// Consider the following when +// using request headers: +// +// * If both of the If-Match and If-Unmodified-Since +// headers are present in the request as follows, then Amazon S3 returns the HTTP +// status code 200 OK and the data requested: +// +// * If-Match condition evaluates to +// true. +// +// * If-Unmodified-Since condition evaluates to false. +// +// * If both of the +// If-None-Match and If-Modified-Since headers are present in the request as +// follows, then Amazon S3 returns the HTTP status code 304 Not Modified: +// +// * +// If-None-Match condition evaluates to false. +// +// * If-Modified-Since condition +// evaluates to true. 
+// +// For more information about conditional requests, see RFC +// 7232 (https://tools.ietf.org/html/rfc7232). Permissions The permissions that you +// need to use this operation depend on whether the bucket is versioned. If the +// bucket is versioned, you need both the s3:GetObjectVersion and +// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is +// not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. +// For more information, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in +// the Amazon S3 User Guide. If the object that you request does not exist, the +// error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 +// returns an HTTP status code 404 Not Found ("no such key") error. +// +// * If you don't +// have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 +// Forbidden ("access denied") error. +// +// The following actions are related to +// GetObjectAttributes: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// GetObjectAcl +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// * +// GetObjectLegalHold +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) +// +// * +// GetObjectLockConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) +// +// * +// GetObjectRetention +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) +// +// * +// GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// * +// HeadObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) +// +// * +// ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) { + if params == nil { + params = &GetObjectAttributesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectAttributes", params, optFns, c.addOperationGetObjectAttributesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectAttributesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectAttributesInput struct { + + // The name of the bucket that contains the object. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The object key. + // + // This member is required. + Key *string + + // An XML header that specifies the fields at the root level that you want returned + // in the response. Fields that you do not specify are not returned. + // + // This member is required. + ObjectAttributes []types.ObjectAttributes + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Sets the maximum number of parts to return. + MaxParts int32 + + // Specifies the part after which listing should begin. Only parts with higher part + // numbers will be listed. + PartNumberMarker *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // The version ID used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectAttributesOutput struct { + + // The checksum or digest of the object. + Checksum *types.Checksum + + // Specifies whether the object retrieved was (true) or was not (false) a delete + // marker. If false, this response header does not appear in the response. + DeleteMarker bool + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string + + // The creation date of the object. + LastModified *time.Time + + // A collection of parts associated with a multipart upload. + ObjectParts *types.GetObjectAttributesParts + + // The size of the object in bytes. + ObjectSize int64 + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Provides the storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. For more + // information, see Storage Classes + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). 
+ StorageClass types.StorageClass + + // The version ID of the object. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAttributes{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAttributes{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAttributes(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectAttributesUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectAttributes(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectAttributes", + } +} + +// getGetObjectAttributesBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectAttributesBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectAttributesInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectAttributesUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectAttributesBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: 
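
GetObjectAttributes is the one operation in this batch with a required attribute selector: only the fields named in ObjectAttributes are populated in the output, which is what lets it replace several single-purpose calls. A sketch of a caller, assuming a client constructed as in the earlier example; the function name and package are invented for illustration, and the enum constants are the ones defined in the accompanying types package.

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// printObjectAttributes fetches a chosen subset of object metadata in one
// round trip. Fields not listed in ObjectAttributes stay at their zero value.
func printObjectAttributes(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesEtag,
			types.ObjectAttributesObjectSize,
			types.ObjectAttributesStorageClass,
		},
	})
	if err != nil {
		return err
	}
	fmt.Printf("etag=%s size=%d class=%s\n",
		aws.ToString(out.ETag), out.ObjectSize, out.StorageClass)
	return nil
}
```
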
options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go new file mode 100644 index 000000000..a2446ac32 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go @@ -0,0 +1,195 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets an object's current legal hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This +// action is not supported by Amazon S3 on Outposts. The following action is +// related to GetObjectLegalHold: +// +// * GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { + if params == nil { + params = &GetObjectLegalHoldInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectLegalHold", params, optFns, c.addOperationGetObjectLegalHoldMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectLegalHoldOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectLegalHoldInput struct { + + // The bucket name containing the object whose legal hold status you want to + // retrieve. When using this action with an access point, you must direct requests + // to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object whose legal hold status you want to retrieve. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The version ID of the object whose legal hold status you want to retrieve. 
+ VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectLegalHoldOutput struct { + + // The current legal hold status for the specified object. + LegalHold *types.ObjectLockLegalHold + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLegalHold(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectLegalHoldUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectLegalHold", + } +} + +// getGetObjectLegalHoldBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectLegalHoldBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectLegalHoldInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectLegalHoldBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: 
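
A corresponding caller-side sketch for GetObjectLegalHold, again with invented function and package names: the interesting detail is that the output's LegalHold container can be nil, so callers should not assume a status is always present.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// objectLegalHoldStatus returns the legal hold status ("ON"/"OFF"), or the
// empty string if the response carried no legal hold container.
func objectLegalHoldStatus(ctx context.Context, client *s3.Client, bucket, key string) (types.ObjectLockLegalHoldStatus, error) {
	out, err := client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return "", err
	}
	if out.LegalHold == nil {
		return "", nil // no legal hold information returned
	}
	return out.LegalHold.Status, nil
}
```
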
options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go new file mode 100644 index 000000000..91793c133 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object placed +// in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). The +// following action is related to GetObjectLockConfiguration: +// +// * +// GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { + if params == nil { + params = &GetObjectLockConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectLockConfiguration", params, optFns, c.addOperationGetObjectLockConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectLockConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectLockConfigurationInput struct { + + // The bucket whose Object Lock configuration you want to retrieve. When using this + // action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetObjectLockConfigurationOutput struct { + + // The specified bucket's Object Lock configuration. + ObjectLockConfiguration *types.ObjectLockConfiguration + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLockConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectLockConfiguration", + } +} + +// getGetObjectLockConfigurationBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectLockConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectLockConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: 
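
GetObjectLockConfiguration is bucket-scoped rather than object-scoped, so its input carries no Key. A hedged sketch of reading the default retention rule; note that Days and Years are plain int32 in this vendored SDK revision (later revisions moved such fields to pointers), and the function name is illustrative.

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// describeObjectLock prints a bucket's Object Lock state and, if present,
// the default retention rule applied to new objects.
func describeObjectLock(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	cfg := out.ObjectLockConfiguration
	if cfg == nil {
		fmt.Println("no Object Lock configuration")
		return nil
	}
	fmt.Printf("object lock enabled: %s\n", cfg.ObjectLockEnabled)
	if cfg.Rule != nil && cfg.Rule.DefaultRetention != nil {
		r := cfg.Rule.DefaultRetention
		fmt.Printf("default retention: mode=%s days=%d years=%d\n", r.Mode, r.Days, r.Years)
	}
	return nil
}
```
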
options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go new file mode 100644 index 000000000..33fc04897 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go @@ -0,0 +1,195 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This +// action is not supported by Amazon S3 on Outposts. The following action is +// related to GetObjectRetention: +// +// * GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { + if params == nil { + params = &GetObjectRetentionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectRetention", params, optFns, c.addOperationGetObjectRetentionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectRetentionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectRetentionInput struct { + + // The bucket name containing the object whose retention settings you want to + // retrieve. When using this action with an access point, you must direct requests + // to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object whose retention settings you want to retrieve. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The version ID for the object whose retention settings you want to retrieve. 
+ VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectRetentionOutput struct { + + // The container element for an object's retention settings. + Retention *types.ObjectLockRetention + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectRetention{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectRetention{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectRetention(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectRetentionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectRetention", + } +} + +// getGetObjectRetentionBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetObjectRetentionBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectRetentionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectRetentionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: 
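
The per-object counterpart is GetObjectRetention, which returns the retention mode and retain-until date for one object (or one version, via VersionId). A minimal sketch under the same assumptions as the previous examples:

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// objectRetention prints the retention settings of a single object, if any.
func objectRetention(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	if out.Retention != nil {
		fmt.Printf("mode=%s retain-until=%v\n", out.Retention.Mode, out.Retention.RetainUntilDate)
	}
	return nil
}
```
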
options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go new file mode 100644 index 000000000..cec5210c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go @@ -0,0 +1,223 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. To use this operation, you must have +// permission to perform the s3:GetObjectTagging action. By default, the GET action +// returns information about current version of an object. For a versioned bucket, +// you can have multiple versions of an object in your bucket. To retrieve tags of +// any other version, use the versionId query parameter. You also need permission +// for the s3:GetObjectVersionTagging action. By default, the bucket owner has this +// permission and can grant this permission to others. For information about the +// Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). The +// following actions are related to GetObjectTagging: +// +// * DeleteObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// * +// GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// * +// PutObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { + if params == nil { + params = &GetObjectTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectTagging", params, optFns, c.addOperationGetObjectTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectTaggingInput struct { + + // The bucket name containing the object for which to get the tagging information. + // When using this action with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. 
The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which to get the tagging information. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The versionId of the object for which to get the tagging information. + VersionId *string + + noSmithyDocumentSerde +} + +type GetObjectTaggingOutput struct { + + // Contains the tag set. + // + // This member is required. + TagSet []types.Tag + + // The versionId of the object for which you got the tagging information. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != 
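
For GetObjectTagging, the tag set comes back as a slice of Key/Value pairs; converting it to a map is a common caller-side convenience. A sketch, with invented names as before (aws.ToString safely dereferences the *string fields):

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// objectTags fetches an object's tag set and flattens it into a map.
func objectTags(ctx context.Context, client *s3.Client, bucket, key string) (map[string]string, error) {
	out, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return nil, err
	}
	tags := make(map[string]string, len(out.TagSet))
	for _, t := range out.TagSet {
		tags[aws.ToString(t.Key)] = aws.ToString(t.Value)
	}
	return tags, nil
}
```
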
nil { + return err + } + if err = addGetObjectTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectTagging", + } +} + +// getGetObjectTaggingBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetObjectTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectTaggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go new file mode 100644 index 000000000..fa71442c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go @@ -0,0 +1,191 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" +) + +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. For more information about BitTorrent, see +// Using BitTorrent with Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). You can get +// torrent only for objects that are less than 5 GB in size, and that are not +// encrypted using server-side encryption with a customer-provided encryption key. +// To use GET, you must have READ access to the object. This action is not +// supported by Amazon S3 on Outposts. 
The following action is related to +// GetObjectTorrent: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { + if params == nil { + params = &GetObjectTorrentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetObjectTorrent", params, optFns, c.addOperationGetObjectTorrentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetObjectTorrentOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetObjectTorrentInput struct { + + // The name of the bucket containing the object for which to get the torrent files. + // + // This member is required. + Bucket *string + + // The object key for which to get the information. + // + // This member is required. + Key *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +type GetObjectTorrentOutput struct { + + // A Bencoded dictionary as defined by the BitTorrent specification + Body io.ReadCloser + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. 
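
GetObjectTorrent is the only streaming operation in this batch: its output Body is an io.ReadCloser, and (visibly in the generated stack below) AddCloseResponseBodyMiddleware is omitted so the response body stays open for the caller, who must close it. A sketch that writes the torrent file to disk; path handling and names are illustrative.

```go
package example

import (
	"context"
	"io"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// saveTorrent streams the torrent representation of an object to a local file.
func saveTorrent(ctx context.Context, client *s3.Client, bucket, key, path string) error {
	out, err := client.GetObjectTorrent(ctx, &s3.GetObjectTorrentInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close() // streaming body; closing it is the caller's job

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, out.Body)
	return err
}
```
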
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTorrent{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTorrent{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTorrent(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetObjectTorrentUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetObjectTorrent(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetObjectTorrent", + } +} + +// getGetObjectTorrentBucketMember returns a pointer to string denoting a provided +// bucket member valueand a boolean indicating if the input has a modeled bucket +// name, +func getGetObjectTorrentBucketMember(input interface{}) (*string, bool) { + in := input.(*GetObjectTorrentInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetObjectTorrentUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetObjectTorrentBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go new file mode 100644 index 000000000..eb42c7d27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go @@ -0,0 +1,196 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For +// more information about Amazon S3 permissions, see Specifying Permissions in a +// Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the +// PublicAccessBlock settings are different between the bucket and the account, +// Amazon S3 uses the most restrictive combination of the bucket-level and +// account-level settings. For more information about when Amazon S3 considers a +// bucket or an object public, see The Meaning of "Public" +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 +// Block Public Access +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * +// PutPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// * +// GetPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * +// DeletePublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { + if params == nil { + params = &GetPublicAccessBlockInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetPublicAccessBlock", params, optFns, c.addOperationGetPublicAccessBlockMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetPublicAccessBlockOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetPublicAccessBlockInput struct { + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want + // to retrieve. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type GetPublicAccessBlockOutput struct { + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. 
+ PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpGetPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicAccessBlock(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addGetPublicAccessBlockUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "GetPublicAccessBlock", + } +} + +// getGetPublicAccessBlockBucketMember returns a pointer to string denoting a +// provided bucket member valueand a boolean indicating if the input has a modeled +// bucket name, +func getGetPublicAccessBlockBucketMember(input interface{}) (*string, bool) { + in := input.(*GetPublicAccessBlockInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetPublicAccessBlockBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: 
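
A caller-side sketch for GetPublicAccessBlock: the four block/ignore/restrict flags arrive in a single configuration struct. Printing with %+v is used here deliberately, since the flag fields are plain bools in this vendored revision but became pointers in later SDK releases; the function name is illustrative.

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printPublicAccessBlock dumps the effective Block Public Access settings.
func printPublicAccessBlock(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetPublicAccessBlock(ctx, &s3.GetPublicAccessBlockInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if c := out.PublicAccessBlockConfiguration; c != nil {
		fmt.Printf("%+v\n", *c)
	}
	return nil
}
```
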
options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go new file mode 100644 index 000000000..c3d9a16f2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go @@ -0,0 +1,541 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "errors" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "time" +) + +// This action is useful to determine if a bucket exists and you have permission to +// access it. The action returns a 200 OK if the bucket exists and you have +// permission to access it. If the bucket does not exist or you do not have +// permission to access it, the HEAD request returns a generic 404 Not Found or 403 +// Forbidden code. A message body is not included, so you cannot determine the +// exception beyond these error codes. To use this operation, you must have +// permissions to perform the s3:ListBucket action. The bucket owner has this +// permission by default and can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// To use this API against an access point, you must provide the alias of the +// access point in place of the bucket name or specify the access point ARN. When +// using the access point ARN, you must direct requests to the access point +// hostname. The access point hostname takes the form +// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the +// Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For +// more information see, Using access points +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html). +func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { + if params == nil { + params = &HeadBucketInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "HeadBucket", params, optFns, c.addOperationHeadBucketMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*HeadBucketOutput) + out.ResultMetadata = metadata + return out, nil +} + +type HeadBucketInput struct { + + // The bucket name. When using this action with an access point, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type HeadBucketOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpHeadBucket{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadBucket{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpHeadBucketValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadBucket(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addHeadBucketUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// HeadBucketAPIClient is a client that implements the HeadBucket operation. 
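+// +// A minimal usage sketch, not part of the generated documentation: it assumes +// an initialized *Client named client, a context.Context named ctx, the +// aws.String helper from github.com/aws/aws-sdk-go-v2/aws, and a placeholder +// bucket name; error handling is elided. +// +// var api HeadBucketAPIClient = client +// _, err := api.HeadBucket(ctx, &HeadBucketInput{Bucket: aws.String("my-bucket")}) +// accessible := err == nil // 200 OK means the bucket exists and is accessible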
+type HeadBucketAPIClient interface { + HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) +} + +var _ HeadBucketAPIClient = (*Client)(nil) + +// BucketExistsWaiterOptions are waiter options for BucketExistsWaiter +type BucketExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // BucketExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, BucketExistsWaiter will use default max delay of 120 seconds. Note that + // MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) +} + +// BucketExistsWaiter defines the waiters for BucketExists +type BucketExistsWaiter struct { + client HeadBucketAPIClient + + options BucketExistsWaiterOptions +} + +// NewBucketExistsWaiter constructs a BucketExistsWaiter. +func NewBucketExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketExistsWaiterOptions)) *BucketExistsWaiter { + options := BucketExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = bucketExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &BucketExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for BucketExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *BucketExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for BucketExists waiter and returns the +// output of the successful operation. The maxWaitDur is the maximum wait duration +// the waiter will wait. The maxWaitDur is required and must be greater than zero. 
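+// +// A minimal usage sketch, not part of the generated documentation (assumes an +// initialized *Client named client, a context ctx, the aws.String helper, and +// a placeholder bucket name; error handling elided): +// +// waiter := NewBucketExistsWaiter(client, func(o *BucketExistsWaiterOptions) { +// o.LogWaitAttempts = true // log each HeadBucket attempt while waiting +// }) +// out, err := waiter.WaitForOutput(ctx, &HeadBucketInput{Bucket: aws.String("my-bucket")}, 2*time.Minute)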
+func (w *BucketExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) (*HeadBucketOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for BucketExists waiter") +} + +func bucketExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) { + + if err == nil { + return false, nil + } + + if err != nil { + var errorType *types.NotFound + if errors.As(err, &errorType) { + return true, nil + } + } + + return true, nil +} + +// BucketNotExistsWaiterOptions are waiter options for BucketNotExistsWaiter +type BucketNotExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // BucketNotExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, BucketNotExistsWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. 
This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) +} + +// BucketNotExistsWaiter defines the waiters for BucketNotExists +type BucketNotExistsWaiter struct { + client HeadBucketAPIClient + + options BucketNotExistsWaiterOptions +} + +// NewBucketNotExistsWaiter constructs a BucketNotExistsWaiter. +func NewBucketNotExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketNotExistsWaiterOptions)) *BucketNotExistsWaiter { + options := BucketNotExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = bucketNotExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &BucketNotExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for BucketNotExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *BucketNotExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for BucketNotExists waiter and returns +// the output of the successful operation. The maxWaitDur is the maximum wait +// duration the waiter will wait. The maxWaitDur is required and must be greater +// than zero. +func (w *BucketNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) (*HeadBucketOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for BucketNotExists waiter") +} + +func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) { + + if err != nil { + var errorType *types.NotFound + if errors.As(err, &errorType) { + return false, nil + } + } + + return true, nil +} + +func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "HeadBucket", + } +} + +// getHeadBucketBucketMember returns a pointer to string denoting a provided bucket +// member value and a boolean indicating if the input has a modeled bucket name. +func getHeadBucketBucketMember(input interface{}) (*string, bool) { + in := input.(*HeadBucketInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addHeadBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getHeadBucketBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignHeadBucket is used to generate a presigned HTTP Request which contains +// a presigned URL, signed headers, and the HTTP method used. 
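+// +// A minimal, hypothetical sketch (assumes an initialized *Client named client, +// a context ctx, the aws.String helper, and a placeholder bucket name; error +// handling elided): +// +// presigner := NewPresignClient(client) +// req, err := presigner.PresignHeadBucket(ctx, &HeadBucketInput{Bucket: aws.String("my-bucket")}) +// // req.Method, req.URL, and req.SignedHeader describe the resulting request.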
+func (c *PresignClient) PresignHeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &HeadBucketInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "HeadBucket", params, clientOptFns, + c.client.addOperationHeadBucketMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addHeadBucketPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addHeadBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go new file mode 100644 index 000000000..1e745a7e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go @@ -0,0 +1,884 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "errors" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "time" +) + +// The HEAD action retrieves metadata from an object without returning the object +// itself. This action is useful if you're only interested in an object's metadata. +// To use HEAD, you must have READ access to the object. A HEAD request has the +// same options as a GET action on an object. The response is identical to the GET +// response except that there is no response body. Because of this, if the HEAD +// request generates an error, it returns a generic 404 Not Found or 403 Forbidden +// code. It is not possible to retrieve the exact exception beyond these error +// codes. If you encrypt an object by using server-side encryption with +// customer-provided encryption keys (SSE-C) when you store the object in Amazon +// S3, then when you retrieve the metadata from the object, you must use the +// following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * +// x-amz-server-side-encryption-customer-key +// +// * +// x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, +// see Server-Side Encryption (Using Customer-Provided Encryption Keys) +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// * +// Encryption request headers, like x-amz-server-side-encryption, should not be +// sent for GET requests if your object uses server-side encryption with KMS keys +// (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys +// (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 +// BadRequest error. +// +// * The last modified property in this case is the creation +// date of the object. +// +// Request headers are limited to 8 KB in size. 
For more +// information, see Common Request Headers +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// Consider the following when using request headers: +// +// * Consideration 1 – If both +// of the If-Match and If-Unmodified-Since headers are present in the request as +// follows: +// +// * If-Match condition evaluates to true, and; +// +// * If-Unmodified-Since +// condition evaluates to false; +// +// Then Amazon S3 returns 200 OK and the data +// requested. +// +// * Consideration 2 – If both of the If-None-Match and +// If-Modified-Since headers are present in the request as follows: +// +// * +// If-None-Match condition evaluates to false, and; +// +// * If-Modified-Since condition +// evaluates to true; +// +// Then Amazon S3 returns the 304 Not Modified response +// code. +// +// For more information about conditional requests, see RFC 7232 +// (https://tools.ietf.org/html/rfc7232). Permissions You need the relevant read +// object (or version) permission for this operation. For more information, see +// Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If +// the object you request does not exist, the error Amazon S3 returns depends on +// whether you also have the s3:ListBucket permission. +// +// * If you have the +// s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code +// 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, +// Amazon S3 returns an HTTP status code 403 ("access denied") error. +// +// The +// following actions are related to HeadObject: +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { + if params == nil { + params = &HeadObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "HeadObject", params, optFns, c.addOperationHeadObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*HeadObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type HeadObjectInput struct { + + // The name of the bucket containing the object. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. 
+ // + // This member is required. + Bucket *string + + // The object key. + // + // This member is required. + Key *string + + // To retrieve the checksum, this parameter must be enabled. In addition, if you + // enable ChecksumMode and the object is encrypted with Amazon Web Services Key + // Management Service (Amazon Web Services KMS), you must have permission to use + // the kms:Decrypt action for the request to succeed. + ChecksumMode types.ChecksumMode + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Return the object only if its entity tag (ETag) is the same as the one + // specified; otherwise, return a 412 (precondition failed) error. + IfMatch *string + + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. + IfModifiedSince *time.Time + + // Return the object only if its entity tag (ETag) is different from the one + // specified; otherwise, return a 304 (not modified) error. + IfNoneMatch *string + + // Return the object only if it has not been modified since the specified time; + // otherwise, return a 412 (precondition failed) error. + IfUnmodifiedSince *time.Time + + // Part number of the object being read. This is a positive integer between 1 and + // 10,000. Effectively performs a 'ranged' HEAD request for the part specified. + // Useful for querying the size of the part and the number of parts in this + // object. + PartNumber int32 + + // Because HeadObject returns only the metadata for an object, this parameter has + // no effect. + Range *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key. The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type HeadObjectOutput struct { + + // Indicates that a range of bytes was specified. + AcceptRanges *string + + // The archive state of the head object. + ArchiveStatus types.ArchiveStatus + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // Specifies caching behavior along the request/reply chain. + CacheControl *string + + // The base64-encoded, 32-bit CRC32 checksum of the object. 
This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Specifies presentational information for the object. + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // Size of the body in bytes. + ContentLength int64 + + // A standard MIME type describing the format of the object data. + ContentType *string + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker bool + + // An entity tag (ETag) is an opaque identifier assigned by a web server to a + // specific version of a resource found at a URL. + ETag *string + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key-value pairs + // providing object expiration information. The value of the rule-id is + // URL-encoded. + Expiration *string + + // The date and time at which the object is no longer cacheable. + Expires *time.Time + + // Creation date of the object. + LastModified *time.Time + + // A map of metadata to store with the object in S3. + // + // Map keys will be normalized to lower-case. + Metadata map[string]string + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. 
This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, you + // can create metadata whose values are not legal HTTP headers. + MissingMeta int32 + + // Specifies whether a legal hold is in effect for this object. This header is only + // returned if the requester has the s3:GetObjectLegalHold permission. This header + // is not returned if the specified version of this object has never had a legal + // hold applied. For more information about S3 Object Lock, see Object Lock + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // The Object Lock mode, if any, that's in effect for this object. This header is + // only returned if the requester has the s3:GetObjectRetention permission. For + // more information about S3 Object Lock, see Object Lock + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockMode types.ObjectLockMode + + // The date and time when the Object Lock retention period expires. This header is + // only returned if the requester has the s3:GetObjectRetention permission. + ObjectLockRetainUntilDate *time.Time + + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. + PartsCount int32 + + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or a destination in a replication rule. In replication, you have + // a source bucket on which you configure replication and destination bucket or + // buckets where Amazon S3 stores object replicas. When you request an object + // (GetObject) or object metadata (HeadObject) from these buckets, Amazon S3 will + // return the x-amz-replication-status header in the response as follows: + // + // * If + // requesting an object from the source bucket, Amazon S3 will return the + // x-amz-replication-status header if the object in your request is eligible for + // replication. For example, suppose that in your replication configuration, you + // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with key + // prefix TaxDocs. Any objects you upload with this key name prefix, for example + // TaxDocs/document1.pdf, are eligible for replication. For any object request with + // this key name prefix, Amazon S3 will return the x-amz-replication-status header + // with value PENDING, COMPLETED or FAILED indicating object replication status. + // + // * + // If requesting an object from a destination bucket, Amazon S3 will return the + // x-amz-replication-status header with value REPLICA if the object in your request + // is a replica that Amazon S3 created and there is no replica modification + // replication in progress. + // + // * When replicating objects to multiple destination + // buckets, the x-amz-replication-status header acts differently. The header of the + // source object will only return a value of COMPLETED when replication is + // successful to all destinations. The header will remain at value PENDING until + // replication has completed for all destinations. If one or more destinations + // fails replication the header will return FAILED. + // + // For more information, see + // Replication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). 
+ ReplicationStatus types.ReplicationStatus + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // If the object is an archived object (an object whose storage class is GLACIER), + // the response includes this header if either the archive restoration is in + // progress (see RestoreObject + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) or an + // archive copy is already restored. If an archive copy is already restored, the + // header value indicates when Amazon S3 is scheduled to delete the object copy. + // For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec + // 2012 00:00:00 GMT" If the object restoration is in progress, the header returns + // the value ongoing-request="true". For more information about archiving objects, + // see Transitioning Objects: General Considerations + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). + Restore *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string + + // If present, specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. + SSEKMSKeyId *string + + // If the object is stored using server-side encryption either with an Amazon Web + // Services KMS key or an Amazon S3-managed encryption key, the response includes + // this header with the value of the server-side encryption algorithm used when + // storing this object in Amazon S3 (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // Provides storage class information of the object. Amazon S3 returns this header + // for all objects except for S3 Standard storage class objects. For more + // information, see Storage Classes + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + StorageClass types.StorageClass + + // Version of the object. + VersionId *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. + WebsiteRedirectLocation *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpHeadObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpHeadObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addHeadObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// HeadObjectAPIClient is a client that implements the HeadObject operation. +type HeadObjectAPIClient interface { + HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) +} + +var _ HeadObjectAPIClient = (*Client)(nil) + +// ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter +type ObjectExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // ObjectExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, ObjectExistsWaiter will use default max delay of 120 seconds. Note that + // MaxDelay must resolve to value greater than or equal to the MinDelay. 
+ MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) +} + +// ObjectExistsWaiter defines the waiters for ObjectExists +type ObjectExistsWaiter struct { + client HeadObjectAPIClient + + options ObjectExistsWaiterOptions +} + +// NewObjectExistsWaiter constructs a ObjectExistsWaiter. +func NewObjectExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectExistsWaiterOptions)) *ObjectExistsWaiter { + options := ObjectExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = objectExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &ObjectExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for ObjectExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *ObjectExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for ObjectExists waiter and returns the +// output of the successful operation. The maxWaitDur is the maximum wait duration +// the waiter will wait. The maxWaitDur is required and must be greater than zero. +func (w *ObjectExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) (*HeadObjectOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadObject(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for ObjectExists waiter") +} + +func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { + + if err == nil { + return false, nil + } + + if err != nil { + var apiErr smithy.APIError + ok := errors.As(err, &apiErr) + if !ok { + return false, fmt.Errorf("expected err to be of type smithy.APIError, got %w", err) + } + + if "NotFound" == apiErr.ErrorCode() { + return true, nil + } + } + + return true, nil +} + +// ObjectNotExistsWaiterOptions are waiter options for ObjectNotExistsWaiter +type ObjectNotExistsWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // ObjectNotExistsWaiter will use default minimum delay of 5 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or set + // to zero, ObjectNotExistsWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. By + // default service-modeled logic will populate this option. This option can thus be + // used to define a custom waiter state with fall-back to service-modeled waiter + // state mutators.The function returns an error in case of a failure state. In case + // of retry state, this function returns a bool value of true and nil error, while + // in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) +} + +// ObjectNotExistsWaiter defines the waiters for ObjectNotExists +type ObjectNotExistsWaiter struct { + client HeadObjectAPIClient + + options ObjectNotExistsWaiterOptions +} + +// NewObjectNotExistsWaiter constructs a ObjectNotExistsWaiter. 
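+// +// A minimal usage sketch, not part of the generated documentation (assumes an +// initialized *Client named client, a context ctx, the aws.String helper, and +// placeholder bucket and key names; error handling elided): +// +// waiter := NewObjectNotExistsWaiter(client, func(o *ObjectNotExistsWaiterOptions) { +// o.MaxDelay = 30 * time.Second // cap the backoff between HeadObject attempts +// }) +// err := waiter.Wait(ctx, &HeadObjectInput{Bucket: aws.String("my-bucket"), Key: aws.String("my-key")}, 5*time.Minute)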
+func NewObjectNotExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectNotExistsWaiterOptions)) *ObjectNotExistsWaiter { + options := ObjectNotExistsWaiterOptions{} + options.MinDelay = 5 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = objectNotExistsStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &ObjectNotExistsWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for ObjectNotExists waiter. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *ObjectNotExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for ObjectNotExists waiter and returns +// the output of the successful operation. The maxWaitDur is the maximum wait +// duration the waiter will wait. The maxWaitDur is required and must be greater +// than zero. +func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) (*HeadObjectOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.HeadObject(ctx, params, func(o *Options) { + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for ObjectNotExists waiter") +} + +func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { + + if err != nil { + var apiErr smithy.APIError + ok := errors.As(err, &apiErr) + if !ok { + return false, fmt.Errorf("expected err to be of type smithy.APIError, got %w", err) + } + + if "NotFound" == apiErr.ErrorCode() { + return false, nil + } + } + + return true, nil +} + +func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "HeadObject", + } +} + +// getHeadObjectBucketMember returns a pointer to string denoting a provided bucket +// member value and a boolean indicating if the input has a modeled bucket name. +func getHeadObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*HeadObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addHeadObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getHeadObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignHeadObject is used to generate a presigned HTTP Request which contains +// a presigned URL, signed headers, and the HTTP method used. 
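+// +// A minimal, hypothetical sketch of sharing a short-lived metadata check +// (assumes an initialized *Client named client, a context ctx, the aws.String +// helper, placeholder bucket and key names, and the Expires field of +// PresignOptions; error handling elided): +// +// presigner := NewPresignClient(client, func(o *PresignOptions) { +// o.Expires = 15 * time.Minute // how long the presigned request stays valid +// }) +// req, err := presigner.PresignHeadObject(ctx, &HeadObjectInput{Bucket: aws.String("my-bucket"), Key: aws.String("my-key")}) +// // The request can then be issued by any HTTP client via req.Method, req.URL, and req.SignedHeader.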
+func (c *PresignClient) PresignHeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &HeadObjectInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "HeadObject", params, clientOptFns, + c.client.addOperationHeadObjectMiddlewares, + presignConverter(options).convertToPresignMiddleware, + addHeadObjectPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addHeadObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go new file mode 100644 index 000000000..0a0373f29 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -0,0 +1,214 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. This action supports list pagination and +// does not return more than 100 configurations at a time. You should always check +// the IsTruncated element in the response. If there are no more configurations to +// list, IsTruncated is set to false. If there are more configurations to list, +// IsTruncated is set to true, and there will be a value in NextContinuationToken. +// You use the NextContinuationToken value to continue the pagination of the list +// by passing the value in continuation-token in the request to GET the next page. +// To use this operation, you must have permissions to perform the +// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics – +// Storage Class Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). 
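+// For illustration, a minimal pagination sketch, not part of the generated +// documentation (assumes an initialized *Client named client, a context ctx, +// the aws.String helper, and a placeholder bucket name; error handling elided): +// +// params := &ListBucketAnalyticsConfigurationsInput{Bucket: aws.String("my-bucket")} +// for { +// out, err := client.ListBucketAnalyticsConfigurations(ctx, params) +// if err != nil { +// break +// } +// // consume out.AnalyticsConfigurationList here +// if !out.IsTruncated { +// break // no more pages +// } +// params.ContinuationToken = out.NextContinuationToken +// } +//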
+// The following operations are related to ListBucketAnalyticsConfigurations: +// +// * +// GetBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * +// DeleteBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * +// PutBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { + if params == nil { + params = &ListBucketAnalyticsConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBucketAnalyticsConfigurations", params, optFns, c.addOperationListBucketAnalyticsConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketAnalyticsConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketAnalyticsConfigurationsInput struct { + + // The name of the bucket from which analytics configurations are retrieved. + // + // This member is required. + Bucket *string + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type ListBucketAnalyticsConfigurationsOutput struct { + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []types.AnalyticsConfiguration + + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string + + // Indicates whether the returned list of analytics configurations is complete. A + // value of true indicates that the list is not complete and the + // NextContinuationToken will be provided for a subsequent request. + IsTruncated bool + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketAnalyticsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketAnalyticsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketAnalyticsConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketAnalyticsConfigurations",
+ }
+}
+
+// getListBucketAnalyticsConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getListBucketAnalyticsConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketAnalyticsConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketAnalyticsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketAnalyticsConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ 
TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go new file mode 100644 index 000000000..972a69c99 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -0,0 +1,206 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by +// automatically moving data to the most cost-effective storage access tier, +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput access +// tiers. To get the lowest storage cost on data that can be accessed in minutes to +// hours, you can choose to activate additional archiving capabilities. The S3 +// Intelligent-Tiering storage class is the ideal storage class for data with +// unknown, changing, or unpredictable access patterns, independent of object size +// or retention period. If the size of an object is less than 128 KB, it is not +// monitored and not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). 
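Tying the pieces of ListBucketAnalyticsConfigurations defined above together, a pagination sketch that follows the IsTruncated/NextContinuationToken contract from the doc comments. Illustrative only; the s3sketch package and helper names are invented for the example, and client construction follows the earlier sketches.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listAnalyticsConfigs pages through every analytics configuration on a bucket.
func listAnalyticsConfigs(ctx context.Context, client *s3.Client, bucket string) error {
	var token *string
	for {
		out, err := client.ListBucketAnalyticsConfigurations(ctx, &s3.ListBucketAnalyticsConfigurationsInput{
			Bucket:            aws.String(bucket),
			ContinuationToken: token, // nil requests the first page
		})
		if err != nil {
			return err
		}
		for _, c := range out.AnalyticsConfigurationList {
			fmt.Println(aws.ToString(c.Id))
		}
		if !out.IsTruncated {
			return nil // no further pages
		}
		token = out.NextContinuationToken
	}
}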
+// Operations related to ListBucketIntelligentTieringConfigurations include:
+//
+// *
+// DeleteBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html)
+//
+// *
+// PutBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html)
+//
+// *
+// GetBucketIntelligentTieringConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html)
+func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketIntelligentTieringConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketIntelligentTieringConfigurations", params, optFns, c.addOperationListBucketIntelligentTieringConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketIntelligentTieringConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBucketIntelligentTieringConfigurationsInput struct {
+
+ // The name of the Amazon S3 bucket whose configuration you want to modify or
+ // retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string
+
+ noSmithyDocumentSerde
+}
+
+type ListBucketIntelligentTieringConfigurationsOutput struct {
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string
+
+ // The list of S3 Intelligent-Tiering configurations for a bucket.
+ IntelligentTieringConfigurationList []types.IntelligentTieringConfiguration
+
+ // Indicates whether the returned list of S3 Intelligent-Tiering configurations is
+ // complete. A value of true indicates that the list is not complete and the
+ // NextContinuationToken will be provided for a subsequent request.
+ IsTruncated bool
+
+ // The marker used to continue this S3 Intelligent-Tiering configuration listing.
+ // Use the NextContinuationToken from this response to continue the listing in a
+ // subsequent request. The continuation token is an opaque value that Amazon S3
+ // understands.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketIntelligentTieringConfigurations",
+ }
+}
+
+// getListBucketIntelligentTieringConfigurationsBucketMember returns a pointer to
+// string denoting a provided bucket member value and a boolean indicating if the
+// input has a modeled bucket name.
+func getListBucketIntelligentTieringConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketIntelligentTieringConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketIntelligentTieringConfigurationsBucketMember,
+ },
+ 
UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
new file mode 100644
index 000000000..e6c8c79a8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
@@ -0,0 +1,215 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a list of inventory configurations for the bucket. You can have up to
+// 1,000 inventory configurations per bucket. This action supports list pagination
+// and does not return more than 100 configurations at a time. Always check the
+// IsTruncated element in the response. If there are no more configurations to
+// list, IsTruncated is set to false. If there are more configurations to list,
+// IsTruncated is set to true, and there is a value in NextContinuationToken. You
+// use the NextContinuationToken value to continue the pagination of the list by
+// passing the value in continuation-token in the request to GET the next page. To
+// use this operation, you must have permissions to perform the
+// s3:GetInventoryConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see Permissions Related to Bucket Subresource
+// Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
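The ListBucketIntelligentTieringConfigurations operation shown further above follows the same token contract. Since a bucket rarely carries many of these configurations, a single-page sketch (same invented names and caveats as the earlier examples) is often enough, with truncation handled as before.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func printTieringConfigs(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.ListBucketIntelligentTieringConfigurations(ctx,
		&s3.ListBucketIntelligentTieringConfigurationsInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	for _, c := range out.IntelligentTieringConfigurationList {
		fmt.Println(aws.ToString(c.Id), c.Status) // Status is Enabled or Disabled
	}
	if out.IsTruncated {
		// More pages exist; continue with out.NextContinuationToken
		// exactly as in the analytics sketch above.
		fmt.Println("listing truncated; continue with", aws.ToString(out.NextContinuationToken))
	}
	return nil
}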
+// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). The
+// following operations are related to ListBucketInventoryConfigurations:
+//
+// *
+// GetBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html)
+//
+// *
+// DeleteBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html)
+//
+// *
+// PutBucketInventoryConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html)
+func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketInventoryConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketInventoryConfigurations", params, optFns, c.addOperationListBucketInventoryConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketInventoryConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBucketInventoryConfigurationsInput struct {
+
+ // The name of the bucket containing the inventory configurations to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The marker used to continue an inventory configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value that
+ // Amazon S3 understands.
+ ContinuationToken *string
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+type ListBucketInventoryConfigurationsOutput struct {
+
+ // If sent in the request, the marker that is used as a starting point for this
+ // inventory configuration list response.
+ ContinuationToken *string
+
+ // The list of inventory configurations for a bucket.
+ InventoryConfigurationList []types.InventoryConfiguration
+
+ // Tells whether the returned list of inventory configurations is complete. A value
+ // of true indicates that the list is not complete and the NextContinuationToken is
+ // provided for a subsequent request.
+ IsTruncated bool
+
+ // The marker used to continue this inventory configuration listing. Use the
+ // NextContinuationToken from this response to continue the listing in a subsequent
+ // request. The continuation token is an opaque value that Amazon S3 understands.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketInventoryConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketInventoryConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketInventoryConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketInventoryConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketInventoryConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketInventoryConfigurations",
+ }
+}
+
+// getListBucketInventoryConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getListBucketInventoryConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketInventoryConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketInventoryConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketInventoryConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ 
TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go new file mode 100644 index 000000000..50b207af6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -0,0 +1,218 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the metrics configurations for the bucket. The metrics configurations are +// only for the request metrics of the bucket and do not provide information on +// daily storage metrics. You can have up to 1,000 configurations per bucket. This +// action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there are no +// more configurations to list, IsTruncated is set to false. If there are more +// configurations to list, IsTruncated is set to true, and there is a value in +// NextContinuationToken. You use the NextContinuationToken value to continue the +// pagination of the list by passing the value in continuation-token in the request +// to GET the next page. To use this operation, you must have permissions to +// perform the s3:GetMetricsConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
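For ListBucketInventoryConfigurations above, a sketch that accumulates all pages into a slice rather than printing as it goes. Again illustrative; the helper name is invented.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// allInventoryConfigs gathers every inventory configuration across pages.
func allInventoryConfigs(ctx context.Context, client *s3.Client, bucket string) ([]types.InventoryConfiguration, error) {
	var (
		configs []types.InventoryConfiguration
		token   *string
	)
	for {
		out, err := client.ListBucketInventoryConfigurations(ctx, &s3.ListBucketInventoryConfigurationsInput{
			Bucket:            aws.String(bucket),
			ContinuationToken: token,
		})
		if err != nil {
			return nil, err
		}
		configs = append(configs, out.InventoryConfigurationList...)
		if !out.IsTruncated {
			return configs, nil
		}
		token = out.NextContinuationToken
	}
}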
+// The following operations are related to ListBucketMetricsConfigurations: +// +// * +// PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// * +// GetBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// * +// DeleteBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { + if params == nil { + params = &ListBucketMetricsConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBucketMetricsConfigurations", params, optFns, c.addOperationListBucketMetricsConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketMetricsConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketMetricsConfigurationsInput struct { + + // The name of the bucket containing the metrics configurations to retrieve. + // + // This member is required. + Bucket *string + + // The marker that is used to continue a metrics configuration listing that has + // been truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value that + // Amazon S3 understands. + ContinuationToken *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type ListBucketMetricsConfigurationsOutput struct { + + // The marker that is used as a starting point for this metrics configuration list + // response. This value is present if it was sent in the request. + ContinuationToken *string + + // Indicates whether the returned list of metrics configurations is complete. A + // value of true indicates that the list is not complete and the + // NextContinuationToken will be provided for a subsequent request. + IsTruncated bool + + // The list of metrics configurations for a bucket. + MetricsConfigurationList []types.MetricsConfiguration + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value that + // Amazon S3 understands. + NextContinuationToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketMetricsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketMetricsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketMetricsConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListBucketMetricsConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListBucketMetricsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListBucketMetricsConfigurations",
+ }
+}
+
+// getListBucketMetricsConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getListBucketMetricsConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketMetricsConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketMetricsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketMetricsConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ 
EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go new file mode 100644 index 000000000..7a3de38f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go @@ -0,0 +1,145 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of all buckets owned by the authenticated sender of the request. +// To use this operation, you must have the s3:ListAllMyBuckets permission. +func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { + if params == nil { + params = &ListBucketsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListBuckets", params, optFns, c.addOperationListBucketsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListBucketsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListBucketsInput struct { + noSmithyDocumentSerde +} + +type ListBucketsOutput struct { + + // The list of buckets owned by the requester. + Buckets []types.Bucket + + // The owner of the buckets listed. + Owner *types.Owner + + // Metadata pertaining to the operation's result. 
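For ListBucketMetricsConfigurations further above, a single-page sketch; truncation is handled with the same NextContinuationToken hand-off as in the analytics example (names invented, client wiring as before).

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func printMetricsConfigs(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.ListBucketMetricsConfigurations(ctx,
		&s3.ListBucketMetricsConfigurationsInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	for _, m := range out.MetricsConfigurationList {
		fmt.Println(aws.ToString(m.Id)) // CloudWatch request-metrics filter ID
	}
	if out.IsTruncated {
		// Continue with out.NextContinuationToken as the next
		// ContinuationToken, exactly as in the analytics sketch.
		fmt.Println("listing truncated at 100 entries")
	}
	return nil
}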
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpListBuckets{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBuckets{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addListBucketsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "ListBuckets", + } +} + +func addListBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: nopGetBucketAccessor, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: false, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go new file mode 100644 index 000000000..af281a252 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go @@ -0,0 +1,310 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
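The ListBuckets operation defined above is the simplest of the group: no parameters, no pagination. A minimal sketch under the same placeholder conventions as the earlier examples.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBuckets lists every bucket owned by the calling account.
func printBuckets(ctx context.Context, client *s3.Client) error {
	out, err := client.ListBuckets(ctx, &s3.ListBucketsInput{})
	if err != nil {
		return err
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.ToString(b.Name))
	}
	return nil
}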
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This action lists in-progress multipart uploads. An in-progress multipart upload
+// is a multipart upload that has been initiated using the Initiate Multipart
+// Upload request, but has not yet been completed or aborted. This action returns
+// at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the
+// maximum number of uploads a response can include, which is also the default
+// value. You can further limit the number of uploads in a response by specifying
+// the max-uploads parameter in the request. If additional multipart uploads
+// satisfy the list criteria, the response will contain an IsTruncated element with
+// the value true. To list the additional multipart uploads, use the key-marker and
+// upload-id-marker request parameters. In the response, the uploads are sorted by
+// key. If your application has initiated more than one multipart upload using the
+// same object key, then uploads in the response are first sorted by key.
+// Additionally, uploads are sorted in ascending order within each key by the
+// upload initiation time. For more information on multipart uploads, see Uploading
+// Objects Using Multipart Upload
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). For
+// information on permissions required to use the multipart upload API, see
+// Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The
+// following operations are related to ListMultipartUploads:
+//
+// *
+// CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// *
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) {
+ if params == nil {
+ params = &ListMultipartUploadsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListMultipartUploads", params, optFns, c.addOperationListMultipartUploadsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListMultipartUploadsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListMultipartUploadsInput struct {
+
+ // The name of the bucket to which the multipart upload was initiated. When using
+ // this action with an access point, you must direct requests to the access point
+ // hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Character you use to group keys. All keys that contain the same string between + // the prefix, if specified, and the first occurrence of the delimiter after the + // prefix are grouped under a single result element, CommonPrefixes. If you don't + // specify the prefix parameter, then the substring starts at the beginning of the + // key. The keys that are grouped under CommonPrefixes result element are not + // returned elsewhere in the response. + Delimiter *string + + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters with an + // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. + EncodingType types.EncodingType + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. If upload-id-marker is not specified, only the + // keys lexicographically greater than the specified key-marker will be included in + // the list. If upload-id-marker is specified, any multipart uploads for a key + // equal to the key-marker might also be included, provided those multipart uploads + // have upload IDs lexicographically greater than the specified upload-id-marker. + KeyMarker *string + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the + // response body. 1,000 is the maximum number of uploads that can be returned in a + // response. + MaxUploads int32 + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different grouping of + // keys. (You can think of using prefix to make groups in the same way you'd use a + // folder in a file system.) + Prefix *string + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter is + // ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. 
+ UploadIdMarker *string + + noSmithyDocumentSerde +} + +type ListMultipartUploadsOutput struct { + + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. + Bucket *string + + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. + CommonPrefixes []types.CommonPrefix + + // Contains the delimiter you specified in the request. If you don't specify a + // delimiter in your request, this element is absent from the response. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object keys in the response. If you + // specify encoding-type request parameter, Amazon S3 includes this element in the + // response, and returns encoded key name values in the following response + // elements: Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. + EncodingType types.EncodingType + + // Indicates whether the returned list of multipart uploads is truncated. A value + // of true indicates that the list was truncated. The list can be truncated if the + // number of multipart uploads exceeds the limit allowed or specified by max + // uploads. + IsTruncated bool + + // The key at or after which the listing began. + KeyMarker *string + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads int32 + + // When a list is truncated, this element specifies the value that should be used + // for the key-marker request parameter in a subsequent request. + NextKeyMarker *string + + // When a list is truncated, this element specifies the value that should be used + // for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string + + // Upload ID after which listing began. + UploadIdMarker *string + + // Container for elements related to a particular multipart upload. A response can + // contain zero or more Upload elements. + Uploads []types.MultipartUpload + + // Metadata pertaining to the operation's result. 
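A pagination sketch for ListMultipartUploads above; unlike the configuration listings, it pages with two cursors, key-marker and upload-id-marker, exactly as the field docs describe (helper name invented, client wiring as in the earlier sketches).

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listInProgressUploads walks all in-progress multipart uploads; note that
// the two pagination cursors always travel together.
func listInProgressUploads(ctx context.Context, client *s3.Client, bucket string) error {
	in := &s3.ListMultipartUploadsInput{Bucket: aws.String(bucket)}
	for {
		out, err := client.ListMultipartUploads(ctx, in)
		if err != nil {
			return err
		}
		for _, u := range out.Uploads {
			fmt.Println(aws.ToString(u.Key), aws.ToString(u.UploadId))
		}
		if !out.IsTruncated {
			return nil
		}
		in.KeyMarker = out.NextKeyMarker
		in.UploadIdMarker = out.NextUploadIdMarker
	}
}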
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListMultipartUploads{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListMultipartUploads{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMultipartUploads(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addListMultipartUploadsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opListMultipartUploads(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "ListMultipartUploads",
+ }
+}
+
+// getListMultipartUploadsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getListMultipartUploadsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListMultipartUploadsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListMultipartUploadsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListMultipartUploadsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ 
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go new file mode 100644 index 000000000..f2d2b9fa9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go @@ -0,0 +1,276 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns metadata about all versions of the objects in a bucket. You can also use +// request parameters as selection criteria to return metadata about a subset of +// all the object versions. To use this operation, you must have permissions to +// perform the s3:ListBucketVersions action. Be aware of the name difference. A 200 +// OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// To use this operation, you must have READ access to the bucket. This action is +// not supported by Amazon S3 on Outposts. The following operations are related to +// ListObjectVersions: +// +// * ListObjectsV2 +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { + if params == nil { + params = &ListObjectVersionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListObjectVersions", params, optFns, c.addOperationListObjectVersionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListObjectVersionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListObjectVersionsInput struct { + + // The bucket name that contains the objects. + // + // This member is required. + Bucket *string + + // A delimiter is a character that you specify to group keys. All keys that contain + // the same string between the prefix and the first occurrence of the delimiter are + // grouped under a single result element in CommonPrefixes. These groups are + // counted as one result against the max-keys limitation. These keys are not + // returned elsewhere in the response. + Delimiter *string + + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters with an + // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. + EncodingType types.EncodingType + + // The account ID of the expected bucket owner. 
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string + + // Sets the maximum number of keys returned in the response. By default the action + // returns up to 1,000 key names. The response might contain fewer keys but will + // never contain more. If additional keys satisfy the search criteria, but were not + // returned because max-keys was exceeded, the response contains true. To return + // the additional keys, see key-marker and version-id-marker. + MaxKeys int32 + + // Use this parameter to select only those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different groupings of + // keys. (You can think of using prefix to make groups in the same way you'd use a + // folder in a file system.) You can use prefix with delimiter to roll up numerous + // objects into a single result under CommonPrefixes. + Prefix *string + + // Specifies the object version you want to start listing from. + VersionIdMarker *string + + noSmithyDocumentSerde +} + +type ListObjectVersionsOutput struct { + + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + CommonPrefixes []types.CommonPrefix + + // Container for an object that is a delete marker. + DeleteMarkers []types.DeleteMarkerEntry + + // The delimiter grouping the included keys. A delimiter is a character that you + // specify to group keys. All keys that contain the same string between the prefix + // and the first occurrence of the delimiter are grouped under a single result + // element in CommonPrefixes. These groups are counted as one result against the + // max-keys limitation. These keys are not returned elsewhere in the response. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. + EncodingType types.EncodingType + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make a + // follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the rest of + // the results. + IsTruncated bool + + // Marks the last key returned in a truncated response. + KeyMarker *string + + // Specifies the maximum number of objects to return. + MaxKeys int32 + + // The bucket name. + Name *string + + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. Use + // this value for the key-marker request parameter in a subsequent request. + NextKeyMarker *string + + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. Use this value for the version-id-marker request parameter in a + // subsequent request. + NextVersionIdMarker *string + + // Selects objects that start with the value supplied by this parameter. 
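For ListObjectVersions, whose input and output shapes appear above, a sketch that walks versions and delete markers with the paired key-marker/version-id-marker cursors. Illustrative only; names are invented and the client is constructed as in the earlier sketches.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printVersions lists every version and delete marker under a prefix.
func printVersions(ctx context.Context, client *s3.Client, bucket, prefix string) error {
	in := &s3.ListObjectVersionsInput{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	}
	for {
		out, err := client.ListObjectVersions(ctx, in)
		if err != nil {
			return err
		}
		for _, v := range out.Versions {
			fmt.Println(aws.ToString(v.Key), aws.ToString(v.VersionId), "latest:", v.IsLatest)
		}
		for _, d := range out.DeleteMarkers {
			fmt.Println(aws.ToString(d.Key), aws.ToString(d.VersionId), "(delete marker)")
		}
		if !out.IsTruncated {
			return nil
		}
		// Both cursors advance together, mirroring the response fields.
		in.KeyMarker = out.NextKeyMarker
		in.VersionIdMarker = out.NextVersionIdMarker
	}
}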
+	Prefix *string
+
+	// Marks the last version of the key returned in a truncated response.
+	VersionIdMarker *string
+
+	// Container for version information.
+	Versions []types.ObjectVersion
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectVersions{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectVersions{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectVersions(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addListObjectVersionsUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opListObjectVersions(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "ListObjectVersions",
+	}
+}
+
+// getListObjectVersionsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getListObjectVersionsBucketMember(input interface{}) (*string, bool) {
+	in := input.(*ListObjectVersionsInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addListObjectVersionsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getListObjectVersionsBucketMember,
+		},
+		UsePathStyle:  options.UsePathStyle,
+		UseAccelerate:
options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go new file mode 100644 index 000000000..b2d83ff74 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go @@ -0,0 +1,284 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns some or all (up to 1,000) of the objects in a bucket. You can use the +// request parameters as selection criteria to return a subset of the objects in a +// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design +// your application to parse the contents of the response and handle it +// appropriately. This action has been revised. We recommend that you use the newer +// version, ListObjectsV2 +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), when +// developing applications. For backward compatibility, Amazon S3 continues to +// support ListObjects. The following operations are related to ListObjects: +// +// * +// ListObjectsV2 +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// ListBuckets +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { + if params == nil { + params = &ListObjectsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListObjects", params, optFns, c.addOperationListObjectsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListObjectsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListObjectsInput struct { + + // The name of the bucket containing the objects. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // A delimiter is a character you use to group keys. + Delimiter *string + + // Requests Amazon S3 to encode the object keys in the response and specifies the + // encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters with an + // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. + EncodingType types.EncodingType + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. Marker can be any key in the bucket. + Marker *string + + // Sets the maximum number of keys returned in the response. By default the action + // returns up to 1,000 key names. The response might contain fewer keys but will + // never contain more. + MaxKeys int32 + + // Limits the response to keys that begin with the specified prefix. + Prefix *string + + // Confirms that the requester knows that she or he will be charged for the list + // objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer types.RequestPayer + + noSmithyDocumentSerde +} + +type ListObjectsOutput struct { + + // All of the keys (up to 1,000) rolled up in a common prefix count as a single + // return when calculating the number of returns. A response can contain + // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if + // there are any) keys between Prefix and the next occurrence of the string + // specified by the delimiter. CommonPrefixes lists keys that act like + // subdirectories in the directory specified by Prefix. For example, if the prefix + // is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common + // prefix is notes/summer/. All of the keys that roll up into a common prefix count + // as a single return when calculating the number of returns. + CommonPrefixes []types.CommonPrefix + + // Metadata about each object returned. + Contents []types.Object + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element in the + // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in + // the response. Each rolled-up result counts as only one return against the + // MaxKeys value. + Delimiter *string + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType types.EncodingType + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. + IsTruncated bool + + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. + Marker *string + + // The maximum number of keys returned in the response body. 
+	MaxKeys int32
+
+	// The bucket name.
+	Name *string
+
+	// When the response is truncated (the IsTruncated element value in the response
+	// is true), you can use the key name in this field as the marker in the
+	// subsequent request to get the next set of objects. Amazon S3 lists objects in
+	// alphabetical order. Note: this element is returned only if you have the
+	// delimiter request parameter specified. If the response does not include the
+	// NextMarker and it is truncated, you can use the value of the last Key in the
+	// response as the marker in the subsequent request to get the next set of
+	// object keys.
+	NextMarker *string
+
+	// Keys that begin with the indicated prefix.
+	Prefix *string
+
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpListObjects{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjects{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpListObjectsValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjects(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addListObjectsUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opListObjects(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "ListObjects",
+	}
+}
+
+// getListObjectsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getListObjectsBucketMember(input interface{}) (*string, bool) {
+	in :=
input.(*ListObjectsInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addListObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getListObjectsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go new file mode 100644 index 000000000..6214d2471 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go @@ -0,0 +1,408 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns some or all (up to 1,000) of the objects in a bucket with each request. +// You can use the request parameters as selection criteria to return a subset of +// the objects in a bucket. A 200 OK response can contain valid or invalid XML. +// Make sure to design your application to parse the contents of the response and +// handle it appropriately. Objects are returned sorted in an ascending order of +// the respective key names in the list. For more information about listing +// objects, see Listing object keys programmatically +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// To use this operation, you must have READ access to the bucket. To use this +// action in an Identity and Access Management (IAM) policy, you must have +// permissions to perform the s3:ListBucket action. The bucket owner has this +// permission by default and can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// This section describes the latest revision of this action. We recommend that you +// use this revised API for application development. For backward compatibility, +// Amazon S3 continues to support the prior version of this API, ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). To get a +// list of your buckets, see ListBuckets +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). 
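+//
+// As a minimal sketch only (the bucket name and the default-config credential
+// loading below are assumptions for illustration, not part of this generated
+// API), paging through a bucket with the paginator defined later in this file
+// might look like:
+//
+//	import (
+//		"context"
+//		"fmt"
+//
+//		"github.com/aws/aws-sdk-go-v2/aws"
+//		"github.com/aws/aws-sdk-go-v2/config"
+//		"github.com/aws/aws-sdk-go-v2/service/s3"
+//	)
+//
+//	func listAllKeys(ctx context.Context) error {
+//		cfg, err := config.LoadDefaultConfig(ctx) // assumes the default credential chain
+//		if err != nil {
+//			return err
+//		}
+//		client := s3.NewFromConfig(cfg)
+//		p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
+//			Bucket: aws.String("example-bucket"), // hypothetical bucket name
+//		})
+//		for p.HasMorePages() { // stops once NextContinuationToken is exhausted
+//			page, err := p.NextPage(ctx)
+//			if err != nil {
+//				return err
+//			}
+//			for _, obj := range page.Contents {
+//				fmt.Println(*obj.Key)
+//			}
+//		}
+//		return nil
+//	}
+//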
+// The following operations are related to ListObjectsV2:
+//
+// * GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// * PutObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) {
+	if params == nil {
+		params = &ListObjectsV2Input{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListObjectsV2", params, optFns, c.addOperationListObjectsV2Middlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListObjectsV2Output)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListObjectsV2Input struct {
+
+	// Bucket name to list. When using this action with an access point, you must
+	// direct requests to the access point hostname. The access point hostname takes
+	// the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When
+	// using this action with an access point through the Amazon Web Services SDKs,
+	// you provide the access point ARN in place of the bucket name. For more
+	// information about access point ARNs, see Using access points
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide. When using this action with Amazon S3 on
+	// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
+	// Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the Amazon Web Services SDKs,
+	// you provide the Outposts bucket ARN in place of the bucket name. For more
+	// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in
+	// the Amazon S3 User Guide.
+	//
+	// This member is required.
+	Bucket *string
+
+	// ContinuationToken indicates to Amazon S3 that the list is being continued on
+	// this bucket with a token. ContinuationToken is obfuscated and is not a real
+	// key.
+	ContinuationToken *string
+
+	// A delimiter is a character you use to group keys.
+	Delimiter *string
+
+	// Encoding type used by Amazon S3 to encode object keys in the response.
+	EncodingType types.EncodingType
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string
+
+	// The owner field is not present in ListObjectsV2 responses by default. If you
+	// want the owner field returned with each key in the result, set the FetchOwner
+	// field to true.
+	FetchOwner bool
+
+	// Sets the maximum number of keys returned in the response. By default the
+	// action returns up to 1,000 key names. The response might contain fewer keys
+	// but will never contain more.
+	MaxKeys int32
+
+	// Limits the response to keys that begin with the specified prefix.
+	Prefix *string
+
+	// Confirms that the requester knows that she or he will be charged for the list
+	// objects request in V2 style. Bucket owners need not specify this parameter in
+	// their requests.
+	RequestPayer types.RequestPayer
+
+	// StartAfter is where you want Amazon S3 to start listing from. Amazon S3
+	// starts listing after this specified key. StartAfter can be any key in the
+	// bucket.
+	StartAfter *string
+
+	noSmithyDocumentSerde
+}
+
+type ListObjectsV2Output struct {
+
+	// All of the keys (up to 1,000) rolled up into a common prefix count as a
+	// single return when calculating the number of returns. A response can contain
+	// CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all
+	// (if there are any) keys between Prefix and the next occurrence of the string
+	// specified by a delimiter. CommonPrefixes lists keys that act like
+	// subdirectories in the directory specified by Prefix. For example, if the
+	// prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july,
+	// the common prefix is notes/summer/. All of the keys that roll up into a
+	// common prefix count as a single return when calculating the number of
+	// returns.
+	CommonPrefixes []types.CommonPrefix
+
+	// Metadata about each object returned.
+	Contents []types.Object
+
+	// If ContinuationToken was sent with the request, it is included in the
+	// response.
+	ContinuationToken *string
+
+	// Causes keys that contain the same string between the prefix and the first
+	// occurrence of the delimiter to be rolled up into a single result element in
+	// the CommonPrefixes collection. These rolled-up keys are not returned
+	// elsewhere in the response. Each rolled-up result counts as only one return
+	// against the MaxKeys value.
+	Delimiter *string
+
+	// Encoding type used by Amazon S3 to encode object key names in the XML
+	// response. If you specify the encoding-type request parameter, Amazon S3
+	// includes this element in the response, and returns encoded key name values in
+	// the following response elements: Delimiter, Prefix, Key, and StartAfter.
+	EncodingType types.EncodingType
+
+	// Set to false if all of the results were returned. Set to true if more keys
+	// are available to return. If the number of results exceeds that specified by
+	// MaxKeys, all of the results might not be returned.
+	IsTruncated bool
+
+	// KeyCount is the number of keys returned with this request. KeyCount will
+	// always be less than or equal to the MaxKeys field. For example, if you ask
+	// for 50 keys, your result will include 50 keys or fewer.
+	KeyCount int32
+
+	// Sets the maximum number of keys returned in the response. By default the
+	// action returns up to 1,000 key names. The response might contain fewer keys
+	// but will never contain more.
+	MaxKeys int32
+
+	// The bucket name. When using this action with an access point, you must
+	// direct requests to the access point hostname. The access point hostname takes
+	// the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When
+	// using this action with an access point through the Amazon Web Services SDKs,
+	// you provide the access point ARN in place of the bucket name. For more
+	// information about access point ARNs, see Using access points
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide. When using this action with Amazon S3 on
+	// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
+	// Outposts hostname takes the form
+	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
+	// using this action with S3 on Outposts through the Amazon Web Services SDKs,
+	// you provide the Outposts bucket ARN in place of the bucket name.
For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + Name *string + + // NextContinuationToken is sent when isTruncated is true, which means there are + // more keys in the bucket that can be listed. The next list requests to Amazon S3 + // can be continued with this NextContinuationToken. NextContinuationToken is + // obfuscated and is not a real key + NextContinuationToken *string + + // Keys that begin with the indicated prefix. + Prefix *string + + // If StartAfter was sent with the request, it is included in the response. + StartAfter *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectsV2{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectsV2{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectsV2(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addListObjectsV2UpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. +type ListObjectsV2APIClient interface { + ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) +} + +var _ ListObjectsV2APIClient = (*Client)(nil) + +// ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2 +type ListObjectsV2PaginatorOptions struct { + // Sets the maximum number of keys returned in the response. 
By default the action
+	// returns up to 1,000 key names. The response might contain fewer keys but
+	// will never contain more.
+	Limit int32
+
+	// Set to true if pagination should stop if the service returns a pagination
+	// token that matches the most recent token provided to the service.
+	StopOnDuplicateToken bool
+}
+
+// ListObjectsV2Paginator is a paginator for ListObjectsV2
+type ListObjectsV2Paginator struct {
+	options   ListObjectsV2PaginatorOptions
+	client    ListObjectsV2APIClient
+	params    *ListObjectsV2Input
+	nextToken *string
+	firstPage bool
+}
+
+// NewListObjectsV2Paginator returns a new ListObjectsV2Paginator
+func NewListObjectsV2Paginator(client ListObjectsV2APIClient, params *ListObjectsV2Input, optFns ...func(*ListObjectsV2PaginatorOptions)) *ListObjectsV2Paginator {
+	if params == nil {
+		params = &ListObjectsV2Input{}
+	}
+
+	options := ListObjectsV2PaginatorOptions{}
+	if params.MaxKeys != 0 {
+		options.Limit = params.MaxKeys
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListObjectsV2Paginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.ContinuationToken,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListObjectsV2Paginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListObjectsV2 page.
+func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsV2Output, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.ContinuationToken = p.nextToken
+
+	params.MaxKeys = p.options.Limit
+
+	result, err := p.client.ListObjectsV2(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = nil
+	if result.IsTruncated {
+		p.nextToken = result.NextContinuationToken
+	}
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "ListObjectsV2",
+	}
+}
+
+// getListObjectsV2BucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getListObjectsV2BucketMember(input interface{}) (*string, bool) {
+	in := input.(*ListObjectsV2Input)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addListObjectsV2UpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getListObjectsV2BucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go
new file mode 100644
index 000000000..36675dcd7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go
@@ -0,0 +1,424 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"time"
+)
+
+// Lists the parts that have been uploaded for a specific multipart upload. This
+// operation must include the upload ID, which you obtain by sending the initiate
+// multipart upload request (see CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)).
+// This request returns a maximum of 1,000 uploaded parts. The default number of
+// parts returned is 1,000 parts. You can restrict the number of parts returned
+// by specifying the max-parts request parameter. If your multipart upload
+// consists of more than 1,000 parts, the response returns an IsTruncated field
+// with the value of true, and a NextPartNumberMarker element. In subsequent
+// ListParts requests you can include the part-number-marker query string
+// parameter and set its value to the NextPartNumberMarker field value from the
+// previous response. If the upload was created using a checksum algorithm, you
+// will need to have permission to perform the kms:Decrypt action for the request
+// to succeed. For more information on multipart uploads, see Uploading Objects
+// Using Multipart Upload
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
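+//
+// As a minimal sketch (client construction is omitted, aws.String and fmt come
+// from the usual imports, and the bucket, key, and upload ID values are
+// assumptions for illustration), the paginator defined later in this file can
+// drive that part-number-marker loop for you:
+//
+//	func listUploadedParts(ctx context.Context, client *s3.Client) error {
+//		p := s3.NewListPartsPaginator(client, &s3.ListPartsInput{
+//			Bucket:   aws.String("example-bucket"),    // hypothetical
+//			Key:      aws.String("example-object"),    // hypothetical
+//			UploadId: aws.String("example-upload-id"), // returned by CreateMultipartUpload
+//		})
+//		for p.HasMorePages() {
+//			page, err := p.NextPage(ctx)
+//			if err != nil {
+//				return err
+//			}
+//			for _, part := range page.Parts {
+//				fmt.Printf("part %d: %d bytes\n", part.PartNumber, part.Size)
+//			}
+//		}
+//		return nil
+//	}
+//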
For +// information on permissions required to use the multipart upload API, see +// Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The +// following operations are related to ListParts: +// +// * CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// UploadPart +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// * +// CompleteMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * +// AbortMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * +// GetObjectAttributes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// * +// ListMultipartUploads +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { + if params == nil { + params = &ListPartsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListParts", params, optFns, c.addOperationListPartsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListPartsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListPartsInput struct { + + // The name of the bucket to which the parts are being uploaded. When using this + // action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // This member is required. + UploadId *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Sets the maximum number of parts to return. + MaxParts int32 + + // Specifies the part after which listing should begin. Only parts with higher part + // numbers will be listed. + PartNumberMarker *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string + + // The server-side encryption (SSE) customer managed key. This parameter is needed + // only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKey *string + + // The MD5 server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string + + noSmithyDocumentSerde +} + +type ListPartsOutput struct { + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object name + // in the request, then the response includes this header indicating when the + // initiated multipart upload will become eligible for abort operation. For more + // information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle + // Policy + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. + AbortDate *time.Time + + // This header is returned along with the x-amz-abort-date header. It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + AbortRuleId *string + + // The name of the bucket to which the multipart upload was initiated. Does not + // return the access point ARN or access point alias if used. + Bucket *string + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Container element that identifies who initiated the multipart upload. If the + // initiator is an Amazon Web Services account, this element provides the same + // information as the Owner element. If the initiator is an IAM User, this element + // provides the user ARN and display name. + Initiator *types.Initiator + + // Indicates whether the returned list of parts is truncated. A true value + // indicates that the list was truncated. A list can be truncated if the number of + // parts exceeds the limit returned in the MaxParts element. + IsTruncated bool + + // Object key for which the multipart upload was initiated. + Key *string + + // Maximum number of parts that were allowed in the response. 
+ MaxParts int32 + + // When a list is truncated, this element specifies the last part in the list, as + // well as the value to use for the part-number-marker request parameter in a + // subsequent request. + NextPartNumberMarker *string + + // Container element that identifies the object owner, after the object is created. + // If multipart upload is initiated by an IAM user, this element provides the + // parent account ID and display name. + Owner *types.Owner + + // When a list is truncated, this element specifies the last part in the list, as + // well as the value to use for the part-number-marker request parameter in a + // subsequent request. + PartNumberMarker *string + + // Container for elements related to a particular part. A response can contain zero + // or more Part elements. + Parts []types.Part + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. + StorageClass types.StorageClass + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpListParts{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListParts{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpListPartsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListParts(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addListPartsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListPartsAPIClient is a client that 
implements the ListParts operation.
+type ListPartsAPIClient interface {
+	ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error)
+}
+
+var _ ListPartsAPIClient = (*Client)(nil)
+
+// ListPartsPaginatorOptions is the paginator options for ListParts
+type ListPartsPaginatorOptions struct {
+	// Sets the maximum number of parts to return.
+	Limit int32
+
+	// Set to true if pagination should stop if the service returns a pagination
+	// token that matches the most recent token provided to the service.
+	StopOnDuplicateToken bool
+}
+
+// ListPartsPaginator is a paginator for ListParts
+type ListPartsPaginator struct {
+	options   ListPartsPaginatorOptions
+	client    ListPartsAPIClient
+	params    *ListPartsInput
+	nextToken *string
+	firstPage bool
+}
+
+// NewListPartsPaginator returns a new ListPartsPaginator
+func NewListPartsPaginator(client ListPartsAPIClient, params *ListPartsInput, optFns ...func(*ListPartsPaginatorOptions)) *ListPartsPaginator {
+	if params == nil {
+		params = &ListPartsInput{}
+	}
+
+	options := ListPartsPaginatorOptions{}
+	if params.MaxParts != 0 {
+		options.Limit = params.MaxParts
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &ListPartsPaginator{
+		options:   options,
+		client:    client,
+		params:    params,
+		firstPage: true,
+		nextToken: params.PartNumberMarker,
+	}
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListPartsPaginator) HasMorePages() bool {
+	return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListParts page.
+func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListPartsOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.PartNumberMarker = p.nextToken
+
+	params.MaxParts = p.options.Limit
+
+	result, err := p.client.ListParts(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = nil
+	if result.IsTruncated {
+		p.nextToken = result.NextPartNumberMarker
+	}
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "ListParts",
+	}
+}
+
+// getListPartsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getListPartsBucketMember(input interface{}) (*string, bool) {
+	in := input.(*ListPartsInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addListPartsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getListPartsBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go
new file mode 100644
index 000000000..7875798f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go
@@ -0,0 +1,237 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
+// Acceleration is a bucket-level feature that enables you to perform faster data
+// transfers to Amazon S3. To use this operation, you must have permission to
+// perform the s3:PutAccelerateConfiguration action. The bucket owner has this
+// permission by default. The bucket owner can grant this permission to others.
+// For more information about permissions, see Permissions Related to Bucket
+// Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+// The Transfer Acceleration state of a bucket can be set to one of the following
+// two values:
+//
+// * Enabled – Enables accelerated data transfers to the bucket.
+//
+// * Suspended – Disables accelerated data transfers to the bucket.
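+//
+// For example (a hedged sketch only; the bucket name is an assumption and
+// client construction is omitted), enabling Transfer Acceleration looks like:
+//
+//	_, err := client.PutBucketAccelerateConfiguration(ctx, &s3.PutBucketAccelerateConfigurationInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical; must be DNS-compliant with no periods
+//		AccelerateConfiguration: &types.AccelerateConfiguration{
+//			Status: types.BucketAccelerateStatusEnabled,
+//		},
+//	})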
+// +// The +// GetBucketAccelerateConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// action returns the transfer acceleration state of a bucket. After setting the +// Transfer Acceleration state of a bucket to Enabled, it might take up to thirty +// minutes before the data transfer rates to the bucket increase. The name of the +// bucket used for Transfer Acceleration must be DNS-compliant and must not contain +// periods ("."). For more information about transfer acceleration, see Transfer +// Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// The following operations are related to PutBucketAccelerateConfiguration: +// +// * +// GetBucketAccelerateConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// +// * +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { + if params == nil { + params = &PutBucketAccelerateConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketAccelerateConfiguration", params, optFns, c.addOperationPutBucketAccelerateConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketAccelerateConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketAccelerateConfigurationInput struct { + + // Container for setting the transfer acceleration state. + // + // This member is required. + AccelerateConfiguration *types.AccelerateConfiguration + + // The name of the bucket for which the accelerate configuration is set. + // + // This member is required. + Bucket *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketAccelerateConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAccelerateConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAccelerateConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketAccelerateConfiguration", + } +} + +// getPutBucketAccelerateConfigurationRequestAlgorithmMember gets the request +// checksum algorithm value provided as input. 
+func getPutBucketAccelerateConfigurationRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketAccelerateConfigurationInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketAccelerateConfigurationRequestAlgorithmMember,
+		RequireChecksum:                  false,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketAccelerateConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketAccelerateConfigurationInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketAccelerateConfigurationBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go
new file mode 100644
index 000000000..bc28a9960
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go
@@ -0,0 +1,368 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the permissions on an existing bucket using access control lists (ACL).
+// For more information, see Using ACLs
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). To
+// set the ACL of a bucket, you must have WRITE_ACP permission. You can use one
+// of the following two ways to set a bucket's permissions:
+//
+// * Specify the ACL in the request body
+//
+// * Specify permissions using request headers
+//
+// You cannot specify access permission using both the body and the request
+// headers. Depending on your application needs, you may choose to set the ACL on
+// a bucket using either the request body or the headers. For example, if you
+// have an existing application that updates a bucket ACL using the request body,
+// then you can continue to use that approach. If your bucket uses the bucket
+// owner enforced setting for S3 Object Ownership, ACLs are disabled and no
+// longer affect permissions.
You must +// use policies to grant access to your bucket and the objects in it. Requests to +// set ACLs or update ACLs fail and return the AccessControlListNotSupported error +// code. Requests to read ACLs are still supported. For more information, see +// Controlling object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. Access Permissions You can set access permissions +// using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl +// request header. Amazon S3 supports a set of predefined ACLs, known as canned +// ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify +// the canned ACL name as the value of x-amz-acl. If you use this header, you +// cannot use other access control-specific headers in your request. For more +// information, see Canned ACL +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * +// Specify access permissions explicitly with the x-amz-grant-read, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. When using these headers, you specify explicit access permissions and +// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the +// permission. If you use these ACL-specific headers, you cannot use the x-amz-acl +// header to set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control List +// (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify +// each grantee as a type=value pair, where the type is one of the following: +// +// * id +// – if the value specified is the canonical user ID of an Amazon Web Services +// account +// +// * uri – if you are granting permissions to a predefined group +// +// * +// emailAddress – if the value specified is the email address of an Amazon Web +// Services account Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: +// +// * US East (N. Virginia) +// +// * US West +// (N. California) +// +// * US West (Oregon) +// +// * Asia Pacific (Singapore) +// +// * Asia Pacific +// (Sydney) +// +// * Asia Pacific (Tokyo) +// +// * Europe (Ireland) +// +// * South America (São +// Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see +// Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. +// +// For example, the following +// x-amz-grant-write header grants create, overwrite, and delete objects permission +// to LogDelivery group predefined by Amazon S3 and two Amazon Web Services +// accounts identified by their email addresses. x-amz-grant-write: +// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", +// id="555566667777" +// +// You can use either a canned ACL or specify access permissions +// explicitly. You cannot do both. 
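+//
+// For example, a minimal usage sketch that applies a canned ACL (the bucket name
+// is hypothetical, and cfg is assumed to come from config.LoadDefaultConfig):
+//
+//	client := s3.NewFromConfig(cfg)
+//	_, err := client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
+//		Bucket: aws.String("example-bucket"),
+//		ACL:    types.BucketCannedACLPrivate,
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//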
+// Grantee Values You can specify the person (grantee) to whom you're assigning
+// access rights (using request elements) in the following ways:
+//
+// * By the person's ID, using a Grantee element of xsi:type CanonicalUser whose
+// ID child element holds the canonical user ID (for example, <ID>ID</ID>).
+// DisplayName is optional and ignored in the request
+//
+// * By URI, using a Grantee element of xsi:type Group whose URI child element
+// holds the group URI (for example,
+// <URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI>)
+//
+// * By Email address, using a Grantee element of xsi:type AmazonCustomerByEmail
+// whose EmailAddress child element holds the address (for example,
+// <EmailAddress>Grantees@email.com</EmailAddress>). The grantee is resolved to
+// the CanonicalUser and, in a response to a GET Object acl request, appears as
+// the CanonicalUser. Using email addresses to specify a grantee is only
+// supported in the following Amazon Web Services Regions:
+//
+// * US East (N. Virginia)
+//
+// * US West (N. California)
+//
+// * US West (Oregon)
+//
+// * Asia Pacific (Singapore)
+//
+// * Asia Pacific (Sydney)
+//
+// * Asia Pacific (Tokyo)
+//
+// * Europe (Ireland)
+//
+// * South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see Regions
+// and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
+// Amazon Web Services General Reference.
+//
+// # Related Resources
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// * DeleteBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// * GetObjectAcl
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) {
+	if params == nil {
+		params = &PutBucketAclInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "PutBucketAcl", params, optFns, c.addOperationPutBucketAclMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*PutBucketAclOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type PutBucketAclInput struct {
+
+	// The bucket to which to apply the ACL.
+	//
+	// This member is required.
+	Bucket *string
+
+	// The canned ACL to apply to the bucket.
+	ACL types.BucketCannedACL
+
+	// Contains the elements that set the ACL permissions for an object per grantee.
+	AccessControlPolicy *types.AccessControlPolicy
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding
+	// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+	// request with the HTTP status code 400 Bad Request. For more information, see
+	// Checking object integrity
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
+	// ignores any provided ChecksumAlgorithm parameter.
+	ChecksumAlgorithm types.ChecksumAlgorithm
+
+	// The base64-encoded 128-bit MD5 digest of the data. This header must be used
+	// as a message integrity check to verify that the request body was not
+	// corrupted in transit. For more information, go to RFC 1864.
+	// (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the Amazon Web
+	// Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field
+	// is calculated automatically.
+	ContentMD5 *string
+
+	// The account ID of the expected bucket owner.
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string + + // Allows grantee to list the objects in the bucket. + GrantRead *string + + // Allows grantee to read the bucket ACL. + GrantReadACP *string + + // Allows grantee to create new objects in the bucket. For the bucket and object + // owners of existing objects, also allows deletions and overwrites of those + // objects. + GrantWrite *string + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string + + noSmithyDocumentSerde +} + +type PutBucketAclOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAcl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAcl{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketAclValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAcl(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketAclInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketAclUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketAcl", + } +} + +// getPutBucketAclRequestAlgorithmMember gets the request checksum algorithm 
value
+// provided as input.
+func getPutBucketAclRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketAclInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketAclRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketAclBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketAclBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketAclInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketAclBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
new file mode 100644
index 000000000..da70d547e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
@@ -0,0 +1,234 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets an analytics configuration for the bucket (specified by the analytics
+// configuration ID). You can have up to 1,000 analytics configurations per
+// bucket. You can choose to have storage class analysis export analysis reports
+// sent to a comma-separated values (CSV) flat file. See the DataExport request
+// element. Reports are updated daily and are based on the object filters that
+// you configure. When selecting data export, you specify a destination bucket
+// and an optional destination prefix where the file is written. You can export
+// the data to a destination bucket in a different account. However, the
+// destination bucket must be in the same Region as the bucket that you are
+// making the PUT analytics configuration to. For more information, see Amazon
+// S3 Analytics – Storage Class Analysis
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+// You must create a bucket policy on the destination bucket where the exported
+// file is written to grant permissions to Amazon S3 to write objects to the
+// bucket.
For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// To use this operation, you must have permissions to perform the +// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// Special Errors +// +// * HTTP Error: HTTP 400 Bad Request +// +// * Code: InvalidArgument +// +// * +// Cause: Invalid argument. +// +// * HTTP Error: HTTP 400 Bad Request +// +// * Code: +// TooManyConfigurations +// +// * Cause: You are attempting to create a new configuration +// but have already reached the 1,000-configuration limit. +// +// * HTTP Error: HTTP 403 +// Forbidden +// +// * Code: AccessDenied +// +// * Cause: You are not the owner of the specified +// bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to +// set the configuration on the bucket. +// +// # Related Resources +// +// * +// GetBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// * +// DeleteBucketAnalyticsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// * +// ListBucketAnalyticsConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) { + if params == nil { + params = &PutBucketAnalyticsConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketAnalyticsConfiguration", params, optFns, c.addOperationPutBucketAnalyticsConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketAnalyticsConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketAnalyticsConfigurationInput struct { + + // The configuration and any analyses for the analytics filter. + // + // This member is required. + AnalyticsConfiguration *types.AnalyticsConfiguration + + // The name of the bucket to which an analytics configuration is stored. + // + // This member is required. + Bucket *string + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketAnalyticsConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAnalyticsConfiguration{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAnalyticsConfiguration{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "PutBucketAnalyticsConfiguration",
+	}
+}
+
+// getPutBucketAnalyticsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketAnalyticsConfigurationInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketAnalyticsConfigurationBucketMember,
+		},
+		UsePathStyle:         options.UsePathStyle,
+		UseAccelerate:        options.UseAccelerate,
+		SupportsAccelerate:   true,
+		TargetS3ObjectLambda: false,
+
EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go new file mode 100644 index 000000000..55a67f188 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go @@ -0,0 +1,253 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the cors configuration for your bucket. If the configuration exists, Amazon +// S3 replaces it. To use this operation, you must be allowed to perform the +// s3:PutBucketCORS action. By default, the bucket owner has this permission and +// can grant it to others. You set this configuration on a bucket so that the +// bucket can service cross-origin requests. For example, you might want to enable +// a request whose origin is http://www.example.com to access your Amazon S3 bucket +// at my.example.bucket.com by using the browser's XMLHttpRequest capability. To +// enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which you +// configure rules that identify origins and the HTTP methods that can be executed +// on your bucket. The document is limited to 64 KB in size. When Amazon S3 +// receives a cross-origin request (or a pre-flight OPTIONS request) against a +// bucket, it evaluates the cors configuration on the bucket and uses the first +// CORSRule rule that matches the incoming browser request to enable a cross-origin +// request. For a rule to match, the following conditions must be met: +// +// * The +// request's Origin header must match AllowedOrigin elements. +// +// * The request method +// (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// * Every header specified in the Access-Control-Request-Headers +// request header of a pre-flight request must match an AllowedHeader element. +// +// For +// more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 +// User Guide. 
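+//
+// For example, a minimal usage sketch that allows GET requests from
+// http://www.example.com (the bucket name is hypothetical, and client is an
+// *s3.Client constructed elsewhere):
+//
+//	_, err := client.PutBucketCors(ctx, &s3.PutBucketCorsInput{
+//		Bucket: aws.String("example-bucket"),
+//		CORSConfiguration: &types.CORSConfiguration{
+//			CORSRules: []types.CORSRule{{
+//				AllowedMethods: []string{"GET"},
+//				AllowedOrigins: []string{"http://www.example.com"},
+//			}},
+//		},
+//	})
+//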
+// Related Resources
+//
+// * GetBucketCors
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html)
+//
+// * DeleteBucketCors
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
+//
+// * RESTOPTIONSobject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) {
+	if params == nil {
+		params = &PutBucketCorsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "PutBucketCors", params, optFns, c.addOperationPutBucketCorsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*PutBucketCorsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type PutBucketCorsInput struct {
+
+	// Specifies the bucket impacted by the cors configuration.
+	//
+	// This member is required.
+	Bucket *string
+
+	// Describes the cross-origin access configuration for objects in an Amazon S3
+	// bucket. For more information, see Enabling Cross-Origin Resource Sharing
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3
+	// User Guide.
+	//
+	// This member is required.
+	CORSConfiguration *types.CORSConfiguration
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not
+	// using the SDK. When sending this header, there must be a corresponding
+	// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+	// request with the HTTP status code 400 Bad Request. For more information, see
+	// Checking object integrity
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
+	// ignores any provided ChecksumAlgorithm parameter.
+	ChecksumAlgorithm types.ChecksumAlgorithm
+
+	// The base64-encoded 128-bit MD5 digest of the data. This header must be used
+	// as a message integrity check to verify that the request body was not
+	// corrupted in transit. For more information, go to RFC 1864.
+	// (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the Amazon Web
+	// Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field
+	// is calculated automatically.
+	ContentMD5 *string
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string
+
+	noSmithyDocumentSerde
+}
+
+type PutBucketCorsOutput struct {
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketCors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketCors{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketCors(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketCorsInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketCorsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketCors", + } +} + +// getPutBucketCorsRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. 
+func getPutBucketCorsRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketCorsInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketCorsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketCorsRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketCorsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketCorsBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketCorsInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketCorsBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
new file mode 100644
index 000000000..184f0cd31
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
@@ -0,0 +1,243 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This action uses the encryption subresource to configure default encryption
+// and Amazon S3 Bucket Key for an existing bucket. Default encryption for a
+// bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or
+// customer managed keys (SSE-KMS). If you specify default encryption using
+// SSE-KMS, you can also configure Amazon S3 Bucket Key. When the default
+// encryption is SSE-KMS, if you upload an object to the bucket and do not
+// specify the KMS key to use for encryption, Amazon S3 uses the default Amazon
+// Web Services managed KMS key for your account. For information about default
+// encryption, see Amazon S3 default bucket encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in
+// the Amazon S3 User Guide. For more information about S3 Bucket Keys, see
+// Amazon S3 Bucket Keys
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the
+// Amazon S3 User Guide. This action requires Amazon Web Services Signature
+// Version 4.
For more information, see Authenticating Requests (Amazon Web +// Services Signature Version 4) +// (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// To use this operation, you must have permissions to perform the +// s3:PutEncryptionConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. Related Resources +// +// * GetBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// * +// DeleteBucketEncryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) { + if params == nil { + params = &PutBucketEncryptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketEncryption", params, optFns, c.addOperationPutBucketEncryptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketEncryptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketEncryptionInput struct { + + // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For + // information about the Amazon S3 default encryption feature, see Amazon S3 + // Default Bucket Encryption + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Specifies the default server-side-encryption configuration. + // + // This member is required. + ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the server-side encryption + // configuration. For requests made using the Amazon Web Services Command Line + // Interface (CLI) or Amazon Web Services SDKs, this field is calculated + // automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketEncryptionOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketEncryption{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketEncryption{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketEncryption(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketEncryptionInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketEncryptionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketEncryption", + } +} + +// getPutBucketEncryptionRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
+func getPutBucketEncryptionRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketEncryptionInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketEncryptionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketEncryptionRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketEncryptionBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketEncryptionBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketEncryptionInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketEncryptionBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
new file mode 100644
index 000000000..edf5d178b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
@@ -0,0 +1,225 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Puts an S3 Intelligent-Tiering configuration to the specified bucket. You can
+// have up to 1,000 S3 Intelligent-Tiering configurations per bucket. The S3
+// Intelligent-Tiering storage class is designed to optimize storage costs by
+// automatically moving data to the most cost-effective storage access tier,
+// without performance impact or operational overhead. S3 Intelligent-Tiering
+// delivers automatic cost savings in three low latency and high throughput
+// access tiers. To get the lowest storage cost on data that can be accessed in
+// minutes to hours, you can choose to activate additional archiving
+// capabilities. The S3 Intelligent-Tiering storage class is the ideal storage
+// class for data with unknown, changing, or unpredictable access patterns,
+// independent of object size or retention period. If the size of an object is
+// less than 128 KB, it is not monitored and not eligible for auto-tiering.
Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. For more information, see Storage class for +// automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// * +// DeleteBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// * +// GetBucketIntelligentTieringConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// * +// ListBucketIntelligentTieringConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// You +// only need S3 Intelligent-Tiering enabled on a bucket if you want to +// automatically move objects stored in the S3 Intelligent-Tiering storage class to +// the Archive Access or Deep Archive Access tier. Special Errors +// +// * HTTP 400 Bad +// Request Error +// +// * Code: InvalidArgument +// +// * Cause: Invalid Argument +// +// * HTTP 400 +// Bad Request Error +// +// * Code: TooManyConfigurations +// +// * Cause: You are attempting to +// create a new configuration but have already reached the 1,000-configuration +// limit. +// +// * HTTP 403 Forbidden Error +// +// * Code: AccessDenied +// +// * Cause: You are not +// the owner of the specified bucket, or you do not have the +// s3:PutIntelligentTieringConfiguration bucket permission to set the configuration +// on the bucket. +func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) { + if params == nil { + params = &PutBucketIntelligentTieringConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketIntelligentTieringConfiguration", params, optFns, c.addOperationPutBucketIntelligentTieringConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketIntelligentTieringConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketIntelligentTieringConfigurationInput struct { + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // This member is required. + Bucket *string + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // This member is required. + Id *string + + // Container for S3 Intelligent-Tiering configuration. + // + // This member is required. + IntelligentTieringConfiguration *types.IntelligentTieringConfiguration + + noSmithyDocumentSerde +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "PutBucketIntelligentTieringConfiguration",
+	}
+}
+
+// getPutBucketIntelligentTieringConfigurationBucketMember returns a pointer to
+// string denoting a provided bucket member value and a boolean indicating if the
+// input has a modeled bucket name.
+func getPutBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketIntelligentTieringConfigurationInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketIntelligentTieringConfigurationBucketMember,
+		},
+		UsePathStyle:
options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go new file mode 100644 index 000000000..bb1a903b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -0,0 +1,235 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// This implementation of the PUT action adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. Amazon S3 inventory generates inventories +// of the objects in the bucket on a daily or weekly basis, and the results are +// published to a flat file. The bucket that is inventoried is called the source +// bucket, and the bucket where the inventory flat file is stored is called the +// destination bucket. The destination bucket must be in the same Amazon Web +// Services Region as the source bucket. When you configure an inventory for a +// source bucket, you specify the destination bucket where you want the inventory +// to be stored, and whether to generate the inventory daily or weekly. You can +// also configure what object metadata to include and whether to inventory all +// object versions or only current versions. For more information, see Amazon S3 +// Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) in the +// Amazon S3 User Guide. You must create a bucket policy on the destination bucket +// to grant permissions to Amazon S3 to write objects to the bucket in the defined +// location. For an example policy, see Granting Permissions for Amazon S3 +// Inventory and Storage Class Analysis +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// To use this operation, you must have permissions to perform the +// s3:PutInventoryConfiguration action. The bucket owner has this permission by +// default and can grant this permission to others. For more information about +// permissions, see Permissions Related to Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. Special Errors +// +// * HTTP 400 Bad Request Error +// +// * +// Code: InvalidArgument +// +// * Cause: Invalid Argument +// +// * HTTP 400 Bad Request +// Error +// +// * Code: TooManyConfigurations +// +// * Cause: You are attempting to create a +// new configuration but have already reached the 1,000-configuration limit. 
+// +// * +// HTTP 403 Forbidden Error +// +// * Code: AccessDenied +// +// * Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket. +// +// # Related Resources +// +// * +// GetBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// * +// DeleteBucketInventoryConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// * +// ListBucketInventoryConfigurations +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { + if params == nil { + params = &PutBucketInventoryConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketInventoryConfiguration", params, optFns, c.addOperationPutBucketInventoryConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketInventoryConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketInventoryConfigurationInput struct { + + // The name of the bucket where the inventory configuration will be stored. + // + // This member is required. + Bucket *string + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // Specifies the inventory configuration. + // + // This member is required. + InventoryConfiguration *types.InventoryConfiguration + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketInventoryConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketInventoryConfiguration{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketInventoryConfiguration{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "PutBucketInventoryConfiguration",
+	}
+}
+
+// getPutBucketInventoryConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketInventoryConfigurationInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketInventoryConfigurationBucketMember,
+		},
+		UsePathStyle:         options.UsePathStyle,
+		UseAccelerate:        options.UseAccelerate,
+		SupportsAccelerate:   true,
+		TargetS3ObjectLambda: false,
+
EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go new file mode 100644 index 000000000..ca79b24e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -0,0 +1,269 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. Keep in mind that this will overwrite an existing +// lifecycle configuration, so if you want to retain any configuration details, +// they must be included in the new lifecycle configuration. For information about +// lifecycle configuration, see Managing your storage lifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). +// Bucket lifecycle configuration now supports specifying a lifecycle rule using an +// object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The previous version of the +// API supported filtering based only on an object key name prefix, which is +// supported for backward compatibility. For the related API description, see +// PutBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). +// Rules You specify the lifecycle configuration in your request body. The +// lifecycle configuration is specified as XML consisting of one or more rules. An +// Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not +// adjustable. Each rule consists of the following: +// +// * Filter identifying a subset +// of objects to which the rule applies. The filter can be based on a key name +// prefix, object tags, or a combination of both. +// +// * Status whether the rule is in +// effect. +// +// * One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. If the state of +// your bucket is versioning-enabled or versioning-suspended, you can have many +// versions of the same object (one current version and zero or more noncurrent +// versions). Amazon S3 provides predefined actions that you can specify for +// current and noncurrent object versions. +// +// For more information, see Object +// Lifecycle Management +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) and +// Lifecycle Configuration Elements +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// Permissions By default, all Amazon S3 resources are private, including buckets, +// objects, and related subresources (for example, lifecycle configuration and +// website configuration). 
Only the resource owner (that is, the Amazon Web +// Services account that created it) can access the resource. The resource owner +// can optionally grant access permissions to others by writing an access policy. +// For this operation, a user must get the s3:PutLifecycleConfiguration permission. +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * +// s3:PutLifecycleConfiguration +// +// For more information about permissions, see +// Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of +// Lifecycle Configuration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * +// GetBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * +// DeleteBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { + if params == nil { + params = &PutBucketLifecycleConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketLifecycleConfiguration", params, optFns, c.addOperationPutBucketLifecycleConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketLifecycleConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketLifecycleConfigurationInput struct { + + // The name of the bucket for which to set the configuration. + // + // This member is required. + Bucket *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Container for lifecycle rules. You can add as many as 1,000 rules. + LifecycleConfiguration *types.BucketLifecycleConfiguration + + noSmithyDocumentSerde +} + +type PutBucketLifecycleConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
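+	//
+	// (Illustrative sketch, not generated code: a minimal lifecycle
+	// configuration with one expiration rule, assuming a configured *Client
+	// named client and aws = github.com/aws/aws-sdk-go-v2/aws; field shapes can
+	// differ between SDK versions:
+	//
+	//	_, err := client.PutBucketLifecycleConfiguration(context.TODO(),
+	//		&PutBucketLifecycleConfigurationInput{
+	//			Bucket: aws.String("example-bucket"),
+	//			LifecycleConfiguration: &types.BucketLifecycleConfiguration{
+	//				Rules: []types.LifecycleRule{{
+	//					ID:         aws.String("expire-logs"),
+	//					Status:     types.ExpirationStatusEnabled,
+	//					Filter:     &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"},
+	//					Expiration: &types.LifecycleExpiration{Days: 30},
+	//				}},
+	//			},
+	//		}))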
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLifecycleConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketLifecycleConfiguration", + } +} + +// getPutBucketLifecycleConfigurationRequestAlgorithmMember gets the request +// checksum algorithm value provided as input. 
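+//
+// For example (an illustrative sketch, not generated code), a caller that opts
+// in to CRC32 checksums sets the field on the input, and this accessor then
+// reports it to the checksum middleware configured below:
+//
+//	input := &PutBucketLifecycleConfigurationInput{ /* ... */ }
+//	input.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32
+//	algo, ok := getPutBucketLifecycleConfigurationRequestAlgorithmMember(input) // "CRC32", true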
+func getPutBucketLifecycleConfigurationRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketLifecycleConfigurationInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketLifecycleConfigurationRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketLifecycleConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketLifecycleConfigurationInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketLifecycleConfigurationBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go
new file mode 100644
index 000000000..0f3ea6d33
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go
@@ -0,0 +1,258 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the logging parameters for a bucket and specifies permissions for who can
+// view and modify the logging parameters. All logs are saved to buckets in the
+// same Amazon Web Services Region as the source bucket. To set the logging status
+// of a bucket, you must be the bucket owner. The bucket owner is automatically
+// granted FULL_CONTROL to all logs. You use the Grantee request element to grant
+// access to other people. The Permissions request element specifies the kind of
+// access the grantee has to the logs. If the target bucket for log delivery uses
+// the bucket owner enforced setting for S3 Object Ownership, you can't use the
+// Grantee request element to grant access to others. Permissions can only be
+// granted using policies.
For more information, see Permissions for server access +// log delivery +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. Grantee Values You can specify the person (grantee) +// to whom you're assigning access rights (using request elements) in the following +// ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional +// and ignored in the request. +// +// * By Email address: <>Grantees@email.com<> The +// grantee is resolved to the CanonicalUser and, in a response to a GET Object acl +// request, appears as the CanonicalUser. +// +// * By URI: +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// To enable +// logging, you use LoggingEnabled and its children request elements. To disable +// logging, you use an empty BucketLoggingStatus request element: For more +// information about server access logging, see Server Access Logging +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) in the +// Amazon S3 User Guide. For more information about creating a bucket, see +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). For +// more information about returning the logging status of a bucket, see +// GetBucketLogging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html). The +// following operations are related to PutBucketLogging: +// +// * PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * +// DeleteBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// * +// CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// GetBucketLogging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { + if params == nil { + params = &PutBucketLoggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketLogging", params, optFns, c.addOperationPutBucketLoggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketLoggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketLoggingInput struct { + + // The name of the bucket for which to set the logging parameters. + // + // This member is required. + Bucket *string + + // Container for logging status information. + // + // This member is required. + BucketLoggingStatus *types.BucketLoggingStatus + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash of the PutBucketLogging request body. 
For requests made using the + // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, + // this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketLoggingOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLogging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLogging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLogging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketLoggingInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketLoggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketLogging", + } +} + +// getPutBucketLoggingRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
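+//
+// (Illustrative note, not generated code: because the middleware options below
+// set RequireChecksum to true, a request checksum is always computed for this
+// operation; when the caller does not set ChecksumAlgorithm, the checksum
+// middleware is expected to fall back to an MD5 digest of the request body, as
+// described in the ContentMD5 field documentation above.)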
+func getPutBucketLoggingRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketLoggingInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketLoggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketLoggingRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketLoggingBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketLoggingBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketLoggingInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketLoggingBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go
new file mode 100644
index 000000000..6f0c6facd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go
@@ -0,0 +1,209 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets a metrics configuration (specified by the metrics configuration ID) for
+// the bucket. You can have up to 1,000 metrics configurations per bucket. If
+// you're updating an existing metrics configuration, note that this is a full
+// replacement of the existing metrics configuration. If you don't include the
+// elements you want to keep, they are erased. To use this operation, you must
+// have permissions to perform the s3:PutMetricsConfiguration action. The bucket
+// owner has this permission by default. The bucket owner can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
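+//
+// An illustrative sketch (not generated code) of a minimal call that enables
+// request metrics for a whole bucket, assuming a configured *Client named
+// client, aws = github.com/aws/aws-sdk-go-v2/aws, and a hypothetical bucket
+// name:
+//
+//	_, err := client.PutBucketMetricsConfiguration(context.TODO(),
+//		&PutBucketMetricsConfigurationInput{
+//			Bucket: aws.String("example-bucket"),
+//			Id:     aws.String("EntireBucket"),
+//			MetricsConfiguration: &types.MetricsConfiguration{
+//				Id: aws.String("EntireBucket"),
+//			},
+//		})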
+// For information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
+// The following operations are related to PutBucketMetricsConfiguration:
+//
+// *
+// DeleteBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html)
+//
+// *
+// GetBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html)
+//
+// *
+// ListBucketMetricsConfigurations
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html)
+//
+// PutBucketMetricsConfiguration
+// has the following special error:
+//
+// * Error code: TooManyConfigurations
+//
+// *
+// Description: You are attempting to create a new configuration but have already
+// reached the 1,000-configuration limit.
+//
+// * HTTP Status Code: HTTP 400 Bad Request
+func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) {
+	if params == nil {
+		params = &PutBucketMetricsConfigurationInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "PutBucketMetricsConfiguration", params, optFns, c.addOperationPutBucketMetricsConfigurationMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*PutBucketMetricsConfigurationOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type PutBucketMetricsConfigurationInput struct {
+
+	// The name of the bucket for which the metrics configuration is set.
+	//
+	// This member is required.
+	Bucket *string
+
+	// The ID used to identify the metrics configuration.
+	//
+	// This member is required.
+	Id *string
+
+	// Specifies the metrics configuration.
+	//
+	// This member is required.
+	MetricsConfiguration *types.MetricsConfiguration
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string
+
+	noSmithyDocumentSerde
+}
+
+type PutBucketMetricsConfigurationOutput struct {
+	// Metadata pertaining to the operation's result.
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketMetricsConfiguration{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketMetricsConfiguration{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "PutBucketMetricsConfiguration",
+	}
+}
+
+// getPutBucketMetricsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketMetricsConfigurationInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketMetricsConfigurationBucketMember,
+		},
+		UsePathStyle:         options.UsePathStyle,
+		UseAccelerate:        options.UseAccelerate,
+		SupportsAccelerate:   true,
+		TargetS3ObjectLambda: false,
+		EndpointResolver:
options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go new file mode 100644 index 000000000..8e771d6bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -0,0 +1,209 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see Configuring Event Notifications +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). Using +// this API, you can replace an existing notification configuration. The +// configuration is an XML file that defines the event types that you want Amazon +// S3 to publish and the destination where you want Amazon S3 to publish an event +// notification when it detects an event of the specified type. By default, your +// bucket has no event notifications configured. That is, the notification +// configuration will be an empty NotificationConfiguration. This action replaces +// the existing notification configuration with the configuration you include in +// the request body. After Amazon S3 receives this request, it first verifies that +// any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue +// Service (Amazon SQS) destination exists, and that the bucket owner has +// permission to publish to it by sending a test notification. In the case of +// Lambda destinations, Amazon S3 verifies that the Lambda function permissions +// grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For +// more information, see Configuring Notifications for Amazon S3 Events +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). You +// can disable notifications by adding the empty NotificationConfiguration element. +// For more information about the number of event notification configurations that +// you can create per bucket, see Amazon S3 service quotas +// (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3) in Amazon Web +// Services General Reference. By default, only the bucket owner can configure +// notifications on a bucket. However, bucket owners can use a bucket policy to +// grant permission to other users to set this configuration with +// s3:PutBucketNotification permission. The PUT notification is an atomic +// operation. For example, suppose your notification configuration includes SNS +// topic, SQS queue, and Lambda function configurations. When you send a PUT +// request with this configuration, Amazon S3 sends test messages to your SNS +// topic. If the message fails, the entire PUT action will fail, and Amazon S3 will +// not add the configuration to your bucket. 
Responses If the configuration in the +// request body includes only one TopicConfiguration specifying only the +// s3:ReducedRedundancyLostObject event type, the response will also include the +// x-amz-sns-test-message-id header containing the message ID of the test +// notification sent to the topic. The following action is related to +// PutBucketNotificationConfiguration: +// +// * GetBucketNotificationConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { + if params == nil { + params = &PutBucketNotificationConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketNotificationConfiguration", params, optFns, c.addOperationPutBucketNotificationConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketNotificationConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketNotificationConfigurationInput struct { + + // The name of the bucket. + // + // This member is required. + Bucket *string + + // A container for specifying the notification configuration of the bucket. If this + // element is empty, notifications are turned off for the bucket. + // + // This member is required. + NotificationConfiguration *types.NotificationConfiguration + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or + // false value. + SkipDestinationValidation bool + + noSmithyDocumentSerde +} + +type PutBucketNotificationConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
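+	//
+	// (Illustrative sketch, not generated code: a minimal
+	// PutBucketNotificationConfiguration call that routes object-created events
+	// to a hypothetical SQS queue, assuming a configured *Client named client
+	// and aws = github.com/aws/aws-sdk-go-v2/aws:
+	//
+	//	_, err := client.PutBucketNotificationConfiguration(context.TODO(),
+	//		&PutBucketNotificationConfigurationInput{
+	//			Bucket: aws.String("example-bucket"),
+	//			NotificationConfiguration: &types.NotificationConfiguration{
+	//				QueueConfigurations: []types.QueueConfiguration{{
+	//					QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
+	//					Events:   []types.Event{"s3:ObjectCreated:*"},
+	//				}},
+	//			},
+	//		}))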
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketNotificationConfiguration{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketNotificationConfiguration{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "PutBucketNotificationConfiguration",
+	}
+}
+
+// getPutBucketNotificationConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketNotificationConfigurationInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketNotificationConfigurationBucketMember,
+		},
+		UsePathStyle:       options.UsePathStyle,
+		UseAccelerate:      options.UseAccelerate,
+		SupportsAccelerate: true,
+
TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go new file mode 100644 index 000000000..83210cac4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go @@ -0,0 +1,197 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For more +// information about Amazon S3 permissions, see Specifying permissions in a policy +// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html). +// For information about Amazon S3 Object Ownership, see Using object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html). +// The following operations are related to PutBucketOwnershipControls: +// +// * +// GetBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { + if params == nil { + params = &PutBucketOwnershipControlsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketOwnershipControls", params, optFns, c.addOperationPutBucketOwnershipControlsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketOwnershipControlsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketOwnershipControlsInput struct { + + // The name of the Amazon S3 bucket whose OwnershipControls you want to set. + // + // This member is required. + Bucket *string + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or + // ObjectWriter) that you want to apply to this Amazon S3 bucket. + // + // This member is required. + OwnershipControls *types.OwnershipControls + + // The MD5 hash of the OwnershipControls request body. For requests made using the + // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, + // this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketOwnershipControlsOutput struct { + // Metadata pertaining to the operation's result. 
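+	//
+	// (Illustrative sketch, not generated code: a minimal call that enforces
+	// bucket-owner ownership, assuming a configured *Client named client and
+	// aws = github.com/aws/aws-sdk-go-v2/aws:
+	//
+	//	_, err := client.PutBucketOwnershipControls(context.TODO(),
+	//		&PutBucketOwnershipControlsInput{
+	//			Bucket: aws.String("example-bucket"),
+	//			OwnershipControls: &types.OwnershipControls{
+	//				Rules: []types.OwnershipControlsRule{{
+	//					ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
+	//				}},
+	//			},
+	//		}))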
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketOwnershipControls{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketOwnershipControls{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketOwnershipControls(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = addMetadataRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketOwnershipControlsInputChecksumMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addPutBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+		return err
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "PutBucketOwnershipControls",
+	}
+}
+
+func addPutBucketOwnershipControlsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     nil,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketOwnershipControlsBucketMember returns a pointer to string denoting
+// a provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketOwnershipControlsInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func
addPutBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutBucketOwnershipControlsBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go new file mode 100644 index 000000000..8860d3b56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go @@ -0,0 +1,229 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an +// identity other than the root user of the Amazon Web Services account that owns +// the bucket, the calling identity must have the PutBucketPolicy permissions on +// the specified bucket and belong to the bucket owner's account in order to use +// this operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns +// a 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. As a security precaution, the root user of the +// Amazon Web Services account that owns a bucket can always use this operation, +// even if the policy explicitly denies the root user the ability to perform this +// action. For more information, see Bucket policy examples +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html). +// The following operations are related to PutBucketPolicy: +// +// * CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// DeleteBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { + if params == nil { + params = &PutBucketPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketPolicy", params, optFns, c.addOperationPutBucketPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketPolicyInput struct { + + // The name of the bucket. + // + // This member is required. + Bucket *string + + // The bucket policy as a JSON document. + // + // This member is required. + Policy *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. 
This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + ConfirmRemoveSelfBucketAccess bool + + // The MD5 hash of the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketPolicyOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketPolicyInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketPolicyUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err 
+	}
+	if err = disableAcceptEncodingGzip(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opPutBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "s3",
+		OperationName: "PutBucketPolicy",
+	}
+}
+
+// getPutBucketPolicyRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutBucketPolicyRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketPolicyInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketPolicyInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketPolicyRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketPolicyBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketPolicyBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketPolicyInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketPolicyBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go
new file mode 100644
index 000000000..2213373f3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go
@@ -0,0 +1,264 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a replication configuration or replaces an existing one. For more
+// information, see Replication
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon
+// S3 User Guide. Specify the replication configuration in the request body. In the
+// replication configuration, you provide the name of the destination bucket or
+// buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon
+// S3 can assume to replicate objects on your behalf, and other relevant
+// information.
A replication configuration must include at least one rule, and can +// contain a maximum of 1,000. Each rule identifies a subset of objects to +// replicate by filtering the objects in the source bucket. To choose additional +// subsets of objects to replicate, add a rule for each subset. To specify a subset +// of the objects in the source bucket to apply a replication rule to, add the +// Filter element as a child of the Rule element. You can filter objects based on +// an object key prefix, one or more object tags, or both. When you add the Filter +// element in the configuration, you must also add the following elements: +// DeleteMarkerReplication, Status, and Priority. If you are using an earlier +// version of the replication configuration, Amazon S3 handles replication of +// delete markers differently. For more information, see Backward Compatibility +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). Handling +// Replication of Encrypted Objects By default, Amazon S3 doesn't replicate objects +// that are stored at rest using server-side encryption with KMS keys. To replicate +// Amazon Web Services KMS-encrypted objects, add the following: +// SourceSelectionCriteria, SseKmsEncryptedObjects, Status, +// EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication +// configuration, see Replicating Objects Created with SSE Using KMS keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// For information on PutBucketReplication errors, see List of replication-related +// error codes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// Permissions To create a PutBucketReplication request, you must have +// s3:PutReplicationConfiguration permissions for the bucket. By default, a +// resource owner, in this case the Amazon Web Services account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. For more information about permissions, +// see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) and +// Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// To perform this operation, the user or role performing the action must have the +// iam:PassRole +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. 
The following operations are related to PutBucketReplication: +// +// * +// GetBucketReplication +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// * +// DeleteBucketReplication +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { + if params == nil { + params = &PutBucketReplicationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketReplication", params, optFns, c.addOperationPutBucketReplicationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketReplicationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketReplicationInput struct { + + // The name of the bucket + // + // This member is required. + Bucket *string + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + // + // This member is required. + ReplicationConfiguration *types.ReplicationConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a + // message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string + + noSmithyDocumentSerde +} + +type PutBucketReplicationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketReplication{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketReplication{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketReplication(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketReplicationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketReplicationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketReplication", + } +} + +// getPutBucketReplicationRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. 
+func getPutBucketReplicationRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketReplicationInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketReplicationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketReplicationRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketReplicationBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketReplicationBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketReplicationInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketReplicationBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go
new file mode 100644
index 000000000..c89d97bec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go
@@ -0,0 +1,221 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the request payment configuration for a bucket. By default, the bucket
+// owner pays for downloads from the bucket. This configuration parameter enables
+// the bucket owner (only) to specify that the person requesting the download will
+// be charged for the download. For more information, see Requester Pays Buckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html).
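A minimal sketch of switching a bucket to requester-pays with this operation; the bucket name is a placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Charge downloads to the requester; types.PayerBucketOwner restores the default.
	_, err = client.PutBucketRequestPayment(context.TODO(), &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String("example-bucket"), // placeholder
		RequestPaymentConfiguration: &types.RequestPaymentConfiguration{
			Payer: types.PayerRequester,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```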
The +// following operations are related to PutBucketRequestPayment: +// +// * CreateBucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// * +// GetBucketRequestPayment +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { + if params == nil { + params = &PutBucketRequestPaymentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketRequestPayment", params, optFns, c.addOperationPutBucketRequestPaymentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketRequestPaymentOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketRequestPaymentInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // Container for Payer. + // + // This member is required. + RequestPaymentConfiguration *types.RequestPaymentConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a + // message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketRequestPaymentOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketRequestPayment{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketRequestPayment{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketRequestPayment(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketRequestPaymentInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketRequestPayment", + } +} + +// getPutBucketRequestPaymentRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. 
+func getPutBucketRequestPaymentRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketRequestPaymentInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketRequestPaymentInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketRequestPaymentRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketRequestPaymentBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketRequestPaymentBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketRequestPaymentInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketRequestPaymentBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
new file mode 100644
index 000000000..f41010773
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
@@ -0,0 +1,266 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the tags for a bucket. Use tags to organize your Amazon Web Services bill
+// to reflect your own cost structure. To do this, sign up to get your Amazon Web
+// Services account bill with tag key values included. Then, to see the cost of
+// combined resources, organize your billing information according to resources
+// with the same tag key values. For example, you can tag several resources with a
+// specific application name, and then organize your billing information to see the
+// total cost of that application across several services. For more information,
+// see Cost Allocation and Tagging
+// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
+// and Using Cost Allocation in Amazon S3 Bucket Tags
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). When
+// this operation sets the tags for a bucket, it will overwrite any current tags
+// the bucket already has. You cannot use this operation to add tags to an existing
+// list of tags.
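Because PutBucketTagging replaces the whole tag set rather than merging, a caller resends every tag the bucket should keep. A minimal sketch with placeholder bucket and tag values:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The request replaces the entire tag set, so list every tag to keep,
	// not only the additions.
	_, err = client.PutBucketTagging(context.TODO(), &s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Tagging: &types.Tagging{
			TagSet: []types.Tag{
				{Key: aws.String("cost-center"), Value: aws.String("12345")},
				{Key: aws.String("team"), Value: aws.String("storage")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```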
To use this operation, you must have permissions to perform the +// s3:PutBucketTagging action. The bucket owner has this permission by default and +// can grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// PutBucketTagging has the following special errors: +// +// * Error code: +// InvalidTagError +// +// * Description: The tag provided was not a valid tag. This error +// can occur if the tag did not pass input validation. For information about tag +// restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// and Amazon Web Services-Generated Cost Allocation Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). +// +// * +// Error code: MalformedXMLError +// +// * Description: The XML provided does not match +// the schema. +// +// * Error code: OperationAbortedError +// +// * Description: A conflicting +// conditional action is currently in progress against this resource. Please try +// again. +// +// * Error code: InternalError +// +// * Description: The service was unable to +// apply the provided tag to the bucket. +// +// The following operations are related to +// PutBucketTagging: +// +// * GetBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// * +// DeleteBucketTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { + if params == nil { + params = &PutBucketTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketTagging", params, optFns, c.addOperationPutBucketTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketTaggingInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // Container for the TagSet and Tag elements. + // + // This member is required. + Tagging *types.Tagging + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a + // message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 + // (http://www.ietf.org/rfc/rfc1864.txt). 
For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketTaggingOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketTaggingInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketTaggingUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketTagging", + } +} + +// getPutBucketTaggingRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
+func getPutBucketTaggingRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketTaggingInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketTaggingRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketTaggingBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketTaggingInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketTaggingBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
new file mode 100644
index 000000000..6d7943e6f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
@@ -0,0 +1,243 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the versioning state of an existing bucket. You can set the versioning
+// state with one of the following values: Enabled—Enables versioning for the
+// objects in the bucket. All objects added to the bucket receive a unique version
+// ID. Suspended—Disables versioning for the objects in the bucket. All objects
+// added to the bucket receive the version ID null. If the versioning state has
+// never been set on a bucket, it has no versioning state; a GetBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
+// request does not return a versioning state value. In order to enable MFA Delete,
+// you must be the bucket owner. If you are the bucket owner and want to enable MFA
+// Delete in the bucket versioning configuration, you must include the x-amz-mfa
+// request header and the Status and the MfaDelete request elements in a request to
+// set the versioning state of the bucket.
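A minimal sketch of enabling versioning with this operation; the bucket name is a placeholder, and MFA delete is omitted since it additionally requires the x-amz-mfa header via the MFA input field:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Enable versioning; pass types.BucketVersioningStatusSuspended to
	// suspend it again (versioning cannot be fully unset once enabled).
	_, err = client.PutBucketVersioning(context.TODO(), &s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // placeholder
		VersioningConfiguration: &types.VersioningConfiguration{
			Status: types.BucketVersioningStatusEnabled,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```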
+// If you have an object expiration
+// lifecycle policy in your non-versioned bucket and you want to maintain the same
+// permanent delete behavior when you enable versioning, you must add a noncurrent
+// expiration policy. The noncurrent expiration lifecycle policy will manage the
+// deletes of the noncurrent object versions in the version-enabled bucket. (A
+// version-enabled bucket maintains one current and zero or more noncurrent object
+// versions.) For more information, see Lifecycle and Versioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config).
+// Related Resources
+//
+// * CreateBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// *
+// DeleteBucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// *
+// GetBucketVersioning
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
+func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) {
+	if params == nil {
+		params = &PutBucketVersioningInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "PutBucketVersioning", params, optFns, c.addOperationPutBucketVersioningMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*PutBucketVersioningOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type PutBucketVersioningInput struct {
+
+	// The bucket name.
+	//
+	// This member is required.
+	Bucket *string
+
+	// Container for setting the versioning state.
+	//
+	// This member is required.
+	VersioningConfiguration *types.VersioningConfiguration
+
+	// Indicates the algorithm used to create the checksum for the object when using
+	// the SDK. This header will not provide any additional functionality if not using
+	// the SDK. When sending this header, there must be a corresponding x-amz-checksum
+	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the
+	// HTTP status code 400 Bad Request. For more information, see Checking object
+	// integrity
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
+	// ignores any provided ChecksumAlgorithm parameter.
+	ChecksumAlgorithm types.ChecksumAlgorithm
+
+	// The base64-encoded 128-bit MD5 digest of the data. You must use this header as
+	// a message integrity check to verify that the request body was not corrupted in
+	// transit. For more information, see RFC 1864
+	// (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web
+	// Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
+	// calculated automatically.
+	ContentMD5 *string
+
+	// The account ID of the expected bucket owner. If the bucket is owned by a
+	// different account, the request fails with the HTTP status code 403 Forbidden
+	// (access denied).
+	ExpectedBucketOwner *string
+
+	// The concatenation of the authentication device's serial number, a space, and the
+	// value that is displayed on your authentication device.
+	MFA *string
+
+	noSmithyDocumentSerde
+}
+
+type PutBucketVersioningOutput struct {
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketVersioning{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketVersioning{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketVersioning(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketVersioningInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketVersioningUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketVersioning", + } +} + +// getPutBucketVersioningRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
+func getPutBucketVersioningRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketVersioningInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketVersioningInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketVersioningRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketVersioningBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketVersioningBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketVersioningInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketVersioningBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
new file mode 100644
index 000000000..11cb4a355
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
@@ -0,0 +1,279 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Sets the configuration of the website that is specified in the website
+// subresource. To configure a bucket as a website, you can add this subresource on
+// the bucket with website configuration information such as the file name of the
+// index document and any redirect rules. For more information, see Hosting
+// Websites on Amazon S3
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). This PUT
+// action requires the S3:PutBucketWebsite permission. By default, only the bucket
+// owner can configure the website attached to a bucket; however, bucket owners can
+// allow other users to set the website configuration by writing a bucket policy
+// that grants them the S3:PutBucketWebsite permission. To redirect all website
+// requests sent to the bucket's website endpoint, you add a website configuration
+// with the following elements. Because all requests are sent to another website,
+// you don't need to provide an index document name for the bucket.
+// +// * +// WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you +// want granular control over redirects, you can use the following elements to add +// routing rules that describe conditions for redirecting requests and information +// about the redirect destination. In this case, the website configuration must +// provide an index document for the bucket, because some requests might not be +// redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * +// ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * +// HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * +// HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode +// +// Amazon +// S3 has a limitation of 50 routing rules per website configuration. If you +// require more than 50 routing rules, you can use object redirect. For more +// information, see Configuring an Object Redirect +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) in +// the Amazon S3 User Guide. +func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { + if params == nil { + params = &PutBucketWebsiteInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutBucketWebsite", params, optFns, c.addOperationPutBucketWebsiteMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutBucketWebsiteOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutBucketWebsiteInput struct { + + // The bucket name. + // + // This member is required. + Bucket *string + + // Container for the request. + // + // This member is required. + WebsiteConfiguration *types.WebsiteConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a + // message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutBucketWebsiteOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketWebsite{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketWebsite{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketWebsite(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutBucketWebsiteInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutBucketWebsiteUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutBucketWebsite", + } +} + +// getPutBucketWebsiteRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
+func getPutBucketWebsiteRequestAlgorithmMember(input interface{}) (string, bool) {
+	in := input.(*PutBucketWebsiteInput)
+	if len(in.ChecksumAlgorithm) == 0 {
+		return "", false
+	}
+	return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketWebsiteInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+	return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+		GetAlgorithm:                     getPutBucketWebsiteRequestAlgorithmMember,
+		RequireChecksum:                  true,
+		EnableTrailingChecksum:           false,
+		EnableComputeSHA256PayloadHash:   true,
+		EnableDecodedContentLengthHeader: true,
+	})
+}
+
+// getPutBucketWebsiteBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketWebsiteBucketMember(input interface{}) (*string, bool) {
+	in := input.(*PutBucketWebsiteInput)
+	if in.Bucket == nil {
+		return nil, false
+	}
+	return in.Bucket, true
+}
+func addPutBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error {
+	return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+		Accessor: s3cust.UpdateEndpointParameterAccessor{
+			GetBucketFromInput: getPutBucketWebsiteBucketMember,
+		},
+		UsePathStyle:                   options.UsePathStyle,
+		UseAccelerate:                  options.UseAccelerate,
+		SupportsAccelerate:             true,
+		TargetS3ObjectLambda:           false,
+		EndpointResolver:               options.EndpointResolver,
+		EndpointResolverOptions:        options.EndpointOptions,
+		UseARNRegion:                   options.UseARNRegion,
+		DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
new file mode 100644
index 000000000..643364061
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
@@ -0,0 +1,613 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"io"
+	"time"
+)
+
+// Adds an object to a bucket. You must have WRITE permissions on a bucket to add
+// an object to it. Amazon S3 never adds partial objects; if you receive a success
+// response, Amazon S3 added the entire object to the bucket. Amazon S3 is a
+// distributed system. If it receives multiple write requests for the same object
+// simultaneously, it overwrites all but the last object written. Amazon S3 does
+// not provide object locking; if you need this, make sure to build it into your
+// application layer or use versioning instead. To ensure that data is not
+// corrupted traversing the network, use the Content-MD5 header. When you use this
+// header, Amazon S3 checks the object against the provided MD5 value and, if they
+// do not match, returns an error. Additionally, you can calculate the MD5 while
+// putting an object to Amazon S3 and compare the returned ETag to the calculated
+// MD5 value.
+//
+// * To successfully complete the PutObject request, you must have the
+// s3:PutObject in your IAM permissions.
+//
+// * To successfully change the object's ACL
+// of your PutObject request, you must have the s3:PutObjectAcl in your IAM
+// permissions.
+//
+// * The Content-MD5 header is required for any request to upload an
+// object with a retention period configured using Amazon S3 Object Lock. For more
+// information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) in
+// the Amazon S3 User Guide.
+//
+// Server-side Encryption You can optionally request
+// server-side encryption. With server-side encryption, Amazon S3 encrypts your
+// data as it writes it to disks in its data centers and decrypts the data when you
+// access it. You have the option to provide your own encryption key or use Amazon
+// Web Services managed encryption keys (SSE-S3 or SSE-KMS). For more information,
+// see Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html).
+// If you request server-side encryption using Amazon Web Services Key Management
+// Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more
+// information, see Amazon S3 Bucket Keys
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon
+// S3 User Guide. Access Control List (ACL)-Specific Request Headers You can use
+// headers to grant ACL-based permissions. By default, all objects are private.
+// Only the owner has full access control. When adding a new object, you can grant
+// permissions to individual Amazon Web Services accounts or to predefined groups
+// defined by Amazon S3. These permissions are then added to the ACL on the object.
+// For more information, see Access Control List (ACL) Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing
+// ACLs Using the REST API
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). If
+// the bucket that you're uploading objects to uses the bucket owner enforced
+// setting for S3 Object Ownership, ACLs are disabled and no longer affect
+// permissions. Buckets that use this setting only accept PUT requests that don't
+// specify an ACL or PUT requests that specify bucket owner full control ACLs, such
+// as the bucket-owner-full-control canned ACL or an equivalent form of this ACL
+// expressed in the XML format. PUT requests that contain other ACLs (for example,
+// custom grants to certain Amazon Web Services accounts) fail and return a 400
+// error with the error code AccessControlListNotSupported. For more information,
+// see Controlling ownership of objects and disabling ACLs
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced
+// setting for Object Ownership, all objects written to the bucket by any account
+// will be owned by the bucket owner. Storage Class Options By default, Amazon S3
+// uses the STANDARD Storage Class to store newly created objects. The STANDARD
+// storage class provides high durability and high availability. Depending on
+// performance needs, you can specify a different Storage Class. Amazon S3 on
+// Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage
+// Classes
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in
+// the Amazon S3 User Guide.
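Pulling the pieces above together, a minimal sketch of a PutObject call that opts into an SDK-computed SHA-256 checksum and SSE-S3 encryption; the bucket, key, and body are placeholders:

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket:      aws.String("example-bucket"), // placeholder
		Key:         aws.String("greetings/hello.json"),
		Body:        strings.NewReader(`{"hello":"world"}`),
		ContentType: aws.String("application/json"),
		// The SDK computes and sends the SHA-256 checksum through the
		// per-operation input checksum middleware seen throughout this diff.
		ChecksumAlgorithm:    types.ChecksumAlgorithmSha256,
		ServerSideEncryption: types.ServerSideEncryptionAes256, // SSE-S3
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored object, ETag=%s", aws.ToString(out.ETag))
}
```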
Versioning If you enable versioning for a bucket, +// Amazon S3 automatically generates a unique version ID for the object being +// stored. Amazon S3 returns this ID in the response. When you enable versioning +// for a bucket, if Amazon S3 receives multiple write requests for the same object +// simultaneously, it stores all of the objects. For more information about +// versioning, see Adding Objects to Versioning Enabled Buckets +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). +// For information about returning the versioning state of a bucket, see +// GetBucketVersioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +// Related Resources +// +// * CopyObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// * +// DeleteObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { + if params == nil { + params = &PutObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObject", params, optFns, c.addOperationPutObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectInput struct { + + // The bucket name to which the PUT action was initiated. When using this action + // with an access point, you must direct requests to the access point hostname. The + // access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the PUT action was initiated. + // + // This member is required. + Key *string + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // This action is not supported by Amazon S3 on Outposts. + ACL types.ObjectCannedACL + + // Object data. + Body io.Reader + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true + // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. + BucketKeyEnabled bool + + // Can be used to specify caching behavior along the request/reply chain. 
For more + // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). + CacheControl *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Specifies presentational information for the object. For more information, see + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + ContentDisposition *string + + // Specifies what content encodings have been applied to the object and thus what + // decoding mechanisms must be applied to obtain the media-type referenced by the + // Content-Type header field. For more information, see + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + ContentEncoding *string + + // The language the content is in. + ContentLanguage *string + + // Size of the body in bytes. This parameter is useful when the size of the body + // cannot be determined automatically. 
For more information, see + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). + ContentLength int64 + + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check to + // verify that the data is the same data that was originally sent. Although it is + // optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, see + // REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + ContentMD5 *string + + // A standard MIME type describing the format of the contents. For more + // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). + ContentType *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The date and time at which the object is no longer cacheable. For more + // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + Expires *time.Time + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This + // action is not supported by Amazon S3 on Outposts. + GrantFullControl *string + + // Allows grantee to read the object data and its metadata. This action is not + // supported by Amazon S3 on Outposts. + GrantRead *string + + // Allows grantee to read the object ACL. This action is not supported by Amazon S3 + // on Outposts. + GrantReadACP *string + + // Allows grantee to write the ACL for the applicable object. This action is not + // supported by Amazon S3 on Outposts. + GrantWriteACP *string + + // A map of metadata to store with the object in S3. + Metadata map[string]string + + // Specifies whether a legal hold will be applied to this object. For more + // information about S3 Object Lock, see Object Lock + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus + + // The Object Lock mode that you want to apply to this object. + ObjectLockMode types.ObjectLockMode + + // The date and time when you want this object's Object Lock to expire. Must be + // formatted as a timestamp parameter. + ObjectLockRetainUntilDate *time.Time + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). + SSECustomerAlgorithm *string + + // Specifies the customer-provided encryption key for Amazon S3 to use in + // encrypting data. This value is used to store the object and then it is + // discarded; Amazon S3 does not store the encryption key.
The key must be + // appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. + SSECustomerKey *string + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + SSECustomerKeyMD5 *string + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. + SSEKMSEncryptionContext *string + + // If x-amz-server-side-encryption is present and has the value of aws:kms, this + // header specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. If you specify x-amz-server-side-encryption:aws:kms, but do not provide + // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web + // Services managed key to protect the data. If the KMS key does not exist in the + // same account issuing the command, you must use the full ARN and not just the ID. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256, aws:kms). + ServerSideEncryption types.ServerSideEncryption + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high + // availability. Depending on performance needs, you can specify a different + // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For + // more information, see Storage Classes + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in + // the Amazon S3 User Guide. + StorageClass types.StorageClass + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") + Tagging *string + + // If the bucket is configured as a website, redirects requests for this object to + // another object in the same bucket or to an external URL. Amazon S3 stores the + // value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). In the + // following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: x-amz-website-redirect-location: + // /anotherPage.html In the following example, the request header sets the object + // redirect to another website: x-amz-website-redirect-location: + // http://www.example.com/ For more information about website hosting in Amazon S3, + // see Hosting Websites on Amazon S3 + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) and How to + // Configure Website Page Redirects + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + WebsiteRedirectLocation *string + + noSmithyDocumentSerde +} + +type PutObjectOutput struct { + + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with Amazon Web Services KMS (SSE-KMS). + BucketKeyEnabled bool + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object.
With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag for the uploaded object. + ETag *string + + // If the expiration is configured for the object (see + // PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value of + // the rule-id is URL-encoded. + Expiration *string + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm used. + SSECustomerAlgorithm *string + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round-trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string + + // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a base64-encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. + SSEKMSEncryptionContext *string + + // If x-amz-server-side-encryption is present and has the value of aws:kms, this + // header specifies the ID of the Amazon Web Services Key Management Service + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. 
+ SSEKMSKeyId *string + + // If you specified server-side encryption either with an Amazon Web Services KMS + // key or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 used + // to encrypt the object. + ServerSideEncryption types.ServerSideEncryption + + // Version of the object. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObject", + } +} + +// getPutObjectRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. 
+func getPutObjectRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectRequestAlgorithmMember, + RequireChecksum: false, + EnableTrailingChecksum: true, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectBucketMember returns a pointer to string denoting a provided bucket +// member value and a boolean indicating if the input has a modeled bucket name. +func getPutObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// PresignPutObject is used to generate a presigned HTTP Request which contains the +// presigned URL, signed headers, and the HTTP method used. +func (c *PresignClient) PresignPutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &PutObjectInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "PutObject", params, clientOptFns, + c.client.addOperationPutObjectMiddlewares, + presignConverter(options).convertToPresignMiddleware, + func(stack *middleware.Stack, options Options) error { + return awshttp.RemoveContentTypeHeader(stack) + }, + addPutObjectPayloadAsUnsigned, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +func addPutObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { + v4.RemoveContentSHA256HeaderMiddleware(stack) + v4.RemoveComputePayloadSHA256Middleware(stack) + return v4.AddUnsignedPayloadMiddleware(stack) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go new file mode 100644 index 000000000..05a377b5b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go @@ -0,0 +1,410 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
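For orientation, the PutObject and PresignPutObject entry points vendored above can be exercised as in the sketch below. This is an illustrative sketch only, not part of the vendored file and not Landscaper's actual call path: the bucket and key names are hypothetical, credentials are assumed to come from the default provider chain, and error handling is collapsed to log.Fatal.

package main

import (
	"context"
	"log"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Load region and credentials from the default provider chain.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Upload a small object; Bucket and Key are the only required members.
	if _, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-key"),    // hypothetical
		Body:   strings.NewReader("hello"),
	}); err != nil {
		log.Fatal(err)
	}

	// Generate a presigned PUT URL via the PresignClient wrapper; note that
	// addPutObjectPayloadAsUnsigned (above) leaves the payload unsigned, so
	// the URL also works for bodies unknown at signing time.
	presigner := s3.NewPresignClient(client)
	req, err := presigner.PresignPutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-key"),    // hypothetical
	}, func(o *s3.PresignOptions) { o.Expires = 15 * time.Minute })
	if err != nil {
		log.Fatal(err)
	}
	log.Println(req.Method, req.URL)
}

Note also that addPutObjectInputChecksumMiddlewares above registers the checksum middleware with RequireChecksum: false and EnableTrailingChecksum: true, so a checksum is computed (and sent as a trailer where possible) only when ChecksumAlgorithm is set on the input.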
+ +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Uses the acl subresource to set the access control list (ACL) permissions for a +// new or existing object in an S3 bucket. You must have WRITE_ACP permission to +// set the ACL of an object. For more information, see What permissions can I +// grant? +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon S3 User Guide. This action is not supported by Amazon S3 on +// Outposts. Depending on your application needs, you can choose to set the ACL on +// an object using either the request body or the headers. For example, if you have +// an existing application that updates a bucket ACL using the request body, you +// can continue to use that approach. For more information, see Access Control List +// (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) in the +// Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for +// S3 Object Ownership, ACLs are disabled and no longer affect permissions. You +// must use policies to grant access to your bucket and the objects in it. Requests +// to set ACLs or update ACLs fail and return the AccessControlListNotSupported +// error code. Requests to read ACLs are still supported. For more information, see +// Controlling object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. Access Permissions You can set access permissions +// using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl +// request header. Amazon S3 supports a set of predefined ACLs, known as canned +// ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify +// the canned ACL name as the value of x-amz-acl. If you use this header, you +// cannot use other access control-specific headers in your request. For more +// information, see Canned ACL +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * +// Specify access permissions explicitly with the x-amz-grant-read, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. When using these headers, you specify explicit access permissions and +// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the +// permission. If you use these ACL-specific headers, you cannot use x-amz-acl +// header to set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control List +// (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). 
You specify +// each grantee as a type=value pair, where the type is one of the following: +// +// * id +// – if the value specified is the canonical user ID of an Amazon Web Services +// account +// +// * uri – if you are granting permissions to a predefined group +// +// * +// emailAddress – if the value specified is the email address of an Amazon Web +// Services account Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: +// +// * US East (N. Virginia) +// +// * US West +// (N. California) +// +// * US West (Oregon) +// +// * Asia Pacific (Singapore) +// +// * Asia Pacific +// (Sydney) +// +// * Asia Pacific (Tokyo) +// +// * Europe (Ireland) +// +// * South America (São +// Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see +// Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. +// +// For example, the following +// x-amz-grant-read header grants list objects permission to the two Amazon Web +// Services accounts identified by their email addresses. x-amz-grant-read: +// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// +// You can use either +// a canned ACL or specify access permissions explicitly. You cannot do both. +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (using request elements) in the following ways: +// +// * By the person's +// ID: <Grantee><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee> +// DisplayName is optional and ignored in the request. +// +// * By URI: +// <Grantee><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee> +// +// * By Email +// address: <Grantee><EmailAddress>Grantees@email.com</EmailAddress></Grantee> The grantee is resolved to the +// CanonicalUser and, in a response to a GET Object acl request, appears as the +// CanonicalUser. Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: +// +// * US East (N. Virginia) +// +// * US West +// (N. California) +// +// * US West (Oregon) +// +// * Asia Pacific (Singapore) +// +// * Asia Pacific +// (Sydney) +// +// * Asia Pacific (Tokyo) +// +// * Europe (Ireland) +// +// * South America (São +// Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see +// Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the +// Amazon Web Services General Reference. +// +// Versioning The ACL of an object is set +// at the object version level. By default, PUT sets the ACL of the current version +// of an object. To set the ACL of a different version, use the versionId +// subresource. Related Resources +// +// * CopyObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// * +// GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) { + if params == nil { + params = &PutObjectAclInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectAcl", params, optFns, c.addOperationPutObjectAclMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectAclOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectAclInput struct { + + // The bucket name that contains the object to which you want to attach the ACL.
+ // When using this action with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Key for which the PUT action was initiated. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Key *string + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + ACL types.ObjectCannedACL + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *types.AccessControlPolicy + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a + // message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 + // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied).
+ ExpectedBucketOwner *string + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. This action is not supported by Amazon S3 on Outposts. + GrantFullControl *string + + // Allows grantee to list the objects in the bucket. This action is not supported + // by Amazon S3 on Outposts. + GrantRead *string + + // Allows grantee to read the bucket ACL. This action is not supported by Amazon S3 + // on Outposts. + GrantReadACP *string + + // Allows grantee to create new objects in the bucket. For the bucket and object + // owners of existing objects, also allows deletions and overwrites of those + // objects. + GrantWrite *string + + // Allows grantee to write the ACL for the applicable bucket. This action is not + // supported by Amazon S3 on Outposts. + GrantWriteACP *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type PutObjectAclOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectAcl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectAcl{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectAcl(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectAclInputChecksumMiddlewares(stack, options); err != nil { + return 
err + } + if err = addPutObjectAclUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObjectAcl", + } +} + +// getPutObjectAclRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. +func getPutObjectAclRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectAclInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectAclRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectAclBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutObjectAclBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectAclInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectAclBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go new file mode 100644 index 000000000..b8004b598 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go @@ -0,0 +1,237 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Applies a legal hold configuration to the specified object. For more +// information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This action +// is not supported by Amazon S3 on Outposts.
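As a usage illustration for the PutObjectLegalHold operation documented above, here is a minimal sketch. It assumes a configured *s3.Client (constructed as in the earlier PutObject sketch) and hypothetical bucket and key names; whether the call succeeds also depends on the bucket having Object Lock enabled.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// Sketch only: place an ON legal hold on the current version of an object.
func applyLegalHold(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-key"),    // hypothetical
		LegalHold: &types.ObjectLockLegalHold{
			Status: types.ObjectLockLegalHoldStatusOn,
		},
	})
	return err
}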
+func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { + if params == nil { + params = &PutObjectLegalHoldInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectLegalHold", params, optFns, c.addOperationPutObjectLegalHoldMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectLegalHoldOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectLegalHoldInput struct { + + // The bucket name containing the object that you want to place a legal hold on. + // When using this action with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object that you want to place a legal hold on. + // + // This member is required. + Key *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Container element for the legal hold configuration you want to apply to the + // specified object. + LegalHold *types.ObjectLockLegalHold + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The version ID of the object that you want to place a legal hold on. + VersionId *string + + noSmithyDocumentSerde +} + +type PutObjectLegalHoldOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLegalHold{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLegalHold(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectLegalHoldInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectLegalHoldUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObjectLegalHold", + } +} + +// getPutObjectLegalHoldRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
+func getPutObjectLegalHoldRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectLegalHoldInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectLegalHoldInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectLegalHoldRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectLegalHoldBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutObjectLegalHoldBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectLegalHoldInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectLegalHoldBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go new file mode 100644 index 000000000..9740967a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go @@ -0,0 +1,234 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// * The +// DefaultRetention settings require both a mode and a period. +// +// * The +// DefaultRetention period can be either Days or Years but you must select one. You +// cannot specify Days and Years at the same time. +// +// * You can only enable Object +// Lock for new buckets. If you want to turn on Object Lock for an existing bucket, +// contact Amazon Web Services Support.
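The constraints listed above (DefaultRetention needs a mode plus exactly one of Days or Years, and Object Lock is normally enabled at bucket creation) are easiest to see in a sketch. As before, this is illustrative only: the bucket name is hypothetical and the client is assumed to be configured as in the earlier PutObject sketch.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// Sketch only: default every new object in the bucket to 30 days of
// GOVERNANCE-mode retention. Days and Years are mutually exclusive.
func enableObjectLockDefaults(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectLockConfiguration(ctx, &s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		ObjectLockConfiguration: &types.ObjectLockConfiguration{
			ObjectLockEnabled: types.ObjectLockEnabledEnabled,
			Rule: &types.ObjectLockRule{
				DefaultRetention: &types.DefaultRetention{
					Mode: types.ObjectLockRetentionModeGovernance,
					Days: 30,
				},
			},
		},
	})
	return err
}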
+func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { + if params == nil { + params = &PutObjectLockConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectLockConfiguration", params, optFns, c.addOperationPutObjectLockConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectLockConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectLockConfigurationInput struct { + + // The bucket whose Object Lock configuration you want to create or replace. + // + // This member is required. + Bucket *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // The Object Lock configuration that you want to apply to the specified bucket. + ObjectLockConfiguration *types.ObjectLockConfiguration + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string + + noSmithyDocumentSerde +} + +type PutObjectLockConfigurationOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLockConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLockConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectLockConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObjectLockConfiguration", + } +} + +// getPutObjectLockConfigurationRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. 
+func getPutObjectLockConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectLockConfigurationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectLockConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectLockConfigurationRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectLockConfigurationBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectLockConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectLockConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go new file mode 100644 index 000000000..c4918f3cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go @@ -0,0 +1,244 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Places an Object Retention configuration on an object. For more information, see +// Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). Users or +// accounts require the s3:PutObjectRetention permission in order to place an +// Object Retention configuration on objects. Bypassing a Governance Retention +// configuration requires the s3:BypassGovernanceRetention permission. This action +// is not supported by Amazon S3 on Outposts.
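For the PutObjectRetention operation documented above, a minimal sketch follows; as with the other examples, the names are hypothetical and the client is assumed to be configured as in the earlier PutObject sketch. COMPLIANCE mode is chosen arbitrarily here; GOVERNANCE-mode locks can additionally be overridden via BypassGovernanceRetention together with the s3:BypassGovernanceRetention permission.

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// Sketch only: retain the current object version for 30 days in COMPLIANCE mode.
func retainObject(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-key"),    // hypothetical
		Retention: &types.ObjectLockRetention{
			Mode:            types.ObjectLockRetentionModeCompliance,
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
	})
	return err
}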
+func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { + if params == nil { + params = &PutObjectRetentionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectRetention", params, optFns, c.addOperationPutObjectRetentionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectRetentionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectRetentionInput struct { + + // The bucket name that contains the object you want to apply this Object Retention + // configuration to. When using this action with an access point, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // The key name for the object that you want to apply this Object Retention + // configuration to. + // + // This member is required. + Key *string + + // Indicates whether this action should bypass Governance-mode restrictions. + BypassGovernanceRetention bool + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The container element for the Object Retention configuration. + Retention *types.ObjectLockRetention + + // The version ID for the object that you want to apply this Object Retention + // configuration to. + VersionId *string + + noSmithyDocumentSerde +} + +type PutObjectRetentionOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged types.RequestCharged + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectRetention{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectRetention{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectRetention(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectRetentionInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectRetentionUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObjectRetention", + } +} + +// getPutObjectRetentionRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
+func getPutObjectRetentionRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectRetentionInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectRetentionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectRetentionRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectRetentionBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutObjectRetentionBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectRetentionInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectRetentionBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go new file mode 100644 index 000000000..43effb9eb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go @@ -0,0 +1,291 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the supplied tag-set to an object that already exists in a bucket. A tag is +// a key-value pair. You can associate tags with an object by sending a PUT request +// against the tagging subresource that is associated with the object. You can +// retrieve tags by sending a GET request. For more information, see +// GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). For +// tagging restrictions related to characters and encodings, see Tag +// Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. To +// use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. To put tags of any other version, use the versionId query +// parameter. You also need permission for the s3:PutObjectVersionTagging action.
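+//
+// An illustrative usage sketch (not part of the generated documentation):
+// replacing an object's tag-set, assuming a configured *Client named client, a
+// context.Context named ctx, and the aws and types packages; bucket, key, and
+// tag values are placeholders.
+//
+//	_, err := client.PutObjectTagging(ctx, &PutObjectTaggingInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("my-object"),
+//		Tagging: &types.Tagging{
+//			TagSet: []types.Tag{{Key: aws.String("env"), Value: aws.String("test")}},
+//		},
+//	})
+//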
+// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). Special +// Errors +// +// * Code: InvalidTagError +// +// * Cause: The tag provided was not a valid tag. +// This error can occur if the tag did not pass input validation. For more +// information, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: +// MalformedXMLError +// +// * Cause: The XML provided does not match the schema. +// +// * Code: +// OperationAbortedError +// +// * Cause: A conflicting conditional action is currently in +// progress against this resource. Please try again. +// +// * Code: InternalError +// +// * +// Cause: The service was unable to apply the provided tag to the object. +// +// Related +// Resources +// +// * GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// * +// DeleteObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) { + if params == nil { + params = &PutObjectTaggingInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutObjectTagging", params, optFns, c.addOperationPutObjectTaggingMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutObjectTaggingOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutObjectTaggingInput struct { + + // The bucket name containing the object. When using this action with an access + // point, you must direct requests to the access point hostname. The access point + // hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Name of the object key. + // + // This member is required. + Key *string + + // Container for the TagSet and Tag elements + // + // This member is required. + Tagging *types.Tagging + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. 
For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash for the request body. For requests made using the Amazon Web + // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is + // calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // The versionId of the object that the tag-set will be added to. + VersionId *string + + noSmithyDocumentSerde +} + +type PutObjectTaggingOutput struct { + + // The versionId of the object the tag-set was added to. + VersionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectTagging{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectTagging{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectTagging(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutObjectTaggingInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutObjectTaggingUpdateEndpoint(stack, options); err != nil { 
+ return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutObjectTagging", + } +} + +// getPutObjectTaggingRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. +func getPutObjectTaggingRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutObjectTaggingInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutObjectTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutObjectTaggingRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutObjectTaggingBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getPutObjectTaggingBucketMember(input interface{}) (*string, bool) { + in := input.(*PutObjectTaggingInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutObjectTaggingBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go new file mode 100644 index 000000000..922102b58 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go @@ -0,0 +1,240 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. +// To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
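+//
+// An illustrative usage sketch (not part of the generated documentation):
+// blocking all public access on a bucket, assuming a configured *Client named
+// client, a context.Context named ctx, and the aws and types packages; the
+// bucket name is a placeholder.
+//
+//	_, err := client.PutPublicAccessBlock(ctx, &PutPublicAccessBlockInput{
+//		Bucket: aws.String("my-bucket"),
+//		PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
+//			BlockPublicAcls:       true,
+//			BlockPublicPolicy:     true,
+//			IgnorePublicAcls:      true,
+//			RestrictPublicBuckets: true,
+//		},
+//	})
+//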
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the +// PublicAccessBlock configurations are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level and +// account-level settings. For more information about when Amazon S3 considers a +// bucket or an object public, see The Meaning of "Public" +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// Related Resources +// +// * GetPublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// * +// DeletePublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// * +// GetBucketPolicyStatus +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// * +// Using Amazon S3 Block Public Access +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { + if params == nil { + params = &PutPublicAccessBlockInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutPublicAccessBlock", params, optFns, c.addOperationPutPublicAccessBlockMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutPublicAccessBlockOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutPublicAccessBlockInput struct { + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want + // to set. + // + // This member is required. + Bucket *string + + // The PublicAccessBlock configuration that you want to apply to this Amazon S3 + // bucket. You can enable the configuration options in any combination. For more + // information about when Amazon S3 considers a bucket or object public, see The + // Meaning of "Public" + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon S3 User Guide. + // + // This member is required. + PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash of the PutPublicAccessBlock request body. For requests made using + // the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services + // SDKs, this field is calculated automatically. + ContentMD5 *string + + // The account ID of the expected bucket owner. 
If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +type PutPublicAccessBlockOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpPutPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutPublicAccessBlock{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutPublicAccessBlock(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addPutPublicAccessBlockInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addPutPublicAccessBlockUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "PutPublicAccessBlock", + } +} + +// getPutPublicAccessBlockRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. 
+func getPutPublicAccessBlockRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutPublicAccessBlockInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addPutPublicAccessBlockInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutPublicAccessBlockRequestAlgorithmMember, + RequireChecksum: true, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getPutPublicAccessBlockBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getPutPublicAccessBlockBucketMember(input interface{}) (*string, bool) { + in := input.(*PutPublicAccessBlockInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addPutPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getPutPublicAccessBlockBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go new file mode 100644 index 000000000..45a2aee6a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go @@ -0,0 +1,457 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Restores an archived copy of an object back into Amazon S3. This action is not +// supported by Amazon S3 on Outposts. This action performs the following types of +// requests: +// +// * select - Perform a select query on an archived object +// +// * restore an +// archive - Restore an archived object +// +// To use this operation, you must have +// permissions to perform the s3:RestoreObject action. The bucket owner has this +// permission by default and can grant this permission to others. For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. Querying Archives with Select Requests You use a +// select type of request to perform SQL queries on archived objects.
The archived +// objects that are being queried by the select request must be formatted as +// uncompressed comma-separated values (CSV) files. You can run queries and custom +// analytics on your archived data without having to restore your data to a hotter +// Amazon S3 tier. For an overview about select requests, see Querying Archived +// Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon S3 User Guide. When making a select request, do the following: +// +// * +// Define an output location for the select query's output. This must be an Amazon +// S3 bucket in the same Amazon Web Services Region as the bucket that contains the +// archive object that is being queried. The Amazon Web Services account that +// initiates the job must have permissions to write to the S3 bucket. You can +// specify the storage class and encryption for the output objects stored in the +// bucket. For more information about output, see Querying Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon S3 User Guide. For more information about the S3 structure in the +// request body, see the following: +// +// * PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * Managing +// Access with ACLs +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) in the +// Amazon S3 User Guide +// +// * Protecting Data Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) in +// the Amazon S3 User Guide +// +// * Define the SQL expression for the SELECT type of +// restoration for your query in the request body's SelectParameters structure. You +// can use expressions like the following examples. +// +// * The following expression +// returns all records from the specified object. SELECT * FROM Object +// +// * Assuming +// that you are not using any headers for data stored in the object, you can +// specify columns with positional headers. SELECT s._1, s._2 FROM Object s WHERE +// s._3 > 100 +// +// * If you have headers and you set the fileHeaderInfo in the CSV +// structure in the request body to USE, you can specify headers in the query. (If +// you set the fileHeaderInfo field to IGNORE, the first row is skipped for the +// query.) You cannot mix ordinal positions with header column names. SELECT s.Id, +// s.FirstName, s.SSN FROM S3Object s +// +// For more information about using SQL with S3 +// Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier +// Select +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon S3 User Guide. When making a select request, you can also do the +// following: +// +// * To expedite your queries, specify the Expedited tier. For more +// information about tiers, see "Restoring Archives," later in this topic. +// +// * +// Specify details about the data serialization format of both the input object +// that is being queried and the serialization of the CSV-encoded query +// results. +// +// The following are additional important facts about the select +// feature: +// +// * The output results are new Amazon S3 objects. Unlike archive +// retrievals, they are stored until explicitly deleted, either manually or through a +// lifecycle policy. +// +// * You can issue more than one select request on the same +// Amazon S3 object.
Amazon S3 doesn't deduplicate requests, so avoid issuing +// duplicate requests. +// +// * Amazon S3 accepts a select request even if the object has +// already been restored. A select request doesn’t return error response +// 409. +// +// Restoring objects Objects that you archive to the S3 Glacier or S3 Glacier +// Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 +// Intelligent-Tiering Deep Archive tiers are not accessible in real time. For +// objects in Archive Access or Deep Archive Access tiers you must first initiate a +// restore request, and then wait until the object is moved into the Frequent +// Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage +// classes you must first initiate a restore request, and then wait until a +// temporary copy of the object is available. To access an archived object, you +// must restore the object for the duration (number of days) that you specify. To +// restore a specific object version, you can provide a version ID. If you don't +// provide a version ID, Amazon S3 restores the current version. When restoring an +// archived object (or using a select request), you can specify one of the +// following data access tier options in the Tier element of the request body: +// +// * +// Expedited - Expedited retrievals allow you to quickly access your data stored in +// the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when +// occasional urgent requests for a subset of archives are required. For all but +// the largest archived objects (250 MB+), data accessed using Expedited retrievals +// is typically made available within 1–5 minutes. Provisioned capacity ensures +// that retrieval capacity for Expedited retrievals is available when you need it. +// Expedited retrievals and provisioned capacity are not available for objects +// stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering +// Deep Archive tier. +// +// * Standard - Standard retrievals allow you to access any of +// your archived objects within several hours. This is the default option for +// retrieval requests that do not specify the retrieval option. Standard retrievals +// typically finish within 3–5 hours for objects stored in the S3 Glacier storage +// class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 +// hours for objects stored in the S3 Glacier Deep Archive storage class or S3 +// Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects +// stored in S3 Intelligent-Tiering. +// +// * Bulk - Bulk retrievals are the lowest-cost +// retrieval option in S3 Glacier, enabling you to retrieve large amounts, even +// petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 +// hours for objects stored in the S3 Glacier storage class or S3 +// Intelligent-Tiering Archive tier. They typically finish within 48 hours for +// objects stored in the S3 Glacier Deep Archive storage class or S3 +// Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects +// stored in S3 Intelligent-Tiering. +// +// For more information about archive retrieval +// options and provisioned capacity for Expedited data access, see Restoring +// Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the +// Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the +// restore speed to a faster speed while it is in progress. 
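+//
+// An illustrative usage sketch (not part of the generated documentation):
+// initiating a 7-day Standard-tier restore, assuming a configured *Client named
+// client, a context.Context named ctx, and the aws and types packages; bucket
+// and key are placeholders.
+//
+//	_, err := client.RestoreObject(ctx, &RestoreObjectInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("archived-object"),
+//		RestoreRequest: &types.RestoreRequest{
+//			Days:                 7,
+//			GlacierJobParameters: &types.GlacierJobParameters{Tier: types.TierStandard},
+//		},
+//	})
+//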
For more information, +// see Upgrading the speed of an in-progress restore +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// in the Amazon S3 User Guide. To get the status of object restoration, you can +// send a HEAD request. Operations return the x-amz-restore header, which provides +// information about the restoration status, in the response. You can use Amazon S3 +// event notifications to notify you when a restore is initiated or completed. For +// more information, see Configuring Amazon S3 Event Notifications +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the +// Amazon S3 User Guide. After restoring an archived object, you can update the +// restoration period by reissuing the request with a new period. Amazon S3 updates +// the restoration period relative to the current time and charges only for the +// request; there are no data transfer charges. You cannot update the restoration +// period when Amazon S3 is actively processing your current restore request for +// the object. If your bucket has a lifecycle configuration with a rule that +// includes an expiration action, the object expiration overrides the life span +// that you specify in a restore request. For example, if you restore an object +// copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 +// deletes the object in 3 days. For more information about lifecycle +// configuration, see PutBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// and Object Lifecycle Management +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) in +// the Amazon S3 User Guide. Responses A successful action returns either the 200 OK or +// 202 Accepted status code. +// +// * If the object is not previously restored, then +// Amazon S3 returns 202 Accepted in the response. +// +// * If the object is previously +// restored, Amazon S3 returns 200 OK in the response. +// +// # Special Errors +// +// * Code: +// RestoreAlreadyInProgress +// +// * Cause: Object restore is already in progress. (This +// error does not apply to SELECT type requests.) +// +// * HTTP Status Code: 409 +// Conflict +// +// * SOAP Fault Code Prefix: Client +// +// * Code: +// GlacierExpeditedRetrievalNotAvailable +// +// * Cause: expedited retrievals are +// currently not available. Try again later. (Returned if there is insufficient +// capacity to process the Expedited request. This error applies only to Expedited +// retrievals and not to S3 Standard or Bulk retrievals.)
+// +// * HTTP Status Code: +// 503 +// +// * SOAP Fault Code Prefix: N/A +// +// # Related Resources +// +// * +// PutBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// * +// GetBucketNotificationConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// * +// SQL Reference for Amazon S3 Select and S3 Glacier Select +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon S3 User Guide +func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { + if params == nil { + params = &RestoreObjectInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RestoreObject", params, optFns, c.addOperationRestoreObjectMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RestoreObjectOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RestoreObjectInput struct { + + // The bucket name containing the object to restore. When using this action with an + // access point, you must direct requests to the access point hostname. The access + // point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the action was initiated. + // + // This member is required. + Key *string + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information + // about downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + RequestPayer types.RequestPayer + + // Container for restore job parameters. + RestoreRequest *types.RestoreRequest + + // VersionId used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type RestoreObjectOutput struct { + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged types.RequestCharged + + // Indicates the path in the provided S3 output location where Select results will + // be restored to. + RestoreOutputPath *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpRestoreObject{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpRestoreObject{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpRestoreObjectValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreObject(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRestoreObjectInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addRestoreObjectUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRestoreObject(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "RestoreObject", + } +} + +// getRestoreObjectRequestAlgorithmMember gets the 
request checksum algorithm value +// provided as input. +func getRestoreObjectRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*RestoreObjectInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addRestoreObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getRestoreObjectRequestAlgorithmMember, + RequireChecksum: false, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getRestoreObjectBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name. +func getRestoreObjectBucketMember(input interface{}) (*string, bool) { + in := input.(*RestoreObjectInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addRestoreObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getRestoreObjectBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go new file mode 100644 index 000000000..1b9dbc7bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go @@ -0,0 +1,426 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithysync "github.com/aws/smithy-go/sync" + smithyhttp "github.com/aws/smithy-go/transport/http" + "sync" +) + +// This action filters the contents of an Amazon S3 object based on a simple +// structured query language (SQL) statement. In the request, along with the SQL +// expression, you must also specify a data serialization format (JSON, CSV, or +// Apache Parquet) of the object. Amazon S3 uses this format to parse object data +// into records, and returns only records that match the specified SQL expression. +// You must also specify the data serialization format for the response. This +// action is not supported by Amazon S3 on Outposts. For more information about +// Amazon S3 Select, see Selecting Content from Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// and SELECT Command +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) +// in the Amazon S3 User Guide.
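+//
+// An illustrative usage sketch (not part of the generated documentation): a
+// simple CSV query, assuming a configured *Client named client, a
+// context.Context named ctx, and the aws and types packages; bucket, key, and
+// the SQL text are placeholders.
+//
+//	out, err := client.SelectObjectContent(ctx, &SelectObjectContentInput{
+//		Bucket:         aws.String("my-bucket"),
+//		Key:            aws.String("data.csv"),
+//		Expression:     aws.String("SELECT * FROM S3Object s"),
+//		ExpressionType: types.ExpressionTypeSql,
+//		InputSerialization: &types.InputSerialization{
+//			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
+//		},
+//		OutputSerialization: &types.OutputSerialization{CSV: &types.CSVOutput{}},
+//	})
+//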
For more information about using SQL with Amazon S3 +// Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon S3 User Guide. Permissions You must have s3:GetObject permission +// for this operation. Amazon S3 Select does not support anonymous access. For more +// information about permissions, see Specifying Permissions in a Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in +// the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to +// query objects that have the following format properties: +// +// * CSV, JSON, and +// Parquet - Objects must be in CSV, JSON, or Parquet format. +// +// * UTF-8 - UTF-8 is +// the only encoding type Amazon S3 Select supports. +// +// * GZIP or BZIP2 - CSV and +// JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only +// compression formats that Amazon S3 Select supports for CSV and JSON files. +// Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. +// Amazon S3 Select does not support whole-object compression for Parquet +// objects. +// +// * Server-side encryption - Amazon S3 Select supports querying objects +// that are protected with server-side encryption. For objects that are encrypted +// with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must +// use the headers that are documented in the GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). For more +// information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 +// managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), +// server-side encryption is handled transparently, so you don't need to specify +// anything. For more information about server-side encryption, including SSE-S3 +// and SSE-KMS, see Protecting Data Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) in +// the Amazon S3 User Guide. +// +// Working with the Response Body Given the response +// size is unknown, Amazon S3 Select streams the response as a series of messages +// and includes a Transfer-Encoding header with chunked as its value in the +// response. For more information, see Appendix: SelectObjectContent Response +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html). +// GetObject Support The SelectObjectContent action does not support the following +// GetObject functionality. For more information, see GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// +// * Range: +// Although you can specify a scan range for an Amazon S3 Select request (see +// SelectObjectContentRequest - ScanRange +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) +// in the request parameters), you cannot specify the range of bytes of an object +// to return. +// +// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You +// cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. 
+// For more information about storage classes, see Storage Classes +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) +// in the Amazon S3 User Guide. +// +// Special Errors For a list of special errors for +// this operation, see List of SELECT Object Content Error Codes +// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) +// Related Resources +// +// * GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * +// GetBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// * +// PutBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) { + if params == nil { + params = &SelectObjectContentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "SelectObjectContent", params, optFns, c.addOperationSelectObjectContentMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*SelectObjectContentOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Request to filter the contents of an Amazon S3 object based on a simple +// Structured Query Language (SQL) statement. In the request, along with the SQL +// expression, you must specify a data serialization format (JSON or CSV) of the +// object. Amazon S3 uses this to parse object data into records. It returns only +// records that match the specified SQL expression. You must also specify the data +// serialization format for the response. For more information, see S3Select API +// Documentation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +type SelectObjectContentInput struct { + + // The S3 bucket. + // + // This member is required. + Bucket *string + + // The expression that is used to query the object. + // + // This member is required. + Expression *string + + // The type of the provided expression (for example, SQL). + // + // This member is required. + ExpressionType types.ExpressionType + + // Describes the format of the data in the object that is being queried. + // + // This member is required. + InputSerialization *types.InputSerialization + + // The object key. + // + // This member is required. + Key *string + + // Describes the format of the data that you want Amazon S3 to return in response. + // + // This member is required. + OutputSerialization *types.OutputSerialization + + // The account ID of the expected bucket owner. If the bucket is owned by a + // different account, the request fails with the HTTP status code 403 Forbidden + // (access denied). + ExpectedBucketOwner *string + + // Specifies if periodic request progress information should be enabled. + RequestProgress *types.RequestProgress + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string + + // The server-side encryption (SSE) customer managed key. This parameter is needed + // only when the object was created using a checksum algorithm.
For more + // information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKey *string + + // The MD5 server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string + + // Specifies the byte range of the object to get the records from. A record is + // processed when its first byte is contained by the range. This parameter is + // optional, but when specified, it must not be empty. See RFC 2616, Section + // 14.35.1 about how to specify the start and end of the range. ScanRange may be + // used in the following ways: + // + // * Start: 50, End: 100 - process only the records starting between + // the bytes 50 and 100 (inclusive, counting from zero) + // + // * Start: 50 (no End) - process only the + // records starting after the byte 50 + // + // * End: 50 (no Start) - process only the records within the + // last 50 bytes of the file. + ScanRange *types.ScanRange + + noSmithyDocumentSerde +} + +type SelectObjectContentOutput struct { + eventStream *SelectObjectContentEventStream + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +// GetStream returns the type to interact with the event stream. +func (o *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { + return o.eventStream +} + +func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpSelectObjectContent{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpSelectObjectContent{}, middleware.After) + if err != nil { + return err + } + if err = addEventStreamSelectObjectContentMiddleware(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSelectObjectContent(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addSelectObjectContentUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack);
err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSelectObjectContent(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "SelectObjectContent", + } +} + +// getSelectObjectContentBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. +func getSelectObjectContentBucketMember(input interface{}) (*string, bool) { + in := input.(*SelectObjectContentInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addSelectObjectContentUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getSelectObjectContentBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} + +// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent operation. +// +// For testing and mocking the event stream this type should be initialized via +// the NewSelectObjectContentEventStream constructor function, using the functional options +// to pass in nested mock behavior. +type SelectObjectContentEventStream struct { + // SelectObjectContentEventStreamReader is the EventStream reader for the + // SelectObjectContentEventStream events. This value is automatically set by the + // SDK when the API call is made. Use this member when unit testing your code with + // the SDK to mock out the EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + done chan struct{} + closeOnce sync.Once + err *smithysync.OnceErr +} + +// NewSelectObjectContentEventStream initializes a SelectObjectContentEventStream. +// This function should only be used for testing and mocking the SelectObjectContentEventStream +// stream within your application. +// +// The Reader member must be set before reading events from the stream. +func NewSelectObjectContentEventStream(optFns ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream { + es := &SelectObjectContentEventStream{ + done: make(chan struct{}), + err: smithysync.NewOnceErr(), + } + for _, fn := range optFns { + fn(es) + } + return es +} + +// Events returns a channel to read events from. +func (es *SelectObjectContentEventStream) Events() <-chan types.SelectObjectContentEventStream { + return es.Reader.Events() +} + +// Close closes the stream. Close must be called when done using the stream API. +// Not calling Close may result in resource leaks. +// +// Will close the underlying EventStream writer and reader, and no more events can be +// sent or received.
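+//
+// As an illustrative sketch (not part of the generated documentation), a typical
+// consume-then-close pattern, assuming out is a *SelectObjectContentOutput and
+// the types package is imported:
+//
+//	stream := out.GetStream()
+//	defer stream.Close()
+//	for event := range stream.Events() {
+//		if records, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
+//			_ = records.Value.Payload // raw bytes of the query results
+//		}
+//	}
+//	if err := stream.Err(); err != nil {
+//		// handle any error surfaced by the event stream
+//	}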
+func (es *SelectObjectContentEventStream) Close() error { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *SelectObjectContentEventStream) safeClose() { + close(es.done) + + es.Reader.Close() +} + +// Err returns any error that occurred while reading or writing EventStream Events +// from the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + +func (es *SelectObjectContentEventStream) waitStreamClose() { + type errorSet interface { + ErrorSet() <-chan struct{} + } + + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(errorSet); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go new file mode 100644 index 000000000..2617bed60 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go @@ -0,0 +1,496 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" +) + +// Uploads a part in a multipart upload. In this operation, you provide part data +// in your request. However, you have an option to specify your existing Amazon S3 +// object as a data source for the part you are uploading. To upload a part from an +// existing object, you use the UploadPartCopy +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. You must initiate a multipart upload (see CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) +// before you can upload any part. In response to your initiate request, Amazon S3 +// returns an upload ID, a unique identifier, that you must include in your upload +// part request. Part numbers can be any number from 1 to 10,000, inclusive. A part +// number uniquely identifies a part and also defines its position within the +// object being created. If you upload a new part using the same part number that +// was used with a previous part, the previously uploaded part is overwritten. For +// information about maximum and minimum part sizes and other multipart upload +// specifications, see Multipart upload limits +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the +// Amazon S3 User Guide. To ensure that data is not corrupted when traversing the +// network, specify the Content-MD5 header in the upload part request. Amazon S3 +// checks the part data against the provided MD5 value. 
If they do not match,
+// Amazon S3 returns an error. If the upload request is signed with Signature
+// Version 4, then Amazon S3 uses the x-amz-content-sha256 header as a
+// checksum instead of Content-MD5. For more information, see Authenticating
+// Requests: Using the Authorization Header (Amazon Web Services Signature Version
+// 4)
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
+// Note: After you initiate a multipart upload and upload one or more parts, you must
+// either complete or abort the multipart upload in order to stop getting charged for
+// storage of the uploaded parts. Only after you either complete or abort the multipart
+// upload does Amazon S3 free up the parts storage and stop charging you for the
+// parts storage. For more information on multipart uploads, go to Multipart Upload
+// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in
+// the Amazon S3 User Guide. For information on the permissions required to use
+// the multipart upload API, go to Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the
+// Amazon S3 User Guide. You can optionally request server-side encryption where
+// Amazon S3 encrypts your data as it writes it to disks in its data centers and
+// decrypts it for you when you access it. You have the option of providing your
+// own encryption key, or you can use the Amazon Web Services managed encryption
+// keys. If you choose to provide your own encryption key, the request headers you
+// provide in the request must match the headers you used in the request to
+// initiate the upload by using CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// For more information, go to Using Server-Side Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// in the Amazon S3 User Guide. Server-side encryption is supported by the S3
+// Multipart Upload actions. Unless you are using a customer-provided encryption
+// key, you don't need to specify the encryption parameters in each UploadPart
+// request. Instead, you only need to specify the server-side encryption parameters
+// in the initial Initiate Multipart request. For more information, see
+// CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// If you requested server-side encryption using a customer-provided encryption key
+// in your initiate multipart upload request, you must provide identical encryption
+// information in each part upload using the following headers.
+//
+// *
+// x-amz-server-side-encryption-customer-algorithm
+//
+// *
+// x-amz-server-side-encryption-customer-key
+//
+// *
+// x-amz-server-side-encryption-customer-key-MD5
+//
+// # Special Errors
+//
+// * Code:
+// NoSuchUpload
+//
+// * Cause: The specified multipart upload does not exist. The upload
+// ID might be invalid, or the multipart upload might have been aborted or
+// completed.
+// +// * HTTP Status Code: 404 Not Found +// +// * SOAP Fault Code Prefix: +// Client +// +// # Related Resources +// +// * CreateMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// * +// CompleteMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// * +// AbortMultipartUpload +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// * +// ListParts +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// * +// ListMultipartUploads +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { + if params == nil { + params = &UploadPartInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UploadPart", params, optFns, c.addOperationUploadPartMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UploadPartOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UploadPartInput struct { + + // The name of the bucket to which the multipart upload was initiated. When using + // this action with an access point, you must direct requests to the access point + // hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using access points + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using + // this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts bucket ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Bucket *string + + // Object key for which the multipart upload was initiated. + // + // This member is required. + Key *string + + // Part number of part being uploaded. This is a positive integer between 1 and + // 10,000. + // + // This member is required. + PartNumber int32 + + // Upload ID identifying the multipart upload whose part is being uploaded. + // + // This member is required. + UploadId *string + + // Object data. + Body io.Reader + + // Indicates the algorithm used to create the checksum for the object when using + // the SDK. This header will not provide any additional functionality if not using + // the SDK. When sending this header, there must be a corresponding x-amz-checksum + // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the + // HTTP status code 400 Bad Request. For more information, see Checking object + // integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 + // ignores any provided ChecksumAlgorithm parameter. 
This checksum algorithm must
+ // be the same for all parts and must match the checksum value supplied in the
+ // CreateMultipartUpload request.
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ // Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ // Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see
+ // Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ // Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string
+
+ // Size of the body in bytes. This parameter is useful when the size of the body
+ // cannot be determined automatically.
+ ContentLength int64
+
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the bucket is owned by a
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header. This must be the same
+ // encryption key specified in the initiate multipart upload request.
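+ //
+ // A hedged sketch of deriving the related SSE-C members from a raw 256-bit key
+ // ("rawKey", "client", and the surrounding variables are illustrative, not part
+ // of this generated file):
+ //
+ //	sum := md5.Sum(rawKey)
+ //	key := base64.StdEncoding.EncodeToString(rawKey)
+ //	keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
+ //	_, err := client.UploadPart(ctx, &s3.UploadPartInput{
+ //		Bucket: aws.String("my-bucket"), Key: aws.String("my-object"),
+ //		UploadId: uploadID, PartNumber: partNum, Body: part,
+ //		SSECustomerAlgorithm: aws.String("AES256"),
+ //		SSECustomerKey:       &key,
+ //		SSECustomerKeyMD5:    &keyMD5,
+ //	})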
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+
+ noSmithyDocumentSerde
+}
+
+type UploadPartOutput struct {
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this may not
+ // be a checksum value of the object. For more information about how checksums are
+ // calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string
+
+ // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this may not
+ // be a checksum value of the object. For more information about how checksums are
+ // calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string
+
+ // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this may not
+ // be a checksum value of the object. For more information about how checksums are
+ // calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string
+
+ // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ // present if it was uploaded with the object. With multipart uploads, this may not
+ // be a checksum value of the object. For more information about how checksums are
+ // calculated with multipart uploads, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string
+
+ // Entity tag for the uploaded object.
+ ETag *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed key that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
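+ //
+ // For example (a hedged sketch; "out" is the *UploadPartOutput returned above),
+ // callers can branch on the reported algorithm:
+ //
+ //	if out.ServerSideEncryption == types.ServerSideEncryptionAwsKms {
+ //		// the part was protected with SSE-KMS
+ //	}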
+ ServerSideEncryption types.ServerSideEncryption + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPart{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPart{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addOpUploadPartValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPart(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addUploadPartInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addUploadPartUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUploadPart(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "UploadPart", + } +} + +// getUploadPartRequestAlgorithmMember gets the request checksum algorithm value +// provided as input. 
+func getUploadPartRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*UploadPartInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addUploadPartInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getUploadPartRequestAlgorithmMember,
+ RequireChecksum: false,
+ EnableTrailingChecksum: true,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getUploadPartBucketMember returns a pointer to string denoting a provided bucket
+// member value and a boolean indicating if the input has a modeled bucket name.
+func getUploadPartBucketMember(input interface{}) (*string, bool) {
+ in := input.(*UploadPartInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addUploadPartUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getUploadPartBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// PresignUploadPart is used to generate a presigned HTTP request that contains a
+// presigned URL, the signed headers, and the HTTP method used.
+func (c *PresignClient) PresignUploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &UploadPartInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "UploadPart", params, clientOptFns,
+ c.client.addOperationUploadPartMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ func(stack *middleware.Stack, options Options) error {
+ return awshttp.RemoveContentTypeHeader(stack)
+ },
+ addUploadPartPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
+
+func addUploadPartPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
new file mode 100644
index 000000000..f1fa13954
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
@@ -0,0 +1,437 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Uploads a part by copying data from an existing object as data source. You
+// specify the data source by adding the request header x-amz-copy-source in your
+// request and a byte range by adding the request header x-amz-copy-source-range in
+// your request. For information about maximum and minimum part sizes and other
+// multipart upload specifications, see Multipart upload limits
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the
+// Amazon S3 User Guide. Instead of using an existing object as part data, you
+// might use the UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action and
+// provide data in your request. You must initiate a multipart upload before you
+// can upload any part. In response to your initiate request, Amazon S3 returns a
+// unique identifier, the upload ID, that you must include in your upload part
+// request. For more information about using the UploadPartCopy operation, see the
+// following:
+//
+// * For conceptual information about multipart uploads, see Uploading
+// Objects Using Multipart Upload
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the
+// Amazon S3 User Guide.
+//
+// * For information about permissions required to use the
+// multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the
+// Amazon S3 User Guide.
+//
+// * For information about copying objects using a single
+// atomic action vs. a multipart upload, see Operations on Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the
+// Amazon S3 User Guide.
+//
+// * For information about using server-side encryption with
+// customer-provided encryption keys with the UploadPartCopy operation, see
+// CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+// and UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+//
+// Note the
+// following additional considerations about the request headers
+// x-amz-copy-source-if-match, x-amz-copy-source-if-none-match,
+// x-amz-copy-source-if-unmodified-since, and
+// x-amz-copy-source-if-modified-since:
+//
+// * Consideration 1 - If both of the
+// x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are
+// present in the request as follows: x-amz-copy-source-if-match condition
+// evaluates to true, and; x-amz-copy-source-if-unmodified-since condition
+// evaluates to false; Amazon S3 returns 200 OK and copies the data.
+//
+// *
+// Consideration 2 - If both of the x-amz-copy-source-if-none-match and
+// x-amz-copy-source-if-modified-since headers are present in the request as
+// follows: x-amz-copy-source-if-none-match condition evaluates to false, and;
+// x-amz-copy-source-if-modified-since condition evaluates to true; Amazon S3
+// returns 412 Precondition Failed response code.
+//
+// Versioning If your bucket has
+// versioning enabled, you could have multiple versions of the same object. By
+// default, x-amz-copy-source identifies the current version of the object to copy.
+// If the current version is a delete marker and you don't specify a versionId in
+// the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does
+// not exist. If you specify versionId in the x-amz-copy-source and the versionId
+// is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not
+// allowed to specify a delete marker as a version for the x-amz-copy-source. You
+// can optionally specify a specific version of the source object to copy by adding
+// the versionId subresource as shown in the following example: x-amz-copy-source:
+// /bucket/object?versionId=version id
+//
+// # Special Errors
+//
+// * Code: NoSuchUpload
+//
+// *
+// Cause: The specified multipart upload does not exist. The upload ID might be
+// invalid, or the multipart upload might have been aborted or completed.
+//
+// * HTTP
+// Status Code: 404 Not Found
+//
+// * Code: InvalidRequest
+//
+// * Cause: The specified copy
+// source is not supported as a byte-range copy source.
+//
+// * HTTP Status Code: 400
+// Bad Request
+//
+// # Related Resources
+//
+// * CreateMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// *
+// UploadPart
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// *
+// CompleteMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// *
+// AbortMultipartUpload
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// *
+// ListParts
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// *
+// ListMultipartUploads
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html)
+func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) {
+ if params == nil {
+ params = &UploadPartCopyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UploadPartCopy", params, optFns, c.addOperationUploadPartCopyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UploadPartCopyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type UploadPartCopyInput struct {
+
+ // The bucket name. When using this action with an access point, you must direct
+ // requests to the access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using access points
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts,
+ // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
+ // hostname takes the form
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
+ // this action with S3 on Outposts through the Amazon Web Services SDKs, you
+ // provide the Outposts bucket ARN in place of the bucket name. For more
+ // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
+ // Amazon S3 User Guide.
+ //
+ // This member is required.
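+ //
+ // For example (a hedged sketch; the ARN value is illustrative), an access point
+ // ARN can be passed directly in place of the bucket name:
+ //
+ //	Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),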
+ Bucket *string
+
+ // Specifies the source object for the copy operation. You specify the value in one
+ // of two formats, depending on whether you want to access the source object
+ // through an access point
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
+ //
+ // *
+ // For objects not accessed through an access point, specify the name of the source
+ // bucket and key of the source object, separated by a slash (/). For example, to
+ // copy the object reports/january.pdf from the bucket awsexamplebucket, use
+ // awsexamplebucket/reports/january.pdf. The value must be URL-encoded.
+ //
+ // * For
+ // objects accessed through access points, specify the Amazon Resource Name (ARN)
+ // of the object as accessed through the access point, in the format
+ // arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>.
+ // For example, to copy the object
+ // reports/january.pdf through access point my-access-point owned by account
+ // 123456789012 in Region us-west-2, use the URL encoding of
+ // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+ // The value must be URL-encoded. Amazon S3 supports copy operations using access
+ // points only when the source and destination buckets are in the same Amazon Web
+ // Services Region. Alternatively, for objects accessed through Amazon S3 on
+ // Outposts, specify the ARN of the object as accessed in the format
+ // arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+ // For example, to copy the object
+ // reports/january.pdf through outpost my-outpost owned by account 123456789012 in
+ // Region us-west-2, use the URL encoding of
+ // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+ // The value must be URL-encoded.
+ //
+ // To copy a specific version of an object, append
+ // ?versionId=<version-id> to the value (for example,
+ // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ // If you don't specify a version ID, Amazon S3 copies the latest version of the
+ // source object.
+ //
+ // This member is required.
+ CopySource *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Part number of part being copied. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // This member is required.
+ PartNumber int32
+
+ // Upload ID identifying the multipart upload whose part is being copied.
+ //
+ // This member is required.
+ UploadId *string
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time
+
+ // Copies the object if its entity tag (ETag) is different than the specified ETag.
+ CopySourceIfNoneMatch *string
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time
+
+ // The range of bytes to copy from the source object. The range value must use the
+ // form bytes=first-last, where the first and last are the zero-based byte offsets
+ // to copy. For example, bytes=0-9 indicates that you want to copy the first 10
+ // bytes of the source. You can copy a range only if the source object is greater
+ // than 5 MB.
+ CopySourceRange *string
+
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256).
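+ //
+ // A hedged sketch of supplying the source-decryption material ("srcKey" is the
+ // raw key originally used to encrypt the source object; all names are
+ // illustrative, not part of this generated file):
+ //
+ //	sum := md5.Sum(srcKey)
+ //	in.CopySourceSSECustomerAlgorithm = aws.String("AES256")
+ //	in.CopySourceSSECustomerKey = aws.String(base64.StdEncoding.EncodeToString(srcKey))
+ //	in.CopySourceSSECustomerKeyMD5 = aws.String(base64.StdEncoding.EncodeToString(sum[:]))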
+ CopySourceSSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one that
+ // was used when the source object was created.
+ CopySourceSSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string
+
+ // The account ID of the expected destination bucket owner. If the destination
+ // bucket is owned by a different account, the request fails with the HTTP status
+ // code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The account ID of the expected source bucket owner. If the source bucket is
+ // owned by a different account, the request fails with the HTTP status code 403
+ // Forbidden (access denied).
+ ExpectedSourceBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example,
+ // AES256).
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header. This must be the same
+ // encryption key specified in the initiate multipart upload request.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ SSECustomerKeyMD5 *string
+
+ noSmithyDocumentSerde
+}
+
+type UploadPartCopyOutput struct {
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Container for all response elements.
+ CopyPartResult *types.CopyPartResult
+
+ // The version of the source object that was copied, if you have enabled versioning
+ // on the source bucket.
+ CopySourceVersionId *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm used.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round-trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed key that was used for the
+ // object.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPartCopy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPartCopy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPartCopy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addUploadPartCopyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opUploadPartCopy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "s3",
+ OperationName: "UploadPartCopy",
+ }
+}
+
+// getUploadPartCopyBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getUploadPartCopyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*UploadPartCopyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addUploadPartCopyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getUploadPartCopyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
new file mode 100644
index 000000000..78eeadd45
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
@@ -0,0 +1,457 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "strings"
+ "time"
+)
+
+// Passes transformed objects to a GetObject operation when using Object Lambda
+// access points. For information about Object Lambda access points, see
+// Transforming objects with Object Lambda access points
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
+// in the Amazon S3 User Guide. This operation supports metadata that can be
+// returned by GetObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html), in
+// addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage.
+// The GetObject response metadata is supported so that the WriteGetObjectResponse
+// caller, typically a Lambda function, can provide the same metadata when it
+// internally invokes GetObject. When WriteGetObjectResponse is called by a
+// customer-owned Lambda function, the metadata returned to the end user GetObject
+// call might differ from what Amazon S3 would normally return. You can include any
+// number of metadata headers. When including a metadata header, it should be
+// prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header:
+// MyCustomValue. The primary use case for this is to forward GetObject metadata.
+// Amazon Web Services provides some prebuilt Lambda functions that you can use
+// with S3 Object Lambda to detect and redact personally identifiable information
+// (PII) and decompress S3 objects. These Lambda functions are available in the
+// Amazon Web Services Serverless Application Repository, and can be selected
+// through the Amazon Web Services Management Console when you create your Object
+// Lambda access point. Example 1: PII Access Control - This Lambda function uses
+// Amazon Comprehend, a natural language processing (NLP) service using machine
+// learning to find insights and relationships in text. It automatically detects
+// personally identifiable information (PII) such as names, addresses, dates,
+// credit card numbers, and social security numbers from documents in your Amazon
+// S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon
+// Comprehend, a natural language processing (NLP) service using machine learning
+// to find insights and relationships in text. It automatically redacts personally
+// identifiable information (PII) such as names, addresses, dates, credit card
+// numbers, and social security numbers from documents in your Amazon S3 bucket.
+// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression is
+// equipped to decompress objects stored in S3 in one of six compressed file
+// formats including bzip2, gzip, snappy, zlib, zstandard, and ZIP. For information
+// on how to view and use these functions, see Using Amazon Web Services-built
+// Lambda functions
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) in
+// the Amazon S3 User Guide.
+func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) {
+ if params == nil {
+ params = &WriteGetObjectResponseInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "WriteGetObjectResponse", params, optFns, c.addOperationWriteGetObjectResponseMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*WriteGetObjectResponseOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type WriteGetObjectResponseInput struct {
+
+ // Route prefix to the HTTP URL generated.
+ //
+ // This member is required.
+ RequestRoute *string
+
+ // A single use encrypted token that maps WriteGetObjectResponse to the end user
+ // GetObject request.
+ //
+ // This member is required.
+ RequestToken *string
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string
+
+ // The object data.
+ Body io.Reader
+
+ // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
+ // server-side encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the
+ // base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ // Lambda function. This may not match the checksum for the object stored in Amazon
+ // S3. Amazon S3 will perform validation of the checksum values only when the
+ // original GetObject request required checksum validation. For more information
+ // about checksums, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide. Only one checksum header can be specified at a
+ // time. If you supply multiple checksum headers, this request will fail.
+ ChecksumCRC32 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the
+ // base64-encoded, 32-bit CRC32C checksum of the object returned by the Object
+ // Lambda function. This may not match the checksum for the object stored in Amazon
+ // S3. Amazon S3 will perform validation of the checksum values only when the
+ // original GetObject request required checksum validation. For more information
+ // about checksums, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide. Only one checksum header can be specified at a
+ // time. If you supply multiple checksum headers, this request will fail.
+ ChecksumCRC32C *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the
+ // base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda
+ // function. This may not match the checksum for the object stored in Amazon S3.
+ // Amazon S3 will perform validation of the checksum values only when the original
+ // GetObject request required checksum validation. For more information about
+ // checksums, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide. Only one checksum header can be specified at a
+ // time. If you supply multiple checksum headers, this request will fail.
+ ChecksumSHA1 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the
+ // base64-encoded, 256-bit SHA-256 digest of the object returned by the Object
+ // Lambda function. This may not match the checksum for the object stored in Amazon
+ // S3. Amazon S3 will perform validation of the checksum values only when the
+ // original GetObject request required checksum validation. For more information
+ // about checksums, see Checking object integrity
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide. Only one checksum header can be specified at a
+ // time. If you supply multiple checksum headers, this request will fail.
+ ChecksumSHA256 *string
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // The size of the content body in bytes.
+ ContentLength int64
+
+ // The portion of the object returned in the response.
+ ContentRange *string
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string
+
+ // Specifies whether an object stored in Amazon S3 is (true) or is not (false) a
+ // delete marker.
+ DeleteMarker bool
+
+ // An opaque identifier assigned by a web server to a specific version of a
+ // resource found at a URL.
+ ETag *string
+
+ // A string that uniquely identifies an error condition. Returned in the <Code> tag
+ // of the error XML response for a corresponding GetObject call. Cannot be used
+ // with a successful StatusCode header or when the transformed object is provided
+ // in the body. All error codes from S3 are sentence-cased. The regular expression
+ // (regex) value is "^[A-Z][a-zA-Z]+$".
+ ErrorCode *string
+
+ // Contains a generic description of the error condition. Returned in the <Message>
+ // tag of the error XML response for a corresponding GetObject call. Cannot be used
+ // with a successful StatusCode header or when the transformed object is provided
+ // in the body.
+ ErrorMessage *string
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // that provide the object expiration information. The value of the rule-id is
+ // URL-encoded.
+ Expiration *string
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time
+
+ // The date and time that the object was last modified.
+ LastModified *time.Time
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Set to the number of metadata entries not returned in x-amz-meta headers. This
+ // can happen if you create metadata using an API like SOAP that supports more
+ // flexible metadata than the REST API. For example, using SOAP, you can create
+ // metadata whose values are not legal HTTP headers.
+ MissingMeta int32
+
+ // Indicates whether an object stored in Amazon S3 has an active legal hold.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // Indicates whether an object stored in Amazon S3 has Object Lock enabled. For
+ // more information about S3 Object Lock, see Object Lock
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html).
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when Object Lock is configured to expire.
+ ObjectLockRetainUntilDate *time.Time
+
+ // The count of parts this object has.
+ PartsCount int32
+
+ // Indicates whether the request involves a bucket that is either a source or a
+ // destination in a Replication rule. For more information about S3 Replication,
+ // see Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html).
+ ReplicationStatus types.ReplicationStatus
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // Provides information about object restoration operation and expiration time of
+ // the restored object copy.
+ Restore *string
+
+ // Encryption algorithm used if server-side encryption with a customer-provided
+ // encryption key was specified for object stored in Amazon S3.
+ SSECustomerAlgorithm *string
+
+ // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to
+ // encrypt data stored in S3. For more information, see Protecting data using
+ // server-side encryption with customer-provided encryption keys (SSE-C)
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html).
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed key that was used for the
+ // object stored in Amazon S3.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing requested object in
+ // Amazon S3 (for example, AES256, aws:kms).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // The integer status code for an HTTP response of a corresponding GetObject
+ // request. Status Codes
+ //
+ // * 200 - OK
+ //
+ // * 206 - Partial Content
+ //
+ // * 304 - Not
+ // Modified
+ //
+ // * 400 - Bad Request
+ //
+ // * 401 - Unauthorized
+ //
+ // * 403 - Forbidden
+ //
+ // * 404 -
+ // Not Found
+ //
+ // * 405 - Method Not Allowed
+ //
+ // * 409 - Conflict
+ //
+ // * 411 - Length
+ // Required
+ //
+ // * 412 - Precondition Failed
+ //
+ // * 416 - Range Not Satisfiable
+ //
+ // * 500 -
+ // Internal Server Error
+ //
+ // * 503 - Service Unavailable
+ StatusCode int32
+
+ // Provides storage class information of the object. Amazon S3 returns this header
+ // for all objects except for S3 Standard storage class objects. For more
+ // information, see Storage Classes
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
+ StorageClass types.StorageClass + + // The number of tags, if any, on the object. + TagCount int32 + + // An ID used to reference a specific version of the object. + VersionId *string + + noSmithyDocumentSerde +} + +type WriteGetObjectResponseOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestxml_serializeOpWriteGetObjectResponse{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpWriteGetObjectResponse{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddUnsignedPayloadMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + return err + } + if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { + return err + } + if err = addOpWriteGetObjectResponseValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opWriteGetObjectResponse(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addWriteGetObjectResponseUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +type endpointPrefix_opWriteGetObjectResponseMiddleware struct { +} + +func (*endpointPrefix_opWriteGetObjectResponseMiddleware) ID() string { + return "EndpointHostPrefix" +} + +func (m *endpointPrefix_opWriteGetObjectResponseMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if smithyhttp.GetHostnameImmutable(ctx) || smithyhttp.IsEndpointHostPrefixDisabled(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + input, ok := 
in.Parameters.(*WriteGetObjectResponseInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input type %T", in.Parameters) + } + + var prefix strings.Builder + if input.RequestRoute == nil { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so may not be nil")} + } else if !smithyhttp.ValidHostLabel(*input.RequestRoute) { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so must match \"[a-zA-Z0-9-]{1,63}\", but was \"%s\"", *input.RequestRoute)} + } else { + prefix.WriteString(*input.RequestRoute) + } + prefix.WriteString(".") + req.URL.Host = prefix.String() + req.URL.Host + + return next.HandleSerialize(ctx, in) +} +func addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack *middleware.Stack) error { + return stack.Serialize.Insert(&endpointPrefix_opWriteGetObjectResponseMiddleware{}, `OperationSerializer`, middleware.After) +} + +func newServiceMetadataMiddleware_opWriteGetObjectResponse(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "s3", + OperationName: "WriteGetObjectResponse", + } +} + +func addWriteGetObjectResponseUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: nopGetBucketAccessor, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: true, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go new file mode 100644 index 000000000..995d909cf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go @@ -0,0 +1,21940 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
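+// Every operation in this file follows the same generated REST-XML
+// deserialization pattern, roughly:
+//
+//	out, metadata, err = next.HandleDeserialize(ctx, in)   // run the rest of the stack first
+//	response, ok := out.RawResponse.(*smithyhttp.Response) // unwrap the raw HTTP response
+//	// non-2xx: awsRestxml_deserializeOpError<Op> maps the body to a typed or generic API error
+//	// 2xx: HTTP-binding helpers read response headers, then an XML NodeDecoder walks the body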
+ +package s3 + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi" + awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strconv" + "strings" +) + +type awsRestxml_deserializeOpAbortMultipartUpload struct { +} + +func (*awsRestxml_deserializeOpAbortMultipartUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorAbortMultipartUpload(response, &metadata) + } + output := &AbortMultipartUploadOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorAbortMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchUpload", errorCode): + return awsRestxml_deserializeErrorNoSuchUpload(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(v *AbortMultipartUploadOutput, response *smithyhttp.Response) error { + if v == nil { + return 
fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpCompleteMultipartUpload struct { +} + +func (*awsRestxml_deserializeOpCompleteMultipartUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCompleteMultipartUpload(response, &metadata) + } + output := &CompleteMultipartUploadOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCompleteMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := 
&smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(v *CompleteMultipartUploadOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteMultipartUploadOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CompleteMultipartUploadOutput + if *v == nil { + sv = &CompleteMultipartUploadOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case 
strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("Location", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Location = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpCopyObject struct { +} + +func (*awsRestxml_deserializeOpCopyObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCopyObject(response, &metadata) + } + output := &CopyObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentCopyObjectResult(&output.CopyObjectResult, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCopyObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + 
errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ObjectNotInActiveTierError", errorCode): + return awsRestxml_deserializeErrorObjectNotInActiveTierError(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(v *CopyObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.CopySourceVersionId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentCopyObjectOutput(v **CopyObjectOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CopyObjectOutput + if *v == nil { + sv = &CopyObjectOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CopyObjectResult", t.Name.Local): + nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCopyObjectResult(&sv.CopyObjectResult, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpCreateBucket struct { +} + +func (*awsRestxml_deserializeOpCreateBucket) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateBucket(response, &metadata) + } + output := &CreateBucketOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("BucketAlreadyExists", errorCode): + return awsRestxml_deserializeErrorBucketAlreadyExists(response, errorBody) + + case strings.EqualFold("BucketAlreadyOwnedByYou", errorCode): + return awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(v *CreateBucketOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("Location"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Location = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpCreateMultipartUpload struct { +} + +func 
(*awsRestxml_deserializeOpCreateMultipartUpload) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateMultipartUpload(response, &metadata) + } + output := &CreateMultipartUploadOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMultipartUploadOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-abort-date"); 
len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.AbortDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AbortRuleId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("x-amz-checksum-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumAlgorithm = types.ChecksumAlgorithm(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(v **CreateMultipartUploadOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CreateMultipartUploadOutput + if *v == nil { + sv = &CreateMultipartUploadOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("UploadId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UploadId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() 
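+			// Skip consumes the entire unrecognized element, children included,
+			// keeping the decoder aligned on the next sibling token.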
+ if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpDeleteBucket struct { +} + +func (*awsRestxml_deserializeOpDeleteBucket) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucket(response, &metadata) + } + output := &DeleteBucketOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response, &metadata) + } + output := &DeleteBucketAnalyticsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketCors struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketCors) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketCors(response, &metadata) + } + output := &DeleteBucketCorsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = 
errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketEncryption struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketEncryption) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketEncryption(response, &metadata) + } + output := &DeleteBucketEncryptionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response, &metadata) + } + output := &DeleteBucketIntelligentTieringConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketInventoryConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response, &metadata) + } + output := &DeleteBucketInventoryConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + 
return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketLifecycle struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketLifecycle) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response, &metadata) + } + output := &DeleteBucketLifecycleOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketMetricsConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + 
response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response, &metadata) + } + output := &DeleteBucketMetricsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketOwnershipControls struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketOwnershipControls) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response, &metadata) + } + output := &DeleteBucketOwnershipControlsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage 
:= errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketPolicy struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketPolicy(response, &metadata) + } + output := &DeleteBucketPolicyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketReplication struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketReplication) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketReplication(response, &metadata) + } + output := &DeleteBucketReplicationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketTagging struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketTagging(response, &metadata) + } + output := &DeleteBucketTaggingOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } 
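+	// The error payload was buffered above so it can be read more than once:
+	// parsed here for the generic error components and rewound (Seek) before
+	// any typed-error deserializer runs.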
+ errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteBucketWebsite struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketWebsite) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketWebsite(response, &metadata) + } + output := &DeleteBucketWebsiteOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpDeleteObject struct { +} + +func (*awsRestxml_deserializeOpDeleteObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteObject(response, &metadata) + } + output := &DeleteObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(v *DeleteObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = vv + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpDeleteObjects struct { +} + +func (*awsRestxml_deserializeOpDeleteObjects) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + 
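+ // The raw HTTP response is produced by the downstream transport handler and recovered here via a type assertion.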
response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteObjects(response, &metadata) + } + output := &DeleteObjectsOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentDeleteObjectsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(v *DeleteObjectsOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentDeleteObjectsOutput(v **DeleteObjectsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DeleteObjectsOutput + if *v == nil { + sv = &DeleteObjectsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != 
nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Deleted", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(&sv.Deleted, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Error", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentErrorsUnwrapped(&sv.Errors, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpDeleteObjectTagging struct { +} + +func (*awsRestxml_deserializeOpDeleteObjectTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteObjectTagging(response, &metadata) + } + output := &DeleteObjectTaggingOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(v *DeleteObjectTaggingOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-version-id"); 
len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpDeletePublicAccessBlock struct { +} + +func (*awsRestxml_deserializeOpDeletePublicAccessBlock) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response, &metadata) + } + output := &DeletePublicAccessBlockOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpGetBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketAccelerateConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response, &metadata) + } + output := &GetBucketAccelerateConfigurationOutput{} + out.Result = 
output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(v **GetBucketAccelerateConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketAccelerateConfigurationOutput + if *v == nil { + sv = &GetBucketAccelerateConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.BucketAccelerateStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketAcl struct { +} + +func (*awsRestxml_deserializeOpGetBucketAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, 
+) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketAcl(response, &metadata) + } + output := &GetBucketAclOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketAclOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketAclOutput(v **GetBucketAclOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketAclOutput + if *v == nil { + sv = &GetBucketAclOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessControlList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != 
nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response, &metadata) + } + output := &GetBucketAnalyticsConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentAnalyticsConfiguration(&output.AnalyticsConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketAnalyticsConfigurationOutput(v **GetBucketAnalyticsConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + 
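+ // A nil destination pointer cannot be populated, so fail fast before decoding.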
return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketAnalyticsConfigurationOutput + if *v == nil { + sv = &GetBucketAnalyticsConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AnalyticsConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&sv.AnalyticsConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketCors struct { +} + +func (*awsRestxml_deserializeOpGetBucketCors) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketCors(response, &metadata) + } + output := &GetBucketCorsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketCorsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if 
len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketCorsOutput(v **GetBucketCorsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketCorsOutput + if *v == nil { + sv = &GetBucketCorsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CORSRule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCORSRulesUnwrapped(&sv.CORSRules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketEncryption struct { +} + +func (*awsRestxml_deserializeOpGetBucketEncryption) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketEncryption(response, &metadata) + } + output := &GetBucketEncryptionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&output.ServerSideEncryptionConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode 
:= "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketEncryptionOutput(v **GetBucketEncryptionOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketEncryptionOutput + if *v == nil { + sv = &GetBucketEncryptionOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ServerSideEncryptionConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&sv.ServerSideEncryptionConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response, &metadata) + } + output := &GetBucketIntelligentTieringConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = 
awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&output.IntelligentTieringConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketIntelligentTieringConfigurationOutput(v **GetBucketIntelligentTieringConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketIntelligentTieringConfigurationOutput + if *v == nil { + sv = &GetBucketIntelligentTieringConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&sv.IntelligentTieringConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketInventoryConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketInventoryConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, 
metadata, awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response, &metadata) + } + output := &GetBucketInventoryConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentInventoryConfiguration(&output.InventoryConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketInventoryConfigurationOutput(v **GetBucketInventoryConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketInventoryConfigurationOutput + if *v == nil { + sv = &GetBucketInventoryConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("InventoryConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryConfiguration(&sv.InventoryConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketLifecycleConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketLifecycleConfiguration) ID() string { + return 
"OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response, &metadata) + } + output := &GetBucketLifecycleConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(v **GetBucketLifecycleConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketLifecycleConfigurationOutput + if *v == nil { + sv = &GetBucketLifecycleConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + 
switch { + case strings.EqualFold("Rule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketLocation struct { +} + +func (*awsRestxml_deserializeOpGetBucketLocation) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata) + } + output := &GetBucketLocationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketLocation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func 
awsRestxml_deserializeOpDocumentGetBucketLocationOutput(v **GetBucketLocationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketLocationOutput + if *v == nil { + sv = &GetBucketLocationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("LocationConstraint", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.LocationConstraint = types.BucketLocationConstraint(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketLogging struct { +} + +func (*awsRestxml_deserializeOpGetBucketLogging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketLogging(response, &metadata) + } + output := &GetBucketLoggingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if 
reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(v **GetBucketLoggingOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketLoggingOutput + if *v == nil { + sv = &GetBucketLoggingOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("LoggingEnabled", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLoggingEnabled(&sv.LoggingEnabled, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata) + } + output := &GetBucketMetricsConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + 
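+ // Failing to read the error body is itself reported as a deserialization error.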
return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketMetricsConfigurationOutput + if *v == nil { + sv = &GetBucketMetricsConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("MetricsConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata) + } + output := &GetBucketNotificationConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketNotificationConfigurationOutput + if *v == nil { + sv = &GetBucketNotificationConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("EventBridgeConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventBridgeConfiguration(&sv.EventBridgeConfiguration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("QueueConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(&sv.QueueConfigurations, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("TopicConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(&sv.TopicConfigurations, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type 
awsRestxml_deserializeOpGetBucketOwnershipControls struct { +} + +func (*awsRestxml_deserializeOpGetBucketOwnershipControls) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response, &metadata) + } + output := &GetBucketOwnershipControlsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentOwnershipControls(&output.OwnershipControls, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketOwnershipControlsOutput(v **GetBucketOwnershipControlsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketOwnershipControlsOutput + if *v == nil { + sv = &GetBucketOwnershipControlsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + 
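+ // End of the XML document reached; stop the token loop. (Editorial comment on the generated code.)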
break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("OwnershipControls", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwnershipControls(&sv.OwnershipControls, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketPolicy struct { +} + +func (*awsRestxml_deserializeOpGetBucketPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicy(response, &metadata) + } + output := &GetBucketPolicyOutput{} + out.Result = output + + err = awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(output, response.Body, response.ContentLength) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(v *GetBucketPolicyOutput, body io.ReadCloser, contentLength int64) error { + if v == nil { + return fmt.Errorf("unsupported deserialization of nil %T", v) + } + var buf bytes.Buffer + if contentLength > 0 { + buf.Grow(int(contentLength)) + } else { + buf.Grow(512) + } + + _, err := buf.ReadFrom(body) + if err != nil { + return err + } + if buf.Len() > 0 { + v.Policy = ptr.String(buf.String()) + } + return nil +} + +type awsRestxml_deserializeOpGetBucketPolicyStatus struct { +} + +func 
(*awsRestxml_deserializeOpGetBucketPolicyStatus) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response, &metadata) + } + output := &GetBucketPolicyStatusOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentPolicyStatus(&output.PolicyStatus, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketPolicyStatusOutput(v **GetBucketPolicyStatusOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketPolicyStatusOutput + if *v == nil { + sv = &GetBucketPolicyStatusOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case 
strings.EqualFold("PolicyStatus", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPolicyStatus(&sv.PolicyStatus, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketReplication struct { +} + +func (*awsRestxml_deserializeOpGetBucketReplication) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketReplication(response, &metadata) + } + output := &GetBucketReplicationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentReplicationConfiguration(&output.ReplicationConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func 
awsRestxml_deserializeOpDocumentGetBucketReplicationOutput(v **GetBucketReplicationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketReplicationOutput + if *v == nil { + sv = &GetBucketReplicationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ReplicationConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketRequestPayment struct { +} + +func (*awsRestxml_deserializeOpGetBucketRequestPayment) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketRequestPayment(response, &metadata) + } + output := &GetBucketRequestPaymentOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := 
errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(v **GetBucketRequestPaymentOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketRequestPaymentOutput + if *v == nil { + sv = &GetBucketRequestPaymentOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Payer", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Payer = types.Payer(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketTagging struct { +} + +func (*awsRestxml_deserializeOpGetBucketTagging) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketTagging(response, &metadata) + } + output := &GetBucketTaggingOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(v **GetBucketTaggingOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketTaggingOutput + if *v == nil { + sv = &GetBucketTaggingOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TagSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketVersioning struct { +} + +func (*awsRestxml_deserializeOpGetBucketVersioning) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketVersioning(response, &metadata) + } + output := &GetBucketVersioningOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + 
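+ // Drain the ring buffer of the tee'd response bytes to attach as a debug snapshot. (Editorial comment on the generated code.)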
io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(v **GetBucketVersioningOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketVersioningOutput + if *v == nil { + sv = &GetBucketVersioningOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("MfaDelete", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.MFADelete = types.MFADeleteStatus(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.BucketVersioningStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketWebsite struct { +} + +func (*awsRestxml_deserializeOpGetBucketWebsite) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketWebsite(response, &metadata) + } + output := &GetBucketWebsiteOutput{} + out.Result = output + + var buff 
[1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(v **GetBucketWebsiteOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketWebsiteOutput + if *v == nil { + sv = &GetBucketWebsiteOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ErrorDocument", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentErrorDocument(&sv.ErrorDocument, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("IndexDocument", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIndexDocument(&sv.IndexDocument, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("RedirectAllRequestsTo", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentRedirectAllRequestsTo(&sv.RedirectAllRequestsTo, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("RoutingRules", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := 
awsRestxml_deserializeDocumentRoutingRules(&sv.RoutingRules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObject struct { +} + +func (*awsRestxml_deserializeOpGetObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObject(response, &metadata) + } + output := &GetObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + err = awsRestxml_deserializeOpDocumentGetObjectOutput(output, response.Body) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("InvalidObjectState", errorCode): + return awsRestxml_deserializeErrorInvalidObjectState(response, errorBody) + + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AcceptRanges = ptr.String(headerValues[0]) + } + 
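+ // Each subsequent header binding follows the same pattern: trim the raw value, parse it if needed, assign the modeled type. (Editorial comment on the generated code.)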
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.CacheControl = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32C = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA1 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA256 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentDisposition = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentEncoding = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentLanguage = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 64) + if err != nil { + return err + } + v.ContentLength = vv + } + + if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentRange = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentType = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = vv + } + + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ETag = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.Expires = ptr.Time(t) + } + + if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { + headerValues[0] = 
strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.LastModified = ptr.Time(t) + } + + for headerKey, headerValues := range response.Header { + if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") { + if v.Metadata == nil { + v.Metadata = map[string]string{} + } + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0] + } + } + + if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.MissingMeta = int32(vv) + } + + if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockMode = types.ObjectLockMode(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseDateTime(headerValues[0]) + if err != nil { + return err + } + v.ObjectLockRetainUntilDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.PartsCount = int32(vv) + } + + if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ReplicationStatus = types.ReplicationStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Restore = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + 
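+ // The trimmed header value maps directly onto the modeled StorageClass enum. (Editorial comment on the generated code.)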
v.StorageClass = types.StorageClass(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-tagging-count"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.TagCount = int32(vv) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.WebsiteRedirectLocation = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectOutput(v *GetObjectOutput, body io.ReadCloser) error { + if v == nil { + return fmt.Errorf("unsupported deserialization of nil %T", v) + } + v.Body = body + return nil +} + +type awsRestxml_deserializeOpGetObjectAcl struct { +} + +func (*awsRestxml_deserializeOpGetObjectAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectAcl(response, &metadata) + } + output := &GetObjectAclOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetObjectAclOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, 
s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(v *GetObjectAclOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectAclOutput(v **GetObjectAclOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectAclOutput + if *v == nil { + sv = &GetObjectAclOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessControlList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectAttributes struct { +} + +func (*awsRestxml_deserializeOpGetObjectAttributes) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectAttributes(response, &metadata) + } + output := &GetObjectAttributesOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response 
with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchKey", errorCode): + return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(v *GetObjectAttributesOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = vv + } + + if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.LastModified = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(v 
**GetObjectAttributesOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectAttributesOutput + if *v == nil { + sv = &GetObjectAttributesOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Checksum", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentChecksum(&sv.Checksum, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("ObjectParts", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGetObjectAttributesParts(&sv.ObjectParts, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ObjectSize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ObjectSize = i64 + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectLegalHold struct { +} + +func (*awsRestxml_deserializeOpGetObjectLegalHold) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectLegalHold(response, &metadata) + } + output := &GetObjectLegalHoldOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentObjectLockLegalHold(&output.LegalHold, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetObjectLegalHoldOutput(v **GetObjectLegalHoldOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectLegalHoldOutput + if *v == nil { + sv = &GetObjectLegalHoldOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("LegalHold", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockLegalHold(&sv.LegalHold, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectLockConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetObjectLockConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response, &metadata) + } + output := &GetObjectLockConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if 
err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentObjectLockConfiguration(&output.ObjectLockConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetObjectLockConfigurationOutput(v **GetObjectLockConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectLockConfigurationOutput + if *v == nil { + sv = &GetObjectLockConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ObjectLockConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockConfiguration(&sv.ObjectLockConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectRetention struct { +} + +func (*awsRestxml_deserializeOpGetObjectRetention) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectRetention(response, &metadata) + } + output := &GetObjectRetentionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentObjectLockRetention(&output.Retention, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetObjectRetentionOutput(v **GetObjectRetentionOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectRetentionOutput + if *v == nil { + sv = &GetObjectRetentionOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Retention", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectLockRetention(&sv.Retention, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectTagging struct { +} + +func (*awsRestxml_deserializeOpGetObjectTagging) ID() string { + return 
"OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectTagging(response, &metadata) + } + output := &GetObjectTaggingOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(v *GetObjectTaggingOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = 
ptr.String(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(v **GetObjectTaggingOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetObjectTaggingOutput + if *v == nil { + sv = &GetObjectTaggingOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TagSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetObjectTorrent struct { +} + +func (*awsRestxml_deserializeOpGetObjectTorrent) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetObjectTorrent(response, &metadata) + } + output := &GetObjectTorrentOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + err = awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(output, response.Body) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetObjectTorrent(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ 
+ Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(v *GetObjectTorrentOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(v *GetObjectTorrentOutput, body io.ReadCloser) error { + if v == nil { + return fmt.Errorf("unsupported deserialization of nil %T", v) + } + v.Body = body + return nil +} + +type awsRestxml_deserializeOpGetPublicAccessBlock struct { +} + +func (*awsRestxml_deserializeOpGetPublicAccessBlock) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetPublicAccessBlock(response, &metadata) + } + output := &GetPublicAccessBlockOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&output.PublicAccessBlockConfiguration, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if 
len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetPublicAccessBlockOutput(v **GetPublicAccessBlockOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetPublicAccessBlockOutput + if *v == nil { + sv = &GetPublicAccessBlockOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("PublicAccessBlockConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&sv.PublicAccessBlockConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpHeadBucket struct { +} + +func (*awsRestxml_deserializeOpHeadBucket) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorHeadBucket(response, &metadata) + } + output := &HeadBucketOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorHeadBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + 
switch { + case strings.EqualFold("NotFound", errorCode): + return awsRestxml_deserializeErrorNotFound(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpHeadObject struct { +} + +func (*awsRestxml_deserializeOpHeadObject) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorHeadObject(response, &metadata) + } + output := &HeadObjectOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorHeadObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AcceptRanges = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-archive-status"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ArchiveStatus = types.ArchiveStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = vv + } + + if 
headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.CacheControl = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC32C = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA1 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumSHA256 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentDisposition = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentEncoding = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentLanguage = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 64) + if err != nil { + return err + } + v.ContentLength = vv + } + + if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentType = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.DeleteMarker = vv + } + + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ETag = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Expiration = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.Expires = ptr.Time(t) + } + + if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.LastModified = ptr.Time(t) + } + + for headerKey, headerValues := range response.Header { + if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") { + if v.Metadata == nil { + v.Metadata = map[string]string{} + } + headerValues[0] = strings.TrimSpace(headerValues[0]) + 
v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0] + } + } + + if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.MissingMeta = int32(vv) + } + + if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ObjectLockMode = types.ObjectLockMode(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseDateTime(headerValues[0]) + if err != nil { + return err + } + v.ObjectLockRetainUntilDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 32) + if err != nil { + return err + } + v.PartsCount = int32(vv) + } + + if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ReplicationStatus = types.ReplicationStatus(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.Restore = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerAlgorithm = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.StorageClass = types.StorageClass(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.VersionId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.WebsiteRedirectLocation = 
ptr.String(headerValues[0]) + } + + return nil +} + +type awsRestxml_deserializeOpListBucketAnalyticsConfigurations struct { +} + +func (*awsRestxml_deserializeOpListBucketAnalyticsConfigurations) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response, &metadata) + } + output := &ListBucketAnalyticsConfigurationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(v **ListBucketAnalyticsConfigurationsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListBucketAnalyticsConfigurationsOutput + if *v == nil { + sv = 
&ListBucketAnalyticsConfigurationsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AnalyticsConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(&sv.AnalyticsConfigurationList, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("NextContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextContinuationToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations struct { +} + +func (*awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response, &metadata) + } + output := &ListBucketIntelligentTieringConfigurationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func 
awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(v **ListBucketIntelligentTieringConfigurationsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListBucketIntelligentTieringConfigurationsOutput + if *v == nil { + sv = &ListBucketIntelligentTieringConfigurationsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(&sv.IntelligentTieringConfigurationList, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("NextContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextContinuationToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListBucketInventoryConfigurations struct { +} + +func (*awsRestxml_deserializeOpListBucketInventoryConfigurations) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response, &metadata) + } + output := &ListBucketInventoryConfigurationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(v **ListBucketInventoryConfigurationsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListBucketInventoryConfigurationsOutput + if *v == nil { + sv = &ListBucketInventoryConfigurationsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + 
sv.ContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("InventoryConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(&sv.InventoryConfigurationList, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("NextContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextContinuationToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListBucketMetricsConfigurations struct { +} + +func (*awsRestxml_deserializeOpListBucketMetricsConfigurations) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response, &metadata) + } + output := &ListBucketMetricsConfigurationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, 
s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(v **ListBucketMetricsConfigurationsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListBucketMetricsConfigurationsOutput + if *v == nil { + sv = &ListBucketMetricsConfigurationsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("MetricsConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(&sv.MetricsConfigurationList, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("NextContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextContinuationToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListBuckets struct { +} + +func (*awsRestxml_deserializeOpListBuckets) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListBuckets(response, &metadata) + } + output := &ListBucketsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := 
io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListBucketsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListBucketsOutput + if *v == nil { + sv = &ListBucketsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Buckets", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListMultipartUploads struct { +} + +func (*awsRestxml_deserializeOpListMultipartUploads) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListMultipartUploads(response, &metadata) + } + output := &ListMultipartUploadsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListMultipartUploads(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipartUploadsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListMultipartUploadsOutput + if *v == nil { + sv = &ListMultipartUploadsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("CommonPrefixes", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + 
if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Delimiter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Delimiter = ptr.String(xtv) + } + + case strings.EqualFold("EncodingType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EncodingType = types.EncodingType(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("KeyMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyMarker = ptr.String(xtv) + } + + case strings.EqualFold("MaxUploads", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxUploads = int32(i64) + } + + case strings.EqualFold("NextKeyMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextKeyMarker = ptr.String(xtv) + } + + case strings.EqualFold("NextUploadIdMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextUploadIdMarker = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("UploadIdMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UploadIdMarker = ptr.String(xtv) + } + + case strings.EqualFold("Upload", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(&sv.Uploads, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListObjects struct { +} + +func (*awsRestxml_deserializeOpListObjects) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListObjects(response, 
&metadata) + } + output := &ListObjectsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListObjectsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchBucket", errorCode): + return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListObjectsOutput + if *v == nil { + sv = &ListObjectsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CommonPrefixes", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Contents", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Delimiter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Delimiter = ptr.String(xtv) + } + + case strings.EqualFold("EncodingType", 
t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EncodingType = types.EncodingType(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("Marker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Marker = ptr.String(xtv) + } + + case strings.EqualFold("MaxKeys", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxKeys = int32(i64) + } + + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = ptr.String(xtv) + } + + case strings.EqualFold("NextMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextMarker = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListObjectsV2 struct { +} + +func (*awsRestxml_deserializeOpListObjectsV2) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListObjectsV2(response, &metadata) + } + output := &ListObjectsV2Output{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListObjectsV2Output(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", 
err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListObjectsV2(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchBucket", errorCode): + return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListObjectsV2Output + if *v == nil { + sv = &ListObjectsV2Output{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CommonPrefixes", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Contents", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("Delimiter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Delimiter = ptr.String(xtv) + } + + case strings.EqualFold("EncodingType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EncodingType = types.EncodingType(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("KeyCount", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { 
+ break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.KeyCount = int32(i64) + } + + case strings.EqualFold("MaxKeys", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxKeys = int32(i64) + } + + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = ptr.String(xtv) + } + + case strings.EqualFold("NextContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextContinuationToken = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("StartAfter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StartAfter = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListObjectVersions struct { +} + +func (*awsRestxml_deserializeOpListObjectVersions) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListObjectVersions(response, &metadata) + } + output := &ListObjectVersionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListObjectVersionsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListObjectVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVersionsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListObjectVersionsOutput + if *v == nil { + sv = &ListObjectVersionsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CommonPrefixes", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("DeleteMarker", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(&sv.DeleteMarkers, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Delimiter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Delimiter = ptr.String(xtv) + } + + case strings.EqualFold("EncodingType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EncodingType = types.EncodingType(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("KeyMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyMarker = ptr.String(xtv) + } + + case strings.EqualFold("MaxKeys", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxKeys = int32(i64) + } + + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = ptr.String(xtv) + } + + case strings.EqualFold("NextKeyMarker", t.Name.Local): + val, 
err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextKeyMarker = ptr.String(xtv) + } + + case strings.EqualFold("NextVersionIdMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextVersionIdMarker = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("VersionIdMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionIdMarker = ptr.String(xtv) + } + + case strings.EqualFold("Version", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentObjectVersionListUnwrapped(&sv.Versions, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpListParts struct { +} + +func (*awsRestxml_deserializeOpListParts) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListParts(response, &metadata) + } + output := &ListPartsOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsListPartsOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListPartsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListParts(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to 
copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsListPartsOutput(v *ListPartsOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + t, err := smithytime.ParseHTTPDate(headerValues[0]) + if err != nil { + return err + } + v.AbortDate = ptr.Time(t) + } + + if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.AbortRuleId = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListPartsOutput + if *v == nil { + sv = &ListPartsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) + } + + case strings.EqualFold("Initiator", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case 
strings.EqualFold("MaxParts", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxParts = int32(i64) + } + + case strings.EqualFold("NextPartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextPartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("Part", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPartsUnwrapped(&sv.Parts, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + case strings.EqualFold("UploadId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UploadId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpPutBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketAccelerateConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response, &metadata) + } + output := &PutBucketAccelerateConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := 
s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketAcl struct { +} + +func (*awsRestxml_deserializeOpPutBucketAcl) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketAcl(response, &metadata) + } + output := &PutBucketAclOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + 
out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response, &metadata) + } + output := &PutBucketAnalyticsConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketCors struct { +} + +func (*awsRestxml_deserializeOpPutBucketCors) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketCors(response, &metadata) + } + output := &PutBucketCorsOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + 
errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketEncryption struct { +} + +func (*awsRestxml_deserializeOpPutBucketEncryption) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketEncryption(response, &metadata) + } + output := &PutBucketEncryptionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, 
next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response, &metadata) + } + output := &PutBucketIntelligentTieringConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketInventoryConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketInventoryConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response, &metadata) + } + output := &PutBucketInventoryConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if 
_, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestxml_deserializeOpPutBucketLifecycleConfiguration struct { +} + +func (*awsRestxml_deserializeOpPutBucketLifecycleConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response, &metadata) + } + output := &PutBucketLifecycleConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type 
awsRestxml_deserializeOpPutBucketLogging struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketLogging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutBucketLogging(response, &metadata)
+ }
+ output := &PutBucketLoggingOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketMetricsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response, &metadata)
+ }
+ output := &PutBucketMetricsConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketNotificationConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketNotificationConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response, &metadata)
+ }
+ output := &PutBucketNotificationConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketOwnershipControls) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response, &metadata)
+ }
+ output := &PutBucketOwnershipControlsOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketPolicy struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketPolicy) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutBucketPolicy(response, &metadata)
+ }
+ output := &PutBucketPolicyOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketReplication struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketReplication) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutBucketReplication(response, &metadata)
+ }
+ output := &PutBucketReplicationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketRequestPayment struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketRequestPayment) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutBucketRequestPayment(response, &metadata)
+ }
+ output := &PutBucketRequestPaymentOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketTagging struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutBucketTagging(response, &metadata)
+ }
+ output := &PutBucketTaggingOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketVersioning struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketVersioning) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutBucketVersioning(response, &metadata)
+ }
+ output := &PutBucketVersioningOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketWebsite struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketWebsite) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutBucketWebsite(response, &metadata)
+ }
+ output := &PutBucketWebsiteOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutObject struct {
+}
+
+func (*awsRestxml_deserializeOpPutObject) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutObject(response, &metadata)
+ }
+ output := &PutObjectOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectOutput(output, response)
+ if err != nil {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, response *smithyhttp.Response) error {
+ if v == nil {
+  return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  vv, err := strconv.ParseBool(headerValues[0])
+  if err != nil {
+   return err
+  }
+  v.BucketKeyEnabled = vv
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ChecksumCRC32 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ChecksumCRC32C = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ChecksumSHA1 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ChecksumSHA256 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ETag = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.Expiration = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.SSEKMSEncryptionContext = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutObjectAcl struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectAcl) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutObjectAcl(response, &metadata)
+ }
+ output := &PutObjectAclOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(output, response)
+ if err != nil {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchKey", errorCode):
+  return awsRestxml_deserializeErrorNoSuchKey(response, errorBody)
+
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(v *PutObjectAclOutput, response *smithyhttp.Response) error {
+ if v == nil {
+  return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutObjectLegalHold struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectLegalHold) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutObjectLegalHold(response, &metadata)
+ }
+ output := &PutObjectLegalHoldOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(output, response)
+ if err != nil {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(v *PutObjectLegalHoldOutput, response *smithyhttp.Response) error {
+ if v == nil {
+  return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutObjectLockConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectLockConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response, &metadata)
+ }
+ output := &PutObjectLockConfigurationOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(output, response)
+ if err != nil {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(v *PutObjectLockConfigurationOutput, response *smithyhttp.Response) error {
+ if v == nil {
+  return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutObjectRetention struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectRetention) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutObjectRetention(response, &metadata)
+ }
+ output := &PutObjectRetentionOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(output, response)
+ if err != nil {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(v *PutObjectRetentionOutput, response *smithyhttp.Response) error {
+ if v == nil {
+  return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutObjectTagging struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutObjectTagging(response, &metadata)
+ }
+ output := &PutObjectTaggingOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(output, response)
+ if err != nil {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(v *PutObjectTaggingOutput, response *smithyhttp.Response) error {
+ if v == nil {
+  return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutPublicAccessBlock struct {
+}
+
+func (*awsRestxml_deserializeOpPutPublicAccessBlock) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorPutPublicAccessBlock(response, &metadata)
+ }
+ output := &PutPublicAccessBlockOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpRestoreObject struct {
+}
+
+func (*awsRestxml_deserializeOpRestoreObject) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorRestoreObject(response, &metadata)
+ }
+ output := &RestoreObjectOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(output, response)
+ if err != nil {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorRestoreObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ObjectAlreadyInActiveTierError", errorCode):
+  return awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response, errorBody)
+
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(v *RestoreObjectOutput, response *smithyhttp.Response) error {
+ if v == nil {
+  return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-restore-output-path"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.RestoreOutputPath = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpSelectObjectContent struct {
+}
+
+func (*awsRestxml_deserializeOpSelectObjectContent) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorSelectObjectContent(response, &metadata)
+ }
+ output := &SelectObjectContentOutput{}
+ out.Result = output
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorSelectObjectContent(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpUploadPart struct {
+}
+
+func (*awsRestxml_deserializeOpUploadPart) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorUploadPart(response, &metadata)
+ }
+ output := &UploadPartOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsUploadPartOutput(output, response)
+ if err != nil {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorUploadPart(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, response *smithyhttp.Response) error {
+ if v == nil {
+  return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  vv, err := strconv.ParseBool(headerValues[0])
+  if err != nil {
+   return err
+  }
+  v.BucketKeyEnabled = vv
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ChecksumCRC32 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ChecksumCRC32C = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ChecksumSHA1 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ChecksumSHA256 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ETag = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpUploadPartCopy struct {
+}
+
+func (*awsRestxml_deserializeOpUploadPartCopy) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorUploadPartCopy(response, &metadata)
+ }
+ output := &UploadPartCopyOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(output, response)
+ if err != nil {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+  return out, metadata, nil
+ }
+ if err != nil {
+  var snapshot bytes.Buffer
+  io.Copy(&snapshot, ringBuffer)
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to decode response body, %w", err),
+   Snapshot: snapshot.Bytes(),
+  }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentCopyPartResult(&output.CopyPartResult, decoder)
+ if err != nil {
+  var snapshot bytes.Buffer
+  io.Copy(&snapshot, ringBuffer)
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to decode response body, %w", err),
+   Snapshot: snapshot.Bytes(),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorUploadPartCopy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(v *UploadPartCopyOutput, response *smithyhttp.Response) error {
+ if v == nil {
+  return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  vv, err := strconv.ParseBool(headerValues[0])
+  if err != nil {
+   return err
+  }
+  v.BucketKeyEnabled = vv
+ }
+
+ if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.CopySourceVersionId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+  headerValues[0] = strings.TrimSpace(headerValues[0])
+  v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentUploadPartCopyOutput(v **UploadPartCopyOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+  return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *UploadPartCopyOutput
+ if *v == nil {
+  sv = &UploadPartCopyOutput{}
+ } else {
+  sv = *v
+ }
+
+ for {
+  t, done, err := decoder.Token()
+  if err != nil {
+   return err
+  }
+  if done {
+   break
+  }
+  originalDecoder := decoder
+  decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+  switch {
+  case strings.EqualFold("CopyPartResult", t.Name.Local):
+   nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+   if err := awsRestxml_deserializeDocumentCopyPartResult(&sv.CopyPartResult, nodeDecoder); err != nil {
+    return err
+   }
+
+  default:
+   // Do nothing and ignore the unexpected tag element
+   err = decoder.Decoder.Skip()
+   if err != nil {
+    return err
+   }
+
+  }
+  decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpWriteGetObjectResponse struct {
+}
+
+func (*awsRestxml_deserializeOpWriteGetObjectResponse) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+  return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+  return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+  return out, metadata, awsRestxml_deserializeOpErrorWriteGetObjectResponse(response, &metadata)
+ }
+ output := &WriteGetObjectResponseOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+  return out, metadata, &smithy.DeserializationError{
+   Err: fmt.Errorf("failed to discard response body, %w", err),
+  }
+ }
+
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorWriteGetObjectResponse(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+  return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+  UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+  return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+  s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+  awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+  errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+  errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+  genericError := &smithy.GenericAPIError{
+   Code: errorCode,
+   Message: errorMessage,
+  }
+  return genericError
+
+ }
+}
+
+func awsRestxml_deserializeEventStreamSelectObjectContentEventStream(v *types.SelectObjectContentEventStream, msg *eventstream.Message) error {
+ if v == nil {
+  return fmt.Errorf("unexpected serialization of nil %T", v)
+ }
+
+ eventType := msg.Headers.Get(eventstreamapi.EventTypeHeader)
+ if eventType == nil {
+  return fmt.Errorf("%s event header not present", eventstreamapi.EventTypeHeader)
+ }
+
+ switch {
+ case strings.EqualFold("Cont", eventType.String()):
+  vv := &types.SelectObjectContentEventStreamMemberCont{}
+  if err := awsRestxml_deserializeEventMessageContinuationEvent(&vv.Value, msg); err != nil {
+   return err
+  }
+  *v = vv
+  return nil
+
+ case strings.EqualFold("End", eventType.String()):
+  vv := &types.SelectObjectContentEventStreamMemberEnd{}
+  if err := awsRestxml_deserializeEventMessageEndEvent(&vv.Value, msg); err != nil {
+   return err
+  }
+  *v = vv
+  return nil
+
+ case strings.EqualFold("Progress", eventType.String()):
+  vv := &types.SelectObjectContentEventStreamMemberProgress{}
+  if err := awsRestxml_deserializeEventMessageProgressEvent(&vv.Value, msg); err != nil {
+   return err
+  }
+  *v = vv
+  return nil
+
+ case strings.EqualFold("Records", eventType.String()):
+  vv := &types.SelectObjectContentEventStreamMemberRecords{}
+  if err := awsRestxml_deserializeEventMessageRecordsEvent(&vv.Value, msg); err != nil {
+   return err
+  }
+  *v = vv
+  return nil
+
+ case strings.EqualFold("Stats", eventType.String()):
+  vv := &types.SelectObjectContentEventStreamMemberStats{}
+  if err := awsRestxml_deserializeEventMessageStatsEvent(&vv.Value, msg); err != nil {
+   return err
+  }
+  *v = vv
+  return nil
+
+ default:
+  buffer := bytes.NewBuffer(nil)
+  eventstream.NewEncoder().Encode(buffer, *msg)
+  *v = &types.UnknownUnionMember{
Tag: eventType.String(), + Value: buffer.Bytes(), + } + return nil + + } +} + +func awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg *eventstream.Message) error { + exceptionType := msg.Headers.Get(eventstreamapi.ExceptionTypeHeader) + if exceptionType == nil { + return fmt.Errorf("%s event header not present", eventstreamapi.ExceptionTypeHeader) + } + + switch { + default: + br := bytes.NewReader(msg.Payload) + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(br, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + errorComponents, err := awsxml.GetErrorResponseComponents(br, true) + if err != nil { + return err + } + errorCode := "UnknownError" + errorMessage := errorCode + if ev := exceptionType.String(); len(ev) > 0 { + errorCode = ev + } else if ev := errorComponents.Code; len(ev) > 0 { + errorCode = ev + } + if ev := errorComponents.Message; len(ev) > 0 { + errorMessage = ev + } + return &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + + } +} + +func awsRestxml_deserializeEventMessageRecordsEvent(v *types.RecordsEvent, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + if msg.Payload != nil { + bsv := make([]byte, len(msg.Payload)) + copy(bsv, msg.Payload) + + v.Payload = bsv + } + return nil +} + +func awsRestxml_deserializeEventMessageStatsEvent(v *types.StatsEvent, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + br := bytes.NewReader(msg.Payload) + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(br, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentStats(&v.Details, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return nil +} + +func awsRestxml_deserializeEventMessageProgressEvent(v *types.ProgressEvent, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + br := bytes.NewReader(msg.Payload) + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(br, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentProgress(&v.Details, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return nil +} + +func 
awsRestxml_deserializeEventMessageContinuationEvent(v *types.ContinuationEvent, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + br := bytes.NewReader(msg.Payload) + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(br, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentContinuationEvent(&v, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return nil +} + +func awsRestxml_deserializeEventMessageEndEvent(v *types.EndEvent, msg *eventstream.Message) error { + if v == nil { + return fmt.Errorf("unexpected serialization of nil %T", v) + } + + br := bytes.NewReader(msg.Payload) + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(br, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentEndEvent(&v, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return nil +} + +func awsRestxml_deserializeDocumentContinuationEvent(v **types.ContinuationEvent, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ContinuationEvent + if *v == nil { + sv = &types.ContinuationEvent{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentEndEvent(v **types.EndEvent, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.EndEvent + if *v == nil { + sv = &types.EndEvent{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder 
smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Progress + if *v == nil { + sv = &types.Progress{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("BytesProcessed", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesProcessed = i64 + } + + case strings.EqualFold("BytesReturned", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesReturned = i64 + } + + case strings.EqualFold("BytesScanned", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesScanned = i64 + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Stats + if *v == nil { + sv = &types.Stats{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("BytesProcessed", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesProcessed = i64 + } + + case strings.EqualFold("BytesReturned", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesReturned = i64 + } + + case strings.EqualFold("BytesScanned", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.BytesScanned = i64 + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeErrorBucketAlreadyExists(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.BucketAlreadyExists{} + return output +} + +func awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.BucketAlreadyOwnedByYou{} + return output +} + +func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidObjectState{} + var buff [1024]byte + ringBuffer := 
smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentInvalidObjectState(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NoSuchBucket{} + return output +} + +func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NoSuchKey{} + return output +} + +func awsRestxml_deserializeErrorNoSuchUpload(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NoSuchUpload{} + return output +} + +func awsRestxml_deserializeErrorNotFound(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NotFound{} + return output +} + +func awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ObjectAlreadyInActiveTierError{} + return output +} + +func awsRestxml_deserializeErrorObjectNotInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ObjectNotInActiveTierError{} + return output +} + +func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.AbortIncompleteMultipartUpload, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AbortIncompleteMultipartUpload + if *v == nil { + sv = &types.AbortIncompleteMultipartUpload{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DaysAfterInitiation", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.DaysAfterInitiation = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAccessControlTranslation(v **types.AccessControlTranslation, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AccessControlTranslation + if *v == nil { + sv = &types.AccessControlTranslation{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Owner", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return 
err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Owner = types.OwnerOverride(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAllowedHeaders(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentAllowedMethods(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentAllowedOrigins(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + 
return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentAnalyticsAndOperator(v **types.AnalyticsAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AnalyticsAndOperator + if *v == nil { + sv = &types.AnalyticsAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAnalyticsConfiguration(v **types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AnalyticsConfiguration + if *v == nil { + sv = &types.AnalyticsConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("StorageClassAnalysis", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentStorageClassAnalysis(&sv.StorageClassAnalysis, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAnalyticsConfigurationList(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.AnalyticsConfiguration + if *v == nil { + sv = 
make([]types.AnalyticsConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.AnalyticsConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.AnalyticsConfiguration + if *v == nil { + sv = make([]types.AnalyticsConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.AnalyticsConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentAnalyticsExportDestination(v **types.AnalyticsExportDestination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AnalyticsExportDestination + if *v == nil { + sv = &types.AnalyticsExportDestination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3BucketDestination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentAnalyticsFilter(v *types.AnalyticsFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var uv types.AnalyticsFilter + var memberFound bool + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + if memberFound { + if err = decoder.Decoder.Skip(); err != nil { + return err + } + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("And", t.Name.Local): + var mv types.AnalyticsAndOperator + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentAnalyticsAndOperator(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.AnalyticsFilterMemberAnd{Value: mv} + memberFound = true + + case strings.EqualFold("Prefix", t.Name.Local): + var mv string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.AnalyticsFilterMemberPrefix{Value: mv} + memberFound = true + + case 
strings.EqualFold("Tag", t.Name.Local): + var mv types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.AnalyticsFilterMemberTag{Value: mv} + memberFound = true + + default: + uv = &types.UnknownUnionMember{Tag: t.Name.Local} + memberFound = true + + } + decoder = originalDecoder + } + *v = uv + return nil +} + +func awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(v **types.AnalyticsS3BucketDestination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AnalyticsS3BucketDestination + if *v == nil { + sv = &types.AnalyticsS3BucketDestination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("BucketAccountId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.BucketAccountId = ptr.String(xtv) + } + + case strings.EqualFold("Format", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Format = types.AnalyticsS3ExportFileFormat(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBucket(v **types.Bucket, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Bucket + if *v == nil { + sv = &types.Bucket{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CreationDate", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.CreationDate = ptr.Time(t) + } + + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBucketAlreadyExists(v **types.BucketAlreadyExists, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.BucketAlreadyExists + if *v == nil { + sv = &types.BucketAlreadyExists{} + } else 
{ + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBucketAlreadyOwnedByYou(v **types.BucketAlreadyOwnedByYou, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.BucketAlreadyOwnedByYou + if *v == nil { + sv = &types.BucketAlreadyOwnedByYou{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBuckets(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Bucket + if *v == nil { + sv = make([]types.Bucket, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("Bucket", t.Name.Local): + var col types.Bucket + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentBucketsUnwrapped(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error { + var sv []types.Bucket + if *v == nil { + sv = make([]types.Bucket, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Bucket + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Checksum + if *v == nil { + sv = &types.Checksum{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := 
decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentChecksumAlgorithmList(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ChecksumAlgorithm + if *v == nil { + sv = make([]types.ChecksumAlgorithm, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.ChecksumAlgorithm + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = types.ChecksumAlgorithm(xtv) + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error { + var sv []types.ChecksumAlgorithm + if *v == nil { + sv = make([]types.ChecksumAlgorithm, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ChecksumAlgorithm + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = types.ChecksumAlgorithm(xtv) + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentCommonPrefix(v **types.CommonPrefix, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CommonPrefix + if *v == nil { + sv = &types.CommonPrefix{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCommonPrefixList(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.CommonPrefix + if *v == nil { + sv = make([]types.CommonPrefix, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.CommonPrefix + nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error { + var sv []types.CommonPrefix + if *v == nil { + sv = make([]types.CommonPrefix, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.CommonPrefix + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentCondition(v **types.Condition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Condition + if *v == nil { + sv = &types.Condition{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("HttpErrorCodeReturnedEquals", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.HttpErrorCodeReturnedEquals = ptr.String(xtv) + } + + case strings.EqualFold("KeyPrefixEquals", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyPrefixEquals = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CopyObjectResult + if *v == nil { + sv = &types.CopyObjectResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := 
decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCopyPartResult(v **types.CopyPartResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CopyPartResult + if *v == nil { + sv = &types.CopyPartResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumCRC32", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumCRC32C", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC32C = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA1", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA1 = ptr.String(xtv) + } + + case strings.EqualFold("ChecksumSHA256", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumSHA256 = ptr.String(xtv) + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCORSRule(v **types.CORSRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CORSRule + if *v == nil { + sv = &types.CORSRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AllowedHeader", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(&sv.AllowedHeaders, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("AllowedMethod", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if 
err := awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(&sv.AllowedMethods, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("AllowedOrigin", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(&sv.AllowedOrigins, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ExposeHeader", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentExposeHeadersUnwrapped(&sv.ExposeHeaders, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("MaxAgeSeconds", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxAgeSeconds = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCORSRules(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.CORSRule + if *v == nil { + sv = make([]types.CORSRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.CORSRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentCORSRulesUnwrapped(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error { + var sv []types.CORSRule + if *v == nil { + sv = make([]types.CORSRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.CORSRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DefaultRetention + if *v == nil { + sv = &types.DefaultRetention{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = int32(i64) + } + + case 
strings.EqualFold("Mode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Mode = types.ObjectLockRetentionMode(xtv) + } + + case strings.EqualFold("Years", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Years = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeletedObject(v **types.DeletedObject, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeletedObject + if *v == nil { + sv = &types.DeletedObject{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DeleteMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected DeleteMarker to be of type *bool, got %T instead", val) + } + sv.DeleteMarker = xtv + } + + case strings.EqualFold("DeleteMarkerVersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DeleteMarkerVersionId = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeletedObjects(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.DeletedObject + if *v == nil { + sv = make([]types.DeletedObject, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.DeletedObject + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error { + var sv []types.DeletedObject + if *v == nil { + sv = make([]types.DeletedObject, 0) + } else { + sv = *v + } + + switch { + default: + var mv 
types.DeletedObject + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentDeleteMarkerEntry(v **types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeleteMarkerEntry + if *v == nil { + sv = &types.DeleteMarkerEntry{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IsLatest", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) + } + sv.IsLatest = xtv + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.LastModified = ptr.Time(t) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeleteMarkerReplication(v **types.DeleteMarkerReplication, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeleteMarkerReplication + if *v == nil { + sv = &types.DeleteMarkerReplication{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.DeleteMarkerReplicationStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeleteMarkers(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.DeleteMarkerEntry + if *v == nil { + sv = make([]types.DeleteMarkerEntry, 0) + } else { + sv = *v + } + + 
originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.DeleteMarkerEntry + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { + var sv []types.DeleteMarkerEntry + if *v == nil { + sv = make([]types.DeleteMarkerEntry, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.DeleteMarkerEntry + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentDestination(v **types.Destination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Destination + if *v == nil { + sv = &types.Destination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessControlTranslation", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAccessControlTranslation(&sv.AccessControlTranslation, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("EncryptionConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Metrics", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetrics(&sv.Metrics, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ReplicationTime", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationTime(&sv.ReplicationTime, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsRestxml_deserializeDocumentEncryptionConfiguration(v **types.EncryptionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.EncryptionConfiguration + if *v == nil { + sv = &types.EncryptionConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ReplicaKmsKeyID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ReplicaKmsKeyID = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Error + if *v == nil { + sv = &types.Error{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Code", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Code = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("Message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + case strings.EqualFold("VersionId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VersionId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentErrorDocument(v **types.ErrorDocument, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ErrorDocument + if *v == nil { + sv = &types.ErrorDocument{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentErrors(v *[]types.Error, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Error + if *v == nil { + sv = 
make([]types.Error, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Error + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentErrorsUnwrapped(v *[]types.Error, decoder smithyxml.NodeDecoder) error { + var sv []types.Error + if *v == nil { + sv = make([]types.Error, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Error + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentEventBridgeConfiguration(v **types.EventBridgeConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.EventBridgeConfiguration + if *v == nil { + sv = &types.EventBridgeConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentEventList(v *[]types.Event, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Event + if *v == nil { + sv = make([]types.Event, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Event + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = types.Event(xtv) + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentEventListUnwrapped(v *[]types.Event, decoder smithyxml.NodeDecoder) error { + var sv []types.Event + if *v == nil { + sv = make([]types.Event, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Event + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = types.Event(xtv) + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentExistingObjectReplication(v **types.ExistingObjectReplication, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ExistingObjectReplication + if *v == nil { + sv 
= &types.ExistingObjectReplication{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ExistingObjectReplicationStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentExposeHeaders(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("member", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentExposeHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentFilterRule(v **types.FilterRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.FilterRule + if *v == nil { + sv = &types.FilterRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Name", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Name = types.FilterRuleName(xtv) + } + + case strings.EqualFold("Value", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Value = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentFilterRuleList(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.FilterRule + if *v == nil { + sv = make([]types.FilterRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + 
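+		// Wrapped XML list: every <member> element is decoded into a FilterRule and
+		// appended; any other element is skipped.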
case strings.EqualFold("member", t.Name.Local): + var col types.FilterRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentFilterRuleListUnwrapped(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error { + var sv []types.FilterRule + if *v == nil { + sv = make([]types.FilterRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.FilterRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectAttributesParts, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.GetObjectAttributesParts + if *v == nil { + sv = &types.GetObjectAttributesParts{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = xtv + } + + case strings.EqualFold("MaxParts", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.MaxParts = int32(i64) + } + + case strings.EqualFold("NextPartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextPartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("PartNumberMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PartNumberMarker = ptr.String(xtv) + } + + case strings.EqualFold("Part", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPartsListUnwrapped(&sv.Parts, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PartsCount", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.TotalPartsCount = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentGrant(v **types.Grant, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Grant + if 
*v == nil { + sv = &types.Grant{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Grantee", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Permission", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Permission = types.Permission(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentGrantee(v **types.Grantee, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Grantee + if *v == nil { + sv = &types.Grantee{} + } else { + sv = *v + } + + for _, attr := range decoder.StartEl.Attr { + name := attr.Name.Local + if len(attr.Name.Space) != 0 { + name = attr.Name.Space + `:` + attr.Name.Local + } + switch { + case strings.EqualFold("xsi:type", name): + val := []byte(attr.Value) + { + xtv := string(val) + sv.Type = types.Type(xtv) + } + + } + } + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DisplayName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DisplayName = ptr.String(xtv) + } + + case strings.EqualFold("EmailAddress", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EmailAddress = ptr.String(xtv) + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("URI", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.URI = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentGrants(v *[]types.Grant, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Grant + if *v == nil { + sv = make([]types.Grant, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("Grant", t.Name.Local): + var col types.Grant + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + 
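+	// Note: unlike the generic lists above, ACL grant entries arrive as <Grant>
+	// elements rather than <member> (handled in the case above).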
return nil +} + +func awsRestxml_deserializeDocumentGrantsUnwrapped(v *[]types.Grant, decoder smithyxml.NodeDecoder) error { + var sv []types.Grant + if *v == nil { + sv = make([]types.Grant, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Grant + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentIndexDocument(v **types.IndexDocument, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IndexDocument + if *v == nil { + sv = &types.IndexDocument{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Suffix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Suffix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInitiator(v **types.Initiator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Initiator + if *v == nil { + sv = &types.Initiator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DisplayName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DisplayName = ptr.String(xtv) + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringAndOperator(v **types.IntelligentTieringAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IntelligentTieringAndOperator + if *v == nil { + sv = &types.IntelligentTieringAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do 
nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringConfiguration(v **types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IntelligentTieringConfiguration + if *v == nil { + sv = &types.IntelligentTieringConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIntelligentTieringFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.IntelligentTieringStatus(xtv) + } + + case strings.EqualFold("Tiering", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTieringListUnwrapped(&sv.Tierings, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringConfigurationList(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.IntelligentTieringConfiguration + if *v == nil { + sv = make([]types.IntelligentTieringConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.IntelligentTieringConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.IntelligentTieringConfiguration + if *v == nil { + sv = make([]types.IntelligentTieringConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.IntelligentTieringConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func 
awsRestxml_deserializeDocumentIntelligentTieringFilter(v **types.IntelligentTieringFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IntelligentTieringFilter + if *v == nil { + sv = &types.IntelligentTieringFilter{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("And", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentIntelligentTieringAndOperator(&sv.And, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInvalidObjectState(v **types.InvalidObjectState, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidObjectState + if *v == nil { + sv = &types.InvalidObjectState{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessTier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessTier = types.IntelligentTieringAccessTier(xtv) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.StorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryConfiguration + if *v == nil { + sv = &types.InventoryConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Destination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryDestination(&sv.Destination, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryFilter(&sv.Filter, nodeDecoder); err != nil { + 
return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("IncludedObjectVersions", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.IncludedObjectVersions = types.InventoryIncludedObjectVersions(xtv) + } + + case strings.EqualFold("IsEnabled", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsEnabled to be of type *bool, got %T instead", val) + } + sv.IsEnabled = xtv + } + + case strings.EqualFold("OptionalFields", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryOptionalFields(&sv.OptionalFields, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Schedule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventorySchedule(&sv.Schedule, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryConfigurationList(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.InventoryConfiguration + if *v == nil { + sv = make([]types.InventoryConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.InventoryConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.InventoryConfiguration + if *v == nil { + sv = make([]types.InventoryConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.InventoryConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentInventoryDestination(v **types.InventoryDestination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryDestination + if *v == nil { + sv = &types.InventoryDestination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = 
smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3BucketDestination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryEncryption(v **types.InventoryEncryption, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryEncryption + if *v == nil { + sv = &types.InventoryEncryption{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("SSE-KMS", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSSEKMS(&sv.SSEKMS, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("SSE-S3", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSSES3(&sv.SSES3, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryFilter(v **types.InventoryFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryFilter + if *v == nil { + sv = &types.InventoryFilter{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryOptionalFields(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.InventoryOptionalField + if *v == nil { + sv = make([]types.InventoryOptionalField, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("Field", t.Name.Local): + var col types.InventoryOptionalField + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = types.InventoryOptionalField(xtv) + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder 
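+		// Restore the enumerating decoder so the next <Field> sibling is tokenized
+		// at list depth.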
+ } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventoryOptionalFieldsUnwrapped(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error { + var sv []types.InventoryOptionalField + if *v == nil { + sv = make([]types.InventoryOptionalField, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.InventoryOptionalField + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = types.InventoryOptionalField(xtv) + } + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentInventoryS3BucketDestination(v **types.InventoryS3BucketDestination, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventoryS3BucketDestination + if *v == nil { + sv = &types.InventoryS3BucketDestination{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccountId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccountId = ptr.String(xtv) + } + + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("Encryption", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentInventoryEncryption(&sv.Encryption, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Format", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Format = types.InventoryFormat(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInventorySchedule(v **types.InventorySchedule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InventorySchedule + if *v == nil { + sv = &types.InventorySchedule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Frequency", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Frequency = types.InventoryFrequency(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLambdaFunctionConfiguration(v **types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LambdaFunctionConfiguration + if *v == nil { + sv = &types.LambdaFunctionConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Event", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("CloudFunction", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.LambdaFunctionArn = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLambdaFunctionConfigurationList(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.LambdaFunctionConfiguration + if *v == nil { + sv = make([]types.LambdaFunctionConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.LambdaFunctionConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.LambdaFunctionConfiguration + if *v == nil { + sv = make([]types.LambdaFunctionConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.LambdaFunctionConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentLifecycleExpiration(v **types.LifecycleExpiration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LifecycleExpiration + if *v == nil { + sv = &types.LifecycleExpiration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + 
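+		// Scalar fields below are decoded from element text: Date via
+		// smithytime.ParseDateTime, Days via strconv.ParseInt, and
+		// ExpiredObjectDeleteMarker via strconv.ParseBool.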
originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Date", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Date = ptr.Time(t) + } + + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = int32(i64) + } + + case strings.EqualFold("ExpiredObjectDeleteMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected ExpiredObjectDeleteMarker to be of type *bool, got %T instead", val) + } + sv.ExpiredObjectDeleteMarker = xtv + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRule(v **types.LifecycleRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LifecycleRule + if *v == nil { + sv = &types.LifecycleRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AbortIncompleteMultipartUpload", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(&sv.AbortIncompleteMultipartUpload, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Expiration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLifecycleExpiration(&sv.Expiration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLifecycleRuleFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("NoncurrentVersionExpiration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNoncurrentVersionExpiration(&sv.NoncurrentVersionExpiration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("NoncurrentVersionTransition", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(&sv.NoncurrentVersionTransitions, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := 
decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ExpirationStatus(xtv) + } + + case strings.EqualFold("Transition", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTransitionListUnwrapped(&sv.Transitions, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleRuleAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LifecycleRuleAndOperator + if *v == nil { + sv = &types.LifecycleRuleAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ObjectSizeGreaterThan = i64 + } + + case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ObjectSizeLessThan = i64 + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var uv types.LifecycleRuleFilter + var memberFound bool + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + if memberFound { + if err = decoder.Decoder.Skip(); err != nil { + return err + } + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("And", t.Name.Local): + var mv types.LifecycleRuleAndOperator + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.LifecycleRuleFilterMemberAnd{Value: mv} + memberFound = true + + case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): + var mv int64 + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != 
nil { + return err + } + mv = i64 + } + uv = &types.LifecycleRuleFilterMemberObjectSizeGreaterThan{Value: mv} + memberFound = true + + case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): + var mv int64 + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + mv = i64 + } + uv = &types.LifecycleRuleFilterMemberObjectSizeLessThan{Value: mv} + memberFound = true + + case strings.EqualFold("Prefix", t.Name.Local): + var mv string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.LifecycleRuleFilterMemberPrefix{Value: mv} + memberFound = true + + case strings.EqualFold("Tag", t.Name.Local): + var mv types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.LifecycleRuleFilterMemberTag{Value: mv} + memberFound = true + + default: + uv = &types.UnknownUnionMember{Tag: t.Name.Local} + memberFound = true + + } + decoder = originalDecoder + } + *v = uv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRules(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.LifecycleRule + if *v == nil { + sv = make([]types.LifecycleRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.LifecycleRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error { + var sv []types.LifecycleRule + if *v == nil { + sv = make([]types.LifecycleRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.LifecycleRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentLoggingEnabled(v **types.LoggingEnabled, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LoggingEnabled + if *v == nil { + sv = &types.LoggingEnabled{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TargetBucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TargetBucket = ptr.String(xtv) + } + + case strings.EqualFold("TargetGrants", t.Name.Local): + 
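+			// TargetGrants is a nested wrapped list; delegate the whole subtree to
+			// its list deserializer.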
nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTargetGrants(&sv.TargetGrants, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("TargetPrefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TargetPrefix = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetrics(v **types.Metrics, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Metrics + if *v == nil { + sv = &types.Metrics{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("EventThreshold", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.EventThreshold, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.MetricsStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetricsAndOperator(v **types.MetricsAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MetricsAndOperator + if *v == nil { + sv = &types.MetricsAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessPointArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessPointArn = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentMetricsConfiguration(v **types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MetricsConfiguration + if *v == nil { + sv = &types.MetricsConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder 
[Machine-generated REST-XML deserializer code from aws-sdk-go-v2 (service/s3), elided for readability; the diff's line breaks were lost in extraction. Every function in this stretch shares the prefix awsRestxml_deserializeDocument and the same shape: a token loop over child elements, a switch on the element name per field, and a default arm that skips unknown tags. This hunk covered: MetricsConfiguration (tail: Filter, Id), MetricsConfigurationList and its flattened ...ListUnwrapped variant, MetricsFilter (tagged union: AccessPointArn, And, Prefix, Tag, plus UnknownUnionMember), and MultipartUpload (ChecksumAlgorithm, Initiated, Initiator, Key, Owner, StorageClass, UploadId).]
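The MetricsFilter function above decodes a tagged union: exactly one member element is expected, the first recognized child wins, `memberFound` causes every later sibling to be skipped, and an unrecognized tag becomes `types.UnknownUnionMember` rather than an error. A minimal sketch of the same first-member-wins pattern using only the standard library's `encoding/xml` (the type names here are stand-ins, not the SDK's):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// Stand-ins for the SDK's generated union wrappers (types.MetricsFilterMember*).
type MetricsFilter interface{ isMetricsFilter() }

type FilterPrefix struct{ Value string }
type FilterAccessPointArn struct{ Value string }
type FilterUnknown struct{ Tag string }

func (FilterPrefix) isMetricsFilter()         {}
func (FilterAccessPointArn) isMetricsFilter() {}
func (FilterUnknown) isMetricsFilter()        {}

// decodeMetricsFilter mimics the generated pattern: walk the children of
// <Filter>, keep the first recognized member, and skip every later sibling.
func decodeMetricsFilter(d *xml.Decoder) (MetricsFilter, error) {
	var uv MetricsFilter
	var memberFound bool
	for {
		tok, err := d.Token()
		if err != nil {
			return nil, err
		}
		if _, ok := tok.(xml.EndElement); ok {
			break // </Filter>
		}
		start, ok := tok.(xml.StartElement)
		if !ok {
			continue // whitespace between elements
		}
		if memberFound {
			if err := d.Skip(); err != nil {
				return nil, err
			}
			continue
		}
		switch start.Name.Local {
		case "Prefix":
			var s string
			if err := d.DecodeElement(&s, &start); err != nil {
				return nil, err
			}
			uv = FilterPrefix{Value: s}
		case "AccessPointArn":
			var s string
			if err := d.DecodeElement(&s, &start); err != nil {
				return nil, err
			}
			uv = FilterAccessPointArn{Value: s}
		default:
			// Unknown member: remember its tag, mirroring types.UnknownUnionMember.
			uv = FilterUnknown{Tag: start.Name.Local}
			if err := d.Skip(); err != nil {
				return nil, err
			}
		}
		memberFound = true
	}
	return uv, nil
}

func main() {
	d := xml.NewDecoder(strings.NewReader(`<Filter><Prefix>logs/</Prefix></Filter>`))
	d.Token() // consume the outer <Filter> start element
	f, err := decodeMetricsFilter(d)
	fmt.Printf("%#v %v\n", f, err) // main.FilterPrefix{Value:"logs/"} <nil>
}
```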
[Elided, same generated pattern: MultipartUploadList / ...ListUnwrapped, NoncurrentVersionExpiration (NewerNoncurrentVersions, NoncurrentDays), NoncurrentVersionTransition (adds StorageClass) with its List / ListUnwrapped variants, the empty error shapes NoSuchBucket and NoSuchKey, and the start of NoSuchUpload.]
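The paired List / ListUnwrapped functions exist because Smithy models lists in two encodings: a wrapped list owns `<member>` children, while a flattened list repeats its element tag at the parent level, so the Unwrapped variant is invoked once per sibling and appends to the slice passed in (hence the `*[]T` parameter that is reused across calls). A sketch of wrapped-list decoding with the standard library, assuming an illustrative `<Parts>`/`<member>` document:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// decodeWrapped reads the <member> children of a wrapped list element.
// Flattened lists have no such wrapper; their element tag simply repeats,
// which is what drives the generated ...Unwrapped variants.
func decodeWrapped(d *xml.Decoder) ([]string, error) {
	var out []string
	for {
		tok, err := d.Token()
		if err != nil {
			return nil, err
		}
		if _, ok := tok.(xml.EndElement); ok {
			return out, nil // closing tag of the wrapper
		}
		start, ok := tok.(xml.StartElement)
		if !ok {
			continue
		}
		if start.Name.Local != "member" {
			if err := d.Skip(); err != nil { // tolerate unexpected tags
				return nil, err
			}
			continue
		}
		var s string
		if err := d.DecodeElement(&s, &start); err != nil {
			return nil, err
		}
		out = append(out, s)
	}
}

func main() {
	d := xml.NewDecoder(strings.NewReader(`<Parts><member>a</member><member>b</member></Parts>`))
	d.Token() // consume <Parts>
	got, err := decodeWrapped(d)
	fmt.Println(got, err) // [a b] <nil>
}
```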
type %T", v) + } + var sv *types.NoSuchUpload + if *v == nil { + sv = &types.NoSuchUpload{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNotFound(v **types.NotFound, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NotFound + if *v == nil { + sv = &types.NotFound{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentNotificationConfigurationFilter(v **types.NotificationConfigurationFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.NotificationConfigurationFilter + if *v == nil { + sv = &types.NotificationConfigurationFilter{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3Key", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentS3KeyFilter(&sv.Key, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Object + if *v == nil { + sv = &types.Object{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ETag", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ETag = ptr.String(xtv) + } + + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("LastModified", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil 
{ + return err + } + sv.LastModified = ptr.Time(t) + } + + case strings.EqualFold("Owner", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Size", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Size = i64 + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.ObjectStorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectAlreadyInActiveTierError(v **types.ObjectAlreadyInActiveTierError, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectAlreadyInActiveTierError + if *v == nil { + sv = &types.ObjectAlreadyInActiveTierError{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectList(v *[]types.Object, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Object + if *v == nil { + sv = make([]types.Object, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Object + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentObjectListUnwrapped(v *[]types.Object, decoder smithyxml.NodeDecoder) error { + var sv []types.Object + if *v == nil { + sv = make([]types.Object, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Object + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentObjectLockConfiguration(v **types.ObjectLockConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ObjectLockConfiguration + if *v == nil { + sv = &types.ObjectLockConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done 
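Scalar leaves follow one recipe throughout: fetch the text node, break early when it is absent, then convert — strings through `ptr.String`, integers through `strconv.ParseInt` narrowed to the modeled width, timestamps through `smithytime.ParseDateTime`. A standalone sketch of those conversions, using `time.RFC3339Nano` as a stand-in for smithytime's date-time parsing:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// ptrString mirrors the SDK's ptr.String helper.
func ptrString(s string) *string { return &s }

func main() {
	// *string field, e.g. sv.ETag
	etag := ptrString(`"9b2cf535f27731c974343645a3985328"`)

	// int64 field parsed the way the generated code does it, e.g. sv.Size
	size, err := strconv.ParseInt("1048576", 10, 64)
	if err != nil {
		panic(err)
	}

	// Timestamp field, e.g. sv.LastModified; smithytime.ParseDateTime accepts
	// RFC 3339 with fractional seconds, so RFC3339Nano is a reasonable stand-in.
	lastModified, err := time.Parse(time.RFC3339Nano, "2023-09-21T15:34:54.000Z")
	if err != nil {
		panic(err)
	}

	fmt.Println(*etag, size, lastModified.UTC())
}
```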
[Elided: ObjectLockConfiguration (ObjectLockEnabled, Rule), ObjectLockLegalHold (Status), ObjectLockRetention (Mode, RetainUntilDate), ObjectLockRule (DefaultRetention), ObjectNotInActiveTierError, ObjectPart (ChecksumCRC32, ChecksumCRC32C, ChecksumSHA1, ChecksumSHA256, PartNumber, Size), and the start of ObjectVersion.]
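Every switch above ends in a default arm that calls `decoder.Decoder.Skip()`, so an element the generator did not know about is ignored wholesale instead of failing the decode; the empty error shapes (NoSuchBucket, NoSuchKey, NotFound) are nothing but this skip loop. A stdlib sketch of the same keep-what-you-know, skip-the-rest contract (`part` and its fields are illustrative):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type part struct {
	PartNumber int32
	ETag       string
}

// decodePart keeps the fields it knows and skips everything else, which is
// the forward-compatibility contract of the generated default: arms.
func decodePart(d *xml.Decoder) (part, error) {
	var p part
	for {
		tok, err := d.Token()
		if err != nil {
			return p, err
		}
		if _, ok := tok.(xml.EndElement); ok {
			return p, nil // </Part>
		}
		start, ok := tok.(xml.StartElement)
		if !ok {
			continue
		}
		switch start.Name.Local {
		case "PartNumber":
			var n int32
			if err := d.DecodeElement(&n, &start); err != nil {
				return p, err
			}
			p.PartNumber = n
		case "ETag":
			if err := d.DecodeElement(&p.ETag, &start); err != nil {
				return p, err
			}
		default:
			if err := d.Skip(); err != nil { // unknown element: ignore its subtree
				return p, err
			}
		}
	}
}

func main() {
	d := xml.NewDecoder(strings.NewReader(
		`<Part><PartNumber>7</PartNumber><NewField>x</NewField><ETag>"abc"</ETag></Part>`))
	d.Token() // consume <Part>
	fmt.Println(decodePart(d)) // {7 "abc"} <nil>
}
```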
[Elided: ObjectVersion (ChecksumAlgorithm, ETag, IsLatest, Key, LastModified, Owner, Size, StorageClass, VersionId), ObjectVersionList / ...ListUnwrapped, Owner (DisplayName, ID), OwnershipControls (Rule), and most of OwnershipControlsRule (ObjectOwnership).]
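Composite members such as Owner are handled by wrapping the child element in a fresh NodeDecoder and recursing, and the callee takes `**T` so it can allocate the struct on first use while the caller simply passes `&sv.Owner`. A sketch of that allocate-if-nil in/out convention (`fillOwner` and `owner` are made-up names):

```go
package main

import "fmt"

type owner struct{ ID string }

// fillOwner mimics the generated **T convention: allocate when the caller's
// pointer is nil, reuse it otherwise, then write the result back through v.
func fillOwner(v **owner, id string) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	sv := *v
	if sv == nil {
		sv = &owner{}
	}
	sv.ID = id
	*v = sv
	return nil
}

func main() {
	var parent struct{ Owner *owner }
	if err := fillOwner(&parent.Owner, "abc123"); err != nil {
		panic(err)
	}
	fmt.Println(parent.Owner.ID) // abc123
}
```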
[Elided: the end of OwnershipControlsRule, OwnershipControlsRules / ...RulesUnwrapped, Part (the four Checksum* fields, ETag, LastModified, PartNumber, Size), Parts / ...PartsUnwrapped, PartsList / ...PartsListUnwrapped (ObjectPart), and the start of PolicyStatus.]
[Elided: PolicyStatus (IsPublic), PublicAccessBlockConfiguration (BlockPublicAcls, BlockPublicPolicy, IgnorePublicAcls, RestrictPublicBuckets), QueueConfiguration (Event, Filter, Id, and <Queue> mapped to QueueArn), QueueConfigurationList / ...ListUnwrapped, and the start of Redirect (HostName, HttpRedirectCode).]
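Boolean leaves go through `strconv.ParseBool`, and a failed parse is reported with the field name baked into the message; note the generated message prints the Go type of the raw value (`%T` of a `[]byte`), not the offending text. A sketch of the same helper shape (`parseXMLBool` is a hypothetical name):

```go
package main

import (
	"fmt"
	"strconv"
)

// parseXMLBool mirrors the generated pattern: ParseBool over the text node,
// wrapped in an error that names the target field.
func parseXMLBool(field string, val []byte) (bool, error) {
	b, err := strconv.ParseBool(string(val))
	if err != nil {
		return false, fmt.Errorf("expected %s to be of type *bool, got %T instead", field, val)
	}
	return b, nil
}

func main() {
	ok, err := parseXMLBool("IsPublic", []byte("true"))
	fmt.Println(ok, err) // true <nil>

	_, err = parseXMLBool("IsPublic", []byte("yes"))
	fmt.Println(err) // expected IsPublic to be of type *bool, got []uint8 instead
}
```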
case strings.EqualFold("Protocol", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Protocol = types.Protocol(xtv) + } + + case strings.EqualFold("ReplaceKeyPrefixWith", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ReplaceKeyPrefixWith = ptr.String(xtv) + } + + case strings.EqualFold("ReplaceKeyWith", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ReplaceKeyWith = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentRedirectAllRequestsTo(v **types.RedirectAllRequestsTo, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.RedirectAllRequestsTo + if *v == nil { + sv = &types.RedirectAllRequestsTo{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("HostName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.HostName = ptr.String(xtv) + } + + case strings.EqualFold("Protocol", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Protocol = types.Protocol(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicaModifications(v **types.ReplicaModifications, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReplicaModifications + if *v == nil { + sv = &types.ReplicaModifications{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ReplicaModificationsStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReplicationConfiguration + if *v == nil { + sv = &types.ReplicationConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Role", t.Name.Local): 
+ val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Role = ptr.String(xtv) + } + + case strings.EqualFold("Rule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicationRule(v **types.ReplicationRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReplicationRule + if *v == nil { + sv = &types.ReplicationRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DeleteMarkerReplication", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDeleteMarkerReplication(&sv.DeleteMarkerReplication, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Destination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentDestination(&sv.Destination, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ExistingObjectReplication", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentExistingObjectReplication(&sv.ExistingObjectReplication, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicationRuleFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ID", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ID = ptr.String(xtv) + } + + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Priority", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Priority = int32(i64) + } + + case strings.EqualFold("SourceSelectionCriteria", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSourceSelectionCriteria(&sv.SourceSelectionCriteria, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.ReplicationRuleStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicationRuleAndOperator(v 
**types.ReplicationRuleAndOperator, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ReplicationRuleAndOperator + if *v == nil { + sv = &types.ReplicationRuleAndOperator{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + + case strings.EqualFold("Tag", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var uv types.ReplicationRuleFilter + var memberFound bool + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + if memberFound { + if err = decoder.Decoder.Skip(); err != nil { + return err + } + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("And", t.Name.Local): + var mv types.ReplicationRuleAndOperator + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.ReplicationRuleFilterMemberAnd{Value: mv} + memberFound = true + + case strings.EqualFold("Prefix", t.Name.Local): + var mv string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.ReplicationRuleFilterMemberPrefix{Value: mv} + memberFound = true + + case strings.EqualFold("Tag", t.Name.Local): + var mv types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + uv = &types.ReplicationRuleFilterMemberTag{Value: mv} + memberFound = true + + default: + uv = &types.UnknownUnionMember{Tag: t.Name.Local} + memberFound = true + + } + decoder = originalDecoder + } + *v = uv + return nil +} + +func awsRestxml_deserializeDocumentReplicationRules(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ReplicationRule + if *v == nil { + sv = make([]types.ReplicationRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.ReplicationRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil { + return 
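On the consuming side, a decoded union like ReplicationRuleFilter is unpacked with a type switch over its member wrappers, keeping an arm for UnknownUnionMember so callers stay tolerant of members added later. A sketch with local stand-ins for the SDK's types:

```go
package main

import "fmt"

// Local stand-ins mirroring the union member wrappers seen in the generated
// code (types.ReplicationRuleFilterMember*); the real ones live in the SDK's
// types package.
type ReplicationRuleFilter interface{ isReplicationRuleFilter() }

type MemberPrefix struct{ Value string }
type MemberTag struct{ Key, Val string }
type UnknownUnionMember struct{ Tag string }

func (MemberPrefix) isReplicationRuleFilter()       {}
func (MemberTag) isReplicationRuleFilter()          {}
func (UnknownUnionMember) isReplicationRuleFilter() {}

func describe(f ReplicationRuleFilter) string {
	switch v := f.(type) {
	case MemberPrefix:
		return "prefix filter: " + v.Value
	case MemberTag:
		return fmt.Sprintf("tag filter: %s=%s", v.Key, v.Val)
	case UnknownUnionMember:
		return "unknown member: " + v.Tag // tolerate future members
	default:
		return "no filter set"
	}
}

func main() {
	fmt.Println(describe(MemberPrefix{Value: "logs/"}))
	fmt.Println(describe(UnknownUnionMember{Tag: "NewFilterKind"}))
}
```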
[Elided: the end of ReplicationRules / ...RulesUnwrapped, ReplicationTime (Status, Time), ReplicationTimeValue (Minutes), RoutingRule (Condition, Redirect), RoutingRules (members arrive as <RoutingRule>, not <member>) / ...RulesUnwrapped, S3KeyFilter (FilterRule), and the opening of ServerSideEncryptionByDefault (KMSMasterKeyID), which continues beyond this excerpt.]
err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KMSMasterKeyID = ptr.String(xtv) + } + + case strings.EqualFold("SSEAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SSEAlgorithm = types.ServerSideEncryption(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(v **types.ServerSideEncryptionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ServerSideEncryptionConfiguration + if *v == nil { + sv = &types.ServerSideEncryptionConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Rule", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentServerSideEncryptionRule(v **types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ServerSideEncryptionRule + if *v == nil { + sv = &types.ServerSideEncryptionRule{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ApplyServerSideEncryptionByDefault", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentServerSideEncryptionByDefault(&sv.ApplyServerSideEncryptionByDefault, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("BucketKeyEnabled", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected BucketKeyEnabled to be of type *bool, got %T instead", val) + } + sv.BucketKeyEnabled = xtv + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentServerSideEncryptionRules(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ServerSideEncryptionRule + if *v == nil { + sv = make([]types.ServerSideEncryptionRule, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col 
types.ServerSideEncryptionRule + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { + var sv []types.ServerSideEncryptionRule + if *v == nil { + sv = make([]types.ServerSideEncryptionRule, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ServerSideEncryptionRule + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentSourceSelectionCriteria(v **types.SourceSelectionCriteria, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SourceSelectionCriteria + if *v == nil { + sv = &types.SourceSelectionCriteria{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ReplicaModifications", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentReplicaModifications(&sv.ReplicaModifications, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("SseKmsEncryptedObjects", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSseKmsEncryptedObjects(&sv.SseKmsEncryptedObjects, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentSSEKMS(v **types.SSEKMS, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SSEKMS + if *v == nil { + sv = &types.SSEKMS{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("KeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentSseKmsEncryptedObjects(v **types.SseKmsEncryptedObjects, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SseKmsEncryptedObjects + if *v == nil { + sv = &types.SseKmsEncryptedObjects{} + } else { + sv = *v + } + + for { + t, done, err := 
decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.SseKmsEncryptedObjectsStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentSSES3(v **types.SSES3, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SSES3 + if *v == nil { + sv = &types.SSES3{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentStorageClassAnalysis(v **types.StorageClassAnalysis, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.StorageClassAnalysis + if *v == nil { + sv = &types.StorageClassAnalysis{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DataExport", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(&sv.DataExport, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(v **types.StorageClassAnalysisDataExport, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.StorageClassAnalysisDataExport + if *v == nil { + sv = &types.StorageClassAnalysisDataExport{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Destination", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentAnalyticsExportDestination(&sv.Destination, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("OutputSchemaVersion", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.OutputSchemaVersion = types.StorageClassAnalysisSchemaVersion(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsRestxml_deserializeDocumentTag(v **types.Tag, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Tag + if *v == nil { + sv = &types.Tag{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Key", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Key = ptr.String(xtv) + } + + case strings.EqualFold("Value", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Value = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTagSet(v *[]types.Tag, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Tag + if *v == nil { + sv = make([]types.Tag, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("Tag", t.Name.Local): + var col types.Tag + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTagSetUnwrapped(v *[]types.Tag, decoder smithyxml.NodeDecoder) error { + var sv []types.Tag + if *v == nil { + sv = make([]types.Tag, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Tag + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentTargetGrant(v **types.TargetGrant, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.TargetGrant + if *v == nil { + sv = &types.TargetGrant{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Grantee", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Permission", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Permission = types.BucketLogsPermission(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + 
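+// Illustration only (not part of the generated code): the deserializers above +// all follow the same smithy-go XML decoding pattern. A minimal sketch of +// driving one of them by hand over a hypothetical payload: +// +// rootDecoder := xml.NewDecoder(strings.NewReader("<Tag><Key>env</Key><Value>prod</Value></Tag>")) +// startEl, _ := smithyxml.FetchRootElement(rootDecoder) +// nodeDecoder := smithyxml.WrapNodeDecoder(rootDecoder, startEl) +// var tag *types.Tag +// err := awsRestxml_deserializeDocumentTag(&tag, nodeDecoder) // tag.Key -> "env", tag.Value -> "prod" +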
+func awsRestxml_deserializeDocumentTargetGrants(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.TargetGrant + if *v == nil { + sv = make([]types.TargetGrant, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("Grant", t.Name.Local): + var col types.TargetGrant + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTargetGrantsUnwrapped(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error { + var sv []types.TargetGrant + if *v == nil { + sv = make([]types.TargetGrant, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.TargetGrant + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentTiering(v **types.Tiering, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Tiering + if *v == nil { + sv = &types.Tiering{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessTier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessTier = types.IntelligentTieringAccessTier(xtv) + } + + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = int32(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTieringList(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Tiering + if *v == nil { + sv = make([]types.Tiering, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Tiering + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsRestxml_deserializeDocumentTieringListUnwrapped(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error { + var sv []types.Tiering + if *v == nil { + sv = make([]types.Tiering, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Tiering + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentTopicConfiguration(v **types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.TopicConfiguration + if *v == nil { + sv = &types.TopicConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Event", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Filter", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Id", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Id = ptr.String(xtv) + } + + case strings.EqualFold("Topic", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TopicArn = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTopicConfigurationList(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.TopicConfiguration + if *v == nil { + sv = make([]types.TopicConfiguration, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.TopicConfiguration + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { + var sv []types.TopicConfiguration + if *v == nil { + sv = make([]types.TopicConfiguration, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.TopicConfiguration + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := 
awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsRestxml_deserializeDocumentTransition(v **types.Transition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Transition + if *v == nil { + sv = &types.Transition{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Date", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Date = ptr.Time(t) + } + + case strings.EqualFold("Days", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Days = int32(i64) + } + + case strings.EqualFold("StorageClass", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StorageClass = types.TransitionStorageClass(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTransitionList(v *[]types.Transition, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.Transition + if *v == nil { + sv = make([]types.Transition, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("member", t.Name.Local): + var col types.Transition + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentTransitionListUnwrapped(v *[]types.Transition, decoder smithyxml.NodeDecoder) error { + var sv []types.Transition + if *v == nil { + sv = make([]types.Transition, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.Transition + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go new file mode 100644 index 000000000..d825a41a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go @@ -0,0 +1,5 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package s3 provides the API client, operations, and parameter types for Amazon +// Simple Storage Service. 
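+// +// A minimal usage sketch (illustration only; error handling elided, and assumes +// a default AWS configuration can be loaded via the config package): +// +// cfg, _ := config.LoadDefaultConfig(context.TODO()) +// client := s3.NewFromConfig(cfg) +// out, _ := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{}) +// for _, b := range out.Buckets { +// fmt.Println(*b.Name) +// }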
+package s3 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go new file mode 100644 index 000000000..8df6368cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go @@ -0,0 +1,208 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint.
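+// +// For example (a sketch, assuming a hypothetical local endpoint URL): +// +// client := s3.NewFromConfig(cfg, func(o *s3.Options) { +// o.EndpointResolver = s3.EndpointResolverFromURL("http://localhost:9000") +// })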
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "s3" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the resolver will use the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + + if options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { + if options.UseDualstack { + options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled + } else { + options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateDisabled + } + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go new file mode 100644 index 000000000..d6cdb5337 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go @@ -0,0 +1,285 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream" + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithysync "github.com/aws/smithy-go/sync" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "sync" +) + +// SelectObjectContentEventStreamReader provides the interface for reading events +// from a stream. +// +// The reader's Close method must allow multiple concurrent calls.
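+// +// A consumption sketch (illustration only; assumes a reader obtained from a +// started SelectObjectContent event stream): +// +// for event := range reader.Events() { +// if rec, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok { +// _ = rec.Value.Payload // raw record bytes +// } +// } +// if err := reader.Err(); err != nil { +// // handle the stream error +// }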
+type SelectObjectContentEventStreamReader interface { + Events() <-chan types.SelectObjectContentEventStream + Close() error + Err() error +} + +type selectObjectContentEventStreamReader struct { + stream chan types.SelectObjectContentEventStream + decoder *eventstream.Decoder + eventStream io.ReadCloser + err *smithysync.OnceErr + payloadBuf []byte + done chan struct{} + closeOnce sync.Once +} + +func newSelectObjectContentEventStreamReader(readCloser io.ReadCloser, decoder *eventstream.Decoder) *selectObjectContentEventStreamReader { + w := &selectObjectContentEventStreamReader{ + stream: make(chan types.SelectObjectContentEventStream), + decoder: decoder, + eventStream: readCloser, + err: smithysync.NewOnceErr(), + done: make(chan struct{}), + payloadBuf: make([]byte, 10*1024), + } + + go w.readEventStream() + + return w +} + +func (r *selectObjectContentEventStreamReader) Events() <-chan types.SelectObjectContentEventStream { + return r.stream +} + +func (r *selectObjectContentEventStreamReader) readEventStream() { + defer r.Close() + defer close(r.stream) + + for { + r.payloadBuf = r.payloadBuf[0:0] + decodedMessage, err := r.decoder.Decode(r.eventStream, r.payloadBuf) + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + return + default: + r.err.SetError(err) + return + } + } + + event, err := r.deserializeEventMessage(&decodedMessage) + if err != nil { + r.err.SetError(err) + return + } + + select { + case r.stream <- event: + case <-r.done: + return + } + + } +} + +func (r *selectObjectContentEventStreamReader) deserializeEventMessage(msg *eventstream.Message) (types.SelectObjectContentEventStream, error) { + messageType := msg.Headers.Get(eventstreamapi.MessageTypeHeader) + if messageType == nil { + return nil, fmt.Errorf("%s event header not present", eventstreamapi.MessageTypeHeader) + } + + switch messageType.String() { + case eventstreamapi.EventMessageType: + var v types.SelectObjectContentEventStream + if err := awsRestxml_deserializeEventStreamSelectObjectContentEventStream(&v, msg); err != nil { + return nil, err + } + return v, nil + + case eventstreamapi.ExceptionMessageType: + return nil, awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg) + + case eventstreamapi.ErrorMessageType: + errorCode := "UnknownError" + errorMessage := errorCode + if header := msg.Headers.Get(eventstreamapi.ErrorCodeHeader); header != nil { + errorCode = header.String() + } + if header := msg.Headers.Get(eventstreamapi.ErrorMessageHeader); header != nil { + errorMessage = header.String() + } + return nil, &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + + default: + mc := msg.Clone() + return nil, &UnknownEventMessageError{ + Type: messageType.String(), + Message: &mc, + } + + } +} + +func (r *selectObjectContentEventStreamReader) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *selectObjectContentEventStreamReader) Close() error { + r.closeOnce.Do(r.safeClose) + return r.Err() +} + +func (r *selectObjectContentEventStreamReader) safeClose() { + close(r.done) + r.eventStream.Close() + +} + +func (r *selectObjectContentEventStreamReader) Err() error { + return r.err.Err() +} + +func (r *selectObjectContentEventStreamReader) Closed() <-chan struct{} { + return r.done +} + +type awsRestxml_deserializeOpEventStreamSelectObjectContent struct { + LogEventStreamWrites bool + LogEventStreamReads bool +} + +func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) ID() string { + return "OperationEventStreamDeserializer" +} + +func (m *awsRestxml_deserializeOpEventStreamSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + defer func() { + if err == nil { + return + } + m.closeResponseBody(out) + }() + + logger := middleware.GetLogger(ctx) + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) + } + _ = request + + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + deserializeOutput, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) + } + _ = deserializeOutput + + output, ok := out.Result.(*SelectObjectContentOutput) + if out.Result != nil && !ok { + return out, metadata, fmt.Errorf("unexpected output result type: %T", out.Result) + } else if out.Result == nil { + output = &SelectObjectContentOutput{} + out.Result = output + } + + eventReader := newSelectObjectContentEventStreamReader( + deserializeOutput.Body, + eventstream.NewDecoder(func(options *eventstream.DecoderOptions) { + options.Logger = logger + options.LogMessages = m.LogEventStreamReads + + }), + ) + defer func() { + if err == nil { + return + } + _ = eventReader.Close() + }() + + output.eventStream = NewSelectObjectContentEventStream(func(stream *SelectObjectContentEventStream) { + stream.Reader = eventReader + }) + + go output.eventStream.waitStreamClose() + + return out, metadata, nil +} + +func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) closeResponseBody(out middleware.DeserializeOutput) { + if resp, ok := out.RawResponse.(*smithyhttp.Response); ok && resp != nil && resp.Body != nil { + _, _ = io.Copy(ioutil.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func addEventStreamSelectObjectContentMiddleware(stack *middleware.Stack, options Options) error { + if err := stack.Deserialize.Insert(&awsRestxml_deserializeOpEventStreamSelectObjectContent{ + LogEventStreamWrites: options.ClientLogMode.IsRequestEventMessage(), + LogEventStreamReads: options.ClientLogMode.IsResponseEventMessage(), + }, "OperationDeserializer", middleware.Before); err != nil { + return err + } + return nil + +} + +// UnknownEventMessageError provides an error when a message is received from the stream, +// but the reader is unable to determine what kind of message it is. +type UnknownEventMessageError struct { + Type string + Message *eventstream.Message +} + +// Error returns the error message string.
+func (e *UnknownEventMessageError) Error() string { + return "unknown event stream message type, " + e.Type +} + +func setSafeEventStreamClientLogMode(o *Options, operation string) { + switch operation { + case "SelectObjectContent": + toggleEventStreamClientLogMode(o, false, true) + return + + default: + return + + } +} +func toggleEventStreamClientLogMode(o *Options, request, response bool) { + mode := o.ClientLogMode + + if request && mode.IsRequestWithBody() { + mode.ClearRequestWithBody() + mode |= aws.LogRequest + } + + if response && mode.IsResponseWithBody() { + mode.ClearResponseWithBody() + mode |= aws.LogResponse + } + + o.ClientLogMode = mode + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json new file mode 100644 index 000000000..8643d42ff --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json @@ -0,0 +1,128 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/v4a": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", + "github.com/aws/aws-sdk-go-v2/service/internal/checksum": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared": "v1.2.3", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_AbortMultipartUpload.go", + "api_op_CompleteMultipartUpload.go", + "api_op_CopyObject.go", + "api_op_CreateBucket.go", + "api_op_CreateMultipartUpload.go", + "api_op_DeleteBucket.go", + "api_op_DeleteBucketAnalyticsConfiguration.go", + "api_op_DeleteBucketCors.go", + "api_op_DeleteBucketEncryption.go", + "api_op_DeleteBucketIntelligentTieringConfiguration.go", + "api_op_DeleteBucketInventoryConfiguration.go", + "api_op_DeleteBucketLifecycle.go", + "api_op_DeleteBucketMetricsConfiguration.go", + "api_op_DeleteBucketOwnershipControls.go", + "api_op_DeleteBucketPolicy.go", + "api_op_DeleteBucketReplication.go", + "api_op_DeleteBucketTagging.go", + "api_op_DeleteBucketWebsite.go", + "api_op_DeleteObject.go", + "api_op_DeleteObjectTagging.go", + "api_op_DeleteObjects.go", + "api_op_DeletePublicAccessBlock.go", + "api_op_GetBucketAccelerateConfiguration.go", + "api_op_GetBucketAcl.go", + "api_op_GetBucketAnalyticsConfiguration.go", + "api_op_GetBucketCors.go", + "api_op_GetBucketEncryption.go", + "api_op_GetBucketIntelligentTieringConfiguration.go", + "api_op_GetBucketInventoryConfiguration.go", + "api_op_GetBucketLifecycleConfiguration.go", + "api_op_GetBucketLocation.go", + "api_op_GetBucketLogging.go", + "api_op_GetBucketMetricsConfiguration.go", + "api_op_GetBucketNotificationConfiguration.go", + "api_op_GetBucketOwnershipControls.go", + "api_op_GetBucketPolicy.go", + "api_op_GetBucketPolicyStatus.go", + "api_op_GetBucketReplication.go", + "api_op_GetBucketRequestPayment.go", + "api_op_GetBucketTagging.go", + "api_op_GetBucketVersioning.go", + "api_op_GetBucketWebsite.go", + "api_op_GetObject.go", + "api_op_GetObjectAcl.go", + "api_op_GetObjectAttributes.go", + "api_op_GetObjectLegalHold.go", + 
"api_op_GetObjectLockConfiguration.go", + "api_op_GetObjectRetention.go", + "api_op_GetObjectTagging.go", + "api_op_GetObjectTorrent.go", + "api_op_GetPublicAccessBlock.go", + "api_op_HeadBucket.go", + "api_op_HeadObject.go", + "api_op_ListBucketAnalyticsConfigurations.go", + "api_op_ListBucketIntelligentTieringConfigurations.go", + "api_op_ListBucketInventoryConfigurations.go", + "api_op_ListBucketMetricsConfigurations.go", + "api_op_ListBuckets.go", + "api_op_ListMultipartUploads.go", + "api_op_ListObjectVersions.go", + "api_op_ListObjects.go", + "api_op_ListObjectsV2.go", + "api_op_ListParts.go", + "api_op_PutBucketAccelerateConfiguration.go", + "api_op_PutBucketAcl.go", + "api_op_PutBucketAnalyticsConfiguration.go", + "api_op_PutBucketCors.go", + "api_op_PutBucketEncryption.go", + "api_op_PutBucketIntelligentTieringConfiguration.go", + "api_op_PutBucketInventoryConfiguration.go", + "api_op_PutBucketLifecycleConfiguration.go", + "api_op_PutBucketLogging.go", + "api_op_PutBucketMetricsConfiguration.go", + "api_op_PutBucketNotificationConfiguration.go", + "api_op_PutBucketOwnershipControls.go", + "api_op_PutBucketPolicy.go", + "api_op_PutBucketReplication.go", + "api_op_PutBucketRequestPayment.go", + "api_op_PutBucketTagging.go", + "api_op_PutBucketVersioning.go", + "api_op_PutBucketWebsite.go", + "api_op_PutObject.go", + "api_op_PutObjectAcl.go", + "api_op_PutObjectLegalHold.go", + "api_op_PutObjectLockConfiguration.go", + "api_op_PutObjectRetention.go", + "api_op_PutObjectTagging.go", + "api_op_PutPublicAccessBlock.go", + "api_op_RestoreObject.go", + "api_op_SelectObjectContent.go", + "api_op_UploadPart.go", + "api_op_UploadPartCopy.go", + "api_op_WriteGetObjectResponse.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "eventstream.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "types/types_exported_test.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/s3", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go new file mode 100644 index 000000000..a79184177 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package s3 + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.27.11" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go new file mode 100644 index 000000000..97b5771bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go @@ -0,0 +1,106 @@ +package arn + +import ( + "fmt" + "strings" + + awsarn "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" +) + +const ( + s3Namespace = "s3" + s3ObjectsLambdaNamespace = "s3-object-lambda" + s3OutpostsNamespace = "s3-outposts" +) + +// ParseEndpointARN parses a given generic aws ARN into a s3 arn resource. 
+func ParseEndpointARN(v awsarn.ARN) (arn.Resource, error) { + return arn.ParseResource(v, accessPointResourceParser) +} + +func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { + resParts := arn.SplitResource(a.Resource) + + switch resParts[0] { + case "accesspoint": + switch a.Service { + case s3Namespace: + return arn.ParseAccessPointResource(a, resParts[1:]) + case s3ObjectsLambdaNamespace: + return parseS3ObjectLambdaAccessPointResource(a, resParts) + default: + return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)} + } + case "outpost": + if a.Service != s3OutpostsNamespace { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3OutpostsNamespace)} + } + return parseOutpostAccessPointResource(a, resParts[1:]) + default: + return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} + } +} + +func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { + // outpost accesspoint arn is only valid if service is s3-outposts + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + + if len(resParts) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + if len(resParts) < 3 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ + ARN: a, Reason: "access-point resource not set in Outpost ARN", + } + } + + resID := strings.TrimSpace(resParts[0]) + if len(resID) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + var outpostAccessPointARN = arn.OutpostAccessPointARN{} + switch resParts[1] { + case "accesspoint": + // Do not allow region-less outpost access-point ARNs.
+ if len(a.Region) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "region is not set"} + } + + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return arn.OutpostAccessPointARN{}, err + } + // set access-point arn + outpostAccessPointARN.AccessPointARN = accessPointARN + default: + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} + } + + // set outpost id + outpostAccessPointARN.OutpostID = resID + return outpostAccessPointARN, nil +} + +func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { + if a.Service != s3ObjectsLambdaNamespace { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} + } + + if len(a.Region) == 0 { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} + } + + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) + if err != nil { + return arn.S3ObjectLambdaAccessPointARN{}, err + } + + return arn.S3ObjectLambdaAccessPointARN{ + AccessPointARN: accessPointARN, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go new file mode 100644 index 000000000..9a392d120 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go @@ -0,0 +1,80 @@ +/* +Package customizations provides customizations for the Amazon S3 API client. + +This package provides support for the following S3 customizations: + + ProcessARN Middleware: processes an ARN if provided as input and updates the endpoint as per the arn type + + UpdateEndpoint Middleware: resolves a custom endpoint as per s3 config options + + RemoveBucket Middleware: removes a serialized bucket name from request url path + + processResponseWith200Error Middleware: deserializes a response error with 200 status code + +# Virtual Host style url addressing + +Since serializers serialize by default as path style url, we use customization +to modify the endpoint url when `UsePathStyle` option on S3Client is unset or +false. This flag will be ignored if `UseAccelerate` option is set to true. + +If UseAccelerate is not enabled, and the bucket name is not a valid hostname +label, the SDK will fall back to forcing the request to be made as if +UsePathStyle was enabled. This behavior is also used if UseDualStackEndpoint is enabled. + +https://docs.aws.amazon.com/AmazonS3/latest/dev/dual-stack-endpoints.html#dual-stack-endpoints-description + +# Transfer acceleration + +By default S3 Transfer acceleration support is disabled. By enabling `UseAccelerate` +option on S3Client, one can enable s3 transfer acceleration support. Transfer +acceleration only works with Virtual Host style addressing, and thus `UsePathStyle` +option if set is ignored. Transfer acceleration is not supported for S3 operations +DeleteBucket, ListBuckets, and CreateBucket. + +# Dualstack support + +By default dualstack support for s3 client is disabled. By enabling `UseDualstack` +option on s3 client, you can enable dualstack endpoint support. + +# Endpoint customizations + +Customizations to look up and process an ARN need to happen before request serialization.
+The UpdateEndpoint middleware, which mutates the resolved endpoint based on Options such as +UseDualstack and UseAccelerate, is executed after +request serialization. The Remove bucket middleware is executed after +a request is serialized, and removes the serialized bucket name from the request path. + + Middleware layering: + + + Initialize : HTTP Request -> ARN Lookup -> Input-Validation -> Serialize step + + Serialize : HTTP Request -> Process ARN -> operation serializer -> Update-Endpoint customization -> Remove-Bucket -> next middleware + +Customization options: + + UseARNRegion (Disabled by Default) + + UsePathStyle (Disabled by Default) + + UseAccelerate (Disabled by Default) + + UseDualstack (Disabled by Default) + +# Handle Error response with 200 status code + +The S3 operations CopyObject, CompleteMultipartUpload, and UploadPartCopy can return an +error response with a 2xx status code. The processResponseWith200Error middleware +customization enables the SDK to check for an error within the response body prior to +deserialization. + +Because the check for a 2xx response containing an error needs to be performed earlier +than response deserialization, and the Deserialize step runs in +reverse order to the other stack steps, it is easier to consider that "after" means +"before". + + Middleware layering: + + HTTP Response -> handle 200 error customization -> deserialize +*/ +package customizations diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go new file mode 100644 index 000000000..2b11b1fa2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go @@ -0,0 +1,74 @@ +package customizations + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "strings" + + "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// HandleResponseErrorWith200Status checks for an S3 200 error response. +// If an S3 200 error is found, the status code for the response is temporarily modified to +// a 5xx response status code. +func HandleResponseErrorWith200Status(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&processResponseFor200ErrorMiddleware{}, "OperationDeserializer", middleware.After) +} + +// middleware to process raw response and look for error response with 200 status code +type processResponseFor200ErrorMiddleware struct{} + +// ID returns the middleware ID. +func (*processResponseFor200ErrorMiddleware) ID() string { + return "S3:ProcessResponseFor200Error" +} + +func (m *processResponseFor200ErrorMiddleware) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + // check if response status code is 2xx.
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return
+	}
+
+	var readBuff bytes.Buffer
+	body := io.TeeReader(response.Body, &readBuff)
+
+	rootDecoder := xml.NewDecoder(body)
+	t, err := smithyxml.FetchRootElement(rootDecoder)
+	if err == io.EOF {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("received empty response payload"),
+		}
+	}
+
+	// rewind the response body
+	response.Body = ioutil.NopCloser(io.MultiReader(&readBuff, response.Body))
+
+	// if the start tag is "Error", the response is considered an error response.
+	if strings.EqualFold(t.Name.Local, "Error") {
+		// according to https://aws.amazon.com/premiumsupport/knowledge-center/s3-resolve-200-internalerror/
+		// 200 error responses are similar to 5xx errors.
+		response.StatusCode = 500
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go
new file mode 100644
index 000000000..87f7a2232
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go
@@ -0,0 +1,22 @@
+package customizations
+
+import (
+	"strings"
+
+	"github.com/aws/smithy-go/transport/http"
+)
+
+func updateS3HostForS3AccessPoint(req *http.Request) {
+	updateHostPrefix(req, "s3", s3AccessPoint)
+}
+
+func updateS3HostForS3ObjectLambda(req *http.Request) {
+	updateHostPrefix(req, "s3", s3ObjectLambda)
+}
+
+func updateHostPrefix(req *http.Request, oldEndpointPrefix, newEndpointPrefix string) {
+	host := req.URL.Host
+	if strings.HasPrefix(host, oldEndpointPrefix) {
+		// for example, if oldEndpointPrefix is "s3", it is replaced with newEndpointPrefix
+		req.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):]
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go
new file mode 100644
index 000000000..f4bbb4b6d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go
@@ -0,0 +1,49 @@
+package customizations
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddExpiresOnPresignedURL represents a build middleware used to assign
+// an expiration on a presigned URL.
+type AddExpiresOnPresignedURL struct {
+
+	// Expires is the time.Duration for which the presigned URL should be
+	// considered valid, expressed in seconds on the wire. By default, an S3
+	// presigned URL expires in 15 minutes, i.e. 900 seconds.
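+	// A zero value is treated as unset; HandleBuild then applies the
+	// 15 minute default before writing the X-Amz-Expires query parameter.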
+	Expires time.Duration
+}
+
+// ID representing the middleware
+func (*AddExpiresOnPresignedURL) ID() string {
+	return "S3:AddExpiresOnPresignedURL"
+}
+
+// HandleBuild handles the build step middleware behavior
+func (m *AddExpiresOnPresignedURL) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	// if expiration is unset, default to 15 minutes
+	if m.Expires == 0 {
+		m.Expires = 15 * time.Minute
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", req)
+	}
+
+	// set the S3 X-Amz-Expires query parameter
+	query := req.URL.Query()
+	query.Set("X-Amz-Expires", strconv.FormatInt(int64(m.Expires/time.Second), 10))
+	req.URL.RawQuery = query.Encode()
+
+	return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go
new file mode 100644
index 000000000..a232e622b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go
@@ -0,0 +1,564 @@
+package customizations
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/transport/http"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/internal/v4a"
+	"github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+	"github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+	s3arn "github.com/aws/aws-sdk-go-v2/service/s3/internal/arn"
+	"github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints"
+)
+
+const (
+	s3AccessPoint  = "s3-accesspoint"
+	s3ObjectLambda = "s3-object-lambda"
+)
+
+// processARNResource is used to process an ARN resource.
+type processARNResource struct {
+
+	// UseARNRegion indicates if the region parsed from an ARN should be used.
+	UseARNRegion bool
+
+	// UseAccelerate indicates if s3 transfer acceleration is enabled
+	UseAccelerate bool
+
+	// EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver
+	EndpointResolver EndpointResolver
+
+	// EndpointResolverOptions used by the endpoint resolver
+	EndpointResolverOptions EndpointResolverOptions
+
+	// DisableMultiRegionAccessPoints indicates multi-region access point support is disabled
+	DisableMultiRegionAccessPoints bool
+}
+
+// ID returns the middleware ID.
+func (*processARNResource) ID() string { return "S3:ProcessARNResource" }
+
+func (m *processARNResource) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	// check if an ARN was provided; if not, skip this middleware
+	arnValue, ok := s3shared.GetARNResourceFromContext(ctx)
+	if !ok {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*http.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// parse the ARN into an endpoint ARN with respect to the service
+	resource, err := s3arn.ParseEndpointARN(arnValue)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	// build a resource request struct
+	resourceRequest := s3shared.ResourceRequest{
+		Resource:      resource,
+		UseARNRegion:  m.UseARNRegion,
+		UseFIPS:       m.EndpointResolverOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled,
+		RequestRegion: awsmiddleware.GetRegion(ctx),
+		SigningRegion: awsmiddleware.GetSigningRegion(ctx),
+		PartitionID:   awsmiddleware.GetPartitionID(ctx),
+	}
+
+	// switch to the correct endpoint updater
+	switch tv := resource.(type) {
+	case arn.AccessPointARN:
+		// multi-region arns do not need to validate for cross partition request
+		if len(tv.Region) != 0 {
+			// validate resource request
+			if err := validateRegionForResourceRequest(resourceRequest); err != nil {
+				return out, metadata, err
+			}
+		}
+
+		// Special handling for region-less ap-arns.
+		if len(tv.Region) == 0 {
+			// check if multi-region arn support is disabled
+			if m.DisableMultiRegionAccessPoints {
+				return out, metadata, fmt.Errorf("Invalid configuration, Multi-Region access point ARNs are disabled")
+			}
+
+			// Do not allow dual-stack configuration with multi-region arns.
+			if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
+				return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv,
+					resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+			}
+		}
+
+		// check if accelerate
+		if m.UseAccelerate {
+			return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv,
+				resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+		}
+
+		// fetch the arn region to resolve the request
+		resolveRegion := tv.Region
+		// check if the request region is FIPS
+		if resourceRequest.UseFIPS && len(resolveRegion) == 0 {
+			// Do not allow FIPS support within multi-region arns.
+ return out, metadata, s3shared.NewClientConfiguredForFIPSError( + tv, resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + var requestBuilder func(context.Context, accesspointOptions) (context.Context, error) + if len(resolveRegion) == 0 { + requestBuilder = buildMultiRegionAccessPointsRequest + } else { + requestBuilder = buildAccessPointRequest + } + + // build request as per accesspoint builder + ctx, err = requestBuilder(ctx, accesspointOptions{ + processARNResource: *m, + request: req, + resource: tv, + resolveRegion: resolveRegion, + partitionID: resourceRequest.PartitionID, + requestRegion: resourceRequest.RequestRegion, + }) + if err != nil { + return out, metadata, err + } + + case arn.S3ObjectLambdaAccessPointARN: + // validate region for resource request + if err := validateRegionForResourceRequest(resourceRequest); err != nil { + return out, metadata, err + } + + // check if accelerate + if m.UseAccelerate { + return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // check if dualstack + if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // fetch arn region to resolve request + resolveRegion := tv.Region + + // build access point request + ctx, err = buildS3ObjectLambdaAccessPointRequest(ctx, accesspointOptions{ + processARNResource: *m, + request: req, + resource: tv.AccessPointARN, + resolveRegion: resolveRegion, + partitionID: resourceRequest.PartitionID, + requestRegion: resourceRequest.RequestRegion, + }) + if err != nil { + return out, metadata, err + } + + // process outpost accesspoint ARN + case arn.OutpostAccessPointARN: + // validate region for resource request + if err := validateRegionForResourceRequest(resourceRequest); err != nil { + return out, metadata, err + } + + // check if accelerate + if m.UseAccelerate { + return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // check if dual stack + if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, + resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) + } + + // check if request region is FIPS + if resourceRequest.UseFIPS { + return out, metadata, s3shared.NewFIPSConfigurationError(tv, resourceRequest.PartitionID, + resourceRequest.RequestRegion, nil) + } + + // build outpost access point request + ctx, err = buildOutpostAccessPointRequest(ctx, outpostAccessPointOptions{ + processARNResource: *m, + resource: tv, + request: req, + partitionID: resourceRequest.PartitionID, + requestRegion: resourceRequest.RequestRegion, + }) + if err != nil { + return out, metadata, err + } + + default: + return out, metadata, s3shared.NewInvalidARNError(resource, nil) + } + + return next.HandleSerialize(ctx, in) +} + +// validate if s3 resource and request region config is compatible. 
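+// A request is rejected when it would cross partitions, or when it would
+// cross regions while UseARNRegion is not enabled; same-region requests
+// (and, with UseARNRegion, same-partition cross-region requests) pass.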
+func validateRegionForResourceRequest(resourceRequest s3shared.ResourceRequest) error {
+	// check if resourceRequest leads to a cross partition error
+	v, err := resourceRequest.IsCrossPartition()
+	if err != nil {
+		return err
+	}
+	if v {
+		// if cross partition
+		return s3shared.NewClientPartitionMismatchError(resourceRequest.Resource,
+			resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+	}
+
+	// check if resourceRequest leads to a cross region error
+	if !resourceRequest.AllowCrossRegion() && resourceRequest.IsCrossRegion() {
+		// the request is cross region, but UseARNRegion is not enabled
+		return s3shared.NewClientRegionMismatchError(resourceRequest.Resource,
+			resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+	}
+
+	return nil
+}
+
+// === Accesspoint ==========
+
+type accesspointOptions struct {
+	processARNResource
+	request       *http.Request
+	resource      arn.AccessPointARN
+	resolveRegion string
+	partitionID   string
+	requestRegion string
+}
+
+func buildAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) {
+	tv := options.resource
+	req := options.request
+	resolveRegion := options.resolveRegion
+
+	resolveService := tv.Service
+
+	ero := options.EndpointResolverOptions
+	ero.Logger = middleware.GetLogger(ctx)
+	ero.ResolvedRegion = "" // clear the endpoint option's resolved region so that we resolve using the passed in region
+
+	// resolve endpoint
+	endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+	if err != nil {
+		return ctx, s3shared.NewFailedToResolveEndpointError(
+			tv,
+			options.partitionID,
+			options.requestRegion,
+			err,
+		)
+	}
+
+	// assign the resolved endpoint url to the request url
+	req.URL, err = url.Parse(endpoint.URL)
+	if err != nil {
+		return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom {
+		ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+	} else {
+		// sign with the service name resolved from the ARN
+		ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+	}
+
+	if len(endpoint.SigningRegion) != 0 {
+		ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+	} else {
+		ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+	}
+
+	// update serviceID to "s3-accesspoint"
+	ctx = awsmiddleware.SetServiceID(ctx, s3AccessPoint)
+
+	// disable host prefix behavior
+	ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+	// remove the serialized arn in place of /{Bucket}
+	ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+	// skip arn processing if the arn region resolves to an immutable endpoint
+	if endpoint.HostnameImmutable {
+		return ctx, nil
+	}
+
+	updateS3HostForS3AccessPoint(req)
+
+	ctx, err = buildAccessPointHostPrefix(ctx, req, tv)
+	if err != nil {
+		return ctx, err
+	}
+
+	return ctx, nil
+}
+
+func buildS3ObjectLambdaAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) {
+	tv := options.resource
+	req := options.request
+	resolveRegion := options.resolveRegion
+
+	resolveService := tv.Service
+
+	ero := options.EndpointResolverOptions
+	ero.Logger = middleware.GetLogger(ctx)
+	ero.ResolvedRegion = "" // clear the endpoint options' resolved region so we resolve using the passed in region
+
+	// resolve endpoint
+	endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+	if err != nil {
+		return ctx, s3shared.NewFailedToResolveEndpointError(
+			tv,
+			options.partitionID,
+			options.requestRegion,
+			err,
+		)
+	}
+
+	// assign the resolved endpoint url to the request url
+	req.URL, err = url.Parse(endpoint.URL)
+	if err != nil {
+		return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom {
+		ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+	} else {
+		// Must sign with s3-object-lambda
+		ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+	}
+
+	if len(endpoint.SigningRegion) != 0 {
+		ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+	} else {
+		ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+	}
+
+	// update serviceID to "s3-object-lambda"
+	ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda)
+
+	// disable host prefix behavior
+	ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+	// remove the serialized arn in place of /{Bucket}
+	ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+	// skip arn processing if the arn region resolves to an immutable endpoint
+	if endpoint.HostnameImmutable {
+		return ctx, nil
+	}
+
+	if endpoint.Source == aws.EndpointSourceServiceMetadata {
+		updateS3HostForS3ObjectLambda(req)
+	}
+
+	ctx, err = buildAccessPointHostPrefix(ctx, req, tv)
+	if err != nil {
+		return ctx, err
+	}
+
+	return ctx, nil
+}
+
+func buildMultiRegionAccessPointsRequest(ctx context.Context, options accesspointOptions) (context.Context, error) {
+	const s3GlobalLabel = "s3-global."
+	const accesspointLabel = "accesspoint."
+
+	tv := options.resource
+	req := options.request
+	resolveService := tv.Service
+	resolveRegion := options.requestRegion
+	arnPartition := tv.Partition
+
+	// resolve endpoint
+	ero := options.EndpointResolverOptions
+	ero.Logger = middleware.GetLogger(ctx)
+
+	endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+	if err != nil {
+		return ctx, s3shared.NewFailedToResolveEndpointError(
+			tv,
+			options.partitionID,
+			options.requestRegion,
+			err,
+		)
+	}
+
+	// set the signing region and signer version for MRAP
+	endpoint.SigningRegion = "*"
+	ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+	ctx = SetSignerVersion(ctx, v4a.Version)
+
+	if len(endpoint.SigningName) != 0 {
+		ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+	} else {
+		ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+	}
+
+	// skip arn processing if the arn region resolves to an immutable endpoint
+	if endpoint.HostnameImmutable {
+		return ctx, nil
+	}
+
+	// modify the endpoint host to use the s3-global host prefix
+	scheme := strings.SplitN(endpoint.URL, "://", 2)
+	dnsSuffix, err := endpoints.GetDNSSuffix(arnPartition, ero)
+	if err != nil {
+		return ctx, fmt.Errorf("error determining dns suffix from arn partition, %w", err)
+	}
+	// set the url as per the partition
+	endpoint.URL = scheme[0] + "://" + s3GlobalLabel + dnsSuffix
+
+	// assign the resolved endpoint url to the request url
+	req.URL, err = url.Parse(endpoint.URL)
+	if err != nil {
+		return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	// build the access point host prefix
+	accessPointHostPrefix := tv.AccessPointName + "." + accesspointLabel
+
+	// add the host prefix to the url
+	req.URL.Host = accessPointHostPrefix + req.URL.Host
+	if len(req.Host) > 0 {
+		req.Host = accessPointHostPrefix + req.Host
+	}
+
+	// validate the endpoint host
+	if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+		return ctx, fmt.Errorf("endpoint validation error: %w, when using arn %v", err, tv)
+	}
+
+	// disable host prefix behavior
+	ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+	// remove the serialized arn in place of /{Bucket}
+	ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+	return ctx, nil
+}
+
+func buildAccessPointHostPrefix(ctx context.Context, req *http.Request, tv arn.AccessPointARN) (context.Context, error) {
+	// add the host prefix for the access point
+	accessPointHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "."
+	req.URL.Host = accessPointHostPrefix + req.URL.Host
+	if len(req.Host) > 0 {
+		req.Host = accessPointHostPrefix + req.Host
+	}
+
+	// validate the endpoint host
+	if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+		return ctx, s3shared.NewInvalidARNError(tv, err)
+	}
+
+	return ctx, nil
+}
+
+// ====== Outpost Accesspoint ========
+
+type outpostAccessPointOptions struct {
+	processARNResource
+	request       *http.Request
+	resource      arn.OutpostAccessPointARN
+	partitionID   string
+	requestRegion string
+}
+
+func buildOutpostAccessPointRequest(ctx context.Context, options outpostAccessPointOptions) (context.Context, error) {
+	tv := options.resource
+	req := options.request
+
+	resolveRegion := tv.Region
+	resolveService := tv.Service
+	endpointsID := resolveService
+	if strings.EqualFold(resolveService, "s3-outposts") {
+		// assign the endpoints ID as "s3"
+		endpointsID = "s3"
+	}
+
+	ero := options.EndpointResolverOptions
+	ero.Logger = middleware.GetLogger(ctx)
+	ero.ResolvedRegion = ""
+
+	// resolve the regional endpoint for the resolved region.
+	endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+	if err != nil {
+		return ctx, s3shared.NewFailedToResolveEndpointError(
+			tv,
+			options.partitionID,
+			options.requestRegion,
+			err,
+		)
+	}
+
+	// assign the resolved endpoint url to the request url
+	req.URL, err = url.Parse(endpoint.URL)
+	if err != nil {
+		return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	// assign the resolved service from the arn as the signing name
+	if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom {
+		ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+	} else {
+		ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+	}
+
+	if len(endpoint.SigningRegion) != 0 {
+		// redirect the signer to use the resolved endpoint signing name and region
+		ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+	} else {
+		ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+	}
+
+	// update serviceID to the resolved service id
+	ctx = awsmiddleware.SetServiceID(ctx, resolveService)
+
+	// disable host prefix behavior
+	ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+	// remove the serialized arn in place of /{Bucket}
+	ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+	// skip further customizations if the arn region resolves to an immutable endpoint
+	if endpoint.HostnameImmutable {
+		return ctx, nil
+	}
+
+	updateHostPrefix(req, endpointsID, resolveService)
+
+	// add the host prefix for s3-outposts
+	outpostAPHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "." + tv.OutpostID + "."
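+	// e.g. "myaccesspoint-123456789012.op-01234567890123456." (illustrative
+	// access point name, account ID, and outpost ID, not real values)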
+	req.URL.Host = outpostAPHostPrefix + req.URL.Host
+	if len(req.Host) > 0 {
+		req.Host = outpostAPHostPrefix + req.Host
+	}
+
+	// validate the endpoint host
+	if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+		return ctx, s3shared.NewInvalidARNError(tv, err)
+	}
+
+	return ctx, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go
new file mode 100644
index 000000000..2e030f29c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go
@@ -0,0 +1,58 @@
+package customizations
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/transport/http"
+)
+
+// removeBucketFromPathMiddleware needs to be executed after the serialize step is performed
+type removeBucketFromPathMiddleware struct {
+}
+
+func (m *removeBucketFromPathMiddleware) ID() string {
+	return "S3:RemoveBucketFromPathMiddleware"
+}
+
+func (m *removeBucketFromPathMiddleware) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	// check if removing a bucket from the HTTP path is required
+	bucket, ok := getRemoveBucketFromPath(ctx)
+	if !ok {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*http.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	removeBucketFromPath(req.URL, bucket)
+	return next.HandleSerialize(ctx, in)
+}
+
+type removeBucketKey struct {
+	bucket string
+}
+
+// setBucketToRemoveOnContext sets the bucket name to be removed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setBucketToRemoveOnContext(ctx context.Context, bucket string) context.Context {
+	return middleware.WithStackValue(ctx, removeBucketKey{}, bucket)
+}
+
+// getRemoveBucketFromPath returns the bucket name to remove from the path.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func getRemoveBucketFromPath(ctx context.Context) (string, bool) { + v, ok := middleware.GetStackValue(ctx, removeBucketKey{}).(string) + return v, ok +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go new file mode 100644 index 000000000..325b2d369 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go @@ -0,0 +1,84 @@ +package customizations + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/transport/http" + "net/url" +) + +type s3ObjectLambdaEndpoint struct { + // whether the operation should use the s3-object-lambda endpoint + UseEndpoint bool + + // use transfer acceleration + UseAccelerate bool + + EndpointResolver EndpointResolver + EndpointResolverOptions EndpointResolverOptions +} + +func (t *s3ObjectLambdaEndpoint) ID() string { + return "S3:ObjectLambdaEndpoint" +} + +func (t *s3ObjectLambdaEndpoint) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !t.UseEndpoint { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*http.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) + } + + if t.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + return out, metadata, fmt.Errorf("client configured for dualstack but not supported for operation") + } + + if t.UseAccelerate { + return out, metadata, fmt.Errorf("client configured for accelerate but not supported for operation") + } + + region := awsmiddleware.GetRegion(ctx) + + ero := t.EndpointResolverOptions + + endpoint, err := t.EndpointResolver.ResolveEndpoint(region, ero) + if err != nil { + return out, metadata, err + } + + // Set the ServiceID and SigningName + ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda) + + if len(endpoint.SigningName) > 0 && endpoint.Source == aws.EndpointSourceCustom { + ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) + } else { + ctx = awsmiddleware.SetSigningName(ctx, s3ObjectLambda) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, err + } + + if len(endpoint.SigningRegion) > 0 { + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + } else { + ctx = awsmiddleware.SetSigningRegion(ctx, region) + } + + if endpoint.Source == aws.EndpointSourceServiceMetadata || !endpoint.HostnameImmutable { + updateS3HostForS3ObjectLambda(req) + } + + return next.HandleSerialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go new file mode 100644 index 000000000..6689acb8e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go @@ -0,0 +1,213 @@ +package customizations + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/internal/v4a" + "github.com/aws/smithy-go/middleware" +) + +type signerVersionKey struct{} + +// GetSignerVersion retrieves the signer version to 
use for signing.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSignerVersion(ctx context.Context) (v string) {
+	v, _ = middleware.GetStackValue(ctx, signerVersionKey{}).(string)
+	return v
+}
+
+// SetSignerVersion sets the signer version to be used for signing the request
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSignerVersion(ctx context.Context, version string) context.Context {
+	return middleware.WithStackValue(ctx, signerVersionKey{}, version)
+}
+
+// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware.
+type SignHTTPRequestMiddlewareOptions struct {
+
+	// credential provider
+	CredentialsProvider aws.CredentialsProvider
+
+	// log signing
+	LogSigning bool
+
+	// v4 signer
+	V4Signer v4.HTTPSigner
+
+	// v4a signer
+	V4aSigner v4a.HTTPSigner
+}
+
+// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given signers for signing requests
+func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
+	return &SignHTTPRequestMiddleware{
+		credentialsProvider: options.CredentialsProvider,
+		v4Signer:            options.V4Signer,
+		v4aSigner:           options.V4aSigner,
+		logSigning:          options.LogSigning,
+	}
+}
+
+// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation to select the HTTP signing method
+type SignHTTPRequestMiddleware struct {
+
+	// credential provider
+	credentialsProvider aws.CredentialsProvider
+
+	// log signing
+	logSigning bool
+
+	// v4 signer
+	v4Signer v4.HTTPSigner
+
+	// v4a signer
+	v4aSigner v4a.HTTPSigner
+}
+
+// ID is the SignHTTPRequestMiddleware identifier
+func (s *SignHTTPRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+// HandleFinalize will take the provided input and sign the request using the SigV4 or
+// SigV4a authentication scheme, selected via the signer version set on the context
+func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	// fetch the signer type from the context
+	signerVersion := GetSignerVersion(ctx)
+
+	switch signerVersion {
+	case v4a.Version:
+		v4aCredentialProvider, ok := s.credentialsProvider.(v4a.CredentialsProvider)
+		if !ok {
+			return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer")
+		}
+
+		mw := v4a.NewSignHTTPRequestMiddleware(v4a.SignHTTPRequestMiddlewareOptions{
+			Credentials: v4aCredentialProvider,
+			Signer:      s.v4aSigner,
+			LogSigning:  s.logSigning,
+		})
+		return mw.HandleFinalize(ctx, in, next)
+
+	default:
+		mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
+			CredentialsProvider: s.credentialsProvider,
+			Signer:              s.v4Signer,
+			LogSigning:          s.logSigning,
+		})
+		return mw.HandleFinalize(ctx, in, next)
+	}
+}
+
+// RegisterSigningMiddleware registers the wrapper signing middleware to the stack. If a signing middleware is already
+// present, the provided middleware will be swapped in. Otherwise the middleware will be added at the tail of the
+// finalize step.
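+//
+// A minimal usage sketch (the option values shown are assumptions for
+// illustration, not part of this package):
+//
+//	mw := NewSignHTTPRequestMiddleware(SignHTTPRequestMiddlewareOptions{
+//		CredentialsProvider: cfg.Credentials, // an aws.CredentialsProvider
+//		V4Signer:            v4.NewSigner(),
+//		V4aSigner:           v4a.NewSigner(),
+//	})
+//	err := RegisterSigningMiddleware(stack, mw)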
+func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) {
+	const signedID = "Signing"
+	_, present := stack.Finalize.Get(signedID)
+	if present {
+		_, err = stack.Finalize.Swap(signedID, signingMiddleware)
+	} else {
+		err = stack.Finalize.Add(signingMiddleware, middleware.After)
+	}
+	return err
+}
+
+// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+	CredentialsProvider aws.CredentialsProvider
+	V4Presigner         v4.HTTPPresigner
+	V4aPresigner        v4a.HTTPPresigner
+	LogSigning          bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// Will short circuit the middleware stack and not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+
+	// credential provider for sigv4 and sigv4a
+	credentialsProvider aws.CredentialsProvider
+
+	// sigV4 presigner
+	v4Signer v4.HTTPPresigner
+
+	// sigV4a presigner
+	v4aSigner v4a.HTTPPresigner
+
+	// log signing
+	logSigning bool
+}
+
+// NewPresignHTTPRequestMiddleware constructs a PresignHTTPRequestMiddleware using the given presigners for signing requests
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+	return &PresignHTTPRequestMiddleware{
+		credentialsProvider: options.CredentialsProvider,
+		v4Signer:            options.V4Presigner,
+		v4aSigner:           options.V4aPresigner,
+		logSigning:          options.LogSigning,
+	}
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned url for
+// the http request using the SigV4 or SigV4a presign authentication scheme.
+//
+// Since the signed request is not a valid HTTP request to send, the middleware
+// short circuits the stack and does not forward onto the next Finalize handler.
+func (p *PresignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	// fetch the signer type from the context
+	signerVersion := GetSignerVersion(ctx)
+
+	switch signerVersion {
+	case v4a.Version:
+		v4aCredentialProvider, ok := p.credentialsProvider.(v4a.CredentialsProvider)
+		if !ok {
+			return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer")
+		}
+
+		mw := v4a.NewPresignHTTPRequestMiddleware(v4a.PresignHTTPRequestMiddlewareOptions{
+			CredentialsProvider: v4aCredentialProvider,
+			Presigner:           p.v4aSigner,
+			LogSigning:          p.logSigning,
+		})
+		return mw.HandleFinalize(ctx, in, next)
+
+	default:
+		mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+			CredentialsProvider: p.credentialsProvider,
+			Presigner:           p.v4Signer,
+			LogSigning:          p.logSigning,
+		})
+		return mw.HandleFinalize(ctx, in, next)
+	}
+}
+
+// RegisterPreSigningMiddleware registers the wrapper pre-signing middleware to the stack. If a pre-signing middleware is already
+// present, the provided middleware will be swapped in. Otherwise the middleware will be added at the tail of the
+// finalize step.
+func RegisterPreSigningMiddleware(stack *middleware.Stack, signingMiddleware *PresignHTTPRequestMiddleware) (err error) {
+	const signedID = "PresignHTTPRequest"
+	_, present := stack.Finalize.Get(signedID)
+	if present {
+		_, err = stack.Finalize.Swap(signedID, signingMiddleware)
+	} else {
+		err = stack.Finalize.Add(signingMiddleware, middleware.After)
+	}
+	return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go
new file mode 100644
index 000000000..e5f95254a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go
@@ -0,0 +1,306 @@
+package customizations
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+	internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+	ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// UpdateEndpointParameterAccessor represents accessor functions used by the middleware
+type UpdateEndpointParameterAccessor struct {
+	// GetBucketFromInput is a function pointer that fetches the bucket name
+	// from the provided input. It takes an input value and returns a string
+	// pointer to the bucket name, and a bool that is false if the input has
+	// no bucket member.
+	GetBucketFromInput func(interface{}) (*string, bool)
+}
+
+// UpdateEndpointOptions provides the options for the UpdateEndpoint middleware setup.
+type UpdateEndpointOptions struct {
+	// Accessor is the set of parameter accessors used by the middleware
+	Accessor UpdateEndpointParameterAccessor
+
+	// use path style
+	UsePathStyle bool
+
+	// use transfer acceleration
+	UseAccelerate bool
+
+	// indicates if an operation supports s3 transfer acceleration.
+	SupportsAccelerate bool
+
+	// use ARN region
+	UseARNRegion bool
+
+	// Indicates that the operation should target the s3-object-lambda endpoint.
+	// Used to direct operations that do not route based on an input ARN.
+	TargetS3ObjectLambda bool
+
+	// EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver
+	EndpointResolver EndpointResolver
+
+	// EndpointResolverOptions used by the endpoint resolver
+	EndpointResolverOptions EndpointResolverOptions
+
+	// DisableMultiRegionAccessPoints indicates multi-region access point support is disabled
+	DisableMultiRegionAccessPoints bool
+}
+
+// UpdateEndpoint adds the middleware to the middleware stack based on the UpdateEndpointOptions.
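+//
+// The resulting serialize-step ordering relative to "OperationSerializer"
+// matches the layering described in doc.go:
+//
+//	processARNResource -> s3ObjectLambdaEndpoint -> OperationSerializer ->
+//	updateEndpoint -> removeBucketFromPathMiddleware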
+func UpdateEndpoint(stack *middleware.Stack, options UpdateEndpointOptions) (err error) { + const serializerID = "OperationSerializer" + + // initial arn look up middleware + err = stack.Initialize.Add(&s3shared.ARNLookup{ + GetARNValue: options.Accessor.GetBucketFromInput, + }, middleware.Before) + if err != nil { + return err + } + + // process arn + err = stack.Serialize.Insert(&processARNResource{ + UseARNRegion: options.UseARNRegion, + UseAccelerate: options.UseAccelerate, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointResolverOptions, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }, serializerID, middleware.Before) + if err != nil { + return err + } + + // process whether the operation requires the s3-object-lambda endpoint + // Occurs before operation serializer so that hostPrefix mutations + // can be handled correctly. + err = stack.Serialize.Insert(&s3ObjectLambdaEndpoint{ + UseEndpoint: options.TargetS3ObjectLambda, + UseAccelerate: options.UseAccelerate, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointResolverOptions, + }, serializerID, middleware.Before) + if err != nil { + return err + } + + // remove bucket arn middleware + err = stack.Serialize.Insert(&removeBucketFromPathMiddleware{}, serializerID, middleware.After) + if err != nil { + return err + } + + // update endpoint to use options for path style and accelerate + err = stack.Serialize.Insert(&updateEndpoint{ + usePathStyle: options.UsePathStyle, + getBucketFromInput: options.Accessor.GetBucketFromInput, + useAccelerate: options.UseAccelerate, + supportsAccelerate: options.SupportsAccelerate, + }, serializerID, middleware.After) + if err != nil { + return err + } + + return err +} + +type updateEndpoint struct { + // path style options + usePathStyle bool + getBucketFromInput func(interface{}) (*string, bool) + + // accelerate options + useAccelerate bool + supportsAccelerate bool +} + +// ID returns the middleware ID. 
+func (*updateEndpoint) ID() string {
+	return "S3:UpdateEndpoint"
+}
+
+func (u *updateEndpoint) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	// if an arn was processed, skip this middleware
+	if _, ok := s3shared.GetARNResourceFromContext(ctx); ok {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	// skip this customization if the host name is set as immutable
+	if smithyhttp.GetHostnameImmutable(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// check if accelerate is supported
+	if u.useAccelerate && !u.supportsAccelerate {
+		// accelerate is not supported, thus will be ignored
+		log.Println("Transfer acceleration is not supported for the operation, ignoring UseAccelerate.")
+		u.useAccelerate = false
+	}
+
+	// transfer acceleration is not supported with path style urls
+	if u.useAccelerate && u.usePathStyle {
+		log.Println("UseAccelerate is not compatible with UsePathStyle, ignoring UsePathStyle.")
+		u.usePathStyle = false
+	}
+
+	if u.getBucketFromInput != nil {
+		// the customizations below only apply if a bucket name is provided
+		bucket, ok := u.getBucketFromInput(in.Parameters)
+		if ok && bucket != nil {
+			region := awsmiddleware.GetRegion(ctx)
+			if err := u.updateEndpointFromConfig(req, *bucket, region); err != nil {
+				return out, metadata, err
+			}
+		}
+	}
+
+	return next.HandleSerialize(ctx, in)
+}
+
+func (u updateEndpoint) updateEndpointFromConfig(req *smithyhttp.Request, bucket string, region string) error {
+	// do nothing if path style is enforced
+	if u.usePathStyle {
+		return nil
+	}
+
+	if !hostCompatibleBucketName(req.URL, bucket) {
+		// The bucket name must be a valid hostname label to be put into the host
+		// for accelerate operations. For non-accelerate operations the bucket
+		// name can stay in the path if it is not a valid hostname label.
+		var err error
+		if u.useAccelerate {
+			err = fmt.Errorf("bucket name %s is not compatible with S3", bucket)
+		}
+
+		// No-Op if not using accelerate.
+		return err
+	}
+
+	// accelerate is only supported if use path style is disabled
+	if u.useAccelerate {
+		parts := strings.Split(req.URL.Host, ".")
+		if len(parts) < 3 {
+			return fmt.Errorf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", req.URL.Host)
+		}
+
+		if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+			parts[0] = "s3-accelerate"
+		}
+
+		for i := 1; i+1 < len(parts); i++ {
+			if strings.EqualFold(parts[i], region) {
+				parts = append(parts[:i], parts[i+1:]...)
+				break
+			}
+		}
+
+		// construct the url host
+		req.URL.Host = strings.Join(parts, ".")
+	}
+
+	// move the bucket to follow virtual host style
+	moveBucketNameToHost(req.URL, bucket)
+	return nil
+}
+
+// updates the endpoint to use virtual host styling
+func moveBucketNameToHost(u *url.URL, bucket string) {
+	u.Host = bucket + "." + u.Host
+	removeBucketFromPath(u, bucket)
+}
+
+// removes the bucket from the url path
+func removeBucketFromPath(u *url.URL, bucket string) {
+	if strings.HasPrefix(u.Path, "/"+bucket) {
+		// modify the url path
+		u.Path = strings.Replace(u.Path, "/"+bucket, "", 1)
+
+		// modify the url raw path
+		u.RawPath = strings.Replace(u.RawPath, "/"+httpbinding.EscapePath(bucket, true), "", 1)
+	}
+
+	if u.Path == "" {
+		u.Path = "/"
+	}
+
+	if u.RawPath == "" {
+		u.RawPath = "/"
+	}
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if the bucket is not
+// DNS compatible or the EndpointResolver resolves an aws.Endpoint with
+// its HostnameImmutable member set to true.
+//
+// https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Endpoint.HostnameImmutable
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+	// The bucket might be DNS compatible, but dots in the hostname will fail
+	// certificate validation, so do not use host-style.
+	if u.Scheme == "https" && strings.Contains(bucket, ".") {
+		return false
+	}
+
+	// if the bucket is DNS compatible
+	return dnsCompatibleBucketName(bucket)
+}
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+	if strings.Contains(bucket, "..") {
+		return false
+	}
+
+	// checks for the `^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$` domain mapping
+	if !((bucket[0] > 96 && bucket[0] < 123) || (bucket[0] > 47 && bucket[0] < 58)) {
+		return false
+	}
+
+	for _, c := range bucket[1:] {
+		if !((c > 96 && c < 123) || (c > 47 && c < 58) || c == 46 || c == 45) {
+			return false
+		}
+	}
+
+	// checks for `^(\d+\.){3}\d+$` IP addressing
+	v := strings.SplitN(bucket, ".", -1)
+	if len(v) == 4 {
+		for _, c := range bucket {
+			if !((c > 47 && c < 58) || c == 46) {
+				// confirm that this is not an IP address
+				return true
+			}
+		}
+		// this is an IP address
+		return false
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..4b8457ebf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
@@ -0,0 +1,836 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+	"strings"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+	// SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+	// name. You must not set this value directly in your application.
+	ResolvedRegion string
+
+	// DisableHTTPS informs the resolver to return an endpoint that does not use the
+	// HTTPS scheme.
+	DisableHTTPS bool
+
+	// UseDualStackEndpoint specifies that the resolver must resolve a dual-stack endpoint.
+ UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver S3 endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "af-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.af-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + Variant: 
endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "aws-global", + }: endpoints.Endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-north-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: 
"eu-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.eu-west-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "fips-ca-central-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-2", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-2", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.me-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.me-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "s3-external-1", + }: endpoints.Endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: 
"s3.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-north-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: 
"s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.sc2s.sgov.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "fips-us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: 
"us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, +} + +// GetDNSSuffix returns the dnsSuffix URL component for the given partition id +func GetDNSSuffix(id string, options Options) (string, error) { + variant := transformToSharedOptions(options).GetEndpointVariant() + switch { + case strings.EqualFold(id, "aws"): + switch variant { + case endpoints.DualStackVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant | endpoints.DualStackVariant: + return "amazonaws.com", nil + + case 0: + return "amazonaws.com", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-cn"): + switch variant { + case endpoints.DualStackVariant: + return "amazonaws.com.cn", nil + + case endpoints.FIPSVariant: + return "amazonaws.com.cn", nil + + case endpoints.FIPSVariant | endpoints.DualStackVariant: + return "api.amazonwebservices.com.cn", nil + + case 0: + return "amazonaws.com.cn", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-iso"): + switch variant { + case endpoints.FIPSVariant: + return "c2s.ic.gov", nil + + case 0: + return "c2s.ic.gov", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-iso-b"): + switch variant { + case endpoints.FIPSVariant: + return "sc2s.sgov.gov", nil + + case 0: + return "sc2s.sgov.gov", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-us-gov"): + switch variant { + case endpoints.DualStackVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant: + return "amazonaws.com", nil + + case endpoints.FIPSVariant | endpoints.DualStackVariant: + return "amazonaws.com", nil + + case 0: + return "amazonaws.com", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + default: + return "", fmt.Errorf("unknown partition") + + } +} + +// GetDNSSuffixFromRegion returns the DNS suffix for the provided region and +// options. 
+func GetDNSSuffixFromRegion(region string, options Options) (string, error) { + switch { + case partitionRegexp.Aws.MatchString(region): + return GetDNSSuffix("aws", options) + + case partitionRegexp.AwsCn.MatchString(region): + return GetDNSSuffix("aws-cn", options) + + case partitionRegexp.AwsIso.MatchString(region): + return GetDNSSuffix("aws-iso", options) + + case partitionRegexp.AwsIsoB.MatchString(region): + return GetDNSSuffix("aws-iso-b", options) + + case partitionRegexp.AwsUsGov.MatchString(region): + return GetDNSSuffix("aws-us-gov", options) + + default: + return GetDNSSuffix("aws", options) + + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go new file mode 100644 index 000000000..126dedcf3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go @@ -0,0 +1,12713 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyxml "github.com/aws/smithy-go/encoding/xml" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "strconv" + "strings" +) + +type awsRestxml_serializeOpAbortMultipartUpload struct { +} + +func (*awsRestxml_serializeOpAbortMultipartUpload) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AbortMultipartUploadInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=AbortMultipartUpload") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipartUploadInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { 
+ locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpCompleteMultipartUpload struct { +} + +func (*awsRestxml_serializeOpCompleteMultipartUpload) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CompleteMultipartUploadInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=CompleteMultipartUpload") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.MultipartUpload != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CompleteMultipartUpload", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentCompletedMultipartUpload(input.MultipartUpload, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteMultipartUploadInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be 
empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + locationName := "X-Amz-Checksum-Crc32" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32) + } + + if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + locationName := "X-Amz-Checksum-Crc32c" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) + } + + if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + locationName := "X-Amz-Checksum-Sha1" + encoder.SetHeader(locationName).String(*v.ChecksumSHA1) + } + + if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + locationName := "X-Amz-Checksum-Sha256" + encoder.SetHeader(locationName).String(*v.ChecksumSHA256) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpCopyObject struct { +} + +func (*awsRestxml_serializeOpCopyObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CopyObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=CopyObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCopyObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } 
+ in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BucketKeyEnabled { + locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + } + + if v.CacheControl != nil && len(*v.CacheControl) > 0 { + locationName := "Cache-Control" + encoder.SetHeader(locationName).String(*v.CacheControl) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + locationName := "Content-Disposition" + encoder.SetHeader(locationName).String(*v.ContentDisposition) + } + + if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + locationName := "Content-Encoding" + encoder.SetHeader(locationName).String(*v.ContentEncoding) + } + + if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + locationName := "Content-Language" + encoder.SetHeader(locationName).String(*v.ContentLanguage) + } + + if v.ContentType != nil && len(*v.ContentType) > 0 { + locationName := "Content-Type" + encoder.SetHeader(locationName).String(*v.ContentType) + } + + if v.CopySource != nil && len(*v.CopySource) > 0 { + locationName := "X-Amz-Copy-Source" + encoder.SetHeader(locationName).String(*v.CopySource) + } + + if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 { + locationName := "X-Amz-Copy-Source-If-Match" + encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) + } + + if v.CopySourceIfModifiedSince != nil { + locationName := "X-Amz-Copy-Source-If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) + } + + if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + locationName := "X-Amz-Copy-Source-If-None-Match" + encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) + } + + if v.CopySourceIfUnmodifiedSince != nil { + locationName := "X-Amz-Copy-Source-If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) + } + + if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) + } + + if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) + } + + if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := 
"X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + locationName := "X-Amz-Source-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) + } + + if v.Expires != nil { + locationName := "Expires" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.Metadata != nil { + hv := encoder.Headers("X-Amz-Meta-") + for mapKey, mapVal := range v.Metadata { + if len(mapVal) > 0 { + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) + } + } + } + + if len(v.MetadataDirective) > 0 { + locationName := "X-Amz-Metadata-Directive" + encoder.SetHeader(locationName).String(string(v.MetadataDirective)) + } + + if len(v.ObjectLockLegalHoldStatus) > 0 { + locationName := "X-Amz-Object-Lock-Legal-Hold" + encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) + } + + if len(v.ObjectLockMode) > 0 { + locationName := "X-Amz-Object-Lock-Mode" + encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) + } + + if v.ObjectLockRetainUntilDate != nil { + locationName := "X-Amz-Object-Lock-Retain-Until-Date" + encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Context" + encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) + } + + if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + + if 
len(v.StorageClass) > 0 { + locationName := "X-Amz-Storage-Class" + encoder.SetHeader(locationName).String(string(v.StorageClass)) + } + + if v.Tagging != nil && len(*v.Tagging) > 0 { + locationName := "X-Amz-Tagging" + encoder.SetHeader(locationName).String(*v.Tagging) + } + + if len(v.TaggingDirective) > 0 { + locationName := "X-Amz-Tagging-Directive" + encoder.SetHeader(locationName).String(string(v.TaggingDirective)) + } + + if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + locationName := "X-Amz-Website-Redirect-Location" + encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) + } + + return nil +} + +type awsRestxml_serializeOpCreateBucket struct { +} + +func (*awsRestxml_serializeOpCreateBucket) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateBucketInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCreateBucketInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.CreateBucketConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CreateBucketConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentCreateBucketConfiguration(input.CreateBucketConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return 
&smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + locationName := "X-Amz-Grant-Write" + encoder.SetHeader(locationName).String(*v.GrantWrite) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.ObjectLockEnabledForBucket { + locationName := "X-Amz-Bucket-Object-Lock-Enabled" + encoder.SetHeader(locationName).Boolean(v.ObjectLockEnabledForBucket) + } + + if len(v.ObjectOwnership) > 0 { + locationName := "X-Amz-Object-Ownership" + encoder.SetHeader(locationName).String(string(v.ObjectOwnership)) + } + + return nil +} + +type awsRestxml_serializeOpCreateMultipartUpload struct { +} + +func (*awsRestxml_serializeOpCreateMultipartUpload) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateMultipartUploadInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?uploads&x-id=CreateMultipartUpload") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMultipartUploadInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return 
err + } + } + + if v.BucketKeyEnabled { + locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + } + + if v.CacheControl != nil && len(*v.CacheControl) > 0 { + locationName := "Cache-Control" + encoder.SetHeader(locationName).String(*v.CacheControl) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + locationName := "Content-Disposition" + encoder.SetHeader(locationName).String(*v.ContentDisposition) + } + + if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + locationName := "Content-Encoding" + encoder.SetHeader(locationName).String(*v.ContentEncoding) + } + + if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + locationName := "Content-Language" + encoder.SetHeader(locationName).String(*v.ContentLanguage) + } + + if v.ContentType != nil && len(*v.ContentType) > 0 { + locationName := "Content-Type" + encoder.SetHeader(locationName).String(*v.ContentType) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Expires != nil { + locationName := "Expires" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.Metadata != nil { + hv := encoder.Headers("X-Amz-Meta-") + for mapKey, mapVal := range v.Metadata { + if len(mapVal) > 0 { + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) + } + } + } + + if len(v.ObjectLockLegalHoldStatus) > 0 { + locationName := "X-Amz-Object-Lock-Legal-Hold" + encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) + } + + if len(v.ObjectLockMode) > 0 { + locationName := "X-Amz-Object-Lock-Mode" + encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) + } + + if v.ObjectLockRetainUntilDate != nil { + locationName := "X-Amz-Object-Lock-Retain-Until-Date" + encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := 
"X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Context" + encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) + } + + if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + + if len(v.StorageClass) > 0 { + locationName := "X-Amz-Storage-Class" + encoder.SetHeader(locationName).String(string(v.StorageClass)) + } + + if v.Tagging != nil && len(*v.Tagging) > 0 { + locationName := "X-Amz-Tagging" + encoder.SetHeader(locationName).String(*v.Tagging) + } + + if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + locationName := "X-Amz-Website-Redirect-Location" + encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucket struct { +} + +func (*awsRestxml_serializeOpDeleteBucket) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + 
encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketCors struct { +} + +func (*awsRestxml_serializeOpDeleteBucketCors) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketCorsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := 
httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCorsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketEncryption struct { +} + +func (*awsRestxml_serializeOpDeleteBucketEncryption) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketEncryptionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type 
awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketInventoryConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketInventoryConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketLifecycle struct { +} + +func (*awsRestxml_serializeOpDeleteBucketLifecycle) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketLifecycleInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type 
awsRestxml_serializeOpDeleteBucketMetricsConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketMetricsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketOwnershipControls struct { +} + +func (*awsRestxml_serializeOpDeleteBucketOwnershipControls) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, 
request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketPolicy struct { +} + +func (*awsRestxml_serializeOpDeleteBucketPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPolicyInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketReplication struct { +} + +func 
(*awsRestxml_serializeOpDeleteBucketReplication) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketReplicationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBucketReplicationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketTagging struct { +} + +func (*awsRestxml_serializeOpDeleteBucketTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(input, restEncoder); err != nil { + return out, 
metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketWebsite struct { +} + +func (*awsRestxml_serializeOpDeleteBucketWebsite) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketWebsiteInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteObject struct { +} + +func (*awsRestxml_serializeOpDeleteObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, 
metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=DeleteObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BypassGovernanceRetention { + locationName := "X-Amz-Bypass-Governance-Retention" + encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.MFA != nil && len(*v.MFA) > 0 { + locationName := "X-Amz-Mfa" + encoder.SetHeader(locationName).String(*v.MFA) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpDeleteObjects struct { +} + +func (*awsRestxml_serializeOpDeleteObjects) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteObjectsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?delete&x-id=DeleteObjects") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Delete != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Delete", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentDelete(input.Delete, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BypassGovernanceRetention { + locationName := "X-Amz-Bypass-Governance-Retention" + encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.MFA != nil && len(*v.MFA) > 0 { + locationName := "X-Amz-Mfa" + encoder.SetHeader(locationName).String(*v.MFA) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + return nil +} + +type awsRestxml_serializeOpDeleteObjectTagging struct { +} + +func (*awsRestxml_serializeOpDeleteObjectTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteObjectTaggingInput) + _ = input + if !ok { + return out, 
metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpDeletePublicAccessBlock struct { +} + +func (*awsRestxml_serializeOpDeletePublicAccessBlock) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeletePublicAccessBlockInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func 
awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketAccelerateConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?accelerate") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketAcl struct { +} + +func (*awsRestxml_serializeOpGetBucketAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketAclInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketAnalyticsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics&x-id=GetBucketAnalyticsConfiguration") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error 
{ + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketCors struct { +} + +func (*awsRestxml_serializeOpGetBucketCors) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketCorsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketEncryption struct { +} + +func (*awsRestxml_serializeOpGetBucketEncryption) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketEncryptionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncryptionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", 
v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketInventoryConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketInventoryConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory&x-id=GetBucketInventoryConfiguration") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketLifecycleConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketLifecycleConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput) + _ = input + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketLocation struct { +} + +func (*awsRestxml_serializeOpGetBucketLocation) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketLocationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?location") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } 
+ if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketLogging struct { +} + +func (*awsRestxml_serializeOpGetBucketLogging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketLoggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?logging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLoggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketMetricsConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketMetricsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics&x-id=GetBucketMetricsConfiguration") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + 
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketNotificationConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketNotificationConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?notification") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := 
encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketOwnershipControls struct { +} + +func (*awsRestxml_serializeOpGetBucketOwnershipControls) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketOwnershipControlsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketPolicy struct { +} + +func (*awsRestxml_serializeOpGetBucketPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = 
smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketPolicyStatus struct { +} + +func (*awsRestxml_serializeOpGetBucketPolicyStatus) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketPolicyStatusInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policyStatus") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + 
encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketReplication struct { +} + +func (*awsRestxml_serializeOpGetBucketReplication) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketReplicationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketReplicationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketRequestPayment struct { +} + +func (*awsRestxml_serializeOpGetBucketRequestPayment) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketRequestPaymentInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?requestPayment") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketTagging struct { +} + +func (*awsRestxml_serializeOpGetBucketTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketVersioning struct { +} + +func (*awsRestxml_serializeOpGetBucketVersioning) ID() string { + return "OperationSerializer" +} + +func (m 
*awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketVersioningInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versioning") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVersioningInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketWebsite struct { +} + +func (*awsRestxml_serializeOpGetBucketWebsite) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketWebsiteInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, 
metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsiteInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetObject struct { +} + +func (*awsRestxml_serializeOpGetObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=GetObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumMode) > 0 { + locationName := "X-Amz-Checksum-Mode" + encoder.SetHeader(locationName).String(string(v.ChecksumMode)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.IfMatch != nil && len(*v.IfMatch) > 0 { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfModifiedSince != nil { + locationName := "If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) + } + + if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 { + locationName := "If-None-Match" + 
encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + + if v.IfUnmodifiedSince != nil { + locationName := "If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince)) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.PartNumber != 0 { + encoder.SetQuery("partNumber").Integer(v.PartNumber) + } + + if v.Range != nil && len(*v.Range) > 0 { + locationName := "Range" + encoder.SetHeader(locationName).String(*v.Range) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.ResponseCacheControl != nil { + encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl) + } + + if v.ResponseContentDisposition != nil { + encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition) + } + + if v.ResponseContentEncoding != nil { + encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding) + } + + if v.ResponseContentLanguage != nil { + encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage) + } + + if v.ResponseContentType != nil { + encoder.SetQuery("response-content-type").String(*v.ResponseContentType) + } + + if v.ResponseExpires != nil { + encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectAcl struct { +} + +func (*awsRestxml_serializeOpGetObjectAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectAclInput(input, 
restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectAttributes struct { +} + +func (*awsRestxml_serializeOpGetObjectAttributes) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectAttributesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?attributes") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttributesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && 
len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.MaxParts != 0 { + locationName := "X-Amz-Max-Parts" + encoder.SetHeader(locationName).Integer(v.MaxParts) + } + + if v.ObjectAttributes != nil { + locationName := "X-Amz-Object-Attributes" + for i := range v.ObjectAttributes { + if len(v.ObjectAttributes[i]) > 0 { + escaped := string(v.ObjectAttributes[i]) + if strings.Index(string(v.ObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.ObjectAttributes[i]), `"`) != -1 { + escaped = strconv.Quote(string(v.ObjectAttributes[i])) + } + + encoder.AddHeader(locationName).String(string(escaped)) + } + } + } + + if v.PartNumberMarker != nil && len(*v.PartNumberMarker) > 0 { + locationName := "X-Amz-Part-Number-Marker" + encoder.SetHeader(locationName).String(*v.PartNumberMarker) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectLegalHold struct { +} + +func (*awsRestxml_serializeOpGetObjectLegalHold) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectLegalHoldInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?legal-hold") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return 
next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegalHoldInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectLockConfiguration struct { +} + +func (*awsRestxml_serializeOpGetObjectLockConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectLockConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?object-lock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectRetention struct 
{ +} + +func (*awsRestxml_serializeOpGetObjectRetention) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectRetentionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?retention") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectRetentionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectTagging struct { +} + +func (*awsRestxml_serializeOpGetObjectTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpGetObjectTorrent struct { +} + +func (*awsRestxml_serializeOpGetObjectTorrent) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetObjectTorrentInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?torrent") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrentInput, encoder *httpbinding.Encoder) 
error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + return nil +} + +type awsRestxml_serializeOpGetPublicAccessBlock struct { +} + +func (*awsRestxml_serializeOpGetPublicAccessBlock) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetPublicAccessBlockInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAccessBlockInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpHeadBucket struct { +} + +func (*awsRestxml_serializeOpHeadBucket) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, 
metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*HeadBucketInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "HEAD" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsHeadBucketInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpHeadObject struct { +} + +func (*awsRestxml_serializeOpHeadObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*HeadObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "HEAD" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsHeadObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return 
fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumMode) > 0 { + locationName := "X-Amz-Checksum-Mode" + encoder.SetHeader(locationName).String(string(v.ChecksumMode)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.IfMatch != nil && len(*v.IfMatch) > 0 { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfModifiedSince != nil { + locationName := "If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) + } + + if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 { + locationName := "If-None-Match" + encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + + if v.IfUnmodifiedSince != nil { + locationName := "If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince)) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.PartNumber != 0 { + encoder.SetQuery("partNumber").Integer(v.PartNumber) + } + + if v.Range != nil && len(*v.Range) > 0 { + locationName := "Range" + encoder.SetHeader(locationName).String(*v.Range) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpListBucketAnalyticsConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketAnalyticsConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics&x-id=ListBucketAnalyticsConfigurations") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpListBucketIntelligentTieringConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization 
of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + return nil +} + +type awsRestxml_serializeOpListBucketInventoryConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketInventoryConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory&x-id=ListBucketInventoryConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpListBucketMetricsConfigurations struct { +} + +func (*awsRestxml_serializeOpListBucketMetricsConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := 
in.Parameters.(*ListBucketMetricsConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics&x-id=ListBucketMetricsConfigurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpListBuckets struct { +} + +func (*awsRestxml_serializeOpListBuckets) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListBucketsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +type awsRestxml_serializeOpListMultipartUploads struct { +} + +func 
(*awsRestxml_serializeOpListMultipartUploads) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListMultipartUploadsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?uploads") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipartUploadsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Delimiter != nil { + encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.KeyMarker != nil { + encoder.SetQuery("key-marker").String(*v.KeyMarker) + } + + if v.MaxUploads != 0 { + encoder.SetQuery("max-uploads").Integer(v.MaxUploads) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if v.UploadIdMarker != nil { + encoder.SetQuery("upload-id-marker").String(*v.UploadIdMarker) + } + + return nil +} + +type awsRestxml_serializeOpListObjects struct { +} + +func (*awsRestxml_serializeOpListObjects) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListObjectsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + 
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListObjectsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Delimiter != nil { + encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Marker != nil { + encoder.SetQuery("marker").String(*v.Marker) + } + + if v.MaxKeys != 0 { + encoder.SetQuery("max-keys").Integer(v.MaxKeys) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + return nil +} + +type awsRestxml_serializeOpListObjectsV2 struct { +} + +func (*awsRestxml_serializeOpListObjectsV2) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListObjectsV2Input) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?list-type=2") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListObjectsV2Input(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func 
awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.Delimiter != nil { + encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.FetchOwner { + encoder.SetQuery("fetch-owner").Boolean(v.FetchOwner) + } + + if v.MaxKeys != 0 { + encoder.SetQuery("max-keys").Integer(v.MaxKeys) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.StartAfter != nil { + encoder.SetQuery("start-after").String(*v.StartAfter) + } + + return nil +} + +type awsRestxml_serializeOpListObjectVersions struct { +} + +func (*awsRestxml_serializeOpListObjectVersions) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListObjectVersionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versions") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVersionsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Delimiter != nil { + 
encoder.SetQuery("delimiter").String(*v.Delimiter) + } + + if len(v.EncodingType) > 0 { + encoder.SetQuery("encoding-type").String(string(v.EncodingType)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.KeyMarker != nil { + encoder.SetQuery("key-marker").String(*v.KeyMarker) + } + + if v.MaxKeys != 0 { + encoder.SetQuery("max-keys").Integer(v.MaxKeys) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + + if v.VersionIdMarker != nil { + encoder.SetQuery("version-id-marker").String(*v.VersionIdMarker) + } + + return nil +} + +type awsRestxml_serializeOpListParts struct { +} + +func (*awsRestxml_serializeOpListParts) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListPartsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=ListParts") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsListPartsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.MaxParts != 0 { + encoder.SetQuery("max-parts").Integer(v.MaxParts) + } + + if v.PartNumberMarker != nil { + encoder.SetQuery("part-number-marker").String(*v.PartNumberMarker) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + 
locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketAccelerateConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketAccelerateConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?accelerate") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.AccelerateConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccelerateConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentAccelerateConfiguration(input.AccelerateConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return 
&smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketAcl struct { +} + +func (*awsRestxml_serializeOpPutBucketAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketAclInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.AccessControlPolicy != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlPolicy", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil 
{ + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + locationName := "X-Amz-Grant-Write" + encoder.SetHeader(locationName).String(*v.GrantWrite) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketAnalyticsConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketAnalyticsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.AnalyticsConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AnalyticsConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentAnalyticsConfiguration(input.AnalyticsConfiguration, 
xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketCors struct { +} + +func (*awsRestxml_serializeOpPutBucketCors) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketCorsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.CORSConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CORSConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentCORSConfiguration(input.CORSConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: 
err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketEncryption struct { +} + +func (*awsRestxml_serializeOpPutBucketEncryption) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketEncryptionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.ServerSideEncryptionConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ServerSideEncryptionConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentServerSideEncryptionConfiguration(input.ServerSideEncryptionConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncryptionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.IntelligentTieringConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IntelligentTieringConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentIntelligentTieringConfiguration(input.IntelligentTieringConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: 
err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketInventoryConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketInventoryConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.InventoryConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "InventoryConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentInventoryConfiguration(input.InventoryConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketLifecycleConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketLifecycleConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.LifecycleConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LifecycleConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentBucketLifecycleConfiguration(input.LifecycleConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v 
*PutBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketLogging struct { +} + +func (*awsRestxml_serializeOpPutBucketLogging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketLoggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?logging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.BucketLoggingStatus != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BucketLoggingStatus", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentBucketLoggingStatus(input.BucketLoggingStatus, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLoggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || 
len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketMetricsConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketMetricsConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.MetricsConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MetricsConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentMetricsConfiguration(input.MetricsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported 
serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Id != nil { + encoder.SetQuery("id").String(*v.Id) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketNotificationConfiguration struct { +} + +func (*awsRestxml_serializeOpPutBucketNotificationConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?notification") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.NotificationConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NotificationConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentNotificationConfiguration(input.NotificationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: 
fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.SkipDestinationValidation { + locationName := "X-Amz-Skip-Destination-Validation" + encoder.SetHeader(locationName).Boolean(v.SkipDestinationValidation) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketOwnershipControls struct { +} + +func (*awsRestxml_serializeOpPutBucketOwnershipControls) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketOwnershipControlsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.OwnershipControls != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OwnershipControls", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentOwnershipControls(input.OwnershipControls, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := 
encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketPolicy struct { +} + +func (*awsRestxml_serializeOpPutBucketPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("text/plain") + } + + if input.Policy != nil { + payload := strings.NewReader(*input.Policy) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ConfirmRemoveSelfBucketAccess { + locationName := "X-Amz-Confirm-Remove-Self-Bucket-Access" + encoder.SetHeader(locationName).Boolean(v.ConfirmRemoveSelfBucketAccess) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type 
awsRestxml_serializeOpPutBucketReplication struct { +} + +func (*awsRestxml_serializeOpPutBucketReplication) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketReplicationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.ReplicationConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplicationConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentReplicationConfiguration(input.ReplicationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketReplicationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Token != 
nil && len(*v.Token) > 0 { + locationName := "X-Amz-Bucket-Object-Lock-Token" + encoder.SetHeader(locationName).String(*v.Token) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketRequestPayment struct { +} + +func (*awsRestxml_serializeOpPutBucketRequestPayment) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketRequestPaymentInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?requestPayment") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.RequestPaymentConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RequestPaymentConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentRequestPaymentConfiguration(input.RequestPaymentConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if 
v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketTagging struct { +} + +func (*awsRestxml_serializeOpPutBucketTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Tagging != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tagging", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := 
"X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketVersioning struct { +} + +func (*awsRestxml_serializeOpPutBucketVersioning) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketVersioningInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versioning") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.VersioningConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "VersioningConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentVersioningConfiguration(input.VersioningConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVersioningInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := 
"X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.MFA != nil && len(*v.MFA) > 0 { + locationName := "X-Amz-Mfa" + encoder.SetHeader(locationName).String(*v.MFA) + } + + return nil +} + +type awsRestxml_serializeOpPutBucketWebsite struct { +} + +func (*awsRestxml_serializeOpPutBucketWebsite) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutBucketWebsiteInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.WebsiteConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "WebsiteConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentWebsiteConfiguration(input.WebsiteConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsiteInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if 
v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpPutObject struct { +} + +func (*awsRestxml_serializeOpPutObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=PutObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/octet-stream") + } + + if input.Body != nil { + payload := input.Body + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BucketKeyEnabled { + locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + } + + if v.CacheControl != nil && len(*v.CacheControl) > 0 { + locationName := "Cache-Control" + encoder.SetHeader(locationName).String(*v.CacheControl) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + locationName := "X-Amz-Checksum-Crc32" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32) + } + + if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + locationName := "X-Amz-Checksum-Crc32c" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) + } + + if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { 
+ locationName := "X-Amz-Checksum-Sha1" + encoder.SetHeader(locationName).String(*v.ChecksumSHA1) + } + + if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + locationName := "X-Amz-Checksum-Sha256" + encoder.SetHeader(locationName).String(*v.ChecksumSHA256) + } + + if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + locationName := "Content-Disposition" + encoder.SetHeader(locationName).String(*v.ContentDisposition) + } + + if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + locationName := "Content-Encoding" + encoder.SetHeader(locationName).String(*v.ContentEncoding) + } + + if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + locationName := "Content-Language" + encoder.SetHeader(locationName).String(*v.ContentLanguage) + } + + if v.ContentLength != 0 { + locationName := "Content-Length" + encoder.SetHeader(locationName).Long(v.ContentLength) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ContentType != nil && len(*v.ContentType) > 0 { + locationName := "Content-Type" + encoder.SetHeader(locationName).String(*v.ContentType) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Expires != nil { + locationName := "Expires" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.Metadata != nil { + hv := encoder.Headers("X-Amz-Meta-") + for mapKey, mapVal := range v.Metadata { + if len(mapVal) > 0 { + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) + } + } + } + + if len(v.ObjectLockLegalHoldStatus) > 0 { + locationName := "X-Amz-Object-Lock-Legal-Hold" + encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) + } + + if len(v.ObjectLockMode) > 0 { + locationName := "X-Amz-Object-Lock-Mode" + encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) + } + + if v.ObjectLockRetainUntilDate != nil { + locationName := "X-Amz-Object-Lock-Retain-Until-Date" + encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + 
locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Context" + encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) + } + + if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + + if len(v.StorageClass) > 0 { + locationName := "X-Amz-Storage-Class" + encoder.SetHeader(locationName).String(string(v.StorageClass)) + } + + if v.Tagging != nil && len(*v.Tagging) > 0 { + locationName := "X-Amz-Tagging" + encoder.SetHeader(locationName).String(*v.Tagging) + } + + if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + locationName := "X-Amz-Website-Redirect-Location" + encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectAcl struct { +} + +func (*awsRestxml_serializeOpPutObjectAcl) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectAclInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?acl") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectAclInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.AccessControlPolicy != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlPolicy", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload 
:= bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ACL) > 0 { + locationName := "X-Amz-Acl" + encoder.SetHeader(locationName).String(string(v.ACL)) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + locationName := "X-Amz-Grant-Full-Control" + encoder.SetHeader(locationName).String(*v.GrantFullControl) + } + + if v.GrantRead != nil && len(*v.GrantRead) > 0 { + locationName := "X-Amz-Grant-Read" + encoder.SetHeader(locationName).String(*v.GrantRead) + } + + if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + locationName := "X-Amz-Grant-Read-Acp" + encoder.SetHeader(locationName).String(*v.GrantReadACP) + } + + if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + locationName := "X-Amz-Grant-Write" + encoder.SetHeader(locationName).String(*v.GrantWrite) + } + + if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + locationName := "X-Amz-Grant-Write-Acp" + encoder.SetHeader(locationName).String(*v.GrantWriteACP) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectLegalHold struct { +} + +func (*awsRestxml_serializeOpPutObjectLegalHold) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectLegalHoldInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := 
httpbinding.SplitURI("/{Bucket}/{Key+}?legal-hold") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.LegalHold != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LegalHold", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentObjectLockLegalHold(input.LegalHold, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegalHoldInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectLockConfiguration struct { +} + +func (*awsRestxml_serializeOpPutObjectLockConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, 
metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectLockConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?object-lock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.ObjectLockConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectLockConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentObjectLockConfiguration(input.ObjectLockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.Token != nil && len(*v.Token) > 0 { + locationName := "X-Amz-Bucket-Object-Lock-Token" + encoder.SetHeader(locationName).String(*v.Token) + } + + return nil +} + +type 
awsRestxml_serializeOpPutObjectRetention struct { +} + +func (*awsRestxml_serializeOpPutObjectRetention) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectRetentionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?retention") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Retention != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Retention", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentObjectLockRetention(input.Retention, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectRetentionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.BypassGovernanceRetention { + locationName := "X-Amz-Bypass-Governance-Retention" + encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := 
"X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutObjectTagging struct { +} + +func (*awsRestxml_serializeOpPutObjectTagging) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutObjectTaggingInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.Tagging != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tagging", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTaggingInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if 
len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpPutPublicAccessBlock struct { +} + +func (*awsRestxml_serializeOpPutPublicAccessBlock) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutPublicAccessBlockInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.PublicAccessBlockConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "PublicAccessBlockConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentPublicAccessBlockConfiguration(input.PublicAccessBlockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return 
next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAccessBlockInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpRestoreObject struct { +} + +func (*awsRestxml_serializeOpRestoreObject) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RestoreObjectInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?restore&x-id=RestoreObject") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsRestoreObjectInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.RestoreRequest != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RestoreRequest", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentRestoreRequest(input.RestoreRequest, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func 
awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.VersionId != nil { + encoder.SetQuery("versionId").String(*v.VersionId) + } + + return nil +} + +type awsRestxml_serializeOpSelectObjectContent struct { +} + +func (*awsRestxml_serializeOpSelectObjectContent) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SelectObjectContentInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?select&select-type=2&x-id=SelectObjectContent") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/xml") + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SelectObjectContentRequest", + }, + Attr: rootAttr, + } + root.Attr = append(root.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeOpDocumentSelectObjectContentInput(input, xmlEncoder.RootElement(root)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + if request, err = request.SetStream(bytes.NewReader(xmlEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectContentInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + return nil +} + +func awsRestxml_serializeOpDocumentSelectObjectContentInput(v *SelectObjectContentInput, value smithyxml.Value) error { + defer value.Close() + if v.Expression != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Expression", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Expression) + } + if len(v.ExpressionType) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExpressionType", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.ExpressionType)) + } + if v.InputSerialization != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "InputSerialization", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil { + return err + } + } + if v.OutputSerialization != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OutputSerialization", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil { + return err + } + } + if v.RequestProgress != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RequestProgress", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentRequestProgress(v.RequestProgress, el); err != nil { + return err + } + } + if v.ScanRange != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ScanRange", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentScanRange(v.ScanRange, el); err != nil { + 
return err + } + } + return nil +} + +type awsRestxml_serializeOpUploadPart struct { +} + +func (*awsRestxml_serializeOpUploadPart) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UploadPartInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=UploadPart") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsUploadPartInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/octet-stream") + } + + if input.Body != nil { + payload := input.Body + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + locationName := "X-Amz-Checksum-Crc32" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32) + } + + if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + locationName := "X-Amz-Checksum-Crc32c" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) + } + + if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + locationName := "X-Amz-Checksum-Sha1" + encoder.SetHeader(locationName).String(*v.ChecksumSHA1) + } + + if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + locationName := "X-Amz-Checksum-Sha256" + encoder.SetHeader(locationName).String(*v.ChecksumSHA256) + } + + if v.ContentLength != 0 { + locationName := "Content-Length" + encoder.SetHeader(locationName).Long(v.ContentLength) + } + + if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil && 
len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + { + encoder.SetQuery("partNumber").Integer(v.PartNumber) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpUploadPartCopy struct { +} + +func (*awsRestxml_serializeOpUploadPartCopy) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UploadPartCopyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=UploadPartCopy") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.Bucket == nil || len(*v.Bucket) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} + } + if v.Bucket != nil { + if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { + return err + } + } + + if v.CopySource != nil && len(*v.CopySource) > 0 { + locationName := "X-Amz-Copy-Source" + encoder.SetHeader(locationName).String(*v.CopySource) + } + + if v.CopySourceIfMatch != nil && 
len(*v.CopySourceIfMatch) > 0 { + locationName := "X-Amz-Copy-Source-If-Match" + encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) + } + + if v.CopySourceIfModifiedSince != nil { + locationName := "X-Amz-Copy-Source-If-Modified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) + } + + if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + locationName := "X-Amz-Copy-Source-If-None-Match" + encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) + } + + if v.CopySourceIfUnmodifiedSince != nil { + locationName := "X-Amz-Copy-Source-If-Unmodified-Since" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) + } + + if v.CopySourceRange != nil && len(*v.CopySourceRange) > 0 { + locationName := "X-Amz-Copy-Source-Range" + encoder.SetHeader(locationName).String(*v.CopySourceRange) + } + + if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) + } + + if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) + } + + if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) + } + + if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + locationName := "X-Amz-Source-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) + } + + if v.Key == nil || len(*v.Key) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} + } + if v.Key != nil { + if err := encoder.SetURI("Key").String(*v.Key); err != nil { + return err + } + } + + { + encoder.SetQuery("partNumber").Integer(v.PartNumber) + } + + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key" + encoder.SetHeader(locationName).String(*v.SSECustomerKey) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.UploadId != nil { + encoder.SetQuery("uploadId").String(*v.UploadId) + } + + return nil +} + +type awsRestxml_serializeOpWriteGetObjectResponse struct { +} + +func (*awsRestxml_serializeOpWriteGetObjectResponse) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out 
middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*WriteGetObjectResponseInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse?x-id=WriteGetObjectResponse") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/octet-stream") + } + + if input.Body != nil { + payload := input.Body + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetObjectResponseInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AcceptRanges != nil && len(*v.AcceptRanges) > 0 { + locationName := "X-Amz-Fwd-Header-Accept-Ranges" + encoder.SetHeader(locationName).String(*v.AcceptRanges) + } + + if v.BucketKeyEnabled { + locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + } + + if v.CacheControl != nil && len(*v.CacheControl) > 0 { + locationName := "X-Amz-Fwd-Header-Cache-Control" + encoder.SetHeader(locationName).String(*v.CacheControl) + } + + if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32) + } + + if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32c" + encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) + } + + if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha1" + encoder.SetHeader(locationName).String(*v.ChecksumSHA1) + } + + if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha256" + encoder.SetHeader(locationName).String(*v.ChecksumSHA256) + } + + if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + locationName := "X-Amz-Fwd-Header-Content-Disposition" + encoder.SetHeader(locationName).String(*v.ContentDisposition) + } + + if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + locationName := "X-Amz-Fwd-Header-Content-Encoding" + encoder.SetHeader(locationName).String(*v.ContentEncoding) + } + + if v.ContentLanguage != nil 
&& len(*v.ContentLanguage) > 0 { + locationName := "X-Amz-Fwd-Header-Content-Language" + encoder.SetHeader(locationName).String(*v.ContentLanguage) + } + + if v.ContentLength != 0 { + locationName := "Content-Length" + encoder.SetHeader(locationName).Long(v.ContentLength) + } + + if v.ContentRange != nil && len(*v.ContentRange) > 0 { + locationName := "X-Amz-Fwd-Header-Content-Range" + encoder.SetHeader(locationName).String(*v.ContentRange) + } + + if v.ContentType != nil && len(*v.ContentType) > 0 { + locationName := "X-Amz-Fwd-Header-Content-Type" + encoder.SetHeader(locationName).String(*v.ContentType) + } + + if v.DeleteMarker { + locationName := "X-Amz-Fwd-Header-X-Amz-Delete-Marker" + encoder.SetHeader(locationName).Boolean(v.DeleteMarker) + } + + if v.ErrorCode != nil && len(*v.ErrorCode) > 0 { + locationName := "X-Amz-Fwd-Error-Code" + encoder.SetHeader(locationName).String(*v.ErrorCode) + } + + if v.ErrorMessage != nil && len(*v.ErrorMessage) > 0 { + locationName := "X-Amz-Fwd-Error-Message" + encoder.SetHeader(locationName).String(*v.ErrorMessage) + } + + if v.ETag != nil && len(*v.ETag) > 0 { + locationName := "X-Amz-Fwd-Header-Etag" + encoder.SetHeader(locationName).String(*v.ETag) + } + + if v.Expiration != nil && len(*v.Expiration) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Expiration" + encoder.SetHeader(locationName).String(*v.Expiration) + } + + if v.Expires != nil { + locationName := "X-Amz-Fwd-Header-Expires" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) + } + + if v.LastModified != nil { + locationName := "X-Amz-Fwd-Header-Last-Modified" + encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.LastModified)) + } + + if v.Metadata != nil { + hv := encoder.Headers("X-Amz-Meta-") + for mapKey, mapVal := range v.Metadata { + if len(mapVal) > 0 { + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) + } + } + } + + if v.MissingMeta != 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Missing-Meta" + encoder.SetHeader(locationName).Integer(v.MissingMeta) + } + + if len(v.ObjectLockLegalHoldStatus) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Legal-Hold" + encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) + } + + if len(v.ObjectLockMode) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Mode" + encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) + } + + if v.ObjectLockRetainUntilDate != nil { + locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Retain-Until-Date" + encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) + } + + if v.PartsCount != 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Mp-Parts-Count" + encoder.SetHeader(locationName).Integer(v.PartsCount) + } + + if len(v.ReplicationStatus) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Replication-Status" + encoder.SetHeader(locationName).String(string(v.ReplicationStatus)) + } + + if len(v.RequestCharged) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Request-Charged" + encoder.SetHeader(locationName).String(string(v.RequestCharged)) + } + + if v.RequestRoute != nil && len(*v.RequestRoute) > 0 { + locationName := "X-Amz-Request-Route" + encoder.SetHeader(locationName).String(*v.RequestRoute) + } + + if v.RequestToken != nil && len(*v.RequestToken) > 0 { + locationName := "X-Amz-Request-Token" + encoder.SetHeader(locationName).String(*v.RequestToken) + } + + if v.Restore != nil && len(*v.Restore) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Restore" + 
encoder.SetHeader(locationName).String(*v.Restore) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Algorithm" + encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) + } + + if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Key-Md5" + encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) + } + + if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + + if v.StatusCode != 0 { + locationName := "X-Amz-Fwd-Status" + encoder.SetHeader(locationName).Integer(v.StatusCode) + } + + if len(v.StorageClass) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Storage-Class" + encoder.SetHeader(locationName).String(string(v.StorageClass)) + } + + if v.TagCount != 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Tagging-Count" + encoder.SetHeader(locationName).Integer(v.TagCount) + } + + if v.VersionId != nil && len(*v.VersionId) > 0 { + locationName := "X-Amz-Fwd-Header-X-Amz-Version-Id" + encoder.SetHeader(locationName).String(*v.VersionId) + } + + return nil +} + +func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIncompleteMultipartUpload, value smithyxml.Value) error { + defer value.Close() + if v.DaysAfterInitiation != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DaysAfterInitiation", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.DaysAfterInitiation) + } + return nil +} + +func awsRestxml_serializeDocumentAccelerateConfiguration(v *types.AccelerateConfiguration, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentAccessControlPolicy(v *types.AccessControlPolicy, value smithyxml.Value) error { + defer value.Close() + if v.Grants != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlList", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGrants(v.Grants, el); err != nil { + return err + } + } + if v.Owner != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Owner", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentOwner(v.Owner, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentAccessControlTranslation(v *types.AccessControlTranslation, value smithyxml.Value) error { + defer value.Close() + if len(v.Owner) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Owner", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Owner)) + } + return nil +} + +func awsRestxml_serializeDocumentAllowedHeaders(v []string, value smithyxml.Value) error { + 
var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(v[i]) + } + return nil +} + +func awsRestxml_serializeDocumentAllowedMethods(v []string, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(v[i]) + } + return nil +} + +func awsRestxml_serializeDocumentAllowedOrigins(v []string, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(v[i]) + } + return nil +} + +func awsRestxml_serializeDocumentAnalyticsAndOperator(v *types.AnalyticsAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentAnalyticsConfiguration(v *types.AnalyticsConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentAnalyticsFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if v.StorageClassAnalysis != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "StorageClassAnalysis", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentStorageClassAnalysis(v.StorageClassAnalysis, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentAnalyticsExportDestination(v *types.AnalyticsExportDestination, value smithyxml.Value) error { + defer value.Close() + if v.S3BucketDestination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3BucketDestination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v.S3BucketDestination, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentAnalyticsFilter(v types.AnalyticsFilter, value smithyxml.Value) error { + defer value.Close() + switch uv := v.(type) { + case *types.AnalyticsFilterMemberAnd: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentAnalyticsAndOperator(&uv.Value, av); err != nil { + return err + } + + case *types.AnalyticsFilterMemberPrefix: + customMemberNameAttr := 
[]smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.AnalyticsFilterMemberTag: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + return err + } + + default: + return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) + + } + return nil +} + +func awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination, value smithyxml.Value) error { + defer value.Close() + if v.Bucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Bucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Bucket) + } + if v.BucketAccountId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BucketAccountId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.BucketAccountId) + } + if len(v.Format) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Format", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Format)) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + return nil +} + +func awsRestxml_serializeDocumentBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Rules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Rule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentLifecycleRules(v.Rules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentBucketLoggingStatus(v *types.BucketLoggingStatus, value smithyxml.Value) error { + defer value.Close() + if v.LoggingEnabled != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LoggingEnabled", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLoggingEnabled(v.LoggingEnabled, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentCompletedMultipartUpload(v *types.CompletedMultipartUpload, value smithyxml.Value) error { + defer value.Close() + if v.Parts != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Part", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentCompletedPartList(v.Parts, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smithyxml.Value) error { + defer value.Close() + if v.ChecksumCRC32 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumCRC32", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + 
el.String(*v.ChecksumCRC32) + } + if v.ChecksumCRC32C != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumCRC32C", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ChecksumCRC32C) + } + if v.ChecksumSHA1 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumSHA1", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ChecksumSHA1) + } + if v.ChecksumSHA256 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumSHA256", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ChecksumSHA256) + } + if v.ETag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ETag", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ETag) + } + if v.PartNumber != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "PartNumber", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.PartNumber) + } + return nil +} + +func awsRestxml_serializeDocumentCompletedPartList(v []types.CompletedPart, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentCompletedPart(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentCondition(v *types.Condition, value smithyxml.Value) error { + defer value.Close() + if v.HttpErrorCodeReturnedEquals != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "HttpErrorCodeReturnedEquals", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.HttpErrorCodeReturnedEquals) + } + if v.KeyPrefixEquals != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "KeyPrefixEquals", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.KeyPrefixEquals) + } + return nil +} + +func awsRestxml_serializeDocumentCORSConfiguration(v *types.CORSConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.CORSRules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CORSRule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentCORSRules(v.CORSRules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Value) error { + defer value.Close() + if v.AllowedHeaders != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AllowedHeader", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentAllowedHeaders(v.AllowedHeaders, el); err != nil { + return err + } + } + if v.AllowedMethods != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AllowedMethod", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentAllowedMethods(v.AllowedMethods, el); err != nil { + return err + } + } + if v.AllowedOrigins != nil { + rootAttr := []smithyxml.Attr{} + 
root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AllowedOrigin", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentAllowedOrigins(v.AllowedOrigins, el); err != nil { + return err + } + } + if v.ExposeHeaders != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExposeHeader", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentExposeHeaders(v.ExposeHeaders, el); err != nil { + return err + } + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + if v.MaxAgeSeconds != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MaxAgeSeconds", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.MaxAgeSeconds) + } + return nil +} + +func awsRestxml_serializeDocumentCORSRules(v []types.CORSRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentCORSRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucketConfiguration, value smithyxml.Value) error { + defer value.Close() + if len(v.LocationConstraint) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LocationConstraint", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.LocationConstraint)) + } + return nil +} + +func awsRestxml_serializeDocumentCSVInput(v *types.CSVInput, value smithyxml.Value) error { + defer value.Close() + if v.AllowQuotedRecordDelimiter { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AllowQuotedRecordDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.AllowQuotedRecordDelimiter) + } + if v.Comments != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Comments", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Comments) + } + if v.FieldDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "FieldDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.FieldDelimiter) + } + if len(v.FileHeaderInfo) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "FileHeaderInfo", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.FileHeaderInfo)) + } + if v.QuoteCharacter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QuoteCharacter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.QuoteCharacter) + } + if v.QuoteEscapeCharacter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QuoteEscapeCharacter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.QuoteEscapeCharacter) + } + if v.RecordDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := 
smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RecordDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.RecordDelimiter) + } + return nil +} + +func awsRestxml_serializeDocumentCSVOutput(v *types.CSVOutput, value smithyxml.Value) error { + defer value.Close() + if v.FieldDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "FieldDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.FieldDelimiter) + } + if v.QuoteCharacter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QuoteCharacter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.QuoteCharacter) + } + if v.QuoteEscapeCharacter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QuoteEscapeCharacter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.QuoteEscapeCharacter) + } + if len(v.QuoteFields) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QuoteFields", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.QuoteFields)) + } + if v.RecordDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RecordDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.RecordDelimiter) + } + return nil +} + +func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, value smithyxml.Value) error { + defer value.Close() + if v.Days != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Days) + } + if len(v.Mode) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Mode", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Mode)) + } + if v.Years != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Years", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Years) + } + return nil +} + +func awsRestxml_serializeDocumentDelete(v *types.Delete, value smithyxml.Value) error { + defer value.Close() + if v.Objects != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Object", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentObjectIdentifierList(v.Objects, el); err != nil { + return err + } + } + if v.Quiet { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Quiet", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.Quiet) + } + return nil +} + +func awsRestxml_serializeDocumentDeleteMarkerReplication(v *types.DeleteMarkerReplication, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentDestination(v *types.Destination, value smithyxml.Value) error { + defer value.Close() + if 
v.AccessControlTranslation != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlTranslation", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentAccessControlTranslation(v.AccessControlTranslation, el); err != nil { + return err + } + } + if v.Account != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Account", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Account) + } + if v.Bucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Bucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Bucket) + } + if v.EncryptionConfiguration != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "EncryptionConfiguration", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentEncryptionConfiguration(v.EncryptionConfiguration, el); err != nil { + return err + } + } + if v.Metrics != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Metrics", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentMetrics(v.Metrics, el); err != nil { + return err + } + } + if v.ReplicationTime != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplicationTime", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationTime(v.ReplicationTime, el); err != nil { + return err + } + } + if len(v.StorageClass) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "StorageClass", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.StorageClass)) + } + return nil +} + +func awsRestxml_serializeDocumentEncryption(v *types.Encryption, value smithyxml.Value) error { + defer value.Close() + if len(v.EncryptionType) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "EncryptionType", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.EncryptionType)) + } + if v.KMSContext != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "KMSContext", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.KMSContext) + } + if v.KMSKeyId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "KMSKeyId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.KMSKeyId) + } + return nil +} + +func awsRestxml_serializeDocumentEncryptionConfiguration(v *types.EncryptionConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.ReplicaKmsKeyID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplicaKmsKeyID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ReplicaKmsKeyID) + } + return nil +} + +func awsRestxml_serializeDocumentErrorDocument(v *types.ErrorDocument, value smithyxml.Value) error { + defer value.Close() + if v.Key != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: 
"Key", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Key) + } + return nil +} + +func awsRestxml_serializeDocumentEventBridgeConfiguration(v *types.EventBridgeConfiguration, value smithyxml.Value) error { + defer value.Close() + return nil +} + +func awsRestxml_serializeDocumentEventList(v []types.Event, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(string(v[i])) + } + return nil +} + +func awsRestxml_serializeDocumentExistingObjectReplication(v *types.ExistingObjectReplication, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentExposeHeaders(v []string, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + am.String(v[i]) + } + return nil +} + +func awsRestxml_serializeDocumentFilterRule(v *types.FilterRule, value smithyxml.Value) error { + defer value.Close() + if len(v.Name) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Name", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Name)) + } + if v.Value != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Value", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Value) + } + return nil +} + +func awsRestxml_serializeDocumentFilterRuleList(v []types.FilterRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentFilterRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentGlacierJobParameters(v *types.GlacierJobParameters, value smithyxml.Value) error { + defer value.Close() + if len(v.Tier) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tier", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Tier)) + } + return nil +} + +func awsRestxml_serializeDocumentGrant(v *types.Grant, value smithyxml.Value) error { + defer value.Close() + if v.Grantee != nil { + rootAttr := []smithyxml.Attr{} + rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance")) + if len(v.Grantee.Type) > 0 { + var av string + av = string(v.Grantee.Type) + rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av)) + } + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Grantee", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil { + return err + } + } + if len(v.Permission) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Permission", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Permission)) + } + return nil +} + +func awsRestxml_serializeDocumentGrantee(v 
*types.Grantee, value smithyxml.Value) error { + defer value.Close() + if v.DisplayName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DisplayName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.DisplayName) + } + if v.EmailAddress != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "EmailAddress", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.EmailAddress) + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + if v.URI != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "URI", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.URI) + } + return nil +} + +func awsRestxml_serializeDocumentGrants(v []types.Grant, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Grant", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentGrant(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIndexDocument(v *types.IndexDocument, value smithyxml.Value) error { + defer value.Close() + if v.Suffix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Suffix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Suffix) + } + return nil +} + +func awsRestxml_serializeDocumentInputSerialization(v *types.InputSerialization, value smithyxml.Value) error { + defer value.Close() + if len(v.CompressionType) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CompressionType", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.CompressionType)) + } + if v.CSV != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CSV", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentCSVInput(v.CSV, el); err != nil { + return err + } + } + if v.JSON != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "JSON", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentJSONInput(v.JSON, el); err != nil { + return err + } + } + if v.Parquet != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Parquet", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentParquetInput(v.Parquet, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + 
} + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentIntelligentTieringFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + if v.Tierings != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tiering", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTieringList(v.Tierings, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentIntelligentTieringFilter(v *types.IntelligentTieringFilter, value smithyxml.Value) error { + defer value.Close() + if v.And != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentIntelligentTieringAndOperator(v.And, el); err != nil { + return err + } + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryConfiguration(v *types.InventoryConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Destination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Destination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryDestination(v.Destination, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if len(v.IncludedObjectVersions) > 0 { + rootAttr := []smithyxml.Attr{} + root 
:= smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IncludedObjectVersions", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.IncludedObjectVersions)) + } + { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IsEnabled", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.IsEnabled) + } + if v.OptionalFields != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OptionalFields", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryOptionalFields(v.OptionalFields, el); err != nil { + return err + } + } + if v.Schedule != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Schedule", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventorySchedule(v.Schedule, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryDestination(v *types.InventoryDestination, value smithyxml.Value) error { + defer value.Close() + if v.S3BucketDestination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3BucketDestination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryS3BucketDestination(v.S3BucketDestination, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryEncryption(v *types.InventoryEncryption, value smithyxml.Value) error { + defer value.Close() + if v.SSEKMS != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SSE-KMS", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSSEKMS(v.SSEKMS, el); err != nil { + return err + } + } + if v.SSES3 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SSE-S3", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSSES3(v.SSES3, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentInventoryFilter(v *types.InventoryFilter, value smithyxml.Value) error { + defer value.Close() + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + return nil +} + +func awsRestxml_serializeDocumentInventoryOptionalFields(v []types.InventoryOptionalField, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Field", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + am.String(string(v[i])) + } + return nil +} + +func awsRestxml_serializeDocumentInventoryS3BucketDestination(v *types.InventoryS3BucketDestination, value smithyxml.Value) error { + defer value.Close() + if v.AccountId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccountId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + 
el.String(*v.AccountId) + } + if v.Bucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Bucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Bucket) + } + if v.Encryption != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Encryption", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInventoryEncryption(v.Encryption, el); err != nil { + return err + } + } + if len(v.Format) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Format", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Format)) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + return nil +} + +func awsRestxml_serializeDocumentInventorySchedule(v *types.InventorySchedule, value smithyxml.Value) error { + defer value.Close() + if len(v.Frequency) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Frequency", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Frequency)) + } + return nil +} + +func awsRestxml_serializeDocumentJSONInput(v *types.JSONInput, value smithyxml.Value) error { + defer value.Close() + if len(v.Type) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Type", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Type)) + } + return nil +} + +func awsRestxml_serializeDocumentJSONOutput(v *types.JSONOutput, value smithyxml.Value) error { + defer value.Close() + if v.RecordDelimiter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RecordDelimiter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.RecordDelimiter) + } + return nil +} + +func awsRestxml_serializeDocumentLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Events != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Event", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if v.LambdaFunctionArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CloudFunction", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.LambdaFunctionArn) + } + return nil +} + +func awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration, value smithyxml.Value) error { + var array 
*smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentLambdaFunctionConfiguration(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiration, value smithyxml.Value) error { + defer value.Close() + if v.Date != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Date", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(smithytime.FormatDateTime(*v.Date)) + } + if v.Days != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Days) + } + if v.ExpiredObjectDeleteMarker { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExpiredObjectDeleteMarker", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.ExpiredObjectDeleteMarker) + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRule(v *types.LifecycleRule, value smithyxml.Value) error { + defer value.Close() + if v.AbortIncompleteMultipartUpload != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AbortIncompleteMultipartUpload", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v.AbortIncompleteMultipartUpload, el); err != nil { + return err + } + } + if v.Expiration != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Expiration", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLifecycleExpiration(v.Expiration, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLifecycleRuleFilter(v.Filter, el); err != nil { + return err + } + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + if v.NoncurrentVersionExpiration != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NoncurrentVersionExpiration", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentNoncurrentVersionExpiration(v.NoncurrentVersionExpiration, el); err != nil { + return err + } + } + if v.NoncurrentVersionTransitions != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NoncurrentVersionTransition", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v.NoncurrentVersionTransitions, el); err != nil { + return err + } + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := 
smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + if v.Transitions != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Transition", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTransitionList(v.Transitions, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.ObjectSizeGreaterThan != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeGreaterThan", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(v.ObjectSizeGreaterThan) + } + if v.ObjectSizeLessThan != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeLessThan", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(v.ObjectSizeLessThan) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRuleFilter(v types.LifecycleRuleFilter, value smithyxml.Value) error { + defer value.Close() + switch uv := v.(type) { + case *types.LifecycleRuleFilterMemberAnd: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(&uv.Value, av); err != nil { + return err + } + + case *types.LifecycleRuleFilterMemberObjectSizeGreaterThan: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeGreaterThan", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.Long(uv.Value) + + case *types.LifecycleRuleFilterMemberObjectSizeLessThan: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectSizeLessThan", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.Long(uv.Value) + + case *types.LifecycleRuleFilterMemberPrefix: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.LifecycleRuleFilterMemberTag: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + return err + } + + default: + return fmt.Errorf("attempted to serialize unknown 
member type %T for union %T", uv, v) + + } + return nil +} + +func awsRestxml_serializeDocumentLifecycleRules(v []types.LifecycleRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentLifecycleRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentLoggingEnabled(v *types.LoggingEnabled, value smithyxml.Value) error { + defer value.Close() + if v.TargetBucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TargetBucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TargetBucket) + } + if v.TargetGrants != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TargetGrants", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTargetGrants(v.TargetGrants, el); err != nil { + return err + } + } + if v.TargetPrefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TargetPrefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TargetPrefix) + } + return nil +} + +func awsRestxml_serializeDocumentMetadataEntry(v *types.MetadataEntry, value smithyxml.Value) error { + defer value.Close() + if v.Name != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Name", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Name) + } + if v.Value != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Value", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Value) + } + return nil +} + +func awsRestxml_serializeDocumentMetrics(v *types.Metrics, value smithyxml.Value) error { + defer value.Close() + if v.EventThreshold != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "EventThreshold", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationTimeValue(v.EventThreshold, el); err != nil { + return err + } + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentMetricsAndOperator(v *types.MetricsAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.AccessPointArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessPointArn", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.AccessPointArn) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func 
awsRestxml_serializeDocumentMetricsConfiguration(v *types.MetricsConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentMetricsFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + return nil +} + +func awsRestxml_serializeDocumentMetricsFilter(v types.MetricsFilter, value smithyxml.Value) error { + defer value.Close() + switch uv := v.(type) { + case *types.MetricsFilterMemberAccessPointArn: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessPointArn", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.MetricsFilterMemberAnd: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentMetricsAndOperator(&uv.Value, av); err != nil { + return err + } + + case *types.MetricsFilterMemberPrefix: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.MetricsFilterMemberTag: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + return err + } + + default: + return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) + + } + return nil +} + +func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.NoncurrentVersionExpiration, value smithyxml.Value) error { + defer value.Close() + if v.NewerNoncurrentVersions != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NewerNoncurrentVersions", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.NewerNoncurrentVersions) + } + if v.NoncurrentDays != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NoncurrentDays", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.NoncurrentDays) + } + return nil +} + +func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.NoncurrentVersionTransition, value smithyxml.Value) error { + defer value.Close() + if v.NewerNoncurrentVersions != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NewerNoncurrentVersions", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.NewerNoncurrentVersions) + } + if v.NoncurrentDays != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "NoncurrentDays", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + 
el.Integer(v.NoncurrentDays) + } + if len(v.StorageClass) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "StorageClass", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.StorageClass)) + } + return nil +} + +func awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v []types.NoncurrentVersionTransition, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentNoncurrentVersionTransition(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentNotificationConfiguration(v *types.NotificationConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.EventBridgeConfiguration != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "EventBridgeConfiguration", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentEventBridgeConfiguration(v.EventBridgeConfiguration, el); err != nil { + return err + } + } + if v.LambdaFunctionConfigurations != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CloudFunctionConfiguration", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations, el); err != nil { + return err + } + } + if v.QueueConfigurations != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "QueueConfiguration", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentQueueConfigurationList(v.QueueConfigurations, el); err != nil { + return err + } + } + if v.TopicConfigurations != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TopicConfiguration", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTopicConfigurationList(v.TopicConfigurations, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentNotificationConfigurationFilter(v *types.NotificationConfigurationFilter, value smithyxml.Value) error { + defer value.Close() + if v.Key != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3Key", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentS3KeyFilter(v.Key, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, value smithyxml.Value) error { + defer value.Close() + if v.Key != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Key", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Key) + } + if v.VersionId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "VersionId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.VersionId) + } + return nil +} + +func awsRestxml_serializeDocumentObjectIdentifierList(v []types.ObjectIdentifier, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + 
defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentObjectIdentifier(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentObjectLockConfiguration(v *types.ObjectLockConfiguration, value smithyxml.Value) error { + defer value.Close() + if len(v.ObjectLockEnabled) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectLockEnabled", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.ObjectLockEnabled)) + } + if v.Rule != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Rule", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentObjectLockRule(v.Rule, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentObjectLockLegalHold(v *types.ObjectLockLegalHold, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentObjectLockRetention(v *types.ObjectLockRetention, value smithyxml.Value) error { + defer value.Close() + if len(v.Mode) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Mode", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Mode)) + } + if v.RetainUntilDate != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RetainUntilDate", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(smithytime.FormatDateTime(*v.RetainUntilDate)) + } + return nil +} + +func awsRestxml_serializeDocumentObjectLockRule(v *types.ObjectLockRule, value smithyxml.Value) error { + defer value.Close() + if v.DefaultRetention != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DefaultRetention", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentDefaultRetention(v.DefaultRetention, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentOutputLocation(v *types.OutputLocation, value smithyxml.Value) error { + defer value.Close() + if v.S3 != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentS3Location(v.S3, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentOutputSerialization(v *types.OutputSerialization, value smithyxml.Value) error { + defer value.Close() + if v.CSV != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CSV", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentCSVOutput(v.CSV, el); err != nil { + return err + } + } + if v.JSON != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "JSON", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentJSONOutput(v.JSON, el); err != 
nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentOwner(v *types.Owner, value smithyxml.Value) error { + defer value.Close() + if v.DisplayName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DisplayName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.DisplayName) + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + return nil +} + +func awsRestxml_serializeDocumentOwnershipControls(v *types.OwnershipControls, value smithyxml.Value) error { + defer value.Close() + if v.Rules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Rule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentOwnershipControlsRules(v.Rules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentOwnershipControlsRule(v *types.OwnershipControlsRule, value smithyxml.Value) error { + defer value.Close() + if len(v.ObjectOwnership) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ObjectOwnership", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.ObjectOwnership)) + } + return nil +} + +func awsRestxml_serializeDocumentOwnershipControlsRules(v []types.OwnershipControlsRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentOwnershipControlsRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentParquetInput(v *types.ParquetInput, value smithyxml.Value) error { + defer value.Close() + return nil +} + +func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicAccessBlockConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.BlockPublicAcls { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BlockPublicAcls", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.BlockPublicAcls) + } + if v.BlockPublicPolicy { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BlockPublicPolicy", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.BlockPublicPolicy) + } + if v.IgnorePublicAcls { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IgnorePublicAcls", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.IgnorePublicAcls) + } + if v.RestrictPublicBuckets { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RestrictPublicBuckets", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.RestrictPublicBuckets) + } + return nil +} + +func awsRestxml_serializeDocumentQueueConfiguration(v *types.QueueConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Events != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Event", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := 
awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if v.QueueArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Queue", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.QueueArn) + } + return nil +} + +func awsRestxml_serializeDocumentQueueConfigurationList(v []types.QueueConfiguration, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentQueueConfiguration(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentRedirect(v *types.Redirect, value smithyxml.Value) error { + defer value.Close() + if v.HostName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "HostName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.HostName) + } + if v.HttpRedirectCode != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "HttpRedirectCode", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.HttpRedirectCode) + } + if len(v.Protocol) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Protocol", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Protocol)) + } + if v.ReplaceKeyPrefixWith != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplaceKeyPrefixWith", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ReplaceKeyPrefixWith) + } + if v.ReplaceKeyWith != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplaceKeyWith", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ReplaceKeyWith) + } + return nil +} + +func awsRestxml_serializeDocumentRedirectAllRequestsTo(v *types.RedirectAllRequestsTo, value smithyxml.Value) error { + defer value.Close() + if v.HostName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "HostName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.HostName) + } + if len(v.Protocol) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Protocol", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Protocol)) + } + return nil +} + +func awsRestxml_serializeDocumentReplicaModifications(v *types.ReplicaModifications, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := 
value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentReplicationConfiguration(v *types.ReplicationConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Role != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Role", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Role) + } + if v.Rules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Rule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentReplicationRules(v.Rules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentReplicationRule(v *types.ReplicationRule, value smithyxml.Value) error { + defer value.Close() + if v.DeleteMarkerReplication != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DeleteMarkerReplication", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentDeleteMarkerReplication(v.DeleteMarkerReplication, el); err != nil { + return err + } + } + if v.Destination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Destination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentDestination(v.Destination, el); err != nil { + return err + } + } + if v.ExistingObjectReplication != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExistingObjectReplication", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentExistingObjectReplication(v.ExistingObjectReplication, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationRuleFilter(v.Filter, el); err != nil { + return err + } + } + if v.ID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ID) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Priority != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Priority", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Priority) + } + if v.SourceSelectionCriteria != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SourceSelectionCriteria", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSourceSelectionCriteria(v.SourceSelectionCriteria, el); err != nil { + return err + } + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentReplicationRuleAndOperator(v 
*types.ReplicationRuleAndOperator, value smithyxml.Value) error { + defer value.Close() + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tags != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentReplicationRuleFilter(v types.ReplicationRuleFilter, value smithyxml.Value) error { + defer value.Close() + switch uv := v.(type) { + case *types.ReplicationRuleFilterMemberAnd: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "And", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(&uv.Value, av); err != nil { + return err + } + + case *types.ReplicationRuleFilterMemberPrefix: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + + case *types.ReplicationRuleFilterMemberTag: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + return err + } + + default: + return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) + + } + return nil +} + +func awsRestxml_serializeDocumentReplicationRules(v []types.ReplicationRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentReplicationRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentReplicationTime(v *types.ReplicationTime, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + if v.Time != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Time", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationTimeValue(v.Time, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentReplicationTimeValue(v *types.ReplicationTimeValue, value smithyxml.Value) error { + defer value.Close() + if v.Minutes != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Minutes", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Minutes) + } + return nil +} + +func awsRestxml_serializeDocumentRequestPaymentConfiguration(v *types.RequestPaymentConfiguration, value smithyxml.Value) error { + defer value.Close() + if len(v.Payer) > 0 { + rootAttr := 
[]smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Payer", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Payer)) + } + return nil +} + +func awsRestxml_serializeDocumentRequestProgress(v *types.RequestProgress, value smithyxml.Value) error { + defer value.Close() + if v.Enabled { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Enabled", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.Enabled) + } + return nil +} + +func awsRestxml_serializeDocumentRestoreRequest(v *types.RestoreRequest, value smithyxml.Value) error { + defer value.Close() + if v.Days != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Days) + } + if v.Description != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Description", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Description) + } + if v.GlacierJobParameters != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "GlacierJobParameters", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGlacierJobParameters(v.GlacierJobParameters, el); err != nil { + return err + } + } + if v.OutputLocation != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OutputLocation", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentOutputLocation(v.OutputLocation, el); err != nil { + return err + } + } + if v.SelectParameters != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SelectParameters", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSelectParameters(v.SelectParameters, el); err != nil { + return err + } + } + if len(v.Tier) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tier", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Tier)) + } + if len(v.Type) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Type", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Type)) + } + return nil +} + +func awsRestxml_serializeDocumentRoutingRule(v *types.RoutingRule, value smithyxml.Value) error { + defer value.Close() + if v.Condition != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Condition", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentCondition(v.Condition, el); err != nil { + return err + } + } + if v.Redirect != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Redirect", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentRedirect(v.Redirect, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentRoutingRules(v []types.RoutingRule, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + 
customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RoutingRule", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentRoutingRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentS3KeyFilter(v *types.S3KeyFilter, value smithyxml.Value) error { + defer value.Close() + if v.FilterRules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "FilterRule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentFilterRuleList(v.FilterRules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml.Value) error { + defer value.Close() + if v.AccessControlList != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessControlList", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGrants(v.AccessControlList, el); err != nil { + return err + } + } + if v.BucketName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BucketName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.BucketName) + } + if len(v.CannedACL) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "CannedACL", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.CannedACL)) + } + if v.Encryption != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Encryption", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentEncryption(v.Encryption, el); err != nil { + return err + } + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Prefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if len(v.StorageClass) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "StorageClass", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.StorageClass)) + } + if v.Tagging != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tagging", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTagging(v.Tagging, el); err != nil { + return err + } + } + if v.UserMetadata != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "UserMetadata", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentUserMetadata(v.UserMetadata, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error { + defer value.Close() + if v.End != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "End", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(v.End) + } + if v.Start != 0 { + rootAttr := []smithyxml.Attr{} + root := 
smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Start", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(v.Start) + } + return nil +} + +func awsRestxml_serializeDocumentSelectParameters(v *types.SelectParameters, value smithyxml.Value) error { + defer value.Close() + if v.Expression != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Expression", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Expression) + } + if len(v.ExpressionType) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ExpressionType", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.ExpressionType)) + } + if v.InputSerialization != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "InputSerialization", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil { + return err + } + } + if v.OutputSerialization != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OutputSerialization", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault, value smithyxml.Value) error { + defer value.Close() + if v.KMSMasterKeyID != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "KMSMasterKeyID", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.KMSMasterKeyID) + } + if len(v.SSEAlgorithm) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SSEAlgorithm", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.SSEAlgorithm)) + } + return nil +} + +func awsRestxml_serializeDocumentServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Rules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Rule", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentServerSideEncryptionRules(v.Rules, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEncryptionRule, value smithyxml.Value) error { + defer value.Close() + if v.ApplyServerSideEncryptionByDefault != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ApplyServerSideEncryptionByDefault", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault, el); err != nil { + return err + } + } + if v.BucketKeyEnabled { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "BucketKeyEnabled", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Boolean(v.BucketKeyEnabled) + } + return nil +} + +func awsRestxml_serializeDocumentServerSideEncryptionRules(v []types.ServerSideEncryptionRule, value 
smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentServerSideEncryptionRule(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentSourceSelectionCriteria(v *types.SourceSelectionCriteria, value smithyxml.Value) error { + defer value.Close() + if v.ReplicaModifications != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ReplicaModifications", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicaModifications(v.ReplicaModifications, el); err != nil { + return err + } + } + if v.SseKmsEncryptedObjects != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SseKmsEncryptedObjects", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSseKmsEncryptedObjects(v.SseKmsEncryptedObjects, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentSSEKMS(v *types.SSEKMS, value smithyxml.Value) error { + defer value.Close() + if v.KeyId != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "KeyId", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.KeyId) + } + return nil +} + +func awsRestxml_serializeDocumentSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects, value smithyxml.Value) error { + defer value.Close() + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentSSES3(v *types.SSES3, value smithyxml.Value) error { + defer value.Close() + return nil +} + +func awsRestxml_serializeDocumentStorageClassAnalysis(v *types.StorageClassAnalysis, value smithyxml.Value) error { + defer value.Close() + if v.DataExport != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DataExport", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v.DataExport, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport, value smithyxml.Value) error { + defer value.Close() + if v.Destination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Destination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentAnalyticsExportDestination(v.Destination, el); err != nil { + return err + } + } + if len(v.OutputSchemaVersion) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "OutputSchemaVersion", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.OutputSchemaVersion)) + } + return nil +} + +func awsRestxml_serializeDocumentTag(v *types.Tag, value smithyxml.Value) error { + defer value.Close() + if v.Key != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Key", + }, + Attr: rootAttr, + } + el := 
value.MemberElement(root) + el.String(*v.Key) + } + if v.Value != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Value", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Value) + } + return nil +} + +func awsRestxml_serializeDocumentTagging(v *types.Tagging, value smithyxml.Value) error { + defer value.Close() + if v.TagSet != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TagSet", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTagSet(v.TagSet, el); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentTagSet(v []types.Tag, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Tag", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentTag(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentTargetGrant(v *types.TargetGrant, value smithyxml.Value) error { + defer value.Close() + if v.Grantee != nil { + rootAttr := []smithyxml.Attr{} + rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance")) + if len(v.Grantee.Type) > 0 { + var av string + av = string(v.Grantee.Type) + rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av)) + } + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Grantee", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil { + return err + } + } + if len(v.Permission) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Permission", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Permission)) + } + return nil +} + +func awsRestxml_serializeDocumentTargetGrants(v []types.TargetGrant, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Grant", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentTargetGrant(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value) error { + defer value.Close() + if len(v.AccessTier) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessTier", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.AccessTier)) + } + { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Days) + } + return nil +} + +func awsRestxml_serializeDocumentTieringList(v []types.Tiering, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range 
v { + am := array.Member() + if err := awsRestxml_serializeDocumentTiering(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentTopicConfiguration(v *types.TopicConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.Events != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Event", + }, + Attr: rootAttr, + } + el := value.FlattenedElement(root) + if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { + return err + } + } + if v.Filter != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Filter", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { + return err + } + } + if v.Id != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Id", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Id) + } + if v.TopicArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Topic", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TopicArn) + } + return nil +} + +func awsRestxml_serializeDocumentTopicConfigurationList(v []types.TopicConfiguration, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentTopicConfiguration(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml.Value) error { + defer value.Close() + if v.Date != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Date", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(smithytime.FormatDateTime(*v.Date)) + } + if v.Days != 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Days", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Integer(v.Days) + } + if len(v.StorageClass) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "StorageClass", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.StorageClass)) + } + return nil +} + +func awsRestxml_serializeDocumentTransitionList(v []types.Transition, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + array = value.Array() + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentTransition(&v[i], am); err != nil { + return err + } + } + return nil +} + +func awsRestxml_serializeDocumentUserMetadata(v []types.MetadataEntry, value smithyxml.Value) error { + var array *smithyxml.Array + if !value.IsFlattened() { + defer value.Close() + } + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MetadataEntry", + }, + Attr: customMemberNameAttr, + } + array = value.ArrayWithCustomName(customMemberName) + for i := range v { + am := array.Member() + if err := awsRestxml_serializeDocumentMetadataEntry(&v[i], am); err != nil { + return err + } + } + return nil +} + +func 
awsRestxml_serializeDocumentVersioningConfiguration(v *types.VersioningConfiguration, value smithyxml.Value) error { + defer value.Close() + if len(v.MFADelete) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MfaDelete", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.MFADelete)) + } + if len(v.Status) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Status", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Status)) + } + return nil +} + +func awsRestxml_serializeDocumentWebsiteConfiguration(v *types.WebsiteConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.ErrorDocument != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ErrorDocument", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentErrorDocument(v.ErrorDocument, el); err != nil { + return err + } + } + if v.IndexDocument != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "IndexDocument", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentIndexDocument(v.IndexDocument, el); err != nil { + return err + } + } + if v.RedirectAllRequestsTo != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RedirectAllRequestsTo", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentRedirectAllRequestsTo(v.RedirectAllRequestsTo, el); err != nil { + return err + } + } + if v.RoutingRules != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "RoutingRules", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentRoutingRules(v.RoutingRules, el); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go new file mode 100644 index 000000000..5b5254083 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go @@ -0,0 +1,1200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type AnalyticsS3ExportFileFormat string + +// Enum values for AnalyticsS3ExportFileFormat +const ( + AnalyticsS3ExportFileFormatCsv AnalyticsS3ExportFileFormat = "CSV" +) + +// Values returns all known values for AnalyticsS3ExportFileFormat. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat { + return []AnalyticsS3ExportFileFormat{ + "CSV", + } +} + +type ArchiveStatus string + +// Enum values for ArchiveStatus +const ( + ArchiveStatusArchiveAccess ArchiveStatus = "ARCHIVE_ACCESS" + ArchiveStatusDeepArchiveAccess ArchiveStatus = "DEEP_ARCHIVE_ACCESS" +) + +// Values returns all known values for ArchiveStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (ArchiveStatus) Values() []ArchiveStatus { + return []ArchiveStatus{ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS", + } +} + +type BucketAccelerateStatus string + +// Enum values for BucketAccelerateStatus +const ( + BucketAccelerateStatusEnabled BucketAccelerateStatus = "Enabled" + BucketAccelerateStatusSuspended BucketAccelerateStatus = "Suspended" +) + +// Values returns all known values for BucketAccelerateStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (BucketAccelerateStatus) Values() []BucketAccelerateStatus { + return []BucketAccelerateStatus{ + "Enabled", + "Suspended", + } +} + +type BucketCannedACL string + +// Enum values for BucketCannedACL +const ( + BucketCannedACLPrivate BucketCannedACL = "private" + BucketCannedACLPublicRead BucketCannedACL = "public-read" + BucketCannedACLPublicReadWrite BucketCannedACL = "public-read-write" + BucketCannedACLAuthenticatedRead BucketCannedACL = "authenticated-read" +) + +// Values returns all known values for BucketCannedACL. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (BucketCannedACL) Values() []BucketCannedACL { + return []BucketCannedACL{ + "private", + "public-read", + "public-read-write", + "authenticated-read", + } +} + +type BucketLocationConstraint string + +// Enum values for BucketLocationConstraint +const ( + BucketLocationConstraintAfSouth1 BucketLocationConstraint = "af-south-1" + BucketLocationConstraintApEast1 BucketLocationConstraint = "ap-east-1" + BucketLocationConstraintApNortheast1 BucketLocationConstraint = "ap-northeast-1" + BucketLocationConstraintApNortheast2 BucketLocationConstraint = "ap-northeast-2" + BucketLocationConstraintApNortheast3 BucketLocationConstraint = "ap-northeast-3" + BucketLocationConstraintApSouth1 BucketLocationConstraint = "ap-south-1" + BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1" + BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2" + BucketLocationConstraintCaCentral1 BucketLocationConstraint = "ca-central-1" + BucketLocationConstraintCnNorth1 BucketLocationConstraint = "cn-north-1" + BucketLocationConstraintCnNorthwest1 BucketLocationConstraint = "cn-northwest-1" + BucketLocationConstraintEu BucketLocationConstraint = "EU" + BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1" + BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1" + BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1" + BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1" + BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2" + BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3" + BucketLocationConstraintMeSouth1 BucketLocationConstraint = "me-south-1" + BucketLocationConstraintSaEast1 BucketLocationConstraint = "sa-east-1" + BucketLocationConstraintUsEast2 BucketLocationConstraint = "us-east-2" + BucketLocationConstraintUsGovEast1 BucketLocationConstraint = "us-gov-east-1" + BucketLocationConstraintUsGovWest1 BucketLocationConstraint = "us-gov-west-1" + BucketLocationConstraintUsWest1 BucketLocationConstraint = "us-west-1" + BucketLocationConstraintUsWest2 BucketLocationConstraint = "us-west-2" +) + +// Values returns all known values for BucketLocationConstraint. 
Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (BucketLocationConstraint) Values() []BucketLocationConstraint { + return []BucketLocationConstraint{ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "cn-north-1", + "cn-northwest-1", + "EU", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-2", + "us-gov-east-1", + "us-gov-west-1", + "us-west-1", + "us-west-2", + } +} + +type BucketLogsPermission string + +// Enum values for BucketLogsPermission +const ( + BucketLogsPermissionFullControl BucketLogsPermission = "FULL_CONTROL" + BucketLogsPermissionRead BucketLogsPermission = "READ" + BucketLogsPermissionWrite BucketLogsPermission = "WRITE" +) + +// Values returns all known values for BucketLogsPermission. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (BucketLogsPermission) Values() []BucketLogsPermission { + return []BucketLogsPermission{ + "FULL_CONTROL", + "READ", + "WRITE", + } +} + +type BucketVersioningStatus string + +// Enum values for BucketVersioningStatus +const ( + BucketVersioningStatusEnabled BucketVersioningStatus = "Enabled" + BucketVersioningStatusSuspended BucketVersioningStatus = "Suspended" +) + +// Values returns all known values for BucketVersioningStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (BucketVersioningStatus) Values() []BucketVersioningStatus { + return []BucketVersioningStatus{ + "Enabled", + "Suspended", + } +} + +type ChecksumAlgorithm string + +// Enum values for ChecksumAlgorithm +const ( + ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32" + ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C" + ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1" + ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" +) + +// Values returns all known values for ChecksumAlgorithm. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ChecksumAlgorithm) Values() []ChecksumAlgorithm { + return []ChecksumAlgorithm{ + "CRC32", + "CRC32C", + "SHA1", + "SHA256", + } +} + +type ChecksumMode string + +// Enum values for ChecksumMode +const ( + ChecksumModeEnabled ChecksumMode = "ENABLED" +) + +// Values returns all known values for ChecksumMode. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (ChecksumMode) Values() []ChecksumMode { + return []ChecksumMode{ + "ENABLED", + } +} + +type CompressionType string + +// Enum values for CompressionType +const ( + CompressionTypeNone CompressionType = "NONE" + CompressionTypeGzip CompressionType = "GZIP" + CompressionTypeBzip2 CompressionType = "BZIP2" +) + +// Values returns all known values for CompressionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. 
The +// ordering of this slice is not guaranteed to be stable across updates. +func (CompressionType) Values() []CompressionType { + return []CompressionType{ + "NONE", + "GZIP", + "BZIP2", + } +} + +type DeleteMarkerReplicationStatus string + +// Enum values for DeleteMarkerReplicationStatus +const ( + DeleteMarkerReplicationStatusEnabled DeleteMarkerReplicationStatus = "Enabled" + DeleteMarkerReplicationStatusDisabled DeleteMarkerReplicationStatus = "Disabled" +) + +// Values returns all known values for DeleteMarkerReplicationStatus. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus { + return []DeleteMarkerReplicationStatus{ + "Enabled", + "Disabled", + } +} + +type EncodingType string + +// Enum values for EncodingType +const ( + EncodingTypeUrl EncodingType = "url" +) + +// Values returns all known values for EncodingType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (EncodingType) Values() []EncodingType { + return []EncodingType{ + "url", + } +} + +type Event string + +// Values returns all known values for Event. Note that this can be expanded in the +// future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Event) Values() []Event { + return []Event{ + "s3:ReducedRedundancyLostObject", + "s3:ObjectCreated:*", + "s3:ObjectCreated:Put", + "s3:ObjectCreated:Post", + "s3:ObjectCreated:Copy", + "s3:ObjectCreated:CompleteMultipartUpload", + "s3:ObjectRemoved:*", + "s3:ObjectRemoved:Delete", + "s3:ObjectRemoved:DeleteMarkerCreated", + "s3:ObjectRestore:*", + "s3:ObjectRestore:Post", + "s3:ObjectRestore:Completed", + "s3:Replication:*", + "s3:Replication:OperationFailedReplication", + "s3:Replication:OperationNotTracked", + "s3:Replication:OperationMissedThreshold", + "s3:Replication:OperationReplicatedAfterThreshold", + "s3:ObjectRestore:Delete", + "s3:LifecycleTransition", + "s3:IntelligentTiering", + "s3:ObjectAcl:Put", + "s3:LifecycleExpiration:*", + "s3:LifecycleExpiration:Delete", + "s3:LifecycleExpiration:DeleteMarkerCreated", + "s3:ObjectTagging:*", + "s3:ObjectTagging:Put", + "s3:ObjectTagging:Delete", + } +} + +type ExistingObjectReplicationStatus string + +// Enum values for ExistingObjectReplicationStatus +const ( + ExistingObjectReplicationStatusEnabled ExistingObjectReplicationStatus = "Enabled" + ExistingObjectReplicationStatusDisabled ExistingObjectReplicationStatus = "Disabled" +) + +// Values returns all known values for ExistingObjectReplicationStatus. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus { + return []ExistingObjectReplicationStatus{ + "Enabled", + "Disabled", + } +} + +type ExpirationStatus string + +// Enum values for ExpirationStatus +const ( + ExpirationStatusEnabled ExpirationStatus = "Enabled" + ExpirationStatusDisabled ExpirationStatus = "Disabled" +) + +// Values returns all known values for ExpirationStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. 
The +// ordering of this slice is not guaranteed to be stable across updates. +func (ExpirationStatus) Values() []ExpirationStatus { + return []ExpirationStatus{ + "Enabled", + "Disabled", + } +} + +type ExpressionType string + +// Enum values for ExpressionType +const ( + ExpressionTypeSql ExpressionType = "SQL" +) + +// Values returns all known values for ExpressionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ExpressionType) Values() []ExpressionType { + return []ExpressionType{ + "SQL", + } +} + +type FileHeaderInfo string + +// Enum values for FileHeaderInfo +const ( + FileHeaderInfoUse FileHeaderInfo = "USE" + FileHeaderInfoIgnore FileHeaderInfo = "IGNORE" + FileHeaderInfoNone FileHeaderInfo = "NONE" +) + +// Values returns all known values for FileHeaderInfo. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (FileHeaderInfo) Values() []FileHeaderInfo { + return []FileHeaderInfo{ + "USE", + "IGNORE", + "NONE", + } +} + +type FilterRuleName string + +// Enum values for FilterRuleName +const ( + FilterRuleNamePrefix FilterRuleName = "prefix" + FilterRuleNameSuffix FilterRuleName = "suffix" +) + +// Values returns all known values for FilterRuleName. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (FilterRuleName) Values() []FilterRuleName { + return []FilterRuleName{ + "prefix", + "suffix", + } +} + +type IntelligentTieringAccessTier string + +// Enum values for IntelligentTieringAccessTier +const ( + IntelligentTieringAccessTierArchiveAccess IntelligentTieringAccessTier = "ARCHIVE_ACCESS" + IntelligentTieringAccessTierDeepArchiveAccess IntelligentTieringAccessTier = "DEEP_ARCHIVE_ACCESS" +) + +// Values returns all known values for IntelligentTieringAccessTier. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier { + return []IntelligentTieringAccessTier{ + "ARCHIVE_ACCESS", + "DEEP_ARCHIVE_ACCESS", + } +} + +type IntelligentTieringStatus string + +// Enum values for IntelligentTieringStatus +const ( + IntelligentTieringStatusEnabled IntelligentTieringStatus = "Enabled" + IntelligentTieringStatusDisabled IntelligentTieringStatus = "Disabled" +) + +// Values returns all known values for IntelligentTieringStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (IntelligentTieringStatus) Values() []IntelligentTieringStatus { + return []IntelligentTieringStatus{ + "Enabled", + "Disabled", + } +} + +type InventoryFormat string + +// Enum values for InventoryFormat +const ( + InventoryFormatCsv InventoryFormat = "CSV" + InventoryFormatOrc InventoryFormat = "ORC" + InventoryFormatParquet InventoryFormat = "Parquet" +) + +// Values returns all known values for InventoryFormat. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
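Since these generated enums are plain string types, the compiler accepts any value; Values() is the only way to check a candidate string against the set this client version knows about. A minimal sketch of that check, using the FileHeaderInfo enum above (the helper name and inputs are illustrative, not part of this diff):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// isKnownFileHeaderInfo reports whether s matches one of the FileHeaderInfo
// constants compiled into this client version. Values() has a value receiver,
// so it can be called on the zero value.
func isKnownFileHeaderInfo(s string) bool {
	for _, v := range types.FileHeaderInfo("").Values() {
		if string(v) == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isKnownFileHeaderInfo("IGNORE")) // true
	fmt.Println(isKnownFileHeaderInfo("ignore")) // false: comparison is case-sensitive
}
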
+func (InventoryFormat) Values() []InventoryFormat { + return []InventoryFormat{ + "CSV", + "ORC", + "Parquet", + } +} + +type InventoryFrequency string + +// Enum values for InventoryFrequency +const ( + InventoryFrequencyDaily InventoryFrequency = "Daily" + InventoryFrequencyWeekly InventoryFrequency = "Weekly" +) + +// Values returns all known values for InventoryFrequency. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (InventoryFrequency) Values() []InventoryFrequency { + return []InventoryFrequency{ + "Daily", + "Weekly", + } +} + +type InventoryIncludedObjectVersions string + +// Enum values for InventoryIncludedObjectVersions +const ( + InventoryIncludedObjectVersionsAll InventoryIncludedObjectVersions = "All" + InventoryIncludedObjectVersionsCurrent InventoryIncludedObjectVersions = "Current" +) + +// Values returns all known values for InventoryIncludedObjectVersions. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions { + return []InventoryIncludedObjectVersions{ + "All", + "Current", + } +} + +type InventoryOptionalField string + +// Enum values for InventoryOptionalField +const ( + InventoryOptionalFieldSize InventoryOptionalField = "Size" + InventoryOptionalFieldLastModifiedDate InventoryOptionalField = "LastModifiedDate" + InventoryOptionalFieldStorageClass InventoryOptionalField = "StorageClass" + InventoryOptionalFieldETag InventoryOptionalField = "ETag" + InventoryOptionalFieldIsMultipartUploaded InventoryOptionalField = "IsMultipartUploaded" + InventoryOptionalFieldReplicationStatus InventoryOptionalField = "ReplicationStatus" + InventoryOptionalFieldEncryptionStatus InventoryOptionalField = "EncryptionStatus" + InventoryOptionalFieldObjectLockRetainUntilDate InventoryOptionalField = "ObjectLockRetainUntilDate" + InventoryOptionalFieldObjectLockMode InventoryOptionalField = "ObjectLockMode" + InventoryOptionalFieldObjectLockLegalHoldStatus InventoryOptionalField = "ObjectLockLegalHoldStatus" + InventoryOptionalFieldIntelligentTieringAccessTier InventoryOptionalField = "IntelligentTieringAccessTier" + InventoryOptionalFieldBucketKeyStatus InventoryOptionalField = "BucketKeyStatus" + InventoryOptionalFieldChecksumAlgorithm InventoryOptionalField = "ChecksumAlgorithm" +) + +// Values returns all known values for InventoryOptionalField. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (InventoryOptionalField) Values() []InventoryOptionalField { + return []InventoryOptionalField{ + "Size", + "LastModifiedDate", + "StorageClass", + "ETag", + "IsMultipartUploaded", + "ReplicationStatus", + "EncryptionStatus", + "ObjectLockRetainUntilDate", + "ObjectLockMode", + "ObjectLockLegalHoldStatus", + "IntelligentTieringAccessTier", + "BucketKeyStatus", + "ChecksumAlgorithm", + } +} + +type JSONType string + +// Enum values for JSONType +const ( + JSONTypeDocument JSONType = "DOCUMENT" + JSONTypeLines JSONType = "LINES" +) + +// Values returns all known values for JSONType. Note that this can be expanded in +// the future, and so it is only as up to date as the client. 
The ordering of this +// slice is not guaranteed to be stable across updates. +func (JSONType) Values() []JSONType { + return []JSONType{ + "DOCUMENT", + "LINES", + } +} + +type MetadataDirective string + +// Enum values for MetadataDirective +const ( + MetadataDirectiveCopy MetadataDirective = "COPY" + MetadataDirectiveReplace MetadataDirective = "REPLACE" +) + +// Values returns all known values for MetadataDirective. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MetadataDirective) Values() []MetadataDirective { + return []MetadataDirective{ + "COPY", + "REPLACE", + } +} + +type MetricsStatus string + +// Enum values for MetricsStatus +const ( + MetricsStatusEnabled MetricsStatus = "Enabled" + MetricsStatusDisabled MetricsStatus = "Disabled" +) + +// Values returns all known values for MetricsStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MetricsStatus) Values() []MetricsStatus { + return []MetricsStatus{ + "Enabled", + "Disabled", + } +} + +type MFADelete string + +// Enum values for MFADelete +const ( + MFADeleteEnabled MFADelete = "Enabled" + MFADeleteDisabled MFADelete = "Disabled" +) + +// Values returns all known values for MFADelete. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (MFADelete) Values() []MFADelete { + return []MFADelete{ + "Enabled", + "Disabled", + } +} + +type MFADeleteStatus string + +// Enum values for MFADeleteStatus +const ( + MFADeleteStatusEnabled MFADeleteStatus = "Enabled" + MFADeleteStatusDisabled MFADeleteStatus = "Disabled" +) + +// Values returns all known values for MFADeleteStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MFADeleteStatus) Values() []MFADeleteStatus { + return []MFADeleteStatus{ + "Enabled", + "Disabled", + } +} + +type ObjectAttributes string + +// Enum values for ObjectAttributes +const ( + ObjectAttributesEtag ObjectAttributes = "ETag" + ObjectAttributesChecksum ObjectAttributes = "Checksum" + ObjectAttributesObjectParts ObjectAttributes = "ObjectParts" + ObjectAttributesStorageClass ObjectAttributes = "StorageClass" + ObjectAttributesObjectSize ObjectAttributes = "ObjectSize" +) + +// Values returns all known values for ObjectAttributes. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
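The ObjectAttributes values above are consumed as a slice that selects which attributes S3 should return, for example through the GetObjectAttributes API. A short illustrative sketch using only the types from this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Ask only for the attributes needed; each constant mirrors one field
	// S3 can return for the object.
	attrs := []types.ObjectAttributes{
		types.ObjectAttributesEtag,
		types.ObjectAttributesObjectSize,
		types.ObjectAttributesStorageClass,
	}
	for _, a := range attrs {
		fmt.Println(a)
	}
}
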
+func (ObjectAttributes) Values() []ObjectAttributes { + return []ObjectAttributes{ + "ETag", + "Checksum", + "ObjectParts", + "StorageClass", + "ObjectSize", + } +} + +type ObjectCannedACL string + +// Enum values for ObjectCannedACL +const ( + ObjectCannedACLPrivate ObjectCannedACL = "private" + ObjectCannedACLPublicRead ObjectCannedACL = "public-read" + ObjectCannedACLPublicReadWrite ObjectCannedACL = "public-read-write" + ObjectCannedACLAuthenticatedRead ObjectCannedACL = "authenticated-read" + ObjectCannedACLAwsExecRead ObjectCannedACL = "aws-exec-read" + ObjectCannedACLBucketOwnerRead ObjectCannedACL = "bucket-owner-read" + ObjectCannedACLBucketOwnerFullControl ObjectCannedACL = "bucket-owner-full-control" +) + +// Values returns all known values for ObjectCannedACL. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectCannedACL) Values() []ObjectCannedACL { + return []ObjectCannedACL{ + "private", + "public-read", + "public-read-write", + "authenticated-read", + "aws-exec-read", + "bucket-owner-read", + "bucket-owner-full-control", + } +} + +type ObjectLockEnabled string + +// Enum values for ObjectLockEnabled +const ( + ObjectLockEnabledEnabled ObjectLockEnabled = "Enabled" +) + +// Values returns all known values for ObjectLockEnabled. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectLockEnabled) Values() []ObjectLockEnabled { + return []ObjectLockEnabled{ + "Enabled", + } +} + +type ObjectLockLegalHoldStatus string + +// Enum values for ObjectLockLegalHoldStatus +const ( + ObjectLockLegalHoldStatusOn ObjectLockLegalHoldStatus = "ON" + ObjectLockLegalHoldStatusOff ObjectLockLegalHoldStatus = "OFF" +) + +// Values returns all known values for ObjectLockLegalHoldStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus { + return []ObjectLockLegalHoldStatus{ + "ON", + "OFF", + } +} + +type ObjectLockMode string + +// Enum values for ObjectLockMode +const ( + ObjectLockModeGovernance ObjectLockMode = "GOVERNANCE" + ObjectLockModeCompliance ObjectLockMode = "COMPLIANCE" +) + +// Values returns all known values for ObjectLockMode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectLockMode) Values() []ObjectLockMode { + return []ObjectLockMode{ + "GOVERNANCE", + "COMPLIANCE", + } +} + +type ObjectLockRetentionMode string + +// Enum values for ObjectLockRetentionMode +const ( + ObjectLockRetentionModeGovernance ObjectLockRetentionMode = "GOVERNANCE" + ObjectLockRetentionModeCompliance ObjectLockRetentionMode = "COMPLIANCE" +) + +// Values returns all known values for ObjectLockRetentionMode. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
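ObjectLockRetentionMode is used by the DefaultRetention container defined further down in this file, whose contract is a Mode plus exactly one of Days or Years. A sketch of a valid combination (the 30-day period is an arbitrary example):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// GOVERNANCE mode for 30 days; Days and Years are mutually exclusive,
	// so exactly one of them accompanies Mode.
	ret := types.DefaultRetention{
		Mode: types.ObjectLockRetentionModeGovernance,
		Days: 30,
	}
	fmt.Println(ret.Mode, ret.Days)
}
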
+func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode { + return []ObjectLockRetentionMode{ + "GOVERNANCE", + "COMPLIANCE", + } +} + +type ObjectOwnership string + +// Enum values for ObjectOwnership +const ( + ObjectOwnershipBucketOwnerPreferred ObjectOwnership = "BucketOwnerPreferred" + ObjectOwnershipObjectWriter ObjectOwnership = "ObjectWriter" + ObjectOwnershipBucketOwnerEnforced ObjectOwnership = "BucketOwnerEnforced" +) + +// Values returns all known values for ObjectOwnership. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectOwnership) Values() []ObjectOwnership { + return []ObjectOwnership{ + "BucketOwnerPreferred", + "ObjectWriter", + "BucketOwnerEnforced", + } +} + +type ObjectStorageClass string + +// Enum values for ObjectStorageClass +const ( + ObjectStorageClassStandard ObjectStorageClass = "STANDARD" + ObjectStorageClassReducedRedundancy ObjectStorageClass = "REDUCED_REDUNDANCY" + ObjectStorageClassGlacier ObjectStorageClass = "GLACIER" + ObjectStorageClassStandardIa ObjectStorageClass = "STANDARD_IA" + ObjectStorageClassOnezoneIa ObjectStorageClass = "ONEZONE_IA" + ObjectStorageClassIntelligentTiering ObjectStorageClass = "INTELLIGENT_TIERING" + ObjectStorageClassDeepArchive ObjectStorageClass = "DEEP_ARCHIVE" + ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS" + ObjectStorageClassGlacierIr ObjectStorageClass = "GLACIER_IR" +) + +// Values returns all known values for ObjectStorageClass. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ObjectStorageClass) Values() []ObjectStorageClass { + return []ObjectStorageClass{ + "STANDARD", + "REDUCED_REDUNDANCY", + "GLACIER", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "DEEP_ARCHIVE", + "OUTPOSTS", + "GLACIER_IR", + } +} + +type ObjectVersionStorageClass string + +// Enum values for ObjectVersionStorageClass +const ( + ObjectVersionStorageClassStandard ObjectVersionStorageClass = "STANDARD" +) + +// Values returns all known values for ObjectVersionStorageClass. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass { + return []ObjectVersionStorageClass{ + "STANDARD", + } +} + +type OwnerOverride string + +// Enum values for OwnerOverride +const ( + OwnerOverrideDestination OwnerOverride = "Destination" +) + +// Values returns all known values for OwnerOverride. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (OwnerOverride) Values() []OwnerOverride { + return []OwnerOverride{ + "Destination", + } +} + +type Payer string + +// Enum values for Payer +const ( + PayerRequester Payer = "Requester" + PayerBucketOwner Payer = "BucketOwner" +) + +// Values returns all known values for Payer. Note that this can be expanded in the +// future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. 
+func (Payer) Values() []Payer { + return []Payer{ + "Requester", + "BucketOwner", + } +} + +type Permission string + +// Enum values for Permission +const ( + PermissionFullControl Permission = "FULL_CONTROL" + PermissionWrite Permission = "WRITE" + PermissionWriteAcp Permission = "WRITE_ACP" + PermissionRead Permission = "READ" + PermissionReadAcp Permission = "READ_ACP" +) + +// Values returns all known values for Permission. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (Permission) Values() []Permission { + return []Permission{ + "FULL_CONTROL", + "WRITE", + "WRITE_ACP", + "READ", + "READ_ACP", + } +} + +type Protocol string + +// Enum values for Protocol +const ( + ProtocolHttp Protocol = "http" + ProtocolHttps Protocol = "https" +) + +// Values returns all known values for Protocol. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Protocol) Values() []Protocol { + return []Protocol{ + "http", + "https", + } +} + +type QuoteFields string + +// Enum values for QuoteFields +const ( + QuoteFieldsAlways QuoteFields = "ALWAYS" + QuoteFieldsAsneeded QuoteFields = "ASNEEDED" +) + +// Values returns all known values for QuoteFields. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (QuoteFields) Values() []QuoteFields { + return []QuoteFields{ + "ALWAYS", + "ASNEEDED", + } +} + +type ReplicaModificationsStatus string + +// Enum values for ReplicaModificationsStatus +const ( + ReplicaModificationsStatusEnabled ReplicaModificationsStatus = "Enabled" + ReplicaModificationsStatusDisabled ReplicaModificationsStatus = "Disabled" +) + +// Values returns all known values for ReplicaModificationsStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus { + return []ReplicaModificationsStatus{ + "Enabled", + "Disabled", + } +} + +type ReplicationRuleStatus string + +// Enum values for ReplicationRuleStatus +const ( + ReplicationRuleStatusEnabled ReplicationRuleStatus = "Enabled" + ReplicationRuleStatusDisabled ReplicationRuleStatus = "Disabled" +) + +// Values returns all known values for ReplicationRuleStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ReplicationRuleStatus) Values() []ReplicationRuleStatus { + return []ReplicationRuleStatus{ + "Enabled", + "Disabled", + } +} + +type ReplicationStatus string + +// Enum values for ReplicationStatus +const ( + ReplicationStatusComplete ReplicationStatus = "COMPLETE" + ReplicationStatusPending ReplicationStatus = "PENDING" + ReplicationStatusFailed ReplicationStatus = "FAILED" + ReplicationStatusReplica ReplicationStatus = "REPLICA" +) + +// Values returns all known values for ReplicationStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
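Permission slots into ACL grants alongside the Type enum defined later in this file. The sketch below assumes the Grant and Grantee shapes from the same generated package, since their definitions fall outside this excerpt; the canonical user ID is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func strptr(s string) *string { return &s }

func main() {
	// A single READ grant for a canonical user. Grant and Grantee come from
	// the same generated package; their fields are assumed here because the
	// definitions fall outside this excerpt.
	grant := types.Grant{
		Grantee: &types.Grantee{
			Type: types.TypeCanonicalUser, // Type enum is defined later in this file
			ID:   strptr("placeholder-canonical-user-id"),
		},
		Permission: types.PermissionRead,
	}
	fmt.Println(grant.Permission)
}
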
+func (ReplicationStatus) Values() []ReplicationStatus { + return []ReplicationStatus{ + "COMPLETE", + "PENDING", + "FAILED", + "REPLICA", + } +} + +type ReplicationTimeStatus string + +// Enum values for ReplicationTimeStatus +const ( + ReplicationTimeStatusEnabled ReplicationTimeStatus = "Enabled" + ReplicationTimeStatusDisabled ReplicationTimeStatus = "Disabled" +) + +// Values returns all known values for ReplicationTimeStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ReplicationTimeStatus) Values() []ReplicationTimeStatus { + return []ReplicationTimeStatus{ + "Enabled", + "Disabled", + } +} + +type RequestCharged string + +// Enum values for RequestCharged +const ( + RequestChargedRequester RequestCharged = "requester" +) + +// Values returns all known values for RequestCharged. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (RequestCharged) Values() []RequestCharged { + return []RequestCharged{ + "requester", + } +} + +type RequestPayer string + +// Enum values for RequestPayer +const ( + RequestPayerRequester RequestPayer = "requester" +) + +// Values returns all known values for RequestPayer. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (RequestPayer) Values() []RequestPayer { + return []RequestPayer{ + "requester", + } +} + +type RestoreRequestType string + +// Enum values for RestoreRequestType +const ( + RestoreRequestTypeSelect RestoreRequestType = "SELECT" +) + +// Values returns all known values for RestoreRequestType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (RestoreRequestType) Values() []RestoreRequestType { + return []RestoreRequestType{ + "SELECT", + } +} + +type ServerSideEncryption string + +// Enum values for ServerSideEncryption +const ( + ServerSideEncryptionAes256 ServerSideEncryption = "AES256" + ServerSideEncryptionAwsKms ServerSideEncryption = "aws:kms" +) + +// Values returns all known values for ServerSideEncryption. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ServerSideEncryption) Values() []ServerSideEncryption { + return []ServerSideEncryption{ + "AES256", + "aws:kms", + } +} + +type SseKmsEncryptedObjectsStatus string + +// Enum values for SseKmsEncryptedObjectsStatus +const ( + SseKmsEncryptedObjectsStatusEnabled SseKmsEncryptedObjectsStatus = "Enabled" + SseKmsEncryptedObjectsStatusDisabled SseKmsEncryptedObjectsStatus = "Disabled" +) + +// Values returns all known values for SseKmsEncryptedObjectsStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. 
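ServerSideEncryption feeds, among others, the Encryption container defined later in this file: AES256 needs no extra material, while aws:kms can optionally name a customer managed key. An illustrative sketch (the key ARN is a placeholder, not a real resource):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func strptr(s string) *string { return &s }

func main() {
	// SSE-KMS with an explicit customer managed key; per the doc comment on
	// Encryption, KMSKeyId is optional and only meaningful for aws:kms.
	enc := types.Encryption{
		EncryptionType: types.ServerSideEncryptionAwsKms,
		KMSKeyId:       strptr("arn:aws:kms:eu-central-1:111122223333:key/placeholder"),
	}
	fmt.Println(enc.EncryptionType)
}
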
+func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus { + return []SseKmsEncryptedObjectsStatus{ + "Enabled", + "Disabled", + } +} + +type StorageClass string + +// Enum values for StorageClass +const ( + StorageClassStandard StorageClass = "STANDARD" + StorageClassReducedRedundancy StorageClass = "REDUCED_REDUNDANCY" + StorageClassStandardIa StorageClass = "STANDARD_IA" + StorageClassOnezoneIa StorageClass = "ONEZONE_IA" + StorageClassIntelligentTiering StorageClass = "INTELLIGENT_TIERING" + StorageClassGlacier StorageClass = "GLACIER" + StorageClassDeepArchive StorageClass = "DEEP_ARCHIVE" + StorageClassOutposts StorageClass = "OUTPOSTS" + StorageClassGlacierIr StorageClass = "GLACIER_IR" +) + +// Values returns all known values for StorageClass. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (StorageClass) Values() []StorageClass { + return []StorageClass{ + "STANDARD", + "REDUCED_REDUNDANCY", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "GLACIER", + "DEEP_ARCHIVE", + "OUTPOSTS", + "GLACIER_IR", + } +} + +type StorageClassAnalysisSchemaVersion string + +// Enum values for StorageClassAnalysisSchemaVersion +const ( + StorageClassAnalysisSchemaVersionV1 StorageClassAnalysisSchemaVersion = "V_1" +) + +// Values returns all known values for StorageClassAnalysisSchemaVersion. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion { + return []StorageClassAnalysisSchemaVersion{ + "V_1", + } +} + +type TaggingDirective string + +// Enum values for TaggingDirective +const ( + TaggingDirectiveCopy TaggingDirective = "COPY" + TaggingDirectiveReplace TaggingDirective = "REPLACE" +) + +// Values returns all known values for TaggingDirective. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (TaggingDirective) Values() []TaggingDirective { + return []TaggingDirective{ + "COPY", + "REPLACE", + } +} + +type Tier string + +// Enum values for Tier +const ( + TierStandard Tier = "Standard" + TierBulk Tier = "Bulk" + TierExpedited Tier = "Expedited" +) + +// Values returns all known values for Tier. Note that this can be expanded in the +// future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Tier) Values() []Tier { + return []Tier{ + "Standard", + "Bulk", + "Expedited", + } +} + +type TransitionStorageClass string + +// Enum values for TransitionStorageClass +const ( + TransitionStorageClassGlacier TransitionStorageClass = "GLACIER" + TransitionStorageClassStandardIa TransitionStorageClass = "STANDARD_IA" + TransitionStorageClassOnezoneIa TransitionStorageClass = "ONEZONE_IA" + TransitionStorageClassIntelligentTiering TransitionStorageClass = "INTELLIGENT_TIERING" + TransitionStorageClassDeepArchive TransitionStorageClass = "DEEP_ARCHIVE" + TransitionStorageClassGlacierIr TransitionStorageClass = "GLACIER_IR" +) + +// Values returns all known values for TransitionStorageClass. Note that this can +// be expanded in the future, and so it is only as up to date as the client. 
The +// ordering of this slice is not guaranteed to be stable across updates. +func (TransitionStorageClass) Values() []TransitionStorageClass { + return []TransitionStorageClass{ + "GLACIER", + "STANDARD_IA", + "ONEZONE_IA", + "INTELLIGENT_TIERING", + "DEEP_ARCHIVE", + "GLACIER_IR", + } +} + +type Type string + +// Enum values for Type +const ( + TypeCanonicalUser Type = "CanonicalUser" + TypeAmazonCustomerByEmail Type = "AmazonCustomerByEmail" + TypeGroup Type = "Group" +) + +// Values returns all known values for Type. Note that this can be expanded in the +// future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Type) Values() []Type { + return []Type{ + "CanonicalUser", + "AmazonCustomerByEmail", + "Group", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go new file mode 100644 index 000000000..8c3a386f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go @@ -0,0 +1,188 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The requested bucket name is not available. The bucket namespace is shared by +// all users of the system. Select a different name and try again. +type BucketAlreadyExists struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *BucketAlreadyExists) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BucketAlreadyExists) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BucketAlreadyExists) ErrorCode() string { return "BucketAlreadyExists" } +func (e *BucketAlreadyExists) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The bucket you tried to create already exists, and you own it. Amazon S3 returns +// this error in all Amazon Web Services Regions except in the North Virginia +// Region. For legacy compatibility, if you re-create an existing bucket that you +// already own in the North Virginia Region, Amazon S3 returns 200 OK and resets +// the bucket access control lists (ACLs). +type BucketAlreadyOwnedByYou struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *BucketAlreadyOwnedByYou) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BucketAlreadyOwnedByYou) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BucketAlreadyOwnedByYou) ErrorCode() string { return "BucketAlreadyOwnedByYou" } +func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Object is archived and inaccessible until restored. +type InvalidObjectState struct { + Message *string + + StorageClass StorageClass + AccessTier IntelligentTieringAccessTier + + noSmithyDocumentSerde +} + +func (e *InvalidObjectState) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidObjectState) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidObjectState) ErrorCode() string { return "InvalidObjectState" } +func (e *InvalidObjectState) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified bucket does not exist. 
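Each generated error type implements the error interface together with smithy's ErrorCode/ErrorFault contract, so callers unwrap them with errors.As instead of matching message strings. A minimal sketch against the two bucket-creation errors defined above (handleCreateBucketErr is a hypothetical helper, not part of this diff):

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// handleCreateBucketErr shows the usual unwrap pattern for an error returned
// by an SDK call such as CreateBucket.
func handleCreateBucketErr(err error) {
	var owned *types.BucketAlreadyOwnedByYou
	if errors.As(err, &owned) {
		// Raised everywhere except the North Virginia Region, where
		// re-creating a bucket you own returns 200 OK instead.
		fmt.Println("already own it:", owned.ErrorCode())
		return
	}
	var taken *types.BucketAlreadyExists
	if errors.As(err, &taken) {
		fmt.Println("name taken in the shared namespace:", taken.ErrorCode())
		return
	}
	if err != nil {
		fmt.Println("unhandled:", err)
	}
}

func main() {
	handleCreateBucketErr(&types.BucketAlreadyExists{})
}
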
+type NoSuchBucket struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NoSuchBucket) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoSuchBucket) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoSuchBucket) ErrorCode() string { return "NoSuchBucket" } +func (e *NoSuchBucket) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified key does not exist. +type NoSuchKey struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NoSuchKey) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoSuchKey) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoSuchKey) ErrorCode() string { return "NoSuchKey" } +func (e *NoSuchKey) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified multipart upload does not exist. +type NoSuchUpload struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NoSuchUpload) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NoSuchUpload) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NoSuchUpload) ErrorCode() string { return "NoSuchUpload" } +func (e *NoSuchUpload) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified content does not exist. +type NotFound struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NotFound) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NotFound) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NotFound) ErrorCode() string { return "NotFound" } +func (e *NotFound) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// This action is not allowed against this storage tier. +type ObjectAlreadyInActiveTierError struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ObjectAlreadyInActiveTierError) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ObjectAlreadyInActiveTierError) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ObjectAlreadyInActiveTierError) ErrorCode() string { return "ObjectAlreadyInActiveTierError" } +func (e *ObjectAlreadyInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. +type ObjectNotInActiveTierError struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ObjectNotInActiveTierError) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ObjectNotInActiveTierError) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ObjectNotInActiveTierError) ErrorCode() string { return "ObjectNotInActiveTierError" } +func (e *ObjectNotInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go new file mode 100644 index 000000000..bcef62cec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go @@ -0,0 +1,3892 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// Specifies the days since the initiation of an incomplete multipart upload that +// Amazon S3 will wait before permanently removing all parts of the upload. For +// more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon S3 User Guide. +type AbortIncompleteMultipartUpload struct { + + // Specifies the number of days after which Amazon S3 aborts an incomplete + // multipart upload. + DaysAfterInitiation int32 + + noSmithyDocumentSerde +} + +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) in +// the Amazon S3 User Guide. +type AccelerateConfiguration struct { + + // Specifies the transfer acceleration status of the bucket. + Status BucketAccelerateStatus + + noSmithyDocumentSerde +} + +// Contains the elements that set the ACL permissions for an object per grantee. +type AccessControlPolicy struct { + + // A list of grants. + Grants []Grant + + // Container for the bucket owner's display name and ID. + Owner *Owner + + noSmithyDocumentSerde +} + +// A container for information about access control for replicas. +type AccessControlTranslation struct { + + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon S3 API Reference. + // + // This member is required. + Owner OwnerOverride + + noSmithyDocumentSerde +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a metrics +// filter. The operator must have at least two predicates in any combination, and +// an object must match all of the predicates for the filter to apply. +type AnalyticsAndOperator struct { + + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. + Prefix *string + + // The list of tags to use when evaluating an AND predicate. + Tags []Tag + + noSmithyDocumentSerde +} + +// Specifies the configuration and any analyses for the analytics filter of an +// Amazon S3 bucket. +type AnalyticsConfiguration struct { + + // The ID that identifies the analytics configuration. + // + // This member is required. + Id *string + + // Contains data related to access patterns to be collected and made available to + // analyze the tradeoffs between different storage classes. + // + // This member is required. + StorageClassAnalysis *StorageClassAnalysis + + // The filter used to describe a set of objects for analyses. A filter must have + // exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no + // filter is provided, all objects will be considered in any analysis. + Filter AnalyticsFilter + + noSmithyDocumentSerde +} + +// Where to publish the analytics results. +type AnalyticsExportDestination struct { + + // A destination signifying output to an S3 bucket. + // + // This member is required. + S3BucketDestination *AnalyticsS3BucketDestination + + noSmithyDocumentSerde +} + +// The filter used to describe a set of objects for analyses. A filter must have +// exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). 
If no +// filter is provided, all objects will be considered in any analysis. +// +// The following types satisfy this interface: +// +// AnalyticsFilterMemberAnd +// AnalyticsFilterMemberPrefix +// AnalyticsFilterMemberTag +type AnalyticsFilter interface { + isAnalyticsFilter() +} + +// A conjunction (logical AND) of predicates, which is used in evaluating an +// analytics filter. The operator must have at least two predicates. +type AnalyticsFilterMemberAnd struct { + Value AnalyticsAndOperator + + noSmithyDocumentSerde +} + +func (*AnalyticsFilterMemberAnd) isAnalyticsFilter() {} + +// The prefix to use when evaluating an analytics filter. +type AnalyticsFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*AnalyticsFilterMemberPrefix) isAnalyticsFilter() {} + +// The tag to use when evaluating an analytics filter. +type AnalyticsFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*AnalyticsFilterMemberTag) isAnalyticsFilter() {} + +// Contains information about where to publish the analytics results. +type AnalyticsS3BucketDestination struct { + + // The Amazon Resource Name (ARN) of the bucket to which data is exported. + // + // This member is required. + Bucket *string + + // Specifies the file format used when exporting data to Amazon S3. + // + // This member is required. + Format AnalyticsS3ExportFileFormat + + // The account ID that owns the destination S3 bucket. If no account ID is + // provided, the owner is not validated before exporting data. Although this value + // is optional, we strongly recommend that you set it to help prevent problems if + // the destination bucket ownership changes. + BucketAccountId *string + + // The prefix to use when exporting data. The prefix is prepended to all results. + Prefix *string + + noSmithyDocumentSerde +} + +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is +// globally unique, and the namespace is shared by all Amazon Web Services +// accounts. +type Bucket struct { + + // Date the bucket was created. This date can change when making changes to your + // bucket, such as editing its bucket policy. + CreationDate *time.Time + + // The name of the bucket. + Name *string + + noSmithyDocumentSerde +} + +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For +// more information, see Object Lifecycle Management +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) in +// the Amazon S3 User Guide. +type BucketLifecycleConfiguration struct { + + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // + // This member is required. + Rules []LifecycleRule + + noSmithyDocumentSerde +} + +// Container for logging status information. +type BucketLoggingStatus struct { + + // Describes where logs are stored and the prefix that Amazon S3 assigns to all log + // object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in + // the Amazon S3 API Reference. + LoggingEnabled *LoggingEnabled + + noSmithyDocumentSerde +} + +// Contains all the possible checksum or digest values for an object. +type Checksum struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. 
For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + noSmithyDocumentSerde +} + +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act like +// subdirectories in the directory specified by Prefix. For example, if the prefix +// is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common +// prefix is notes/summer/. +type CommonPrefix struct { + + // Container for the specified common prefix. + Prefix *string + + noSmithyDocumentSerde +} + +// The container for the completed multipart upload details. +type CompletedMultipartUpload struct { + + // Array of CompletedPart data types. If you do not supply a valid Part with your + // request, the service sends back an HTTP 400 response. + Parts []CompletedPart + + noSmithyDocumentSerde +} + +// Details of the parts that were uploaded. +type CompletedPart struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. 
For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag returned when the part was uploaded. + ETag *string + + // Part number that identifies the part. This is a positive integer between 1 and + // 10,000. + PartNumber int32 + + noSmithyDocumentSerde +} + +// A container for describing a condition that must be met for the specified +// redirect to apply. For example, 1. If request is for pages in the /docs folder, +// redirect to the /documents folder. 2. If request results in HTTP error 4xx, +// redirect request to another host where you might process the error. +type Condition struct { + + // The HTTP error code when the redirect is applied. In the event of an error, if + // the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect request for all pages with the prefix docs/, the key prefix will be + // /docs, which identifies all objects in the docs/ folder. Required when the + // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is + // not specified. If both conditions are specified, both must be true for the + // redirect to be applied. Replacement must be made for object keys containing + // special characters (such as carriage returns) when using XML requests. For more + // information, see XML related object key constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + KeyPrefixEquals *string + + noSmithyDocumentSerde +} + +type ContinuationEvent struct { + noSmithyDocumentSerde +} + +// Container for all response elements. +type CopyObjectResult struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. 
For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. + ETag *string + + // Creation date of the object. + LastModified *time.Time + + noSmithyDocumentSerde +} + +// Container for all response elements. +type CopyPartResult struct { + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. 
+ ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag of the object. + ETag *string + + // Date and time at which the object was uploaded. + LastModified *time.Time + + noSmithyDocumentSerde +} + +// Describes the cross-origin access configuration for objects in an Amazon S3 +// bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 +// User Guide. +type CORSConfiguration struct { + + // A set of origins and methods (cross-origin access that you want to allow). You + // can add up to 100 rules to the configuration. + // + // This member is required. + CORSRules []CORSRule + + noSmithyDocumentSerde +} + +// Specifies a cross-origin access rule for an Amazon S3 bucket. +type CORSRule struct { + + // An HTTP method that you allow the origin to execute. Valid values are GET, PUT, + // HEAD, POST, and DELETE. + // + // This member is required. + AllowedMethods []string + + // One or more origins you want customers to be able to access the bucket from. + // + // This member is required. + AllowedOrigins []string + + // Headers that are specified in the Access-Control-Request-Headers header. These + // headers are allowed in a preflight OPTIONS request. In response to any preflight + // OPTIONS request, Amazon S3 returns any requested headers that are allowed. + AllowedHeaders []string + + // One or more headers in the response that you want customers to be able to access + // from their applications (for example, from a JavaScript XMLHttpRequest object). + ExposeHeaders []string + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string + + // The time in seconds that your browser is to cache the preflight response for the + // specified resource. + MaxAgeSeconds int32 + + noSmithyDocumentSerde +} + +// The configuration information for the bucket. +type CreateBucketConfiguration struct { + + // Specifies the Region where the bucket will be created. If you don't specify a + // Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). + LocationConstraint BucketLocationConstraint + + noSmithyDocumentSerde +} + +// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. +type CSVInput struct { + + // Specifies that CSV field values may contain quoted record delimiters and such + // records should be allowed. Default value is FALSE. Setting this value to TRUE + // may lower performance. + AllowQuotedRecordDelimiter bool + + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character to + // indicate a comment line. + Comments *string + + // A single character used to separate individual fields in a record. You can + // specify an arbitrary delimiter. + FieldDelimiter *string + + // Describes the first line of input. Valid values are: + // + // * NONE: First line is not + // a header. 
+ // + // * IGNORE: First line is a header, but you can't use the header values + // to indicate the column in an expression. You can use column position (such as + // _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). + // + // * Use: First + // line is a header, and you can use the header value to identify a column in an + // expression (SELECT "name" FROM OBJECT). + FileHeaderInfo FileHeaderInfo + + // A single character used for escaping when the field delimiter is part of the + // value. For example, if the value is a, b, Amazon S3 wraps this field value in + // quotation marks, as follows: " a , b ". Type: String Default: " Ancestors: CSV + QuoteCharacter *string + + // A single character used for escaping the quotation mark character inside an + // already escaped value. For example, the value """ a , b """ is parsed as " a , b + // ". + QuoteEscapeCharacter *string + + // A single character used to separate individual records in the input. Instead of + // the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string + + noSmithyDocumentSerde +} + +// Describes how uncompressed comma-separated values (CSV)-formatted results are +// formatted. +type CSVOutput struct { + + // The value used to separate individual fields in a record. You can specify an + // arbitrary delimiter. + FieldDelimiter *string + + // A single character used for escaping when the field delimiter is part of the + // value. For example, if the value is a, b, Amazon S3 wraps this field value in + // quotation marks, as follows: " a , b ". + QuoteCharacter *string + + // The single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string + + // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always + // use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for + // output fields when needed. + QuoteFields QuoteFields + + // A single character used to separate individual records in the output. Instead of + // the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string + + noSmithyDocumentSerde +} + +// The container element for specifying the default Object Lock retention settings +// for new objects placed in the specified bucket. +// +// * The DefaultRetention settings +// require both a mode and a period. +// +// * The DefaultRetention period can be either +// Days or Years but you must select one. You cannot specify Days and Years at the +// same time. +type DefaultRetention struct { + + // The number of days that you want to specify for the default retention period. + // Must be used with Mode. + Days int32 + + // The default Object Lock retention mode you want to apply to new objects placed + // in the specified bucket. Must be used with either Days or Years. + Mode ObjectLockRetentionMode + + // The number of years that you want to specify for the default retention period. + // Must be used with Mode. + Years int32 + + noSmithyDocumentSerde +} + +// Container for the objects to delete. +type Delete struct { + + // The objects to delete. + // + // This member is required. + Objects []ObjectIdentifier + + // Element to enable quiet mode for the request. When you add this element, you + // must set its value to true. + Quiet bool + + noSmithyDocumentSerde +} + +// Information about the deleted object. 
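CSVInput and CSVOutput above form the two halves of an S3 Select serialization: the input side says how to parse the stored object, the output side how to write matched records back. A brief sketch pairing them with plain defaults:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func strptr(s string) *string { return &s }

func main() {
	in := types.CSVInput{
		FileHeaderInfo:  types.FileHeaderInfoUse, // header row names the columns
		FieldDelimiter:  strptr(","),
		RecordDelimiter: strptr("\n"),
	}
	out := types.CSVOutput{
		QuoteFields:     types.QuoteFieldsAsneeded, // quote only when required
		FieldDelimiter:  strptr(","),
		RecordDelimiter: strptr("\n"),
	}
	fmt.Println(in.FileHeaderInfo, out.QuoteFields)
}
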
+type DeletedObject struct {
+
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker. In a simple DELETE, this header indicates
+ // whether (true) or not (false) a delete marker was created.
+ DeleteMarker bool
+
+ // The version ID of the delete marker created as a result of the DELETE operation.
+ // If you delete a specific object version, the value returned by this header is
+ // the version ID of the object version deleted.
+ DeleteMarkerVersionId *string
+
+ // The name of the deleted object.
+ Key *string
+
+ // The version ID of the deleted object.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+
+ // Specifies whether the object is (true) or is not (false) the latest version of
+ // an object.
+ IsLatest bool
+
+ // The object key.
+ Key *string
+
+ // Date and time the object was last modified.
+ LastModified *time.Time
+
+ // The account that created the delete marker.
+ Owner *Owner
+
+ // Version ID of an object.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a
+// DeleteMarkerReplication element. If your Filter includes a Tag element, the
+// DeleteMarkerReplicationStatus must be set to Disabled, because Amazon S3 does
+// not support replicating delete markers for tag-based rules. For an example
+// configuration, see Basic Rule Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+// For more information about delete marker replication, see Delete Marker
+// Replication
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+// If you are using an earlier version of the replication configuration, Amazon S3
+// handles replication of delete markers differently. For more information, see
+// Backward Compatibility
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+type DeleteMarkerReplication struct {
+
+ // Indicates whether to replicate delete markers.
+ Status DeleteMarkerReplicationStatus
+
+ noSmithyDocumentSerde
+}
+
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
+type Destination struct {
+
+ // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+ // the results.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specify this only in a cross-account scenario (where source and destination
+ // bucket owners are not the same), and you want to change replica ownership to the
+ // Amazon Web Services account that owns the destination bucket. If this is not
+ // specified in the replication configuration, the replicas are owned by the same
+ // Amazon Web Services account that owns the source object.
+ AccessControlTranslation *AccessControlTranslation
+
+ // Destination bucket owner account ID. In a cross-account scenario, if you direct
+ // Amazon S3 to change replica ownership to the Amazon Web Services account that
+ // owns the destination bucket by specifying the AccessControlTranslation property,
+ // this is the account ID of the destination bucket owner.
For more information, + // see Replication Additional Configuration: Changing the Replica Owner + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) + // in the Amazon S3 User Guide. + Account *string + + // A container that provides information about encryption. If + // SourceSelectionCriteria is specified, you must specify this element. + EncryptionConfiguration *EncryptionConfiguration + + // A container specifying replication metrics-related settings enabling replication + // metrics and events. + Metrics *Metrics + + // A container specifying S3 Replication Time Control (S3 RTC), including whether + // S3 RTC is enabled and the time when all objects and operations on objects must + // be replicated. Must be specified together with a Metrics block. + ReplicationTime *ReplicationTime + + // The storage class to use when replicating objects, such as S3 Standard or + // reduced redundancy. By default, Amazon S3 uses the storage class of the source + // object to create the object replica. For valid values, see the StorageClass + // element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon S3 API Reference. + StorageClass StorageClass + + noSmithyDocumentSerde +} + +// Contains the type of server-side encryption used. +type Encryption struct { + + // The server-side encryption algorithm used when storing job results in Amazon S3 + // (for example, AES256, aws:kms). + // + // This member is required. + EncryptionType ServerSideEncryption + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string + + // If the encryption type is aws:kms, this optional value specifies the ID of the + // symmetric customer managed key to use for encryption of job results. Amazon S3 + // only supports symmetric keys. For more information, see Using symmetric and + // asymmetric keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + KMSKeyId *string + + noSmithyDocumentSerde +} + +// Specifies encryption-related information for an Amazon S3 bucket that is a +// destination for replicated objects. +type EncryptionConfiguration struct { + + // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web + // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for + // the destination bucket. Amazon S3 uses this key to encrypt replica objects. + // Amazon S3 only supports symmetric, customer managed KMS keys. For more + // information, see Using symmetric and asymmetric keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + ReplicaKmsKeyID *string + + noSmithyDocumentSerde +} + +// A message that indicates the request is complete and no more messages will be +// sent. You should not assume that the request is complete until the client +// receives an EndEvent. +type EndEvent struct { + noSmithyDocumentSerde +} + +// Container for all error elements. +type Error struct { + + // The error code is a string that uniquely identifies an error condition. It is + // meant to be read and understood by programs that detect and handle errors by + // type. 
Amazon S3 error codes + // + // * Code: AccessDenied + // + // * Description: Access + // Denied + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * + // Code: AccountProblem + // + // * Description: There is a problem with your Amazon Web + // Services account that prevents the action from completing successfully. Contact + // Amazon Web Services Support for further assistance. + // + // * HTTP Status Code: 403 + // Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * Code: AllAccessDisabled + // + // * + // Description: All access to this Amazon S3 resource has been disabled. Contact + // Amazon Web Services Support for further assistance. + // + // * HTTP Status Code: 403 + // Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // AmbiguousGrantByEmailAddress + // + // * Description: The email address you provided is + // associated with more than one account. + // + // * HTTP Status Code: 400 Bad Request + // + // * + // SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed + // + // * + // Description: The authorization header you provided is invalid. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * HTTP Status Code: N/A + // + // * Code: BadDigest + // + // * + // Description: The Content-MD5 you specified did not match what we received. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // BucketAlreadyExists + // + // * Description: The requested bucket name is not available. + // The bucket namespace is shared by all users of the system. Please select a + // different name and try again. + // + // * HTTP Status Code: 409 Conflict + // + // * SOAP Fault + // Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou + // + // * Description: The bucket + // you tried to create already exists, and you own it. Amazon S3 returns this error + // in all Amazon Web Services Regions except in the North Virginia Region. For + // legacy compatibility, if you re-create an existing bucket that you already own + // in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket + // access control lists (ACLs). + // + // * Code: 409 Conflict (in all Regions except the + // North Virginia Region) + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // BucketNotEmpty + // + // * Description: The bucket you tried to delete is not empty. + // + // * + // HTTP Status Code: 409 Conflict + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // CredentialsNotSupported + // + // * Description: This request does not support + // credentials. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited + // + // * Description: Cross-location + // logging not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall + // + // * Description: Your proposed + // upload is smaller than the minimum allowed object size. + // + // * HTTP Status Code: 400 + // Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: EntityTooLarge + // + // * + // Description: Your proposed upload exceeds the maximum allowed object size. 
+ // + // * + // HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // ExpiredToken + // + // * Description: The provided token has expired. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // IllegalVersioningConfigurationException + // + // * Description: Indicates that the + // versioning configuration specified in the request is invalid. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // IncompleteBody + // + // * Description: You did not provide the number of bytes specified + // by the Content-Length HTTP header + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP + // Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest + // + // * + // Description: POST requires exactly one file upload per request. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InlineDataTooLarge + // + // * Description: Inline data exceeds the maximum allowed + // size. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * + // Code: InternalError + // + // * Description: We encountered an internal error. Please try + // again. + // + // * HTTP Status Code: 500 Internal Server Error + // + // * SOAP Fault Code Prefix: + // Server + // + // * Code: InvalidAccessKeyId + // + // * Description: The Amazon Web Services + // access key ID you provided does not exist in our records. + // + // * HTTP Status Code: + // 403 Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidAddressingHeader + // + // * Description: You must specify the Anonymous role. + // + // * + // HTTP Status Code: N/A + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidArgument + // + // * Description: Invalid Argument + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName + // + // * + // Description: The specified bucket is not valid. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState + // + // * + // Description: The request is not valid with the current state of the bucket. + // + // * + // HTTP Status Code: 409 Conflict + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidDigest + // + // * Description: The Content-MD5 you specified is not valid. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidEncryptionAlgorithmError + // + // * Description: The encryption request you + // specified is not valid. The valid value is AES256. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint + // + // * + // Description: The specified location constraint is not valid. For more + // information about Regions, see How to Select a Region for Your Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // + // * + // HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidObjectState + // + // * Description: The action is not valid for the current state + // of the object. 
+ // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidPart + // + // * Description: One or more of the specified parts + // could not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. + // + // * HTTP Status Code: + // 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder + // + // * + // Description: The list of parts was not in ascending order. Parts list must be + // specified in order by part number. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidPayer + // + // * Description: All access to + // this object has been disabled. Please contact Amazon Web Services Support for + // further assistance. + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidPolicyDocument + // + // * Description: The content of the + // form does not meet the conditions specified in the policy document. + // + // * HTTP + // Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidRange + // + // * Description: The requested range cannot be satisfied. + // + // * HTTP + // Status Code: 416 Requested Range Not Satisfiable + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest + // + // * Description: Please use AWS4-HMAC-SHA256. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: InvalidRequest + // + // * + // Description: SOAP requests must be made over an HTTPS connection. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidRequest + // + // * Description: Amazon S3 Transfer Acceleration is not supported + // for buckets with non-DNS compliant names. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * Code: N/A + // + // * Code: InvalidRequest + // + // * Description: Amazon S3 Transfer + // Acceleration is not supported for buckets with periods (.) in their names. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: InvalidRequest + // + // * + // Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style + // requests. + // + // * HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: + // InvalidRequest + // + // * Description: Amazon S3 Transfer Accelerate is not configured + // on this bucket. + // + // * HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: + // InvalidRequest + // + // * Description: Amazon S3 Transfer Accelerate is disabled on this + // bucket. + // + // * HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: + // InvalidRequest + // + // * Description: Amazon S3 Transfer Acceleration is not supported + // on this bucket. Contact Amazon Web Services Support for more information. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * Code: N/A + // + // * Code: InvalidRequest + // + // * + // Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. + // Contact Amazon Web Services Support for more information. + // + // * HTTP Status Code: + // 400 Bad Request + // + // * Code: N/A + // + // * Code: InvalidSecurity + // + // * Description: The + // provided security credentials are not valid. 
+ // + // * HTTP Status Code: 403 + // Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidSOAPRequest + // + // * + // Description: The SOAP request body is invalid. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass + // + // * + // Description: The storage class you specified is not valid. + // + // * HTTP Status Code: + // 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // InvalidTargetBucketForLogging + // + // * Description: The target bucket for logging does + // not exist, is not owned by you, or does not have the appropriate grants for the + // log-delivery group. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidToken + // + // * Description: The provided token is + // malformed or otherwise invalid. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidURI + // + // * Description: Couldn't parse the + // specified URI. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: KeyTooLongError + // + // * Description: Your key is too long. + // + // * HTTP + // Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // MalformedACLError + // + // * Description: The XML you provided was not well-formed or + // did not validate against our published schema. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest + // + // * + // Description: The body of your POST request is not well-formed + // multipart/form-data. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code + // Prefix: Client + // + // * Code: MalformedXML + // + // * Description: This happens when the user + // sends malformed XML (XML that doesn't conform to the published XSD) for the + // configuration. The error message is, "The XML you provided was not well-formed + // or did not validate against our published schema." + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded + // + // * + // Description: Your request was too big. + // + // * HTTP Status Code: 400 Bad Request + // + // * + // SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError + // + // * + // Description: Your POST request fields preceding the upload file were too + // large. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * + // Code: MetadataTooLarge + // + // * Description: Your metadata headers exceed the maximum + // allowed metadata size. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code + // Prefix: Client + // + // * Code: MethodNotAllowed + // + // * Description: The specified method is + // not allowed against this resource. + // + // * HTTP Status Code: 405 Method Not + // Allowed + // + // * SOAP Fault Code Prefix: Client + // + // * Code: MissingAttachment + // + // * + // Description: A SOAP attachment was expected, but none were found. + // + // * HTTP Status + // Code: N/A + // + // * SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength + // + // * + // Description: You must provide the Content-Length HTTP header. 
+ // + // * HTTP Status + // Code: 411 Length Required + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // MissingRequestBodyError + // + // * Description: This happens when the user sends an + // empty XML document as a request. The error message is, "Request body is + // empty." + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement + // + // * Description: The SOAP 1.1 request is + // missing a security element. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault + // Code Prefix: Client + // + // * Code: MissingSecurityHeader + // + // * Description: Your request + // is missing a required header. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault + // Code Prefix: Client + // + // * Code: NoLoggingStatusForKey + // + // * Description: There is no + // such thing as a logging status subresource for a key. + // + // * HTTP Status Code: 400 + // Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket + // + // * + // Description: The specified bucket does not exist. + // + // * HTTP Status Code: 404 Not + // Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy + // + // * + // Description: The specified bucket does not have a bucket policy. + // + // * HTTP Status + // Code: 404 Not Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NoSuchKey + // + // * + // Description: The specified key does not exist. + // + // * HTTP Status Code: 404 Not + // Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration + // + // * + // Description: The lifecycle configuration does not exist. + // + // * HTTP Status Code: + // 404 Not Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NoSuchUpload + // + // * + // Description: The specified multipart upload does not exist. The upload ID might + // be invalid, or the multipart upload might have been aborted or completed. + // + // * + // HTTP Status Code: 404 Not Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // NoSuchVersion + // + // * Description: Indicates that the version ID specified in the + // request does not match an existing version. + // + // * HTTP Status Code: 404 Not + // Found + // + // * SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented + // + // * Description: + // A header you provided implies functionality that is not implemented. + // + // * HTTP + // Status Code: 501 Not Implemented + // + // * SOAP Fault Code Prefix: Server + // + // * Code: + // NotSignedUp + // + // * Description: Your account is not signed up for the Amazon S3 + // service. You must sign up before you can use Amazon S3. You can sign up at the + // following URL: Amazon S3 (http://aws.amazon.com/s3) + // + // * HTTP Status Code: 403 + // Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted + // + // * + // Description: A conflicting conditional action is currently in progress against + // this resource. Try again. + // + // * HTTP Status Code: 409 Conflict + // + // * SOAP Fault Code + // Prefix: Client + // + // * Code: PermanentRedirect + // + // * Description: The bucket you are + // attempting to access must be addressed using the specified endpoint. Send all + // future requests to this endpoint. 
+ // + // * HTTP Status Code: 301 Moved Permanently + // + // * + // SOAP Fault Code Prefix: Client + // + // * Code: PreconditionFailed + // + // * Description: At + // least one of the preconditions you specified did not hold. + // + // * HTTP Status Code: + // 412 Precondition Failed + // + // * SOAP Fault Code Prefix: Client + // + // * Code: Redirect + // + // * + // Description: Temporary redirect. + // + // * HTTP Status Code: 307 Moved Temporarily + // + // * + // SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress + // + // * Description: + // Object restore is already in progress. + // + // * HTTP Status Code: 409 Conflict + // + // * SOAP + // Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent + // + // * Description: + // Bucket POST must be of the enclosure-type multipart/form-data. + // + // * HTTP Status + // Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // RequestTimeout + // + // * Description: Your socket connection to the server was not read + // from or written to within the timeout period. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed + // + // * + // Description: The difference between the request time and the server's time is + // too large. + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: RequestTorrentOfBucketError + // + // * Description: Requesting the + // torrent file of a bucket is not permitted. + // + // * HTTP Status Code: 400 Bad + // Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch + // + // * + // Description: The request signature we calculated does not match the signature + // you provided. Check your Amazon Web Services secret access key and signing + // method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) and + // SOAP Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) for + // details. + // + // * HTTP Status Code: 403 Forbidden + // + // * SOAP Fault Code Prefix: Client + // + // * + // Code: ServiceUnavailable + // + // * Description: Reduce your request rate. + // + // * HTTP + // Status Code: 503 Service Unavailable + // + // * SOAP Fault Code Prefix: Server + // + // * Code: + // SlowDown + // + // * Description: Reduce your request rate. + // + // * HTTP Status Code: 503 Slow + // Down + // + // * SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect + // + // * + // Description: You are being redirected to the bucket while DNS updates. + // + // * HTTP + // Status Code: 307 Moved Temporarily + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // TokenRefreshRequired + // + // * Description: The provided token must be refreshed. + // + // * + // HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // TooManyBuckets + // + // * Description: You have attempted to create more buckets than + // allowed. + // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: UnexpectedContent + // + // * Description: This request does not support + // content. 
+ // + // * HTTP Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: + // Client + // + // * Code: UnresolvableGrantByEmailAddress + // + // * Description: The email + // address you provided does not match any account on record. + // + // * HTTP Status Code: + // 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + // + // * Code: + // UserKeyMustBeSpecified + // + // * Description: The bucket POST must contain the + // specified field name. If it is specified, check the order of the fields. + // + // * HTTP + // Status Code: 400 Bad Request + // + // * SOAP Fault Code Prefix: Client + Code *string + + // The error key. + Key *string + + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they don't + // know how or don't care to handle. Sophisticated programs with more exhaustive + // error handling and proper internationalization are more likely to ignore the + // error message. + Message *string + + // The version ID of the error. + VersionId *string + + noSmithyDocumentSerde +} + +// The error information. +type ErrorDocument struct { + + // The object key name to use when a 4XX class error occurs. Replacement must be + // made for object keys containing special characters (such as carriage returns) + // when using XML requests. For more information, see XML related object key + // constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // This member is required. + Key *string + + noSmithyDocumentSerde +} + +// A container for specifying the configuration for Amazon EventBridge. +type EventBridgeConfiguration struct { + noSmithyDocumentSerde +} + +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 User Guide. +type ExistingObjectReplication struct { + + // + // + // This member is required. + Status ExistingObjectReplicationStatus + + noSmithyDocumentSerde +} + +// Specifies the Amazon S3 object key name to filter on and whether to filter on +// the suffix or prefix of the key name. +type FilterRule struct { + + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + Name FilterRuleName + + // The value that the filter searches for in object key names. + Value *string + + noSmithyDocumentSerde +} + +// A collection of parts associated with a multipart upload. +type GetObjectAttributesParts struct { + + // Indicates whether the returned list of parts is truncated. A value of true + // indicates that the list was truncated. A list can be truncated if the number of + // parts exceeds the limit returned in the MaxParts element. + IsTruncated bool + + // The maximum number of parts allowed in the response. 
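+ // As an editorial note (not upstream text): when IsTruncated is true, a caller
+ // can fetch the next page by passing NextPartNumberMarker back as the
+ // PartNumberMarker of a follow-up GetObjectAttributes request, for example:
+ //
+ //	in.PartNumberMarker = parts.NextPartNumberMarker // in, parts: hypothetical request/response values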
+ MaxParts int32
+
+ // When a list is truncated, this element specifies the last part in the list, as
+ // well as the value to use for the PartNumberMarker request parameter in a
+ // subsequent request.
+ NextPartNumberMarker *string
+
+ // The marker for the current part.
+ PartNumberMarker *string
+
+ // A container for elements related to a particular part. A response can contain
+ // zero or more Parts elements.
+ Parts []ObjectPart
+
+ // The total number of parts.
+ TotalPartsCount int32
+
+ noSmithyDocumentSerde
+}
+
+// Container for S3 Glacier job parameters.
+type GlacierJobParameters struct {
+
+ // Retrieval tier at which the restore will be processed.
+ //
+ // This member is required.
+ Tier Tier
+
+ noSmithyDocumentSerde
+}
+
+// Container for grant information.
+type Grant struct {
+
+ // The person being granted permissions.
+ Grantee *Grantee
+
+ // Specifies the permission given to the grantee.
+ Permission Permission
+
+ noSmithyDocumentSerde
+}
+
+// Container for the person being granted permissions.
+type Grantee struct {
+
+ // Type of grantee.
+ //
+ // This member is required.
+ Type Type
+
+ // Screen name of the grantee.
+ DisplayName *string
+
+ // Email address of the grantee. Using email addresses to specify a grantee is only
+ // supported in the following Amazon Web Services Regions:
+ //
+ // * US East (N.
+ // Virginia)
+ //
+ // * US West (N. California)
+ //
+ // * US West (Oregon)
+ //
+ // * Asia Pacific
+ // (Singapore)
+ //
+ // * Asia Pacific (Sydney)
+ //
+ // * Asia Pacific (Tokyo)
+ //
+ // * Europe
+ // (Ireland)
+ //
+ // * South America (São Paulo)
+ //
+ // For a list of all the Amazon S3
+ // supported Regions and endpoints, see Regions and Endpoints
+ // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
+ // Amazon Web Services General Reference.
+ EmailAddress *string
+
+ // The canonical user ID of the grantee.
+ ID *string
+
+ // URI of the grantee group.
+ URI *string
+
+ noSmithyDocumentSerde
+}
+
+// Container for the Suffix element.
+type IndexDocument struct {
+
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint (for example, if the suffix is index.html and you make a request to
+ // samplebucket/images/, the data that is returned will be for the object with the
+ // key name images/index.html). The suffix must not be empty and must not include a
+ // slash character. Replacement must be made for object keys containing special
+ // characters (such as carriage returns) when using XML requests. For more
+ // information, see XML related object key constraints
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // This member is required.
+ Suffix *string
+
+ noSmithyDocumentSerde
+}
+
+// Container element that identifies who initiated the multipart upload.
+type Initiator struct {
+
+ // Name of the Principal.
+ DisplayName *string
+
+ // If the principal is an Amazon Web Services account, it provides the Canonical
+ // User ID. If the principal is an IAM User, it provides a user ARN value.
+ ID *string
+
+ noSmithyDocumentSerde
+}
+
+// Describes the serialization format of the object.
+type InputSerialization struct {
+
+ // Describes the serialization of a CSV-encoded object.
+ CSV *CSVInput
+
+ // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default
+ // Value: NONE.
+ CompressionType CompressionType
+
+ // Specifies JSON as object's input serialization format.
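+ // As an illustrative sketch (an editorial example; the values are hypothetical),
+ // a gzip-compressed JSON-lines input for SelectObjectContent could be described
+ // as:
+ //
+ //	input := types.InputSerialization{
+ //		CompressionType: types.CompressionTypeGzip,
+ //		JSON:            &types.JSONInput{Type: types.JSONTypeLines},
+ //	}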
+ JSON *JSONInput + + // Specifies Parquet as object's input serialization format. + Parquet *ParquetInput + + noSmithyDocumentSerde +} + +// A container for specifying S3 Intelligent-Tiering filters. The filters determine +// the subset of objects to which the rule applies. +type IntelligentTieringAndOperator struct { + + // An object key name prefix that identifies the subset of objects to which the + // configuration applies. + Prefix *string + + // All of these tags must exist in the object's tag set in order for the + // configuration to apply. + Tags []Tag + + noSmithyDocumentSerde +} + +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. For +// information about the S3 Intelligent-Tiering storage class, see Storage class +// for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +type IntelligentTieringConfiguration struct { + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // This member is required. + Id *string + + // Specifies the status of the configuration. + // + // This member is required. + Status IntelligentTieringStatus + + // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. + // + // This member is required. + Tierings []Tiering + + // Specifies a bucket filter. The configuration only includes objects that meet the + // filter's criteria. + Filter *IntelligentTieringFilter + + noSmithyDocumentSerde +} + +// The Filter is used to identify objects that the S3 Intelligent-Tiering +// configuration applies to. +type IntelligentTieringFilter struct { + + // A conjunction (logical AND) of predicates, which is used in evaluating a metrics + // filter. The operator must have at least two predicates, and an object must match + // all of the predicates in order for the filter to apply. + And *IntelligentTieringAndOperator + + // An object key name prefix that identifies the subset of objects to which the + // rule applies. Replacement must be made for object keys containing special + // characters (such as carriage returns) when using XML requests. For more + // information, see XML related object key constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string + + // A container of a key value name pair. + Tag *Tag + + noSmithyDocumentSerde +} + +// Specifies the inventory configuration for an Amazon S3 bucket. For more +// information, see GET Bucket inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon S3 API Reference. +type InventoryConfiguration struct { + + // Contains information about where to publish the inventory results. + // + // This member is required. + Destination *InventoryDestination + + // The ID used to identify the inventory configuration. + // + // This member is required. + Id *string + + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields + // VersionId, IsLatest, and DeleteMarker to the list. If set to Current, the list + // does not contain these version-related fields. + // + // This member is required. + IncludedObjectVersions InventoryIncludedObjectVersions + + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. 
If set to False, no inventory list is generated. + // + // This member is required. + IsEnabled bool + + // Specifies the schedule for generating inventory results. + // + // This member is required. + Schedule *InventorySchedule + + // Specifies an inventory filter. The inventory only includes objects that meet the + // filter's criteria. + Filter *InventoryFilter + + // Contains the optional fields that are included in the inventory results. + OptionalFields []InventoryOptionalField + + noSmithyDocumentSerde +} + +// Specifies the inventory configuration for an Amazon S3 bucket. +type InventoryDestination struct { + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // This member is required. + S3BucketDestination *InventoryS3BucketDestination + + noSmithyDocumentSerde +} + +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. + SSEKMS *SSEKMS + + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. + SSES3 *SSES3 + + noSmithyDocumentSerde +} + +// Specifies an inventory filter. The inventory only includes objects that meet the +// filter's criteria. +type InventoryFilter struct { + + // The prefix that an object must have to be included in the inventory results. + // + // This member is required. + Prefix *string + + noSmithyDocumentSerde +} + +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. +type InventoryS3BucketDestination struct { + + // The Amazon Resource Name (ARN) of the bucket where inventory results will be + // published. + // + // This member is required. + Bucket *string + + // Specifies the output format of the inventory results. + // + // This member is required. + Format InventoryFormat + + // The account ID that owns the destination S3 bucket. If no account ID is + // provided, the owner is not validated before exporting data. Although this value + // is optional, we strongly recommend that you set it to help prevent problems if + // the destination bucket ownership changes. + AccountId *string + + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption + + // The prefix that is prepended to all inventory results. + Prefix *string + + noSmithyDocumentSerde +} + +// Specifies the schedule for generating inventory results. +type InventorySchedule struct { + + // Specifies how frequently inventory results are produced. + // + // This member is required. + Frequency InventoryFrequency + + noSmithyDocumentSerde +} + +// Specifies JSON as object's input serialization format. +type JSONInput struct { + + // The type of JSON. Valid values: Document, Lines. + Type JSONType + + noSmithyDocumentSerde +} + +// Specifies JSON as request's output serialization format. +type JSONOutput struct { + + // The value used to separate individual records in the output. If no value is + // specified, Amazon S3 uses a newline character ('\n'). + RecordDelimiter *string + + noSmithyDocumentSerde +} + +// A container for specifying the configuration for Lambda notifications. +type LambdaFunctionConfiguration struct { + + // The Amazon S3 bucket event for which to invoke the Lambda function. 
For more + // information, see Supported Event Types + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Events []Event + + // The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes + // when the specified event type occurs. + // + // This member is required. + LambdaFunctionArn *string + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + Filter *NotificationConfigurationFilter + + // An optional unique identifier for configurations in a notification + // configuration. If you don't provide one, Amazon S3 will assign an ID. + Id *string + + noSmithyDocumentSerde +} + +// Container for the expiration for the lifecycle of the object. +type LifecycleExpiration struct { + + // Indicates at what date the object is to be moved or deleted. Should be in GMT + // ISO 8601 Format. + Date *time.Time + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days int32 + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false the + // policy takes no action. This cannot be specified with Days or Date in a + // Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker bool + + noSmithyDocumentSerde +} + +// A lifecycle rule for individual objects in an Amazon S3 bucket. +type LifecycleRule struct { + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is + // not currently being applied. + // + // This member is required. + Status ExpirationStatus + + // Specifies the days since the initiation of an incomplete multipart upload that + // Amazon S3 will wait before permanently removing all parts of the upload. For + // more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload + + // Specifies the expiration for the lifecycle of the object in the form of date, + // days and, whether the object has a delete marker. + Expiration *LifecycleExpiration + + // The Filter is used to identify objects that a Lifecycle Rule applies to. A + // Filter must have exactly one of Prefix, Tag, or And specified. Filter is + // required if the LifecycleRule does not contain a Prefix element. + Filter LifecycleRuleFilter + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 + // permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) to + // request that Amazon S3 delete noncurrent object versions at a specific period in + // the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration + + // Specifies the transition rule for the lifecycle rule that describes when + // noncurrent objects transition to a specific storage class. 
If your bucket is + // versioning-enabled (or versioning is suspended), you can set this action to + // request that Amazon S3 transition noncurrent object versions to a specific + // storage class at a set period in the object's lifetime. + NoncurrentVersionTransitions []NoncurrentVersionTransition + + // Prefix identifying one or more objects to which the rule applies. This is no + // longer used; use Filter instead. Replacement must be made for object keys + // containing special characters (such as carriage returns) when using XML + // requests. For more information, see XML related object key constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Deprecated: This member has been deprecated. + Prefix *string + + // Specifies when an Amazon S3 object transitions to a specified storage class. + Transitions []Transition + + noSmithyDocumentSerde +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more +// predicates. The Lifecycle Rule will apply to any object matching all of the +// predicates configured inside the And operator. +type LifecycleRuleAndOperator struct { + + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan int64 + + // Maximum object size to which the rule applies. + ObjectSizeLessThan int64 + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string + + // All of these tags must exist in the object's tag set in order for the rule to + // apply. + Tags []Tag + + noSmithyDocumentSerde +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. A +// Filter must have exactly one of Prefix, Tag, or And specified. +// +// The following types satisfy this interface: +// +// LifecycleRuleFilterMemberAnd +// LifecycleRuleFilterMemberObjectSizeGreaterThan +// LifecycleRuleFilterMemberObjectSizeLessThan +// LifecycleRuleFilterMemberPrefix +// LifecycleRuleFilterMemberTag +type LifecycleRuleFilter interface { + isLifecycleRuleFilter() +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more +// predicates. The Lifecycle Rule will apply to any object matching all of the +// predicates configured inside the And operator. +type LifecycleRuleFilterMemberAnd struct { + Value LifecycleRuleAndOperator + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberAnd) isLifecycleRuleFilter() {} + +// Minimum object size to which the rule applies. +type LifecycleRuleFilterMemberObjectSizeGreaterThan struct { + Value int64 + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberObjectSizeGreaterThan) isLifecycleRuleFilter() {} + +// Maximum object size to which the rule applies. +type LifecycleRuleFilterMemberObjectSizeLessThan struct { + Value int64 + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberObjectSizeLessThan) isLifecycleRuleFilter() {} + +// Prefix identifying one or more objects to which the rule applies. Replacement +// must be made for object keys containing special characters (such as carriage +// returns) when using XML requests. For more information, see XML related object +// key constraints +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). 
+type LifecycleRuleFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberPrefix) isLifecycleRuleFilter() {} + +// This tag must exist in the object's tag set in order for the rule to apply. +type LifecycleRuleFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to all log +// object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in +// the Amazon S3 API Reference. +type LoggingEnabled struct { + + // Specifies the bucket where you want Amazon S3 to store server access logs. You + // can have your logs delivered to any bucket that you own, including the same + // bucket that is being logged. You can also configure multiple buckets to deliver + // their logs to the same target bucket. In this case, you should choose a + // different TargetPrefix for each source bucket so that the delivered log files + // can be distinguished by key. + // + // This member is required. + TargetBucket *string + + // A prefix for all log object keys. If you store log files from multiple Amazon S3 + // buckets in a single bucket, you can use a prefix to distinguish which log files + // came from which bucket. + // + // This member is required. + TargetPrefix *string + + // Container for granting information. Buckets that use the bucket owner enforced + // setting for Object Ownership don't support target grants. For more information, + // see Permissions for server access log delivery + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) + // in the Amazon S3 User Guide. + TargetGrants []TargetGrant + + noSmithyDocumentSerde +} + +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + + // Name of the Object. + Name *string + + // Value of the Object. + Value *string + + noSmithyDocumentSerde +} + +// A container specifying replication metrics-related settings enabling replication +// metrics and events. +type Metrics struct { + + // Specifies whether the replication metrics are enabled. + // + // This member is required. + Status MetricsStatus + + // A container specifying the time threshold for emitting the + // s3:Replication:OperationMissedThreshold event. + EventThreshold *ReplicationTimeValue + + noSmithyDocumentSerde +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a metrics +// filter. The operator must have at least two predicates, and an object must match +// all of the predicates in order for the filter to apply. +type MetricsAndOperator struct { + + // The access point ARN used when evaluating an AND predicate. + AccessPointArn *string + + // The prefix used when evaluating an AND predicate. + Prefix *string + + // The list of tags used when evaluating an AND predicate. + Tags []Tag + + noSmithyDocumentSerde +} + +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an +// existing metrics configuration, note that this is a full replacement of the +// existing metrics configuration. If you don't include the elements you want to +// keep, they are erased. 
For more information, see PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html). +type MetricsConfiguration struct { + + // The ID used to identify the metrics configuration. + // + // This member is required. + Id *string + + // Specifies a metrics configuration filter. The metrics configuration will only + // include objects that meet the filter's criteria. A filter must be a prefix, an + // object tag, an access point ARN, or a conjunction (MetricsAndOperator). + Filter MetricsFilter + + noSmithyDocumentSerde +} + +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, an +// object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more +// information, see PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). +// +// The following types satisfy this interface: +// +// MetricsFilterMemberAccessPointArn +// MetricsFilterMemberAnd +// MetricsFilterMemberPrefix +// MetricsFilterMemberTag +type MetricsFilter interface { + isMetricsFilter() +} + +// The access point ARN used when evaluating a metrics filter. +type MetricsFilterMemberAccessPointArn struct { + Value string + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberAccessPointArn) isMetricsFilter() {} + +// A conjunction (logical AND) of predicates, which is used in evaluating a metrics +// filter. The operator must have at least two predicates, and an object must match +// all of the predicates in order for the filter to apply. +type MetricsFilterMemberAnd struct { + Value MetricsAndOperator + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberAnd) isMetricsFilter() {} + +// The prefix used when evaluating a metrics filter. +type MetricsFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberPrefix) isMetricsFilter() {} + +// The tag used when evaluating a metrics filter. +type MetricsFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberTag) isMetricsFilter() {} + +// Container for the MultipartUpload for the Amazon S3 object. +type MultipartUpload struct { + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm ChecksumAlgorithm + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time + + // Identifies who initiated the multipart upload. + Initiator *Initiator + + // Key of the object for which the multipart upload was initiated. + Key *string + + // Specifies the owner of the object that is part of the multipart upload. + Owner *Owner + + // The class of storage used to store the object. + StorageClass StorageClass + + // Upload ID that identifies the multipart upload. + UploadId *string + + noSmithyDocumentSerde +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 +// permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) to +// request that Amazon S3 delete noncurrent object versions at a specific period in +// the object's lifetime. +type NoncurrentVersionExpiration struct { + + // Specifies how many noncurrent versions Amazon S3 will retain. If there are this + // many more recent noncurrent versions, Amazon S3 will take the associated action. 
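+ // As an illustrative sketch (not upstream text), a policy that keeps the two
+ // most recent noncurrent versions and expires older ones after 30 noncurrent
+ // days could be expressed as:
+ //
+ //	exp := types.NoncurrentVersionExpiration{NoncurrentDays: 30, NewerNoncurrentVersions: 2}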
+ // For more information about noncurrent versions, see Lifecycle configuration + // elements + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions int32 + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. The value must be a non-zero positive integer. + // For information about the noncurrent days calculations, see How Amazon S3 + // Calculates When an Object Became Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays int32 + + noSmithyDocumentSerde +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, +// GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or +// versioning is suspended), you can set this action to request that Amazon S3 +// transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, +// INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at a +// specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + + // Specifies how many noncurrent versions Amazon S3 will retain. If there are this + // many more recent noncurrent versions, Amazon S3 will take the associated action. + // For more information about noncurrent versions, see Lifecycle configuration + // elements + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions int32 + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates How Long an Object Has Been + // Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays int32 + + // The class of storage used to store the object. + StorageClass TransitionStorageClass + + noSmithyDocumentSerde +} + +// A container for specifying the notification configuration of the bucket. If this +// element is empty, notifications are turned off for the bucket. +type NotificationConfiguration struct { + + // Enables delivery of events to Amazon EventBridge. + EventBridgeConfiguration *EventBridgeConfiguration + + // Describes the Lambda functions to invoke and the events for which to invoke + // them. + LambdaFunctionConfigurations []LambdaFunctionConfiguration + + // The Amazon Simple Queue Service queues to publish messages to and the events for + // which to publish messages. + QueueConfigurations []QueueConfiguration + + // The topic to which notifications are sent and the events for which notifications + // are generated. + TopicConfigurations []TopicConfiguration + + noSmithyDocumentSerde +} + +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring Event Notifications +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the +// Amazon S3 User Guide. +type NotificationConfigurationFilter struct { + + // A container for object key name prefix and suffix filtering rules. + Key *S3KeyFilter + + noSmithyDocumentSerde +} + +// An object consists of data and its descriptive metadata. 
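+// For illustration only (an editorial sketch; client is a hypothetical
+// *s3.Client and error handling is elided):
+//
+//	out, _ := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	for _, obj := range out.Contents {
+//		fmt.Println(aws.ToString(obj.Key), obj.Size)
+//	}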
+type Object struct {
+
+ // The algorithm that was used to create a checksum of the object.
+ ChecksumAlgorithm []ChecksumAlgorithm
+
+ // The entity tag is a hash of the object. The ETag reflects changes only to the
+ // contents of an object, not its metadata. The ETag may or may not be an MD5
+ // digest of the object data. Whether or not it is depends on how the object was
+ // created and how it is encrypted as described below:
+ //
+ // * Objects created by the
+ // PUT Object, POST Object, or Copy operation, or through the Amazon Web Services
+ // Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that
+ // are an MD5 digest of their object data.
+ //
+ // * Objects created by the PUT Object,
+ // POST Object, or Copy operation, or through the Amazon Web Services Management
+ // Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5
+ // digest of their object data.
+ //
+ // * If an object is created by either the Multipart
+ // Upload or Part Copy operation, the ETag is not an MD5 digest, regardless of the
+ // method of encryption. If an object is larger than 16 MB, the Amazon Web Services
+ // Management Console will upload or copy that object as a Multipart Upload, and
+ // therefore the ETag will not be an MD5 digest.
+ ETag *string
+
+ // The name that you assign to an object. You use the object key to retrieve the
+ // object.
+ Key *string
+
+ // Creation date of the object.
+ LastModified *time.Time
+
+ // The owner of the object.
+ Owner *Owner
+
+ // Size in bytes of the object.
+ Size int64
+
+ // The class of storage used to store the object.
+ StorageClass ObjectStorageClass
+
+ noSmithyDocumentSerde
+}
+
+// Object Identifier is a unique value to identify objects.
+type ObjectIdentifier struct {
+
+ // Key name of the object. Replacement must be made for object keys containing
+ // special characters (such as carriage returns) when using XML requests. For more
+ // information, see XML related object key constraints
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // This member is required.
+ Key *string
+
+ // VersionId for the specific version of the object to delete.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// The container element for Object Lock configuration parameters.
+type ObjectLockConfiguration struct {
+
+ // Indicates whether this bucket has an Object Lock configuration enabled. Enable
+ // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket.
+ ObjectLockEnabled ObjectLockEnabled
+
+ // Specifies the Object Lock rule for the specified object. Enable this rule when
+ // you apply ObjectLockConfiguration to a bucket. Bucket settings require both a
+ // mode and a period. The period can be either Days or Years but you must select
+ // one. You cannot specify Days and Years at the same time.
+ Rule *ObjectLockRule
+
+ noSmithyDocumentSerde
+}
+
+// A legal hold configuration for an object.
+type ObjectLockLegalHold struct {
+
+ // Indicates whether the specified object has a legal hold in place.
+ Status ObjectLockLegalHoldStatus
+
+ noSmithyDocumentSerde
+}
+
+// A Retention configuration for an object.
+type ObjectLockRetention struct {
+
+ // Indicates the Retention mode for the specified object.
+ Mode ObjectLockRetentionMode
+
+ // The date on which this Object Lock Retention will expire.
+ RetainUntilDate *time.Time
+
+ noSmithyDocumentSerde
+}
+
+// The container element for an Object Lock rule.
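+//
+// Editor's note: an illustrative sketch, not generated code. It combines the
+// Object Lock types above from a caller's perspective (package alias types),
+// picking Days as the period since Days and Years are mutually exclusive:
+//
+//	lock := types.ObjectLockConfiguration{
+//		ObjectLockEnabled: types.ObjectLockEnabledEnabled,
+//		Rule: &types.ObjectLockRule{
+//			DefaultRetention: &types.DefaultRetention{
+//				Mode: types.ObjectLockRetentionModeGovernance,
+//				Days: 30,
+//			},
+//		},
+//	}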
+type ObjectLockRule struct { + + // The default Object Lock retention mode and period that you want to apply to new + // objects placed in the specified bucket. Bucket settings require both a mode and + // a period. The period can be either Days or Years but you must select one. You + // cannot specify Days and Years at the same time. + DefaultRetention *DefaultRetention + + noSmithyDocumentSerde +} + +// A container for elements related to an individual part. +type ObjectPart struct { + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // The part number identifying the part. This value is a positive integer between 1 + // and 10,000. + PartNumber int32 + + // The size of the uploaded part in bytes. + Size int64 + + noSmithyDocumentSerde +} + +// The version of an object. +type ObjectVersion struct { + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []ChecksumAlgorithm + + // The entity tag is an MD5 hash of that version of the object. + ETag *string + + // Specifies whether the object is (true) or is not (false) the latest version of + // an object. + IsLatest bool + + // The object key. + Key *string + + // Date and time the object was last modified. + LastModified *time.Time + + // Specifies the owner of the object. + Owner *Owner + + // Size in bytes of the object. + Size int64 + + // The class of storage used to store the object. + StorageClass ObjectVersionStorageClass + + // Version ID of an object. + VersionId *string + + noSmithyDocumentSerde +} + +// Describes the location where the restore job's output is stored. 
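+//
+// Editor's note: an illustrative sketch, not generated code. A caller preparing a
+// restore request might fill this type as follows (the bucket name and prefix are
+// placeholders; S3Location is defined later in this file and requires both):
+//
+//	loc := types.OutputLocation{
+//		S3: &types.S3Location{
+//			BucketName: aws.String("restore-results-bucket"),
+//			Prefix:     aws.String("restored/"),
+//		},
+//	}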
+type OutputLocation struct { + + // Describes an S3 location that will receive the results of the restore request. + S3 *S3Location + + noSmithyDocumentSerde +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput + + noSmithyDocumentSerde +} + +// Container for the owner's display name and ID. +type Owner struct { + + // Container for the display name of the owner. + DisplayName *string + + // Container for the ID of the owner. + ID *string + + noSmithyDocumentSerde +} + +// The container element for a bucket's ownership controls. +type OwnershipControls struct { + + // The container element for an ownership control rule. + // + // This member is required. + Rules []OwnershipControlsRule + + noSmithyDocumentSerde +} + +// The container element for an ownership control rule. +type OwnershipControlsRule struct { + + // The container element for object ownership for a bucket's ownership controls. + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the + // bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. ObjectWriter - The uploading account will own the object if the + // object is uploaded with the bucket-owner-full-control canned ACL. + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that don't + // specify an ACL or bucket owner full control ACLs, such as the + // bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed + // in the XML format. + // + // This member is required. + ObjectOwnership ObjectOwnership + + noSmithyDocumentSerde +} + +// Container for Parquet. +type ParquetInput struct { + noSmithyDocumentSerde +} + +// Container for elements related to a part. +type Part struct { + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. With multipart uploads, this may not + // be a checksum value of the object. For more information about how checksums are + // calculated with multipart uploads, see Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. 
+ ChecksumSHA1 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see + // Checking object integrity + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string + + // Entity tag returned when the part was uploaded. + ETag *string + + // Date and time at which the part was uploaded. + LastModified *time.Time + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber int32 + + // Size in bytes of the uploaded part data. + Size int64 + + noSmithyDocumentSerde +} + +// The container element for a bucket's policy status. +type PolicyStatus struct { + + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. + IsPublic bool + + noSmithyDocumentSerde +} + +// This data type contains information about progress of an operation. +type Progress struct { + + // The current number of uncompressed object bytes processed. + BytesProcessed int64 + + // The current number of bytes of records payload data returned. + BytesReturned int64 + + // The current number of object bytes scanned. + BytesScanned int64 + + noSmithyDocumentSerde +} + +// This data type contains information about the progress event of an operation. +type ProgressEvent struct { + + // The Progress event details. + Details *Progress + + noSmithyDocumentSerde +} + +// The PublicAccessBlock configuration that you want to apply to this Amazon S3 +// bucket. You can enable the configuration options in any combination. For more +// information about when Amazon S3 considers a bucket or object public, see The +// Meaning of "Public" +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon S3 User Guide. +type PublicAccessBlockConfiguration struct { + + // Specifies whether Amazon S3 should block public access control lists (ACLs) for + // this bucket and objects in this bucket. Setting this element to TRUE causes the + // following behavior: + // + // * PUT Bucket ACL and PUT Object ACL calls fail if the + // specified ACL is public. + // + // * PUT Object calls fail if the request includes a + // public ACL. + // + // * PUT Bucket calls fail if the request includes a public + // ACL. + // + // Enabling this setting doesn't affect existing policies or ACLs. + BlockPublicAcls bool + + // Specifies whether Amazon S3 should block public bucket policies for this bucket. + // Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket + // policy if the specified bucket policy allows public access. Enabling this + // setting doesn't affect existing bucket policies. + BlockPublicPolicy bool + + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore + // all public ACLs on this bucket and objects in this bucket. Enabling this setting + // doesn't affect the persistence of any existing ACLs and doesn't prevent new + // public ACLs from being set. + IgnorePublicAcls bool + + // Specifies whether Amazon S3 should restrict public bucket policies for this + // bucket. 
Setting this element to TRUE restricts access to this bucket to only + // Amazon Web Service principals and authorized users within this account if the + // bucket has a public policy. Enabling this setting doesn't affect previously + // stored bucket policies, except that public and cross-account access within any + // public bucket policy, including non-public delegation to specific accounts, is + // blocked. + RestrictPublicBuckets bool + + noSmithyDocumentSerde +} + +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. +type QueueConfiguration struct { + + // A collection of bucket events for which to send notifications + // + // This member is required. + Events []Event + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // This member is required. + QueueArn *string + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + Filter *NotificationConfigurationFilter + + // An optional unique identifier for configurations in a notification + // configuration. If you don't provide one, Amazon S3 will assign an ID. + Id *string + + noSmithyDocumentSerde +} + +// The container for the records event. +type RecordsEvent struct { + + // The byte array of partial, one or more result records. + Payload []byte + + noSmithyDocumentSerde +} + +// Specifies how requests are redirected. In the event of an error, you can specify +// a different error code to return. +type Redirect struct { + + // The host name to use in the redirect request. + HostName *string + + // The HTTP redirect code to use on the response. Not required if one of the + // siblings is present. + HttpRedirectCode *string + + // Protocol to use when redirecting requests. The default is the protocol that is + // used in the original request. + Protocol Protocol + + // The object key prefix to use in the redirect request. For example, to redirect + // requests for all pages with prefix docs/ (objects in the docs/ folder) to + // documents/, you can set a condition block with KeyPrefixEquals set to docs/ and + // in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of + // the siblings is present. Can be present only if ReplaceKeyWith is not provided. + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see XML + // related object key constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyPrefixWith *string + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html. Not required if one of the siblings is present. Can be + // present only if ReplaceKeyPrefixWith is not provided. Replacement must be made + // for object keys containing special characters (such as carriage returns) when + // using XML requests. For more information, see XML related object key + // constraints + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). 
+ ReplaceKeyWith *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the redirect behavior of all requests to a website endpoint of an
+// Amazon S3 bucket.
+type RedirectAllRequestsTo struct {
+
+ // Name of the host where requests are redirected.
+ //
+ // This member is required.
+ HostName *string
+
+ // Protocol to use when redirecting requests. The default is the protocol that is
+ // used in the original request.
+ Protocol Protocol
+
+ noSmithyDocumentSerde
+}
+
+// A filter that you can specify to select modifications on replicas. Amazon S3
+// doesn't replicate replica modifications by default. In the latest version of
+// replication configuration (when Filter is specified), you can specify this
+// element and set the status to Enabled to replicate modifications on replicas.
+// If you don't specify the Filter element, Amazon S3 assumes that the replication
+// configuration is the earlier version, V1. In the earlier version, this element
+// is not allowed.
+type ReplicaModifications struct {
+
+ // Specifies whether Amazon S3 replicates modifications on replicas.
+ //
+ // This member is required.
+ Status ReplicaModificationsStatus
+
+ noSmithyDocumentSerde
+}
+
+// A container for replication rules. You can add up to 1,000 rules. The maximum
+// size of a replication configuration is 2 MB.
+type ReplicationConfiguration struct {
+
+ // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role
+ // that Amazon S3 assumes when replicating objects. For more information, see How
+ // to Set Up Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) in
+ // the Amazon S3 User Guide.
+ //
+ // This member is required.
+ Role *string
+
+ // A container for one or more replication rules. A replication configuration must
+ // have at least one rule and can contain a maximum of 1,000 rules.
+ //
+ // This member is required.
+ Rules []ReplicationRule
+
+ noSmithyDocumentSerde
+}
+
+// Specifies which Amazon S3 objects to replicate and where to store the replicas.
+type ReplicationRule struct {
+
+ // A container for information about the replication destination and its
+ // configurations including enabling the S3 Replication Time Control (S3 RTC).
+ //
+ // This member is required.
+ Destination *Destination
+
+ // Specifies whether the rule is enabled.
+ //
+ // This member is required.
+ Status ReplicationRuleStatus
+
+ // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+ // in your replication configuration, you must also include a
+ // DeleteMarkerReplication element. If your Filter includes a Tag element, the
+ // DeleteMarkerReplicationStatus must be set to Disabled, because Amazon S3 does
+ // not support replicating delete markers for tag-based rules. For an example
+ // configuration, see Basic Rule Configuration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+ // For more information about delete marker replication, see Delete Marker
+ // Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+ // If you are using an earlier version of the replication configuration, Amazon S3
+ // handles replication of delete markers differently. For more information, see
+ // Backward Compatibility
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
+ DeleteMarkerReplication *DeleteMarkerReplication
+
+ // Optional configuration to replicate existing source bucket objects.
+ ExistingObjectReplication *ExistingObjectReplication
+
+ // A filter that identifies the subset of objects to which the replication rule
+ // applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+ Filter ReplicationRuleFilter
+
+ // A unique identifier for the rule. The maximum length is 255 characters.
+ ID *string
+
+ // An object key name prefix that identifies the object or objects to which the
+ // rule applies. The maximum prefix length is 1,024 characters. To include all
+ // objects in a bucket, specify an empty string. Replacement must be made for
+ // object keys containing special characters (such as carriage returns) when using
+ // XML requests. For more information, see XML related object key constraints
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Deprecated: This member has been deprecated.
+ Prefix *string
+
+ // The priority indicates which rule has precedence whenever two or more
+ // replication rules conflict. Amazon S3 will attempt to replicate objects
+ // according to all replication rules. However, if there are two or more rules with
+ // the same destination bucket, then objects will be replicated according to the
+ // rule with the highest priority. The higher the number, the higher the priority.
+ // For more information, see Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon
+ // S3 User Guide.
+ Priority int32
+
+ // A container that describes additional filters for identifying the source objects
+ // that you want to replicate. You can choose to enable or disable the replication
+ // of these objects. Currently, Amazon S3 supports only the filter that you can
+ // specify for objects created with server-side encryption using a customer managed
+ // key stored in Amazon Web Services Key Management Service (SSE-KMS).
+ SourceSelectionCriteria *SourceSelectionCriteria
+
+ noSmithyDocumentSerde
+}
+
+// A container for specifying rule filters. The filters determine the subset of
+// objects to which the rule applies. This element is required only if you specify
+// more than one filter. For example:
+//
+// * If you specify both a Prefix and a Tag
+// filter, wrap these filters in an And tag.
+//
+// * If you specify a filter based on
+// multiple tags, wrap the Tag elements in an And tag.
+type ReplicationRuleAndOperator struct {
+
+ // An object key name prefix that identifies the subset of objects to which the
+ // rule applies.
+ Prefix *string
+
+ // An array of tags containing key and value pairs.
+ Tags []Tag
+
+ noSmithyDocumentSerde
+}
+
+// A filter that identifies the subset of objects to which the replication rule
+// applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+//
+// The following types satisfy this interface:
+//
+// ReplicationRuleFilterMemberAnd
+// ReplicationRuleFilterMemberPrefix
+// ReplicationRuleFilterMemberTag
+type ReplicationRuleFilter interface {
+ isReplicationRuleFilter()
+}
+
+// A container for specifying rule filters. The filters determine the subset of
+// objects to which the rule applies. This element is required only if you specify
+// more than one filter. For example:
+//
+// * If you specify both a Prefix and a Tag
+// filter, wrap these filters in an And tag.
+//
+// * If you specify a filter based on
+// multiple tags, wrap the Tag elements in an And tag.
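+//
+// Editor's note: an illustrative sketch, not generated code, of the union pattern
+// described above; a caller assigns exactly one member type to the interface:
+//
+//	var f types.ReplicationRuleFilter = &types.ReplicationRuleFilterMemberAnd{
+//		Value: types.ReplicationRuleAndOperator{
+//			Prefix: aws.String("logs/"),
+//			Tags:   []types.Tag{{Key: aws.String("env"), Value: aws.String("prod")}},
+//		},
+//	}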
+type ReplicationRuleFilterMemberAnd struct { + Value ReplicationRuleAndOperator + + noSmithyDocumentSerde +} + +func (*ReplicationRuleFilterMemberAnd) isReplicationRuleFilter() {} + +// An object key name prefix that identifies the subset of objects to which the +// rule applies. Replacement must be made for object keys containing special +// characters (such as carriage returns) when using XML requests. For more +// information, see XML related object key constraints +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). +type ReplicationRuleFilterMemberPrefix struct { + Value string + + noSmithyDocumentSerde +} + +func (*ReplicationRuleFilterMemberPrefix) isReplicationRuleFilter() {} + +// A container for specifying a tag key and value. The rule applies only to objects +// that have the tag in their tag set. +type ReplicationRuleFilterMemberTag struct { + Value Tag + + noSmithyDocumentSerde +} + +func (*ReplicationRuleFilterMemberTag) isReplicationRuleFilter() {} + +// A container specifying S3 Replication Time Control (S3 RTC) related information, +// including whether S3 RTC is enabled and the time when all objects and operations +// on objects must be replicated. Must be specified together with a Metrics block. +type ReplicationTime struct { + + // Specifies whether the replication time is enabled. + // + // This member is required. + Status ReplicationTimeStatus + + // A container specifying the time by which replication should be complete for all + // objects and operations on objects. + // + // This member is required. + Time *ReplicationTimeValue + + noSmithyDocumentSerde +} + +// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// and replication metrics EventThreshold. +type ReplicationTimeValue struct { + + // Contains an integer specifying time in minutes. Valid value: 15 + Minutes int32 + + noSmithyDocumentSerde +} + +// Container for Payer. +type RequestPaymentConfiguration struct { + + // Specifies who pays for the download and request fees. + // + // This member is required. + Payer Payer + + noSmithyDocumentSerde +} + +// Container for specifying if periodic QueryProgress messages should be sent. +type RequestProgress struct { + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled bool + + noSmithyDocumentSerde +} + +// Container for restore job parameters. +type RestoreRequest struct { + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. The Days element is required for regular restores, and must not + // be provided for select requests. + Days int32 + + // The optional description for the job. + Description *string + + // S3 Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters + + // Retrieval tier at which the restore will be processed. + Tier Tier + + // Type of restore request. + Type RestoreRequestType + + noSmithyDocumentSerde +} + +// Specifies the redirect behavior and when a redirect is applied. 
For more +// information about routing rules, see Configuring advanced conditional redirects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon S3 User Guide. +type RoutingRule struct { + + // Container for redirect information. You can redirect requests to another host, + // to another page, or with another protocol. In the event of an error, you can + // specify a different error code to return. + // + // This member is required. + Redirect *Redirect + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition + + noSmithyDocumentSerde +} + +// A container for object key name prefix and suffix filtering rules. +type S3KeyFilter struct { + + // A list of containers for the key-value pair that defines the criteria for the + // filter rule. + FilterRules []FilterRule + + noSmithyDocumentSerde +} + +// Describes an Amazon S3 location that will receive the results of the restore +// request. +type S3Location struct { + + // The name of the bucket where the restore results will be placed. + // + // This member is required. + BucketName *string + + // The prefix that is prepended to the restore results for this request. + // + // This member is required. + Prefix *string + + // A list of grants that control access to the staged results. + AccessControlList []Grant + + // The canned ACL to apply to the restore results. + CannedACL ObjectCannedACL + + // Contains the type of server-side encryption used. + Encryption *Encryption + + // The class of storage used to store the restore results. + StorageClass StorageClass + + // The tag-set that is applied to the restore results. + Tagging *Tagging + + // A list of metadata to store with the restore results in S3. + UserMetadata []MetadataEntry + + noSmithyDocumentSerde +} + +// Specifies the byte range of the object to get the records from. A record is +// processed when its first byte is contained by the range. This parameter is +// optional, but when specified, it must not be empty. See RFC 2616, Section +// 14.35.1 about how to specify the start and end of the range. +type ScanRange struct { + + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the object + // being queried. If only the End parameter is supplied, it is interpreted to mean + // scan the last N bytes of the file. For example, 50 means scan the last 50 bytes. + End int64 + + // Specifies the start of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is 0. If only start is supplied, it + // means scan from that point to the end of the file. For example, 50 means scan + // from byte 50 until the end of the file. + Start int64 + + noSmithyDocumentSerde +} + +// The container for selecting objects from a content event stream. 
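+//
+// Editor's note: an illustrative sketch, not generated code; a consumer of a
+// SelectObjectContent response typically type-switches over each event received
+// from the stream (the variable event is assumed to hold one such value):
+//
+//	switch v := event.(type) {
+//	case *types.SelectObjectContentEventStreamMemberRecords:
+//		_ = v.Value.Payload // partial result records as raw bytes
+//	case *types.SelectObjectContentEventStreamMemberEnd:
+//		// the stream is complete
+//	}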
+// +// The following types satisfy this interface: +// +// SelectObjectContentEventStreamMemberCont +// SelectObjectContentEventStreamMemberEnd +// SelectObjectContentEventStreamMemberProgress +// SelectObjectContentEventStreamMemberRecords +// SelectObjectContentEventStreamMemberStats +type SelectObjectContentEventStream interface { + isSelectObjectContentEventStream() +} + +// The Continuation Event. +type SelectObjectContentEventStreamMemberCont struct { + Value ContinuationEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberCont) isSelectObjectContentEventStream() {} + +// The End Event. +type SelectObjectContentEventStreamMemberEnd struct { + Value EndEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberEnd) isSelectObjectContentEventStream() {} + +// The Progress Event. +type SelectObjectContentEventStreamMemberProgress struct { + Value ProgressEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberProgress) isSelectObjectContentEventStream() {} + +// The Records Event. +type SelectObjectContentEventStreamMemberRecords struct { + Value RecordsEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberRecords) isSelectObjectContentEventStream() {} + +// The Stats Event. +type SelectObjectContentEventStreamMemberStats struct { + Value StatsEvent + + noSmithyDocumentSerde +} + +func (*SelectObjectContentEventStreamMemberStats) isSelectObjectContentEventStream() {} + +// Describes the parameters for Select job types. +type SelectParameters struct { + + // The expression that is used to query the object. + // + // This member is required. + Expression *string + + // The type of the provided expression (for example, SQL). + // + // This member is required. + ExpressionType ExpressionType + + // Describes the serialization format of the object. + // + // This member is required. + InputSerialization *InputSerialization + + // Describes how the results of the Select job are serialized. + // + // This member is required. + OutputSerialization *OutputSerialization + + noSmithyDocumentSerde +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If a PUT Object request doesn't specify any server-side encryption, this +// default encryption will be applied. If you don't specify a customer managed key +// at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key +// in your Amazon Web Services account the first time that you add an object +// encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for +// SSE-KMS. For more information, see PUT Bucket encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon S3 API Reference. +type ServerSideEncryptionByDefault struct { + + // Server-side encryption algorithm to use for the default encryption. + // + // This member is required. + SSEAlgorithm ServerSideEncryption + + // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services + // KMS key ID to use for the default encryption. This parameter is allowed if and + // only if SSEAlgorithm is set to aws:kms. You can specify the key ID or the Amazon + // Resource Name (ARN) of the KMS key. However, if you are using encryption with + // cross-account or Amazon Web Services service operations you must use a fully + // qualified KMS key ARN. 
For more information, see Using encryption for
+ // cross-account operations
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy).
+ // For example:
+ //
+ // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // * Key ARN:
+ // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // Amazon
+ // S3 only supports symmetric KMS keys and not asymmetric KMS keys. For more
+ // information, see Using symmetric and asymmetric keys
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+ // in the Amazon Web Services Key Management Service Developer Guide.
+ KMSMasterKeyID *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the default server-side-encryption configuration.
+type ServerSideEncryptionConfiguration struct {
+
+ // Container for information about a particular server-side encryption
+ // configuration rule.
+ //
+ // This member is required.
+ Rules []ServerSideEncryptionRule
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the default server-side encryption configuration.
+type ServerSideEncryptionRule struct {
+
+ // Specifies the default server-side encryption to apply to new objects in the
+ // bucket. If a PUT Object request doesn't specify any server-side encryption, this
+ // default encryption will be applied.
+ ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side
+ // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects
+ // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3
+ // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more
+ // information, see Amazon S3 Bucket Keys
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon
+ // S3 User Guide.
+ BucketKeyEnabled bool
+
+ noSmithyDocumentSerde
+}
+
+// A container that describes additional filters for identifying the source objects
+// that you want to replicate. You can choose to enable or disable the replication
+// of these objects. Currently, Amazon S3 supports only the filter that you can
+// specify for objects created with server-side encryption using a customer managed
+// key stored in Amazon Web Services Key Management Service (SSE-KMS).
+type SourceSelectionCriteria struct {
+
+ // A filter that you can specify to select modifications on replicas. Amazon S3
+ // doesn't replicate replica modifications by default. In the latest version of
+ // replication configuration (when Filter is specified), you can specify this
+ // element and set the status to Enabled to replicate modifications on replicas.
+ // If you don't specify the Filter element, Amazon S3 assumes that the replication
+ // configuration is the earlier version, V1. In the earlier version, this element
+ // is not allowed.
+ ReplicaModifications *ReplicaModifications
+
+ // A container for filter information for the selection of Amazon S3 objects
+ // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria
+ // in the replication configuration, this element is required.
+ SseKmsEncryptedObjects *SseKmsEncryptedObjects
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the use of SSE-KMS to encrypt delivered inventory reports.
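+//
+// Editor's note: an illustrative sketch, not generated code, assembling the
+// server-side-encryption types above into a default-encryption configuration
+// (the key ARN is the placeholder from the example above):
+//
+//	enc := types.ServerSideEncryptionConfiguration{
+//		Rules: []types.ServerSideEncryptionRule{{
+//			ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
+//				SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
+//				KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
+//			},
+//			BucketKeyEnabled: true,
+//		}},
+//	}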
+type SSEKMS struct { + + // Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web + // Services KMS) symmetric customer managed key to use for encrypting inventory + // reports. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +// A container for filter information for the selection of S3 objects encrypted +// with Amazon Web Services KMS. +type SseKmsEncryptedObjects struct { + + // Specifies whether Amazon S3 replicates objects created with server-side + // encryption using an Amazon Web Services KMS key stored in Amazon Web Services + // Key Management Service. + // + // This member is required. + Status SseKmsEncryptedObjectsStatus + + noSmithyDocumentSerde +} + +// Specifies the use of SSE-S3 to encrypt delivered inventory reports. +type SSES3 struct { + noSmithyDocumentSerde +} + +// Container for the stats details. +type Stats struct { + + // The total number of uncompressed object bytes processed. + BytesProcessed int64 + + // The total number of bytes of records payload data returned. + BytesReturned int64 + + // The total number of object bytes scanned. + BytesScanned int64 + + noSmithyDocumentSerde +} + +// Container for the Stats Event. +type StatsEvent struct { + + // The Stats event details. + Details *Stats + + noSmithyDocumentSerde +} + +// Specifies data related to access patterns to be collected and made available to +// analyze the tradeoffs between different storage classes for an Amazon S3 bucket. +type StorageClassAnalysis struct { + + // Specifies how data related to the storage class analysis for an Amazon S3 bucket + // should be exported. + DataExport *StorageClassAnalysisDataExport + + noSmithyDocumentSerde +} + +// Container for data related to the storage class analysis for an Amazon S3 bucket +// for export. +type StorageClassAnalysisDataExport struct { + + // The place to store the data for an analysis. + // + // This member is required. + Destination *AnalyticsExportDestination + + // The version of the output schema to use when exporting data. Must be V_1. + // + // This member is required. + OutputSchemaVersion StorageClassAnalysisSchemaVersion + + noSmithyDocumentSerde +} + +// A container of a key value name pair. +type Tag struct { + + // Name of the object key. + // + // This member is required. + Key *string + + // Value of the tag. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +// Container for TagSet elements. +type Tagging struct { + + // A collection for a set of tags + // + // This member is required. + TagSet []Tag + + noSmithyDocumentSerde +} + +// Container for granting information. Buckets that use the bucket owner enforced +// setting for Object Ownership don't support target grants. For more information, +// see Permissions server access log delivery +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. +type TargetGrant struct { + + // Container for the person being granted permissions. + Grantee *Grantee + + // Logging permissions assigned to the grantee for the bucket. + Permission BucketLogsPermission + + noSmithyDocumentSerde +} + +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, +// without additional operational overhead. +type Tiering struct { + + // S3 Intelligent-Tiering access tier. 
See Storage class for automatically + // optimizing frequently and infrequently accessed objects + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // + // This member is required. + AccessTier IntelligentTieringAccessTier + + // The number of consecutive days of no access after which an object will be + // eligible to be transitioned to the corresponding tier. The minimum number of + // days specified for Archive Access tier must be at least 90 days and Deep Archive + // Access tier must be at least 180 days. The maximum can be up to 2 years (730 + // days). + // + // This member is required. + Days int32 + + noSmithyDocumentSerde +} + +// A container for specifying the configuration for publication of messages to an +// Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects +// specified events. +type TopicConfiguration struct { + + // The Amazon S3 bucket event about which to send notifications. For more + // information, see Supported Event Types + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + // + // This member is required. + Events []Event + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // This member is required. + TopicArn *string + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the + // Amazon S3 User Guide. + Filter *NotificationConfigurationFilter + + // An optional unique identifier for configurations in a notification + // configuration. If you don't provide one, Amazon S3 will assign an ID. + Id *string + + noSmithyDocumentSerde +} + +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon S3 User Guide. +type Transition struct { + + // Indicates when objects are transitioned to the specified storage class. The date + // value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time + + // Indicates the number of days after creation when objects are transitioned to the + // specified storage class. The value must be a positive integer. + Days int32 + + // The storage class to which you want the object to transition. + StorageClass TransitionStorageClass + + noSmithyDocumentSerde +} + +// Describes the versioning state of an Amazon S3 bucket. For more information, see +// PUT Bucket versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon S3 API Reference. +type VersioningConfiguration struct { + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA delete. + // If the bucket has never been so configured, this element is not returned. + MFADelete MFADelete + + // The versioning state of the bucket. + Status BucketVersioningStatus + + noSmithyDocumentSerde +} + +// Specifies website configuration parameters for an Amazon S3 bucket. 
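+//
+// Editor's note: an illustrative sketch, not generated code, combining the
+// website types above; it serves index.html and redirects docs/ keys to
+// documents/ as in the Redirect example (names are placeholders):
+//
+//	site := types.WebsiteConfiguration{
+//		IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
+//		RoutingRules: []types.RoutingRule{{
+//			Condition: &types.Condition{KeyPrefixEquals: aws.String("docs/")},
+//			Redirect:  &types.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
+//		}},
+//	}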
+type WebsiteConfiguration struct { + + // The name of the error document for the website. + ErrorDocument *ErrorDocument + + // The name of the index document for the website. + IndexDocument *IndexDocument + + // The redirect behavior for every request to this bucket's website endpoint. If + // you specify this property, you can't specify any other property. + RedirectAllRequestsTo *RedirectAllRequestsTo + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []RoutingRule + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +// UnknownUnionMember is returned when a union member is returned over the wire, +// but has an unknown tag. +type UnknownUnionMember struct { + Tag string + Value []byte + + noSmithyDocumentSerde +} + +func (*UnknownUnionMember) isAnalyticsFilter() {} +func (*UnknownUnionMember) isLifecycleRuleFilter() {} +func (*UnknownUnionMember) isMetricsFilter() {} +func (*UnknownUnionMember) isReplicationRuleFilter() {} +func (*UnknownUnionMember) isSelectObjectContentEventStream() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go new file mode 100644 index 000000000..ccd845a71 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go @@ -0,0 +1,5494 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpAbortMultipartUpload struct { +} + +func (*validateOpAbortMultipartUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAbortMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AbortMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAbortMultipartUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCompleteMultipartUpload struct { +} + +func (*validateOpCompleteMultipartUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCompleteMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CompleteMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCompleteMultipartUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCopyObject struct { +} + +func (*validateOpCopyObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCopyObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CopyObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCopyObjectInput(input); err != nil { + return out, metadata, 
err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateBucket struct { +} + +func (*validateOpCreateBucket) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateBucketInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateBucketInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateMultipartUpload struct { +} + +func (*validateOpCreateMultipartUpload) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateMultipartUploadInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketAnalyticsConfiguration struct { +} + +func (*validateOpDeleteBucketAnalyticsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketAnalyticsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketCors struct { +} + +func (*validateOpDeleteBucketCors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketCorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketCorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketEncryption struct { +} + +func (*validateOpDeleteBucketEncryption) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketEncryptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketEncryptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucket struct { +} + +func (*validateOpDeleteBucket) 
ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketIntelligentTieringConfiguration struct { +} + +func (*validateOpDeleteBucketIntelligentTieringConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketIntelligentTieringConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketInventoryConfiguration struct { +} + +func (*validateOpDeleteBucketInventoryConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketInventoryConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketLifecycle struct { +} + +func (*validateOpDeleteBucketLifecycle) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketLifecycle) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketLifecycleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketLifecycleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketMetricsConfiguration struct { +} + +func (*validateOpDeleteBucketMetricsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketMetricsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type 
validateOpDeleteBucketOwnershipControls struct { +} + +func (*validateOpDeleteBucketOwnershipControls) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketOwnershipControlsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketPolicy struct { +} + +func (*validateOpDeleteBucketPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketReplication struct { +} + +func (*validateOpDeleteBucketReplication) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketReplicationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketReplicationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketTagging struct { +} + +func (*validateOpDeleteBucketTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteBucketWebsite struct { +} + +func (*validateOpDeleteBucketWebsite) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketWebsiteInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketWebsiteInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteObject struct { +} + +func (*validateOpDeleteObject) ID() string { + return 
"OperationInputValidation" +} + +func (m *validateOpDeleteObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteObjects struct { +} + +func (*validateOpDeleteObjects) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteObjectsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteObjectsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteObjectTagging struct { +} + +func (*validateOpDeleteObjectTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteObjectTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteObjectTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeletePublicAccessBlock struct { +} + +func (*validateOpDeletePublicAccessBlock) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeletePublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeletePublicAccessBlockInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeletePublicAccessBlockInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketAccelerateConfiguration struct { +} + +func (*validateOpGetBucketAccelerateConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketAccelerateConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketAcl struct { +} + +func (*validateOpGetBucketAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next 
middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketAnalyticsConfiguration struct { +} + +func (*validateOpGetBucketAnalyticsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketAnalyticsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketCors struct { +} + +func (*validateOpGetBucketCors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketCorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketCorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketEncryption struct { +} + +func (*validateOpGetBucketEncryption) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketEncryptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketEncryptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketIntelligentTieringConfiguration struct { +} + +func (*validateOpGetBucketIntelligentTieringConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketIntelligentTieringConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketInventoryConfiguration struct { +} + +func (*validateOpGetBucketInventoryConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next 
middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketInventoryConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketLifecycleConfiguration struct { +} + +func (*validateOpGetBucketLifecycleConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketLifecycleConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketLocation struct { +} + +func (*validateOpGetBucketLocation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketLocation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketLocationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketLocationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketLogging struct { +} + +func (*validateOpGetBucketLogging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketLoggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketLoggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketMetricsConfiguration struct { +} + +func (*validateOpGetBucketMetricsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketMetricsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketNotificationConfiguration struct { +} + +func (*validateOpGetBucketNotificationConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next 
middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketNotificationConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketOwnershipControls struct { +} + +func (*validateOpGetBucketOwnershipControls) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketOwnershipControlsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketOwnershipControlsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketPolicy struct { +} + +func (*validateOpGetBucketPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketPolicyStatus struct { +} + +func (*validateOpGetBucketPolicyStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketPolicyStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketPolicyStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketPolicyStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketReplication struct { +} + +func (*validateOpGetBucketReplication) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketReplicationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketReplicationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketRequestPayment struct { +} + +func (*validateOpGetBucketRequestPayment) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, 
+) { + input, ok := in.Parameters.(*GetBucketRequestPaymentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketRequestPaymentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketTagging struct { +} + +func (*validateOpGetBucketTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketVersioning struct { +} + +func (*validateOpGetBucketVersioning) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketVersioningInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketVersioningInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetBucketWebsite struct { +} + +func (*validateOpGetBucketWebsite) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketWebsiteInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketWebsiteInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectAcl struct { +} + +func (*validateOpGetObjectAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectAttributes struct { +} + +func (*validateOpGetObjectAttributes) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectAttributes) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectAttributesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectAttributesInput(input); err != nil { + 
return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObject struct { +} + +func (*validateOpGetObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectLegalHold struct { +} + +func (*validateOpGetObjectLegalHold) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectLegalHoldInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectLegalHoldInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectLockConfiguration struct { +} + +func (*validateOpGetObjectLockConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectLockConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectLockConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectRetention struct { +} + +func (*validateOpGetObjectRetention) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectRetentionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectRetentionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectTagging struct { +} + +func (*validateOpGetObjectTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetObjectTorrent struct { +} + +func (*validateOpGetObjectTorrent) ID() string { + return "OperationInputValidation" +} + +func 
(m *validateOpGetObjectTorrent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetObjectTorrentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetObjectTorrentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetPublicAccessBlock struct { +} + +func (*validateOpGetPublicAccessBlock) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetPublicAccessBlockInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetPublicAccessBlockInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpHeadBucket struct { +} + +func (*validateOpHeadBucket) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpHeadBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*HeadBucketInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpHeadBucketInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpHeadObject struct { +} + +func (*validateOpHeadObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpHeadObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*HeadObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpHeadObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketAnalyticsConfigurations struct { +} + +func (*validateOpListBucketAnalyticsConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketAnalyticsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketAnalyticsConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketIntelligentTieringConfigurations struct { +} + +func (*validateOpListBucketIntelligentTieringConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketIntelligentTieringConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next 
middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketIntelligentTieringConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketInventoryConfigurations struct { +} + +func (*validateOpListBucketInventoryConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketInventoryConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketInventoryConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListBucketMetricsConfigurations struct { +} + +func (*validateOpListBucketMetricsConfigurations) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListBucketMetricsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListBucketMetricsConfigurationsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListMultipartUploads struct { +} + +func (*validateOpListMultipartUploads) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListMultipartUploads) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListMultipartUploadsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListMultipartUploadsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListObjects struct { +} + +func (*validateOpListObjects) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListObjectsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListObjectsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListObjectsV2 struct { +} + +func (*validateOpListObjectsV2) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListObjectsV2) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out 
middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListObjectsV2Input) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListObjectsV2Input(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListObjectVersions struct { +} + +func (*validateOpListObjectVersions) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListObjectVersions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListObjectVersionsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListObjectVersionsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListParts struct { +} + +func (*validateOpListParts) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListParts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListPartsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListPartsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketAccelerateConfiguration struct { +} + +func (*validateOpPutBucketAccelerateConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketAccelerateConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketAcl struct { +} + +func (*validateOpPutBucketAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketAnalyticsConfiguration struct { +} + +func (*validateOpPutBucketAnalyticsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput) + if !ok { + return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketAnalyticsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketCors struct { +} + +func (*validateOpPutBucketCors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketCorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketCorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketEncryption struct { +} + +func (*validateOpPutBucketEncryption) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketEncryptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketEncryptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketIntelligentTieringConfiguration struct { +} + +func (*validateOpPutBucketIntelligentTieringConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketIntelligentTieringConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketInventoryConfiguration struct { +} + +func (*validateOpPutBucketInventoryConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketInventoryConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketLifecycleConfiguration struct { +} + +func (*validateOpPutBucketLifecycleConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput) + if !ok { + return 
out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketLifecycleConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketLogging struct { +} + +func (*validateOpPutBucketLogging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketLoggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketLoggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketMetricsConfiguration struct { +} + +func (*validateOpPutBucketMetricsConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketMetricsConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketNotificationConfiguration struct { +} + +func (*validateOpPutBucketNotificationConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketNotificationConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketOwnershipControls struct { +} + +func (*validateOpPutBucketOwnershipControls) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketOwnershipControlsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketOwnershipControlsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketPolicy struct { +} + +func (*validateOpPutBucketPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown 
input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketReplication struct { +} + +func (*validateOpPutBucketReplication) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketReplicationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketReplicationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketRequestPayment struct { +} + +func (*validateOpPutBucketRequestPayment) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketRequestPaymentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketRequestPaymentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketTagging struct { +} + +func (*validateOpPutBucketTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketVersioning struct { +} + +func (*validateOpPutBucketVersioning) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketVersioningInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketVersioningInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutBucketWebsite struct { +} + +func (*validateOpPutBucketWebsite) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutBucketWebsiteInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutBucketWebsiteInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} 
+ +type validateOpPutObjectAcl struct { +} + +func (*validateOpPutObjectAcl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectAclInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectAclInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObject struct { +} + +func (*validateOpPutObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectLegalHold struct { +} + +func (*validateOpPutObjectLegalHold) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectLegalHoldInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectLegalHoldInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectLockConfiguration struct { +} + +func (*validateOpPutObjectLockConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectLockConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectLockConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectRetention struct { +} + +func (*validateOpPutObjectRetention) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectRetentionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectRetentionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutObjectTagging struct { +} + +func (*validateOpPutObjectTagging) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutObjectTagging) HandleInitialize(ctx context.Context, in 
middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutObjectTaggingInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutObjectTaggingInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutPublicAccessBlock struct { +} + +func (*validateOpPutPublicAccessBlock) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutPublicAccessBlockInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutPublicAccessBlockInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRestoreObject struct { +} + +func (*validateOpRestoreObject) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRestoreObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RestoreObjectInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRestoreObjectInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpSelectObjectContent struct { +} + +func (*validateOpSelectObjectContent) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSelectObjectContent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SelectObjectContentInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSelectObjectContentInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUploadPartCopy struct { +} + +func (*validateOpUploadPartCopy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUploadPartCopy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UploadPartCopyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUploadPartCopyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUploadPart struct { +} + +func (*validateOpUploadPart) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUploadPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UploadPartInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input 
parameters type %T", in.Parameters) + } + if err := validateOpUploadPartInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpWriteGetObjectResponse struct { +} + +func (*validateOpWriteGetObjectResponse) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpWriteGetObjectResponse) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*WriteGetObjectResponseInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpWriteGetObjectResponseInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpAbortMultipartUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAbortMultipartUpload{}, middleware.After) +} + +func addOpCompleteMultipartUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCompleteMultipartUpload{}, middleware.After) +} + +func addOpCopyObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCopyObject{}, middleware.After) +} + +func addOpCreateBucketValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After) +} + +func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After) +} + +func addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketAnalyticsConfiguration{}, middleware.After) +} + +func addOpDeleteBucketCorsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketCors{}, middleware.After) +} + +func addOpDeleteBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketEncryption{}, middleware.After) +} + +func addOpDeleteBucketValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucket{}, middleware.After) +} + +func addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) +} + +func addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketInventoryConfiguration{}, middleware.After) +} + +func addOpDeleteBucketLifecycleValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketLifecycle{}, middleware.After) +} + +func addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketMetricsConfiguration{}, middleware.After) +} + +func addOpDeleteBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketOwnershipControls{}, middleware.After) +} + +func addOpDeleteBucketPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketPolicy{}, middleware.After) +} + 
+func addOpDeleteBucketReplicationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketReplication{}, middleware.After) +} + +func addOpDeleteBucketTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketTagging{}, middleware.After) +} + +func addOpDeleteBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketWebsite{}, middleware.After) +} + +func addOpDeleteObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteObject{}, middleware.After) +} + +func addOpDeleteObjectsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteObjects{}, middleware.After) +} + +func addOpDeleteObjectTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteObjectTagging{}, middleware.After) +} + +func addOpDeletePublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeletePublicAccessBlock{}, middleware.After) +} + +func addOpGetBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketAccelerateConfiguration{}, middleware.After) +} + +func addOpGetBucketAclValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketAcl{}, middleware.After) +} + +func addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketAnalyticsConfiguration{}, middleware.After) +} + +func addOpGetBucketCorsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketCors{}, middleware.After) +} + +func addOpGetBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketEncryption{}, middleware.After) +} + +func addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketIntelligentTieringConfiguration{}, middleware.After) +} + +func addOpGetBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketInventoryConfiguration{}, middleware.After) +} + +func addOpGetBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketLifecycleConfiguration{}, middleware.After) +} + +func addOpGetBucketLocationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketLocation{}, middleware.After) +} + +func addOpGetBucketLoggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketLogging{}, middleware.After) +} + +func addOpGetBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketMetricsConfiguration{}, middleware.After) +} + +func addOpGetBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketNotificationConfiguration{}, middleware.After) +} + +func addOpGetBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketOwnershipControls{}, 
middleware.After) +} + +func addOpGetBucketPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketPolicy{}, middleware.After) +} + +func addOpGetBucketPolicyStatusValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketPolicyStatus{}, middleware.After) +} + +func addOpGetBucketReplicationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketReplication{}, middleware.After) +} + +func addOpGetBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketRequestPayment{}, middleware.After) +} + +func addOpGetBucketTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketTagging{}, middleware.After) +} + +func addOpGetBucketVersioningValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketVersioning{}, middleware.After) +} + +func addOpGetBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketWebsite{}, middleware.After) +} + +func addOpGetObjectAclValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectAcl{}, middleware.After) +} + +func addOpGetObjectAttributesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectAttributes{}, middleware.After) +} + +func addOpGetObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObject{}, middleware.After) +} + +func addOpGetObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectLegalHold{}, middleware.After) +} + +func addOpGetObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectLockConfiguration{}, middleware.After) +} + +func addOpGetObjectRetentionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectRetention{}, middleware.After) +} + +func addOpGetObjectTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectTagging{}, middleware.After) +} + +func addOpGetObjectTorrentValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetObjectTorrent{}, middleware.After) +} + +func addOpGetPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetPublicAccessBlock{}, middleware.After) +} + +func addOpHeadBucketValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpHeadBucket{}, middleware.After) +} + +func addOpHeadObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpHeadObject{}, middleware.After) +} + +func addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListBucketAnalyticsConfigurations{}, middleware.After) +} + +func addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListBucketIntelligentTieringConfigurations{}, middleware.After) +} + +func addOpListBucketInventoryConfigurationsValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpListBucketInventoryConfigurations{}, middleware.After) +} + +func addOpListBucketMetricsConfigurationsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListBucketMetricsConfigurations{}, middleware.After) +} + +func addOpListMultipartUploadsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListMultipartUploads{}, middleware.After) +} + +func addOpListObjectsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListObjects{}, middleware.After) +} + +func addOpListObjectsV2ValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListObjectsV2{}, middleware.After) +} + +func addOpListObjectVersionsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListObjectVersions{}, middleware.After) +} + +func addOpListPartsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListParts{}, middleware.After) +} + +func addOpPutBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketAccelerateConfiguration{}, middleware.After) +} + +func addOpPutBucketAclValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketAcl{}, middleware.After) +} + +func addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketAnalyticsConfiguration{}, middleware.After) +} + +func addOpPutBucketCorsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketCors{}, middleware.After) +} + +func addOpPutBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketEncryption{}, middleware.After) +} + +func addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketIntelligentTieringConfiguration{}, middleware.After) +} + +func addOpPutBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketInventoryConfiguration{}, middleware.After) +} + +func addOpPutBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketLifecycleConfiguration{}, middleware.After) +} + +func addOpPutBucketLoggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketLogging{}, middleware.After) +} + +func addOpPutBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketMetricsConfiguration{}, middleware.After) +} + +func addOpPutBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketNotificationConfiguration{}, middleware.After) +} + +func addOpPutBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketOwnershipControls{}, middleware.After) +} + +func addOpPutBucketPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketPolicy{}, middleware.After) +} + +func addOpPutBucketReplicationValidationMiddleware(stack 
*middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketReplication{}, middleware.After) +} + +func addOpPutBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketRequestPayment{}, middleware.After) +} + +func addOpPutBucketTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketTagging{}, middleware.After) +} + +func addOpPutBucketVersioningValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketVersioning{}, middleware.After) +} + +func addOpPutBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutBucketWebsite{}, middleware.After) +} + +func addOpPutObjectAclValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObjectAcl{}, middleware.After) +} + +func addOpPutObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObject{}, middleware.After) +} + +func addOpPutObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObjectLegalHold{}, middleware.After) +} + +func addOpPutObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObjectLockConfiguration{}, middleware.After) +} + +func addOpPutObjectRetentionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObjectRetention{}, middleware.After) +} + +func addOpPutObjectTaggingValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutObjectTagging{}, middleware.After) +} + +func addOpPutPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutPublicAccessBlock{}, middleware.After) +} + +func addOpRestoreObjectValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRestoreObject{}, middleware.After) +} + +func addOpSelectObjectContentValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpSelectObjectContent{}, middleware.After) +} + +func addOpUploadPartCopyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUploadPartCopy{}, middleware.After) +} + +func addOpUploadPartValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUploadPart{}, middleware.After) +} + +func addOpWriteGetObjectResponseValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpWriteGetObjectResponse{}, middleware.After) +} + +func validateAccessControlPolicy(v *types.AccessControlPolicy) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AccessControlPolicy"} + if v.Grants != nil { + if err := validateGrants(v.Grants); err != nil { + invalidParams.AddNested("Grants", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAccessControlTranslation(v *types.AccessControlTranslation) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AccessControlTranslation"} + if len(v.Owner) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Owner")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + 
return nil + } +} + +func validateAnalyticsAndOperator(v *types.AnalyticsAndOperator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AnalyticsAndOperator"} + if v.Tags != nil { + if err := validateTagSet(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAnalyticsConfiguration(v *types.AnalyticsConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AnalyticsConfiguration"} + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.Filter != nil { + if err := validateAnalyticsFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if v.StorageClassAnalysis == nil { + invalidParams.Add(smithy.NewErrParamRequired("StorageClassAnalysis")) + } else if v.StorageClassAnalysis != nil { + if err := validateStorageClassAnalysis(v.StorageClassAnalysis); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAnalyticsExportDestination(v *types.AnalyticsExportDestination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AnalyticsExportDestination"} + if v.S3BucketDestination == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination")) + } else if v.S3BucketDestination != nil { + if err := validateAnalyticsS3BucketDestination(v.S3BucketDestination); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAnalyticsFilter(v types.AnalyticsFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AnalyticsFilter"} + switch uv := v.(type) { + case *types.AnalyticsFilterMemberAnd: + if err := validateAnalyticsAndOperator(&uv.Value); err != nil { + invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + } + + case *types.AnalyticsFilterMemberTag: + if err := validateTag(&uv.Value); err != nil { + invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AnalyticsS3BucketDestination"} + if len(v.Format) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Format")) + } + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BucketLifecycleConfiguration"} + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateLifecycleRules(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateBucketLoggingStatus(v *types.BucketLoggingStatus) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BucketLoggingStatus"} + if v.LoggingEnabled != nil { + if err := validateLoggingEnabled(v.LoggingEnabled); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCORSConfiguration(v *types.CORSConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CORSConfiguration"} + if v.CORSRules == nil { + invalidParams.Add(smithy.NewErrParamRequired("CORSRules")) + } else if v.CORSRules != nil { + if err := validateCORSRules(v.CORSRules); err != nil { + invalidParams.AddNested("CORSRules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCORSRule(v *types.CORSRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CORSRule"} + if v.AllowedMethods == nil { + invalidParams.Add(smithy.NewErrParamRequired("AllowedMethods")) + } + if v.AllowedOrigins == nil { + invalidParams.Add(smithy.NewErrParamRequired("AllowedOrigins")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCORSRules(v []types.CORSRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CORSRules"} + for i := range v { + if err := validateCORSRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDelete(v *types.Delete) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Delete"} + if v.Objects == nil { + invalidParams.Add(smithy.NewErrParamRequired("Objects")) + } else if v.Objects != nil { + if err := validateObjectIdentifierList(v.Objects); err != nil { + invalidParams.AddNested("Objects", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDestination(v *types.Destination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Destination"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.AccessControlTranslation != nil { + if err := validateAccessControlTranslation(v.AccessControlTranslation); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(smithy.InvalidParamsError)) + } + } + if v.ReplicationTime != nil { + if err := validateReplicationTime(v.ReplicationTime); err != nil { + invalidParams.AddNested("ReplicationTime", err.(smithy.InvalidParamsError)) + } + } + if v.Metrics != nil { + if err := validateMetrics(v.Metrics); err != nil { + invalidParams.AddNested("Metrics", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateEncryption(v *types.Encryption) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Encryption"} + if len(v.EncryptionType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("EncryptionType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateErrorDocument(v *types.ErrorDocument) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ErrorDocument"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateExistingObjectReplication(v *types.ExistingObjectReplication) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ExistingObjectReplication"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGlacierJobParameters(v *types.GlacierJobParameters) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GlacierJobParameters"} + if len(v.Tier) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Tier")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGrant(v *types.Grant) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Grant"} + if v.Grantee != nil { + if err := validateGrantee(v.Grantee); err != nil { + invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGrantee(v *types.Grantee) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Grantee"} + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateGrants(v []types.Grant) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Grants"} + for i := range v { + if err := validateGrant(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateIndexDocument(v *types.IndexDocument) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "IndexDocument"} + if v.Suffix == nil { + invalidParams.Add(smithy.NewErrParamRequired("Suffix")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringAndOperator"} + if v.Tags != nil { + if err := validateTagSet(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringConfiguration"} + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.Filter != nil { + if err := validateIntelligentTieringFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if v.Tierings == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("Tierings")) + } else if v.Tierings != nil { + if err := validateTieringList(v.Tierings); err != nil { + invalidParams.AddNested("Tierings", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateIntelligentTieringFilter(v *types.IntelligentTieringFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringFilter"} + if v.Tag != nil { + if err := validateTag(v.Tag); err != nil { + invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) + } + } + if v.And != nil { + if err := validateIntelligentTieringAndOperator(v.And); err != nil { + invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventoryConfiguration(v *types.InventoryConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventoryConfiguration"} + if v.Destination == nil { + invalidParams.Add(smithy.NewErrParamRequired("Destination")) + } else if v.Destination != nil { + if err := validateInventoryDestination(v.Destination); err != nil { + invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) + } + } + if v.Filter != nil { + if err := validateInventoryFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if len(v.IncludedObjectVersions) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("IncludedObjectVersions")) + } + if v.Schedule == nil { + invalidParams.Add(smithy.NewErrParamRequired("Schedule")) + } else if v.Schedule != nil { + if err := validateInventorySchedule(v.Schedule); err != nil { + invalidParams.AddNested("Schedule", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventoryDestination(v *types.InventoryDestination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventoryDestination"} + if v.S3BucketDestination == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination")) + } else if v.S3BucketDestination != nil { + if err := validateInventoryS3BucketDestination(v.S3BucketDestination); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventoryEncryption(v *types.InventoryEncryption) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventoryEncryption"} + if v.SSEKMS != nil { + if err := validateSSEKMS(v.SSEKMS); err != nil { + invalidParams.AddNested("SSEKMS", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventoryFilter(v *types.InventoryFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventoryFilter"} + if v.Prefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("Prefix")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventoryS3BucketDestination(v *types.InventoryS3BucketDestination) error { + if v == nil { + return nil 
+ } + invalidParams := smithy.InvalidParamsError{Context: "InventoryS3BucketDestination"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if len(v.Format) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Format")) + } + if v.Encryption != nil { + if err := validateInventoryEncryption(v.Encryption); err != nil { + invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInventorySchedule(v *types.InventorySchedule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InventorySchedule"} + if len(v.Frequency) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Frequency")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfiguration"} + if v.LambdaFunctionArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("LambdaFunctionArn")) + } + if v.Events == nil { + invalidParams.Add(smithy.NewErrParamRequired("Events")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfigurationList"} + for i := range v { + if err := validateLambdaFunctionConfiguration(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLifecycleRule(v *types.LifecycleRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LifecycleRule"} + if v.Filter != nil { + if err := validateLifecycleRuleFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleAndOperator"} + if v.Tags != nil { + if err := validateTagSet(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLifecycleRuleFilter(v types.LifecycleRuleFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleFilter"} + switch uv := v.(type) { + case *types.LifecycleRuleFilterMemberAnd: + if err := validateLifecycleRuleAndOperator(&uv.Value); err != nil { + invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + } + + case *types.LifecycleRuleFilterMemberTag: + if err := validateTag(&uv.Value); err != nil { + invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLifecycleRules(v []types.LifecycleRule) error { + if v == nil { + return nil + } + invalidParams := 
smithy.InvalidParamsError{Context: "LifecycleRules"} + for i := range v { + if err := validateLifecycleRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLoggingEnabled(v *types.LoggingEnabled) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LoggingEnabled"} + if v.TargetBucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetBucket")) + } + if v.TargetGrants != nil { + if err := validateTargetGrants(v.TargetGrants); err != nil { + invalidParams.AddNested("TargetGrants", err.(smithy.InvalidParamsError)) + } + } + if v.TargetPrefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetPrefix")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateMetrics(v *types.Metrics) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Metrics"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateMetricsAndOperator(v *types.MetricsAndOperator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MetricsAndOperator"} + if v.Tags != nil { + if err := validateTagSet(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateMetricsConfiguration(v *types.MetricsConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MetricsConfiguration"} + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.Filter != nil { + if err := validateMetricsFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateMetricsFilter(v types.MetricsFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MetricsFilter"} + switch uv := v.(type) { + case *types.MetricsFilterMemberAnd: + if err := validateMetricsAndOperator(&uv.Value); err != nil { + invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + } + + case *types.MetricsFilterMemberTag: + if err := validateTag(&uv.Value); err != nil { + invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateNotificationConfiguration(v *types.NotificationConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "NotificationConfiguration"} + if v.TopicConfigurations != nil { + if err := validateTopicConfigurationList(v.TopicConfigurations); err != nil { + invalidParams.AddNested("TopicConfigurations", err.(smithy.InvalidParamsError)) + } + } + if v.QueueConfigurations != nil { + if err := validateQueueConfigurationList(v.QueueConfigurations); err != nil { + invalidParams.AddNested("QueueConfigurations", err.(smithy.InvalidParamsError)) + } + } + if v.LambdaFunctionConfigurations != nil { + if err := validateLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations); err != nil { + 
invalidParams.AddNested("LambdaFunctionConfigurations", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateObjectIdentifier(v *types.ObjectIdentifier) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifier"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateObjectIdentifierList(v []types.ObjectIdentifier) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifierList"} + for i := range v { + if err := validateObjectIdentifier(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOutputLocation(v *types.OutputLocation) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OutputLocation"} + if v.S3 != nil { + if err := validateS3Location(v.S3); err != nil { + invalidParams.AddNested("S3", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOwnershipControls(v *types.OwnershipControls) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OwnershipControls"} + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateOwnershipControlsRules(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOwnershipControlsRule(v *types.OwnershipControlsRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRule"} + if len(v.ObjectOwnership) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ObjectOwnership")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOwnershipControlsRules(v []types.OwnershipControlsRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRules"} + for i := range v { + if err := validateOwnershipControlsRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateQueueConfiguration(v *types.QueueConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "QueueConfiguration"} + if v.QueueArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueueArn")) + } + if v.Events == nil { + invalidParams.Add(smithy.NewErrParamRequired("Events")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateQueueConfigurationList(v []types.QueueConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "QueueConfigurationList"} + for i := range v { + if err := validateQueueConfiguration(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return 
invalidParams + } else { + return nil + } +} + +func validateRedirectAllRequestsTo(v *types.RedirectAllRequestsTo) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RedirectAllRequestsTo"} + if v.HostName == nil { + invalidParams.Add(smithy.NewErrParamRequired("HostName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicaModifications(v *types.ReplicaModifications) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicaModifications"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationConfiguration(v *types.ReplicationConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationConfiguration"} + if v.Role == nil { + invalidParams.Add(smithy.NewErrParamRequired("Role")) + } + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateReplicationRules(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationRule(v *types.ReplicationRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationRule"} + if v.Filter != nil { + if err := validateReplicationRuleFilter(v.Filter); err != nil { + invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) + } + } + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if v.SourceSelectionCriteria != nil { + if err := validateSourceSelectionCriteria(v.SourceSelectionCriteria); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(smithy.InvalidParamsError)) + } + } + if v.ExistingObjectReplication != nil { + if err := validateExistingObjectReplication(v.ExistingObjectReplication); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(smithy.InvalidParamsError)) + } + } + if v.Destination == nil { + invalidParams.Add(smithy.NewErrParamRequired("Destination")) + } else if v.Destination != nil { + if err := validateDestination(v.Destination); err != nil { + invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleAndOperator"} + if v.Tags != nil { + if err := validateTagSet(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationRuleFilter(v types.ReplicationRuleFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleFilter"} + switch uv := v.(type) { + case *types.ReplicationRuleFilterMemberAnd: + if err := validateReplicationRuleAndOperator(&uv.Value); err != nil { + invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + } + + case *types.ReplicationRuleFilterMemberTag: + if err := validateTag(&uv.Value); err != nil { + 
invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationRules(v []types.ReplicationRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationRules"} + for i := range v { + if err := validateReplicationRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateReplicationTime(v *types.ReplicationTime) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicationTime"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if v.Time == nil { + invalidParams.Add(smithy.NewErrParamRequired("Time")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRequestPaymentConfiguration(v *types.RequestPaymentConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RequestPaymentConfiguration"} + if len(v.Payer) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Payer")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRestoreRequest(v *types.RestoreRequest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RestoreRequest"} + if v.GlacierJobParameters != nil { + if err := validateGlacierJobParameters(v.GlacierJobParameters); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(smithy.InvalidParamsError)) + } + } + if v.SelectParameters != nil { + if err := validateSelectParameters(v.SelectParameters); err != nil { + invalidParams.AddNested("SelectParameters", err.(smithy.InvalidParamsError)) + } + } + if v.OutputLocation != nil { + if err := validateOutputLocation(v.OutputLocation); err != nil { + invalidParams.AddNested("OutputLocation", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRoutingRule(v *types.RoutingRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RoutingRule"} + if v.Redirect == nil { + invalidParams.Add(smithy.NewErrParamRequired("Redirect")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateRoutingRules(v []types.RoutingRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RoutingRules"} + for i := range v { + if err := validateRoutingRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateS3Location(v *types.S3Location) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "S3Location"} + if v.BucketName == nil { + invalidParams.Add(smithy.NewErrParamRequired("BucketName")) + } + if v.Prefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("Prefix")) + } + if v.Encryption != nil { + if err := validateEncryption(v.Encryption); err != nil { + invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError)) + } + } + if v.AccessControlList != nil { + if err := 
validateGrants(v.AccessControlList); err != nil { + invalidParams.AddNested("AccessControlList", err.(smithy.InvalidParamsError)) + } + } + if v.Tagging != nil { + if err := validateTagging(v.Tagging); err != nil { + invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSelectParameters(v *types.SelectParameters) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SelectParameters"} + if v.InputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("InputSerialization")) + } + if len(v.ExpressionType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ExpressionType")) + } + if v.Expression == nil { + invalidParams.Add(smithy.NewErrParamRequired("Expression")) + } + if v.OutputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionByDefault"} + if len(v.SSEAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SSEAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionConfiguration"} + if v.Rules == nil { + invalidParams.Add(smithy.NewErrParamRequired("Rules")) + } else if v.Rules != nil { + if err := validateServerSideEncryptionRules(v.Rules); err != nil { + invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionRule(v *types.ServerSideEncryptionRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRule"} + if v.ApplyServerSideEncryptionByDefault != nil { + if err := validateServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateServerSideEncryptionRules(v []types.ServerSideEncryptionRule) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRules"} + for i := range v { + if err := validateServerSideEncryptionRule(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSourceSelectionCriteria(v *types.SourceSelectionCriteria) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SourceSelectionCriteria"} + if v.SseKmsEncryptedObjects != nil { + if err := validateSseKmsEncryptedObjects(v.SseKmsEncryptedObjects); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(smithy.InvalidParamsError)) + } + } + if v.ReplicaModifications != nil { + if err := validateReplicaModifications(v.ReplicaModifications); err != nil { + 
invalidParams.AddNested("ReplicaModifications", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSSEKMS(v *types.SSEKMS) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SSEKMS"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SseKmsEncryptedObjects"} + if len(v.Status) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Status")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateStorageClassAnalysis(v *types.StorageClassAnalysis) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysis"} + if v.DataExport != nil { + if err := validateStorageClassAnalysisDataExport(v.DataExport); err != nil { + invalidParams.AddNested("DataExport", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysisDataExport"} + if len(v.OutputSchemaVersion) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("OutputSchemaVersion")) + } + if v.Destination == nil { + invalidParams.Add(smithy.NewErrParamRequired("Destination")) + } else if v.Destination != nil { + if err := validateAnalyticsExportDestination(v.Destination); err != nil { + invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagging(v *types.Tagging) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tagging"} + if v.TagSet == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagSet")) + } else if v.TagSet != nil { + if err := validateTagSet(v.TagSet); err != nil { + invalidParams.AddNested("TagSet", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagSet(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagSet"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTargetGrant(v *types.TargetGrant) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TargetGrant"} + if v.Grantee != nil { + if err := validateGrantee(v.Grantee); err != nil { + invalidParams.AddNested("Grantee", 
err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTargetGrants(v []types.TargetGrant) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TargetGrants"} + for i := range v { + if err := validateTargetGrant(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTiering(v *types.Tiering) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tiering"} + if len(v.AccessTier) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("AccessTier")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTieringList(v []types.Tiering) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TieringList"} + for i := range v { + if err := validateTiering(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTopicConfiguration(v *types.TopicConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TopicConfiguration"} + if v.TopicArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("TopicArn")) + } + if v.Events == nil { + invalidParams.Add(smithy.NewErrParamRequired("Events")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTopicConfigurationList(v []types.TopicConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TopicConfigurationList"} + for i := range v { + if err := validateTopicConfiguration(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateWebsiteConfiguration(v *types.WebsiteConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WebsiteConfiguration"} + if v.ErrorDocument != nil { + if err := validateErrorDocument(v.ErrorDocument); err != nil { + invalidParams.AddNested("ErrorDocument", err.(smithy.InvalidParamsError)) + } + } + if v.IndexDocument != nil { + if err := validateIndexDocument(v.IndexDocument); err != nil { + invalidParams.AddNested("IndexDocument", err.(smithy.InvalidParamsError)) + } + } + if v.RedirectAllRequestsTo != nil { + if err := validateRedirectAllRequestsTo(v.RedirectAllRequestsTo); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(smithy.InvalidParamsError)) + } + } + if v.RoutingRules != nil { + if err := validateRoutingRules(v.RoutingRules); err != nil { + invalidParams.AddNested("RoutingRules", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAbortMultipartUploadInput(v *AbortMultipartUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AbortMultipartUploadInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == 
nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCompleteMultipartUploadInput(v *CompleteMultipartUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CompleteMultipartUploadInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCopyObjectInput(v *CopyObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CopyObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.CopySource == nil { + invalidParams.Add(smithy.NewErrParamRequired("CopySource")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateBucketInput(v *CreateBucketInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateBucketInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateMultipartUploadInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketAnalyticsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketCorsInput(v *DeleteBucketCorsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketCorsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketEncryptionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketInput(v *DeleteBucketInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return 
nil + } +} + +func validateOpDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketIntelligentTieringConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInventoryConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketLifecycleInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetricsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketOwnershipControlsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketPolicyInput(v *DeleteBucketPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketPolicyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketReplicationInput(v *DeleteBucketReplicationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketReplicationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketTaggingInput(v *DeleteBucketTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: 
"DeleteBucketWebsiteInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteObjectInput(v *DeleteObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteObjectsInput(v *DeleteObjectsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Delete == nil { + invalidParams.Add(smithy.NewErrParamRequired("Delete")) + } else if v.Delete != nil { + if err := validateDelete(v.Delete); err != nil { + invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteObjectTaggingInput(v *DeleteObjectTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeletePublicAccessBlockInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketAccelerateConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketAclInput(v *GetBucketAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketAclInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketAnalyticsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketCorsInput(v *GetBucketCorsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketCorsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + 
return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketEncryptionInput(v *GetBucketEncryptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketEncryptionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketIntelligentTieringConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketInventoryConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketLifecycleConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketLocationInput(v *GetBucketLocationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketLocationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketLoggingInput(v *GetBucketLoggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketLoggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetricsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketNotificationConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput) error { + if v == nil { + return nil + } + invalidParams := 
smithy.InvalidParamsError{Context: "GetBucketOwnershipControlsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketPolicyInput(v *GetBucketPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyStatusInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketReplicationInput(v *GetBucketReplicationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketReplicationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketRequestPaymentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketTaggingInput(v *GetBucketTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketVersioningInput(v *GetBucketVersioningInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketVersioningInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBucketWebsiteInput(v *GetBucketWebsiteInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketWebsiteInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectAclInput(v *GetObjectAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectAclInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectAttributesInput(v *GetObjectAttributesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectAttributesInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.ObjectAttributes == nil { + invalidParams.Add(smithy.NewErrParamRequired("ObjectAttributes")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectInput(v *GetObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectLegalHoldInput(v *GetObjectLegalHoldInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectLegalHoldInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectLockConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectRetentionInput(v *GetObjectRetentionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectRetentionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectTaggingInput(v *GetObjectTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetObjectTorrentInput(v *GetObjectTorrentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetObjectTorrentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetPublicAccessBlockInput(v *GetPublicAccessBlockInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetPublicAccessBlockInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpHeadBucketInput(v *HeadBucketInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HeadBucketInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpHeadObjectInput(v 
*HeadObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HeadObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketAnalyticsConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketInventoryConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListBucketMetricsConfigurationsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListMultipartUploadsInput(v *ListMultipartUploadsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListMultipartUploadsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListObjectsInput(v *ListObjectsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListObjectsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListObjectsV2Input(v *ListObjectsV2Input) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListObjectsV2Input"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListObjectVersionsInput(v *ListObjectVersionsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListObjectVersionsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListPartsInput(v *ListPartsInput) error { + if v == nil { + return nil + } + 
invalidParams := smithy.InvalidParamsError{Context: "ListPartsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketAccelerateConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.AccelerateConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccelerateConfiguration")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketAclInput(v *PutBucketAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketAclInput"} + if v.AccessControlPolicy != nil { + if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) + } + } + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketAnalyticsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.AnalyticsConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("AnalyticsConfiguration")) + } else if v.AnalyticsConfiguration != nil { + if err := validateAnalyticsConfiguration(v.AnalyticsConfiguration); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketCorsInput(v *PutBucketCorsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketCorsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.CORSConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("CORSConfiguration")) + } else if v.CORSConfiguration != nil { + if err := validateCORSConfiguration(v.CORSConfiguration); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketEncryptionInput(v *PutBucketEncryptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketEncryptionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } else if v.ServerSideEncryptionConfiguration != nil { + if err := validateServerSideEncryptionConfiguration(v.ServerSideEncryptionConfiguration); err != nil { + 
invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketIntelligentTieringConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.IntelligentTieringConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("IntelligentTieringConfiguration")) + } else if v.IntelligentTieringConfiguration != nil { + if err := validateIntelligentTieringConfiguration(v.IntelligentTieringConfiguration); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketInventoryConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.InventoryConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("InventoryConfiguration")) + } else if v.InventoryConfiguration != nil { + if err := validateInventoryConfiguration(v.InventoryConfiguration); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketLifecycleConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.LifecycleConfiguration != nil { + if err := validateBucketLifecycleConfiguration(v.LifecycleConfiguration); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketLoggingInput(v *PutBucketLoggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketLoggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.BucketLoggingStatus == nil { + invalidParams.Add(smithy.NewErrParamRequired("BucketLoggingStatus")) + } else if v.BucketLoggingStatus != nil { + if err := validateBucketLoggingStatus(v.BucketLoggingStatus); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketMetricsConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Id == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if v.MetricsConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("MetricsConfiguration")) + } else if v.MetricsConfiguration != nil { + if err := validateMetricsConfiguration(v.MetricsConfiguration); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketNotificationConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.NotificationConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("NotificationConfiguration")) + } else if v.NotificationConfiguration != nil { + if err := validateNotificationConfiguration(v.NotificationConfiguration); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketOwnershipControlsInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.OwnershipControls == nil { + invalidParams.Add(smithy.NewErrParamRequired("OwnershipControls")) + } else if v.OwnershipControls != nil { + if err := validateOwnershipControls(v.OwnershipControls); err != nil { + invalidParams.AddNested("OwnershipControls", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketPolicyInput(v *PutBucketPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketPolicyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Policy == nil { + invalidParams.Add(smithy.NewErrParamRequired("Policy")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketReplicationInput(v *PutBucketReplicationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketReplicationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.ReplicationConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReplicationConfiguration")) + } else if v.ReplicationConfiguration != nil { + if err := validateReplicationConfiguration(v.ReplicationConfiguration); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketRequestPaymentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.RequestPaymentConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestPaymentConfiguration")) + } else if v.RequestPaymentConfiguration != nil { + if err := 
validateRequestPaymentConfiguration(v.RequestPaymentConfiguration); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketTaggingInput(v *PutBucketTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Tagging == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tagging")) + } else if v.Tagging != nil { + if err := validateTagging(v.Tagging); err != nil { + invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketVersioningInput(v *PutBucketVersioningInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketVersioningInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.VersioningConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("VersioningConfiguration")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutBucketWebsiteInput(v *PutBucketWebsiteInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutBucketWebsiteInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.WebsiteConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("WebsiteConfiguration")) + } else if v.WebsiteConfiguration != nil { + if err := validateWebsiteConfiguration(v.WebsiteConfiguration); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectAclInput(v *PutObjectAclInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectAclInput"} + if v.AccessControlPolicy != nil { + if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) + } + } + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectInput(v *PutObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectLegalHoldInput(v *PutObjectLegalHoldInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectLegalHoldInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateOpPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectLockConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectRetentionInput(v *PutObjectRetentionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectRetentionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutObjectTaggingInput(v *PutObjectTaggingInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutObjectTaggingInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Tagging == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tagging")) + } else if v.Tagging != nil { + if err := validateTagging(v.Tagging); err != nil { + invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutPublicAccessBlockInput(v *PutPublicAccessBlockInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutPublicAccessBlockInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.PublicAccessBlockConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("PublicAccessBlockConfiguration")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRestoreObjectInput(v *RestoreObjectInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RestoreObjectInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.RestoreRequest != nil { + if err := validateRestoreRequest(v.RestoreRequest); err != nil { + invalidParams.AddNested("RestoreRequest", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSelectObjectContentInput(v *SelectObjectContentInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SelectObjectContentInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Expression == nil { + invalidParams.Add(smithy.NewErrParamRequired("Expression")) + } + if len(v.ExpressionType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ExpressionType")) + } + if v.InputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("InputSerialization")) + } + if v.OutputSerialization == nil { + invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUploadPartCopyInput(v *UploadPartCopyInput) error { + if v == nil 
{ + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UploadPartCopyInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.CopySource == nil { + invalidParams.Add(smithy.NewErrParamRequired("CopySource")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUploadPartInput(v *UploadPartInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UploadPartInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.UploadId == nil { + invalidParams.Add(smithy.NewErrParamRequired("UploadId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpWriteGetObjectResponseInput(v *WriteGetObjectResponseInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WriteGetObjectResponseInput"} + if v.RequestRoute == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestRoute")) + } + if v.RequestToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("RequestToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md new file mode 100644 index 000000000..42fae8efb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -0,0 +1,255 @@ +# v1.12.12 (2023-06-15) + +* No change notes available for this release. + +# v1.12.11 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2023-05-04) + +* No change notes available for this release. + +# v1.12.9 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2023-04-10) + +* No change notes available for this release. + +# v1.12.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.12.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.12.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.11.28 (2022-12-20) + +* No change notes available for this release. 
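The generated S3 validators above all share one shape: every missing required field is added to a single `smithy.InvalidParamsError`, and the error is returned only when `Len() > 0`, so a caller learns about all invalid parameters at once instead of one per call. Below is a minimal sketch of how this surfaces to a consumer of the vendored SDK — a hedged illustration, not part of this diff; the empty `DeleteObjectInput` is deliberate:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	smithy "github.com/aws/smithy-go"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Bucket and Key are required but deliberately left unset: the generated
	// validation middleware rejects the call client-side, before any HTTP
	// request is built or signed.
	_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{})

	var ipe smithy.InvalidParamsError
	if errors.As(err, &ipe) {
		// Both missing fields are reported together, e.g.
		// "2 validation error(s) found." followed by one line per parameter.
		fmt.Println(ipe.Error())
	}
}
```

Because the validators run at the front of each operation's middleware stack, the failure is immediate and needs no credentials or network access.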
+
+# v1.11.27 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.26 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.25 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.24 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.23 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.22 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.21 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.20 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.19 (2022-08-30)
+
+* **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference.
+
+# v1.11.18 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.17 (2022-08-15)
+
+* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+
+# v1.11.16 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.15 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.14 (2022-08-08)
+
+* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.13 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.12 (2022-07-11)
+
+* No change notes available for this release.
+
+# v1.11.11 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.10 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.9 (2022-06-16)
+
+* No change notes available for this release.
+
+# v1.11.8 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.7 (2022-05-26)
+
+* No change notes available for this release.
+
+# v1.11.6 (2022-05-25)
+
+* No change notes available for this release.
+
+# v1.11.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Documentation**: Updated API models +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-12-21) + +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. + +# v1.6.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
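The vendored `api_client.go` below defines the SSO client's `Options`, including the retry knobs introduced in the v1.10.0 changelog entry above (`RetryMaxAttempts`, `RetryMode`, `Retryer`). A hedged sketch of wiring them up from a consumer's side — `config.WithRetryMode`, `config.WithRetryMaxAttempts`, and `retry.NewStandard` come from the vendored `aws-sdk-go-v2` config and retry packages, and the attempt counts are arbitrary:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	// Retry behavior is usually set on the shared aws.Config; NewFromConfig
	// copies it into the client Options via resolveAWSRetryMode and
	// resolveAWSRetryMaxAttempts (both visible in the vendored file below).
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRetryMode(aws.RetryModeAdaptive),
		config.WithRetryMaxAttempts(5),
	)
	if err != nil {
		log.Fatal(err)
	}
	client := sso.NewFromConfig(cfg)

	// A fully custom Retryer can also be injected per client; when Retryer is
	// non-nil, the RetryMaxAttempts and RetryMode fields are ignored, as the
	// Options documentation notes.
	custom := sso.NewFromConfig(cfg, func(o *sso.Options) {
		o.Retryer = retry.NewStandard(func(so *retry.StandardOptions) {
			so.MaxAttempts = 2
		})
	})

	_, _ = client, custom
}
```

Setting `Retryer` directly takes precedence over the mode and max-attempt fields, matching the precedence spelled out in the `Options` comments below.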
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
new file mode 100644
index 000000000..6f30ddc99
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
@@ -0,0 +1,433 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+	"context"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/defaults"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+	smithy "github.com/aws/smithy-go"
+	smithydocument "github.com/aws/smithy-go/document"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net"
+	"net/http"
+	"time"
+)
+
+const ServiceID = "SSO"
+const ServiceAPIVersion = "2019-06-10"
+
+// Client provides the API client to make operation calls for AWS Single Sign-On.
+type Client struct {
+	options Options
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	resolveDefaultLogger(&options)
+
+	setResolvedDefaultsMode(&options)
+
+	resolveRetryer(&options)
+
+	resolveHTTPClient(&options)
+
+	resolveHTTPSignerV4(&options)
+
+	resolveDefaultEndpointConfiguration(&options)
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	client := &Client{
+		options: options,
+	}
+
+	return client
+}
+
+type Options struct {
+	// Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on operation call to
+	// modify this list for per-operation behavior.
+	APIOptions []func(*middleware.Stack) error
+
+	// Configures the events that will be sent to the configured logger.
+	ClientLogMode aws.ClientLogMode
+
+	// The credentials object to use when signing requests.
+	Credentials aws.CredentialsProvider
+
+	// The configuration DefaultsMode that the SDK should use when constructing the
+	// client's initial default settings.
+	DefaultsMode aws.DefaultsMode
+
+	// The endpoint options to be used when attempting to resolve an endpoint.
+	EndpointOptions EndpointResolverOptions
+
+	// The service endpoint resolver.
+	EndpointResolver EndpointResolver
+
+	// Signature Version 4 (SigV4) Signer
+	HTTPSignerV4 HTTPSignerV4
+
+	// The logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// The region to send requests to. (Required)
+	Region string
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client will
+	// make when calling an operation that fails with a retryable error. A value of 0
+	// is ignored, and will not be used to configure the API client's default retryer
+	// or to modify a per-operation call's retry max attempts. When creating a new API
+	// client, this member will only be used if the Retryer Options member is nil.
+	// This value will be ignored if Retryer is not nil. If specified in an operation
+	// call's functional options with a value that is different from the constructed
+	// client's Options, the Client's Retryer will be wrapped to use the operation's
+	// specific RetryMaxAttempts value.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if the
+	// Retryer option is not also specified. When creating a new API client, this
+	// member will only be used if the Retryer Options member is nil. This value will
+	// be ignored if Retryer is not nil. Currently this does not support per-operation
+	// call overrides; it may in the future.
+	RetryMode aws.RetryMode
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer. The kind of
+	// default retryer created by the API client can be changed with the RetryMode
+	// option.
+	Retryer aws.Retryer
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
+	// should not populate this structure programmatically, or rely on the values here
+	// within your applications.
+	RuntimeEnvironment aws.RuntimeEnvironment
+
+	// The initial DefaultsMode used when the client options were constructed. If the
+	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+	// value was at that point in time. Currently this does not support per-operation
+	// call overrides; it may in the future.
+	resolvedDefaultsMode aws.DefaultsMode
+
+	// The HTTP client to invoke API calls with. Defaults to the client's default HTTP
+	// implementation if nil.
+	HTTPClient HTTPClient
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, optFns...)
+	}
+}
+
+// WithEndpointResolver returns a functional option for setting the Client's
+// EndpointResolver option.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolver = v
+	}
+}
+
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go new file mode 100644 index 000000000..3825b9e44 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -0,0 +1,129 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) { + if params == nil { + params = &GetRoleCredentialsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, c.addOperationGetRoleCredentialsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRoleCredentialsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRoleCredentialsInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // The identifier for the AWS account that is assigned to the user. + // + // This member is required. 
+ AccountId *string + + // The friendly name of the role that is assigned to the user. + // + // This member is required. + RoleName *string + + noSmithyDocumentSerde +} + +type GetRoleCredentialsOutput struct { + + // The credentials for the role that is assigned to the user. + RoleCredentials *types.RoleCredentials + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetRoleCredentials", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go new file mode 100644 index 000000000..ef3592afc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -0,0 +1,225 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all roles that are assigned to the user for a given AWS account. 
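
(Illustrative aside, not part of the vendored files: a usage sketch for the GetRoleCredentials operation defined above. All three input values are placeholders, and the client is assumed to come from the NewFromConfig sketch earlier.)

// Assumes, in addition to the client from the earlier sketch:
// import (
// 	"context"
// 	"github.com/aws/aws-sdk-go-v2/aws"
// 	"github.com/aws/aws-sdk-go-v2/service/sso"
// 	"github.com/aws/aws-sdk-go-v2/service/sso/types"
// )
func fetchRoleCredentials(ctx context.Context, client *sso.Client) (*types.RoleCredentials, error) {
	out, err := client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
		AccessToken: aws.String("<token from the OIDC CreateToken call>"), // placeholder
		AccountId:   aws.String("123456789012"),                           // placeholder account ID
		RoleName:    aws.String("ReadOnlyAccess"),                         // placeholder role name
	})
	if err != nil {
		return nil, err
	}
	// RoleCredentials carries the short-term AccessKeyId, SecretAccessKey,
	// SessionToken, and Expiration for the assumed role.
	return out.RoleCredentials, nil
}
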
+func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { + if params == nil { + params = &ListAccountRolesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, c.addOperationListAccountRolesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAccountRolesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountRolesInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // The identifier for the AWS account that is assigned to the user. + // + // This member is required. + AccountId *string + + // The number of items that clients can request per page. + MaxResults *int32 + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAccountRolesOutput struct { + + // The page token client that is used to retrieve the list of accounts. + NextToken *string + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []types.RoleInfo + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListAccountRolesAPIClient is a client that implements the ListAccountRoles +// operation. 
+type ListAccountRolesAPIClient interface { + ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) +} + +var _ ListAccountRolesAPIClient = (*Client)(nil) + +// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles +type ListAccountRolesPaginatorOptions struct { + // The number of items that clients can request per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListAccountRolesPaginator is a paginator for ListAccountRoles +type ListAccountRolesPaginator struct { + options ListAccountRolesPaginatorOptions + client ListAccountRolesAPIClient + params *ListAccountRolesInput + nextToken *string + firstPage bool +} + +// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator +func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator { + if params == nil { + params = &ListAccountRolesInput{} + } + + options := ListAccountRolesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAccountRolesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAccountRolesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAccountRoles page. +func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccountRoles", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go new file mode 100644 index 000000000..cfa8cdc97 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -0,0 +1,222 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by +// the administrator of the account. 
For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the IAM Identity Center User Guide. This operation returns a paginated +// response. +func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { + if params == nil { + params = &ListAccountsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, c.addOperationListAccountsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAccountsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountsInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // This is the number of items clients can request per page. + MaxResults *int32 + + // (Optional) When requesting subsequent pages, this is the page token from the + // previous response output. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAccountsOutput struct { + + // A paginated response with the list of account information and the next token if + // more results are available. + AccountList []types.AccountInfo + + // The page token client that is used to retrieve the list of accounts. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListAccountsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListAccountsAPIClient is a client that implements the ListAccounts operation. 
+type ListAccountsAPIClient interface { + ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) +} + +var _ ListAccountsAPIClient = (*Client)(nil) + +// ListAccountsPaginatorOptions is the paginator options for ListAccounts +type ListAccountsPaginatorOptions struct { + // This is the number of items clients can request per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListAccountsPaginator is a paginator for ListAccounts +type ListAccountsPaginator struct { + options ListAccountsPaginatorOptions + client ListAccountsAPIClient + params *ListAccountsInput + nextToken *string + firstPage bool +} + +// NewListAccountsPaginator returns a new ListAccountsPaginator +func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator { + if params == nil { + params = &ListAccountsInput{} + } + + options := ListAccountsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAccountsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAccountsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAccounts page. +func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListAccounts(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccounts", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go new file mode 100644 index 000000000..c13d73f5a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -0,0 +1,124 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the locally stored SSO tokens from the client-side cache and sends an +// API call to the IAM Identity Center service to invalidate the corresponding +// server-side IAM Identity Center sign in session. 
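
(Illustrative aside, not part of the vendored files: the paginator above is typically driven in a HasMorePages/NextPage loop. client and ctx are assumed from the earlier sketches, the access token is again a placeholder, and fmt/log imports are assumed.)

p := sso.NewListAccountsPaginator(client, &sso.ListAccountsInput{
	AccessToken: aws.String("<token from the OIDC CreateToken call>"), // placeholder
}, func(o *sso.ListAccountsPaginatorOptions) {
	o.Limit = 20 // assumed page size; becomes MaxResults on each request
})
for p.HasMorePages() {
	page, err := p.NextPage(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, acct := range page.AccountList {
		fmt.Println(aws.ToString(acct.AccountId), aws.ToString(acct.AccountName))
	}
}
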
If a user uses IAM Identity +// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is +// used to obtain an IAM session, as specified in the corresponding IAM Identity +// Center permission set. More specifically, IAM Identity Center assumes an IAM +// role in the target account on behalf of the user, and the corresponding +// temporary AWS credentials are returned to the client. After user logout, any +// existing IAM role sessions that were created by using IAM Identity Center +// permission sets continue based on the duration configured in the permission set. +// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) +// in the IAM Identity Center User Guide. +func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { + if params == nil { + params = &LogoutInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, c.addOperationLogoutMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*LogoutOutput) + out.ResultMetadata = metadata + return out, nil +} + +type LogoutInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + noSmithyDocumentSerde +} + +type LogoutOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpLogoutValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata { + return 
&awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Logout", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go new file mode 100644 index 000000000..8bba205f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -0,0 +1,1151 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strings" +) + +type awsRestjson1_deserializeOpGetRoleCredentials struct { +} + +func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata) + } + output := &GetRoleCredentialsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot 
bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRoleCredentialsOutput + if *v == nil { + sv = &GetRoleCredentialsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "roleCredentials": + if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListAccountRoles struct { +} + +func (*awsRestjson1_deserializeOpListAccountRoles) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata) + } + output := &ListAccountRolesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode 
response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAccountRolesOutput + if *v == nil { + sv = &ListAccountRolesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "roleList": + if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListAccounts struct { +} + +func (*awsRestjson1_deserializeOpListAccounts) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := 
out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata) + } + output := &ListAccountsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type 
%v", value) + } + + var sv *ListAccountsOutput + if *v == nil { + sv = &ListAccountsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountList": + if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpLogout struct { +} + +func (*awsRestjson1_deserializeOpLogout) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata) + } + output := &LogoutOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func 
awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ResourceNotFoundException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.TooManyRequestsException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + 
err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccountInfo + if *v == nil { + sv = &types.AccountInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) + } + sv.AccountId = ptr.String(jtv) + } + + case "accountName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value) + } + sv.AccountName = ptr.String(jtv) + } + + case "emailAddress": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value) + } + sv.EmailAddress = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AccountInfo + if *v == nil { + cv = []types.AccountInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AccountInfo + destAddr := &col + if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", 
value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RoleCredentials + if *v == nil { + sv = &types.RoleCredentials{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value) + } + sv.AccessKeyId = ptr.String(jtv) + } + + case "expiration": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Expiration = i64 + } + + case "secretAccessKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value) + } + sv.SecretAccessKey = ptr.String(jtv) + } + + case "sessionToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value) + } + sv.SessionToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RoleInfo + if *v == nil { + sv = &types.RoleInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) + } + sv.AccountId = ptr.String(jtv) + } + + case "roleName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value) + } + sv.RoleName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.RoleInfo + if *v == nil { + cv = []types.RoleInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.RoleInfo + destAddr := &col + if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = 
append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TooManyRequestsException + if *v == nil { + sv = &types.TooManyRequestsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnauthorizedException + if *v == nil { + sv = &types.UnauthorizedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go new file mode 100644 index 000000000..59456d5dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go @@ -0,0 +1,21 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package sso provides the API client, operations, and parameter types for AWS +// Single Sign-On. +// +// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web +// service that makes it easy for you to assign user access to IAM Identity Center +// resources such as the AWS access portal. Users can get AWS account applications +// and roles assigned to them and get federated into the application. Although AWS +// Single Sign-On was renamed, the sso and identitystore API namespaces will +// continue to retain their original name for backward compatibility purposes. For +// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) +// . This reference guide describes the IAM Identity Center Portal operations that +// you can call programatically and includes detailed information on data types and +// errors. AWS provides SDKs that consist of libraries and sample code for various +// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. +// The SDKs provide a convenient way to create programmatic access to IAM Identity +// Center and other AWS services. For more information about the AWS SDKs, +// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/) +// . 
+package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go new file mode 100644 index 000000000..43c06f11a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. 
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "awsssoportal" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the resolver will use the the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json new file mode 100644 index 000000000..5be0e34cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -0,0 +1,30 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_GetRoleCredentials.go", + "api_op_ListAccountRoles.go", + "api_op_ListAccounts.go", + "api_op_Logout.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/sso", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go new file mode 100644 index 000000000..b67186c86 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package sso + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.12.12" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go new file mode 100644 index 000000000..b1ce03a5d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -0,0 +1,492 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver SSO endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, 
+ SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + Hostname: 
"portal.sso.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.cloud.adc-e.uk", + 
Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go new file mode 100644 index 000000000..29e320811 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go @@ -0,0 +1,256 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sso + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpGetRoleCredentials struct { +} + +func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRoleCredentialsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/federation/credentials") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.AccountId != nil { + encoder.SetQuery("account_id").String(*v.AccountId) + } + + if v.RoleName != nil { + encoder.SetQuery("role_name").String(*v.RoleName) + } + + return nil +} + +type awsRestjson1_serializeOpListAccountRoles struct { +} + +func (*awsRestjson1_serializeOpListAccountRoles) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountRolesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/assignment/roles") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.AccountId != nil { + encoder.SetQuery("account_id").String(*v.AccountId) + } + + if v.MaxResults != nil { + encoder.SetQuery("max_result").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next_token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListAccounts struct { +} + +func (*awsRestjson1_serializeOpListAccounts) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/assignment/accounts") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.MaxResults != nil { + encoder.SetQuery("max_result").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next_token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpLogout struct { +} + +func (*awsRestjson1_serializeOpLogout) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, 
+) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*LogoutInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/logout") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go new file mode 100644 index 000000000..e97a126e8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go @@ -0,0 +1,115 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// Indicates that a problem occurred with the input to the request. For example, a +// required parameter might be missing or out of range. +type InvalidRequestException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified resource doesn't exist. +type ResourceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request is being made too frequently and is more than what +// the server can handle. 
+type TooManyRequestsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyRequestsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyRequestsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyRequestsException" + } + return *e.ErrorCodeOverride +} +func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +type UnauthorizedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnauthorizedException" + } + return *e.ErrorCodeOverride +} +func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go new file mode 100644 index 000000000..8dc02296b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -0,0 +1,61 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" +) + +// Provides information about your AWS account. +type AccountInfo struct { + + // The identifier of the AWS account that is assigned to the user. + AccountId *string + + // The display name of the AWS account that is assigned to the user. + AccountName *string + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string + + noSmithyDocumentSerde +} + +// Provides information about the role credentials that are assigned to the user. +type RoleCredentials struct { + + // The identifier used for the temporary security credentials. For more + // information, see Using Temporary Security Credentials to Request Access to AWS + // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + AccessKeyId *string + + // The date on which temporary security credentials expire. + Expiration int64 + + // The key that is used to sign the request. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SecretAccessKey *string + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SessionToken *string + + noSmithyDocumentSerde +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + + // The identifier of the AWS account assigned to the user. 
+ AccountId *string + + // The friendly name of the role that is assigned to the user. + RoleName *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go new file mode 100644 index 000000000..f6bf461f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpGetRoleCredentials struct { +} + +func (*validateOpGetRoleCredentials) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetRoleCredentialsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetRoleCredentialsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAccountRoles struct { +} + +func (*validateOpListAccountRoles) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAccountRolesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAccountRolesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAccounts struct { +} + +func (*validateOpListAccounts) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAccountsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAccountsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpLogout struct { +} + +func (*validateOpLogout) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*LogoutInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpLogoutInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After) +} + +func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAccountRoles{}, 
middleware.After) +} + +func addOpListAccountsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After) +} + +func addOpLogoutValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpLogout{}, middleware.After) +} + +func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"} + if v.RoleName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleName")) + } + if v.AccountId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountId")) + } + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAccountRolesInput(v *ListAccountRolesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if v.AccountId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAccountsInput(v *ListAccountsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpLogoutInput(v *LogoutInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md new file mode 100644 index 000000000..c6c01750b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -0,0 +1,245 @@ +# v1.14.12 (2023-06-15) + +* No change notes available for this release. + +# v1.14.11 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.10 (2023-05-04) + +* No change notes available for this release. + +# v1.14.9 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.8 (2023-04-10) + +* No change notes available for this release. + +# v1.14.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.14.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. 
+ +# v1.14.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.13.11 (2022-12-19) + +* No change notes available for this release. + +# v1.13.10 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2022-09-30) + +* **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference. + +# v1.13.5 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-08-25) + +* **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action. + +# v1.12.14 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-08) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-07-11) + +* No change notes available for this release. + +# v1.12.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-05-27) + +* No change notes available for this release. + +# v1.12.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. 
See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-07) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-11) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-09-17) + +* **Feature**: Updated API client and endpoints to latest revision. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-27) + +* **Feature**: Updated API model to latest revision. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
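The files added below vendor the generated AWS SSO OIDC client. Its three operations, RegisterClient, StartDeviceAuthorization, and CreateToken, together implement the OAuth 2.0 device authorization grant against IAM Identity Center. The following minimal sketch shows how a consumer typically drives that flow with this client; the client name, region, and start URL are placeholder assumptions, and the single CreateToken call stands in for the polling loop that retries while the service returns AuthorizationPendingException.

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/config"
    	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
    )

    func main() {
    	ctx := context.Background()
    	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1")) // placeholder region
    	if err != nil {
    		log.Fatal(err)
    	}
    	client := ssooidc.NewFromConfig(cfg)

    	// Register this tool as a public OIDC client; the result should be persisted for reuse.
    	reg, err := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
    		ClientName: aws.String("example-tool"), // placeholder name
    		ClientType: aws.String("public"),       // only "public" is accepted by the service
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Request a device/user code pair for the given access portal.
    	dev, err := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
    		ClientId:     reg.ClientId,
    		ClientSecret: reg.ClientSecret,
    		StartUrl:     aws.String("https://example.awsapps.com/start"), // placeholder start URL
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("approve the request at:", aws.ToString(dev.VerificationUriComplete))

    	// A real client polls this call every dev.Interval seconds until the user
    	// approves; AuthorizationPendingException means "not yet".
    	tok, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
    		ClientId:     reg.ClientId,
    		ClientSecret: reg.ClientSecret,
    		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
    		DeviceCode:   dev.DeviceCode,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("received", aws.ToString(tok.TokenType), "token, expires in", tok.ExpiresIn, "seconds")
    }
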
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go new file mode 100644 index 000000000..111f66d3b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -0,0 +1,433 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "SSO OIDC" +const ServiceAPIVersion = "2019-06-10" + +// Client provides the API client to make operations call for AWS SSO OIDC. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. 
If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go new file mode 100644 index 000000000..5d7bd3c1f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates and returns an access token for the authorized client. The access token +// issued will be used to fetch short-term credentials for the assigned roles in +// the AWS account. +func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { + if params == nil { + params = &CreateTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateToken", params, optFns, c.addOperationCreateTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateTokenInput struct { + + // The unique identifier string for each client. This value should come from the + // persisted result of the RegisterClient API. + // + // This member is required. + ClientId *string + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClient API. + // + // This member is required. + ClientSecret *string + + // Supports grant types for the authorization code, refresh token, and device code + // request. 
For device code requests, specify the following value: + // urn:ietf:params:oauth:grant-type:device_code For information about how to + // obtain the device code, see the StartDeviceAuthorization topic. + // + // This member is required. + GrantType *string + + // The authorization code received from the authorization service. This parameter + // is required to perform an authorization grant request to get access to a token. + Code *string + + // Used only when calling this API for the device code grant type. This short-term + // code is used to identify this authentication attempt. This should come from an + // in-memory reference to the result of the StartDeviceAuthorization API. + DeviceCode *string + + // The location of the application that will receive the authorization code. Users + // authorize the service to send the request to this location. + RedirectUri *string + + // Currently, refreshToken is not yet implemented and is not supported. For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the IAM + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . The token used to obtain an access token in the event that the access token is + // invalid or expired. + RefreshToken *string + + // The list of scopes that is defined by the client. Upon authorization, this list + // is used to restrict permissions when granting an access token. + Scope []string + + noSmithyDocumentSerde +} + +type CreateTokenOutput struct { + + // An opaque token to access IAM Identity Center resources assigned to a user. + AccessToken *string + + // Indicates the time in seconds when an access token will expire. + ExpiresIn int32 + + // Currently, idToken is not yet implemented and is not supported. For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the IAM + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . The identifier of the user that associated with the access token, if present. + IdToken *string + + // Currently, refreshToken is not yet implemented and is not supported. For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the IAM + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + RefreshToken *string + + // Used to notify the client that the returned token is an access token. The + // supported type is BearerToken . + TokenType *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateToken{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go new file mode 100644 index 000000000..1d83b226d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -0,0 +1,144 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Registers a client with IAM Identity Center. This allows clients to initiate +// device authorization. The output should be persisted for reuse through many +// authentication requests. +func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) { + if params == nil { + params = &RegisterClientInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RegisterClient", params, optFns, c.addOperationRegisterClientMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RegisterClientOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RegisterClientInput struct { + + // The friendly name of the client. + // + // This member is required. + ClientName *string + + // The type of client. The service supports only public as a client type. 
Anything + // other than public will be rejected by the service. + // + // This member is required. + ClientType *string + + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scopes []string + + noSmithyDocumentSerde +} + +type RegisterClientOutput struct { + + // The endpoint where the client can request authorization. + AuthorizationEndpoint *string + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt int64 + + // A secret string generated for the client. The client will use this string to + // get authenticated by the service in subsequent calls. + ClientSecret *string + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt int64 + + // The endpoint where the client can get an access token. + TokenEndpoint *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRegisterClient{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpRegisterClientValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRegisterClient(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RegisterClient", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go new file mode 100644 index 000000000..70e60db14 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -0,0 +1,152 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Initiates device authorization by requesting a pair of verification codes from +// the authorization service. +func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDeviceAuthorizationInput, optFns ...func(*Options)) (*StartDeviceAuthorizationOutput, error) { + if params == nil { + params = &StartDeviceAuthorizationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartDeviceAuthorization", params, optFns, c.addOperationStartDeviceAuthorizationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartDeviceAuthorizationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartDeviceAuthorizationInput struct { + + // The unique identifier string for the client that is registered with IAM + // Identity Center. This value should come from the persisted result of the + // RegisterClient API operation. + // + // This member is required. + ClientId *string + + // A secret string that is generated for the client. This value should come from + // the persisted result of the RegisterClient API operation. + // + // This member is required. + ClientSecret *string + + // The URL for the AWS access portal. For more information, see Using the AWS + // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the IAM Identity Center User Guide. + // + // This member is required. + StartUrl *string + + noSmithyDocumentSerde +} + +type StartDeviceAuthorizationOutput struct { + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn int32 + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval int32 + + // A one-time user verification code. This is needed to authorize an in-use device. + UserCode *string + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string + + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. + VerificationUriComplete *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartDeviceAuthorization{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StartDeviceAuthorization", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go new file mode 100644 index 000000000..ca30d22f9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -0,0 +1,1689 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strings" +) + +type awsRestjson1_deserializeOpCreateToken struct { +} + +func (*awsRestjson1_deserializeOpCreateToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateToken(response, &metadata) + } + output := &CreateTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateTokenOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return 
awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("AuthorizationPendingException", errorCode): + return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) + + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidGrantException", errorCode): + return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateTokenOutput + if *v == nil { + sv = &CreateTokenOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) + } + sv.AccessToken = ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "idToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) + } + sv.IdToken = ptr.String(jtv) + } + + case "refreshToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) + } + sv.RefreshToken = ptr.String(jtv) + } + + case "tokenType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) + } + sv.TokenType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpRegisterClient struct { +} + +func (*awsRestjson1_deserializeOpRegisterClient) ID() string { + return "OperationDeserializer" +} + +func (m 
*awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorRegisterClient(response, &metadata) + } + output := &RegisterClientOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentRegisterClientOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientMetadataException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + 
Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentRegisterClientOutput(v **RegisterClientOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RegisterClientOutput + if *v == nil { + sv = &RegisterClientOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authorizationEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.AuthorizationEndpoint = ptr.String(jtv) + } + + case "clientId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientId to be of type string, got %T instead", value) + } + sv.ClientId = ptr.String(jtv) + } + + case "clientIdIssuedAt": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ClientIdIssuedAt = i64 + } + + case "clientSecret": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientSecret to be of type string, got %T instead", value) + } + sv.ClientSecret = ptr.String(jtv) + } + + case "clientSecretExpiresAt": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ClientSecretExpiresAt = i64 + } + + case "tokenEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.TokenEndpoint = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpStartDeviceAuthorization struct { +} + +func (*awsRestjson1_deserializeOpStartDeviceAuthorization) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response, &metadata) + } + output := &StartDeviceAuthorizationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = 
awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(v **StartDeviceAuthorizationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartDeviceAuthorizationOutput + if *v == nil { + sv = &StartDeviceAuthorizationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "deviceCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeviceCode to be of type string, got %T instead", value) + } + sv.DeviceCode = ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "interval": + if 
value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntervalInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Interval = int32(i64) + } + + case "userCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UserCode to be of type string, got %T instead", value) + } + sv.UserCode = ptr.String(jtv) + } + + case "verificationUri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.VerificationUri = ptr.String(jtv) + } + + case "verificationUriComplete": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.VerificationUriComplete = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AccessDeniedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorAuthorizationPendingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AuthorizationPendingException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAuthorizationPendingException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentExpiredTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InternalServerException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidClientMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidClientMetadataException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidClientMetadataException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + 
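+// Note: the error deserializers in this file all follow the same defensive
+// pattern. The error body is teed through a small (1 KiB) ring buffer while it
+// is decoded, so a snapshot of the malformed payload can be attached to any
+// *smithy.DeserializationError for diagnostics; the body is then rewound with
+// Seek so later middleware can re-read it, and the populated types.* value is
+// returned directly as the operation error (each of these types implements
+// the error interface).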
+func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidGrantException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidGrantException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidScopeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidScopeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorSlowDownException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.SlowDownException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentSlowDownException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnsupportedGrantTypeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccessDeniedException + if *v == nil { + sv = &types.AccessDeniedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value 
+ + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.AuthorizationPendingException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AuthorizationPendingException + if *v == nil { + sv = &types.AuthorizationPendingException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InternalServerException + if *v == nil { + sv = &types.InternalServerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidClientException(v **types.InvalidClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidClientException + if *v == nil { + sv = 
&types.InvalidClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidClientMetadataException(v **types.InvalidClientMetadataException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidClientMetadataException + if *v == nil { + sv = &types.InvalidClientMetadataException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGrantException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidGrantException + if *v == nil { + sv = &types.InvalidGrantException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T 
instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidScopeException + if *v == nil { + sv = &types.InvalidScopeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SlowDownException + if *v == nil { + sv = &types.SlowDownException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnauthorizedClientException + if *v == nil { + sv = &types.UnauthorizedClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(v **types.UnsupportedGrantTypeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + 
var sv *types.UnsupportedGrantTypeException + if *v == nil { + sv = &types.UnsupportedGrantTypeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go new file mode 100644 index 000000000..2239427d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -0,0 +1,36 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package ssooidc provides the API client, operations, and parameter types for +// AWS SSO OIDC. +// +// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect (OIDC) +// is a web service that enables a client (such as AWS CLI or a native application) +// to register with IAM Identity Center. The service also enables the client to +// fetch the user’s access token upon successful authentication and authorization +// with IAM Identity Center. Although AWS Single Sign-On was renamed, the sso and +// identitystore API namespaces will continue to retain their original name for +// backward compatibility purposes. For more information, see IAM Identity Center +// rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) +// . Considerations for Using This Guide Before you begin using this guide, we +// recommend that you first review the following important information about how +// the IAM Identity Center OIDC service works. +// - The IAM Identity Center OIDC service currently implements only the portions +// of the OAuth 2.0 Device Authorization Grant standard ( +// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) +// that are necessary to enable single sign-on authentication with the AWS CLI. +// Support for other OIDC flows frequently needed for native applications, such as +// Authorization Code Flow (+ PKCE), will be addressed in future releases. +// - The service emits only OIDC access tokens, such that obtaining a new token +// (For example, token refresh) requires explicit user re-authentication. +// - The access tokens provided by this service grant access to all AWS account +// entitlements assigned to an IAM Identity Center user, not just a particular +// application. +// - The documentation in this guide does not describe the mechanism to convert +// the access token into AWS Auth (“sigv4”) credentials for use with IAM-protected +// AWS service endpoints. For more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// in the IAM Identity Center Portal API Reference Guide. +// +// For general information about IAM Identity Center, see What is IAM Identity +// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the IAM Identity Center User Guide. 
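+//
+// As a rough sketch of the device authorization grant described above (the
+// region, client name, and start URL below are placeholders, error handling is
+// elided, and aws and config are the usual SDK packages):
+//
+//	cfg, _ := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
+//	client := ssooidc.NewFromConfig(cfg)
+//
+//	reg, _ := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
+//		ClientName: aws.String("my-tool"), // placeholder
+//		ClientType: aws.String("public"),  // the device flow uses public clients
+//	})
+//	auth, _ := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
+//		ClientId:     reg.ClientId,
+//		ClientSecret: reg.ClientSecret,
+//		StartUrl:     aws.String("https://example.awsapps.com/start"), // placeholder
+//	})
+//	// Direct the user to auth.VerificationUriComplete, then poll CreateToken
+//	// with GrantType "urn:ietf:params:oauth:grant-type:device_code" (RFC 8628).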
+package ssooidc
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
new file mode 100644
index 000000000..35cd21f18
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
@@ -0,0 +1,200 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/url"
+	"strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+	ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+	return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	return fn(region, options)
+}
+
+func resolveDefaultEndpointConfiguration(o *Options) {
+	if o.EndpointResolver != nil {
+		return
+	}
+	o.EndpointResolver = NewDefaultEndpointResolver()
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint URL. By default, the resolved endpoint uses the client
+// region as the signing region, and the endpoint source is set to
+// EndpointSourceCustom. You can provide functional options to configure
+// endpoint values for the resolved endpoint.
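+//
+// For example, to point the client at a custom endpoint (the URL below is a
+// placeholder):
+//
+//	client := ssooidc.New(ssooidc.Options{
+//		Region:           "us-east-1", // placeholder
+//		EndpointResolver: ssooidc.EndpointResolverFromURL("https://oidc.example.internal"),
+//	})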
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "awsssooidc" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns an aws.EndpointNotFoundError, the resolver will use
+// the provided fallbackResolver for resolution.
+//
+// fallbackResolver must not be nil.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver {
+	var resolver aws.EndpointResolverWithOptions
+
+	if awsResolverWithOptions != nil {
+		resolver = awsResolverWithOptions
+	} else if awsResolver != nil {
+		resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+	}
+
+	return &wrappedEndpointResolver{
+		awsResolver: resolver,
+		resolver:    fallbackResolver,
+	}
+}
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+	options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+	if len(options.EndpointOptions.ResolvedRegion) == 0 {
+		const fipsInfix = "-fips-"
+		const fipsPrefix = "fips-"
+		const fipsSuffix = "-fips"
+
+		if strings.Contains(options.Region, fipsInfix) ||
+			strings.Contains(options.Region, fipsPrefix) ||
+			strings.Contains(options.Region, fipsSuffix) {
+			options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+				options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+			options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+		}
+	}
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
new file mode 100644
index 000000000..4afe3223e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
@@ -0,0 +1,29 @@
+{
+  "dependencies": {
+    "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+    "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+    "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+    "github.com/aws/smithy-go": "v1.4.0"
+  },
+  "files": [
+    "api_client.go",
+    "api_client_test.go",
+    "api_op_CreateToken.go",
+    "api_op_RegisterClient.go",
+    "api_op_StartDeviceAuthorization.go",
+    "deserializers.go",
+    "doc.go",
+    "endpoints.go",
+    "generated.json",
+    "internal/endpoints/endpoints.go",
+    "internal/endpoints/endpoints_test.go",
+    "protocol_test.go",
+    "serializers.go",
+    "types/errors.go",
+    "types/types.go",
+    "validators.go"
+  ],
+  "go": "1.15",
+  "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc",
+  "unstable": false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
new file mode 100644
index 000000000..82156c20f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package ssooidc
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.14.12"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..b04fb46fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
@@ -0,0 +1,492 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+	// SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+	// name. You must not set this value directly in your application.
+	ResolvedRegion string
+
+	// DisableHTTPS informs the resolver to return an endpoint that does not use the
+	// HTTPS scheme.
+	DisableHTTPS bool
+
+	// UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+	UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+	return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+	return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+	return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+	return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+	return endpoints.Options{
+		Logger:               options.Logger,
+		LogDeprecated:        options.LogDeprecated,
+		ResolvedRegion:       options.ResolvedRegion,
+		DisableHTTPS:         options.DisableHTTPS,
+		UseDualStackEndpoint: options.UseDualStackEndpoint,
+		UseFIPSEndpoint:      options.UseFIPSEndpoint,
+	}
+}
+
+// Resolver SSO OIDC endpoint resolver
+type Resolver struct {
+	partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+	if len(region) == 0 {
+		return endpoint, &aws.MissingRegionError{}
+	}
+
+	opt := transformToSharedOptions(options)
+	return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+	return &Resolver{
+		partitions: defaultPartitions,
+	}
+}
+
+var partitionRegexp = struct {
+	Aws      *regexp.Regexp
+	AwsCn    *regexp.Regexp
+	AwsIso   *regexp.Regexp
+	AwsIsoB  *regexp.Regexp
+	AwsIsoE  *regexp.Regexp
+	AwsIsoF  *regexp.Regexp
+	AwsUsGov *regexp.Regexp
+}{
+
+	Aws:      regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"),
+	AwsCn:    regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+	AwsIso:   regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+	AwsIsoB:  regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+	AwsIsoE:  regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+	AwsIsoF:  regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
+	AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
+var defaultPartitions = endpoints.Partitions{
+	{
+		ID: "aws",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "oidc.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:  "oidc-fips.{region}.amazonaws.com",
+				Protocols: []string{"https"},
+
SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + 
endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + 
RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go new file mode 100644 index 000000000..a8cfd7b46 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go @@ -0,0 +1,288 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "bytes" + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpCreateToken struct { +} + +func (*awsRestjson1_serializeOpCreateToken) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/token") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateTokenInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.Code != nil { + ok := object.Key("code") + ok.String(*v.Code) + } + + if v.DeviceCode != nil { + ok := object.Key("deviceCode") + ok.String(*v.DeviceCode) + } + + if v.GrantType != nil { + ok := object.Key("grantType") + ok.String(*v.GrantType) + } + + if v.RedirectUri != nil { + ok := object.Key("redirectUri") + ok.String(*v.RedirectUri) + } + + if v.RefreshToken != nil { + ok := object.Key("refreshToken") + ok.String(*v.RefreshToken) + } + + if v.Scope != nil { + ok := object.Key("scope") + if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpRegisterClient struct { +} + +func (*awsRestjson1_serializeOpRegisterClient) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RegisterClientInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/client/register") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentRegisterClientInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientName != nil { + ok := object.Key("clientName") + ok.String(*v.ClientName) + } + + if v.ClientType != nil { + ok := object.Key("clientType") + ok.String(*v.ClientType) + } + + if v.Scopes != nil { + ok := object.Key("scopes") + if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpStartDeviceAuthorization struct { +} + +func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartDeviceAuthorizationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/device_authorization") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.StartUrl != nil { + ok := object.Key("startUrl") + ok.String(*v.StartUrl) + } + + return nil +} + +func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go new file mode 100644 index 000000000..115a51a9e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -0,0 +1,366 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AccessDeniedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AccessDeniedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AccessDeniedException" + } + return *e.ErrorCodeOverride +} +func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a request to authorize a client with an access user session +// token is pending. 
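+//
+// Callers polling CreateToken typically detect this error with errors.As and
+// retry after the interval returned by StartDeviceAuthorization, for example
+// (interval is a placeholder taken from the StartDeviceAuthorization output):
+//
+//	var pending *types.AuthorizationPendingException
+//	if errors.As(err, &pending) {
+//		time.Sleep(time.Duration(interval) * time.Second)
+//		// then call CreateToken again
+//	}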
+type AuthorizationPendingException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AuthorizationPendingException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AuthorizationPendingException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AuthorizationPendingException" + } + return *e.ErrorCodeOverride +} +func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the token issued by the service is expired and is no longer +// valid. +type ExpiredTokenException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExpiredTokenException" + } + return *e.ErrorCodeOverride +} +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that an error from the service occurred while trying to process a +// request. +type InternalServerException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InternalServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InternalServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InternalServerException" + } + return *e.ErrorCodeOverride +} +func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret . +type InvalidClientException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClientException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidClientException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client information sent in the request during registration +// is invalid. 
+type InvalidClientMetadataException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClientMetadataException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClientMetadataException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidClientMetadataException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +type InvalidGrantException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidGrantException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that something is wrong with the input to the request. For example, a +// required parameter might be missing or out of range. +type InvalidRequestException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the scope provided in the request is invalid. +type InvalidScopeException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidScopeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidScopeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidScopeException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client is making the request too frequently and is more than +// the service can handle. 
+type SlowDownException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SlowDownException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SlowDownException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "SlowDownException" + } + return *e.ErrorCodeOverride +} +func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client is not currently authorized to make the request. This +// can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedClientException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnauthorizedClientException" + } + return *e.ErrorCodeOverride +} +func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the grant type in the request is not supported by the service. +type UnsupportedGrantTypeException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedGrantTypeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedGrantTypeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedGrantTypeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedGrantTypeException" + } + return *e.ErrorCodeOverride +} +func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go new file mode 100644 index 000000000..0ec0789f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go @@ -0,0 +1,9 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" +) + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go new file mode 100644 index 000000000..5a309484e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go @@ -0,0 +1,142 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCreateToken struct { +} + +func (*validateOpCreateToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRegisterClient struct { +} + +func (*validateOpRegisterClient) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRegisterClient) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RegisterClientInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRegisterClientInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartDeviceAuthorization struct { +} + +func (*validateOpStartDeviceAuthorization) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartDeviceAuthorization) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartDeviceAuthorizationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartDeviceAuthorizationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After) +} + +func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After) +} + +func addOpStartDeviceAuthorizationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStartDeviceAuthorization{}, middleware.After) +} + +func validateOpCreateTokenInput(v *CreateTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateTokenInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if v.GrantType == nil { + invalidParams.Add(smithy.NewErrParamRequired("GrantType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRegisterClientInput(v *RegisterClientInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RegisterClientInput"} + if v.ClientName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientName")) + } + if v.ClientType == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientType")) + } + if invalidParams.Len() > 0 { + return 
invalidParams + } else { + return nil + } +} + +func validateOpStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartDeviceAuthorizationInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if v.StartUrl == nil { + invalidParams.Add(smithy.NewErrParamRequired("StartUrl")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md new file mode 100644 index 000000000..46a30e5b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -0,0 +1,270 @@ +# v1.19.2 (2023-06-15) + +* No change notes available for this release. + +# v1.19.1 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2023-05-08) + +* **Feature**: Documentation updates for AWS Security Token Service. + +# v1.18.11 (2023-05-04) + +* No change notes available for this release. + +# v1.18.10 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2023-04-10) + +* No change notes available for this release. + +# v1.18.8 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.18.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. + +# v1.18.2 (2023-01-25) + +* **Documentation**: Doc only change to update wording in a key topic + +# v1.18.1 (2023-01-23) + +* No change notes available for this release. + +# v1.18.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.17.7 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2022-11-22) + +* No change notes available for this release. + +# v1.17.4 (2022-11-17) + +* **Documentation**: Documentation updates for AWS Security Token Service. + +# v1.17.3 (2022-11-16) + +* No change notes available for this release. + +# v1.17.2 (2022-11-10) + +* No change notes available for this release. 
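+Two of the entries above interact: v1.18.0 added an `ErrorCodeOverride` field to
+every generated error struct, and v1.18.5 made the accessor nil-safe. A minimal
+sketch of the resulting pattern (editor's illustration; `ExampleException` is a
+placeholder, not a real type):
+
+```go
+func (e *ExampleException) ErrorCode() string {
+	// Nil-safe since v1.18.5: a nil receiver or an absent override
+	// falls back to the modeled error code.
+	if e == nil || e.ErrorCodeOverride == nil {
+		return "ExampleException"
+	}
+	return *e.ErrorCodeOverride
+}
+```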
+ +# v1.17.1 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-10-21) + +* **Feature**: Add presign functionality for sts:AssumeRole operation +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.19 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.18 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.17 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.16 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2022-08-30) + +* No change notes available for this release. + +# v1.16.14 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.13 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2022-05-16) + +* **Documentation**: Documentation updates for AWS Security Token Service. + +# v1.16.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Documentation**: Updated service client model to latest release. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2021-12-21) + +* **Feature**: Updated to latest service endpoints + +# v1.11.1 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-11-30) + +* **Feature**: API client updated + +# v1.10.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.2 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-07-15) + +* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* **Documentation**: Updated service model to latest revision. +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-06-25) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go new file mode 100644 index 000000000..78eb26702 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -0,0 +1,537 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "STS" +const ServiceAPIVersion = "2011-06-15" + +// Client provides the API client to make operation calls for AWS Security Token +// Service. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on an operation call + // to modify this list for per-operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // client's initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number of attempts an API client will + // make when calling an operation that fails with a retryable error. A value of 0 is + // ignored, and will not be used to configure the API client's default retryer, or modify + // per operation call's retry max attempts. When creating a new API client, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil.
If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if the + // Retryer option is not also specified. When creating a new API client, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Per-operation call overrides are not currently + // supported; they may be in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retryer created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Per-operation call overrides are not currently + // supported; they may be in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to the client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied.
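+// Caller-side sketch (editor's addition): New builds a client purely from
+// functional options; most programs instead go through NewFromConfig (below)
+// so shared-config resolution applies. Region is the only required field, and
+// the retry settings shown are optional; assumes the caller imports the aws
+// package:
+//
+//	client := sts.New(sts.Options{Region: "us-east-1"}, func(o *sts.Options) {
+//		o.RetryMode = aws.RetryModeAdaptive // experimental client-side rate limiting
+//		o.RetryMaxAttempts = 5              // overrides the default retryer's attempt budget
+//	})
+//	_ = client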
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +// HTTPPresignerV4 represents presigner interface used by presign url client +type HTTPPresignerV4 interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*v4.SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignOptions represents the presign client options +type PresignOptions struct { + + // ClientOptions are list of functional options to mutate client options used by + // the presign client. + ClientOptions []func(*Options) + + // Presigner is the presigner used by the presign url client + Presigner HTTPPresignerV4 +} + +func (o PresignOptions) copy() PresignOptions { + clientOptions := make([]func(*Options), len(o.ClientOptions)) + copy(clientOptions, o.ClientOptions) + o.ClientOptions = clientOptions + return o +} + +// WithPresignClientFromClientOptions is a helper utility to retrieve a function +// that takes PresignOption as input +func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { + return withPresignClientFromClientOptions(optFns).options +} + +type withPresignClientFromClientOptions []func(*Options) + +func (w withPresignClientFromClientOptions) options(o *PresignOptions) { + o.ClientOptions = append(o.ClientOptions, w...) +} + +// PresignClient represents the presign url client +type PresignClient struct { + client *Client + options PresignOptions +} + +// NewPresignClient generates a presign client using provided API Client and +// presign options +func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { + var options PresignOptions + for _, fn := range optFns { + fn(&options) + } + if len(options.ClientOptions) != 0 { + c = New(c.options, options.ClientOptions...) 
+ } + + if options.Presigner == nil { + options.Presigner = newDefaultV4Signer(c.options) + } + + return &PresignClient{ + client: c, + options: options, + } +} + +func withNopHTTPClientAPIOption(o *Options) { + o.HTTPClient = smithyhttp.NopClient{} +} + +type presignConverter PresignOptions + +func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + stack.Finalize.Clear() + stack.Deserialize.Clear() + stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) + stack.Build.Remove("UserAgent") + pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.Presigner, + LogSigning: options.ClientLogMode.IsSigning(), + }) + err = stack.Finalize.Add(pmw, middleware.After) + if err != nil { + return err + } + if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { + return err + } + // convert request to a GET request + err = query.AddAsGetRequestMiddleware(stack) + if err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigingMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go new file mode 100644 index 000000000..db22efc0e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -0,0 +1,418 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials that you can use to access +// Amazon Web Services resources. These temporary credentials consist of an access +// key ID, a secret access key, and a security token. Typically, you use AssumeRole +// within your account or for cross-account access. For a comparison of AssumeRole +// with other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. Permissions The temporary security credentials created by +// AssumeRole can be used to make API calls to any Amazon Web Services service +// with the following exception: You cannot call the Amazon Web Services STS +// GetFederationToken or GetSessionToken API operations. (Optional) You can pass +// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. 
You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's +// identity-based policy and the session policies. You can use the role's temporary +// credentials in subsequent Amazon Web Services API calls to access resources in +// the account that owns the role. You cannot use session policies to grant more +// permissions than those allowed by the identity-based policy of the role that is +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. When you create a role, you create two policies: a role +// trust policy that specifies who can assume the role, and a permissions policy +// that specifies what can be done with the role. You specify the trusted principal +// that is allowed to assume the role in the role trust policy. To assume a role +// from a different account, your Amazon Web Services account must be trusted by +// the role. The trust relationship is defined in the role's trust policy when the +// role is created. That trust policy states which accounts are allowed to delegate +// that access to users in the account. A user who wants to access a role in a +// different account must also have permissions that are delegated from the account +// administrator. The administrator must attach a policy that allows the user to +// call AssumeRole for the ARN of the role in the other account. To allow a user +// to assume a role in the same account, you can do either of the following: +// - Attach a policy to the user that allows the user to call AssumeRole (as long +// as the role's trust policy trusts the account). +// - Add the user as a principal directly in the role's trust policy. +// +// You can do either because the role’s trust policy acts as an IAM resource-based +// policy. When a resource-based policy grants access to a principal in the same +// account, no additional identity-based policy is required. For more information +// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your +// session. These tags are called session tags. For more information about session +// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. An administrator must grant you the permissions necessary +// to pass session tags. The administrator can also create granular permissions to +// allow you to pass only specific session tags. For more information, see +// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. 
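+// A caller-side sketch tying this together (editor's illustration, not from
+// the AWS docs; the ARN, session name, and tag values are hypothetical, and
+// the snippet assumes the aws package is imported):
+//
+//	out, err := client.AssumeRole(ctx, &sts.AssumeRoleInput{
+//		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//		RoleSessionName: aws.String("example-session"),
+//		Tags: []types.Tag{
+//			{Key: aws.String("Department"), Value: aws.String("Engineering")},
+//		},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	creds := out.Credentials // temporary AccessKeyId, SecretAccessKey, SessionToken
+//	_ = creds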
Using MFA with AssumeRole (Optional) You can include +// multi-factor authentication (MFA) information when you call AssumeRole. This is +// useful for cross-account scenarios to ensure that the user who assumes the role +// has been authenticated with an Amazon Web Services MFA device. In that scenario, +// the trust policy of the role being assumed includes a condition that tests for +// MFA authentication. If the caller does not include valid MFA information, the +// request to assume the role is denied. The condition in a trust policy that tests +// for MFA authentication might look like the following example. "Condition": +// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see +// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide. To use MFA with AssumeRole, you pass values for +// the SerialNumber and TokenCode parameters. The SerialNumber value identifies +// the user's hardware or virtual MFA device. The TokenCode is the time-based +// one-time password (TOTP) that the MFA device produces. +func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { + if params == nil { + params = &AssumeRoleInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleInput struct { + + // The Amazon Resource Name (ARN) of the role to assume. + // + // This member is required. + RoleArn *string + + // An identifier for the assumed role session. Use the role session name to + // uniquely identify a session when the same role is assumed by different + // principals or for different reasons. In cross-account scenarios, the role + // session name is visible to, and can be logged by, the account that owns the role. + // The role session name is also used in the ARN of the assumed role principal. + // This means that subsequent cross-account API requests that use the temporary + // security credentials will expose the role session name to the external account + // in their CloudTrail logs. The regex used to validate this parameter is a string + // of characters consisting of upper- and lower-case alphanumeric characters with + // no spaces. You can also include underscores or any of the following characters: + // =,.@- + // + // This member is required. + RoleSessionName *string + + // The duration, in seconds, of the role session. The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for the + // role. The maximum session duration setting can have a value from 1 hour to 12 + // hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web + // Services CLI or Amazon Web Services API role session to a maximum of one hour. + // When you use the AssumeRole API operation to assume a role, you can specify the + // duration of your role session with the DurationSeconds parameter.
You can + // specify a parameter value of up to 43200 seconds (12 hours), depending on the + // maximum session duration setting for your role. However, if you assume a role + // using role chaining and provide a DurationSeconds parameter value greater than + // one hour, the operation fails. To learn how to view the maximum value for your + // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A + // cross-account role is usually set up to trust everyone in an account. Therefore, + // the administrator of the trusting account might send an external ID to the + // administrator of the trusted account. That way, only someone with the ID can + // assume the role, rather than everyone in the account. For more information about + // the external ID, see How to Use an External ID When Granting Access to Your + // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@:/- + ExternalId *string + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. 
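+// An illustrative inline session policy (editor's sketch; the bucket name is
+// hypothetical, and the snippet assumes the aws package). A small document
+// like this stays well under the 2,048-character plaintext limit described
+// above:
+//
+//	input.Policy = aws.String(`{
+//	  "Version": "2012-10-17",
+//	  "Statement": [{
+//	    "Effect": "Allow",
+//	    "Action": "s3:GetObject",
+//	    "Resource": "arn:aws:s3:::example-bucket/*"
+//	  }]
+//	}`)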
An Amazon Web + // Services conversion compresses the passed inline session policy, managed policy + // ARNs, and session tags into a packed binary format that has a separate limit. + // Your request can fail for this limit even if your plaintext meets the other + // requirements. The PackedPolicySize response element indicates by percentage how + // close the policies and tags for your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed inline session policy, managed policy ARNs, and session + // tags into a packed binary format that has a separate limit. Your request can + // fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Passing policies to this + // operation returns new temporary credentials. The resulting session's permissions + // are the intersection of the role's identity-based policy and the session + // policies. You can use the role's temporary credentials in subsequent Amazon Web + // Services API calls to access resources in the account that owns the role. You + // cannot use session policies to grant more permissions than those allowed by the + // identity-based policy of the role that is being assumed. For more information, + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy of + // the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as + // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as + // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter + // is a string of characters consisting of upper- and lower-case alphanumeric + // characters with no spaces. You can also include underscores or any of the + // following characters: =,.@- + SerialNumber *string + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. 
For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@-. You cannot use a value that begins with the text aws: . This prefix is + // reserved for Amazon Web Services internal use. + SourceIdentity *string + + // A list of session tags that you want to pass. Each session tag consists of a + // key name and an associated value. For more information about session tags, see + // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. This parameter is optional. You can pass up to 50 session + // tags. The plaintext session tag keys can’t exceed 128 characters, and the values + // can’t exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. An Amazon Web Services conversion compresses the passed + // inline session policy, managed policy ARNs, and session tags into a packed + // binary format that has a separate limit. Your request can fail for this limit + // even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. You can pass a session tag with the + // same key as a tag that is already attached to the role. When you do, session + // tags override a role tag with the same key. Tag key–value pairs are not case + // sensitive, but case is preserved. This means that you cannot have separate + // Department and department tag keys. Assume that the role has the Department = + // Marketing tag and you pass the department = engineering session tag. Department + // and department are not saved as separate tags, and the session tag passed in + // the request takes precedence over the role tag. Additionally, if you used + // temporary credentials to perform this operation, the new session inherits any + // transitive session tags from the calling session. If you pass a session tag with + // the same key as an inherited tag, the operation fails. To view the inherited + // tags for a session, see the CloudTrail logs. For more information, see Viewing + // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []types.Tag + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA. (In other words, if the policy includes a condition that + // tests for MFA). If the role being assumed requires MFA and if the TokenCode + // value is missing or expired, the AssumeRole call returns an "access denied" + // error. The format for this parameter, as described by its regex pattern, is a + // sequence of six numeric digits. + TokenCode *string + + // A list of keys for session tags that you want to set as transitive. If you set + // a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. 
For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. This parameter is optional. When you set session tags as + // transitive, the session policy and session tags packed binary limit is not + // affected. If you choose not to specify a transitive tag key, then no tags are + // passed from this session to any subsequent sessions. + TransitiveTagKeys []string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRole request, including temporary +// Amazon Web Services credentials that can be used to make Amazon Web Services +// requests. +type AssumeRoleOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole . + AssumedRoleUser *types.AssumedRoleUser + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRole", + } +} + +// PresignAssumeRole is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &AssumeRoleInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns, + c.client.addOperationAssumeRoleMiddlewares, + presignConverter(options).convertToPresignMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go new file mode 100644 index 000000000..65dccc9ea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -0,0 +1,345 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
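For orientation, a minimal caller of the AssumeRole operation vendored above might look like the following sketch; it is illustrative only and not part of the vendored diff. The role ARN, MFA serial number, and token code are placeholders, and the default credential chain is assumed.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// Resolve region and long-term credentials from the default chain
	// (environment, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// SerialNumber and TokenCode are only needed when the role's trust policy
	// tests aws:MultiFactorAuthPresent, as described in the doc comment above.
	out, err := client.AssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int32(3600),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"), // placeholder
		TokenCode:       aws.String("123456"), // six-digit TOTP from the MFA device
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key:", aws.ToString(out.Credentials.AccessKeyId))
}

The same input struct drives PresignAssumeRole, which returns a signed HTTP request instead of executing the call.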
+ +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated via a SAML authentication response. This operation provides a +// mechanism for tying an enterprise identity store or directory to role-based +// Amazon Web Services access without user-specific credentials or configuration. +// For a comparison of AssumeRoleWithSAML with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. The temporary security credentials returned by this +// operation consist of an access key ID, a secret access key, and a security +// token. Applications can use these temporary security credentials to sign calls +// to Amazon Web Services services. Session Duration By default, the temporary +// security credentials created by AssumeRoleWithSAML last for one hour. However, +// you can use the optional DurationSeconds parameter to specify the duration of +// your session. Your role session lasts for the duration that you specify, or +// until the time specified in the SAML authentication response's +// SessionNotOnOrAfter value, whichever is shorter. You can provide a +// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour to 12 +// hours. To learn how to view the maximum value for your role, see View the +// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you use +// the AssumeRole* API operations or the assume-role* CLI commands. However the +// limit does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume a +// role using role chaining and provide a DurationSeconds parameter value greater +// than one hour, the operation fails. Permissions The temporary security +// credentials created by AssumeRoleWithSAML can be used to make API calls to any +// Amazon Web Services service with the following exception: you cannot call the +// STS GetFederationToken or GetSessionToken API operations. (Optional) You can +// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. 
You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's +// identity-based policy and the session policies. You can use the role's temporary +// credentials in subsequent Amazon Web Services API calls to access resources in +// the account that owns the role. You cannot use session policies to grant more +// permissions than those allowed by the identity-based policy of the role that is +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of +// Amazon Web Services security credentials. The identity of the caller is +// validated by using keys in the metadata document that is uploaded for the SAML +// provider entity for your identity provider. Calling AssumeRoleWithSAML can +// result in an entry in your CloudTrail logs. The entry includes the value in the +// NameID element of the SAML assertion. We recommend that you use a NameIDType +// that is not associated with any personally identifiable information (PII). For +// example, you could instead use the persistent identifier ( +// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). Tags (Optional) You can +// configure your IdP to pass attributes into your SAML assertion as session tags. +// Each session tag consists of a key name and an associated value. For more +// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session +// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. +// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. An Amazon Web Services conversion compresses the passed +// inline session policy, managed policy ARNs, and session tags into a packed +// binary format that has a separate limit. Your request can fail for this limit +// even if your plaintext meets the other requirements. The PackedPolicySize +// response element indicates by percentage how close the policies and tags for +// your request are to the upper size limit. You can pass a session tag with the +// same key as a tag that is attached to the role. When you do, session tags +// override the role's tags with the same key. An administrator must grant you the +// permissions necessary to pass session tags. The administrator can also create +// granular permissions to allow you to pass only specific session tags. For more +// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. 
For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. SAML Configuration Before your application can call +// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to +// issue the claims required by Amazon Web Services. Additionally, you must use +// Identity and Access Management (IAM) to create a SAML provider entity in your +// Amazon Web Services account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. +// For more information, see the following resources: +// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { + if params == nil { + params = &AssumeRoleWithSAMLInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithSAMLOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithSAMLInput struct { + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the + // IdP. + // + // This member is required. + PrincipalArn *string + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // The base64 encoded SAML authentication response provided by the IdP. For more + // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // This member is required. + SAMLAssertion *string + + // The duration, in seconds, of the role session. Your role session lasts for the + // duration that you specify for the DurationSeconds parameter, or until the time + // specified in the SAML authentication response's SessionNotOnOrAfter value, + // whichever is shorter. You can provide a DurationSeconds value from 900 seconds + // (15 minutes) up to the maximum session duration setting for the role. This + // setting can have a value from 1 hour to 12 hours. If you specify a value higher + // than this setting, the operation fails. For example, if you specify a session + // duration of 12 hours, but your administrator set the maximum session duration to + // 6 hours, your operation fails. To learn how to view the maximum value for your + // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. 
The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed inline session policy, managed policy + // ARNs, and session tags into a packed binary format that has a separate limit. + // Your request can fail for this limit even if your plaintext meets the other + // requirements. The PackedPolicySize response element indicates by percentage how + // close the policies and tags for your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed inline session policy, managed policy ARNs, and session + // tags into a packed binary format that has a separate limit. Your request can + // fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Passing policies to this + // operation returns new temporary credentials. The resulting session's permissions + // are the intersection of the role's identity-based policy and the session + // policies. 
You can use the role's temporary credentials in subsequent Amazon Web + // Services API calls to access resources in the account that owns the role. You + // cannot use session policies to grant more permissions than those allowed by the + // identity-based policy of the role that is being assumed. For more information, + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type AssumeRoleWithSAMLOutput struct { + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *types.AssumedRoleUser + + // The value of the Recipient attribute of the SubjectConfirmationData element of + // the SAML assertion. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // The value of the Issuer element of the SAML assertion. + Issuer *string + + // A hash value based on the concatenation of the following: + // - The Issuer response value. + // - The Amazon Web Services account ID. + // - The friendly name (the last part of the ARN) of the SAML provider in IAM. + // The combination of NameQualifier and Subject can be used to uniquely identify a + // user. The following pseudocode shows how the hash value is calculated: BASE64 ( + // SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + NameQualifier *string + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The value in the SourceIdentity attribute in the SAML assertion. You can + // require users to set a source identity value when they assume a role. You do + // this by using the sts:SourceIdentity condition key in a role trust policy. That + // way, actions that are taken with the role are associated with that user. After + // the source identity is set, the value cannot be changed. It is present in the + // request for all actions that are taken by the role and persists across chained + // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML + // assertion. For more information about using source identity, see Monitor and + // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. 
You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient or + // persistent . If the format includes the prefix + // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example, + // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . + // If the format includes any other prefix, the format is returned with no + // modifications. + SubjectType *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRoleWithSAML", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go new file mode 100644 index 000000000..f00f30763 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -0,0 +1,363 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
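Again as an editorial sketch rather than part of the vendored code, AssumeRoleWithSAML can be called without Amazon Web Services credentials, since the request is validated against the IdP metadata rather than SigV4. The provider ARN, role ARN, and assertion below are placeholders; only a region is assumed.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// Unsigned operation: a credential-less client with just a region suffices.
	client := sts.New(sts.Options{Region: "eu-central-1"})

	// The base64-encoded SAML response is obtained from the IdP out of band.
	samlAssertion := "...base64-encoded SAML response..." // placeholder

	out, err := client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"), // placeholder
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/example-saml-role"),    // placeholder
		SAMLAssertion: aws.String(samlAssertion),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("NameID subject:", aws.ToString(out.Subject))
}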
+ +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated in a mobile or web application with a web identity provider. +// Example providers include the OAuth 2.0 providers Login with Amazon and +// Facebook, or any OpenID Connect-compatible identity provider such as Google or +// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// . For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. To learn more about Amazon +// Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// in Amazon Cognito Developer Guide. Calling AssumeRoleWithWebIdentity does not +// require the use of Amazon Web Services security credentials. Therefore, you can +// distribute an application (for example, on mobile devices) that requests +// temporary security credentials without including long-term Amazon Web Services +// credentials in the application. You also don't need to deploy server-based proxy +// services that use long-term Amazon Web Services credentials. Instead, the +// identity of the caller is validated by using a token from the web identity +// provider. For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary +// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. The temporary security credentials returned by this API +// consist of an access key ID, a secret access key, and a security token. +// Applications can use these temporary security credentials to sign calls to +// Amazon Web Services service API operations. Session Duration By default, the +// temporary security credentials created by AssumeRoleWithWebIdentity last for +// one hour. However, you can use the optional DurationSeconds parameter to +// specify the duration of your session. You can provide a value from 900 seconds +// (15 minutes) up to the maximum session duration setting for the role. This +// setting can have a value from 1 hour to 12 hours. To learn how to view the +// maximum value for your role, see View the Maximum Session Duration Setting for +// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you use +// the AssumeRole* API operations or the assume-role* CLI commands. However the +// limit does not apply when you use those operations to create a console URL. 
For +// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. Permissions The temporary security credentials created by +// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web +// Services service with the following exception: you cannot call the STS +// GetFederationToken or GetSessionToken API operations. (Optional) You can pass +// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's +// identity-based policy and the session policies. You can use the role's temporary +// credentials in subsequent Amazon Web Services API calls to access resources in +// the account that owns the role. You cannot use session policies to grant more +// permissions than those allowed by the identity-based policy of the role that is +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass +// attributes into your web identity token as session tags. Each session tag +// consists of a key name and an associated value. For more information about +// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session +// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. +// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. An Amazon Web Services conversion compresses the passed +// inline session policy, managed policy ARNs, and session tags into a packed +// binary format that has a separate limit. Your request can fail for this limit +// even if your plaintext meets the other requirements. The PackedPolicySize +// response element indicates by percentage how close the policies and tags for +// your request are to the upper size limit. You can pass a session tag with the +// same key as a tag that is attached to the role. When you do, the session tag +// overrides the role tag with the same key. An administrator must grant you the +// permissions necessary to pass session tags. The administrator can also create +// granular permissions to allow you to pass only specific session tags. For more +// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. 
Identities Before your application can call +// AssumeRoleWithWebIdentity , you must have an identity token from a supported +// identity provider and create a role that the application can assume. The role +// that your application assumes must trust the identity provider that is +// associated with the identity token. In other words, the identity provider must +// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can +// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided web identity token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you could +// instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes) +// . For more information about how to use web identity federation and the +// AssumeRoleWithWebIdentity API, see the following resources: +// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// . +// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/) +// . Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then using +// those credentials to make a request to Amazon Web Services. +// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// . These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications) +// . This article discusses web identity federation and shows an example of how to +// use web identity federation to get access to content in Amazon S3. +func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { + if params == nil { + params = &AssumeRoleWithWebIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithWebIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithWebIdentityInput struct { + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // An identifier for the assumed role session. Typically, you pass the name or + // identifier that is associated with the user who is using your application. That + // way, the temporary security credentials that your application will use are + // associated with that user. This session name is included as part of the ARN and + // assumed role ID in the AssumedRoleUser response element. 
The regex used to + // validate this parameter is a string of characters consisting of upper- and + // lower-case alphanumeric characters with no spaces. You can also include + // underscores or any of the following characters: =,.@- + // + // This member is required. + RoleSessionName *string + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the + // identity provider. Your application must get this token by authenticating the + // user who is using your application with a web identity provider before the + // application makes an AssumeRoleWithWebIdentity call. + // + // This member is required. + WebIdentityToken *string + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed inline session policy, managed policy + // ARNs, and session tags into a packed binary format that has a separate limit. + // Your request can fail for this limit even if your plaintext meets the other + // requirements. 
The PackedPolicySize response element indicates by percentage how + // close the policies and tags for your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed inline session policy, managed policy ARNs, and session + // tags into a packed binary format that has a separate limit. Your request can + // fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Passing policies to this + // operation returns new temporary credentials. The resulting session's permissions + // are the intersection of the role's identity-based policy and the session + // policies. You can use the role's temporary credentials in subsequent Amazon Web + // Services API calls to access resources in the account that owns the role. You + // cannot use session policies to grant more permissions than those allowed by the + // identity-based policy of the role that is being assumed. For more information, + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. Do not specify this value for OpenID Connect ID tokens. + ProviderId *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary Amazon Web Services credentials that can be used to make +// Amazon Web Services requests. +type AssumeRoleWithWebIdentityOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole . + AssumedRoleUser *types.AssumedRoleUser + + // The intended audience (also known as client ID) of the web identity token. This + // is traditionally the client identifier issued to the application that requested + // the web identity token. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. The size of the security token that STS API + // operations return is not fixed. 
We strongly recommend that you make no + // assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed in + // the AssumeRoleWithWebIdentity request. + Provider *string + + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. You can require users to set a source identity value + // when they assume a role. You do this by using the sts:SourceIdentity condition + // key in a role trust policy. That way, actions that are taken with the role are + // associated with that user. After the source identity is set, the value cannot be + // changed. It is present in the request for all actions that are taken by the role + // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON + // web token. To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with the + // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user + // and the application that acquired the WebIdentityToken (pairwise identifier). + // For OpenID Connect ID tokens, this field contains the value returned by the + // identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRoleWithWebIdentity", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go new file mode 100644 index 000000000..587d1d3c0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -0,0 +1,148 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decodes additional information about the authorization status of a request from +// an encoded message returned in response to an Amazon Web Services request. For +// example, if a user is not authorized to perform an operation that he or she has +// requested, the request returns a Client.UnauthorizedOperation response (an HTTP +// 403 response). Some Amazon Web Services operations additionally return an +// encoded message that can provide details about this authorization failure. Only +// certain Amazon Web Services operations return an encoded authorization message. 
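Before the rest of the DecodeAuthorizationMessage documentation, a sketch of the web identity flow vendored above; like the SAML variant it is unsigned. The role ARN is a placeholder, and the token is assumed to be projected to a file, for example by a Kubernetes service-account token volume.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// Unsigned operation: no credentials needed, only a region.
	client := sts.New(sts.Options{Region: "eu-central-1"})

	// AWS_WEB_IDENTITY_TOKEN_FILE conventionally points at the OIDC ID token;
	// using it here is an assumption for the sketch.
	token, err := os.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
	if err != nil {
		log.Fatal(err)
	}

	out, err := client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-oidc-role"), // placeholder
		RoleSessionName:  aws.String("example-session"),
		WebIdentityToken: aws.String(string(token)),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assumed-role ARN:", aws.ToString(out.AssumedRoleUser.Arn))
}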
+// The documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. The message is +// encoded because the details of the authorization status can contain privileged +// information that the user who requested the operation should not see. To decode +// an authorization status message, a user must be granted permissions through an +// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage ) +// action. The decoded message includes the following type of information: +// - Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether a +// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// - The principal who made the request. +// - The requested action. +// - The requested resource. +// - The values of condition keys in the context of the user's request. +func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { + if params == nil { + params = &DecodeAuthorizationMessageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DecodeAuthorizationMessageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DecodeAuthorizationMessageInput struct { + + // The encoded message that was returned with the response. + // + // This member is required. + EncodedMessage *string + + noSmithyDocumentSerde +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. +type DecodeAuthorizationMessageOutput struct { + + // The API returns a response with the decoded message. + DecodedMessage *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "DecodeAuthorizationMessage", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go new file mode 100644 index 000000000..f090ecf47 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the account identifier for the specified access key ID. Access keys +// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and +// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). +// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. 
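+// (Editor's note, an illustrative sketch only; not part of the generated
+// file. The access key ID below is the documentation example value, and the
+// client is assumed to come from sts.NewFromConfig.)
+//
+//	out, err := client.GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
+//		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("owning account:", *out.Account)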
When you pass an access key ID to this operation, it +// returns the ID of the Amazon Web Services account to which the keys belong. +// Access key IDs beginning with AKIA are long-term credentials for an IAM user or +// the Amazon Web Services account root user. Access key IDs beginning with ASIA +// are temporary credentials that are created using STS operations. If the account +// in the response belongs to you, you can sign in as the root user and review your +// root user access keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. This operation does not indicate the state of the access +// key. The key might be active, inactive, or deleted. Active keys might not have +// permissions to perform an operation. Providing a deleted access key might return +// an error that the key doesn't exist. +func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { + if params == nil { + params = &GetAccessKeyInfoInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetAccessKeyInfoOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetAccessKeyInfoInput struct { + + // The identifier of an access key. This parameter allows (through its regex + // pattern) a string of characters that can consist of any upper- or lowercase + // letter or digit. + // + // This member is required. + AccessKeyId *string + + noSmithyDocumentSerde +} + +type GetAccessKeyInfoOutput struct { + + // The number used to identify the Amazon Web Services account. + Account *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetAccessKeyInfo", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go new file mode 100644 index 000000000..93823927b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -0,0 +1,157 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns details about the IAM user or role whose credentials are used to call +// the operation. No permissions are required to perform this operation. If an +// administrator attaches a policy to your identity that explicitly denies access +// to the sts:GetCallerIdentity action, you can still perform this operation. +// Permissions are not required because the same information is returned when +// access is denied. 
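+// (Editor's note, an illustrative sketch only; not part of the generated
+// file. The client is assumed to come from sts.NewFromConfig.)
+//
+//	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(*out.Account, *out.Arn, *out.UserId)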
To view an example response, see I Am Not Authorized to +// Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. +func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCallerIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCallerIdentityInput struct { + noSmithyDocumentSerde +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. + Account *string + + // The Amazon Web Services ARN associated with the calling entity. + Arn *string + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed in + // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, 
options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetCallerIdentity", + } +} + +// PresignGetCallerIdentity is used to generate a presigned HTTP Request which +// contains presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns, + c.client.addOperationGetCallerIdentityMiddlewares, + presignConverter(options).convertToPresignMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go new file mode 100644 index 000000000..ccb7366e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -0,0 +1,308 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials (consisting of an access key +// ID, a secret access key, and a security token) for a user. A typical use is in a +// proxy application that gets temporary security credentials on behalf of +// distributed applications inside a corporate network. You must call the +// GetFederationToken operation using the long-term security credentials of an IAM +// user. As a result, this call is appropriate in contexts where those credentials +// can be safeguarded, usually in a server-based application. For a comparison of +// GetFederationToken with the other API operations that produce temporary +// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. Although it is possible to call GetFederationToken using +// the security credentials of an Amazon Web Services account root user rather than +// an IAM user that you create for the purpose of a proxy application, we do not +// recommend it. For more information, see Safeguard your root user credentials +// and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide. You can create a mobile-based or browser-based app that +// can authenticate users using a web identity provider like Login with Amazon, +// Facebook, Google, or an OpenID Connect-compatible identity provider. 
In this +// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) +// or AssumeRoleWithWebIdentity . For more information, see Federation Through a +// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. Session duration The temporary credentials are valid for +// the specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours). The default session duration is 43,200 seconds (12 hours). +// Temporary credentials obtained by using the root user credentials have a maximum +// duration of 3,600 seconds (1 hour). Permissions You can use the temporary +// credentials created by GetFederationToken in any Amazon Web Services service +// with the following exceptions: +// - You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. This limitation does not apply to console sessions. +// - You cannot call any STS operations except GetCallerIdentity . +// +// You can use temporary credentials for single sign-on (SSO) to the console. You +// must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM user +// policies and the session policies that you pass. This gives you a way to further +// restrict the permissions for a federated user. You cannot use session policies +// to grant more permissions than those that are defined in the permissions policy +// of the IAM user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to create +// temporary security credentials, see GetFederationToken—Federation Through a +// Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken) +// . You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session in the +// Principal element of the policy, the session has the permissions allowed by the +// policy. These permissions are granted in addition to the permissions granted by +// the session policies. Tags (Optional) You can pass tag key-value pairs to your +// session. These are called session tags. For more information about session tags, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can create a mobile-based or browser-based app that +// can authenticate users using a web identity provider like Login with Amazon, +// Facebook, Google, or an OpenID Connect-compatible identity provider. In this +// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) +// or AssumeRoleWithWebIdentity . 
For more information, see Federation Through a +// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. An administrator must grant you the permissions necessary +// to pass session tags. The administrator can also create granular permissions to +// allow you to pass only specific session tags. For more information, see +// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is +// preserved. This means that you cannot have separate Department and department +// tag keys. Assume that the user that you are federating has the Department = +// Marketing tag and you pass the department = engineering session tag. Department +// and department are not saved as separate tags, and the session tag passed in +// the request takes precedence over the user tag. +func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { + if params == nil { + params = &GetFederationTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetFederationTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetFederationTokenInput struct { + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob ). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon S3 + // bucket policy. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + // + // This member is required. + Name *string + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using root user credentials are restricted to a maximum of 3,600 seconds (one + // hour). If the specified duration is longer than one hour, the session obtained + // by using root user credentials defaults to one hour. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. This parameter is + // optional. However, if you do not pass any session policies, then the resulting + // federated user session has no permissions. When you pass session policies, the + // session permissions are the intersection of the IAM user policies and the + // session policies that you pass. This gives you a way to further restrict the + // permissions for a federated user. 
You cannot use session policies to grant more + // permissions than those that are defined in the permissions policy of the IAM + // user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The resulting credentials can be used to access a + // resource that has a resource-based policy. If that policy specifically + // references the federated user session in the Principal element of the policy, + // the session has the permissions allowed by the policy. These permissions are + // granted in addition to the permissions that are granted by the session policies. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. An Amazon Web Services conversion compresses the + // passed inline session policy, managed policy ARNs, and session tags into a + // packed binary format that has a separate limit. Your request can fail for this + // limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as a managed session policy. The policies must exist in the same account as + // the IAM user that is requesting federated access. You must pass an inline or + // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. The plaintext that you + // use for both inline and managed session policies can't exceed 2,048 characters. + // You can provide up to 10 managed policy ARNs. For more information about ARNs, + // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. This parameter is optional. + // However, if you do not pass any session policies, then the resulting federated + // user session has no permissions. When you pass session policies, the session + // permissions are the intersection of the IAM user policies and the session + // policies that you pass. This gives you a way to further restrict the permissions + // for a federated user. You cannot use session policies to grant more permissions + // than those that are defined in the permissions policy of the IAM user. For more + // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The resulting credentials can be used to access a + // resource that has a resource-based policy. If that policy specifically + // references the federated user session in the Principal element of the policy, + // the session has the permissions allowed by the policy. These permissions are + // granted in addition to the permissions that are granted by the session policies. 
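+// (Editor's note, an illustrative sketch only; not part of the generated
+// file. The inline policy JSON and the managed policy ARN are placeholders;
+// the resulting session permissions are the intersection of the calling IAM
+// user's policies and these session policies.)
+//
+//	out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
+//		Name:   aws.String("Bob"),
+//		Policy: aws.String(inlinePolicyJSON),
+//		PolicyArns: []types.PolicyDescriptorType{
+//			{Arn: aws.String("arn:aws:iam::123456789012:policy/session-policy")},
+//		},
+//	})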
+ // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + PolicyArns []types.PolicyDescriptorType + + // A list of session tags. Each session tag consists of a key name and an + // associated value. For more information about session tags, see Passing Session + // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. This parameter is optional. You can pass up to 50 session + // tags. The plaintext session tag keys can’t exceed 128 characters and the values + // can’t exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. An Amazon Web Services conversion compresses the passed + // inline session policy, managed policy ARNs, and session tags into a packed + // binary format that has a separate limit. Your request can fail for this limit + // even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. You can pass a session tag with the + // same key as a tag that is already attached to the user you are federating. When + // you do, session tags override a user tag with the same key. Tag key–value pairs + // are not case sensitive, but case is preserved. This means that you cannot have + // separate Department and department tag keys. Assume that the role has the + // Department = Marketing tag and you pass the department = engineering session + // tag. Department and department are not saved as separate tags, and the session + // tag passed in the request takes precedence over the role tag. + Tags []types.Tag + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetFederationToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type GetFederationTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // Identifiers for the federated user associated with the credentials (such as + // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use + // the federated user's ARN in your resource-based policies, such as an Amazon S3 + // bucket policy. + FederatedUser *types.FederatedUser + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetFederationToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go new file mode 100644 index 000000000..4dfac4c98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -0,0 +1,191 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary credentials for an Amazon Web Services account or +// IAM user. The credentials consist of an access key ID, a secret access key, and +// a security token. Typically, you use GetSessionToken if you want to use MFA to +// protect programmatic calls to specific Amazon Web Services API operations like +// Amazon EC2 StopInstances . MFA-enabled IAM users must call GetSessionToken and +// submit an MFA code that is associated with their MFA device. 
Using the temporary
+// security credentials that the call returns, IAM users can then make programmatic
+// calls to API operations that require MFA authentication. An incorrect MFA code
+// causes the API to return an access denied error. For a comparison of
+// GetSessionToken with the other API operations that produce temporary
+// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide. No permissions are required for users to perform this
+// operation. The purpose of the sts:GetSessionToken operation is to authenticate
+// the user using MFA. You cannot use policies to control authentication
+// operations. For more information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
+// in the IAM User Guide. Session Duration The GetSessionToken operation must be
+// called by using the long-term Amazon Web Services security credentials of an IAM
+// user. Credentials that are created by IAM users are valid for the duration that
+// you specify. This duration can range from 900 seconds (15 minutes) up to a
+// maximum of 129,600 seconds (36 hours), with a default of 43,200 seconds (12
+// hours). Credentials based on account credentials can range from 900 seconds (15
+// minutes) up to 3,600 seconds (1 hour), with a default of 1 hour. Permissions The
+// temporary security credentials created by GetSessionToken can be used to make
+// API calls to any Amazon Web Services service with the following exceptions:
+// - You cannot call any IAM API operations unless MFA authentication
+// information is included in the request.
+// - You cannot call any STS API except AssumeRole or GetCallerIdentity .
+//
+// The credentials that GetSessionToken returns are based on permissions
+// associated with the IAM user whose credentials were used to call the operation.
+// The temporary credentials have the same permissions as the IAM user. Although it
+// is possible to call GetSessionToken using the security credentials of an Amazon
+// Web Services account root user rather than an IAM user, we do not recommend it.
+// If GetSessionToken is called using root user credentials, the temporary
+// credentials have root user permissions. For more information, see Safeguard
+// your root user credentials and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
+// in the IAM User Guide. For more information about using GetSessionToken to
+// create temporary credentials, see Temporary Credentials for Users in Untrusted
+// Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
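+// (Editor's note, an illustrative sketch only; not part of the generated
+// file. The MFA serial number and token code are placeholders, and the client
+// is assumed to come from sts.NewFromConfig.)
+//
+//	out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
+//		DurationSeconds: aws.Int32(3600),
+//		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
+//		TokenCode:       aws.String("123456"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	// out.Credentials holds the temporary AccessKeyId, SecretAccessKey,
+//	// SessionToken, and Expiration.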
+func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { + if params == nil { + params = &GetSessionTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetSessionTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetSessionTokenInput struct { + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for + // Amazon Web Services account owners are restricted to a maximum of 3,600 seconds + // (one hour). If the duration is longer than one hour, the session for Amazon Web + // Services account owners defaults to one hour. + DurationSeconds *int32 + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM user + // has a policy that requires MFA authentication. The value is either the serial + // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name + // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You + // can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. The regex used + // to validate this parameter is a string of characters consisting of upper- and + // lower-case alphanumeric characters with no spaces. You can also include + // underscores or any of the following characters: =,.@:/- + SerialNumber *string + + // The value provided by the MFA device, if MFA is required. If any policy + // requires the IAM user to submit an MFA code, specify this value. If MFA + // authentication is required, the user must provide a code when requesting a set + // of temporary security credentials. A user who fails to provide the code receives + // an "access denied" response when requesting resources that require MFA + // authentication. The format for this parameter, as described by its regex + // pattern, is a sequence of six numeric digits. + TokenCode *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetSessionToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type GetSessionTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetSessionToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go new file mode 100644 index 000000000..5d634ce35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -0,0 +1,2507 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
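+// Editor's note (not generated output): every operation deserializer in this
+// file follows the same shape. On a 2xx response the body is tee'd through a
+// small ring buffer so that decode failures can attach a snapshot of the
+// payload, the XML root and the operation's "...Result" element are located,
+// and that element is decoded into the output struct. Non-2xx responses are
+// handed to a per-operation error deserializer that matches the wire error
+// code (for example ExpiredTokenException or RegionDisabledException) and
+// falls back to a generic smithy.GenericAPIError.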
+ +package sts + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strconv" + "strings" +) + +type awsAwsquery_deserializeOpAssumeRole struct { +} + +func (*awsAwsquery_deserializeOpAssumeRole) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata) + } + output := &AssumeRoleOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = 
errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata) + } + output := &AssumeRoleWithSAMLOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleWithSAMLResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err 
!= nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata) + } + output := &AssumeRoleWithWebIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPCommunicationError", errorCode): + return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata) + } + output := &DecodeAuthorizationMessageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("DecodeAuthorizationMessageResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("InvalidAuthorizationMessageException", errorCode): + return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata) + } + output := &GetAccessKeyInfoOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetAccessKeyInfoResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata) + } + output := &GetCallerIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetCallerIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, 
metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetFederationToken struct { +} + +func (*awsAwsquery_deserializeOpGetFederationToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, &metadata) + } + output := &GetFederationTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetFederationTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response 
body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetSessionToken struct { +} + +func (*awsAwsquery_deserializeOpGetSessionToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata) + } + output := &GetSessionTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetSessionTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPCommunicationErrorException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPRejectedClaimException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidAuthorizationMessageException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidIdentityTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + 
t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.MalformedPolicyDocumentException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.PackedPolicyTooLargeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + 
return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.RegionDisabledException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AssumedRoleUser + if *v == nil { + sv = &types.AssumedRoleUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("AssumedRoleId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AssumedRoleId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Credentials + if *v == nil { + sv = &types.Credentials{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessKeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessKeyId = ptr.String(xtv) + } + + case strings.EqualFold("Expiration", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + 
return err + } + sv.Expiration = ptr.Time(t) + } + + case strings.EqualFold("SecretAccessKey", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SecretAccessKey = ptr.String(xtv) + } + + case strings.EqualFold("SessionToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SessionToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.FederatedUser + if *v == nil { + sv = &types.FederatedUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("FederatedUserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.FederatedUserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPCommunicationErrorException + if *v == nil { + sv = &types.IDPCommunicationErrorException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and 
ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPRejectedClaimException + if *v == nil { + sv = &types.IDPRejectedClaimException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidAuthorizationMessageException + if *v == nil { + sv = &types.InvalidAuthorizationMessageException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidIdentityTokenException + if *v == nil { + sv = &types.InvalidIdentityTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MalformedPolicyDocumentException + if *v == nil { + sv = &types.MalformedPolicyDocumentException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + 
originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.PackedPolicyTooLargeException + if *v == nil { + sv = &types.PackedPolicyTooLargeException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.RegionDisabledException + if *v == nil { + sv = &types.RegionDisabledException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleOutput + if *v == nil { + sv = &AssumeRoleOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + 
{ + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithSAMLOutput + if *v == nil { + sv = &AssumeRoleWithSAMLOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Issuer", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Issuer = ptr.String(xtv) + } + + case strings.EqualFold("NameQualifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NameQualifier = ptr.String(xtv) + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("Subject", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Subject = ptr.String(xtv) + } + + case strings.EqualFold("SubjectType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectType = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithWebIdentityOutput + if *v == nil { + sv = &AssumeRoleWithWebIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("Provider", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Provider = ptr.String(xtv) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectFromWebIdentityToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DecodeAuthorizationMessageOutput + if *v == nil { + sv = &DecodeAuthorizationMessageOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DecodedMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DecodedMessage = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetAccessKeyInfoOutput + if *v == nil { + sv = &GetAccessKeyInfoOutput{} + } else { + sv = *v + } + + 
for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetCallerIdentityOutput + if *v == nil { + sv = &GetCallerIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("UserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetFederationTokenOutput + if *v == nil { + sv = &GetFederationTokenOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("FederatedUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder 
smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetSessionTokenOutput + if *v == nil { + sv = &GetSessionTokenOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go new file mode 100644 index 000000000..d963fd8d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -0,0 +1,11 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package sts provides the API client, operations, and parameter types for AWS +// Security Token Service. +// +// Security Token Service Security Token Service (STS) enables you to request +// temporary, limited-privilege credentials for users. This guide provides +// descriptions of the STS API. For more information about using this service, see +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) +// . +package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go new file mode 100644 index 000000000..cababea22 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. 
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint. +func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "sts" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } +
+ if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. +// If awsResolver returns an aws.EndpointNotFoundError, the resolver will use the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json new file mode 100644 index 000000000..86341bb7d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -0,0 +1,35 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_AssumeRole.go", + "api_op_AssumeRoleWithSAML.go", + "api_op_AssumeRoleWithWebIdentity.go", + "api_op_DecodeAuthorizationMessage.go", + "api_op_GetAccessKeyInfo.go", + "api_op_GetCallerIdentity.go", + "api_op_GetFederationToken.go", + "api_op_GetSessionToken.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/sts", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
new file mode 100644
index 000000000..f88dca99c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package sts
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.19.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..0413fd89a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
@@ -0,0 +1,506 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+	// using the value passed to the ResolveEndpoint method. This value is used by
+	// the SDK to translate regions like fips-us-east-1 or us-east-1-fips to an
+	// alternative name. You must not set this value directly in your application.
+	ResolvedRegion string
+
+	// DisableHTTPS informs the resolver to return an endpoint that does not use the
+	// HTTPS scheme.
+	DisableHTTPS bool
+
+	// UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
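+	// Pseudo-regions such as "us-east-1-fips" are folded into this flag:
+	// finalizeClientEndpointResolverOptions in endpoints.go strips the fips
+	// marker into ResolvedRegion and sets aws.FIPSEndpointStateEnabled here.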
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver STS endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + 
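+			// An empty endpoints.Endpoint{} value means the region inherits
+			// the partition defaults above, e.g. "sts.{region}.amazonaws.com".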
endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "aws-global", + }: endpoints.Endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | 
endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: 
endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go new file mode 100644 index 000000000..eb60f61b1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -0,0 +1,826 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsquery_serializeOpAssumeRole struct { +} + +func (*awsAwsquery_serializeOpAssumeRole) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRole") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, 
err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithSAML") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path 
= operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithWebIdentity") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DecodeAuthorizationMessage") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: 
err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetAccessKeyInfo") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetCallerIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetCallerIdentity") + body.Key("Version").String("2011-06-15") + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetFederationToken struct { +} + +func (*awsAwsquery_serializeOpGetFederationToken) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetFederationTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetFederationToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetSessionToken struct { +} + +func (*awsAwsquery_serializeOpGetSessionToken) ID() string { + return "OperationSerializer" +} + 
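+// Every operation serializer in this file follows the same pattern: join the
+// operation path onto the request URL, force POST with Content-Type
+// application/x-www-form-urlencoded, write the Action/Version pair plus any
+// input members through the query encoder, and attach the encoded buffer as
+// the request stream. For GetSessionToken with no optional members set, the
+// resulting body is equivalent to "Action=GetSessionToken&Version=2011-06-15".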
+func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetSessionTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetSessionToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error { + object := value.Object() + _ = object + + if v.Arn != nil { + objectKey := object.Key("arn") + objectKey.String(*v.Arn) + } + + return nil +} + +func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { + object := value.Object() + _ = object + + if v.Key != nil { + objectKey := object.Key("Key") + objectKey.String(*v.Key) + } + + if v.Value != nil { + objectKey := object.Key("Value") + objectKey.String(*v.Value) + } + + return nil +} + +func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func 
awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.ExternalId != nil { + objectKey := object.Key("ExternalId") + objectKey.String(*v.ExternalId) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.SourceIdentity != nil { + objectKey := object.Key("SourceIdentity") + objectKey.String(*v.SourceIdentity) + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + if v.TransitiveTagKeys != nil { + objectKey := object.Key("TransitiveTagKeys") + if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.PrincipalArn != nil { + objectKey := object.Key("PrincipalArn") + objectKey.String(*v.PrincipalArn) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.SAMLAssertion != nil { + objectKey := object.Key("SAMLAssertion") + objectKey.String(*v.SAMLAssertion) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.ProviderId != nil { + objectKey := object.Key("ProviderId") + objectKey.String(*v.ProviderId) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.WebIdentityToken != nil { + objectKey := object.Key("WebIdentityToken") + objectKey.String(*v.WebIdentityToken) + } + + 
return nil +} + +func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { + object := value.Object() + _ = object + + if v.EncodedMessage != nil { + objectKey := object.Key("EncodedMessage") + objectKey.String(*v.EncodedMessage) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error { + object := value.Object() + _ = object + + if v.AccessKeyId != nil { + objectKey := object.Key("AccessKeyId") + objectKey.String(*v.AccessKeyId) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + return nil +} + +func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Name != nil { + objectKey := object.Key("Name") + objectKey.String(*v.Name) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go new file mode 100644 index 000000000..097875b27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -0,0 +1,244 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The web identity token that was passed is expired or is not valid. Get a new +// identity token from the identity provider and then retry the request. +type ExpiredTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExpiredTokenException" + } + return *e.ErrorCodeOverride +} +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request could not be fulfilled because the identity provider (IDP) that was +// asked to verify the incoming identity token could not be reached. This is often +// a transient error caused by network conditions. 
Retry the request a limited +// number of times so that you don't exceed the request rate. If the error +// persists, the identity provider might be down or not responding. +type IDPCommunicationErrorException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IDPCommunicationErrorException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPCommunicationErrorException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPCommunicationErrorException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IDPCommunicationError" + } + return *e.ErrorCodeOverride +} +func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The identity provider (IdP) reported that authentication failed. This might be +// because the claim is invalid. If this error is returned for the +// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired +// or has been explicitly revoked. +type IDPRejectedClaimException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IDPRejectedClaimException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPRejectedClaimException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPRejectedClaimException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IDPRejectedClaim" + } + return *e.ErrorCodeOverride +} +func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +type InvalidAuthorizationMessageException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidAuthorizationMessageException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidAuthorizationMessageException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidAuthorizationMessageException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidAuthorizationMessageException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry the +// request. +type InvalidIdentityTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidIdentityTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidIdentityTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidIdentityTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidIdentityToken" + } + return *e.ErrorCodeOverride +} +func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the policy document was malformed. 
The error +// message describes the specific error. +type MalformedPolicyDocumentException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *MalformedPolicyDocumentException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MalformedPolicyDocumentException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MalformedPolicyDocumentException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "MalformedPolicyDocument" + } + return *e.ErrorCodeOverride +} +func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session tags +// into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper size +// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You could receive this error even though you meet other +// defined session policy and session tag limits. For more information, see IAM +// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +type PackedPolicyTooLargeException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PackedPolicyTooLargeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PackedPolicyTooLargeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PackedPolicyTooLargeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PackedPolicyTooLarge" + } + return *e.ErrorCodeOverride +} +func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
+type RegionDisabledException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *RegionDisabledException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RegionDisabledException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RegionDisabledException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RegionDisabledException" + } + return *e.ErrorCodeOverride +} +func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go new file mode 100644 index 000000000..90d4f62ae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -0,0 +1,118 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // This member is required. + Arn *string + + // A unique identifier that contains the role ID and the role session name of the + // role that is being assumed. The role ID is generated by Amazon Web Services when + // the role is created. + // + // This member is required. + AssumedRoleId *string + + noSmithyDocumentSerde +} + +// Amazon Web Services credentials for API authentication. +type Credentials struct { + + // The access key ID that identifies the temporary security credentials. + // + // This member is required. + AccessKeyId *string + + // The date on which the current credentials expire. + // + // This member is required. + Expiration *time.Time + + // The secret access key that can be used to sign requests. + // + // This member is required. + SecretAccessKey *string + + // The token that users must pass to the service API to use the temporary + // credentials. + // + // This member is required. + SessionToken *string + + noSmithyDocumentSerde +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + + // The ARN that specifies the federated user that is associated with the + // credentials. For more information about ARNs and how to use them in policies, + // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // This member is required. + Arn *string + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // This member is required. + FederatedUserId *string + + noSmithyDocumentSerde +} + +// A reference to the IAM managed policy that is passed as a session policy for a +// role session or a federated user session. +type PolicyDescriptorType struct { + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. 
For more information about ARNs, see Amazon Resource Names + // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + Arn *string + + noSmithyDocumentSerde +} + +// You can pass custom key-value pair attributes when you assume a role or +// federate a user. These are called session tags. You can then use the session +// tags to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + + // The key for a session tag. You can pass up to 50 session tags. The plain text + // session tag keys can’t exceed 128 characters. For these and additional limits, + // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // This member is required. + Key *string + + // The value for a session tag. You can pass up to 50 session tags. The plain text + // session tag values can’t exceed 256 characters. For these and additional limits, + // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go new file mode 100644 index 000000000..3e4bad2a9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go @@ -0,0 +1,305 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpAssumeRole struct { +} + +func (*validateOpAssumeRole) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithSAML struct { +} + +func (*validateOpAssumeRoleWithSAML) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithSAMLInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithWebIdentity struct { +} + +func (*validateOpAssumeRoleWithWebIdentity) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDecodeAuthorizationMessage struct { +} + +func (*validateOpDecodeAuthorizationMessage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDecodeAuthorizationMessageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetAccessKeyInfo struct { +} + +func (*validateOpGetAccessKeyInfo) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetAccessKeyInfoInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + 
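+// Each validator runs on the Initialize step of the middleware stack (wired
+// up by the addOp*ValidationMiddleware helpers below), so a missing required
+// member surfaces as a smithy.InvalidParamsError before any serialization or
+// HTTP round trip takes place.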
+type validateOpGetFederationToken struct { +} + +func (*validateOpGetFederationToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetFederationTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetFederationTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After) +} + +func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After) +} + +func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) +} + +func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) +} + +func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After) +} + +func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After) +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagListType(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagListType"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleInput(v *AssumeRoleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.PrincipalArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) + } + if v.SAMLAssertion == nil { + invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.WebIdentityToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"} + if v.EncodedMessage == nil { + invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"} + if v.AccessKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore new file mode 100644 index 000000000..c01141aa4 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/.gitignore @@ -0,0 +1,22 @@ +# Eclipse +.classpath +.project +.settings/ + +# Intellij +.idea/ +*.iml +*.iws + +# Mac +.DS_Store + +# Maven +target/ +**/dependency-reduced-pom.xml + +# Gradle +/.gradle +build/ +*/out/ +*/*/out/ diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml new file mode 100644 index 000000000..f8d1035cc --- /dev/null +++ b/vendor/github.com/aws/smithy-go/.travis.yml @@ -0,0 +1,28 @@ +language: go +sudo: true +dist: bionic + +branches: + only: + - main + +os: + - linux + - osx + # Travis doesn't work with windows and Go tip + #- windows + +go: + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi + - (cd /tmp/; go get golang.org/x/lint/golint) + +script: + - make go test -v ./...; + diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md new file mode 100644 index 000000000..1e23bf95b --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -0,0 +1,167 @@ +# Release (2022-12-02) + +* No change notes available for this release. + +# Release (2022-10-24) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.13.4 + * **Bug Fix**: fixed document type checking for encoding nested types + +# Release (2022-09-14) + +* No change notes available for this release. 
+ +# Release (v1.13.2) + +* No change notes available for this release. + +# Release (v1.13.1) + +* No change notes available for this release. + +# Release (v1.13.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.13.0 + * **Feature**: Adds support for the Smithy httpBearerAuth authentication trait to smithy-go. This allows the SDK to support the bearer authentication flow for API operations decorated with httpBearerAuth. An API client will need to be provided with its own bearer.TokenProvider implementation or use the bearer.StaticTokenProvider implementation. + +# Release (v1.12.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.12.1 + * **Bug Fix**: Fixes a bug where JSON object keys were not escaped. + +# Release (v1.12.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.12.0 + * **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value. + +# Release (v1.11.3) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.3 + * **Dependency Update**: Updates smithy-go unit test dependency go-cmp to 0.5.8. + +# Release (v1.11.2) + +* No change notes available for this release. + +# Release (v1.11.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.1 + * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583) + +# Release (v1.11.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.0 + * **Feature**: Updates deserialization of header list to supported quoted strings + +# Release (v1.10.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.10.0 + * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type. + +# Release (v1.9.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.9.1 + * **Documentation**: Fixes various typos in Go package documentation. + +# Release (v1.9.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.9.0 + * **Feature**: sync: OnceErr, can be used to concurrently record a signal when an error has occurred. + * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing. + +# Release v1.8.1 + +### Smithy Go Module +* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set. + * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418) + +# Release v1.8.0 + +### Smithy Go Module + +* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324)) + * Adds support for parsing DateTime timestamp formatted time similar to RFC 3339, but without the `Z` character, nor UTC offset. + * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387) + +# Release v1.7.0 + +### Smithy Go Module +* `ptr`: Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314)) + * Handle error for defer close call +* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318)) + * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata. 
+* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310)) + +### Codegen +* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310)) + * Adds support for Smithy Document shapes and supporting types for protocols to implement support + +# Release v1.6.0 (2021-07-15) + +### Smithy Go Module +* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316)) + +### Codegen +* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316)) +* Adds support protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. ([#315](https://github.com/aws/smithy-go/pull/315)) + +# Release v1.5.0 (2021-06-25) + +### Smithy Go module +* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307)) + * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost. + +### Codegen +* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301)) +* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296)) +* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283)) +* Update Group Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298)) +* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304)) +* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303)) + +# Release v1.4.0 (2021-05-06) + +### Smithy Go module +* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267)) + +### Codegen +* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289)) +* Add support for httpQueryParams location +* Add support for model renaming conflict resolution with service closure + +# Release v1.3.1 (2021-04-08) + +### Smithy Go module +* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279)) +* `io`: Fix RingBuffer panics due to out of bounds index. ([#282](https://github.com/aws/smithy-go/pull/282)) + +# Release v1.3.0 (2021-04-01) + +### Smithy Go module +* `transport/http`: Add utility to safely join string to url path, and url raw query. + +### Codegen +* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility. + +# Release v1.2.0 (2021-03-12) + +### Smithy Go module +* Fix support for parsing shortened year format in HTTP Date header. +* Fix GitHub APIDiff action workflow to get gorelease tool correctly. +* Fix codegen artifact unit test for Go 1.16 + +### Codegen +* Fix generating paginator nil parameter handling before usage. +* Fix Serialize unboxed members decorated as required. +* Add ability to define resolvers at both client construction and operation invocation. 
+* Support for extending paginators with custom runtime trait diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..5b627cfa6 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md @@ -0,0 +1,4 @@ +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md new file mode 100644 index 000000000..c4b6a1c50 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md @@ -0,0 +1,59 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. + + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *main* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. + + +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. + + +## Security issue notifications +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. + + +## Licensing + +See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE new file mode 100644 index 000000000..67db85882 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile new file mode 100644 index 000000000..4b3c20937 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -0,0 +1,97 @@ +PRE_RELEASE_VERSION ?= + +RELEASE_MANIFEST_FILE ?= +RELEASE_CHGLOG_DESC_FILE ?= + +REPOTOOLS_VERSION ?= latest +REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools +REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= +REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} + +UNIT_TEST_TAGS= +BUILD_TAGS= + +ifneq ($(PRE_RELEASE_VERSION),) + REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} +endif + +smithy-publish-local: + cd codegen && ./gradlew publishToMavenLocal + +smithy-build: + cd codegen && ./gradlew build + +smithy-clean: + cd codegen && ./gradlew clean + +################## +# Linting/Verify # +################## +.PHONY: verify vet + +verify: vet + +vet: + go vet ${BUILD_TAGS} --all ./... + +################ +# Unit Testing # +################ +.PHONY: unit unit-race unit-test unit-race-test + +unit: verify + go vet ${BUILD_TAGS} --all ./... && \ + go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ + go test -timeout=1m ${UNIT_TEST_TAGS} ./... + +unit-race: verify + go vet ${BUILD_TAGS} --all ./... && \ + go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ + go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... + +unit-test: verify + go test -timeout=1m ${UNIT_TEST_TAGS} ./... + +unit-race-test: verify + go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... + +##################### +# Release Process # +##################### +.PHONY: preview-release pre-release-validation release + +preview-release: + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} + +pre-release-validation: + @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ + echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ + fi + @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ + echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ + fi + +release: pre-release-validation + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} + go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} + go run ${REPOTOOLS_CMD_CHANGELOG} rm -all + go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} + +module-version: + @go run ${REPOTOOLS_CMD_MODULE_VERSION} . 
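+
+# The release targets above shell out to awslabs/aws-go-multi-module-repository-tools;
+# RELEASE_MANIFEST_FILE and RELEASE_CHGLOG_DESC_FILE must name writable output
+# files, and pre-release-validation fails fast when either is unset.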
+
+##############
+# Repo Tools #
+##############
+.PHONY: install-changelog
+
+install-changelog:
+	go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE
new file mode 100644
index 000000000..616fc5889
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/NOTICE
@@ -0,0 +1 @@
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md
new file mode 100644
index 000000000..a4bb43fbe
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/README.md
@@ -0,0 +1,12 @@
+## Smithy Go
+
+[![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml)
+
+[Smithy](https://smithy.io/) code generators for Go.
+
+**WARNING: All interfaces are subject to change.**
+
+## License
+
+This project is licensed under the Apache-2.0 License.
+
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go
new file mode 100644
index 000000000..1c9b9715c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go
@@ -0,0 +1,3 @@
+// Package bearer provides middleware and utilities for authenticating API
+// operation calls with a Bearer Token.
+package bearer
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go
new file mode 100644
index 000000000..8c7d72099
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go
@@ -0,0 +1,104 @@
+package bearer
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Message is the middleware stack's request transport message value.
+type Message interface{}
+
+// Signer provides an interface for implementations to decorate a request
+// message with a bearer token. The signer is responsible for validating that
+// the message type is compatible with the signer.
+type Signer interface {
+	SignWithBearerToken(context.Context, Token, Message) (Message, error)
+}
+
+// AuthenticationMiddleware provides the Finalize middleware step for signing
+// a request message with a bearer token.
+type AuthenticationMiddleware struct {
+	signer        Signer
+	tokenProvider TokenProvider
+}
+
+// AddAuthenticationMiddleware is a helper that adds the
+// AuthenticationMiddleware to the middleware Stack in the Finalize step with
+// the options provided.
+func AddAuthenticationMiddleware(s *middleware.Stack, signer Signer, tokenProvider TokenProvider) error {
+	return s.Finalize.Add(
+		NewAuthenticationMiddleware(signer, tokenProvider),
+		middleware.After,
+	)
+}
+
+// NewAuthenticationMiddleware returns an initialized AuthenticationMiddleware.
+func NewAuthenticationMiddleware(signer Signer, tokenProvider TokenProvider) *AuthenticationMiddleware {
+	return &AuthenticationMiddleware{
+		signer:        signer,
+		tokenProvider: tokenProvider,
+	}
+}
+
+const authenticationMiddlewareID = "BearerTokenAuthentication"
+
+// ID returns the resolver identifier
+func (m *AuthenticationMiddleware) ID() string {
+	return authenticationMiddlewareID
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface in order to
+// update the request with bearer token authentication.
+func (m *AuthenticationMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	token, err := m.tokenProvider.RetrieveBearerToken(ctx)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed AuthenticationMiddleware wrap message, %w", err)
+	}
+
+	signedMessage, err := m.signer.SignWithBearerToken(ctx, token, in.Request)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed AuthenticationMiddleware sign message, %w", err)
+	}
+
+	in.Request = signedMessage
+	return next.HandleFinalize(ctx, in)
+}
+
+// SignHTTPSMessage provides a bearer token authentication implementation that
+// will sign the message with the provided bearer token.
+//
+// Will fail if the message is not a smithy-go HTTP request or the request is
+// not HTTPS.
+type SignHTTPSMessage struct{}
+
+// NewSignHTTPSMessage returns an initialized signer for HTTP messages.
+func NewSignHTTPSMessage() *SignHTTPSMessage {
+	return &SignHTTPSMessage{}
+}
+
+// SignWithBearerToken returns a copy of the HTTP request with the bearer token
+// added via the "Authorization" header, per RFC 6750, https://datatracker.ietf.org/doc/html/rfc6750.
+//
+// Returns an error if the request's URL scheme is not HTTPS, or the request
+// message is not a smithy-go HTTP Request pointer type.
+func (SignHTTPSMessage) SignWithBearerToken(ctx context.Context, token Token, message Message) (Message, error) {
+	req, ok := message.(*smithyhttp.Request)
+	if !ok {
+		return nil, fmt.Errorf("expect smithy-go HTTP Request, got %T", message)
+	}
+
+	if !req.IsHTTPS() {
+		return nil, fmt.Errorf("bearer token with HTTP request requires HTTPS")
+	}
+
+	reqClone := req.Clone()
+	reqClone.Header.Set("Authorization", "Bearer "+token.Value)
+
+	return reqClone, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token.go b/vendor/github.com/aws/smithy-go/auth/bearer/token.go
new file mode 100644
index 000000000..be260d4c7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/token.go
@@ -0,0 +1,50 @@
+package bearer
+
+import (
+	"context"
+	"time"
+)
+
+// Token provides a type wrapping a bearer token and expiration metadata.
+type Token struct {
+	Value string
+
+	CanExpire bool
+	Expires   time.Time
+}
+
+// Expired returns if the token's Expires time is before or equal to the time
+// provided. If CanExpire is false, Expired will always return false.
+func (t Token) Expired(now time.Time) bool {
+	if !t.CanExpire {
+		return false
+	}
+	now = now.Round(0)
+	return now.Equal(t.Expires) || now.After(t.Expires)
+}
+
+// TokenProvider provides an interface for retrieving bearer tokens.
+type TokenProvider interface {
+	RetrieveBearerToken(context.Context) (Token, error)
+}
+
+// TokenProviderFunc provides a helper utility to wrap a function as a type
+// that implements the TokenProvider interface.
+type TokenProviderFunc func(context.Context) (Token, error)
+
+// RetrieveBearerToken calls the wrapped function, returning the Token or
+// error.
+func (fn TokenProviderFunc) RetrieveBearerToken(ctx context.Context) (Token, error) {
+	return fn(ctx)
+}
+
+// StaticTokenProvider provides a utility for wrapping a static bearer token
+// value within an implementation of a token provider.
+type StaticTokenProvider struct {
+	Token Token
+}
+
+// RetrieveBearerToken returns the static token specified.
+func (s StaticTokenProvider) RetrieveBearerToken(context.Context) (Token, error) {
+	return s.Token, nil
+}
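The types above compose directly: a fixed token can be wrapped in the caching provider defined in token_cache.go, which follows. A minimal sketch, not part of the vendored sources, with a placeholder token value:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/smithy-go/auth/bearer"
)

func main() {
	// StaticTokenProvider implements TokenProvider for a fixed token.
	static := bearer.StaticTokenProvider{Token: bearer.Token{
		Value:     "my-opaque-token", // placeholder value
		CanExpire: true,
		Expires:   time.Now().Add(time.Hour),
	}}

	// TokenCache (defined next, in token_cache.go) deduplicates retrievals
	// and can refresh early, ahead of expiry.
	cached := bearer.NewTokenCache(static, func(o *bearer.TokenCacheOptions) {
		o.RefreshBeforeExpires = 5 * time.Minute
	})

	tok, err := cached.RetrieveBearerToken(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(tok.Value)
}
```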
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go
new file mode 100644
index 000000000..223ddf52b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go
@@ -0,0 +1,208 @@
+package bearer
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	smithycontext "github.com/aws/smithy-go/context"
+	"github.com/aws/smithy-go/internal/sync/singleflight"
+)
+
+// package variable that can be overridden in unit tests.
+var timeNow = time.Now
+
+// TokenCacheOptions provides a set of optional configuration options for the
+// TokenCache TokenProvider.
+type TokenCacheOptions struct {
+	// The duration before the token expires at which the credentials will be
+	// refreshed. If DisableAsyncRefresh is true, the RetrieveBearerToken calls
+	// will be blocking.
+	//
+	// Asynchronous refreshes are deduplicated, and only one will be in-flight
+	// at a time. If the token expires while an asynchronous refresh is in
+	// flight, the next call to RetrieveBearerToken will block on that refresh
+	// to return.
+	RefreshBeforeExpires time.Duration
+
+	// The timeout the underlying TokenProvider's RetrieveBearerToken call must
+	// return within, or will be canceled. Defaults to 0, no timeout.
+	//
+	// With a 0 timeout, it's possible for the underlying tokenProvider's
+	// RetrieveBearerToken call to block forever, preventing subsequent
+	// TokenCache attempts to refresh the token.
+	//
+	// If this timeout is reached all pending deduplicated calls to
+	// TokenCache RetrieveBearerToken will fail with an error.
+	RetrieveBearerTokenTimeout time.Duration
+
+	// The minimum duration between asynchronous refresh attempts. If the most
+	// recent asynchronous refresh attempt was within the minimum delay
+	// duration, the call to retrieve will return the current cached token, if
+	// not expired.
+	//
+	// The asynchronous retrieve is deduplicated across multiple calls when
+	// RetrieveBearerToken is called. The asynchronous retrieve is not a
+	// periodic task. It is only performed when the token has not yet expired,
+	// the current item is within the RefreshBeforeExpires window, and the
+	// TokenCache's RetrieveBearerToken method is called.
+	//
+	// If 0 (default), there will be no minimum delay between asynchronous
+	// refresh attempts.
+	//
+	// If DisableAsyncRefresh is true, this option is ignored.
+	AsyncRefreshMinimumDelay time.Duration
+
+	// Sets if the TokenCache will attempt to refresh the token in the
+	// background asynchronously instead of blocking for credentials to be
+	// refreshed. If disabled, token refresh will be blocking.
+	//
+	// The first call to RetrieveBearerToken will always be blocking, because
+	// there is no cached token.
+	DisableAsyncRefresh bool
+}
+
+// TokenCache provides a utility to cache Bearer Authentication tokens from a
+// wrapped TokenProvider. The TokenCache has options to configure the cache's
+// early and asynchronous refresh of the token.
+type TokenCache struct {
+	options  TokenCacheOptions
+	provider TokenProvider
+
+	cachedToken            atomic.Value
+	lastRefreshAttemptTime atomic.Value
+	sfGroup                singleflight.Group
+}
+
+// NewTokenCache returns an initialized TokenCache that implements the
+// TokenProvider interface, wrapping the provider passed in. It also takes a
+// set of optional functional option parameters to configure the token cache.
+func NewTokenCache(provider TokenProvider, optFns ...func(*TokenCacheOptions)) *TokenCache {
+	var options TokenCacheOptions
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &TokenCache{
+		options:  options,
+		provider: provider,
+	}
+}
+
+// RetrieveBearerToken returns the token if it could be obtained, or error if a
+// valid token could not be retrieved.
+//
+// The passed in Context's cancel/deadline/timeout will impact only this
+// individual retrieve call and not any other already queued up calls. This
+// means the underlying provider's RetrieveBearerToken calls could block
+// forever, and not be canceled with the Context. Set
+// RetrieveBearerTokenTimeout to provide a timeout, preventing the underlying
+// TokenProvider blocking forever.
+//
+// By default, if the passed in Context is canceled, all of its values will be
+// considered expired. The wrapped TokenProvider will not be able to lookup the
+// values from the Context once it is expired. This is done to protect against
+// expired values no longer being valid. To disable this behavior, use
+// smithy-go's context.WithPreserveExpiredValues to add a value to the Context
+// before calling RetrieveBearerToken to enable support for expired values.
+//
+// Without RetrieveBearerTokenTimeout there is the potential for an underlying
+// Provider's RetrieveBearerToken call to block forever, stalling subsequent
+// attempts at refreshing the token.
+func (p *TokenCache) RetrieveBearerToken(ctx context.Context) (Token, error) {
+	cachedToken, ok := p.getCachedToken()
+	if !ok || cachedToken.Expired(timeNow()) {
+		return p.refreshBearerToken(ctx)
+	}
+
+	// Check if the token should be refreshed before it expires.
+	refreshToken := cachedToken.Expired(timeNow().Add(p.options.RefreshBeforeExpires))
+	if !refreshToken {
+		return cachedToken, nil
+	}
+
+	if p.options.DisableAsyncRefresh {
+		return p.refreshBearerToken(ctx)
+	}
+
+	p.tryAsyncRefresh(ctx)
+
+	return cachedToken, nil
+}
+
+// tryAsyncRefresh attempts to asynchronously refresh the token, returning the
+// already cached token. If the AsyncRefreshMinimumDelay option is not zero,
+// and the duration since the last refresh is less than that value, nothing
+// will be done.
+func (p *TokenCache) tryAsyncRefresh(ctx context.Context) {
+	if p.options.AsyncRefreshMinimumDelay != 0 {
+		var lastRefreshAttempt time.Time
+		if v := p.lastRefreshAttemptTime.Load(); v != nil {
+			lastRefreshAttempt = v.(time.Time)
+		}
+
+		if timeNow().Before(lastRefreshAttempt.Add(p.options.AsyncRefreshMinimumDelay)) {
+			return
+		}
+	}
+
+	// Ignore the returned channel so this won't be blocking, and limit the
+	// number of additional goroutines created.
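+	// singleflight deduplicates concurrent work that shares a key: while one
+	// "async-refresh" call is in flight, additional callers attach to it
+	// rather than spawning their own refresh goroutines.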
+ p.sfGroup.DoChan("async-refresh", func() (interface{}, error) { + res, err := p.refreshBearerToken(ctx) + if p.options.AsyncRefreshMinimumDelay != 0 { + var refreshAttempt time.Time + if err != nil { + refreshAttempt = timeNow() + } + p.lastRefreshAttemptTime.Store(refreshAttempt) + } + + return res, err + }) +} + +func (p *TokenCache) refreshBearerToken(ctx context.Context) (Token, error) { + resCh := p.sfGroup.DoChan("refresh-token", func() (interface{}, error) { + ctx := smithycontext.WithSuppressCancel(ctx) + if v := p.options.RetrieveBearerTokenTimeout; v != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, v) + defer cancel() + } + return p.singleRetrieve(ctx) + }) + + select { + case res := <-resCh: + return res.Val.(Token), res.Err + case <-ctx.Done(): + return Token{}, fmt.Errorf("retrieve bearer token canceled, %w", ctx.Err()) + } +} + +func (p *TokenCache) singleRetrieve(ctx context.Context) (interface{}, error) { + token, err := p.provider.RetrieveBearerToken(ctx) + if err != nil { + return Token{}, fmt.Errorf("failed to retrieve bearer token, %w", err) + } + + p.cachedToken.Store(&token) + return token, nil +} + +// getCachedToken returns the currently cached token and true if found. Returns +// false if no token is cached. +func (p *TokenCache) getCachedToken() (Token, bool) { + v := p.cachedToken.Load() + if v == nil { + return Token{}, false + } + + t := v.(*Token) + if t == nil || t.Value == "" { + return Token{}, false + } + + return *t, true +} diff --git a/vendor/github.com/aws/smithy-go/context/suppress_expired.go b/vendor/github.com/aws/smithy-go/context/suppress_expired.go new file mode 100644 index 000000000..a39b84a27 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/context/suppress_expired.go @@ -0,0 +1,81 @@ +package context + +import "context" + +// valueOnlyContext provides a utility to preserve only the values of a +// Context. Suppressing any cancellation or deadline on that context being +// propagated downstream of this value. +// +// If preserveExpiredValues is false (default), and the valueCtx is canceled, +// calls to lookup values with the Values method, will always return nil. Setting +// preserveExpiredValues to true, will allow the valueOnlyContext to lookup +// values in valueCtx even if valueCtx is canceled. +// +// Based on the Go standard libraries net/lookup.go onlyValuesCtx utility. +// https://github.com/golang/go/blob/da2773fe3e2f6106634673a38dc3a6eb875fe7d8/src/net/lookup.go +type valueOnlyContext struct { + context.Context + + preserveExpiredValues bool + valuesCtx context.Context +} + +var _ context.Context = (*valueOnlyContext)(nil) + +// Value looks up the key, returning its value. If configured to not preserve +// values of expired context, and the wrapping context is canceled, nil will be +// returned. +func (v *valueOnlyContext) Value(key interface{}) interface{} { + if !v.preserveExpiredValues { + select { + case <-v.valuesCtx.Done(): + return nil + default: + } + } + + return v.valuesCtx.Value(key) +} + +// WithSuppressCancel wraps the Context value, suppressing its deadline and +// cancellation events being propagated downstream to consumer of the returned +// context. +// +// By default the wrapped Context's Values are available downstream until the +// wrapped Context is canceled. Once the wrapped Context is canceled, Values +// method called on the context return will no longer lookup any key. As they +// are now considered expired. 
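+//
+// A hypothetical sketch of the default behavior (smithycontext is the import
+// alias this module uses elsewhere for github.com/aws/smithy-go/context, and
+// refreshToken is an illustrative function):
+//
+//	bg := smithycontext.WithSuppressCancel(reqCtx)
+//	go refreshToken(bg) // outlives reqCtx's cancellation, but lookups of
+//	// reqCtx's values return nil once reqCtx is canceled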
+//
+// To override this behavior, use WithPreserveExpiredValues on the Context
+// before it is wrapped by WithSuppressCancel. This will make the Context
+// returned by WithSuppressCancel allow lookup of expired values.
+func WithSuppressCancel(ctx context.Context) context.Context {
+	return &valueOnlyContext{
+		Context:   context.Background(),
+		valuesCtx: ctx,
+
+		preserveExpiredValues: GetPreserveExpiredValues(ctx),
+	}
+}
+
+type preserveExpiredValuesKey struct{}
+
+// WithPreserveExpiredValues adds a Value to the Context if expired values
+// should be preserved and looked up by a Context wrapped by
+// WithSuppressCancel.
+//
+// WithPreserveExpiredValues must be added as a value to a Context before that
+// Context is wrapped by WithSuppressCancel.
+func WithPreserveExpiredValues(ctx context.Context, enable bool) context.Context {
+	return context.WithValue(ctx, preserveExpiredValuesKey{}, enable)
+}
+
+// GetPreserveExpiredValues looks up and returns the PreserveExpiredValues
+// value in the context, returning true if enabled, false otherwise.
+func GetPreserveExpiredValues(ctx context.Context) bool {
+	v := ctx.Value(preserveExpiredValuesKey{})
+	if v != nil {
+		return v.(bool)
+	}
+	return false
+}
diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go
new file mode 100644
index 000000000..87b0c74b7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/doc.go
@@ -0,0 +1,2 @@
+// Package smithy provides the core components for a Smithy SDK.
+package smithy
diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go
new file mode 100644
index 000000000..dec498c57
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document.go
@@ -0,0 +1,10 @@
+package smithy
+
+// Document provides access to loosely structured data in a document-like
+// format.
+//
+// Deprecated: See the github.com/aws/smithy-go/document package.
+type Document interface {
+	UnmarshalDocument(interface{}) error
+	GetValue() (interface{}, error)
+}
diff --git a/vendor/github.com/aws/smithy-go/document/doc.go b/vendor/github.com/aws/smithy-go/document/doc.go
new file mode 100644
index 000000000..03055b7a1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/doc.go
@@ -0,0 +1,12 @@
+// Package document provides interface definitions and error types for document types.
+//
+// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send
+// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8
+// strings to these values.
+//
+// API Clients expose document constructors in their respective client document packages which must be used to
+// Marshal and Unmarshal Go types to and from their respective protocol representations.
+//
+// See the Marshaler and Unmarshaler type documentation for more details on how Go types can be converted to and from
+// document types.
+package document
diff --git a/vendor/github.com/aws/smithy-go/document/document.go b/vendor/github.com/aws/smithy-go/document/document.go
new file mode 100644
index 000000000..8f852d95c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/document.go
@@ -0,0 +1,153 @@
+package document
+
+import (
+	"fmt"
+	"math/big"
+	"strconv"
+)
+
+// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and
+// returns the resulting bytes. A non-nil error will be returned if an error is encountered during marshaling.
+//
+// Marshal supports basic scalars (int, uint, float, bool, string), big.Int, and big.Float, maps, slices, and structs.
+// Anonymous nested types are flattened based on Go anonymous type visibility.
+//
+// When defining struct types, the `document` struct tag can be used to control how the value will be
+// marshaled into the resulting protocol document.
+//
+// // Field is ignored
+// Field int `document:"-"`
+//
+// // Field object of key "myName"
+// Field int `document:"myName"`
+//
+// // Field object key of key "myName", and
+// // Field is omitted if the field is a zero value for the type.
+// Field int `document:"myName,omitempty"`
+//
+// // Field object key of "Field", and
+// // Field is omitted if the field is a zero value for the type.
+// Field int `document:",omitempty"`
+//
+// All struct fields, including anonymous fields, are marshaled unless any
+// of the following conditions are met.
+//
+// - the field is not exported
+// - document field tag is "-"
+// - document field tag specifies "omitempty", and is a zero value.
+//
+// Pointer and interface values are encoded as the value pointed to or
+// contained in the interface. A nil value encodes as a null
+// value unless `omitempty` struct tag is provided.
+//
+// Channel, complex, and function values are not encoded and will be skipped
+// when walking the value to be marshaled.
+//
+// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented
+// by your application as a string or numerical representation.
+//
+// Errors that occur when marshaling will stop the marshaler, and return the error.
+//
+// Marshal cannot represent cyclic data structures and will not handle them.
+// Passing cyclic structures to Marshal will result in an infinite recursion.
type Marshaler interface {
+	MarshalSmithyDocument() ([]byte, error)
+}
+
+// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and
+// stores the result into the value pointed to by v. If v is nil or not a pointer then InvalidUnmarshalError will be
+// returned.
+//
+// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document`
+// struct field tag for controlling how struct fields are unmarshaled.
+//
+// Both generic interface{} and concrete types are valid unmarshal destination types. When unmarshaling a document
+// into an empty interface the Unmarshaler will store one of these values:
+//	bool, for boolean values
+//	document.Number, for arbitrary-precision numbers (int64, float64, big.Int, big.Float)
+//	string, for string values
+//	[]interface{}, for array values
+//	map[string]interface{}, for objects
+//	nil, for null values
+//
+// When unmarshaling, any error that occurs will halt the unmarshal and return the error.
+type Unmarshaler interface {
+	UnmarshalSmithyDocument(v interface{}) error
+}
+
+type noSerde interface {
+	noSmithyDocumentSerde()
+}
+
+// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled
+// into a protocol document.
+type NoSerde struct{}
+
+func (n NoSerde) noSmithyDocumentSerde() {}
+
+var _ noSerde = (*NoSerde)(nil)
+
+// IsNoSerde returns whether the given type implements the no smithy document serde interface.
+func IsNoSerde(x interface{}) bool {
+	_, ok := x.(noSerde)
+	return ok
+}
+
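+// For illustration (hypothetical types, not part of this file):
+//
+//	// Tagged shows the `document` struct tags described on Marshaler.
+//	type Tagged struct {
+//		Name  string `document:"name"`
+//		Price int    `document:"price,omitempty"` // omitted when zero
+//		Debug string `document:"-"`               // never marshaled
+//	}
+//
+//	// Opaque opts out of document serde entirely via the NoSerde sentinel:
+//	// document.IsNoSerde(Opaque{}) reports true.
+//	type Opaque struct {
+//		NoSerde
+//	}
+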
+// Number is an arbitrary precision numerical value
+type Number string
+
+// String returns the raw string representation of the number.
+func (n Number) String() string {
+	return string(n)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return n.intOfBitSize(64)
+}
+
+func (n Number) intOfBitSize(bitSize int) (int64, error) {
+	return strconv.ParseInt(string(n), 10, bitSize)
+}
+
+// Uint64 returns the number as a uint64.
+func (n Number) Uint64() (uint64, error) {
+	return n.uintOfBitSize(64)
+}
+
+func (n Number) uintOfBitSize(bitSize int) (uint64, error) {
+	return strconv.ParseUint(string(n), 10, bitSize)
+}
+
+// Float32 returns the number parsed as a 32-bit float, returned as a float64.
+func (n Number) Float32() (float64, error) {
+	return n.floatOfBitSize(32)
+}
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return n.floatOfBitSize(64)
+}
+
+func (n Number) floatOfBitSize(bitSize int) (float64, error) {
+	return strconv.ParseFloat(string(n), bitSize)
+}
+
+// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails.
+func (n Number) BigFloat() (*big.Float, error) {
+	f, ok := (&big.Float{}).SetString(string(n))
+	if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Float")
+	}
+	return f, nil
+}
+
+// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails.
+func (n Number) BigInt() (*big.Int, error) {
+	f, ok := (&big.Int{}).SetString(string(n), 10)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Int")
+	}
+	return f, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/document/errors.go b/vendor/github.com/aws/smithy-go/document/errors.go
new file mode 100644
index 000000000..046a7a765
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/errors.go
@@ -0,0 +1,75 @@
+package document
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// UnmarshalTypeError is an error type representing an error
+// unmarshaling a Smithy document to a Go value type. This is different
+// from UnmarshalError in that it does not wrap an underlying error type.
+type UnmarshalTypeError struct {
+	Value string
+	Type  reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *UnmarshalTypeError) Error() string {
+	return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s",
+		e.Value, e.Type.String())
+}
+
+// An InvalidUnmarshalError is an error type representing an invalid type
+// encountered while unmarshaling a Smithy document to a Go value type.
+type InvalidUnmarshalError struct {
+	Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *InvalidUnmarshalError) Error() string {
+	var msg string
+	if e.Type == nil {
+		msg = "cannot unmarshal to nil value"
+	} else if e.Type.Kind() != reflect.Ptr {
+		msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String())
+	} else {
+		msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String())
+	}
+
+	return fmt.Sprintf("unmarshal failed, %s", msg)
+}
+
+// An UnmarshalError wraps an error that occurred while unmarshaling a
+// Smithy document into a Go type. This is different from
+// UnmarshalTypeError in that it wraps the underlying error that occurred.
+type UnmarshalError struct { + Err error + Value string + Type reflect.Type +} + +// Unwrap returns the underlying unmarshaling error +func (e *UnmarshalError) Unwrap() error { + return e.Err +} + +// Error returns the string representation of the error. +// Satisfying the error interface. +func (e *UnmarshalError) Error() string { + return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v", + e.Value, e.Type.String(), e.Err) +} + +// An InvalidMarshalError is an error type representing an error +// occurring when marshaling a Go value type. +type InvalidMarshalError struct { + Message string +} + +// Error returns the string representation of the error. +// Satisfying the error interface. +func (e *InvalidMarshalError) Error() string { + return fmt.Sprintf("marshal failed, %s", e.Message) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/doc.go b/vendor/github.com/aws/smithy-go/encoding/doc.go new file mode 100644 index 000000000..792fdfa08 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/doc.go @@ -0,0 +1,4 @@ +// Package encoding provides utilities for encoding values for specific +// document encodings. + +package encoding diff --git a/vendor/github.com/aws/smithy-go/encoding/encoding.go b/vendor/github.com/aws/smithy-go/encoding/encoding.go new file mode 100644 index 000000000..2fdfb5225 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/encoding.go @@ -0,0 +1,40 @@ +package encoding + +import ( + "fmt" + "math" + "strconv" +) + +// EncodeFloat encodes a float value as per the stdlib encoder for json and xml protocol +// This encodes a float value into dst while attempting to conform to ES6 ToString for Numbers +// +// Based on encoding/json floatEncoder from the Go Standard Library +// https://golang.org/src/encoding/json/encode.go +func EncodeFloat(dst []byte, v float64, bits int) []byte { + if math.IsInf(v, 0) || math.IsNaN(v) { + panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits))) + } + + abs := math.Abs(v) + fmt := byte('f') + + if abs != 0 { + if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + + dst = strconv.AppendFloat(dst, v, fmt, -1, bits) + + if fmt == 'e' { + // clean up e-09 to e-9 + n := len(dst) + if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' { + dst[n-2] = dst[n-1] + dst = dst[:n-1] + } + } + + return dst +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go new file mode 100644 index 000000000..96abd073a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go @@ -0,0 +1,116 @@ +package httpbinding + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" +) + +const ( + contentLengthHeader = "Content-Length" + floatNaN = "NaN" + floatInfinity = "Infinity" + floatNegInfinity = "-Infinity" +) + +// An Encoder provides encoding of REST URI path, query, and header components +// of an HTTP request. Can also encode a stream as the payload. +// +// Does not support SetFields. +type Encoder struct { + path, rawPath, pathBuffer []byte + + query url.Values + header http.Header +} + +// NewEncoder creates a new encoder from the passed in request. All query and +// header values will be added on top of the request's existing values. Overwriting +// duplicate values. 
+func NewEncoder(path, query string, headers http.Header) (*Encoder, error) { + parseQuery, err := url.ParseQuery(query) + if err != nil { + return nil, fmt.Errorf("failed to parse query string: %w", err) + } + + e := &Encoder{ + path: []byte(path), + rawPath: []byte(path), + query: parseQuery, + header: headers.Clone(), + } + + return e, nil +} + +// Encode returns a REST protocol encoder for encoding HTTP bindings. +// +// Due net/http requiring `Content-Length` to be specified on the http.Request#ContentLength directly. Encode +// will look for whether the header is present, and if so will remove it and set the respective value on http.Request. +// +// Returns any error occurring during encoding. +func (e *Encoder) Encode(req *http.Request) (*http.Request, error) { + req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath) + req.URL.RawQuery = e.query.Encode() + + // net/http ignores Content-Length header and requires it to be set on http.Request + if v := e.header.Get(contentLengthHeader); len(v) > 0 { + iv, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, err + } + req.ContentLength = iv + e.header.Del(contentLengthHeader) + } + + req.Header = e.header + + return req, nil +} + +// AddHeader returns a HeaderValue for appending to the given header name +func (e *Encoder) AddHeader(key string) HeaderValue { + return newHeaderValue(e.header, key, true) +} + +// SetHeader returns a HeaderValue for setting the given header name +func (e *Encoder) SetHeader(key string) HeaderValue { + return newHeaderValue(e.header, key, false) +} + +// Headers returns a Header used for encoding headers with the given prefix +func (e *Encoder) Headers(prefix string) Headers { + return Headers{ + header: e.header, + prefix: strings.TrimSpace(prefix), + } +} + +// HasHeader returns if a header with the key specified exists with one or +// more value. +func (e Encoder) HasHeader(key string) bool { + return len(e.header[key]) != 0 +} + +// SetURI returns a URIValue used for setting the given path key +func (e *Encoder) SetURI(key string) URIValue { + return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key) +} + +// SetQuery returns a QueryValue used for setting the given query key +func (e *Encoder) SetQuery(key string) QueryValue { + return NewQueryValue(e.query, key, false) +} + +// AddQuery returns a QueryValue used for appending the given query key +func (e *Encoder) AddQuery(key string) QueryValue { + return NewQueryValue(e.query, key, true) +} + +// HasQuery returns if a query with the key specified exists with one or +// more values. 
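Aside: a sketch of how Encoder, URIValue, QueryValue, and HeaderValue compose; the `/pets/{petId}` route and the header name are hypothetical, not part of this diff:

```go
package main

import (
	"fmt"
	"math"
	"net/http"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	e, err := httpbinding.NewEncoder("/pets/{petId}", "", http.Header{})
	if err != nil {
		panic(err)
	}

	// URI labels are escaped Amazon-style in RawPath only.
	if err := e.SetURI("petId").String("fido 1"); err != nil {
		panic(err)
	}
	e.SetQuery("limit").Integer(10)
	e.SetHeader("X-Score").Double(math.Inf(1)) // special floats encode as "Infinity"/"-Infinity"/"NaN"

	req, _ := http.NewRequest("GET", "https://example.com", nil)
	req, err = e.Encode(req)
	if err != nil {
		panic(err)
	}

	fmt.Println(req.URL.Path)              // /pets/fido 1
	fmt.Println(req.URL.RawPath)           // /pets/fido%201
	fmt.Println(req.URL.RawQuery)          // limit=10
	fmt.Println(req.Header.Get("X-Score")) // Infinity
}
```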
+func (e *Encoder) HasQuery(key string) bool { + return len(e.query.Get(key)) != 0 +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go new file mode 100644 index 000000000..f9256e175 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go @@ -0,0 +1,122 @@ +package httpbinding + +import ( + "encoding/base64" + "math" + "math/big" + "net/http" + "strconv" + "strings" +) + +// Headers is used to encode header keys using a provided prefix +type Headers struct { + header http.Header + prefix string +} + +// AddHeader returns a HeaderValue used to append values to prefix+key +func (h Headers) AddHeader(key string) HeaderValue { + return h.newHeaderValue(key, true) +} + +// SetHeader returns a HeaderValue used to set the value of prefix+key +func (h Headers) SetHeader(key string) HeaderValue { + return h.newHeaderValue(key, false) +} + +func (h Headers) newHeaderValue(key string, append bool) HeaderValue { + return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append) +} + +// HeaderValue is used to encode values to an HTTP header +type HeaderValue struct { + header http.Header + key string + append bool +} + +func newHeaderValue(header http.Header, key string, append bool) HeaderValue { + return HeaderValue{header: header, key: strings.TrimSpace(key), append: append} +} + +func (h HeaderValue) modifyHeader(value string) { + if h.append { + h.header[h.key] = append(h.header[h.key], value) + } else { + h.header[h.key] = append(h.header[h.key][:0], value) + } +} + +// String encodes the value v as the header string value +func (h HeaderValue) String(v string) { + h.modifyHeader(v) +} + +// Byte encodes the value v as a query string value +func (h HeaderValue) Byte(v int8) { + h.Long(int64(v)) +} + +// Short encodes the value v as a query string value +func (h HeaderValue) Short(v int16) { + h.Long(int64(v)) +} + +// Integer encodes the value v as the header string value +func (h HeaderValue) Integer(v int32) { + h.Long(int64(v)) +} + +// Long encodes the value v as the header string value +func (h HeaderValue) Long(v int64) { + h.modifyHeader(strconv.FormatInt(v, 10)) +} + +// Boolean encodes the value v as a query string value +func (h HeaderValue) Boolean(v bool) { + h.modifyHeader(strconv.FormatBool(v)) +} + +// Float encodes the value v as a query string value +func (h HeaderValue) Float(v float32) { + h.float(float64(v), 32) +} + +// Double encodes the value v as a query string value +func (h HeaderValue) Double(v float64) { + h.float(v, 64) +} + +func (h HeaderValue) float(v float64, bitSize int) { + switch { + case math.IsNaN(v): + h.String(floatNaN) + case math.IsInf(v, 1): + h.String(floatInfinity) + case math.IsInf(v, -1): + h.String(floatNegInfinity) + default: + h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize)) + } +} + +// BigInteger encodes the value v as a query string value +func (h HeaderValue) BigInteger(v *big.Int) { + h.modifyHeader(v.String()) +} + +// BigDecimal encodes the value v as a query string value +func (h HeaderValue) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + h.Long(i) + return + } + h.modifyHeader(v.Text('e', -1)) +} + +// Blob encodes the value v as a base64 header string value +func (h HeaderValue) Blob(v []byte) { + encodeToString := base64.StdEncoding.EncodeToString(v) + h.modifyHeader(encodeToString) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go 
b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go new file mode 100644 index 000000000..e78926c9a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go @@ -0,0 +1,108 @@ +package httpbinding + +import ( + "bytes" + "fmt" +) + +const ( + uriTokenStart = '{' + uriTokenStop = '}' + uriTokenSkip = '+' +) + +func bufCap(b []byte, n int) []byte { + if cap(b) < n { + return make([]byte, 0, n) + } + + return b[0:0] +} + +// replacePathElement replaces a single element in the path []byte. +// Escape is used to control whether the value will be escaped using Amazon path escape style. +func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { + fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } + fieldBuf = append(fieldBuf, uriTokenStart) + fieldBuf = append(fieldBuf, key...) + + start := bytes.Index(path, fieldBuf) + end := start + len(fieldBuf) + if start < 0 || len(path[end:]) == 0 { + // TODO what to do about error? + return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path) + } + + encodeSep := true + if path[end] == uriTokenSkip { + // '+' token means do not escape slashes + encodeSep = false + end++ + } + + if escape { + val = EscapePath(val, encodeSep) + } + + if path[end] != uriTokenStop { + return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path) + } + end++ + + fieldBuf = bufCap(fieldBuf, len(val)) + fieldBuf = append(fieldBuf, val...) + + keyLen := end - start + valLen := len(fieldBuf) + + if keyLen == valLen { + copy(path[start:], fieldBuf) + return path, fieldBuf, nil + } + + newLen := len(path) + (valLen - keyLen) + if len(path) < newLen { + path = path[:cap(path)] + } + if cap(path) < newLen { + newURI := make([]byte, newLen) + copy(newURI, path) + path = newURI + } + + // shift + copy(path[start+valLen:], path[end:]) + path = path[:newLen] + copy(path[start:], fieldBuf) + + return path, fieldBuf, nil +} + +// EscapePath escapes part of a URL path in Amazon style. +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +var noEscape [256]bool + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go new file mode 100644 index 000000000..c2e7d0a20 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go @@ -0,0 +1,107 @@ +package httpbinding + +import ( + "encoding/base64" + "math" + "math/big" + "net/url" + "strconv" +) + +// QueryValue is used to encode query key values +type QueryValue struct { + query url.Values + key string + append bool +} + +// NewQueryValue creates a new QueryValue which enables encoding +// a query value into the given url.Values. 
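Aside: EscapePath above percent-encodes everything outside the unreserved set, with `/` controlled by encodeSep. For instance:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	// encodeSep=true escapes '/', as needed for a single path label.
	fmt.Println(httpbinding.EscapePath("my file/v1.0~x", true)) // my%20file%2Fv1.0~x

	// encodeSep=false keeps '/', as used for greedy (multi-segment) labels.
	fmt.Println(httpbinding.EscapePath("my file/v1.0~x", false)) // my%20file/v1.0~x
}
```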
+func NewQueryValue(query url.Values, key string, append bool) QueryValue { + return QueryValue{ + query: query, + key: key, + append: append, + } +} + +func (qv QueryValue) updateKey(value string) { + if qv.append { + qv.query.Add(qv.key, value) + } else { + qv.query.Set(qv.key, value) + } +} + +// Blob encodes v as a base64 query string value +func (qv QueryValue) Blob(v []byte) { + encodeToString := base64.StdEncoding.EncodeToString(v) + qv.updateKey(encodeToString) +} + +// Boolean encodes v as a query string value +func (qv QueryValue) Boolean(v bool) { + qv.updateKey(strconv.FormatBool(v)) +} + +// String encodes v as a query string value +func (qv QueryValue) String(v string) { + qv.updateKey(v) +} + +// Byte encodes v as a query string value +func (qv QueryValue) Byte(v int8) { + qv.Long(int64(v)) +} + +// Short encodes v as a query string value +func (qv QueryValue) Short(v int16) { + qv.Long(int64(v)) +} + +// Integer encodes v as a query string value +func (qv QueryValue) Integer(v int32) { + qv.Long(int64(v)) +} + +// Long encodes v as a query string value +func (qv QueryValue) Long(v int64) { + qv.updateKey(strconv.FormatInt(v, 10)) +} + +// Float encodes v as a query string value +func (qv QueryValue) Float(v float32) { + qv.float(float64(v), 32) +} + +// Double encodes v as a query string value +func (qv QueryValue) Double(v float64) { + qv.float(v, 64) +} + +func (qv QueryValue) float(v float64, bitSize int) { + switch { + case math.IsNaN(v): + qv.String(floatNaN) + case math.IsInf(v, 1): + qv.String(floatInfinity) + case math.IsInf(v, -1): + qv.String(floatNegInfinity) + default: + qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize)) + } +} + +// BigInteger encodes v as a query string value +func (qv QueryValue) BigInteger(v *big.Int) { + qv.updateKey(v.String()) +} + +// BigDecimal encodes v as a query string value +func (qv QueryValue) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + qv.Long(i) + return + } + qv.updateKey(v.Text('e', -1)) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go new file mode 100644 index 000000000..f04e11984 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go @@ -0,0 +1,111 @@ +package httpbinding + +import ( + "math" + "math/big" + "strconv" + "strings" +) + +// URIValue is used to encode named URI parameters +type URIValue struct { + path, rawPath, buffer *[]byte + + key string +} + +func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue { + return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key} +} + +func (u URIValue) modifyURI(value string) (err error) { + *u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false) + if err != nil { + return err + } + *u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true) + return err +} + +// Boolean encodes v as a URI string value +func (u URIValue) Boolean(v bool) error { + return u.modifyURI(strconv.FormatBool(v)) +} + +// String encodes v as a URI string value +func (u URIValue) String(v string) error { + return u.modifyURI(v) +} + +// Byte encodes v as a URI string value +func (u URIValue) Byte(v int8) error { + return u.Long(int64(v)) +} + +// Short encodes v as a URI string value +func (u URIValue) Short(v int16) error { + return u.Long(int64(v)) +} + +// Integer encodes v as a URI string value +func (u URIValue) Integer(v int32) error { + 
+	return u.Long(int64(v))
+}
+
+// Long encodes v as a URI string value
+func (u URIValue) Long(v int64) error {
+	return u.modifyURI(strconv.FormatInt(v, 10))
+}
+
+// Float encodes v as a URI string value
+func (u URIValue) Float(v float32) error {
+	return u.float(float64(v), 32)
+}
+
+// Double encodes v as a URI string value
+func (u URIValue) Double(v float64) error {
+	return u.float(v, 64)
+}
+
+func (u URIValue) float(v float64, bitSize int) error {
+	switch {
+	case math.IsNaN(v):
+		return u.String(floatNaN)
+	case math.IsInf(v, 1):
+		return u.String(floatInfinity)
+	case math.IsInf(v, -1):
+		return u.String(floatNegInfinity)
+	default:
+		return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize))
+	}
+}
+
+// BigInteger encodes v as a URI string value
+func (u URIValue) BigInteger(v *big.Int) error {
+	return u.modifyURI(v.String())
+}
+
+// BigDecimal encodes v as a URI string value
+func (u URIValue) BigDecimal(v *big.Float) error {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		return u.Long(i)
+	}
+	return u.modifyURI(v.Text('e', -1))
+}
+
+// SplitURI parses a Smithy HTTP binding trait URI
+func SplitURI(uri string) (path, query string) {
+	queryStart := strings.IndexRune(uri, '?')
+	if queryStart == -1 {
+		path = uri
+		return path, query
+	}
+
+	path = uri[:queryStart]
+	if queryStart+1 >= len(uri) {
+		return path, query
+	}
+	query = uri[queryStart+1:]
+
+	return path, query
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/array.go b/vendor/github.com/aws/smithy-go/encoding/json/array.go
new file mode 100644
index 000000000..7a232f660
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/array.go
@@ -0,0 +1,35 @@
+package json
+
+import (
+	"bytes"
+)
+
+// Array represents the encoding of a JSON Array
+type Array struct {
+	w          *bytes.Buffer
+	writeComma bool
+	scratch    *[]byte
+}
+
+func newArray(w *bytes.Buffer, scratch *[]byte) *Array {
+	w.WriteRune(leftBracket)
+	return &Array{w: w, scratch: scratch}
+}
+
+// Value adds a new element to the JSON Array.
+// Returns a Value type that is used to encode
+// the array element.
+func (a *Array) Value() Value {
+	if a.writeComma {
+		a.w.WriteRune(comma)
+	} else {
+		a.writeComma = true
+	}
+
+	return newValue(a.w, a.scratch)
+}
+
+// Close encodes the end of the JSON Array
+func (a *Array) Close() {
+	a.w.WriteRune(rightBracket)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/constants.go b/vendor/github.com/aws/smithy-go/encoding/json/constants.go
new file mode 100644
index 000000000..91044092a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/constants.go
@@ -0,0 +1,15 @@
+package json
+
+const (
+	leftBrace  = '{'
+	rightBrace = '}'
+
+	leftBracket  = '['
+	rightBracket = ']'
+
+	comma = ','
+	quote = '"'
+	colon = ':'
+
+	null = "null"
+)
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go
new file mode 100644
index 000000000..7050c85b3
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go
@@ -0,0 +1,139 @@
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+// DiscardUnknownField discards unknown fields from a decoder body.
+// This function is useful while deserializing a JSON body with additional
+// unknown information that should be discarded.
+func DiscardUnknownField(decoder *json.Decoder) error {
+	// This deliberately does not share logic with CollectUnknownField, even
+	// though it could, because if we were to delegate to that then we'd incur
+	// extra allocations and general memory usage.
+	v, err := decoder.Token()
+	if err == io.EOF {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	if _, ok := v.(json.Delim); ok {
+		for decoder.More() {
+			if err = DiscardUnknownField(decoder); err != nil {
+				return err
+			}
+		}
+		endToken, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if _, ok := endToken.(json.Delim); !ok {
+			return fmt.Errorf("invalid JSON : expected json delimiter, found %T %v",
+				endToken, endToken)
+		}
+	}
+
+	return nil
+}
+
+// CollectUnknownField grabs the contents of unknown fields from the decoder body
+// and returns them as a byte slice. This is useful for skipping unknown fields without
+// completely discarding them.
+func CollectUnknownField(decoder *json.Decoder) ([]byte, error) {
+	result, err := collectUnknownField(decoder)
+	if err != nil {
+		return nil, err
+	}
+
+	buff := bytes.NewBuffer(nil)
+	encoder := json.NewEncoder(buff)
+
+	if err := encoder.Encode(result); err != nil {
+		return nil, err
+	}
+
+	return buff.Bytes(), nil
+}
+
+func collectUnknownField(decoder *json.Decoder) (interface{}, error) {
+	// Grab the initial value. This could either be a concrete value like a string or a
+	// delimiter.
+	token, err := decoder.Token()
+	if err == io.EOF {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// If it's an array or object, we'll need to recurse.
+	delim, ok := token.(json.Delim)
+	if ok {
+		var result interface{}
+		if delim == '{' {
+			result, err = collectUnknownObject(decoder)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			result, err = collectUnknownArray(decoder)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Discard the closing token. decoder.Token handles checking for matching delimiters
+		if _, err := decoder.Token(); err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+
+	return token, nil
+}
+
+func collectUnknownArray(decoder *json.Decoder) ([]interface{}, error) {
+	// We need to create an empty array here instead of a nil array, since by getting
+	// into this function at all we necessarily have seen a non-nil list.
+	array := []interface{}{}
+
+	for decoder.More() {
+		value, err := collectUnknownField(decoder)
+		if err != nil {
+			return nil, err
+		}
+		array = append(array, value)
+	}
+
+	return array, nil
+}
+
+func collectUnknownObject(decoder *json.Decoder) (map[string]interface{}, error) {
+	object := make(map[string]interface{})
+
+	for decoder.More() {
+		key, err := collectUnknownField(decoder)
+		if err != nil {
+			return nil, err
+		}
+
+		// Keys have to be strings, which is particularly important as the encoder
+		// won't accept a map with interface{} keys
+		stringKey, ok := key.(string)
+		if !ok {
+			return nil, fmt.Errorf("expected string key, found %T", key)
+		}
+
+		value, err := collectUnknownField(decoder)
+		if err != nil {
+			return nil, err
+		}
+
+		object[stringKey] = value
+	}
+
+	return object, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go
new file mode 100644
index 000000000..8772953f1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go
@@ -0,0 +1,30 @@
+package json
+
+import (
+	"bytes"
+)
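Aside: a sketch of CollectUnknownField round-tripping an unknown value into raw JSON bytes. Note that the stdlib decoder reads numbers back as float64 and the re-encoded map has sorted keys (plus a trailing newline from json.Encoder):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	dec := json.NewDecoder(bytes.NewReader([]byte(`{"color":"red","sizes":[1,2,3]}`)))

	// Collect the next value (here: the whole object) as raw JSON bytes.
	raw, err := smithyjson.CollectUnknownField(dec)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", raw) // {"color":"red","sizes":[1,2,3]}
}
```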
+// Encoder is a JSON encoder that supports construction of JSON values
+// using methods.
+type Encoder struct {
+	w *bytes.Buffer
+	Value
+}
+
+// NewEncoder returns a new JSON encoder
+func NewEncoder() *Encoder {
+	writer := bytes.NewBuffer(nil)
+	scratch := make([]byte, 64)
+
+	return &Encoder{w: writer, Value: newValue(writer, &scratch)}
+}
+
+// String returns the String output of the JSON encoder
+func (e Encoder) String() string {
+	return e.w.String()
+}
+
+// Bytes returns the []byte slice of the JSON encoder
+func (e Encoder) Bytes() []byte {
+	return e.w.Bytes()
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/escape.go b/vendor/github.com/aws/smithy-go/encoding/json/escape.go
new file mode 100644
index 000000000..d984d0cdc
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/escape.go
@@ -0,0 +1,198 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.8 stdlib's encoding/json/#safeSet
+
+package json
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// copied from Go 1.8 stdlib's encoding/json/#hex
+var hex = "0123456789abcdef"
+
+// escapeStringBytes escapes and writes the passed in string bytes to the dst
+// buffer
+//
+// Copied and modified from Go 1.8 stdlib's encoding/json/#encodeState.stringBytes
+func escapeStringBytes(e *bytes.Buffer, s []byte) {
+	e.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if safeSet[b] {
+				i++
+				continue
+			}
+			if start < i {
+				e.Write(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				e.WriteByte('\\')
+				e.WriteByte(b)
+			case '\n':
+				e.WriteByte('\\')
+				e.WriteByte('n')
+			case '\r':
+				e.WriteByte('\\')
+				e.WriteByte('r')
+			case '\t':
+				e.WriteByte('\\')
+				e.WriteByte('t')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+ // If escapeHTML is set, it also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. + e.WriteString(`\u00`) + e.WriteByte(hex[b>>4]) + e.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + e.Write(s[start:i]) + } + e.WriteString(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + e.Write(s[start:i]) + } + e.WriteString(`\u202`) + e.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + e.Write(s[start:]) + } + e.WriteByte('"') +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/object.go b/vendor/github.com/aws/smithy-go/encoding/json/object.go new file mode 100644 index 000000000..722346d03 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/object.go @@ -0,0 +1,40 @@ +package json + +import ( + "bytes" +) + +// Object represents the encoding of a JSON Object type +type Object struct { + w *bytes.Buffer + writeComma bool + scratch *[]byte +} + +func newObject(w *bytes.Buffer, scratch *[]byte) *Object { + w.WriteRune(leftBrace) + return &Object{w: w, scratch: scratch} +} + +func (o *Object) writeKey(key string) { + escapeStringBytes(o.w, []byte(key)) + o.w.WriteRune(colon) +} + +// Key adds the given named key to the JSON object. +// Returns a Value encoder that should be used to encode +// a JSON value type. 
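Aside: how the Encoder, Object, Array, and Value types in this package fit together; the keys and values below are made up:

```go
package main

import (
	"fmt"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	enc := smithyjson.NewEncoder()

	obj := enc.Object() // writes '{'
	obj.Key("name").String("fido")
	obj.Key("age").Integer(7)

	tags := obj.Key("tags").Array() // writes '['
	tags.Value().String("good")
	tags.Value().String("dog")
	tags.Close() // writes ']'

	obj.Close() // writes '}'

	fmt.Println(enc.String()) // {"name":"fido","age":7,"tags":["good","dog"]}
}
```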
+func (o *Object) Key(name string) Value { + if o.writeComma { + o.w.WriteRune(comma) + } else { + o.writeComma = true + } + o.writeKey(name) + return newValue(o.w, o.scratch) +} + +// Close encodes the end of the JSON Object +func (o *Object) Close() { + o.w.WriteRune(rightBrace) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/value.go b/vendor/github.com/aws/smithy-go/encoding/json/value.go new file mode 100644 index 000000000..b41ff1e15 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/value.go @@ -0,0 +1,149 @@ +package json + +import ( + "bytes" + "encoding/base64" + "math/big" + "strconv" + + "github.com/aws/smithy-go/encoding" +) + +// Value represents a JSON Value type +// JSON Value types: Object, Array, String, Number, Boolean, and Null +type Value struct { + w *bytes.Buffer + scratch *[]byte +} + +// newValue returns a new Value encoder +func newValue(w *bytes.Buffer, scratch *[]byte) Value { + return Value{w: w, scratch: scratch} +} + +// String encodes v as a JSON string +func (jv Value) String(v string) { + escapeStringBytes(jv.w, []byte(v)) +} + +// Byte encodes v as a JSON number +func (jv Value) Byte(v int8) { + jv.Long(int64(v)) +} + +// Short encodes v as a JSON number +func (jv Value) Short(v int16) { + jv.Long(int64(v)) +} + +// Integer encodes v as a JSON number +func (jv Value) Integer(v int32) { + jv.Long(int64(v)) +} + +// Long encodes v as a JSON number +func (jv Value) Long(v int64) { + *jv.scratch = strconv.AppendInt((*jv.scratch)[:0], v, 10) + jv.w.Write(*jv.scratch) +} + +// ULong encodes v as a JSON number +func (jv Value) ULong(v uint64) { + *jv.scratch = strconv.AppendUint((*jv.scratch)[:0], v, 10) + jv.w.Write(*jv.scratch) +} + +// Float encodes v as a JSON number +func (jv Value) Float(v float32) { + jv.float(float64(v), 32) +} + +// Double encodes v as a JSON number +func (jv Value) Double(v float64) { + jv.float(v, 64) +} + +func (jv Value) float(v float64, bits int) { + *jv.scratch = encoding.EncodeFloat((*jv.scratch)[:0], v, bits) + jv.w.Write(*jv.scratch) +} + +// Boolean encodes v as a JSON boolean +func (jv Value) Boolean(v bool) { + *jv.scratch = strconv.AppendBool((*jv.scratch)[:0], v) + jv.w.Write(*jv.scratch) +} + +// Base64EncodeBytes writes v as a base64 value in JSON string +func (jv Value) Base64EncodeBytes(v []byte) { + encodeByteSlice(jv.w, (*jv.scratch)[:0], v) +} + +// Write writes v directly to the JSON document +func (jv Value) Write(v []byte) { + jv.w.Write(v) +} + +// Array returns a new Array encoder +func (jv Value) Array() *Array { + return newArray(jv.w, jv.scratch) +} + +// Object returns a new Object encoder +func (jv Value) Object() *Object { + return newObject(jv.w, jv.scratch) +} + +// Null encodes a null JSON value +func (jv Value) Null() { + jv.w.WriteString(null) +} + +// BigInteger encodes v as JSON value +func (jv Value) BigInteger(v *big.Int) { + jv.w.Write([]byte(v.Text(10))) +} + +// BigDecimal encodes v as JSON value +func (jv Value) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + jv.Long(i) + return + } + // TODO: Should this try to match ES6 ToString similar to stdlib JSON? 
+	jv.w.Write([]byte(v.Text('e', -1)))
+}
+
+// Based on encoding/json encodeByteSlice from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func encodeByteSlice(w *bytes.Buffer, scratch []byte, v []byte) {
+	if v == nil {
+		w.WriteString(null)
+		return
+	}
+
+	w.WriteRune(quote)
+
+	encodedLen := base64.StdEncoding.EncodedLen(len(v))
+	if encodedLen <= len(scratch) {
+		// If the encoded bytes fit in scratch, avoid an extra
+		// allocation and use the cheaper Encoding.Encode.
+		dst := scratch[:encodedLen]
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else if encodedLen <= 1024 {
+		// The encoded bytes are short enough to allocate for, and
+		// Encoding.Encode is still cheaper.
+		dst := make([]byte, encodedLen)
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else {
+		// The encoded bytes are too long to cheaply allocate, and
+		// Encoding.Encode is no longer noticeably cheaper.
+		enc := base64.NewEncoder(base64.StdEncoding, w)
+		enc.Write(v)
+		enc.Close()
+	}
+
+	w.WriteRune(quote)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/array.go b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
new file mode 100644
index 000000000..508f3c997
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
@@ -0,0 +1,49 @@
+package xml
+
+// arrayMemberWrapper is the default member wrapper tag name for XML Array type
+var arrayMemberWrapper = StartElement{
+	Name: Name{Local: "member"},
+}
+
+// Array represents the encoding of an XML array type
+type Array struct {
+	w       writer
+	scratch *[]byte
+
+	// member start element is the array member wrapper start element
+	memberStartElement StartElement
+
+	// isFlattened indicates if the array is a flattened array.
+	isFlattened bool
+}
+
+// newArray returns an array encoder.
+// It takes in the member start element and the array start element,
+// plus an isFlattened bool indicating whether the array is a flattened array.
+//
+// A wrapped array ["value1", "value2"] is represented as
+// `<List><member>value1</member><member>value2</member></List>`.
+//
+// A flattened array `someList: ["value1", "value2"]` is represented as
+// `<someList>value1</someList><someList>value2</someList>`.
+func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array {
+	var memberWrapper = memberStartElement
+	if isFlattened {
+		memberWrapper = arrayStartElement
+	}
+
+	return &Array{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        isFlattened,
+	}
+}
+
+// Member adds a new member to the XML array.
+// It returns a Value encoder.
+func (a *Array) Member() Value {
+	v := newValue(a.w, a.scratch, a.memberStartElement)
+	v.isFlattened = a.isFlattened
+	return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
new file mode 100644
index 000000000..ccee90a63
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
@@ -0,0 +1,10 @@
+package xml
+
+const (
+	leftAngleBracket  = '<'
+	rightAngleBracket = '>'
+	forwardSlash      = '/'
+	colon             = ':'
+	equals            = '='
+	quote             = '"'
+)
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
new file mode 100644
index 000000000..f9200093e
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
@@ -0,0 +1,49 @@
+/*
+Package xml holds the XML encoder utility. This utility is written in accordance with our design of delegating to
+shape serializer functions, in which an xml.Value is passed around.
+
+Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings
+
+Member Element
+
+Member elements should be used to encode xml shapes into xml elements, except for flattened xml shapes. Member elements
+write their own element start tag. These elements should always be closed.
+
+Flattened Element
+
+Flattened elements should be used to encode shapes marked with the flattened trait into xml elements. Flattened elements
+do not write a start tag, and thus should not be closed.
+
+Simple types encoding
+
+All simple type methods on Value, such as String(), Long(), etc., auto close the associated member element.
+
+Array
+
+Array returns the collection encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped arrays have two methods, Array() and ArrayWithCustomName(), which facilitate array member wrapping.
+By default, wrapped array members are wrapped with a `member` named start element.
+
+	<wrappedArray><member>apple</member><member>tree</member></wrappedArray>
+
+Flattened arrays rely on Value being marked as flattened.
+If a shape is marked as flattened, Array() will use the shape element name as the wrapper for array elements.
+
+	<flattenedArray>apple</flattenedArray><flattenedArray>tree</flattenedArray>
+
+Map
+
+Map is the map encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped maps use the Entry() method, which facilitates map entry wrapping.
+By default, wrapped map entries are wrapped with an `entry` named start element.
+
+	<wrappedMap><entry><key>apple</key><value>tree</value></entry><entry><key>snow</key><value>ice</value></entry></wrappedMap>
+
+Flattened maps rely on Value being marked as flattened.
+If a shape is marked as flattened, Map() will use the shape element name as the wrapper for map entry elements.
+
+	<flattenedMap><key>apple</key><value>tree</value></flattenedMap><flattenedMap><key>snow</key><value>ice</value></flattenedMap>
+*/
+package xml
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/element.go b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
new file mode 100644
index 000000000..ae84e7999
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+// A Name represents an XML name (Local) annotated
+// with a name space identifier (Space).
+// In tokens returned by Decoder.Token, the Space identifier
+// is given as a canonical URL, not the short prefix used
+// in the document being parsed.
+type Name struct {
+	Space, Local string
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+	Name  Name
+	Value string
+}
+
+/*
+NewAttribute returns an attribute.
+It takes in a local name (the attribute name) and a value
+representing the attribute value.
+*/
+func NewAttribute(local, value string) Attr {
+	return Attr{
+		Name: Name{
+			Local: local,
+		},
+		Value: value,
+	}
+}
+
+/*
+NewNamespaceAttribute returns an attribute.
+It takes in a local name (the attribute name) and a value
+representing the attribute value.
+
+NewNamespaceAttribute additionally sets the `xmlns` name space on the
+attribute, so it is rendered with an `xmlns:` prefix.
+
+For creating a name space attribute representing
+`xmlns:prefix="http://example.com"`, the breakdown would be:
+local = "prefix"
+value = "http://example.com"
+*/
+func NewNamespaceAttribute(local, value string) Attr {
+	attr := NewAttribute(local, value)
+
+	// default name space identifier
+	attr.Name.Space = "xmlns"
+	return attr
+}
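Aside: a sketch of the wrapped-array encoding the package doc above describes. The element names are made up, and a bytes.Buffer satisfies this package's writer interface:

```go
package main

import (
	"bytes"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	buf := bytes.NewBuffer(nil)
	enc := smithyxml.NewEncoder(buf)

	root := enc.RootElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "Fruits"}})

	list := root.MemberElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "List"}})
	arr := list.Array() // not flattened: members wrapped in <member>
	arr.Member().String("apple")
	arr.Member().String("tree")
	list.Close() // </List>

	root.Close() // </Fruits>
	fmt.Println(enc.String())
	// <Fruits><List><member>apple</member><member>tree</member></List></Fruits>
}
```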
+// A StartElement represents an XML start element.
+type StartElement struct {
+	Name Name
+	Attr []Attr
+}
+
+// Copy creates a new copy of StartElement.
+func (e StartElement) Copy() StartElement {
+	attrs := make([]Attr, len(e.Attr))
+	copy(attrs, e.Attr)
+	e.Attr = attrs
+	return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+	return EndElement{e.Name}
+}
+
+// returns true if start element local name is empty
+func (e StartElement) isZero() bool {
+	return len(e.Name.Local) == 0
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+	Name Name
+}
+
+// returns true if end element local name is empty
+func (e EndElement) isZero() bool {
+	return len(e.Name.Local) == 0
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
new file mode 100644
index 000000000..16fb3dddb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
@@ -0,0 +1,51 @@
+package xml
+
+// writer interface used by the xml encoder to write an encoded xml
+// document in a writer.
+type writer interface {
+
+	// Write takes in a byte slice and returns number of bytes written and error
+	Write(p []byte) (n int, err error)
+
+	// WriteRune takes in a rune and returns number of bytes written and error
+	WriteRune(r rune) (n int, err error)
+
+	// WriteString takes in a string and returns number of bytes written and error
+	WriteString(s string) (n int, err error)
+
+	// String method returns a string
+	String() string
+
+	// Bytes return a byte slice.
+	Bytes() []byte
+}
+
+// Encoder is an XML encoder that supports construction of XML values
+// using methods. The encoder takes in a writer and maintains a scratch buffer.
+type Encoder struct {
+	w       writer
+	scratch *[]byte
+}
+
+// NewEncoder returns an XML encoder
+func NewEncoder(w writer) *Encoder {
+	scratch := make([]byte, 64)
+
+	return &Encoder{w: w, scratch: &scratch}
+}
+
+// String returns the string output of the XML encoder
+func (e Encoder) String() string {
+	return e.w.String()
+}
+
+// Bytes returns the []byte slice of the XML encoder
+func (e Encoder) Bytes() []byte {
+	return e.w.Bytes()
+}
+
+// RootElement builds a root element encoding.
+// It writes its start element tag. The value should be closed.
+func (e Encoder) RootElement(element StartElement) Value {
+	return newValue(e.w, e.scratch, element)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
new file mode 100644
index 000000000..f3db6ccca
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
@@ -0,0 +1,51 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+	Code    string
+	Message string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+	if noErrorWrapping {
+		var errResponse noWrappedErrorResponse
+		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+		}
+		return ErrorComponents{
+			Code:    errResponse.Code,
+			Message: errResponse.Message,
+		}, nil
+	}
+
+	var errResponse wrappedErrorResponse
+	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+	}
+	return ErrorComponents{
+		Code:    errResponse.Code,
+		Message: errResponse.Message,
+	}, nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal `<Error></Error>` wrapping
+type noWrappedErrorResponse struct {
+	Code    string `xml:"Code"`
+	Message string `xml:"Message"`
+}
+
+// wrappedErrorResponse represents the error response body
+// wrapped within `<Error>...</Error>`
+type wrappedErrorResponse struct {
+	Code    string `xml:"Error>Code"`
+	Message string `xml:"Error>Message"`
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
new file mode 100644
index 000000000..1c5479af6
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
@@ -0,0 +1,137 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+import (
+	"unicode/utf8"
+)
+
+// Copied from Go 1.14 stdlib's encoding/xml
+var (
+	escQuot = []byte("&#34;") // shorter than "&quot;"
+	escApos = []byte("&#39;") // shorter than "&apos;"
+	escAmp  = []byte("&amp;")
+	escLT   = []byte("&lt;")
+	escGT   = []byte("&gt;")
+	escTab  = []byte("&#x9;")
+	escNL   = []byte("&#xA;")
+	escCR   = []byte("&#xD;")
+	escFFFD = []byte("\uFFFD") // Unicode replacement character
+
+	// Additional Escapes
+	escNextLine = []byte("&#x85;")
+	escLS       = []byte("&#x2028;")
+)
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of https://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+	return r == 0x09 ||
+		r == 0x0A ||
+		r == 0x0D ||
+		r >= 0x20 && r <= 0xD7FF ||
+		r >= 0xE000 && r <= 0xFFFD ||
+		r >= 0x10000 && r <= 0x10FFFF
+}
+
+// TODO: When do we need to escape the string?
+// Based on encoding/xml escapeString from the Go Standard Library.
+// https://golang.org/src/encoding/xml/xml.go
+func escapeString(e writer, s string) {
+	var esc []byte
+	last := 0
+	for i := 0; i < len(s); {
+		r, width := utf8.DecodeRuneInString(s[i:])
+		i += width
+		switch r {
+		case '"':
+			esc = escQuot
+		case '\'':
+			esc = escApos
+		case '&':
+			esc = escAmp
+		case '<':
+			esc = escLT
+		case '>':
+			esc = escGT
+		case '\t':
+			esc = escTab
+		case '\n':
+			esc = escNL
+		case '\r':
+			esc = escCR
+		case '\u0085':
+			// Not escaped by stdlib
+			esc = escNextLine
+		case '\u2028':
+			// Not escaped by stdlib
+			esc = escLS
+		default:
+			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+				esc = escFFFD
+				break
+			}
+			continue
+		}
+		e.WriteString(s[last : i-width])
+		e.Write(esc)
+		last = i
+	}
+	e.WriteString(s[last:])
+}
+
+// escapeText writes to w the properly escaped XML equivalent
+// of the plain text data s. Unlike the stdlib version, newline
+// characters are always escaped.
+//
+// Based on encoding/xml escapeText from the Go Standard Library.
+// https://golang.org/src/encoding/xml/xml.go
+func escapeText(e writer, s []byte) {
+	var esc []byte
+	last := 0
+	for i := 0; i < len(s); {
+		r, width := utf8.DecodeRune(s[i:])
+		i += width
+		switch r {
+		case '"':
+			esc = escQuot
+		case '\'':
+			esc = escApos
+		case '&':
+			esc = escAmp
+		case '<':
+			esc = escLT
+		case '>':
+			esc = escGT
+		case '\t':
+			esc = escTab
+		case '\n':
+			// This always escapes newline, which is different from stdlib's optional
+			// escape of new line.
+			esc = escNL
+		case '\r':
+			esc = escCR
+		case '\u0085':
+			// Not escaped by stdlib
+			esc = escNextLine
+		case '\u2028':
+			// Not escaped by stdlib
+			esc = escLS
+		default:
+			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+				esc = escFFFD
+				break
+			}
+			continue
+		}
+		e.Write(s[last : i-width])
+		e.Write(esc)
+		last = i
+	}
+	e.Write(s[last:])
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/map.go b/vendor/github.com/aws/smithy-go/encoding/xml/map.go
new file mode 100644
index 000000000..e42858965
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/map.go
@@ -0,0 +1,53 @@
+package xml
+
+// mapEntryWrapper is the default member wrapper start element for XML Map entry
+var mapEntryWrapper = StartElement{
+	Name: Name{Local: "entry"},
+}
+
+// Map represents the encoding of an XML map type
+type Map struct {
+	w       writer
+	scratch *[]byte
+
+	// member start element is the map entry wrapper start element
+	memberStartElement StartElement
+
+	// isFlattened is true if the map is a flattened map
+	isFlattened bool
+}
+
+// newMap returns a map encoder which sets the default map
+// entry wrapper to `entry`.
+//
+// A map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><entry><key>abc</key><value>123</value></entry></someMap>`.
+func newMap(w writer, scratch *[]byte) *Map {
+	return &Map{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: mapEntryWrapper,
+	}
+}
+
+// newFlattenedMap returns a map encoder which sets the map
+// entry wrapper to the passed in memberWrapper.
+//
+// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><key>abc</key><value>123</value></someMap>`.
+func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map {
+	return &Map{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        true,
+	}
+}
+
+// Entry returns a Value encoder for a map entry.
+// It writes the member wrapper start tag for each entry.
+func (m *Map) Entry() Value {
+	v := newValue(m.w, m.scratch, m.memberStartElement)
+	v.isFlattened = m.isFlattened
+	return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/value.go b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
new file mode 100644
index 000000000..09434b2c0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
@@ -0,0 +1,302 @@
+package xml
+
+import (
+	"encoding/base64"
+	"fmt"
+	"math/big"
+	"strconv"
+
+	"github.com/aws/smithy-go/encoding"
+)
+
+// Value represents an XML Value type
+// XML Value types: Object, Array, Map, String, Number, Boolean.
+type Value struct {
+	w       writer
+	scratch *[]byte
+
+	// xml start element is the associated start element for the Value
+	startElement StartElement
+
+	// indicates if the Value represents a flattened shape
+	isFlattened bool
+}
+
+// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag
+func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value {
+	return Value{
+		w:            w,
+		scratch:      scratch,
+		startElement: startElement,
+	}
+}
+
+// newValue writes the start element xml tag and returns a Value
+func newValue(w writer, scratch *[]byte, startElement StartElement) Value {
+	writeStartElement(w, startElement)
+	return Value{w: w, scratch: scratch, startElement: startElement}
+}
+
+// writeStartElement takes in a start element and writes it.
+// It handles namespace, attributes in start element.
+func writeStartElement(w writer, el StartElement) error {
+	if el.isZero() {
+		return fmt.Errorf("xml start element cannot be nil")
+	}
+
+	w.WriteRune(leftAngleBracket)
+
+	if len(el.Name.Space) != 0 {
+		escapeString(w, el.Name.Space)
+		w.WriteRune(colon)
+	}
+	escapeString(w, el.Name.Local)
+	for _, attr := range el.Attr {
+		w.WriteRune(' ')
+		writeAttribute(w, &attr)
+	}
+
+	w.WriteRune(rightAngleBracket)
+	return nil
+}
+
+// writeAttribute writes an attribute from a provided Attribute
+// For a namespace attribute, the attr.Name.Space must be defined as "xmlns".
+// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName
+func writeAttribute(w writer, attr *Attr) {
+	// if local, space both are not empty
+	if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 {
+		escapeString(w, attr.Name.Space)
+		w.WriteRune(colon)
+	}
+
+	// if prefix is empty, the default `xmlns` space should be used as prefix.
+	if len(attr.Name.Local) == 0 {
+		attr.Name.Local = attr.Name.Space
+	}
+
+	escapeString(w, attr.Name.Local)
+	w.WriteRune(equals)
+	w.WriteRune(quote)
+	escapeString(w, attr.Value)
+	w.WriteRune(quote)
+}
+
+// writeEndElement takes in an end element and writes it.
+func writeEndElement(w writer, el EndElement) error {
+	if el.isZero() {
+		return fmt.Errorf("xml end element cannot be nil")
+	}
+
+	w.WriteRune(leftAngleBracket)
+	w.WriteRune(forwardSlash)
+
+	if len(el.Name.Space) != 0 {
+		escapeString(w, el.Name.Space)
+		w.WriteRune(colon)
+	}
+	escapeString(w, el.Name.Local)
+	w.WriteRune(rightAngleBracket)
+
+	return nil
+}
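Aside: writeStartElement and writeAttribute above cooperate with NewNamespaceAttribute from element.go. A sketch with invented element and prefix names:

```go
package main

import (
	"bytes"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	buf := bytes.NewBuffer(nil)
	enc := smithyxml.NewEncoder(buf)

	ns := smithyxml.NewNamespaceAttribute("ns1", "https://example.com/schema")
	root := enc.RootElement(smithyxml.StartElement{
		Name: smithyxml.Name{Local: "Doc"},
		Attr: []smithyxml.Attr{ns},
	})
	root.MemberElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "Note"}}).String("hi & bye")
	root.Close()

	fmt.Println(enc.String())
	// <Doc xmlns:ns1="https://example.com/schema"><Note>hi &amp; bye</Note></Doc>
}
```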
+// String encodes v as an XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) String(v string) {
+	escapeString(xv.w, v)
+	xv.Close()
+}
+
+// Byte encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Byte(v int8) {
+	xv.Long(int64(v))
+}
+
+// Short encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Short(v int16) {
+	xv.Long(int64(v))
+}
+
+// Integer encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Integer(v int32) {
+	xv.Long(int64(v))
+}
+
+// Long encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Long(v int64) {
+	*xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10)
+	xv.w.Write(*xv.scratch)
+
+	xv.Close()
+}
+
+// Float encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Float(v float32) {
+	xv.float(float64(v), 32)
+	xv.Close()
+}
+
+// Double encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Double(v float64) {
+	xv.float(v, 64)
+	xv.Close()
+}
+
+func (xv Value) float(v float64, bits int) {
+	*xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits)
+	xv.w.Write(*xv.scratch)
+}
+
+// Boolean encodes v as an XML boolean.
+// It will auto close the parent xml element tag.
+func (xv Value) Boolean(v bool) {
+	*xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v)
+	xv.w.Write(*xv.scratch)
+
+	xv.Close()
+}
+
+// Base64EncodeBytes writes v as a base64 value in an XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) Base64EncodeBytes(v []byte) {
+	encodeByteSlice(xv.w, (*xv.scratch)[:0], v)
+	xv.Close()
+}
+
+// BigInteger encodes the big.Int v as an XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigInteger(v *big.Int) {
+	xv.w.Write([]byte(v.Text(10)))
+	xv.Close()
+}
+
+// BigDecimal encodes the big.Float v as an XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		xv.Long(i)
+		return
+	}
+
+	xv.w.Write([]byte(v.Text('e', -1)))
+	xv.Close()
+}
+
+// Write writes v directly to the xml document.
+// If escapeXMLText is set to true, the text is escaped.
+// It will auto close the parent xml element tag.
+func (xv Value) Write(v []byte, escapeXMLText bool) {
+	// escape and write xml text
+	if escapeXMLText {
+		escapeText(xv.w, v)
+	} else {
+		// write xml directly
+		xv.w.Write(v)
+	}
+
+	xv.Close()
+}
+
+// MemberElement does member element encoding. It returns a Value.
+// The MemberElement method should be used for all shapes except flattened shapes.
+//
+// A call to MemberElement will write nested element tags directly using the
+// provided start element. The value returned by MemberElement should be closed.
+func (xv Value) MemberElement(element StartElement) Value {
+	return newValue(xv.w, xv.scratch, element)
+}
+
+// FlattenedElement returns flattened element encoding. It returns a Value.
+// This method should be used for flattened shapes.
+//
+// Unlike MemberElement, a flattened element will NOT write element tags
+// directly for the associated start element.
+//
+// The value returned by FlattenedElement does not need to be closed.
+func (xv Value) FlattenedElement(element StartElement) Value {
+	v := newFlattenedValue(xv.w, xv.scratch, element)
+	v.isFlattened = true
+	return v
+}
+
+// Array returns an array encoder. By default, the members of the array are
+// wrapped with the `<member>` element tag.
+// If the value is marked as flattened, the start element is used to wrap the members instead of
+// the `<member>` element.
+func (xv Value) Array() *Array {
+	return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened)
+}
+
+/*
+ArrayWithCustomName returns an array encoder.
+
+It takes a named start element as an argument; that start element will be used to wrap each xml array entry,
+for example, `<someList><customName>entry1</customName></someList>`.
+Here the `customName` named start element is wrapped around each array member.
+*/
+func (xv Value) ArrayWithCustomName(element StartElement) *Array {
+	return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened)
+}
+
+/*
+Map returns a map encoder. By default, the map entries are
+wrapped with the `<entry>` element tag.
+
+If the value is marked as flattened, the start element is used to wrap the entry instead of
+the `<entry>` element.
+*/
+func (xv Value) Map() *Map {
+	// flattened map
+	if xv.isFlattened {
+		return newFlattenedMap(xv.w, xv.scratch, xv.startElement)
+	}
+
+	// un-flattened map
+	return newMap(xv.w, xv.scratch)
+}
+
+// encodeByteSlice is a modified copy of the json encoder's encodeByteSlice.
+// It is used to base64 encode a byte slice.
+func encodeByteSlice(w writer, scratch []byte, v []byte) {
+	if v == nil {
+		return
+	}
+
+	encodedLen := base64.StdEncoding.EncodedLen(len(v))
+	if encodedLen <= len(scratch) {
+		// If the encoded bytes fit in scratch, avoid an extra
+		// allocation and use the cheaper Encoding.Encode.
+		dst := scratch[:encodedLen]
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else if encodedLen <= 1024 {
+		// The encoded bytes are short enough to allocate for, and
+		// Encoding.Encode is still cheaper.
+		dst := make([]byte, encodedLen)
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else {
+		// The encoded bytes are too long to cheaply allocate, and
+		// Encoding.Encode is no longer noticeably cheaper.
+		enc := base64.NewEncoder(base64.StdEncoding, w)
+		enc.Write(v)
+		enc.Close()
+	}
+}
+
+// IsFlattened returns true if the value is for a flattened shape.
+func (xv Value) IsFlattened() bool {
+	return xv.isFlattened
+}
+
+// Close closes the value.
+func (xv Value) Close() {
+	writeEndElement(xv.w, xv.startElement.End())
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
new file mode 100644
index 000000000..dc4eebdff
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
@@ -0,0 +1,154 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"strings"
+)
+
+// NodeDecoder is an XML decoder wrapper that is responsible for decoding
+// a single XML Node element and its nested member elements. This wrapper decoder
+// takes in the start element of the top level node being decoded.
+type NodeDecoder struct {
+	Decoder *xml.Decoder
+	StartEl xml.StartElement
+}
+
+// WrapNodeDecoder returns an initialized NodeDecoder
+func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder {
+	return NodeDecoder{
+		Decoder: decoder,
+		StartEl: startEl,
+	}
+}
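Aside: a sketch of driving NodeDecoder, together with FetchRootElement defined below, over a hand-written response body:

```go
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	body := []byte(`<!-- preamble --><Pet><Name>fido</Name><Age>7</Age></Pet>`)
	dec := xml.NewDecoder(bytes.NewReader(body))

	root, err := smithyxml.FetchRootElement(dec) // skips the comment, returns <Pet>
	if err != nil {
		panic(err)
	}

	el, err := smithyxml.WrapNodeDecoder(dec, root).GetElement("Name")
	if err != nil {
		panic(err)
	}

	value, err := smithyxml.WrapNodeDecoder(dec, el).Value() // char data of <Name>
	if err != nil {
		panic(err)
	}
	fmt.Println(string(value)) // fido
}
```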
+// Token on a NodeDecoder returns an xml.StartElement. It returns a boolean that indicates whether
+// the token is the node decoder's end node token, and an error for any error that
+// occurred while retrieving the start element
+func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) {
+	for {
+		token, e := d.Decoder.Token()
+		if e != nil {
+			return t, done, e
+		}
+
+		// check if we reach end of the node being decoded
+		if el, ok := token.(xml.EndElement); ok {
+			return t, el == d.StartEl.End(), err
+		}
+
+		if t, ok := token.(xml.StartElement); ok {
+			return restoreAttrNamespaces(t), false, err
+		}
+
+		// skip the token if it is a comment, a preamble, or empty space due to
+		// indentation, or if it is a value that is not expected
+	}
+}
+
+// restoreAttrNamespaces updates XML attributes to restore the short namespaces found within
+// the raw XML document.
+func restoreAttrNamespaces(node xml.StartElement) xml.StartElement {
+	if len(node.Attr) == 0 {
+		return node
+	}
+
+	// Generate a mapping of XML namespace values to their short names.
+	ns := map[string]string{}
+	for _, a := range node.Attr {
+		if a.Name.Space == "xmlns" {
+			ns[a.Value] = a.Name.Local
+			break
+		}
+	}
+
+	for i, a := range node.Attr {
+		if a.Name.Space == "xmlns" {
+			continue
+		}
+		// By default, xml.Decoder will fully resolve these namespaces. So if you had
+		// <foo xmlns:bar="baz" bar:bin="hi"/> then by default the second attribute would
+		// have the `Name.Space` resolved to `baz`. But we need it to continue to resolve
+		// as `bar` so we can easily identify it later on.
+		if v, ok := ns[node.Attr[i].Name.Space]; ok {
+			node.Attr[i].Name.Space = v
+		}
+	}
+	return node
+}
+
+// GetElement looks for the given tag name at the current level and returns the element if found,
+// skipping over non-matching elements. It returns an error if the node is not found, or if an
+// error occurs while walking the document.
+func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) {
+	for {
+		token, done, err := d.Token()
+		if err != nil {
+			return t, err
+		}
+		if done {
+			return t, fmt.Errorf("%s node not found", name)
+		}
+		switch {
+		case strings.EqualFold(name, token.Name.Local):
+			return token, nil
+		default:
+			err = d.Decoder.Skip()
+			if err != nil {
+				return t, err
+			}
+		}
+	}
+}
+
+// Value provides an abstraction to retrieve char data value within an xml element.
+// The method will return an error if it encounters a nested xml element instead of char data.
+// This method should only be used to retrieve simple type or blob shape values as []byte.
+func (d NodeDecoder) Value() (c []byte, err error) {
+	t, e := d.Decoder.Token()
+	if e != nil {
+		return c, e
+	}
+
+	endElement := d.StartEl.End()
+
+	switch ev := t.(type) {
+	case xml.CharData:
+		c = ev.Copy()
+	case xml.EndElement: // end tag or self-closing
+		if ev == endElement {
+			return []byte{}, err
+		}
+		return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+	default:
+		return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+	}
+
+	t, e = d.Decoder.Token()
+	if e != nil {
+		return c, e
+	}
+
+	if ev, ok := t.(xml.EndElement); ok {
+		if ev == endElement {
+			return c, err
+		}
+	}
+
+	return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t)
+}
+
+// FetchRootElement takes in a decoder and returns the first start element within the xml body.
+// This function is useful for fetching the start element of an XML response, ignoring
+// the comments and preamble.
+func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) {
+	for {
+		t, e := decoder.Token()
+		if e != nil {
+			return startElement, e
+		}
+
+		if startElement, ok := t.(xml.StartElement); ok {
+			return startElement, err
+		}
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go
new file mode 100644
index 000000000..d6948d020
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/errors.go
@@ -0,0 +1,137 @@
+package smithy
+
+import "fmt"
+
+// APIError provides the generic API and protocol agnostic error type all SDK
+// generated exception types will implement.
+type APIError interface {
+	error
+
+	// ErrorCode returns the error code for the API exception.
+	ErrorCode() string
+	// ErrorMessage returns the error message for the API exception.
+	ErrorMessage() string
+	// ErrorFault returns the fault for the API exception.
+	ErrorFault() ErrorFault
+}
+
+// GenericAPIError provides a generic concrete API error type that SDKs can use
+// to deserialize error responses into. Should be used for unmodeled or untyped
+// errors.
+type GenericAPIError struct {
+	Code    string
+	Message string
+	Fault   ErrorFault
+}
+
+// ErrorCode returns the error code for the API exception.
+func (e *GenericAPIError) ErrorCode() string { return e.Code }
+
+// ErrorMessage returns the error message for the API exception.
+func (e *GenericAPIError) ErrorMessage() string { return e.Message }
+
+// ErrorFault returns the fault for the API exception.
+func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault }
+
+func (e *GenericAPIError) Error() string {
+	return fmt.Sprintf("api error %s: %s", e.Code, e.Message)
+}
+
+var _ APIError = (*GenericAPIError)(nil)
+
+// OperationError decorates an underlying error which occurred while invoking
+// an operation with the names of the operation and API.
+type OperationError struct {
+	ServiceID     string
+	OperationName string
+	Err           error
+}
+
+// Service returns the name of the API service the error occurred with.
+func (e *OperationError) Service() string { return e.ServiceID }
+
+// Operation returns the name of the API operation the error occurred with.
+func (e *OperationError) Operation() string { return e.OperationName }
+
+// Unwrap returns the nested error if any, or nil.
+func (e *OperationError) Unwrap() error { return e.Err }
+
+func (e *OperationError) Error() string {
+	return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err)
+}
+
+// DeserializationError provides a wrapper for an error that occurs during
+// deserialization.
+type DeserializationError struct {
+	Err      error // original error
+	Snapshot []byte
+}
+
+// Error returns a formatted error for DeserializationError
+func (e *DeserializationError) Error() string {
+	const msg = "deserialization failed"
+	if e.Err == nil {
+		return msg
+	}
+	return fmt.Sprintf("%s, %v", msg, e.Err)
+}
+
+// Unwrap returns the underlying Error in DeserializationError
+func (e *DeserializationError) Unwrap() error { return e.Err }
+
+// ErrorFault provides the type for a Smithy API error fault.
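+//
+// A usage sketch (editor's illustration, not part of this package;
+// isServerFault is a hypothetical helper, and the caller imports "errors"):
+//
+//	func isServerFault(err error) bool {
+//		var apiErr APIError
+//		return errors.As(err, &apiErr) && apiErr.ErrorFault() == FaultServer
+//	}
+//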
+type ErrorFault int
+
+// ErrorFault enumeration values
+const (
+	FaultUnknown ErrorFault = iota
+	FaultServer
+	FaultClient
+)
+
+func (f ErrorFault) String() string {
+	switch f {
+	case FaultServer:
+		return "server"
+	case FaultClient:
+		return "client"
+	default:
+		return "unknown"
+	}
+}
+
+// SerializationError represents an error that occurred while attempting to serialize a request
+type SerializationError struct {
+	Err error // original error
+}
+
+// Error returns a formatted error for SerializationError
+func (e *SerializationError) Error() string {
+	const msg = "serialization failed"
+	if e.Err == nil {
+		return msg
+	}
+	return fmt.Sprintf("%s: %v", msg, e.Err)
+}
+
+// Unwrap returns the underlying Error in SerializationError
+func (e *SerializationError) Unwrap() error { return e.Err }
+
+// CanceledError is the error that will be returned by an API request that was
+// canceled. API operations given a Context may return this error when
+// canceled.
+type CanceledError struct {
+	Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*CanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
+func (e *CanceledError) Unwrap() error {
+	return e.Err
+}
+
+func (e *CanceledError) Error() string {
+	return fmt.Sprintf("canceled, %v", e.Err)
+}
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go
new file mode 100644
index 000000000..8eaac41e7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package smithy
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.13.5"
diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE
new file mode 100644
index 000000000..fe6a62006
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go
new file mode 100644
index 000000000..9c9d02b94
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go
@@ -0,0 +1,8 @@
+// Package singleflight provides a duplicate function call suppression
+// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight
+// package. The package is forked because it is part of the unstable and
+// unversioned golang.org/x/sync module.
+//
+// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight
+
+package singleflight
diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go
new file mode 100644
index 000000000..e8a1b17d5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package singleflight
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"sync"
+)
+
+// errGoexit indicates the runtime.Goexit was called in
+// the user given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of the given function.
+type panicError struct {
+	value interface{}
+	stack []byte
+}
+
+// Error implements the error interface.
+func (p *panicError) Error() string {
+	return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func newPanicError(v interface{}) error {
+	stack := debug.Stack()
+
+	// The first line of the stack trace is of the form "goroutine N [status]:"
+	// but by the time the panic reaches Do the goroutine may no longer exist
+	// and its status will have changed. Trim out the misleading line.
+	if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+		stack = stack[line+1:]
+	}
+	return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+	wg sync.WaitGroup
+
+	// These fields are written once before the WaitGroup is done
+	// and are only read after the WaitGroup is done.
+	val interface{}
+	err error
+
+	// forgotten indicates whether Forget was called with this call's key
+	// while the call was still in flight.
+	forgotten bool
+
+	// These fields are read and written with the singleflight
+	// mutex held before the WaitGroup is done, and are read but
+	// not written after the WaitGroup is done.
+	dups  int
+	chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
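+//
+// A minimal usage sketch (editor's illustration, not part of this package;
+// fetchValue is a hypothetical helper):
+//
+//	var g Group
+//	v, err, shared := g.Do("config", func() (interface{}, error) {
+//		return fetchValue() // concurrent Do calls with the same key share this result
+//	})
+//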
+type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + c.wg.Done() + g.mu.Lock() + defer g.mu.Unlock() + if !c.forgotten { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. + } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. 
+func (g *Group) Forget(key string) {
+	g.mu.Lock()
+	if c, ok := g.m[key]; ok {
+		c.forgotten = true
+	}
+	delete(g.m, key)
+	g.mu.Unlock()
+}
diff --git a/vendor/github.com/aws/smithy-go/io/byte.go b/vendor/github.com/aws/smithy-go/io/byte.go
new file mode 100644
index 000000000..f8417c15b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/byte.go
@@ -0,0 +1,12 @@
+package io
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/smithy-go/io/doc.go b/vendor/github.com/aws/smithy-go/io/doc.go
new file mode 100644
index 000000000..a6a33eaf5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/doc.go
@@ -0,0 +1,2 @@
+// Package io provides utilities for Smithy generated API clients.
+package io
diff --git a/vendor/github.com/aws/smithy-go/io/reader.go b/vendor/github.com/aws/smithy-go/io/reader.go
new file mode 100644
index 000000000..07063f296
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/reader.go
@@ -0,0 +1,16 @@
+package io
+
+import (
+	"io"
+)
+
+// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method
+// that does nothing.
+type ReadSeekNopCloser struct {
+	io.ReadSeeker
+}
+
+// Close does nothing.
+func (ReadSeekNopCloser) Close() error {
+	return nil
+}
diff --git a/vendor/github.com/aws/smithy-go/io/ringbuffer.go b/vendor/github.com/aws/smithy-go/io/ringbuffer.go
new file mode 100644
index 000000000..06b476add
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/ringbuffer.go
@@ -0,0 +1,94 @@
+package io
+
+import (
+	"bytes"
+	"io"
+)
+
+// RingBuffer satisfies the io.ReadWriter interface.
+//
+// RingBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a
+// revolving window.
+type RingBuffer struct {
+	slice []byte
+	start int
+	end   int
+	size  int
+}
+
+// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer.
+func NewRingBuffer(slice []byte) *RingBuffer {
+	ringBuf := RingBuffer{
+		slice: slice,
+	}
+	return &ringBuf
+}
+
+// Write method inserts the elements in a byte slice, and returns the number of bytes written along with any error.
+func (r *RingBuffer) Write(p []byte) (int, error) {
+	for _, b := range p {
+		// check if end points to invalid index, we need to circle back
+		if r.end == len(r.slice) {
+			r.end = 0
+		}
+		// check if start points to invalid index, we need to circle back
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+		// if ring buffer is filled, increment the start index
+		if r.size == len(r.slice) {
+			r.size--
+			r.start++
+		}
+
+		r.slice[r.end] = b
+		r.end++
+		r.size++
+	}
+	return len(p), nil
+}
+
+// Read copies the data on the ring buffer into the byte slice provided to the method.
+// Returns the read count along with any error encountered while reading.
+func (r *RingBuffer) Read(p []byte) (int, error) {
+	// readCount keeps track of the number of bytes read
+	var readCount int
+	for j := 0; j < len(p); j++ {
+		// if ring buffer is empty or completely read
+		// return EOF error.
+		if r.size == 0 {
+			return readCount, io.EOF
+		}
+
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+
+		p[j] = r.slice[r.start]
+		readCount++
+		// increment the start pointer for ring buffer
+		r.start++
+		// decrement the size of ring buffer
+		r.size--
+	}
+	return readCount, nil
+}
+
+// Len returns the number of unread bytes in the buffer.
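+//
+// Editor's sketch of the revolving-window behavior (illustrative only):
+//
+//	rb := NewRingBuffer(make([]byte, 4))
+//	rb.Write([]byte("abcdef")) // only the most recent 4 bytes are retained
+//	buf := make([]byte, 4)
+//	n, _ := rb.Read(buf) // n == 4, buf == []byte("cdef")
+//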
+func (r *RingBuffer) Len() int {
+	return r.size
+}
+
+// Bytes returns a copy of the RingBuffer's bytes.
+func (r RingBuffer) Bytes() []byte {
+	var b bytes.Buffer
+	io.Copy(&b, &r)
+	return b.Bytes()
+}
+
+// Reset resets the ring buffer.
+func (r *RingBuffer) Reset() {
+	*r = RingBuffer{
+		slice: r.slice,
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/local-mod-replace.sh b/vendor/github.com/aws/smithy-go/local-mod-replace.sh
new file mode 100644
index 000000000..800bf3769
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/local-mod-replace.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+PROJECT_DIR=""
+SMITHY_SOURCE_DIR=$(cd `dirname $0` && pwd)
+
+usage() {
+    echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2
+    exit 1
+}
+
+while getopts "hs:d:" options; do
+    case "${options}" in
+    s)
+        SMITHY_SOURCE_DIR=${OPTARG}
+        if [ "$SMITHY_SOURCE_DIR" == "" ]; then
+            echo "path to smithy-go source directory is required" || exit
+            usage
+        fi
+        ;;
+    d)
+        PROJECT_DIR=${OPTARG}
+        ;;
+    h)
+        usage
+        ;;
+    *)
+        usage
+        ;;
+    esac
+done
+
+if [ "$PROJECT_DIR" != "" ]; then
+    cd $PROJECT_DIR || exit
+fi
+
+go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do
+    repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}}
+    echo -replace $x=$repPath
+done | xargs go mod edit
diff --git a/vendor/github.com/aws/smithy-go/logging/logger.go b/vendor/github.com/aws/smithy-go/logging/logger.go
new file mode 100644
index 000000000..2071924bd
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/logging/logger.go
@@ -0,0 +1,82 @@
+package logging
+
+import (
+	"context"
+	"io"
+	"log"
+)
+
+// Classification is the type of the log entry's classification name.
+type Classification string
+
+// Set of standard classifications that can be used by clients and middleware
+const (
+	Warn  Classification = "WARN"
+	Debug Classification = "DEBUG"
+)
+
+// Logger is an interface for logging entries at certain classifications.
+type Logger interface {
+	// Logf is expected to support the standard fmt package "verbs".
+	Logf(classification Classification, format string, v ...interface{})
+}
+
+// LoggerFunc is a wrapper around a function to satisfy the Logger interface.
+type LoggerFunc func(classification Classification, format string, v ...interface{})
+
+// Logf delegates the logging request to the wrapped function.
+func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) {
+	f(classification, format, v...)
+}
+
+// ContextLogger is an optional interface a Logger implementation may expose that provides
+// the ability to create context aware log entries.
+type ContextLogger interface {
+	WithContext(context.Context) Logger
+}
+
+// WithContext will pass the provided context to logger if it implements the ContextLogger interface and return the resulting
+// logger. Otherwise the logger will be returned as is. As a special case if a nil logger is provided, a Nop logger will
+// be returned to the caller.
+func WithContext(ctx context.Context, logger Logger) Logger {
+	if logger == nil {
+		return Nop{}
+	}
+
+	cl, ok := logger.(ContextLogger)
+	if !ok {
+		return logger
+	}
+
+	return cl.WithContext(ctx)
+}
+
+// Nop is a Logger implementation that simply does not perform any logging.
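+//
+// Editor's note, a minimal wiring sketch for the types in this file
+// (illustrative only; the caller imports "os"):
+//
+//	logger := NewStandardLogger(os.Stderr)
+//	logger.Logf(Debug, "retry attempt %d", 2)
+//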
+type Nop struct{}
+
+// Logf simply returns without performing any action
+func (n Nop) Logf(Classification, string, ...interface{}) {
+	return
+}
+
+// StandardLogger is a Logger implementation that wraps the standard library logger, and delegates logging to its
+// Printf method.
+type StandardLogger struct {
+	Logger *log.Logger
+}
+
+// Logf logs the given classification and message to the underlying logger.
+func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) {
+	if len(classification) != 0 {
+		format = string(classification) + " " + format
+	}
+
+	s.Logger.Printf(format, v...)
+}
+
+// NewStandardLogger returns a new StandardLogger
+func NewStandardLogger(writer io.Writer) *StandardLogger {
+	return &StandardLogger{
+		Logger: log.New(writer, "SDK ", log.LstdFlags),
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/doc.go b/vendor/github.com/aws/smithy-go/middleware/doc.go
new file mode 100644
index 000000000..9858928a7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/doc.go
@@ -0,0 +1,67 @@
+// Package middleware provides transport agnostic middleware for decorating SDK
+// handlers.
+//
+// The Smithy middleware stack provides ordered behavior to be invoked on an
+// underlying handler. The stack is separated into steps that are invoked in a
+// static order. A step is a collection of middleware that are injected into an
+// ordered list defined by the user. The user may add, insert, swap, and remove a
+// step's middleware. When the stack is invoked the step middleware become static,
+// and their order cannot be modified.
+//
+// A stack and its step middleware are **not** safe to modify concurrently.
+//
+// A stack will use the ordered list of middleware to decorate an underlying
+// handler. A handler could be something like an HTTP Client that round trips an
+// API operation over HTTP.
+//
+// Smithy Middleware Stack
+//
+// A Stack is a collection of middleware that wrap a handler. The stack can be
+// broken down into discrete steps. Each step may contain zero or more middleware
+// specific to that stack's step.
+//
+// A Stack Step is a predefined set of middleware that are invoked in a static
+// order by the Stack. These steps represent fixed points in the middleware stack
+// for organizing specific behavior, such as serialize and build. A Stack Step is
+// composed of zero or more middleware that are specific to that step. A step may
+// define its own set of input/output parameters the generic input/output
+// parameters are cast from. A step calls its middleware recursively, before
+// calling the next step in the stack returning the result or error of the step
+// middleware decorating the underlying handler.
+//
+// * Initialize: Prepares the input, and sets any default parameters as needed,
+// (e.g. idempotency token, and presigned URLs).
+//
+// * Serialize: Serializes the prepared input into a data structure that can be
+// consumed by the target transport's message, (e.g. REST-JSON serialization).
+//
+// * Build: Adds additional metadata to the serialized transport message, (e.g.
+// HTTP's Content-Length header, or body checksum). Decorations and
+// modifications to the message should be copied to all message attempts.
+//
+// * Finalize: Performs final preparations needed before sending the message. The
+// message should already be complete by this stage, and is only altered to
+// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request
+// signing).
+// +// * Deserialize: Reacts to the handler's response returned by the recipient of +// the request message. Deserializes the response into a structured type or +// error above stacks can react to. +// +// Adding Middleware to a Stack Step +// +// Middleware can be added to a step front or back, or relative, by name, to an +// existing middleware in that stack. If a middleware does not have a name, a +// unique name will be generated at the middleware and be added to the step. +// +// // Create middleware stack +// stack := middleware.NewStack() +// +// // Add middleware to stack steps +// stack.Initialize.Add(paramValidationMiddleware, middleware.After) +// stack.Serialize.Add(marshalOperationFoo, middleware.After) +// stack.Deserialize.Add(unmarshalOperationFoo, middleware.After) +// +// // Invoke middleware on handler. +// resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler) +package middleware diff --git a/vendor/github.com/aws/smithy-go/middleware/logging.go b/vendor/github.com/aws/smithy-go/middleware/logging.go new file mode 100644 index 000000000..c2f0dbb6b --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/logging.go @@ -0,0 +1,46 @@ +package middleware + +import ( + "context" + + "github.com/aws/smithy-go/logging" +) + +// loggerKey is the context value key for which the logger is associated with. +type loggerKey struct{} + +// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context a logging.Nop logger +// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed +// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is. +func GetLogger(ctx context.Context) logging.Logger { + logger, ok := ctx.Value(loggerKey{}).(logging.Logger) + if !ok || logger == nil { + return logging.Nop{} + } + + return logging.WithContext(ctx, logger) +} + +// SetLogger sets the provided logger value on the provided ctx. +func SetLogger(ctx context.Context, logger logging.Logger) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +type setLogger struct { + Logger logging.Logger +} + +// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context. +func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error { + return stack.Initialize.Add(&setLogger{Logger: logger}, After) +} + +func (a *setLogger) ID() string { + return "SetLogger" +} + +func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, +) { + return next.HandleInitialize(SetLogger(ctx, a.Logger), in) +} diff --git a/vendor/github.com/aws/smithy-go/middleware/metadata.go b/vendor/github.com/aws/smithy-go/middleware/metadata.go new file mode 100644 index 000000000..7bb7dbcf5 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/metadata.go @@ -0,0 +1,65 @@ +package middleware + +// MetadataReader provides an interface for reading metadata from the +// underlying metadata container. +type MetadataReader interface { + Get(key interface{}) interface{} +} + +// Metadata provides storing and reading metadata values. Keys may be any +// comparable value type. Get and set will panic if key is not a comparable +// value type. +// +// Metadata uses lazy initialization, and Set method must be called as an +// addressable value, or pointer. Not doing so may cause key/value pair to not +// be set. 
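+//
+// Editor's sketch of the addressability caveat (illustrative; requestIDKey is
+// an assumed key type, not part of this package):
+//
+//	type requestIDKey struct{}
+//
+//	var md Metadata
+//	md.Set(requestIDKey{}, "abc-123") // md is addressable, so the entry is kept
+//	id, _ := md.Get(requestIDKey{}).(string)
+//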
+type Metadata struct {
+	values map[interface{}]interface{}
+}
+
+// Get attempts to retrieve the value the key points to. Returns nil if the
+// key was not found.
+//
+// Panics if key type is not comparable.
+func (m Metadata) Get(key interface{}) interface{} {
+	return m.values[key]
+}
+
+// Clone creates a shallow copy of Metadata entries, returning a new Metadata
+// value with the original entries copied into it.
+func (m Metadata) Clone() Metadata {
+	vs := make(map[interface{}]interface{}, len(m.values))
+	for k, v := range m.values {
+		vs[k] = v
+	}
+
+	return Metadata{
+		values: vs,
+	}
+}
+
+// Set stores the value pointed to by the key. If a value already exists at
+// that key it will be replaced with the new value.
+//
+// Set method must be called as an addressable value, or pointer. If Set is not
+// called as an addressable value or pointer, the key value pair being set may
+// be lost.
+//
+// Panics if the key type is not comparable.
+func (m *Metadata) Set(key, value interface{}) {
+	if m.values == nil {
+		m.values = map[interface{}]interface{}{}
+	}
+	m.values[key] = value
+}
+
+// Has returns whether the key exists in the metadata.
+//
+// Panics if the key type is not comparable.
+func (m Metadata) Has(key interface{}) bool {
+	if m.values == nil {
+		return false
+	}
+	_, ok := m.values[key]
+	return ok
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/middleware.go b/vendor/github.com/aws/smithy-go/middleware/middleware.go
new file mode 100644
index 000000000..803b7c751
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/middleware.go
@@ -0,0 +1,71 @@
+package middleware
+
+import (
+	"context"
+)
+
+// Handler provides the interface for performing the logic to obtain an output,
+// or error for the given input.
+type Handler interface {
+	// Handle performs logic to obtain an output for the given input. Handler
+	// should be decorated with middleware to perform input specific behavior.
+	Handle(ctx context.Context, input interface{}) (
+		output interface{}, metadata Metadata, err error,
+	)
+}
+
+// HandlerFunc provides a wrapper around a function pointer to be used as a
+// middleware handler.
+type HandlerFunc func(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+)
+
+// Handle invokes the underlying function, returning the result.
+func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+) {
+	return fn(ctx, input)
+}
+
+// Middleware provides the interface to call handlers in a chain.
+type Middleware interface {
+	// ID provides a unique identifier for the middleware.
+	ID() string
+
+	// Performs the middleware's handling of the input, returning the output,
+	// or error. The middleware can invoke the next Handler if handling should
+	// continue.
+	HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+		output interface{}, metadata Metadata, err error,
+	)
+}
+
+// decoratedHandler wraps a middleware in order to call the next handler in
+// the chain.
+type decoratedHandler struct {
+	// The next handler to be called.
+	Next Handler
+
+	// The current middleware decorating the handler.
+	With Middleware
+}
+
+// Handle implements the Handler interface to handle an operation invocation.
+func (m decoratedHandler) Handle(ctx context.Context, input interface{}) ( + output interface{}, metadata Metadata, err error, +) { + return m.With.HandleMiddleware(ctx, input, m.Next) +} + +// DecorateHandler decorates a handler with a middleware. Wrapping the handler +// with the middleware. +func DecorateHandler(h Handler, with ...Middleware) Handler { + for i := len(with) - 1; i >= 0; i-- { + h = decoratedHandler{ + Next: h, + With: with[i], + } + } + + return h +} diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go new file mode 100644 index 000000000..4b195308c --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go @@ -0,0 +1,268 @@ +package middleware + +import "fmt" + +// RelativePosition provides specifying the relative position of a middleware +// in an ordered group. +type RelativePosition int + +// Relative position for middleware in steps. +const ( + After RelativePosition = iota + Before +) + +type ider interface { + ID() string +} + +// orderedIDs provides an ordered collection of items with relative ordering +// by name. +type orderedIDs struct { + order *relativeOrder + items map[string]ider +} + +const baseOrderedItems = 5 + +func newOrderedIDs() *orderedIDs { + return &orderedIDs{ + order: newRelativeOrder(), + items: make(map[string]ider, baseOrderedItems), + } +} + +// Add injects the item to the relative position of the item group. Returns an +// error if the item already exists. +func (g *orderedIDs) Add(m ider, pos RelativePosition) error { + id := m.ID() + if len(id) == 0 { + return fmt.Errorf("empty ID, ID must not be empty") + } + + if err := g.order.Add(pos, id); err != nil { + return err + } + + g.items[id] = m + return nil +} + +// Insert injects the item relative to an existing item id. Returns an error if +// the original item does not exist, or the item being added already exists. +func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error { + if len(m.ID()) == 0 { + return fmt.Errorf("insert ID must not be empty") + } + if len(relativeTo) == 0 { + return fmt.Errorf("relative to ID must not be empty") + } + + if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil { + return err + } + + g.items[m.ID()] = m + return nil +} + +// Get returns the ider identified by id. If ider is not present, returns false. +func (g *orderedIDs) Get(id string) (ider, bool) { + v, ok := g.items[id] + return v, ok +} + +// Swap removes the item by id, replacing it with the new item. Returns an error +// if the original item doesn't exist. +func (g *orderedIDs) Swap(id string, m ider) (ider, error) { + if len(id) == 0 { + return nil, fmt.Errorf("swap from ID must not be empty") + } + + iderID := m.ID() + if len(iderID) == 0 { + return nil, fmt.Errorf("swap to ID must not be empty") + } + + if err := g.order.Swap(id, iderID); err != nil { + return nil, err + } + + removed := g.items[id] + + delete(g.items, id) + g.items[iderID] = m + + return removed, nil +} + +// Remove removes the item by id. Returns an error if the item +// doesn't exist. 
+func (g *orderedIDs) Remove(id string) (ider, error) { + if len(id) == 0 { + return nil, fmt.Errorf("remove ID must not be empty") + } + + if err := g.order.Remove(id); err != nil { + return nil, err + } + + removed := g.items[id] + delete(g.items, id) + return removed, nil +} + +func (g *orderedIDs) List() []string { + items := g.order.List() + order := make([]string, len(items)) + copy(order, items) + return order +} + +// Clear removes all entries and slots. +func (g *orderedIDs) Clear() { + g.order.Clear() + g.items = map[string]ider{} +} + +// GetOrder returns the item in the order it should be invoked in. +func (g *orderedIDs) GetOrder() []interface{} { + order := g.order.List() + ordered := make([]interface{}, len(order)) + for i := 0; i < len(order); i++ { + ordered[i] = g.items[order[i]] + } + + return ordered +} + +// relativeOrder provides ordering of item +type relativeOrder struct { + order []string +} + +func newRelativeOrder() *relativeOrder { + return &relativeOrder{ + order: make([]string, 0, baseOrderedItems), + } +} + +// Add inserts an item into the order relative to the position provided. +func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error { + if len(ids) == 0 { + return nil + } + + for _, id := range ids { + if _, ok := s.has(id); ok { + return fmt.Errorf("already exists, %v", id) + } + } + + switch pos { + case Before: + return s.insert(0, Before, ids...) + + case After: + s.order = append(s.order, ids...) + + default: + return fmt.Errorf("invalid position, %v", int(pos)) + } + + return nil +} + +// Insert injects an item before or after the relative item. Returns +// an error if the relative item does not exist. +func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error { + if len(ids) == 0 { + return nil + } + + for _, id := range ids { + if _, ok := s.has(id); ok { + return fmt.Errorf("already exists, %v", id) + } + } + + i, ok := s.has(relativeTo) + if !ok { + return fmt.Errorf("not found, %v", relativeTo) + } + + return s.insert(i, pos, ids...) +} + +// Swap will replace the item id with the to item. Returns an +// error if the original item id does not exist. Allows swapping out an +// item for another item with the same id. +func (s *relativeOrder) Swap(id, to string) error { + i, ok := s.has(id) + if !ok { + return fmt.Errorf("not found, %v", id) + } + + if _, ok = s.has(to); ok && id != to { + return fmt.Errorf("already exists, %v", to) + } + + s.order[i] = to + return nil +} + +func (s *relativeOrder) Remove(id string) error { + i, ok := s.has(id) + if !ok { + return fmt.Errorf("not found, %v", id) + } + + s.order = append(s.order[:i], s.order[i+1:]...) + return nil +} + +func (s *relativeOrder) List() []string { + return s.order +} + +func (s *relativeOrder) Clear() { + s.order = s.order[0:0] +} + +func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error { + switch pos { + case Before: + n := len(ids) + var src []string + if n <= cap(s.order)-len(s.order) { + s.order = s.order[:len(s.order)+n] + src = s.order + } else { + src = s.order + s.order = make([]string, len(s.order)+n) + copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half + } + copy(s.order[i+n:], src[i:]) + copy(s.order[i:], ids) + case After: + if i == len(s.order)-1 || len(s.order) == 0 { + s.order = append(s.order, ids...) + } else { + s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...) 
+	}
+
+	default:
+		return fmt.Errorf("invalid position, %v", int(pos))
+	}
+
+	return nil
+}
+
+func (s *relativeOrder) has(id string) (i int, found bool) {
+	for i := 0; i < len(s.order); i++ {
+		if s.order[i] == id {
+			return i, true
+		}
+	}
+	return 0, false
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go
new file mode 100644
index 000000000..45ccb5b93
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/stack.go
@@ -0,0 +1,209 @@
+package middleware
+
+import (
+	"context"
+	"io"
+	"strings"
+)
+
+// Stack provides a protocol and transport agnostic set of middleware split into
+// distinct steps. Steps have specific transitions between them, that are
+// managed by the individual step.
+//
+// Steps are composed as middleware around the underlying handler in the
+// following order:
+//
+//	Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler
+//
+// Any middleware within the chain may choose to stop and return an error or
+// response. Since the middleware decorate the handler like a call stack, each
+// middleware will receive the result of the next middleware in the chain.
+// Middleware that does not need to react to an input, or result must forward
+// along the input down the chain, or return the result back up the chain.
+//
+//	Initialize <- Serialize <- Build <- Finalize <- Deserialize <- Handler
+type Stack struct {
+	// Initialize prepares the input, and sets any default parameters as
+	// needed, (e.g. idempotency token, and presigned URLs).
+	//
+	// Takes Input Parameters, and returns result or error.
+	//
+	// Receives result or error from Serialize step.
+	Initialize *InitializeStep
+
+	// Serialize serializes the prepared input into a data structure that can be consumed
+	// by the target transport's message, (e.g. REST-JSON serialization)
+	//
+	// Converts Input Parameters into a Request, and returns the result or error.
+	//
+	// Receives result or error from Build step.
+	Serialize *SerializeStep
+
+	// Build adds additional metadata to the serialized transport message
+	// (e.g. HTTP's Content-Length header, or body checksum). Decorations and
+	// modifications to the message should be copied to all message attempts.
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives result or error from Finalize step.
+	Build *BuildStep
+
+	// Finalize performs final preparations needed before sending the message. The
+	// message should already be complete by this stage, and is only altered
+	// to meet the expectations of the recipient (e.g. Retry and AWS SigV4
+	// request signing)
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives result or error from Deserialize step.
+	Finalize *FinalizeStep
+
+	// Deserialize reacts to the handler's response returned by the recipient of the request
+	// message. Deserializes the response into a structured type or error above
+	// stacks can react to.
+	//
+	// Should only forward Request to underlying handler.
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives raw response, or error from underlying handler.
+	Deserialize *DeserializeStep
+
+	id string
+}
+
+// NewStack returns an initialized, empty stack.
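+//
+// Editor's sketch of constructing and populating a stack (illustrative only;
+// myRequest, validateParams, and clientHandler are assumed names, not part of
+// this package):
+//
+//	stack := NewStack("operation Foo", func() interface{} { return &myRequest{} })
+//	_ = stack.Initialize.Add(validateParams, After)
+//	out, _, err := stack.HandleMiddleware(ctx, input, clientHandler)
+//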
+func NewStack(id string, newRequestFn func() interface{}) *Stack { + return &Stack{ + id: id, + Initialize: NewInitializeStep(), + Serialize: NewSerializeStep(newRequestFn), + Build: NewBuildStep(), + Finalize: NewFinalizeStep(), + Deserialize: NewDeserializeStep(), + } +} + +// ID returns the unique ID for the stack as a middleware. +func (s *Stack) ID() string { return s.id } + +// HandleMiddleware invokes the middleware stack decorating the next handler. +// Each step of stack will be invoked in order before calling the next step. +// With the next handler call last. +// +// The input value must be the input parameters of the operation being +// performed. +// +// Will return the result of the operation, or error. +func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) ( + output interface{}, metadata Metadata, err error, +) { + h := DecorateHandler(next, + s.Initialize, + s.Serialize, + s.Build, + s.Finalize, + s.Deserialize, + ) + + return h.Handle(ctx, input) +} + +// List returns a list of all middleware in the stack by step. +func (s *Stack) List() []string { + var l []string + l = append(l, s.id) + + l = append(l, s.Initialize.ID()) + l = append(l, s.Initialize.List()...) + + l = append(l, s.Serialize.ID()) + l = append(l, s.Serialize.List()...) + + l = append(l, s.Build.ID()) + l = append(l, s.Build.List()...) + + l = append(l, s.Finalize.ID()) + l = append(l, s.Finalize.List()...) + + l = append(l, s.Deserialize.ID()) + l = append(l, s.Deserialize.List()...) + + return l +} + +func (s *Stack) String() string { + var b strings.Builder + + w := &indentWriter{w: &b} + + w.WriteLine(s.id) + w.Push() + + writeStepItems(w, s.Initialize) + writeStepItems(w, s.Serialize) + writeStepItems(w, s.Build) + writeStepItems(w, s.Finalize) + writeStepItems(w, s.Deserialize) + + return b.String() +} + +type stackStepper interface { + ID() string + List() []string +} + +func writeStepItems(w *indentWriter, s stackStepper) { + type lister interface { + List() []string + } + + w.WriteLine(s.ID()) + w.Push() + + defer w.Pop() + + // ignore stack to prevent circular iterations + if _, ok := s.(*Stack); ok { + return + } + + for _, id := range s.List() { + w.WriteLine(id) + } +} + +type stringWriter interface { + io.Writer + WriteString(string) (int, error) + WriteRune(rune) (int, error) +} + +type indentWriter struct { + w stringWriter + depth int +} + +const indentDepth = "\t\t\t\t\t\t\t\t\t\t" + +func (w *indentWriter) Push() { + w.depth++ +} + +func (w *indentWriter) Pop() { + w.depth-- + if w.depth < 0 { + w.depth = 0 + } +} + +func (w *indentWriter) WriteLine(v string) { + w.w.WriteString(indentDepth[:w.depth]) + + v = strings.ReplaceAll(v, "\n", "\\n") + v = strings.ReplaceAll(v, "\r", "\\r") + + w.w.WriteString(v) + w.w.WriteRune('\n') +} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack_values.go b/vendor/github.com/aws/smithy-go/middleware/stack_values.go new file mode 100644 index 000000000..ef96009ba --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/stack_values.go @@ -0,0 +1,100 @@ +package middleware + +import ( + "context" + "reflect" + "strings" +) + +// WithStackValue adds a key value pair to the context that is intended to be +// scoped to a stack. Use ClearStackValues to get a new context with all stack +// values cleared. 
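+//
+// Editor's sketch (illustrative; traceKey is an assumed key type, not part of
+// this package):
+//
+//	type traceKey struct{}
+//
+//	ctx = WithStackValue(ctx, traceKey{}, "trace-1")
+//	v := GetStackValue(ctx, traceKey{}) // "trace-1"
+//	ctx = ClearStackValues(ctx)
+//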
+func WithStackValue(ctx context.Context, key, value interface{}) context.Context {
+	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+
+	md = withStackValue(md, key, value)
+	return context.WithValue(ctx, stackValuesKey{}, md)
+}
+
+// ClearStackValues returns a context without any stack values.
+func ClearStackValues(ctx context.Context) context.Context {
+	return context.WithValue(ctx, stackValuesKey{}, nil)
+}
+
+// GetStackValue returns the value pointed to by the key within the stack
+// values, if it is present.
+func GetStackValue(ctx context.Context, key interface{}) interface{} {
+	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+	if md == nil {
+		return nil
+	}
+
+	return md.Value(key)
+}
+
+type stackValuesKey struct{}
+
+type stackValues struct {
+	key    interface{}
+	value  interface{}
+	parent *stackValues
+}
+
+func withStackValue(parent *stackValues, key, value interface{}) *stackValues {
+	if key == nil {
+		panic("nil key")
+	}
+	if !reflect.TypeOf(key).Comparable() {
+		panic("key is not comparable")
+	}
+	return &stackValues{key: key, value: value, parent: parent}
+}
+
+func (m *stackValues) Value(key interface{}) interface{} {
+	if key == m.key {
+		return m.value
+	}
+
+	if m.parent == nil {
+		return nil
+	}
+
+	return m.parent.Value(key)
+}
+
+func (c *stackValues) String() string {
+	var str strings.Builder
+
+	cc := c
+	for cc != nil {
+		str.WriteString("(" +
+			reflect.TypeOf(cc.key).String() +
+			": " +
+			stringify(cc.value) +
+			")")
+		if cc.parent != nil {
+			str.WriteString(" -> ")
+		}
+		cc = cc.parent
+	}
+	str.WriteRune('}')
+
+	return str.String()
+}
+
+type stringer interface {
+	String() string
+}
+
+// stringify tries a bit to stringify v, without using fmt, since we don't
+// want context depending on the unicode tables. This is only used by
+// *valueCtx.String().
+func stringify(v interface{}) string {
+	switch s := v.(type) {
+	case stringer:
+		return s.String()
+	case string:
+		return s
+	}
+	return ""
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go
new file mode 100644
index 000000000..7e1d94cae
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_build.go
@@ -0,0 +1,211 @@
+package middleware
+
+import (
+	"context"
+)
+
+// BuildInput provides the input parameters for the BuildMiddleware to consume.
+// BuildMiddleware may modify the Request value before forwarding the input
+// along to the next BuildHandler.
+type BuildInput struct {
+	Request interface{}
+}
+
+// BuildOutput provides the result returned by the next BuildHandler.
+type BuildOutput struct {
+	Result interface{}
+}
+
+// BuildHandler provides the interface for the next handler the
+// BuildMiddleware will call in the middleware chain.
+type BuildHandler interface {
+	HandleBuild(ctx context.Context, in BuildInput) (
+		out BuildOutput, metadata Metadata, err error,
+	)
+}
+
+// BuildMiddleware provides the interface for middleware specific to the
+// build step. Delegates to the next BuildHandler for further
+// processing.
+type BuildMiddleware interface {
+	// Unique ID for the middleware in the BuildStep. The step does not allow
+	// duplicate IDs.
+	ID() string
+
+	// Invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
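+	//
+	// Editor's sketch of a pass-through implementation (illustrative only;
+	// addHeader is an assumed type, not part of this interface's contract):
+	//
+	//	func (m addHeader) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (
+	//		BuildOutput, Metadata, error,
+	//	) {
+	//		// inspect or decorate in.Request here, then continue the chain
+	//		return next.HandleBuild(ctx, in)
+	//	}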
+ HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( + out BuildOutput, metadata Metadata, err error, + ) +} + +// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided, +// and the func to be invoked. +func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware { + return buildMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type buildMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error) +} + +// ID returns the unique ID for the middleware. +func (s buildMiddlewareFunc) ID() string { return s.id } + +// HandleBuild invokes the middleware Fn. +func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( + out BuildOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ BuildMiddleware = (buildMiddlewareFunc{}) + +// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on +// a handler. +type BuildStep struct { + ids *orderedIDs +} + +// NewBuildStep returns a BuildStep ready to have middleware for +// initialization added to it. +func NewBuildStep() *BuildStep { + return &BuildStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*BuildStep)(nil) + +// ID returns the unique name of the step as a middleware. +func (s *BuildStep) ID() string { + return "Build stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h BuildHandler = buildWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedBuildHandler{ + Next: h, + With: order[i].(BuildMiddleware), + } + } + + sIn := BuildInput{ + Request: in, + } + + res, metadata, err := h.HandleBuild(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *BuildStep) Get(id string) (BuildMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(BuildMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware id. +// Returns an error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or an error if the middleware to be removed +// doesn't exist. +func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(BuildMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. 
+func (s *BuildStep) Remove(id string) (BuildMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(BuildMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *BuildStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *BuildStep) Clear() {
+	s.ids.Clear()
+}
+
+type buildWrapHandler struct {
+	Next Handler
+}
+
+var _ BuildHandler = (*buildWrapHandler)(nil)
+
+// Implements BuildHandler, converts types and delegates to underlying
+// generic handler.
+func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	res, metadata, err := w.Next.Handle(ctx, in.Request)
+	return BuildOutput{
+		Result: res,
+	}, metadata, err
+}
+
+type decoratedBuildHandler struct {
+	Next BuildHandler
+	With BuildMiddleware
+}
+
+var _ BuildHandler = (*decoratedBuildHandler)(nil)
+
+func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleBuild(ctx, in, h.Next)
+}
+
+// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler.
+type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error)
+
+// HandleBuild invokes the wrapped function with the provided arguments.
+func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) {
+	return b(ctx, in)
+}
+
+var _ BuildHandler = BuildHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
new file mode 100644
index 000000000..448607215
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
@@ -0,0 +1,217 @@
+package middleware
+
+import (
+	"context"
+)
+
+// DeserializeInput provides the input parameters for the DeserializeMiddleware to
+// consume. DeserializeMiddleware should not modify the Request, and instead
+// forward it along to the next DeserializeHandler.
+type DeserializeInput struct {
+	Request interface{}
+}
+
+// DeserializeOutput provides the result returned by the next
+// DeserializeHandler. The DeserializeMiddleware should deserialize the
+// RawResponse into a Result that can be consumed by middleware higher up in
+// the stack.
+type DeserializeOutput struct {
+	RawResponse interface{}
+	Result      interface{}
+}
+
+// DeserializeHandler provides the interface for the next handler the
+// DeserializeMiddleware will call in the middleware chain.
+type DeserializeHandler interface {
+	HandleDeserialize(ctx context.Context, in DeserializeInput) (
+		out DeserializeOutput, metadata Metadata, err error,
+	)
+}
+
+// DeserializeMiddleware provides the interface for middleware specific to the
+// deserialize step. Delegates to the next DeserializeHandler for further
+// processing.
+type DeserializeMiddleware interface {
+	// ID returns a unique ID for the middleware in the DeserializeStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleDeserialize invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
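+	//
+	// Editor's sketch (illustrative only): a deserialize middleware typically
+	// calls next first and reacts to the raw response on the way back up:
+	//
+	//	out, md, err := next.HandleDeserialize(ctx, in)
+	//	if err != nil {
+	//		return out, md, err
+	//	}
+	//	// inspect out.RawResponse and populate out.Result here
+	//	return out, md, nil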
+ HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( + out DeserializeOutput, metadata Metadata, err error, + ) +} + +// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID +// provided, and the func to be invoked. +func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware { + return deserializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type deserializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, DeserializeInput, DeserializeHandler) ( + DeserializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s deserializeMiddlewareFunc) ID() string { return s.id } + +// HandleDeserialize invokes the middleware Fn. +func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( + out DeserializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ DeserializeMiddleware = (deserializeMiddlewareFunc{}) + +// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be +// invoked on a handler. +type DeserializeStep struct { + ids *orderedIDs +} + +// NewDeserializeStep returns a DeserializeStep ready to have middleware for +// initialization added to it. +func NewDeserializeStep() *DeserializeStep { + return &DeserializeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*DeserializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *DeserializeStep) ID() string { + return "Deserialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h DeserializeHandler = deserializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedDeserializeHandler{ + Next: h, + With: order[i].(DeserializeMiddleware), + } + } + + sIn := DeserializeInput{ + Request: in, + } + + res, metadata, err := h.HandleDeserialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(DeserializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. 
+func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) {
+	removed, err := s.ids.Swap(id, m)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(DeserializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(DeserializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *DeserializeStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *DeserializeStep) Clear() {
+	s.ids.Clear()
+}
+
+type deserializeWrapHandler struct {
+	Next Handler
+}
+
+var _ DeserializeHandler = (*deserializeWrapHandler)(nil)
+
+// HandleDeserialize implements DeserializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	resp, metadata, err := w.Next.Handle(ctx, in.Request)
+	return DeserializeOutput{
+		RawResponse: resp,
+	}, metadata, err
+}
+
+type decoratedDeserializeHandler struct {
+	Next DeserializeHandler
+	With DeserializeMiddleware
+}
+
+var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil)
+
+func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleDeserialize(ctx, in, h.Next)
+}
+
+// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler.
+type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error)
+
+// HandleDeserialize invokes the wrapped function with the given arguments.
+func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) {
+	return d(ctx, in)
+}
+
+var _ DeserializeHandler = DeserializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
new file mode 100644
index 000000000..065e3885d
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
@@ -0,0 +1,211 @@
+package middleware
+
+import "context"
+
+// FinalizeInput provides the input parameters for the FinalizeMiddleware to
+// consume. FinalizeMiddleware may modify the Request value before forwarding
+// the FinalizeInput along to the next FinalizeHandler.
+type FinalizeInput struct {
+	Request interface{}
+}
+
+// FinalizeOutput provides the result returned by the next FinalizeHandler.
+type FinalizeOutput struct {
+	Result interface{}
+}
+
+// FinalizeHandler provides the interface for the next handler the
+// FinalizeMiddleware will call in the middleware chain.
+type FinalizeHandler interface {
+	HandleFinalize(ctx context.Context, in FinalizeInput) (
+		out FinalizeOutput, metadata Metadata, err error,
+	)
+}
+
+// FinalizeMiddleware provides the interface for middleware specific to the
+// finalize step. Delegates to the next FinalizeHandler for further
+// processing.
+type FinalizeMiddleware interface {
+	// ID returns a unique ID for the middleware in the FinalizeStep. The step does not
+	// allow duplicate IDs.
+ ID() string + + // HandleFinalize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( + out FinalizeOutput, metadata Metadata, err error, + ) +} + +// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID +// provided, and the func to be invoked. +func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware { + return finalizeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type finalizeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, FinalizeInput, FinalizeHandler) ( + FinalizeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s finalizeMiddlewareFunc) ID() string { return s.id } + +// HandleFinalize invokes the middleware Fn. +func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( + out FinalizeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ FinalizeMiddleware = (finalizeMiddlewareFunc{}) + +// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be +// invoked on a handler. +type FinalizeStep struct { + ids *orderedIDs +} + +// NewFinalizeStep returns a FinalizeStep ready to have middleware for +// initialization added to it. +func NewFinalizeStep() *FinalizeStep { + return &FinalizeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*FinalizeStep)(nil) + +// ID returns the unique id of the step as a middleware. +func (s *FinalizeStep) ID() string { + return "Finalize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h FinalizeHandler = finalizeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedFinalizeHandler{ + Next: h, + With: order[i].(FinalizeMiddleware), + } + } + + sIn := FinalizeInput{ + Request: in, + } + + res, metadata, err := h.HandleFinalize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(FinalizeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. 
+// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(FinalizeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(FinalizeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *FinalizeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *FinalizeStep) Clear() { + s.ids.Clear() +} + +type finalizeWrapHandler struct { + Next Handler +} + +var _ FinalizeHandler = (*finalizeWrapHandler)(nil) + +// HandleFinalize implements FinalizeHandler, converts types and delegates to underlying +// generic handler. +func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return FinalizeOutput{ + Result: res, + }, metadata, err +} + +type decoratedFinalizeHandler struct { + Next FinalizeHandler + With FinalizeMiddleware +} + +var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil) + +func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, +) { + return h.With.HandleFinalize(ctx, in, h.Next) +} + +// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler. +type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error) + +// HandleFinalize invokes the wrapped function with the given arguments. +func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) { + return f(ctx, in) +} + +var _ FinalizeHandler = FinalizeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go new file mode 100644 index 000000000..fe359144d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go @@ -0,0 +1,211 @@ +package middleware + +import "context" + +// InitializeInput wraps the input parameters for the InitializeMiddlewares to +// consume. InitializeMiddleware may modify the parameter value before +// forwarding it along to the next InitializeHandler. +type InitializeInput struct { + Parameters interface{} +} + +// InitializeOutput provides the result returned by the next InitializeHandler. +type InitializeOutput struct { + Result interface{} +} + +// InitializeHandler provides the interface for the next handler the +// InitializeMiddleware will call in the middleware chain. +type InitializeHandler interface { + HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, + ) +} + +// InitializeMiddleware provides the interface for middleware specific to the +// initialize step. Delegates to the next InitializeHandler for further +// processing. +type InitializeMiddleware interface { + // ID returns a unique ID for the middleware in the InitializeStep. The step does not + // allow duplicate IDs. 
+ ID() string + + // HandleInitialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, + ) +} + +// InitializeMiddlewareFunc returns a InitializeMiddleware with the unique ID provided, +// and the func to be invoked. +func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware { + return initializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type initializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, InitializeInput, InitializeHandler) ( + InitializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s initializeMiddlewareFunc) ID() string { return s.id } + +// HandleInitialize invokes the middleware Fn. +func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ InitializeMiddleware = (initializeMiddlewareFunc{}) + +// InitializeStep provides the ordered grouping of InitializeMiddleware to be +// invoked on a handler. +type InitializeStep struct { + ids *orderedIDs +} + +// NewInitializeStep returns an InitializeStep ready to have middleware for +// initialization added to it. +func NewInitializeStep() *InitializeStep { + return &InitializeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*InitializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *InitializeStep) ID() string { + return "Initialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h InitializeHandler = initializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedInitializeHandler{ + Next: h, + With: order[i].(InitializeMiddleware), + } + } + + sIn := InitializeInput{ + Parameters: in, + } + + res, metadata, err := h.HandleInitialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(InitializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. 
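The Insert implementation continues directly below. To make the Add/Insert ordering rules concrete, here is a hedged sketch, again assuming middleware.NewStack from this package's stack.go; the IDs in the comments are the ones just registered:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// noop builds a pass-through InitializeMiddleware, used only to show ordering.
func noop(id string) middleware.InitializeMiddleware {
	return middleware.InitializeMiddlewareFunc(id,
		func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
			middleware.InitializeOutput, middleware.Metadata, error,
		) {
			return next.HandleInitialize(ctx, in)
		})
}

func main() {
	stack := middleware.NewStack("example", func() interface{} { return struct{}{} })

	_ = stack.Initialize.Add(noop("A"), middleware.After)         // step order: A
	_ = stack.Initialize.Add(noop("C"), middleware.After)         // step order: A, C
	_ = stack.Initialize.Insert(noop("B"), "A", middleware.After) // step order: A, B, C

	fmt.Println(stack.Initialize.List()) // [A B C]
}
```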
+func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(InitializeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(InitializeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *InitializeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *InitializeStep) Clear() { + s.ids.Clear() +} + +type initializeWrapHandler struct { + Next Handler +} + +var _ InitializeHandler = (*initializeWrapHandler)(nil) + +// HandleInitialize implements InitializeHandler, converts types and delegates to underlying +// generic handler. +func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Parameters) + return InitializeOutput{ + Result: res, + }, metadata, err +} + +type decoratedInitializeHandler struct { + Next InitializeHandler + With InitializeMiddleware +} + +var _ InitializeHandler = (*decoratedInitializeHandler)(nil) + +func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, +) { + return h.With.HandleInitialize(ctx, in, h.Next) +} + +// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler. +type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error) + +// HandleInitialize calls the wrapped function with the provided arguments. +func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) { + return i(ctx, in) +} + +var _ InitializeHandler = InitializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go new file mode 100644 index 000000000..114bafced --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go @@ -0,0 +1,219 @@ +package middleware + +import "context" + +// SerializeInput provides the input parameters for the SerializeMiddleware to +// consume. SerializeMiddleware may modify the Request value before forwarding +// SerializeInput along to the next SerializeHandler. The Parameters member +// should not be modified by SerializeMiddleware, InitializeMiddleware should +// be responsible for modifying the provided Parameter value. +type SerializeInput struct { + Parameters interface{} + Request interface{} +} + +// SerializeOutput provides the result returned by the next SerializeHandler. +type SerializeOutput struct { + Result interface{} +} + +// SerializeHandler provides the interface for the next handler the +// SerializeMiddleware will call in the middleware chain. 
+type SerializeHandler interface { + HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, + ) +} + +// SerializeMiddleware provides the interface for middleware specific to the +// serialize step. Delegates to the next SerializeHandler for further +// processing. +type SerializeMiddleware interface { + // ID returns a unique ID for the middleware in the SerializeStep. The step does not + // allow duplicate IDs. + ID() string + + // HandleSerialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( + out SerializeOutput, metadata Metadata, err error, + ) +} + +// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID +// provided, and the func to be invoked. +func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware { + return serializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type serializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, SerializeInput, SerializeHandler) ( + SerializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s serializeMiddlewareFunc) ID() string { return s.id } + +// HandleSerialize invokes the middleware Fn. +func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( + out SerializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ SerializeMiddleware = (serializeMiddlewareFunc{}) + +// SerializeStep provides the ordered grouping of SerializeMiddleware to be +// invoked on a handler. +type SerializeStep struct { + newRequest func() interface{} + ids *orderedIDs +} + +// NewSerializeStep returns a SerializeStep ready to have middleware for +// initialization added to it. The newRequest func parameter is used to +// initialize the transport specific request for the stack SerializeStep to +// serialize the input parameters into. +func NewSerializeStep(newRequest func() interface{}) *SerializeStep { + return &SerializeStep{ + ids: newOrderedIDs(), + newRequest: newRequest, + } +} + +var _ Middleware = (*SerializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *SerializeStep) ID() string { + return "Serialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h SerializeHandler = serializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedSerializeHandler{ + Next: h, + With: order[i].(SerializeMiddleware), + } + } + + sIn := SerializeInput{ + Parameters: in, + Request: s.newRequest(), + } + + res, metadata, err := h.HandleSerialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. 
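The SerializeStep's Get accessor continues below. To show how the step's newRequest constructor and the generic Handler chain compose end to end, a hedged sketch follows; middleware.NewStack and middleware.DecorateHandler are assumed from this package's stack.go and middleware.go, which are not part of this hunk:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// fakeRequest stands in for a transport-specific request type.
type fakeRequest struct{ Body string }

// terminal is a stand-in transport handler at the bottom of the chain.
type terminal struct{}

func (terminal) Handle(ctx context.Context, in interface{}) (interface{}, middleware.Metadata, error) {
	return fmt.Sprintf("sent %q", in.(*fakeRequest).Body), middleware.Metadata{}, nil
}

func main() {
	// The constructor passed here becomes SerializeInput.Request.
	stack := middleware.NewStack("example", func() interface{} { return &fakeRequest{} })

	_ = stack.Serialize.Add(middleware.SerializeMiddlewareFunc("FillBody",
		func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
			middleware.SerializeOutput, middleware.Metadata, error,
		) {
			// Serialize the operation parameters into the request object.
			in.Request.(*fakeRequest).Body = fmt.Sprint(in.Parameters)
			return next.HandleSerialize(ctx, in)
		}), middleware.After)

	// Without a deserialize middleware, DeserializeOutput.Result stays nil,
	// so promote the raw transport response to the operation result.
	_ = stack.Deserialize.Add(middleware.DeserializeMiddlewareFunc("UnwrapResponse",
		func(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
			middleware.DeserializeOutput, middleware.Metadata, error,
		) {
			out, md, err := next.HandleDeserialize(ctx, in)
			out.Result = out.RawResponse
			return out, md, err
		}), middleware.After)

	h := middleware.DecorateHandler(terminal{}, stack)
	out, _, err := h.Handle(context.Background(), "hello")
	fmt.Println(out, err) // sent "hello" <nil>
}
```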
+func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(SerializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(SerializeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(SerializeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *SerializeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *SerializeStep) Clear() { + s.ids.Clear() +} + +type serializeWrapHandler struct { + Next Handler +} + +var _ SerializeHandler = (*serializeWrapHandler)(nil) + +// Implements SerializeHandler, converts types and delegates to underlying +// generic handler. +func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return SerializeOutput{ + Result: res, + }, metadata, err +} + +type decoratedSerializeHandler struct { + Next SerializeHandler + With SerializeMiddleware +} + +var _ SerializeHandler = (*decoratedSerializeHandler)(nil) + +func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, +) { + return h.With.HandleSerialize(ctx, in, h.Next) +} + +// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler. +type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error) + +// HandleSerialize calls the wrapped function with the provided arguments. 
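The HandleSerialize method documented above follows below. A short hedged sketch of managing a step's contents by ID with the Get, Swap, and List methods defined here, with stack setup as in the previous sketches:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

func passthrough(id string) middleware.SerializeMiddleware {
	return middleware.SerializeMiddlewareFunc(id,
		func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
			middleware.SerializeOutput, middleware.Metadata, error,
		) {
			return next.HandleSerialize(ctx, in)
		})
}

func main() {
	stack := middleware.NewStack("example", func() interface{} { return struct{}{} })
	_ = stack.Serialize.Add(passthrough("FillBody"), middleware.After)

	// Look up a middleware by ID, replace it in place, and list the step.
	if _, ok := stack.Serialize.Get("FillBody"); ok {
		old, err := stack.Serialize.Swap("FillBody", passthrough("FillBody"))
		fmt.Println(old.ID(), err) // FillBody <nil>
	}
	fmt.Println(stack.Serialize.List()) // [FillBody]
}
```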
+func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) { + return s(ctx, in) +} + +var _ SerializeHandler = SerializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml new file mode 100644 index 000000000..20295cdd2 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/modman.toml @@ -0,0 +1,11 @@ +[dependencies] + "github.com/google/go-cmp" = "v0.5.8" + "github.com/jmespath/go-jmespath" = "v0.4.0" + +[modules] + + [modules.codegen] + no_tag = true + + [modules."codegen/smithy-go-codegen/build/test-generated/go/internal/testmodule"] + no_tag = true diff --git a/vendor/github.com/aws/smithy-go/ptr/doc.go b/vendor/github.com/aws/smithy-go/ptr/doc.go new file mode 100644 index 000000000..bc1f69961 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/doc.go @@ -0,0 +1,5 @@ +// Package ptr provides utilities for converting scalar literal type values to and from pointers inline. +package ptr + +//go:generate go run -tags codegen generate.go +//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go new file mode 100644 index 000000000..a2845bb2c --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go @@ -0,0 +1,601 @@ +// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. +package ptr + +import ( + "time" +) + +// ToBool returns bool value dereferenced if the passed +// in pointer was not nil. Returns a bool zero value if the +// pointer was nil. +func ToBool(p *bool) (v bool) { + if p == nil { + return v + } + + return *p +} + +// ToBoolSlice returns a slice of bool values, that are +// dereferenced if the passed in pointer was not nil. Returns a bool +// zero value if the pointer was nil. +func ToBoolSlice(vs []*bool) []bool { + ps := make([]bool, len(vs)) + for i, v := range vs { + ps[i] = ToBool(v) + } + + return ps +} + +// ToBoolMap returns a map of bool values, that are +// dereferenced if the passed in pointer was not nil. The bool +// zero value is used if the pointer was nil. +func ToBoolMap(vs map[string]*bool) map[string]bool { + ps := make(map[string]bool, len(vs)) + for k, v := range vs { + ps[k] = ToBool(v) + } + + return ps +} + +// ToByte returns byte value dereferenced if the passed +// in pointer was not nil. Returns a byte zero value if the +// pointer was nil. +func ToByte(p *byte) (v byte) { + if p == nil { + return v + } + + return *p +} + +// ToByteSlice returns a slice of byte values, that are +// dereferenced if the passed in pointer was not nil. Returns a byte +// zero value if the pointer was nil. +func ToByteSlice(vs []*byte) []byte { + ps := make([]byte, len(vs)) + for i, v := range vs { + ps[i] = ToByte(v) + } + + return ps +} + +// ToByteMap returns a map of byte values, that are +// dereferenced if the passed in pointer was not nil. The byte +// zero value is used if the pointer was nil. +func ToByteMap(vs map[string]*byte) map[string]byte { + ps := make(map[string]byte, len(vs)) + for k, v := range vs { + ps[k] = ToByte(v) + } + + return ps +} + +// ToString returns string value dereferenced if the passed +// in pointer was not nil. Returns a string zero value if the +// pointer was nil. +func ToString(p *string) (v string) { + if p == nil { + return v + } + + return *p +} + +// ToStringSlice returns a slice of string values, that are +// dereferenced if the passed in pointer was not nil. 
Returns a string +// zero value if the pointer was nil. +func ToStringSlice(vs []*string) []string { + ps := make([]string, len(vs)) + for i, v := range vs { + ps[i] = ToString(v) + } + + return ps +} + +// ToStringMap returns a map of string values, that are +// dereferenced if the passed in pointer was not nil. The string +// zero value is used if the pointer was nil. +func ToStringMap(vs map[string]*string) map[string]string { + ps := make(map[string]string, len(vs)) + for k, v := range vs { + ps[k] = ToString(v) + } + + return ps +} + +// ToInt returns int value dereferenced if the passed +// in pointer was not nil. Returns a int zero value if the +// pointer was nil. +func ToInt(p *int) (v int) { + if p == nil { + return v + } + + return *p +} + +// ToIntSlice returns a slice of int values, that are +// dereferenced if the passed in pointer was not nil. Returns a int +// zero value if the pointer was nil. +func ToIntSlice(vs []*int) []int { + ps := make([]int, len(vs)) + for i, v := range vs { + ps[i] = ToInt(v) + } + + return ps +} + +// ToIntMap returns a map of int values, that are +// dereferenced if the passed in pointer was not nil. The int +// zero value is used if the pointer was nil. +func ToIntMap(vs map[string]*int) map[string]int { + ps := make(map[string]int, len(vs)) + for k, v := range vs { + ps[k] = ToInt(v) + } + + return ps +} + +// ToInt8 returns int8 value dereferenced if the passed +// in pointer was not nil. Returns a int8 zero value if the +// pointer was nil. +func ToInt8(p *int8) (v int8) { + if p == nil { + return v + } + + return *p +} + +// ToInt8Slice returns a slice of int8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int8 +// zero value if the pointer was nil. +func ToInt8Slice(vs []*int8) []int8 { + ps := make([]int8, len(vs)) + for i, v := range vs { + ps[i] = ToInt8(v) + } + + return ps +} + +// ToInt8Map returns a map of int8 values, that are +// dereferenced if the passed in pointer was not nil. The int8 +// zero value is used if the pointer was nil. +func ToInt8Map(vs map[string]*int8) map[string]int8 { + ps := make(map[string]int8, len(vs)) + for k, v := range vs { + ps[k] = ToInt8(v) + } + + return ps +} + +// ToInt16 returns int16 value dereferenced if the passed +// in pointer was not nil. Returns a int16 zero value if the +// pointer was nil. +func ToInt16(p *int16) (v int16) { + if p == nil { + return v + } + + return *p +} + +// ToInt16Slice returns a slice of int16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int16 +// zero value if the pointer was nil. +func ToInt16Slice(vs []*int16) []int16 { + ps := make([]int16, len(vs)) + for i, v := range vs { + ps[i] = ToInt16(v) + } + + return ps +} + +// ToInt16Map returns a map of int16 values, that are +// dereferenced if the passed in pointer was not nil. The int16 +// zero value is used if the pointer was nil. +func ToInt16Map(vs map[string]*int16) map[string]int16 { + ps := make(map[string]int16, len(vs)) + for k, v := range vs { + ps[k] = ToInt16(v) + } + + return ps +} + +// ToInt32 returns int32 value dereferenced if the passed +// in pointer was not nil. Returns a int32 zero value if the +// pointer was nil. +func ToInt32(p *int32) (v int32) { + if p == nil { + return v + } + + return *p +} + +// ToInt32Slice returns a slice of int32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int32 +// zero value if the pointer was nil. 
+func ToInt32Slice(vs []*int32) []int32 { + ps := make([]int32, len(vs)) + for i, v := range vs { + ps[i] = ToInt32(v) + } + + return ps +} + +// ToInt32Map returns a map of int32 values, that are +// dereferenced if the passed in pointer was not nil. The int32 +// zero value is used if the pointer was nil. +func ToInt32Map(vs map[string]*int32) map[string]int32 { + ps := make(map[string]int32, len(vs)) + for k, v := range vs { + ps[k] = ToInt32(v) + } + + return ps +} + +// ToInt64 returns int64 value dereferenced if the passed +// in pointer was not nil. Returns a int64 zero value if the +// pointer was nil. +func ToInt64(p *int64) (v int64) { + if p == nil { + return v + } + + return *p +} + +// ToInt64Slice returns a slice of int64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int64 +// zero value if the pointer was nil. +func ToInt64Slice(vs []*int64) []int64 { + ps := make([]int64, len(vs)) + for i, v := range vs { + ps[i] = ToInt64(v) + } + + return ps +} + +// ToInt64Map returns a map of int64 values, that are +// dereferenced if the passed in pointer was not nil. The int64 +// zero value is used if the pointer was nil. +func ToInt64Map(vs map[string]*int64) map[string]int64 { + ps := make(map[string]int64, len(vs)) + for k, v := range vs { + ps[k] = ToInt64(v) + } + + return ps +} + +// ToUint returns uint value dereferenced if the passed +// in pointer was not nil. Returns a uint zero value if the +// pointer was nil. +func ToUint(p *uint) (v uint) { + if p == nil { + return v + } + + return *p +} + +// ToUintSlice returns a slice of uint values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint +// zero value if the pointer was nil. +func ToUintSlice(vs []*uint) []uint { + ps := make([]uint, len(vs)) + for i, v := range vs { + ps[i] = ToUint(v) + } + + return ps +} + +// ToUintMap returns a map of uint values, that are +// dereferenced if the passed in pointer was not nil. The uint +// zero value is used if the pointer was nil. +func ToUintMap(vs map[string]*uint) map[string]uint { + ps := make(map[string]uint, len(vs)) + for k, v := range vs { + ps[k] = ToUint(v) + } + + return ps +} + +// ToUint8 returns uint8 value dereferenced if the passed +// in pointer was not nil. Returns a uint8 zero value if the +// pointer was nil. +func ToUint8(p *uint8) (v uint8) { + if p == nil { + return v + } + + return *p +} + +// ToUint8Slice returns a slice of uint8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint8 +// zero value if the pointer was nil. +func ToUint8Slice(vs []*uint8) []uint8 { + ps := make([]uint8, len(vs)) + for i, v := range vs { + ps[i] = ToUint8(v) + } + + return ps +} + +// ToUint8Map returns a map of uint8 values, that are +// dereferenced if the passed in pointer was not nil. The uint8 +// zero value is used if the pointer was nil. +func ToUint8Map(vs map[string]*uint8) map[string]uint8 { + ps := make(map[string]uint8, len(vs)) + for k, v := range vs { + ps[k] = ToUint8(v) + } + + return ps +} + +// ToUint16 returns uint16 value dereferenced if the passed +// in pointer was not nil. Returns a uint16 zero value if the +// pointer was nil. +func ToUint16(p *uint16) (v uint16) { + if p == nil { + return v + } + + return *p +} + +// ToUint16Slice returns a slice of uint16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint16 +// zero value if the pointer was nil. 
+func ToUint16Slice(vs []*uint16) []uint16 { + ps := make([]uint16, len(vs)) + for i, v := range vs { + ps[i] = ToUint16(v) + } + + return ps +} + +// ToUint16Map returns a map of uint16 values, that are +// dereferenced if the passed in pointer was not nil. The uint16 +// zero value is used if the pointer was nil. +func ToUint16Map(vs map[string]*uint16) map[string]uint16 { + ps := make(map[string]uint16, len(vs)) + for k, v := range vs { + ps[k] = ToUint16(v) + } + + return ps +} + +// ToUint32 returns uint32 value dereferenced if the passed +// in pointer was not nil. Returns a uint32 zero value if the +// pointer was nil. +func ToUint32(p *uint32) (v uint32) { + if p == nil { + return v + } + + return *p +} + +// ToUint32Slice returns a slice of uint32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint32 +// zero value if the pointer was nil. +func ToUint32Slice(vs []*uint32) []uint32 { + ps := make([]uint32, len(vs)) + for i, v := range vs { + ps[i] = ToUint32(v) + } + + return ps +} + +// ToUint32Map returns a map of uint32 values, that are +// dereferenced if the passed in pointer was not nil. The uint32 +// zero value is used if the pointer was nil. +func ToUint32Map(vs map[string]*uint32) map[string]uint32 { + ps := make(map[string]uint32, len(vs)) + for k, v := range vs { + ps[k] = ToUint32(v) + } + + return ps +} + +// ToUint64 returns uint64 value dereferenced if the passed +// in pointer was not nil. Returns a uint64 zero value if the +// pointer was nil. +func ToUint64(p *uint64) (v uint64) { + if p == nil { + return v + } + + return *p +} + +// ToUint64Slice returns a slice of uint64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint64 +// zero value if the pointer was nil. +func ToUint64Slice(vs []*uint64) []uint64 { + ps := make([]uint64, len(vs)) + for i, v := range vs { + ps[i] = ToUint64(v) + } + + return ps +} + +// ToUint64Map returns a map of uint64 values, that are +// dereferenced if the passed in pointer was not nil. The uint64 +// zero value is used if the pointer was nil. +func ToUint64Map(vs map[string]*uint64) map[string]uint64 { + ps := make(map[string]uint64, len(vs)) + for k, v := range vs { + ps[k] = ToUint64(v) + } + + return ps +} + +// ToFloat32 returns float32 value dereferenced if the passed +// in pointer was not nil. Returns a float32 zero value if the +// pointer was nil. +func ToFloat32(p *float32) (v float32) { + if p == nil { + return v + } + + return *p +} + +// ToFloat32Slice returns a slice of float32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float32 +// zero value if the pointer was nil. +func ToFloat32Slice(vs []*float32) []float32 { + ps := make([]float32, len(vs)) + for i, v := range vs { + ps[i] = ToFloat32(v) + } + + return ps +} + +// ToFloat32Map returns a map of float32 values, that are +// dereferenced if the passed in pointer was not nil. The float32 +// zero value is used if the pointer was nil. +func ToFloat32Map(vs map[string]*float32) map[string]float32 { + ps := make(map[string]float32, len(vs)) + for k, v := range vs { + ps[k] = ToFloat32(v) + } + + return ps +} + +// ToFloat64 returns float64 value dereferenced if the passed +// in pointer was not nil. Returns a float64 zero value if the +// pointer was nil. +func ToFloat64(p *float64) (v float64) { + if p == nil { + return v + } + + return *p +} + +// ToFloat64Slice returns a slice of float64 values, that are +// dereferenced if the passed in pointer was not nil. 
Returns a float64 +// zero value if the pointer was nil. +func ToFloat64Slice(vs []*float64) []float64 { + ps := make([]float64, len(vs)) + for i, v := range vs { + ps[i] = ToFloat64(v) + } + + return ps +} + +// ToFloat64Map returns a map of float64 values, that are +// dereferenced if the passed in pointer was not nil. The float64 +// zero value is used if the pointer was nil. +func ToFloat64Map(vs map[string]*float64) map[string]float64 { + ps := make(map[string]float64, len(vs)) + for k, v := range vs { + ps[k] = ToFloat64(v) + } + + return ps +} + +// ToTime returns time.Time value dereferenced if the passed +// in pointer was not nil. Returns a time.Time zero value if the +// pointer was nil. +func ToTime(p *time.Time) (v time.Time) { + if p == nil { + return v + } + + return *p +} + +// ToTimeSlice returns a slice of time.Time values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Time +// zero value if the pointer was nil. +func ToTimeSlice(vs []*time.Time) []time.Time { + ps := make([]time.Time, len(vs)) + for i, v := range vs { + ps[i] = ToTime(v) + } + + return ps +} + +// ToTimeMap returns a map of time.Time values, that are +// dereferenced if the passed in pointer was not nil. The time.Time +// zero value is used if the pointer was nil. +func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { + ps := make(map[string]time.Time, len(vs)) + for k, v := range vs { + ps[k] = ToTime(v) + } + + return ps +} + +// ToDuration returns time.Duration value dereferenced if the passed +// in pointer was not nil. Returns a time.Duration zero value if the +// pointer was nil. +func ToDuration(p *time.Duration) (v time.Duration) { + if p == nil { + return v + } + + return *p +} + +// ToDurationSlice returns a slice of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Duration +// zero value if the pointer was nil. +func ToDurationSlice(vs []*time.Duration) []time.Duration { + ps := make([]time.Duration, len(vs)) + for i, v := range vs { + ps[i] = ToDuration(v) + } + + return ps +} + +// ToDurationMap returns a map of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. The time.Duration +// zero value is used if the pointer was nil. +func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { + ps := make(map[string]time.Duration, len(vs)) + for k, v := range vs { + ps[k] = ToDuration(v) + } + + return ps +} diff --git a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go new file mode 100644 index 000000000..97f01011e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go @@ -0,0 +1,83 @@ +//go:build codegen +// +build codegen + +package ptr + +import "strings" + +func GetScalars() Scalars { + return Scalars{ + {Type: "bool"}, + {Type: "byte"}, + {Type: "string"}, + {Type: "int"}, + {Type: "int8"}, + {Type: "int16"}, + {Type: "int32"}, + {Type: "int64"}, + {Type: "uint"}, + {Type: "uint8"}, + {Type: "uint16"}, + {Type: "uint32"}, + {Type: "uint64"}, + {Type: "float32"}, + {Type: "float64"}, + {Type: "Time", Import: &Import{Path: "time"}}, + {Type: "Duration", Import: &Import{Path: "time"}}, + } +} + +// Import provides the import path and optional alias +type Import struct { + Path string + Alias string +} + +// Package returns the Go package name for the import. Returns alias if set. 
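Stepping back from the generator scaffolding for a moment (the Package method documented above continues below): the To* helpers in from_ptr.go and the pointer constructors in to_ptr.go further down are used together roughly as in this hedged sketch:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/ptr"
)

func main() {
	// To* helpers collapse possibly nil pointers to zero values, the usual
	// way optional API fields are read.
	var missing *string
	name := "landscaper"
	fmt.Println(ptr.ToString(&name))         // landscaper
	fmt.Println(ptr.ToString(missing) == "") // true

	// Slice and map variants apply the same rule element-wise; ptr.Int64 is
	// one of the pointer constructors from to_ptr.go below.
	fmt.Println(ptr.ToInt64Slice([]*int64{nil, ptr.Int64(42)})) // [0 42]
}
```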
+func (i Import) Package() string { + if v := i.Alias; len(v) != 0 { + return v + } + + if v := i.Path; len(v) != 0 { + parts := strings.Split(v, "/") + pkg := parts[len(parts)-1] + return pkg + } + + return "" +} + +// Scalar provides the definition of a type to generate pointer utilities for. +type Scalar struct { + Type string + Import *Import +} + +// Name returns the exported function name for the type. +func (t Scalar) Name() string { + return strings.Title(t.Type) +} + +// Symbol returns the scalar's Go symbol with path if needed. +func (t Scalar) Symbol() string { + if t.Import != nil { + return t.Import.Package() + "." + t.Type + } + return t.Type +} + +// Scalars is a list of scalars. +type Scalars []Scalar + +// Imports returns all imports for the scalars. +func (ts Scalars) Imports() []*Import { + imports := []*Import{} + for _, t := range ts { + if v := t.Import; v != nil { + imports = append(imports, v) + } + } + + return imports +} diff --git a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go new file mode 100644 index 000000000..0bfbbecbd --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go @@ -0,0 +1,499 @@ +// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. +package ptr + +import ( + "time" +) + +// Bool returns a pointer value for the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + ps := make([]*bool, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + ps := make(map[string]*bool, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return &v +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + ps := make([]*byte, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + ps := make(map[string]*byte, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return &v +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + ps := make([]*string, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + ps := make(map[string]*string, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return &v +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + ps := make([]*int, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// IntMap returns a map of int pointers from the values +// passed in. 
+func IntMap(vs map[string]int) map[string]*int { + ps := make(map[string]*int, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + ps := make([]*int8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + ps := make(map[string]*int8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + ps := make([]*int16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + ps := make(map[string]*int16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. +func Int32Slice(vs []int32) []*int32 { + ps := make([]*int32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + ps := make(map[string]*int32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int64 returns a pointer value for the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. +func Int64Slice(vs []int64) []*int64 { + ps := make([]*int64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + ps := make(map[string]*int64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + ps := make([]*uint, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + ps := make(map[string]*uint, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + ps := make([]*uint8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. 
+func Uint8Map(vs map[string]uint8) map[string]*uint8 { + ps := make(map[string]*uint8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + ps := make([]*uint16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + ps := make(map[string]*uint16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + ps := make([]*uint32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + ps := make(map[string]*uint32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. +func Uint64Slice(vs []uint64) []*uint64 { + ps := make([]*uint64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + ps := make(map[string]*uint64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + ps := make([]*float32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + ps := make(map[string]*float32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + ps := make([]*float64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. +func Float64Map(vs map[string]float64) map[string]*float64 { + ps := make(map[string]*float64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Time returns a pointer value for the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeSlice returns a slice of time.Time pointers from the values +// passed in. 
+func TimeSlice(vs []time.Time) []*time.Time { + ps := make([]*time.Time, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// TimeMap returns a map of time.Time pointers from the values +// passed in. +func TimeMap(vs map[string]time.Time) map[string]*time.Time { + ps := make(map[string]*time.Time, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Duration returns a pointer value for the time.Duration value passed in. +func Duration(v time.Duration) *time.Duration { + return &v +} + +// DurationSlice returns a slice of time.Duration pointers from the values +// passed in. +func DurationSlice(vs []time.Duration) []*time.Duration { + ps := make([]*time.Duration, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// DurationMap returns a map of time.Duration pointers from the values +// passed in. +func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { + ps := make(map[string]*time.Duration, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} diff --git a/vendor/github.com/aws/smithy-go/rand/doc.go b/vendor/github.com/aws/smithy-go/rand/doc.go new file mode 100644 index 000000000..f8b25d562 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/rand/doc.go @@ -0,0 +1,3 @@ +// Package rand provides utilities for creating and working with random value +// generators. +package rand diff --git a/vendor/github.com/aws/smithy-go/rand/rand.go b/vendor/github.com/aws/smithy-go/rand/rand.go new file mode 100644 index 000000000..9c479f62b --- /dev/null +++ b/vendor/github.com/aws/smithy-go/rand/rand.go @@ -0,0 +1,31 @@ +package rand + +import ( + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func init() { + Reader = rand.Reader +} + +// Reader provides a random reader that can reset during testing. +var Reader io.Reader + +// Int63n returns a int64 between zero and value of max, read from an io.Reader source. +func Int63n(reader io.Reader, max int64) (int64, error) { + bi, err := rand.Int(reader, big.NewInt(max)) + if err != nil { + return 0, fmt.Errorf("failed to read random value, %w", err) + } + + return bi.Int64(), nil +} + +// CryptoRandInt63n returns a random int64 between zero and value of max +// obtained from the crypto rand source. +func CryptoRandInt63n(max int64) (int64, error) { + return Int63n(Reader, max) +} diff --git a/vendor/github.com/aws/smithy-go/rand/uuid.go b/vendor/github.com/aws/smithy-go/rand/uuid.go new file mode 100644 index 000000000..dc81cbc68 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/rand/uuid.go @@ -0,0 +1,87 @@ +package rand + +import ( + "encoding/hex" + "io" +) + +const dash byte = '-' + +// UUIDIdempotencyToken provides a utility to get idempotency tokens in the +// UUID format. +type UUIDIdempotencyToken struct { + uuid *UUID +} + +// NewUUIDIdempotencyToken returns a idempotency token provider returning +// tokens in the UUID random format using the reader provided. +func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken { + return &UUIDIdempotencyToken{uuid: NewUUID(r)} +} + +// GetIdempotencyToken returns a random UUID value for Idempotency token. +func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) { + return u.uuid.GetUUID() +} + +// UUID provides computing random UUID version 4 values from a random source +// reader. +type UUID struct { + randSrc io.Reader +} + +// NewUUID returns an initialized UUID value that can be used to retrieve +// random UUID version 4 values. 
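The NewUUID constructor documented above follows directly; wired to crypto/rand it yields version 4 UUIDs, as in this hedged sketch:

```go
package main

import (
	"crypto/rand"
	"fmt"

	smithyrand "github.com/aws/smithy-go/rand"
)

func main() {
	uuid := smithyrand.NewUUID(rand.Reader)
	v, err := uuid.GetUUID()
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // e.g. 82e42f16-b6cc-4d5b-95f5-d403c4befd3d
}
```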
+func NewUUID(r io.Reader) *UUID { + return &UUID{randSrc: r} +} + +// GetUUID returns a random UUID version 4 string representation sourced from the random reader the +// UUID was created with. Returns an error if unable to compute the UUID. +func (r *UUID) GetUUID() (string, error) { + var b [16]byte + if _, err := io.ReadFull(r.randSrc, b[:]); err != nil { + return "", err + } + r.makeUUIDv4(b[:]) + return format(b), nil +} + +// GetBytes returns a byte slice containing a random UUID version 4 sourced from the random reader the +// UUID was created with. Returns an error if unable to compute the UUID. +func (r *UUID) GetBytes() (u []byte, err error) { + u = make([]byte, 16) + if _, err = io.ReadFull(r.randSrc, u); err != nil { + return u, err + } + r.makeUUIDv4(u) + return u, nil +} + +func (r *UUID) makeUUIDv4(u []byte) { + // 13th character is "4" + u[6] = (u[6] & 0x0f) | 0x40 // Version 4 + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] & 0x3f) | 0x80 // Variant most significant bits are 10x where x can be either 1 or 0 +} + +// Format returns the canonical text representation of a UUID. +// This implementation is optimized to not use fmt. +// Example: 82e42f16-b6cc-4d5b-95f5-d403c4befd3d +func format(u [16]byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + + var scratch [36]byte + + hex.Encode(scratch[:8], u[0:4]) + scratch[8] = dash + hex.Encode(scratch[9:13], u[4:6]) + scratch[13] = dash + hex.Encode(scratch[14:18], u[6:8]) + scratch[18] = dash + hex.Encode(scratch[19:23], u[8:10]) + scratch[23] = dash + hex.Encode(scratch[24:], u[10:]) + + return string(scratch[:]) +} diff --git a/vendor/github.com/aws/smithy-go/sync/error.go b/vendor/github.com/aws/smithy-go/sync/error.go new file mode 100644 index 000000000..629207672 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/sync/error.go @@ -0,0 +1,53 @@ +package sync + +import "sync" + +// OnceErr wraps the behavior of recording an error +// once and signal on a channel when this has occurred. +// Signaling is done by closing of the channel. +// +// Type is safe for concurrent usage. +type OnceErr struct { + mu sync.RWMutex + err error + ch chan struct{} +} + +// NewOnceErr return a new OnceErr +func NewOnceErr() *OnceErr { + return &OnceErr{ + ch: make(chan struct{}, 1), + } +} + +// Err acquires a read-lock and returns an +// error if one has been set. +func (e *OnceErr) Err() error { + e.mu.RLock() + err := e.err + e.mu.RUnlock() + + return err +} + +// SetError acquires a write-lock and will set +// the underlying error value if one has not been set. +func (e *OnceErr) SetError(err error) { + if err == nil { + return + } + + e.mu.Lock() + if e.err == nil { + e.err = err + close(e.ch) + } + e.mu.Unlock() +} + +// ErrorSet returns a channel that will be used to signal +// that an error has been set. This channel will be closed +// when the error value has been set for OnceErr. 
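ErrorSet's body follows. A hedged sketch of the intended pattern: one goroutine records the first error, and waiters block on the channel being closed:

```go
package main

import (
	"errors"
	"fmt"

	smithysync "github.com/aws/smithy-go/sync"
)

func main() {
	once := smithysync.NewOnceErr()

	go func() {
		// Only the first non-nil error is recorded; later calls are no-ops.
		once.SetError(errors.New("first failure"))
		once.SetError(errors.New("ignored"))
	}()

	<-once.ErrorSet()       // closed as soon as an error is recorded
	fmt.Println(once.Err()) // first failure
}
```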
+func (e *OnceErr) ErrorSet() <-chan struct{} {
+    return e.ch
+}
diff --git a/vendor/github.com/aws/smithy-go/time/time.go b/vendor/github.com/aws/smithy-go/time/time.go
new file mode 100644
index 000000000..b552a09f8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/time/time.go
@@ -0,0 +1,134 @@
+package time
+
+import (
+    "context"
+    "fmt"
+    "math/big"
+    "strings"
+    "time"
+)
+
+const (
+    // dateTimeFormat is a date-time format defined by RFC 3339 section 5.6
+    dateTimeFormatInput    = "2006-01-02T15:04:05.999999999Z"
+    dateTimeFormatInputNoZ = "2006-01-02T15:04:05.999999999"
+    dateTimeFormatOutput   = "2006-01-02T15:04:05.999Z"
+
+    // httpDateFormat is a date-time format defined by RFC 7231#section-7.1.1.1
+    // IMF-fixdate with no UTC offset.
+    httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+    // Additional formats needed for compatibility.
+    httpDateFormatSingleDigitDay             = "Mon, _2 Jan 2006 15:04:05 GMT"
+    httpDateFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+)
+
+var millisecondFloat = big.NewFloat(1e3)
+
+// FormatDateTime formats value as a date-time (RFC 3339 section 5.6).
+//
+// Example: 1985-04-12T23:20:50.52Z
+func FormatDateTime(value time.Time) string {
+    return value.UTC().Format(dateTimeFormatOutput)
+}
+
+// ParseDateTime parses a string as a date-time (RFC 3339 section 5.6).
+//
+// Example: 1985-04-12T23:20:50.52Z
+func ParseDateTime(value string) (time.Time, error) {
+    return tryParse(value,
+        dateTimeFormatInput,
+        dateTimeFormatInputNoZ,
+        time.RFC3339Nano,
+        time.RFC3339,
+    )
+}
+
+// FormatHTTPDate formats value as an http-date (RFC 7231#section-7.1.1.1 IMF-fixdate).
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func FormatHTTPDate(value time.Time) string {
+    return value.UTC().Format(httpDateFormat)
+}
+
+// ParseHTTPDate parses a string as an http-date (RFC 7231#section-7.1.1.1 IMF-fixdate).
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func ParseHTTPDate(value string) (time.Time, error) {
+    return tryParse(value,
+        httpDateFormat,
+        httpDateFormatSingleDigitDay,
+        httpDateFormatSingleDigitDayTwoDigitYear,
+        time.RFC850,
+        time.ANSIC,
+    )
+}
+
+// FormatEpochSeconds returns value as a Unix time in seconds with decimal precision.
+//
+// Example: 1515531081.123
+func FormatEpochSeconds(value time.Time) float64 {
+    ms := value.UnixNano() / int64(time.Millisecond)
+    return float64(ms) / 1e3
+}
+
+// ParseEpochSeconds returns value as a Unix time in seconds with decimal precision.
+//
+// Example: 1515531081.123
+func ParseEpochSeconds(value float64) time.Time {
+    f := big.NewFloat(value)
+    f = f.Mul(f, millisecondFloat)
+    i, _ := f.Int64()
+    // Offset to `UTC` because time.Unix returns the time value based on the
+    // system's local time setting.
+    return time.Unix(0, i*1e6).UTC()
+}
+
+func tryParse(v string, formats ...string) (time.Time, error) {
+    var errs parseErrors
+    for _, f := range formats {
+        t, err := time.Parse(f, v)
+        if err != nil {
+            errs = append(errs, parseError{
+                Format: f,
+                Err:    err,
+            })
+            continue
+        }
+        return t, nil
+    }
+
+    return time.Time{}, fmt.Errorf("unable to parse time string, %w", errs)
+}
+
+type parseErrors []parseError
+
+func (es parseErrors) Error() string {
+    var s strings.Builder
+    for _, e := range es {
+        fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err)
+    }
+
+    return "parse errors:" + s.String()
+}
+
+type parseError struct {
+    Format string
+    Err    error
+}
+
+// SleepWithContext will wait for the timer duration to expire, or until the context
+// is canceled, whichever happens first.
If the context is canceled the +// Context's error will be returned. +func SleepWithContext(ctx context.Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go new file mode 100644 index 000000000..bc4ad6e79 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go @@ -0,0 +1,70 @@ +package http + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go/middleware" +) + +const contentMD5Header = "Content-Md5" + +// contentMD5Checksum provides a middleware to compute and set +// content-md5 checksum for a http request +type contentMD5Checksum struct { +} + +// AddContentChecksumMiddleware adds checksum middleware to middleware's +// build step. +func AddContentChecksumMiddleware(stack *middleware.Stack) error { + // This middleware must be executed before request body is set. + return stack.Build.Add(&contentMD5Checksum{}, middleware.Before) +} + +// ID returns the identifier for the checksum middleware +func (m *contentMD5Checksum) ID() string { return "ContentChecksum" } + +// HandleBuild adds behavior to compute md5 checksum and add content-md5 header +// on http request +func (m *contentMD5Checksum) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + // if Content-MD5 header is already present, return + if v := req.Header.Get(contentMD5Header); len(v) != 0 { + return next.HandleBuild(ctx, in) + } + + // fetch the request stream. + stream := req.GetStream() + // compute checksum if payload is explicit + if stream != nil { + if !req.IsStreamSeekable() { + return out, metadata, fmt.Errorf( + "unseekable stream is not supported for computing md5 checksum") + } + + v, err := computeMD5Checksum(stream) + if err != nil { + return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err) + } + + // reset the request stream + if err := req.RewindStream(); err != nil { + return out, metadata, fmt.Errorf( + "error rewinding request stream after computing md5 checksum, %w", err) + } + + // set the 'Content-MD5' header + req.Header.Set(contentMD5Header, string(v)) + } + + // set md5 header value + return next.HandleBuild(ctx, in) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go new file mode 100644 index 000000000..e691c69bf --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/client.go @@ -0,0 +1,120 @@ +package http + +import ( + "context" + "fmt" + "net/http" + + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +// ClientDo provides the interface for custom HTTP client implementations. +type ClientDo interface { + Do(*http.Request) (*http.Response, error) +} + +// ClientDoFunc provides a helper to wrap a function as an HTTP client for +// round tripping requests. +type ClientDoFunc func(*http.Request) (*http.Response, error) + +// Do will invoke the underlying func, returning the result. 
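Right where ClientDo and ClientDoFunc are introduced, a usage sketch of the adapter pattern (the logging wrapper is illustrative, not part of the vendored code):

package main

import (
	"log"
	"net/http"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	// Adapt a plain function into the ClientDo interface.
	doer := smithyhttp.ClientDoFunc(func(r *http.Request) (*http.Response, error) {
		log.Printf("sending %s %s", r.Method, r.URL)
		return http.DefaultClient.Do(r)
	})

	// The handler is what a middleware stack ultimately invokes.
	handler := smithyhttp.NewClientHandler(doer)
	_ = handler
}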
+func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) {
+    return fn(r)
+}
+
+// ClientHandler wraps a client that implements the HTTP Do method. Standard
+// implementation is http.Client.
+type ClientHandler struct {
+    client ClientDo
+}
+
+// NewClientHandler returns an initialized middleware handler for the client.
+func NewClientHandler(client ClientDo) ClientHandler {
+    return ClientHandler{
+        client: client,
+    }
+}
+
+// Handle implements the middleware Handler interface, and will invoke the
+// underlying HTTP client. Requires the input to be a Smithy *Request. Returns
+// a smithy *Response, or error if the request failed.
+func (c ClientHandler) Handle(ctx context.Context, input interface{}) (
+    out interface{}, metadata middleware.Metadata, err error,
+) {
+    req, ok := input.(*Request)
+    if !ok {
+        return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input)
+    }
+
+    builtRequest := req.Build(ctx)
+    if err := ValidateEndpointHost(builtRequest.Host); err != nil {
+        return nil, metadata, err
+    }
+
+    resp, err := c.client.Do(builtRequest)
+    if resp == nil {
+        // Ensure a http response value is always present to prevent unexpected
+        // panics.
+        resp = &http.Response{
+            Header: http.Header{},
+            Body:   http.NoBody,
+        }
+    }
+    if err != nil {
+        err = &RequestSendError{Err: err}
+
+        // Override the error with a context canceled error, if the context was canceled.
+        select {
+        case <-ctx.Done():
+            err = &smithy.CanceledError{Err: ctx.Err()}
+        default:
+        }
+    }
+
+    // HTTP RoundTripper *should* close the request body. But this may not happen in a timely manner.
+    // So instead Smithy *Request Build wraps the body to be sent in a safe closer that will clear the
+    // stream reference so that it can be safely reused.
+    if builtRequest.Body != nil {
+        _ = builtRequest.Body.Close()
+    }
+
+    return &Response{Response: resp}, metadata, err
+}
+
+// RequestSendError provides a generic request transport error. This error
+// should wrap errors making HTTP client requests.
+//
+// The ClientHandler will wrap the HTTP client's error if the client request
+// fails, and did not fail because of context canceled.
+type RequestSendError struct {
+    Err error
+}
+
+// ConnectionError returns that the error is related to not being able to send
+// the request, or receive a response from the service.
+func (e *RequestSendError) ConnectionError() bool {
+    return true
+}
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestSendError) Unwrap() error {
+    return e.Err
+}
+
+func (e *RequestSendError) Error() string {
+    return fmt.Sprintf("request send failed, %v", e.Err)
+}
+
+// NopClient provides a client that ignores the request, and returns an empty
+// successful HTTP response value.
+type NopClient struct{}
+
+// Do ignores the request and returns a 200 status empty response.
+func (NopClient) Do(r *http.Request) (*http.Response, error) {
+    return &http.Response{
+        StatusCode: 200,
+        Header:     http.Header{},
+        Body:       http.NoBody,
+    }, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/doc.go b/vendor/github.com/aws/smithy-go/transport/http/doc.go
new file mode 100644
index 000000000..07366ac85
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/doc.go
@@ -0,0 +1,5 @@
+/*
+Package http provides the HTTP transport client and request/response types
+needed to round trip API operation calls with a service.
+*/ +package http diff --git a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go new file mode 100644 index 000000000..cbc9deb4d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go @@ -0,0 +1,163 @@ +package http + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) { + values := make([]string, 0, len(vs)) + + for i := 0; i < len(vs); i++ { + parts, err := splitFn(vs[i]) + if err != nil { + return nil, err + } + values = append(values, parts...) + } + + return values, nil +} + +// SplitHeaderListValues attempts to split the elements of the slice by commas, +// and return a list of all values separated. Returns error if unable to +// separate the values. +func SplitHeaderListValues(vs []string) ([]string, error) { + return splitHeaderListValues(vs, quotedCommaSplit) +} + +func quotedCommaSplit(v string) (parts []string, err error) { + v = strings.TrimSpace(v) + + expectMore := true + for i := 0; i < len(v); i++ { + if unicode.IsSpace(rune(v[i])) { + continue + } + expectMore = false + + // leading space in part is ignored. + // Start of value must be non-space, or quote. + // + // - If quote, enter quoted mode, find next non-escaped quote to + // terminate the value. + // - Otherwise, find next comma to terminate value. + + remaining := v[i:] + + var value string + var valueLen int + if remaining[0] == '"' { + //------------------------------ + // Quoted value + //------------------------------ + var j int + var skipQuote bool + for j += 1; j < len(remaining); j++ { + if remaining[j] == '\\' || (remaining[j] != '\\' && skipQuote) { + skipQuote = !skipQuote + continue + } + if remaining[j] == '"' { + break + } + } + if j == len(remaining) || j == 1 { + return nil, fmt.Errorf("value %v missing closing double quote", + remaining) + } + valueLen = j + 1 + + tail := remaining[valueLen:] + var k int + for ; k < len(tail); k++ { + if !unicode.IsSpace(rune(tail[k])) && tail[k] != ',' { + return nil, fmt.Errorf("value %v has non-space trailing characters", + remaining) + } + if tail[k] == ',' { + expectMore = true + break + } + } + value = remaining[:valueLen] + value, err = strconv.Unquote(value) + if err != nil { + return nil, fmt.Errorf("failed to unquote value %v, %w", value, err) + } + + // Pad valueLen to include trailing space(s) so `i` is updated correctly. + valueLen += k + + } else { + //------------------------------ + // Unquoted value + //------------------------------ + + // Index of the next comma is the length of the value, or end of string. + valueLen = strings.Index(remaining, ",") + if valueLen != -1 { + expectMore = true + } else { + valueLen = len(remaining) + } + value = strings.TrimSpace(remaining[:valueLen]) + } + + i += valueLen + parts = append(parts, value) + + } + + if expectMore { + parts = append(parts, "") + } + + return parts, nil +} + +// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date +// timestamp values in the slice by commas, and return a list of all values +// separated. The split is aware of the HTTP-Date timestamp format, and will skip +// comma within the timestamp value. Returns an error if unable to split the +// timestamp values. 
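A sketch of the quoted-comma splitting implemented above; the expected result is shown in a comment:

package main

import (
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	// Quoted elements keep their embedded commas; unquoted ones are split.
	parts, err := smithyhttp.SplitHeaderListValues([]string{`a, "b, c", d`})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", parts) // ["a" "b, c" "d"]
}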
+func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) {
+    return splitHeaderListValues(vs, splitHTTPDateHeaderValue)
+}
+
+func splitHTTPDateHeaderValue(v string) ([]string, error) {
+    if n := strings.Count(v, ","); n <= 1 {
+        // Nothing to do if the value contains no comma, or only a single HTTPDate value
+        return []string{v}, nil
+    } else if n%2 == 0 {
+        return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v)
+    }
+
+    var parts []string
+    var i, j int
+
+    var doSplit bool
+    for ; i < len(v); i++ {
+        if v[i] == ',' {
+            if doSplit {
+                doSplit = false
+                parts = append(parts, strings.TrimSpace(v[j:i]))
+                j = i + 1
+            } else {
+                // Skip the first comma in the timestamp value since that
+                // separates the day from the rest of the timestamp.
+                //
+                // Tue, 17 Dec 2019 23:48:18 GMT
+                doSplit = true
+            }
+        }
+    }
+    // Add final part
+    if j < len(v) {
+        parts = append(parts, strings.TrimSpace(v[j:]))
+    }
+
+    return parts, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go
new file mode 100644
index 000000000..6b290fec0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/host.go
@@ -0,0 +1,89 @@
+package http
+
+import (
+    "fmt"
+    "net"
+    "strconv"
+    "strings"
+)
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns an error if the host is not valid.
+func ValidateEndpointHost(host string) error {
+    var errors strings.Builder
+    var hostname string
+    var port string
+    var err error
+
+    if strings.Contains(host, ":") {
+        hostname, port, err = net.SplitHostPort(host)
+        if err != nil {
+            errors.WriteString(fmt.Sprintf("\n endpoint %v, failed to parse, got ", host))
+            errors.WriteString(err.Error())
+        }
+
+        if !ValidPortNumber(port) {
+            errors.WriteString(fmt.Sprintf("port number should be in range [0-65535], got %v", port))
+        }
+    } else {
+        hostname = host
+    }
+
+    labels := strings.Split(hostname, ".")
+    for i, label := range labels {
+        if i == len(labels)-1 && len(label) == 0 {
+            // Allow trailing dot for FQDN hosts.
+            continue
+        }
+
+        if !ValidHostLabel(label) {
+            errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ")
+            errors.WriteString(label)
+        }
+    }
+
+    if len(hostname) == 0 && len(port) != 0 {
+        errors.WriteString("\nendpoint host with port must not be empty")
+    }
+
+    if len(hostname) > 255 {
+        errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(hostname)))
+    }
+
+    if len(errors.String()) > 0 {
+        return fmt.Errorf("invalid endpoint host%s", errors.String())
+    }
+    return nil
+}
+
+// ValidPortNumber returns whether the port is a valid RFC 3986 port.
+func ValidPortNumber(port string) bool {
+    i, err := strconv.Atoi(port)
+    if err != nil {
+        return false
+    }
+
+    if i < 0 || i > 65535 {
+        return false
+    }
+    return true
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
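A sketch exercising the host validation helpers in this file (the hostnames are illustrative):

package main

import (
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	// A well-formed host with a valid port passes.
	fmt.Println(smithyhttp.ValidateEndpointHost("api.example.com:443")) // <nil>

	// A label with a disallowed character ('_') is rejected.
	fmt.Println(smithyhttp.ValidateEndpointHost("bad_label.example.com") != nil) // true
}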
+func ValidHostLabel(label string) bool {
+    if l := len(label); l == 0 || l > 63 {
+        return false
+    }
+    for _, r := range label {
+        switch {
+        case r >= '0' && r <= '9':
+        case r >= 'A' && r <= 'Z':
+        case r >= 'a' && r <= 'z':
+        case r == '-':
+        default:
+            return false
+        }
+    }
+
+    return true
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
new file mode 100644
index 000000000..941a8d6b5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
@@ -0,0 +1,75 @@
+package io
+
+import (
+    "io"
+    "sync"
+)
+
+// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser.
+func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser {
+    sr := &safeReadCloser{
+        readCloser: readCloser,
+    }
+
+    if _, ok := readCloser.(io.WriterTo); ok {
+        return &safeWriteToReadCloser{safeReadCloser: sr}
+    }
+
+    return sr
+}
+
+// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic
+// if the underlying io.ReadCloser does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this
+// type.
+type safeWriteToReadCloser struct {
+    *safeReadCloser
+}
+
+// WriteTo implements the io.WriterTo interface.
+func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) {
+    r.safeReadCloser.mtx.Lock()
+    defer r.safeReadCloser.mtx.Unlock()
+
+    if r.safeReadCloser.closed {
+        return 0, io.EOF
+    }
+
+    return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w)
+}
+
+// safeReadCloser wraps an io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser
+// the underlying Close method will be executed, and then the reference to the reader will be dropped. This type
+// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime
+// of a goroutine connection. Wrapping in this manner will ensure that no data race conditions are falsely reported.
+// This type is thread-safe.
+type safeReadCloser struct {
+    readCloser io.ReadCloser
+    closed     bool
+    mtx        sync.Mutex
+}
+
+// Read reads up to len(p) bytes into p from the underlying reader. If the reader is closed io.EOF will be returned.
+func (r *safeReadCloser) Read(p []byte) (n int, err error) {
+    r.mtx.Lock()
+    defer r.mtx.Unlock()
+    if r.closed {
+        return 0, io.EOF
+    }
+
+    return r.readCloser.Read(p)
+}
+
+// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error
+// reported from Close. Subsequent calls to Close will always return a nil error.
+func (r *safeReadCloser) Close() error {
+    r.mtx.Lock()
+    defer r.mtx.Unlock()
+    if r.closed {
+        return nil
+    }
+
+    r.closed = true
+    rc := r.readCloser
+    r.readCloser = nil
+    return rc.Close()
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
new file mode 100644
index 000000000..5d6a4b23a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
@@ -0,0 +1,25 @@
+package http
+
+import (
+    "crypto/md5"
+    "encoding/base64"
+    "fmt"
+    "io"
+)
+
+// computeMD5Checksum computes the base64 MD5 checksum of an io.Reader's contents.
+// Returns the byte slice of the MD5 checksum and an error.
+func computeMD5Checksum(r io.Reader) ([]byte, error) {
+    h := md5.New()
+    // copy errors may be assumed to be from the body.
+ _, err := io.Copy(h, r) + if err != nil { + return nil, fmt.Errorf("failed to read body: %w", err) + } + + // encode the md5 checksum in base64. + sum := h.Sum(nil) + sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) + base64.StdEncoding.Encode(sum64, sum) + return sum64, nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go new file mode 100644 index 000000000..1d3b218a1 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go @@ -0,0 +1,79 @@ +package http + +import ( + "context" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + "io" + "io/ioutil" +) + +// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically +// close the response body of an operation request if the request response +// failed. +func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before) +} + +type errorCloseResponseBodyMiddleware struct{} + +func (*errorCloseResponseBodyMiddleware) ID() string { + return "ErrorCloseResponseBody" +} + +func (m *errorCloseResponseBodyMiddleware) HandleDeserialize( + ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + output middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err := next.HandleDeserialize(ctx, input) + if err != nil { + if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil { + // Consume the full body to prevent TCP connection resets on some platforms + _, _ = io.Copy(ioutil.Discard, resp.Body) + // Do not validate that the response closes successfully. + resp.Body.Close() + } + } + + return out, metadata, err +} + +// AddCloseResponseBodyMiddleware adds the middleware to automatically close +// the response body of an operation request, after the response had been +// deserialized. 
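For reference, the value the checksum middleware above writes into the Content-Md5 header can be reproduced standalone like this (the payload text is illustrative):

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

func main() {
	h := md5.New()
	// Copy errors would come from the body reader; a strings.Reader cannot fail here.
	if _, err := io.Copy(h, strings.NewReader("example payload")); err != nil {
		panic(err)
	}
	// Base64-encode the 16-byte digest, as the middleware does before setting the header.
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
}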
+func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+    return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before)
+}
+
+type closeResponseBody struct{}
+
+func (*closeResponseBody) ID() string {
+    return "CloseResponseBody"
+}
+
+func (m *closeResponseBody) HandleDeserialize(
+    ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+    output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+    out, metadata, err := next.HandleDeserialize(ctx, input)
+    if err != nil {
+        return out, metadata, err
+    }
+
+    if resp, ok := out.RawResponse.(*Response); ok {
+        // Consume the full body to prevent TCP connection resets on some platforms
+        _, copyErr := io.Copy(ioutil.Discard, resp.Body)
+        if copyErr != nil {
+            middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse")
+        }
+
+        closeErr := resp.Body.Close()
+        if closeErr != nil {
+            middleware.GetLogger(ctx).Logf(logging.Warn, "failed to close HTTP response body, this may affect connection reuse")
+        }
+    }
+
+    return out, metadata, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
new file mode 100644
index 000000000..9969389bb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
@@ -0,0 +1,84 @@
+package http
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/aws/smithy-go/middleware"
+)
+
+// ComputeContentLength provides a middleware to set the content-length
+// header for the length of a serialized request body.
+type ComputeContentLength struct {
+}
+
+// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware
+// stack's Build step.
+func AddComputeContentLengthMiddleware(stack *middleware.Stack) error {
+    return stack.Build.Add(&ComputeContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the ComputeContentLength.
+func (m *ComputeContentLength) ID() string { return "ComputeContentLength" }
+
+// HandleBuild adds the length of the serialized request to the HTTP header
+// if the length can be determined.
+func (m *ComputeContentLength) HandleBuild(
+    ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+    out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+    req, ok := in.Request.(*Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("unknown request type %T", req)
+    }
+
+    // do nothing if request content-length was set to 0 or above.
+    if req.ContentLength >= 0 {
+        return next.HandleBuild(ctx, in)
+    }
+
+    // attempt to compute stream length
+    if n, ok, err := req.StreamLength(); err != nil {
+        return out, metadata, fmt.Errorf(
+            "failed getting length of request stream, %w", err)
+    } else if ok {
+        req.ContentLength = n
+    }
+
+    return next.HandleBuild(ctx, in)
+}
+
+// validateContentLength provides a middleware to validate that the content-length
+// is valid (at least zero) for the serialized request payload.
+type validateContentLength struct{}
+
+// ValidateContentLengthHeader adds middleware that validates the request content-length
+// is set to a value of at least zero.
+func ValidateContentLengthHeader(stack *middleware.Stack) error {
+    return stack.Build.Add(&validateContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the validateContentLength middleware.
+func (m *validateContentLength) ID() string { return "ValidateContentLength" }
+
+// HandleBuild validates that the content length of the serialized request
+// is at least zero, returning an error otherwise.
+func (m *validateContentLength) HandleBuild(
+    ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+    out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+    req, ok := in.Request.(*Request)
+    if !ok {
+        return out, metadata, fmt.Errorf("unknown request type %T", req)
+    }
+
+    // if request content-length was set to less than 0, return an error
+    if req.ContentLength < 0 {
+        return out, metadata, fmt.Errorf(
+            "content length for payload is required and must be at least 0")
+    }
+
+    return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
new file mode 100644
index 000000000..eac32b4ba
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
@@ -0,0 +1,167 @@
+package http
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/aws/smithy-go/middleware"
+)
+
+type isContentTypeAutoSet struct{}
+
+// SetIsContentTypeDefaultValue returns a Context specifying if the request's
+// content-type header was set to a default value.
+func SetIsContentTypeDefaultValue(ctx context.Context, isDefault bool) context.Context {
+    return context.WithValue(ctx, isContentTypeAutoSet{}, isDefault)
+}
+
+// GetIsContentTypeDefaultValue returns whether the content-type HTTP header on the
+// request is a default value that was auto assigned by an operation
+// serializer. Allows middleware post serialization to know if the content-type
+// was auto set to a default value or not.
+//
+// Also returns false if the Context value was never updated to include if
+// content-type was set to a default value.
+func GetIsContentTypeDefaultValue(ctx context.Context) bool {
+    v, _ := ctx.Value(isContentTypeAutoSet{}).(bool)
+    return v
+}
+
+// AddNoPayloadDefaultContentTypeRemover adds the DefaultContentTypeRemover
+// middleware to the stack after the operation serializer. This middleware will
+// remove the content-type header from the request if it was set as a default
+// value, and no request payload is present.
+//
+// Returns an error if unable to add the middleware.
+func AddNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) {
+    err = stack.Serialize.Insert(removeDefaultContentType{},
+        "OperationSerializer", middleware.After)
+    if err != nil {
+        return fmt.Errorf("failed to add %s serialize middleware, %w",
+            removeDefaultContentType{}.ID(), err)
+    }
+
+    return nil
+}
+
+// RemoveNoPayloadDefaultContentTypeRemover removes the
+// DefaultContentTypeRemover middleware from the stack. Returns an error if
+// unable to remove the middleware.
+func RemoveNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) {
+    _, err = stack.Serialize.Remove(removeDefaultContentType{}.ID())
+    if err != nil {
+        return fmt.Errorf("failed to remove %s serialize middleware, %w",
+            removeDefaultContentType{}.ID(), err)
+
+    }
+    return nil
+}
+
+// removeDefaultContentType provides after-serialization middleware that will
+// remove the content-type header from an HTTP request if the header was set as
+// a default value by the operation serializer, and there is no request payload.
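A sketch of the content-type default-value flag defined above (the context wiring is illustrative):

package main

import (
	"context"
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	ctx := context.Background()
	fmt.Println(smithyhttp.GetIsContentTypeDefaultValue(ctx)) // false: never recorded

	// A serializer that auto-assigned Content-Type would record it like this.
	ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
	fmt.Println(smithyhttp.GetIsContentTypeDefaultValue(ctx)) // true
}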
+type removeDefaultContentType struct{} + +// ID returns the middleware ID +func (removeDefaultContentType) ID() string { return "RemoveDefaultContentType" } + +// HandleSerialize implements the serialization middleware. +func (removeDefaultContentType) HandleSerialize( + ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, meta middleware.Metadata, err error, +) { + req, ok := input.Request.(*Request) + if !ok { + return out, meta, fmt.Errorf( + "unexpected request type %T for removeDefaultContentType middleware", + input.Request) + } + + if GetIsContentTypeDefaultValue(ctx) && req.GetStream() == nil { + req.Header.Del("Content-Type") + input.Request = req + } + + return next.HandleSerialize(ctx, input) +} + +type headerValue struct { + header string + value string + append bool +} + +type headerValueHelper struct { + headerValues []headerValue +} + +func (h *headerValueHelper) addHeaderValue(value headerValue) { + h.headerValues = append(h.headerValues, value) +} + +func (h *headerValueHelper) ID() string { + return "HTTPHeaderHelper" +} + +func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) { + req, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + for _, value := range h.headerValues { + if value.append { + req.Header.Add(value.header, value.value) + } else { + req.Header.Set(value.header, value.value) + } + } + + return next.HandleBuild(ctx, in) +} + +func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) { + id := (*headerValueHelper)(nil).ID() + m, ok := stack.Build.Get(id) + if !ok { + m = &headerValueHelper{} + err := stack.Build.Add(m, middleware.After) + if err != nil { + return nil, err + } + } + + requestUserAgent, ok := m.(*headerValueHelper) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id) + } + + return requestUserAgent, nil +} + +// AddHeaderValue returns a stack mutator that adds the header value pair to header. +// Appends to any existing values if present. +func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error { + return func(stack *middleware.Stack) error { + helper, err := getOrAddHeaderValueHelper(stack) + if err != nil { + return err + } + helper.addHeaderValue(headerValue{header: header, value: value, append: true}) + return nil + } +} + +// SetHeaderValue returns a stack mutator that adds the header value pair to header. +// Replaces any existing values if present. 
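A sketch of installing the header mutators on a stack; middleware.NewStack is the standard smithy-go stack constructor (its use here is an assumption of this sketch), and the header names are invented:

package main

import (
	"fmt"

	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

	// Append a value, keeping any existing ones.
	if err := smithyhttp.AddHeaderValue("X-Custom", "a")(stack); err != nil {
		panic(err)
	}
	// Replace any existing values outright.
	if err := smithyhttp.SetHeaderValue("User-Agent", "example-app/1.0")(stack); err != nil {
		panic(err)
	}
	fmt.Println("header mutators installed")
}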
+func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error { + return func(stack *middleware.Stack) error { + helper, err := getOrAddHeaderValueHelper(stack) + if err != nil { + return err + } + helper.addHeaderValue(headerValue{header: header, value: value, append: false}) + return nil + } +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go new file mode 100644 index 000000000..d5909b0a2 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go @@ -0,0 +1,75 @@ +package http + +import ( + "context" + "fmt" + "net/http/httputil" + + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally +// their respective bodies. Will not perform any logging if none of the options are set. +type RequestResponseLogger struct { + LogRequest bool + LogRequestWithBody bool + + LogResponse bool + LogResponseWithBody bool +} + +// ID is the middleware identifier. +func (r *RequestResponseLogger) ID() string { + return "RequestResponseLogger" +} + +// HandleDeserialize will log the request and response HTTP messages if configured accordingly. +func (r *RequestResponseLogger) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + logger := middleware.GetLogger(ctx) + + if r.LogRequest || r.LogRequestWithBody { + smithyRequest, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in) + } + + rc := smithyRequest.Build(ctx) + reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody) + if err != nil { + return out, metadata, err + } + + logger.Logf(logging.Debug, "Request\n%v", string(reqBytes)) + + if r.LogRequestWithBody { + smithyRequest, err = smithyRequest.SetStream(rc.Body) + if err != nil { + return out, metadata, err + } + in.Request = smithyRequest + } + } + + out, metadata, err = next.HandleDeserialize(ctx, in) + + if (err == nil) && (r.LogResponse || r.LogResponseWithBody) { + smithyResponse, ok := out.RawResponse.(*Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) + } + + respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody) + if err != nil { + return out, metadata, fmt.Errorf("failed to dump response %w", err) + } + + logger.Logf(logging.Debug, "Response\n%v", string(respBytes)) + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go new file mode 100644 index 000000000..d6079b259 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go @@ -0,0 +1,51 @@ +package http + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type ( + hostnameImmutableKey struct{} + hostPrefixDisableKey struct{} +) + +// GetHostnameImmutable retrieves whether the endpoint hostname should be considered +// immutable or not. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. 
+func GetHostnameImmutable(ctx context.Context) (v bool) {
+    v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool)
+    return v
+}
+
+// SetHostnameImmutable sets or modifies whether the request's endpoint hostname
+// should be considered immutable or not.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func SetHostnameImmutable(ctx context.Context, value bool) context.Context {
+    return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value)
+}
+
+// IsEndpointHostPrefixDisabled retrieves whether the hostname prefixing is
+// disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) {
+    v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool)
+    return v
+}
+
+// DisableEndpointHostPrefix sets or modifies whether the request's endpoint host
+// prefixing should be disabled. If value is true, endpoint host prefixing
+// will be disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context {
+    return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
new file mode 100644
index 000000000..326cb8a6c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
@@ -0,0 +1,79 @@
+package http
+
+import (
+    "context"
+    "fmt"
+    "github.com/aws/smithy-go/middleware"
+    "strings"
+)
+
+// MinimumProtocolError is an error type indicating that the established connection did not meet the expected minimum
+// HTTP protocol version.
+type MinimumProtocolError struct {
+    proto              string
+    expectedProtoMajor int
+    expectedProtoMinor int
+}
+
+// Error returns the error message.
+func (m *MinimumProtocolError) Error() string {
+    return fmt.Sprintf("operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
+        m.expectedProtoMajor, m.expectedProtoMinor, m.proto)
+}
+
+// RequireMinimumProtocol is a deserialization middleware that asserts that the established HTTP connection
+// meets the minimum major and minor version.
+type RequireMinimumProtocol struct {
+    ProtoMajor int
+    ProtoMinor int
+}
+
+// AddRequireMinimumProtocol adds the RequireMinimumProtocol middleware to the stack using the provided minimum
+// protocol major and minor version.
+func AddRequireMinimumProtocol(stack *middleware.Stack, major, minor int) error {
+    return stack.Deserialize.Insert(&RequireMinimumProtocol{
+        ProtoMajor: major,
+        ProtoMinor: minor,
+    }, "OperationDeserializer", middleware.Before)
+}
+
+// ID returns the middleware identifier string.
+func (r *RequireMinimumProtocol) ID() string {
+    return "RequireMinimumProtocol"
+}
+
+// HandleDeserialize asserts that the established connection is an HTTP connection with the minimum major and minor
+// protocol version.
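A sketch of wiring the minimum-protocol check. Note that the insert is relative to the "OperationDeserializer" middleware, which a generated client registers before this call; on a bare stack like the one below the insert is expected to fail, so the sketch only illustrates the call shape:

package main

import (
	"fmt"

	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

	// Require HTTP/2.0 or newer on the established connection.
	if err := smithyhttp.AddRequireMinimumProtocol(stack, 2, 0); err != nil {
		// This bare stack has no "OperationDeserializer" registered yet,
		// so the relative insert reports an error here.
		fmt.Println("insert failed:", err)
	}
}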
+func (r *RequireMinimumProtocol) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) + } + + if !strings.HasPrefix(response.Proto, "HTTP") { + return out, metadata, &MinimumProtocolError{ + proto: response.Proto, + expectedProtoMajor: r.ProtoMajor, + expectedProtoMinor: r.ProtoMinor, + } + } + + if response.ProtoMajor < r.ProtoMajor || response.ProtoMinor < r.ProtoMinor { + return out, metadata, &MinimumProtocolError{ + proto: response.Proto, + expectedProtoMajor: r.ProtoMajor, + expectedProtoMinor: r.ProtoMinor, + } + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go new file mode 100644 index 000000000..7177d6f95 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/request.go @@ -0,0 +1,189 @@ +package http + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + iointernal "github.com/aws/smithy-go/transport/http/internal/io" +) + +// Request provides the HTTP specific request structure for HTTP specific +// middleware steps to use to serialize input, and send an operation's request. +type Request struct { + *http.Request + stream io.Reader + isStreamSeekable bool + streamStartPos int64 +} + +// NewStackRequest returns an initialized request ready to be populated with the +// HTTP request details. Returns empty interface so the function can be used as +// a parameter to the Smithy middleware Stack constructor. +func NewStackRequest() interface{} { + return &Request{ + Request: &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + ContentLength: -1, // default to unknown length + }, + } +} + +// IsHTTPS returns if the request is HTTPS. Returns false if no endpoint URL is set. +func (r *Request) IsHTTPS() bool { + if r.URL == nil { + return false + } + return strings.EqualFold(r.URL.Scheme, "https") +} + +// Clone returns a deep copy of the Request for the new context. A reference to +// the Stream is copied, but the underlying stream is not copied. +func (r *Request) Clone() *Request { + rc := *r + rc.Request = rc.Request.Clone(context.TODO()) + return &rc +} + +// StreamLength returns the number of bytes of the serialized stream attached +// to the request and ok set. If the length cannot be determined, an error will +// be returned. +func (r *Request) StreamLength() (size int64, ok bool, err error) { + return streamLength(r.stream, r.isStreamSeekable, r.streamStartPos) +} + +func streamLength(stream io.Reader, seekable bool, startPos int64) (size int64, ok bool, err error) { + if stream == nil { + return 0, true, nil + } + + if l, ok := stream.(interface{ Len() int }); ok { + return int64(l.Len()), true, nil + } + + if !seekable { + return 0, false, nil + } + + s := stream.(io.Seeker) + endOffset, err := s.Seek(0, io.SeekEnd) + if err != nil { + return 0, false, err + } + + // The reason to seek to streamStartPos instead of 0 is to ensure that the + // SDK only sends the stream from the starting position the user's + // application provided it to the SDK at. 
For example application opens a + // file, and wants to skip the first N bytes uploading the rest. The + // application would move the file's offset N bytes, then hand it off to + // the SDK to send the remaining. The SDK should respect that initial offset. + _, err = s.Seek(startPos, io.SeekStart) + if err != nil { + return 0, false, err + } + + return endOffset - startPos, true, nil +} + +// RewindStream will rewind the io.Reader to the relative start position if it +// is an io.Seeker. +func (r *Request) RewindStream() error { + // If there is no stream there is nothing to rewind. + if r.stream == nil { + return nil + } + + if !r.isStreamSeekable { + return fmt.Errorf("request stream is not seekable") + } + _, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart) + return err +} + +// GetStream returns the request stream io.Reader if a stream is set. If no +// stream is present nil will be returned. +func (r *Request) GetStream() io.Reader { + return r.stream +} + +// IsStreamSeekable returns whether the stream is seekable. +func (r *Request) IsStreamSeekable() bool { + return r.isStreamSeekable +} + +// SetStream returns a clone of the request with the stream set to the provided +// reader. May return an error if the provided reader is seekable but returns +// an error. +func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) { + rc = r.Clone() + + if reader == http.NoBody { + reader = nil + } + + var isStreamSeekable bool + var streamStartPos int64 + switch v := reader.(type) { + case io.Seeker: + n, err := v.Seek(0, io.SeekCurrent) + if err != nil { + return r, err + } + isStreamSeekable = true + streamStartPos = n + default: + // If the stream length can be determined, and is determined to be empty, + // use a nil stream to prevent confusion between empty vs not-empty + // streams. + length, ok, err := streamLength(reader, false, 0) + if err != nil { + return nil, err + } else if ok && length == 0 { + reader = nil + } + } + + rc.stream = reader + rc.isStreamSeekable = isStreamSeekable + rc.streamStartPos = streamStartPos + + return rc, err +} + +// Build returns a build standard HTTP request value from the Smithy request. +// The request's stream is wrapped in a safe container that allows it to be +// reused for subsequent attempts. +func (r *Request) Build(ctx context.Context) *http.Request { + req := r.Request.Clone(ctx) + + if r.stream == nil && req.ContentLength == -1 { + req.ContentLength = 0 + } + + switch stream := r.stream.(type) { + case *io.PipeReader: + req.Body = ioutil.NopCloser(stream) + req.ContentLength = -1 + default: + // HTTP Client Request must only have a non-nil body if the + // ContentLength is explicitly unknown (-1) or non-zero. The HTTP + // Client will interpret a non-nil body and ContentLength 0 as + // "unknown". This is unwanted behavior. + if req.ContentLength != 0 && r.stream != nil { + req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream)) + } + } + + return req +} + +// RequestCloner is a function that can take an input request type and clone the request +// for use in a subsequent retry attempt. 
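A sketch of the stream bookkeeping implemented by SetStream, StreamLength, and RewindStream above (the payload string is illustrative):

package main

import (
	"fmt"
	"strings"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	req := smithyhttp.NewStackRequest().(*smithyhttp.Request)

	// strings.Reader is seekable, so the start position is recorded.
	req, err := req.SetStream(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}

	n, ok, _ := req.StreamLength()
	fmt.Println(n, ok, req.IsStreamSeekable()) // 5 true true

	// After a failed attempt the stream can be rewound to that start position.
	if err := req.RewindStream(); err != nil {
		panic(err)
	}
}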
+func RequestCloner(v interface{}) interface{} { + return v.(*Request).Clone() +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/response.go b/vendor/github.com/aws/smithy-go/transport/http/response.go new file mode 100644 index 000000000..0c13bfcc8 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/response.go @@ -0,0 +1,34 @@ +package http + +import ( + "fmt" + "net/http" +) + +// Response provides the HTTP specific response structure for HTTP specific +// middleware steps to use to deserialize the response from an operation call. +type Response struct { + *http.Response +} + +// ResponseError provides the HTTP centric error type wrapping the underlying +// error with the HTTP response value. +type ResponseError struct { + Response *Response + Err error +} + +// HTTPStatusCode returns the HTTP response status code received from the service. +func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode } + +// HTTPResponse returns the HTTP response received from the service. +func (e *ResponseError) HTTPResponse() *Response { return e.Response } + +// Unwrap returns the nested error if any, or nil. +func (e *ResponseError) Unwrap() error { return e.Err } + +func (e *ResponseError) Error() string { + return fmt.Sprintf( + "http response error StatusCode: %d, %v", + e.Response.StatusCode, e.Err) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/time.go b/vendor/github.com/aws/smithy-go/transport/http/time.go new file mode 100644 index 000000000..607b196a8 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/time.go @@ -0,0 +1,13 @@ +package http + +import ( + "time" + + smithytime "github.com/aws/smithy-go/time" +) + +// ParseTime parses a time string like the HTTP Date header. This uses a more +// relaxed rule set for date parsing compared to the standard library. +func ParseTime(text string) (t time.Time, err error) { + return smithytime.ParseHTTPDate(text) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/url.go b/vendor/github.com/aws/smithy-go/transport/http/url.go new file mode 100644 index 000000000..60a5fc100 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/url.go @@ -0,0 +1,44 @@ +package http + +import "strings" + +// JoinPath returns an absolute URL path composed of the two paths provided. +// Enforces that the returned path begins with '/'. If added path is empty the +// returned path suffix will match the first parameter suffix. +func JoinPath(a, b string) string { + if len(a) == 0 { + a = "/" + } else if a[0] != '/' { + a = "/" + a + } + + if len(b) != 0 && b[0] == '/' { + b = b[1:] + } + + if len(b) != 0 && len(a) > 1 && a[len(a)-1] != '/' { + a = a + "/" + } + + return a + b +} + +// JoinRawQuery returns an absolute raw query expression. Any duplicate '&' +// will be collapsed to single separator between values. +func JoinRawQuery(a, b string) string { + a = strings.TrimFunc(a, isAmpersand) + b = strings.TrimFunc(b, isAmpersand) + + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + + return a + "&" + b +} + +func isAmpersand(v rune) bool { + return v == '&' +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go new file mode 100644 index 000000000..71a7e0d8a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go @@ -0,0 +1,37 @@ +package http + +import ( + "strings" +) + +// UserAgentBuilder is a builder for a HTTP User-Agent string. 
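A sketch of the URL join helpers above, with expected outputs in comments:

package main

import (
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	fmt.Println(smithyhttp.JoinPath("/v1", "items"))     // /v1/items
	fmt.Println(smithyhttp.JoinPath("", "/items"))       // /items
	fmt.Println(smithyhttp.JoinRawQuery("a=1&", "&b=2")) // a=1&b=2
}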
+type UserAgentBuilder struct { + sb strings.Builder +} + +// NewUserAgentBuilder returns a new UserAgentBuilder. +func NewUserAgentBuilder() *UserAgentBuilder { + return &UserAgentBuilder{sb: strings.Builder{}} +} + +// AddKey adds the named component/product to the agent string +func (u *UserAgentBuilder) AddKey(key string) { + u.appendTo(key) +} + +// AddKeyValue adds the named key to the agent string with the given value. +func (u *UserAgentBuilder) AddKeyValue(key, value string) { + u.appendTo(key + "/" + value) +} + +// Build returns the constructed User-Agent string. May be called multiple times. +func (u *UserAgentBuilder) Build() string { + return u.sb.String() +} + +func (u *UserAgentBuilder) appendTo(value string) { + if u.sb.Len() > 0 { + u.sb.WriteRune(' ') + } + u.sb.WriteString(value) +} diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go new file mode 100644 index 000000000..b5eedc1f9 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/validation.go @@ -0,0 +1,140 @@ +package smithy + +import ( + "bytes" + "fmt" + "strings" +) + +// An InvalidParamsError provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type InvalidParamsError struct { + // Context is the base context of the invalid parameter group. + Context string + errs []InvalidParamError +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *InvalidParamsError) Add(err InvalidParamError) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another InvalidParamsError +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e *InvalidParamsError) Len() int { + return len(e.errs) +} + +// Error returns the string formatted form of the invalid parameters. +func (e InvalidParamsError) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs)) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Error()) + } + + return w.String() +} + +// Errs returns a slice of the invalid parameters +func (e InvalidParamsError) Errs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An InvalidParamError represents an invalid parameter error type. +type InvalidParamError interface { + error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type invalidParamError struct { + context string + nestedContext string + field string + reason string +} + +// Error returns the string version of the invalid parameter error. +func (e invalidParamError) Error() string { + return fmt.Sprintf("%s, %s.", e.reason, e.Field()) +} + +// Field Returns the field and context the error occurred. 
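A sketch of aggregating validation errors with the smithy types above (the context and field names are invented):

package main

import (
	"fmt"

	smithy "github.com/aws/smithy-go"
)

func main() {
	var errs smithy.InvalidParamsError
	errs.Context = "ExampleInput"

	// Each added error inherits the base context.
	errs.Add(smithy.NewErrParamRequired("Name"))

	if errs.Len() > 0 {
		fmt.Println(errs.Error())
		// 1 validation error(s) found.
		// - missing required field, ExampleInput.Name.
	}
}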
+func (e invalidParamError) Field() string {
+    sb := &strings.Builder{}
+    sb.WriteString(e.context)
+    if sb.Len() > 0 {
+        if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") {
+            sb.WriteRune('.')
+        }
+    }
+    if len(e.nestedContext) > 0 {
+        sb.WriteString(e.nestedContext)
+        sb.WriteRune('.')
+    }
+    sb.WriteString(e.field)
+    return sb.String()
+}
+
+// SetContext updates the base context of the error.
+func (e *invalidParamError) SetContext(ctx string) {
+    e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *invalidParamError) AddNestedContext(ctx string) {
+    if len(e.nestedContext) == 0 {
+        e.nestedContext = ctx
+        return
+    }
+    // Check if our nested context is an index into a slice or map
+    if e.nestedContext[:1] != "[" {
+        e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+        return
+    }
+    e.nestedContext = ctx + e.nestedContext
+}
+
+// A ParamRequiredError represents a required parameter error.
+type ParamRequiredError struct {
+    invalidParamError
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ParamRequiredError {
+    return &ParamRequiredError{
+        invalidParamError{
+            field:  field,
+            reason: fmt.Sprintf("missing required field"),
+        },
+    }
+}
diff --git a/vendor/github.com/aws/smithy-go/waiter/logger.go b/vendor/github.com/aws/smithy-go/waiter/logger.go
new file mode 100644
index 000000000..8d70a03ff
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/waiter/logger.go
@@ -0,0 +1,36 @@
+package waiter
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/aws/smithy-go/logging"
+    "github.com/aws/smithy-go/middleware"
+)
+
+// Logger is the Logger middleware used by the waiter to log an attempt
+type Logger struct {
+    // Attempt is the current attempt to be logged
+    Attempt int64
+}
+
+// ID returns the identifier for the Logger middleware
+func (*Logger) ID() string {
+    return "WaiterLogger"
+}
+
+// HandleInitialize performs handling of the request in the Initialize stack step
+func (m *Logger) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+    out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+    logger := middleware.GetLogger(ctx)
+
+    logger.Logf(logging.Debug, fmt.Sprintf("attempting waiter request, attempt count: %d", m.Attempt))
+
+    return next.HandleInitialize(ctx, in)
+}
+
+// AddLogger is a helper util to add the waiter logger after the `SetLogger`
+// middleware in the Initialize step.
+func (m Logger) AddLogger(stack *middleware.Stack) error {
+    return stack.Initialize.Insert(&m, "SetLogger", middleware.After)
+}
diff --git a/vendor/github.com/aws/smithy-go/waiter/waiter.go b/vendor/github.com/aws/smithy-go/waiter/waiter.go
new file mode 100644
index 000000000..03e46e2ee
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/waiter/waiter.go
@@ -0,0 +1,66 @@
+package waiter
+
+import (
+    "fmt"
+    "math"
+    "time"
+
+    "github.com/aws/smithy-go/rand"
+)
+
+// ComputeDelay computes the delay between waiter attempts. The function takes in a current attempt count,
+// minimum delay, maximum delay, and the remaining wait time for the waiter as input. The inputs minDelay and maxDelay
+// must always be greater than 0, and minDelay must be less than or equal to maxDelay.
+//
+// Returns the computed delay and whether the next attempt is possible within the given input time constraints.
+// Note that the zeroth attempt results in no delay.
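A sketch of the backoff-with-jitter contract described above (the durations are arbitrary):

package main

import (
	"fmt"
	"time"

	"github.com/aws/smithy-go/waiter"
)

func main() {
	// Attempt 3 with min 2s / max 120s: the exponential base delay is
	// 2s << 2 = 8s, then jittered down into the range [2s, 8s).
	delay, err := waiter.ComputeDelay(3, 2*time.Second, 120*time.Second, 5*time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(delay >= 2*time.Second && delay < 8*time.Second) // true
}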
+func ComputeDelay(attempt int64, minDelay, maxDelay, remainingTime time.Duration) (delay time.Duration, err error) { + // zeroth attempt, no delay + if attempt <= 0 { + return 0, nil + } + + // remainingTime is zero or less, no delay + if remainingTime <= 0 { + return 0, nil + } + + // validate min delay is greater than 0 + if minDelay == 0 { + return 0, fmt.Errorf("minDelay must be greater than zero when computing Delay") + } + + // validate max delay is greater than 0 + if maxDelay == 0 { + return 0, fmt.Errorf("maxDelay must be greater than zero when computing Delay") + } + + // Get attempt ceiling to prevent integer overflow. + attemptCeiling := (math.Log(float64(maxDelay/minDelay)) / math.Log(2)) + 1 + + if attempt > int64(attemptCeiling) { + delay = maxDelay + } else { + // Compute exponential delay based on attempt. + ri := 1 << uint64(attempt-1) + // compute delay + delay = minDelay * time.Duration(ri) + } + + if delay != minDelay { + // randomize to get jitter between min delay and delay value + d, err := rand.CryptoRandInt63n(int64(delay - minDelay)) + if err != nil { + return 0, fmt.Errorf("error computing retry jitter, %w", err) + } + + delay = time.Duration(d) + minDelay + } + + // check if this is the last attempt possible and compute delay accordingly + if remainingTime-delay <= minDelay { + delay = remainingTime - minDelay + } + + return delay, nil +} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/LICENSE b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/LICENSE new file mode 100644 index 000000000..bb3f49d69 --- /dev/null +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/LICENSE @@ -0,0 +1,51 @@ +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +1. Definitions. +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: +1. You must give any other recipients of the Work or Derivative Works a copy of this License; and +2. You must cause any modified files to carry prominent notices stating that You changed the files; and +3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +4. 
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. +9. Accepting Warranty or Additional Liability. 
While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. +END OF TERMS AND CONDITIONS +APPENDIX: How to apply the Apache License to your work +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*** + +Note: Other license terms may apply to certain, identified software files contained within or distributed with the accompanying software if such terms are included in the directory containing the accompanying software. Such other license terms will then apply in lieu of the terms of the software license above. diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/client.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/client.go new file mode 100644 index 000000000..2289ef6d9 --- /dev/null +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/client.go @@ -0,0 +1,302 @@ +// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package api + +import ( + "context" + "encoding/base64" + "fmt" + "net/url" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ecr" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache" +) + +const ( + proxyEndpointScheme = "https://" + programName = "docker-credential-ecr-login" + ecrPublicName = "public.ecr.aws" + ecrPublicEndpoint = proxyEndpointScheme + ecrPublicName +) + +var ecrPattern = regexp.MustCompile(`(^[a-zA-Z0-9][a-zA-Z0-9-_]*)\.dkr\.ecr(-fips)?\.([a-zA-Z0-9][a-zA-Z0-9-_]*)\.amazonaws\.com(\.cn)?$`) + +type Service string + +const ( + ServiceECR Service = "ecr" + ServiceECRPublic Service = "ecr-public" +) + +// Registry in ECR +type Registry struct { + Service Service + ID string + FIPS bool + Region string +} + +// ExtractRegistry returns the ECR registry behind a given service endpoint +func ExtractRegistry(input string) (*Registry, error) { + if strings.HasPrefix(input, proxyEndpointScheme) { + input = strings.TrimPrefix(input, proxyEndpointScheme) + } + serverURL, err := url.Parse(proxyEndpointScheme + input) + if err != nil { + return nil, err + } + if serverURL.Hostname() == ecrPublicName { + return &Registry{ + Service: ServiceECRPublic, + }, nil + } + matches := ecrPattern.FindStringSubmatch(serverURL.Hostname()) + if len(matches) == 0 { + return nil, fmt.Errorf(programName + " can only be used with Amazon Elastic Container Registry.") + } else if len(matches) < 3 { + return nil, fmt.Errorf("%q is not a valid repository URI for Amazon Elastic Container Registry.", input) + } + return &Registry{ + Service: ServiceECR, + ID: matches[1], + FIPS: matches[2] == "-fips", + Region: matches[3], + }, nil +} + +// Client used for calling ECR service +type Client interface { + GetCredentials(serverURL string) (*Auth, error) + GetCredentialsByRegistryID(registryID string) (*Auth, error) + ListCredentials() ([]*Auth, error) +} + +// Auth credentials returned by ECR service to allow docker login +type Auth struct { + ProxyEndpoint string + Username string + Password string +} + +type defaultClient struct { + ecrClient ECRAPI + ecrPublicClient ECRPublicAPI + credentialCache cache.CredentialsCache +} + +type ECRAPI interface { + GetAuthorizationToken(context.Context, *ecr.GetAuthorizationTokenInput, ...func(*ecr.Options)) (*ecr.GetAuthorizationTokenOutput, error) +} + +type ECRPublicAPI interface { + GetAuthorizationToken(context.Context, *ecrpublic.GetAuthorizationTokenInput, ...func(*ecrpublic.Options)) (*ecrpublic.GetAuthorizationTokenOutput, error) +} + +// GetCredentials returns username, password, and proxyEndpoint +func (c *defaultClient) GetCredentials(serverURL string) (*Auth, error) { + registry, err := ExtractRegistry(serverURL) + if err != nil { + return nil, err + } + logrus. + WithField("service", registry.Service). + WithField("registry", registry.ID). + WithField("region", registry.Region). + WithField("serverURL", serverURL). 
+ Debug("Retrieving credentials") + switch registry.Service { + case ServiceECR: + return c.GetCredentialsByRegistryID(registry.ID) + case ServiceECRPublic: + return c.GetPublicCredentials() + } + return nil, fmt.Errorf("unknown service %q", registry.Service) +} + +// GetCredentialsByRegistryID returns username, password, and proxyEndpoint +func (c *defaultClient) GetCredentialsByRegistryID(registryID string) (*Auth, error) { + cachedEntry := c.credentialCache.Get(registryID) + if cachedEntry != nil { + if cachedEntry.IsValid(time.Now()) { + logrus.WithField("registry", registryID).Debug("Using cached token") + return extractToken(cachedEntry.AuthorizationToken, cachedEntry.ProxyEndpoint) + } + logrus. + WithField("requestedAt", cachedEntry.RequestedAt). + WithField("expiresAt", cachedEntry.ExpiresAt). + Debug("Cached token is no longer valid") + } + + auth, err := c.getAuthorizationToken(registryID) + + // if we have a cached token, fall back to avoid failing the request. This may result an expired token + // being returned, but if there is a 500 or timeout from the service side, we'd like to attempt to re-use an + // old token. We invalidate tokens prior to their expiration date to help mitigate this scenario. + if err != nil && cachedEntry != nil { + logrus.WithError(err).Info("Got error fetching authorization token. Falling back to cached token.") + return extractToken(cachedEntry.AuthorizationToken, cachedEntry.ProxyEndpoint) + } + return auth, err +} + +func (c *defaultClient) GetPublicCredentials() (*Auth, error) { + cachedEntry := c.credentialCache.GetPublic() + if cachedEntry != nil { + if cachedEntry.IsValid(time.Now()) { + logrus.WithField("registry", ecrPublicName).Debug("Using cached token") + return extractToken(cachedEntry.AuthorizationToken, cachedEntry.ProxyEndpoint) + } + logrus. + WithField("requestedAt", cachedEntry.RequestedAt). + WithField("expiresAt", cachedEntry.ExpiresAt). + Debug("Cached token is no longer valid") + } + + auth, err := c.getPublicAuthorizationToken() + // if we have a cached token, fall back to avoid failing the request. This may result an expired token + // being returned, but if there is a 500 or timeout from the service side, we'd like to attempt to re-use an + // old token. We invalidate tokens prior to their expiration date to help mitigate this scenario. + if err != nil && cachedEntry != nil { + logrus.WithError(err).Info("Got error fetching authorization token. 
Falling back to cached token.") + return extractToken(cachedEntry.AuthorizationToken, cachedEntry.ProxyEndpoint) + } + return auth, err +} + +func (c *defaultClient) ListCredentials() ([]*Auth, error) { + // prime the cache with default authorization tokens + _, err := c.GetCredentialsByRegistryID("") + if err != nil { + logrus.WithError(err).Debug("couldn't get authorization token for default registry") + } + _, err = c.GetPublicCredentials() + if err != nil { + logrus.WithError(err).Debug("couldn't get authorization token for public registry") + } + + auths := make([]*Auth, 0) + for _, authEntry := range c.credentialCache.List() { + auth, err := extractToken(authEntry.AuthorizationToken, authEntry.ProxyEndpoint) + if err != nil { + logrus.WithError(err).Debug("Could not extract token") + } else { + auths = append(auths, auth) + } + } + + return auths, nil +} + +func (c *defaultClient) getAuthorizationToken(registryID string) (*Auth, error) { + var input *ecr.GetAuthorizationTokenInput + if registryID == "" { + logrus.Debug("Calling ECR.GetAuthorizationToken for default registry") + input = &ecr.GetAuthorizationTokenInput{} + } else { + logrus.WithField("registry", registryID).Debug("Calling ECR.GetAuthorizationToken") + input = &ecr.GetAuthorizationTokenInput{ + RegistryIds: []string{registryID}, + } + } + + output, err := c.ecrClient.GetAuthorizationToken(context.TODO(), input) + if err != nil || output == nil { + if err == nil { + if registryID == "" { + err = fmt.Errorf("missing AuthorizationData in ECR response for default registry") + } else { + err = fmt.Errorf("missing AuthorizationData in ECR response for %s", registryID) + } + } + return nil, errors.Wrap(err, "ecr: Failed to get authorization token") + } + + for _, authData := range output.AuthorizationData { + if authData.ProxyEndpoint != nil && authData.AuthorizationToken != nil { + authEntry := cache.AuthEntry{ + AuthorizationToken: aws.ToString(authData.AuthorizationToken), + RequestedAt: time.Now(), + ExpiresAt: aws.ToTime(authData.ExpiresAt), + ProxyEndpoint: aws.ToString(authData.ProxyEndpoint), + Service: cache.ServiceECR, + } + registry, err := ExtractRegistry(authEntry.ProxyEndpoint) + if err != nil { + return nil, fmt.Errorf("Invalid ProxyEndpoint returned by ECR: %s", authEntry.ProxyEndpoint) + } + auth, err := extractToken(authEntry.AuthorizationToken, authEntry.ProxyEndpoint) + if err != nil { + return nil, err + } + c.credentialCache.Set(registry.ID, &authEntry) + return auth, nil + } + } + if registryID == "" { + return nil, fmt.Errorf("No AuthorizationToken found for default registry") + } + return nil, fmt.Errorf("No AuthorizationToken found for %s", registryID) +} + +func (c *defaultClient) getPublicAuthorizationToken() (*Auth, error) { + var input *ecrpublic.GetAuthorizationTokenInput + + output, err := c.ecrPublicClient.GetAuthorizationToken(context.TODO(), input) + if err != nil { + return nil, errors.Wrap(err, "ecr: failed to get authorization token") + } + if output == nil || output.AuthorizationData == nil { + return nil, fmt.Errorf("ecr: missing AuthorizationData in ECR Public response") + } + authData := output.AuthorizationData + token, err := extractToken(aws.ToString(authData.AuthorizationToken), ecrPublicEndpoint) + if err != nil { + return nil, err + } + authEntry := cache.AuthEntry{ + AuthorizationToken: aws.ToString(authData.AuthorizationToken), + RequestedAt: time.Now(), + ExpiresAt: aws.ToTime(authData.ExpiresAt), + ProxyEndpoint: ecrPublicEndpoint, + Service: cache.ServiceECRPublic, + } + 
c.credentialCache.Set(ecrPublicName, &authEntry) + return token, nil +} + +func extractToken(token string, proxyEndpoint string) (*Auth, error) { + decodedToken, err := base64.StdEncoding.DecodeString(token) + if err != nil { + return nil, errors.Wrap(err, "invalid token") + } + + parts := strings.SplitN(string(decodedToken), ":", 2) + if len(parts) < 2 { + return nil, fmt.Errorf("invalid token: expected two parts, got %d", len(parts)) + } + + return &Auth{ + Username: parts[0], + Password: parts[1], + ProxyEndpoint: proxyEndpoint, + }, nil +} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/factory.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/factory.go new file mode 100644 index 000000000..110a6c3bd --- /dev/null +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api/factory.go @@ -0,0 +1,107 @@ +// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package api + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ecr" + "github.com/aws/aws-sdk-go-v2/service/ecrpublic" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/transport/http" + "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache" + "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/version" +) + +// Options makes the constructors more configurable +type Options struct { + Config aws.Config + CacheDir string +} + +// ClientFactory is a factory for creating clients to interact with ECR +type ClientFactory interface { + NewClient(awsConfig aws.Config) Client + NewClientWithOptions(opts Options) Client + NewClientFromRegion(region string) Client + NewClientWithFipsEndpoint(region string) (Client, error) + NewClientWithDefaults() Client +} + +// DefaultClientFactory is a default implementation of the ClientFactory +type DefaultClientFactory struct{} + +var userAgentLoadOption = config.WithAPIOptions([]func(*middleware.Stack) error{ + http.AddHeaderValue("User-Agent", "amazon-ecr-credential-helper/"+version.Version), +}) + +// NewClientWithDefaults creates the client and defaults region +func (defaultClientFactory DefaultClientFactory) NewClientWithDefaults() Client { + awsConfig, err := config.LoadDefaultConfig(context.TODO(), userAgentLoadOption) + if err != nil { + panic(err) + } + + return defaultClientFactory.NewClientWithOptions(Options{Config: awsConfig}) +} + +// NewClientWithFipsEndpoint overrides the default ECR service endpoint in a given region to use the FIPS endpoint +func (defaultClientFactory DefaultClientFactory) NewClientWithFipsEndpoint(region string) (Client, error) { + awsConfig, err := config.LoadDefaultConfig( + context.TODO(), + userAgentLoadOption, + config.WithRegion(region), + config.WithEndpointDiscovery(aws.EndpointDiscoveryEnabled), + ) + if err != nil { + return nil, err + } + + return defaultClientFactory.NewClientWithOptions(Options{Config: awsConfig}), nil +} + +// 
NewClientFromRegion uses the region to create the client
+func (defaultClientFactory DefaultClientFactory) NewClientFromRegion(region string) Client {
+	awsConfig, err := config.LoadDefaultConfig(
+		context.TODO(),
+		userAgentLoadOption,
+		config.WithRegion(region),
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	return defaultClientFactory.NewClientWithOptions(Options{
+		Config: awsConfig,
+	})
+}
+
+// NewClient creates a new client with the given AWS Config
+func (defaultClientFactory DefaultClientFactory) NewClient(awsConfig aws.Config) Client {
+	return defaultClientFactory.NewClientWithOptions(Options{Config: awsConfig})
+}
+
+// NewClientWithOptions creates a new client with the given Options
+func (defaultClientFactory DefaultClientFactory) NewClientWithOptions(opts Options) Client {
+	// The ECR Public API is only available in us-east-1 today
+	publicConfig := opts.Config.Copy()
+	publicConfig.Region = "us-east-1"
+	return &defaultClient{
+		ecrClient:       ecr.NewFromConfig(opts.Config),
+		ecrPublicClient: ecrpublic.NewFromConfig(publicConfig),
+		credentialCache: cache.BuildCredentialsCache(opts.Config, opts.CacheDir),
+	}
+}
diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/build.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/build.go
new file mode 100644
index 000000000..8aa21287d
--- /dev/null
+++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/build.go
@@ -0,0 +1,73 @@
+// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cache
+
+import (
+	"context"
+	"crypto/md5"
+	"encoding/base64"
+	"fmt"
+	"os"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/mitchellh/go-homedir"
+	"github.com/sirupsen/logrus"
+
+	ecrconfig "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config"
+)
+
+func BuildCredentialsCache(config aws.Config, cacheDir string) CredentialsCache {
+	if os.Getenv("AWS_ECR_DISABLE_CACHE") != "" {
+		logrus.Debug("Cache disabled due to AWS_ECR_DISABLE_CACHE")
+		return NewNullCredentialsCache()
+	}
+
+	if cacheDir == "" {
+		// Get cacheDir from the env var "AWS_ECR_CACHE_DIR" or fall back to the default
+		cacheDir = ecrconfig.GetCacheDir()
+	}
+
+	cacheDir, err := homedir.Expand(cacheDir)
+	if err != nil {
+		logrus.WithError(err).Debug("Could not expand cache path, disabling cache")
+		return NewNullCredentialsCache()
+	}
+
+	cacheFilename := "cache.json"
+
+	credentials, err := config.Credentials.Retrieve(context.TODO())
+	if err != nil {
+		logrus.WithError(err).Debug("Could not fetch credentials for cache prefix, disabling cache")
+		return NewNullCredentialsCache()
+	}
+
+	return NewFileCredentialsCache(cacheDir, cacheFilename, credentialsCachePrefix(config.Region, credentials), credentialsPublicCacheKey(credentials))
+}
+
+// Determine a key prefix for a credentials cache. Because auth tokens are scoped to an account and region, rely on the
+// provided region, as well as a hash of the access key.
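+// For example, region "us-west-2" yields cache keys of the form
+// "us-west-2-<checksum>-<registryID>".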
+func credentialsCachePrefix(region string, credentials aws.Credentials) string {
+	return fmt.Sprintf("%s-%s-", region, checksum(credentials.AccessKeyID))
+}
+
+func credentialsPublicCacheKey(credentials aws.Credentials) string {
+	return fmt.Sprintf("%s-%s", ServiceECRPublic, checksum(credentials.AccessKeyID))
+}
+
+// Base64 encodes an MD5 checksum. Relied on for uniqueness, and not for cryptographic security.
+func checksum(text string) string {
+	sum := md5.Sum([]byte(text))
+	return base64.StdEncoding.EncodeToString(sum[:])
+}
diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/credentials.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/credentials.go
new file mode 100644
index 000000000..0b74c0f31
--- /dev/null
+++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/credentials.go
@@ -0,0 +1,49 @@
+// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cache
+
+import (
+	"time"
+)
+
+type CredentialsCache interface {
+	Get(registry string) *AuthEntry
+	GetPublic() *AuthEntry
+	Set(registry string, entry *AuthEntry)
+	List() []*AuthEntry
+	Clear()
+}
+
+type Service string
+
+const (
+	ServiceECR       Service = "ecr"
+	ServiceECRPublic Service = "ecr-public"
+)
+
+type AuthEntry struct {
+	AuthorizationToken string
+	RequestedAt        time.Time
+	ExpiresAt          time.Time
+	ProxyEndpoint      string
+	Service            Service
+}
+
+// IsValid checks if AuthEntry is still valid at testTime. AuthEntries expire at 1/2 of their original
+// requested window.
+func (authEntry *AuthEntry) IsValid(testTime time.Time) bool {
+	validWindow := authEntry.ExpiresAt.Sub(authEntry.RequestedAt)
+	refreshTime := authEntry.ExpiresAt.Add(-1 * validWindow / time.Duration(2))
+	return testTime.Before(refreshTime)
+}
diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/file.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/file.go
new file mode 100644
index 000000000..bc70693b8
--- /dev/null
+++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/file.go
@@ -0,0 +1,195 @@
+// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cache
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/sirupsen/logrus"
+)
+
+const registryCacheVersion = "1.0"
+
+type RegistryCache struct {
+	Registries map[string]*AuthEntry
+	Version    string
+}
+
+type fileCredentialCache struct {
+	path           string
+	filename       string
+	cachePrefixKey string
+	publicCacheKey string
+}
+
+func newRegistryCache() *RegistryCache {
+	return &RegistryCache{
+		Registries: make(map[string]*AuthEntry),
+		Version:    registryCacheVersion,
+	}
+}
+
+// NewFileCredentialsCache returns a new file credentials cache.
+//
+// path is used for temporary files during save, and filename should be a relative filename
+// in the same directory where the cache is serialized and deserialized.
+//
+// cachePrefixKey is used for scoping credentials for a given credential cache (i.e. region and
+// accessKey).
+func NewFileCredentialsCache(path string, filename string, cachePrefixKey string, publicCacheKey string) CredentialsCache {
+	if _, err := os.Stat(path); err != nil {
+		os.MkdirAll(path, 0700)
+	}
+	return &fileCredentialCache{
+		path:           path,
+		filename:       filename,
+		cachePrefixKey: cachePrefixKey,
+		publicCacheKey: publicCacheKey,
+	}
+}
+
+func (f *fileCredentialCache) Get(registry string) *AuthEntry {
+	logrus.WithField("registry", registry).Debug("Checking file cache")
+	registryCache := f.init()
+	return registryCache.Registries[f.cachePrefixKey+registry]
+}
+
+func (f *fileCredentialCache) GetPublic() *AuthEntry {
+	logrus.Debug("Checking file cache for ECR Public")
+	registryCache := f.init()
+	return registryCache.Registries[f.publicCacheKey]
+}
+
+func (f *fileCredentialCache) Set(registry string, entry *AuthEntry) {
+	logrus.
+		WithField("registry", registry).
+		WithField("service", entry.Service).
+		Debug("Saving credentials to file cache")
+	registryCache := f.init()
+
+	key := f.cachePrefixKey + registry
+	if entry.Service == ServiceECRPublic {
+		key = f.publicCacheKey
+	}
+	registryCache.Registries[key] = entry
+
+	err := f.save(registryCache)
+	if err != nil {
+		logrus.WithError(err).Info("Could not save cache")
+	}
+}
+
+// List returns all of the available AuthEntries (regardless of prefix)
+func (f *fileCredentialCache) List() []*AuthEntry {
+	registryCache := f.init()
+
+	// optimize allocation for copy
+	entries := make([]*AuthEntry, 0, len(registryCache.Registries))
+
+	for _, entry := range registryCache.Registries {
+		entries = append(entries, entry)
+	}
+
+	return entries
+}
+
+func (f *fileCredentialCache) Clear() {
+	err := os.Remove(f.fullFilePath())
+	if err != nil {
+		logrus.WithError(err).Info("Could not clear cache")
+	}
+}
+
+func (f *fileCredentialCache) fullFilePath() string {
+	return filepath.Join(f.path, f.filename)
+}
+
+// save writes the credential cache to disk. It writes to a temporary file first, then moves the file to the cache location.
+// This prevents readers from seeing partially written credential files, and reduces (but does not eliminate) concurrent
+// file access. There is no guarantee here for handling multiple writes at once, since there is no out-of-process locking.
+func (f *fileCredentialCache) save(registryCache *RegistryCache) error { + file, err := ioutil.TempFile(f.path, ".config.json.tmp") + if err != nil { + return err + } + + buff, err := json.MarshalIndent(registryCache, "", " ") + if err != nil { + file.Close() + os.Remove(file.Name()) + return err + } + + _, err = file.Write(buff) + + if err != nil { + file.Close() + os.Remove(file.Name()) + return err + } + + file.Close() + // note this is only atomic when relying on linux syscalls + os.Rename(file.Name(), f.fullFilePath()) + return err +} + +func (f *fileCredentialCache) init() *RegistryCache { + registryCache, err := f.load() + if err != nil { + logrus.WithError(err).Info("Could not load existing cache") + f.Clear() + registryCache = newRegistryCache() + } + return registryCache +} + +// Loading a cache from disk will return errors for malformed or incompatible cache files. +func (f *fileCredentialCache) load() (*RegistryCache, error) { + registryCache := newRegistryCache() + + file, err := os.Open(f.fullFilePath()) + if os.IsNotExist(err) { + return registryCache, nil + } + + if err != nil { + return nil, err + } + + defer file.Close() + + if err = json.NewDecoder(file).Decode(®istryCache); err != nil { + return nil, err + } + + if registryCache.Version != registryCacheVersion { + return nil, fmt.Errorf("ecr: Registry cache version %#v is not compatible with %#v, ignoring existing cache", + registryCache.Version, + registryCacheVersion) + } + + // migrate entries + for key := range registryCache.Registries { + if registryCache.Registries[key].Service == "" { + registryCache.Registries[key].Service = ServiceECR + } + } + + return registryCache, nil +} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/null.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/null.go new file mode 100644 index 000000000..64a7212bd --- /dev/null +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cache/null.go @@ -0,0 +1,38 @@ +// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package cache + +type nullCredentialsCache struct{} + +func NewNullCredentialsCache() CredentialsCache { + return &nullCredentialsCache{} +} + +func (n *nullCredentialsCache) Get(_ string) *AuthEntry { + return nil +} + +func (n *nullCredentialsCache) GetPublic() *AuthEntry { + return nil +} + +func (n *nullCredentialsCache) Set(_ string, _ *AuthEntry) { +} + +func (n *nullCredentialsCache) List() []*AuthEntry { + return []*AuthEntry{} +} + +func (n *nullCredentialsCache) Clear() { +} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/cache_dir.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/cache_dir.go new file mode 100644 index 000000000..d7c2ee644 --- /dev/null +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/cache_dir.go @@ -0,0 +1,23 @@ +// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import "os" + +func GetCacheDir() string { + if cacheDir := os.Getenv("AWS_ECR_CACHE_DIR"); cacheDir != "" { + return cacheDir + } + return "~/.ecr" +} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/log.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/log.go new file mode 100644 index 000000000..771611bad --- /dev/null +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/config/log.go @@ -0,0 +1,48 @@ +// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/mitchellh/go-homedir" + "github.com/sirupsen/logrus" +) + +func SetupLogger() { + logrusConfig() +} + +func logrusConfig() { + logdir, err := homedir.Expand(GetCacheDir() + "/log") + if err != nil { + fmt.Fprintf(os.Stderr, "log: failed to find directory: %v", err) + logdir = os.TempDir() + } + // Clean the path to replace with OS-specific separators + logdir = filepath.Clean(logdir) + err = os.MkdirAll(logdir, os.ModeDir|0700) + if err != nil { + fmt.Fprintf(os.Stderr, "log: failed to create directory: %v", err) + logdir = os.TempDir() + } + file, err := os.OpenFile(filepath.Join(logdir, "ecr-login.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + return + } + logrus.SetLevel(logrus.DebugLevel) + logrus.SetOutput(file) +} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/ecr.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/ecr.go new file mode 100644 index 000000000..461e3090a --- /dev/null +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/ecr.go @@ -0,0 +1,129 @@ +// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package ecr + +import ( + "errors" + "fmt" + "io" + + "github.com/sirupsen/logrus" + + "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api" + "github.com/docker/docker-credential-helpers/credentials" +) + +var notImplemented = errors.New("not implemented") + +type ECRHelper struct { + clientFactory api.ClientFactory + logger *logrus.Logger +} + +type Option func(*ECRHelper) + +// WithClientFactory sets the ClientFactory used to make API requests. +func WithClientFactory(clientFactory api.ClientFactory) Option { + return func(e *ECRHelper) { + e.clientFactory = clientFactory + } +} + +// WithLogger sets a new logger instance that writes to the given writer, +// instead of the default writer which writes to stderr. +// +// This can be useful if callers want to redirect logging emitted by this tool +// to another location. +func WithLogger(w io.Writer) Option { + return func(e *ECRHelper) { + logger := logrus.New() + logger.Out = w + e.logger = logger + } +} + +// NewECRHelper returns a new ECRHelper with the given options to override +// default behavior. +func NewECRHelper(opts ...Option) *ECRHelper { + e := &ECRHelper{ + clientFactory: api.DefaultClientFactory{}, + logger: logrus.StandardLogger(), + } + for _, o := range opts { + o(e) + } + + return e +} + +// ensure ECRHelper adheres to the credentials.Helper interface +var _ credentials.Helper = (*ECRHelper)(nil) + +func (ECRHelper) Add(creds *credentials.Credentials) error { + // This does not seem to get called + return notImplemented +} + +func (ECRHelper) Delete(serverURL string) error { + // This does not seem to get called + return notImplemented +} + +func (self ECRHelper) Get(serverURL string) (string, string, error) { + registry, err := api.ExtractRegistry(serverURL) + if err != nil { + self.logger. + WithError(err). + WithField("serverURL", serverURL). + Error("Error parsing the serverURL") + return "", "", credentials.NewErrCredentialsNotFound() + } + + var client api.Client + if registry.FIPS { + client, err = self.clientFactory.NewClientWithFipsEndpoint(registry.Region) + if err != nil { + self.logger.WithError(err).Error("Error resolving FIPS endpoint") + return "", "", credentials.NewErrCredentialsNotFound() + } + } else { + client = self.clientFactory.NewClientFromRegion(registry.Region) + } + + auth, err := client.GetCredentials(serverURL) + if err != nil { + self.logger.WithError(err).Error("Error retrieving credentials") + return "", "", credentials.NewErrCredentialsNotFound() + } + return auth.Username, auth.Password, nil +} + +func (self ECRHelper) List() (map[string]string, error) { + self.logger.Debug("Listing credentials") + client := self.clientFactory.NewClientWithDefaults() + + auths, err := client.ListCredentials() + if err != nil { + self.logger.WithError(err).Error("Error listing credentials") + return nil, fmt.Errorf("ecr: could not list credentials: %v", err) + } + + result := map[string]string{} + + for _, auth := range auths { + serverURL := auth.ProxyEndpoint + result[serverURL] = auth.Username + } + return result, nil +} diff --git a/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/version/version.go b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/version/version.go new file mode 100644 index 000000000..a02a670aa --- /dev/null +++ b/vendor/github.com/awslabs/amazon-ecr-credential-helper/ecr-login/version/version.go @@ -0,0 +1,7 @@ +package version + +// Version indicates which version of the binary is running. 
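+// It is conventionally overridden at build time via the Go linker, e.g.
+// go build -ldflags "-X github.com/awslabs/amazon-ecr-credential-helper/ecr-login/version.Version=1.2.3"
+// (an assumed build-time convention; the actual build flags are not part of this diff).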
+var Version = "development" + +// GitCommitSHA indicates which git shorthash the binary was built off of +var GitCommitSHA string diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml new file mode 100644 index 000000000..102fb9a69 --- /dev/null +++ b/vendor/github.com/blang/semver/.travis.yml @@ -0,0 +1,21 @@ +language: go +matrix: + include: + - go: 1.4.3 + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 + - go: tip + allow_failures: + - go: tip +install: +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls +script: +- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci + -repotoken $COVERALLS_TOKEN +- echo "Build examples" ; cd examples && go build +- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) +env: + global: + secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/Masterminds/squirrel/LICENSE.txt b/vendor/github.com/blang/semver/LICENSE similarity index 87% rename from vendor/github.com/Masterminds/squirrel/LICENSE.txt rename to vendor/github.com/blang/semver/LICENSE index 74c20a2b9..5ba5c86fc 100644 --- a/vendor/github.com/Masterminds/squirrel/LICENSE.txt +++ b/vendor/github.com/blang/semver/LICENSE @@ -1,8 +1,6 @@ -Squirrel -The Masterminds -Copyright (C) 2014-2015, Lann Martin -Copyright (C) 2015-2016, Google -Copyright (C) 2015, Matt Farina and Matt Butcher +The MIT License + +Copyright (c) 2014 Benedikt Lang Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -21,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md new file mode 100644 index 000000000..08b2e4a3d --- /dev/null +++ b/vendor/github.com/blang/semver/README.md @@ -0,0 +1,194 @@ +semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) +====== + +semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. + +Usage +----- +```bash +$ go get github.com/blang/semver +``` +Note: Always vendor your dependencies or fix on a specific version tag. + +```go +import github.com/blang/semver +v1, err := semver.Make("1.0.0-beta") +v2, err := semver.Make("2.0.0-beta") +v1.Compare(v2) +``` + +Also check the [GoDocs](http://godoc.org/github.com/blang/semver). + +Why should I use this lib? 
+-----
+
+- Fully spec compatible
+- No reflection
+- No regex
+- Fully tested (Coverage >99%)
+- Readable parsing/validation errors
+- Fast (See [Benchmarks](#benchmarks))
+- Only Stdlib
+- Uses values instead of pointers
+- Many features, see below
+
+
+Features
+-----
+
+- Parsing and validation at all levels
+- Comparator-like comparisons
+- Compare Helper Methods
+- InPlace manipulation
+- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
+- Wildcards `>=1.x`, `<=2.5.x`
+- Sortable (implements sort.Interface)
+- database/sql compatible (sql.Scanner/Valuer)
+- encoding/json compatible (json.Marshaler/Unmarshaler)
+
+Ranges
+------
+
+A `Range` is a set of conditions which specify which versions satisfy the range.
+
+A condition is composed of an operator and a version. The supported operators are:
+
+- `<1.0.0` Less than `1.0.0`
+- `<=1.0.0` Less than or equal to `1.0.0`
+- `>1.0.0` Greater than `1.0.0`
+- `>=1.0.0` Greater than or equal to `1.0.0`
+- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
+- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
+
+Note that spaces between the operator and the version will be gracefully tolerated.
+
+A `Range` can combine multiple conditions, separated by spaces:
+
+Ranges can be linked by logical AND:
+
+  - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
+  - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
+
+Ranges can also be linked by logical OR:
+
+  - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
+
+AND has a higher precedence than OR. It's not possible to use brackets.
+
+Ranges can be combined by both AND and OR
+
+  - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
+
+Range usage:
+
+```
+v, err := semver.Parse("1.2.3")
+expectedRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
+if expectedRange(v) {
+    // valid
+}
+
+```
+
+Example
+-----
+
+Have a look at full examples in [examples/main.go](examples/main.go)
+
+```go
+import "github.com/blang/semver"
+
+v, err := semver.Make("0.0.1-alpha.preview+123.github")
+fmt.Printf("Major: %d\n", v.Major)
+fmt.Printf("Minor: %d\n", v.Minor)
+fmt.Printf("Patch: %d\n", v.Patch)
+fmt.Printf("Pre: %s\n", v.Pre)
+fmt.Printf("Build: %s\n", v.Build)
+
+// Prerelease versions array
+if len(v.Pre) > 0 {
+    fmt.Println("Prerelease versions:")
+    for i, pre := range v.Pre {
+        fmt.Printf("%d: %q\n", i, pre)
+    }
+}
+
+// Build meta data array
+if len(v.Build) > 0 {
+    fmt.Println("Build meta data:")
+    for i, build := range v.Build {
+        fmt.Printf("%d: %q\n", i, build)
+    }
+}
+
+v001, err := semver.Make("0.0.1")
+// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
+v001.GT(v) == true
+v.LT(v001) == true
+v.GTE(v) == true
+v.LTE(v) == true
+
+// Or use v.Compare(v2) for comparisons (-1, 0, 1):
+v001.Compare(v) == 1
+v.Compare(v001) == -1
+v.Compare(v) == 0
+
+// Manipulate Version in place:
+v.Pre[0], err = semver.NewPRVersion("beta")
+if err != nil {
+    fmt.Printf("Error parsing pre release version: %q", err)
+}
+
+fmt.Println("\nValidate versions:")
+v.Build[0] = "?"
+
+err = v.Validate()
+if err != nil {
+    fmt.Printf("Validation failed: %s\n", err)
+}
+```
+
+
+Benchmarks
+-----
+
+    BenchmarkParseSimple-4         5000000     390 ns/op     48 B/op   1 allocs/op
+    BenchmarkParseComplex-4        1000000    1813 ns/op    256 B/op   7 allocs/op
+    BenchmarkParseAverage-4        1000000    1171 ns/op    163 B/op   4 allocs/op
+    BenchmarkStringSimple-4       20000000     119 ns/op     16 B/op   1 allocs/op
+    BenchmarkStringLarger-4       10000000     206 ns/op     32 B/op   2 allocs/op
+    BenchmarkStringComplex-4       5000000     324 ns/op     80 B/op   3 allocs/op
+    BenchmarkStringAverage-4       5000000     273 ns/op     53 B/op   2 allocs/op
+    BenchmarkValidateSimple-4    200000000    9.33 ns/op      0 B/op   0 allocs/op
+    BenchmarkValidateComplex-4     3000000     469 ns/op      0 B/op   0 allocs/op
+    BenchmarkValidateAverage-4     5000000     256 ns/op      0 B/op   0 allocs/op
+    BenchmarkCompareSimple-4     100000000    11.8 ns/op      0 B/op   0 allocs/op
+    BenchmarkCompareComplex-4     50000000    30.8 ns/op      0 B/op   0 allocs/op
+    BenchmarkCompareAverage-4     30000000    41.5 ns/op      0 B/op   0 allocs/op
+    BenchmarkSort-4                3000000     419 ns/op    256 B/op   2 allocs/op
+    BenchmarkRangeParseSimple-4    2000000     850 ns/op    192 B/op   5 allocs/op
+    BenchmarkRangeParseAverage-4   1000000    1677 ns/op    400 B/op  10 allocs/op
+    BenchmarkRangeParseComplex-4    300000    5214 ns/op   1440 B/op  30 allocs/op
+    BenchmarkRangeMatchSimple-4   50000000    25.6 ns/op      0 B/op   0 allocs/op
+    BenchmarkRangeMatchAverage-4  30000000    56.4 ns/op      0 B/op   0 allocs/op
+    BenchmarkRangeMatchComplex-4  10000000     153 ns/op      0 B/op   0 allocs/op
+
+See benchmark cases at [semver_test.go](semver_test.go)
+
+
+Motivation
+-----
+
+I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
+
+
+Contribution
+-----
+
+Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
+
+
+License
+-----
+
+See [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go
new file mode 100644
index 000000000..a74bf7c44
--- /dev/null
+++ b/vendor/github.com/blang/semver/json.go
@@ -0,0 +1,23 @@
+package semver
+
+import (
+	"encoding/json"
+)
+
+// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
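+// For example, the JSON string "1.2.3-beta" decodes into a Version with
+// Major 1, Minor 2, Patch 3 and prerelease identifier "beta".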
+func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json new file mode 100644 index 000000000..1cf8ebdd9 --- /dev/null +++ b/vendor/github.com/blang/semver/package.json @@ -0,0 +1,17 @@ +{ + "author": "blang", + "bugs": { + "URL": "https://github.com/blang/semver/issues", + "url": "https://github.com/blang/semver/issues" + }, + "gx": { + "dvcsimport": "github.com/blang/semver" + }, + "gxVersion": "0.10.0", + "language": "go", + "license": "MIT", + "name": "semver", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "3.5.1" +} + diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go new file mode 100644 index 000000000..fca406d47 --- /dev/null +++ b/vendor/github.com/blang/semver/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1") // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. +func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned. 
+//
+// Valid ranges are:
+//   - "<1.0.0"
+//   - "<=1.0.0"
+//   - ">1.0.0"
+//   - ">=1.0.0"
+//   - "1.0.0", "=1.0.0", "==1.0.0"
+//   - "!1.0.0", "!=1.0.0"
+//
+// A Range can consist of multiple conditions separated by spaces:
+// Ranges can be linked by logical AND:
+//   - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
+//   - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
+//
+// Ranges can also be linked by logical OR:
+//   - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
+//
+// AND has a higher precedence than OR. It's not possible to use brackets.
+//
+// Ranges can be combined by both AND and OR
+//
+//   - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
+func ParseRange(s string) (Range, error) {
+	parts := splitAndTrim(s)
+	orParts, err := splitORParts(parts)
+	if err != nil {
+		return nil, err
+	}
+	expandedParts, err := expandWildcardVersion(orParts)
+	if err != nil {
+		return nil, err
+	}
+	var orFn Range
+	for _, p := range expandedParts {
+		var andFn Range
+		for _, ap := range p {
+			opStr, vStr, err := splitComparatorVersion(ap)
+			if err != nil {
+				return nil, err
+			}
+			vr, err := buildVersionRange(opStr, vStr)
+			if err != nil {
+				return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
+			}
+			rf := vr.rangeFunc()
+
+			// Set function
+			if andFn == nil {
+				andFn = rf
+			} else { // Combine with existing function
+				andFn = andFn.AND(rf)
+			}
+		}
+		if orFn == nil {
+			orFn = andFn
+		} else {
+			orFn = orFn.OR(andFn)
+		}
+
+	}
+	return orFn, nil
+}
+
+// splitORParts splits the already cleaned parts by '||'.
+// Checks for invalid positions of the operator and returns an
+// error if found.
+func splitORParts(parts []string) ([][]string, error) {
+	var ORparts [][]string
+	last := 0
+	for i, p := range parts {
+		if p == "||" {
+			if i == 0 {
+				return nil, fmt.Errorf("First element in range is '||'")
+			}
+			ORparts = append(ORparts, parts[last:i])
+			last = i + 1
+		}
+	}
+	if last == len(parts) {
+		return nil, fmt.Errorf("Last element in range is '||'")
+	}
+	ORparts = append(ORparts, parts[last:])
+	return ORparts, nil
+}
+
+// buildVersionRange takes an operator and a version string
+// and builds a versionRange, or returns an error.
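+// For example, buildVersionRange(">=", "1.2.3") yields a versionRange whose
+// comparator reports true for any version greater than or equal to 1.2.3.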
+func buildVersionRange(opStr, vStr string) (*versionRange, error) {
+	c := parseComparator(opStr)
+	if c == nil {
+		return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
+	}
+	v, err := Parse(vStr)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
+	}
+
+	return &versionRange{
+		v: v,
+		c: c,
+	}, nil
+
+}
+
+// inArray checks if a byte is contained in an array of bytes
+func inArray(s byte, list []byte) bool {
+	for _, el := range list {
+		if el == s {
+			return true
+		}
+	}
+	return false
+}
+
+// splitAndTrim splits a range string by spaces and cleans whitespaces
+func splitAndTrim(s string) (result []string) {
+	last := 0
+	var lastChar byte
+	excludeFromSplit := []byte{'>', '<', '='}
+	for i := 0; i < len(s); i++ {
+		if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
+			if last < i-1 {
+				result = append(result, s[last:i])
+			}
+			last = i + 1
+		} else if s[i] != ' ' {
+			lastChar = s[i]
+		}
+	}
+	if last < len(s)-1 {
+		result = append(result, s[last:])
+	}
+
+	for i, v := range result {
+		result[i] = strings.Replace(v, " ", "", -1)
+	}
+
+	// parts := strings.Split(s, " ")
+	// for _, x := range parts {
+	//	if s := strings.TrimSpace(x); len(s) != 0 {
+	//		result = append(result, s)
+	//	}
+	// }
+	return
+}
+
+// splitComparatorVersion splits the comparator from the version.
+// Input must be free of leading or trailing spaces.
+func splitComparatorVersion(s string) (string, string, error) {
+	i := strings.IndexFunc(s, unicode.IsDigit)
+	if i == -1 {
+		return "", "", fmt.Errorf("Could not get version from string: %q", s)
+	}
+	return strings.TrimSpace(s[0:i]), s[i:], nil
+}
+
+// getWildcardType will return the type of wildcard that the
+// passed version contains
+func getWildcardType(vStr string) wildcardType {
+	parts := strings.Split(vStr, ".")
+	nparts := len(parts)
+	wildcard := parts[nparts-1]
+
+	possibleWildcardType := wildcardTypefromInt(nparts)
+	if wildcard == "x" {
+		return possibleWildcardType
+	}
+
+	return noneWildcard
+}
+
+// createVersionFromWildcard will convert a wildcard version
+// into a regular version, replacing 'x's with '0's, handling
+// special cases like '1.x.x' and '1.x'
+func createVersionFromWildcard(vStr string) string {
+	// handle 1.x.x
+	vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
+	vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
+	parts := strings.Split(vStr2, ".")
+
+	// handle 1.x
+	if len(parts) == 2 {
+		return vStr2 + ".0"
+	}
+
+	return vStr2
+}
+
+// incrementMajorVersion will increment the major version
+// of the passed version
+func incrementMajorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return "", err
+	}
+	parts[0] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// incrementMinorVersion will increment the minor version
+// of the passed version
+func incrementMinorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[1])
+	if err != nil {
+		return "", err
+	}
+	parts[1] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// expandWildcardVersion will expand wildcards inside versions
+// following these rules:
+//
+//  * when dealing with patch wildcards:
+//    >= 1.2.x will become >= 1.2.0
+//    <= 1.2.x will become < 1.3.0
+//    > 1.2.x will become >= 1.3.0
+//    < 1.2.x will become < 1.2.0
+//    != 1.2.x will become < 1.2.0 >=
1.3.0 +// +// * when dealing with minor wildcards: +// >= 1.x will become >= 1.0.0 +// <= 1.x will become < 2.0.0 +// > 1.x will become >= 2.0.0 +// < 1.0 will become < 1.0.0 +// != 1.x will become < 1.0.0 >= 2.0.0 +// +// * when dealing with wildcards without +// version operator: +// 1.2.x will become >= 1.2.0 < 1.3.0 +// 1.x will become >= 1.0.0 < 2.0.0 +func expandWildcardVersion(parts [][]string) ([][]string, error) { + var expandedParts [][]string + for _, p := range parts { + var newParts []string + for _, ap := range p { + if strings.Index(ap, "x") != -1 { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + + versionWildcardType := getWildcardType(vStr) + flatVersion := createVersionFromWildcard(vStr) + + var resultOperator string + var shouldIncrementVersion bool + switch opStr { + case ">": + resultOperator = ">=" + shouldIncrementVersion = true + case ">=": + resultOperator = ">=" + case "<": + resultOperator = "<" + case "<=": + resultOperator = "<" + shouldIncrementVersion = true + case "", "=", "==": + newParts = append(newParts, ">="+flatVersion) + resultOperator = "<" + shouldIncrementVersion = true + case "!=", "!": + newParts = append(newParts, "<"+flatVersion) + resultOperator = ">=" + shouldIncrementVersion = true + } + + var resultVersion string + if shouldIncrementVersion { + switch versionWildcardType { + case patchWildcard: + resultVersion, _ = incrementMinorVersion(flatVersion) + case minorWildcard: + resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go new file mode 100644 index 000000000..8ee0842e6 --- /dev/null +++ b/vendor/github.com/blang/semver/semver.go @@ -0,0 +1,418 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string //No Precendence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) + + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...) 
+ } + } + + if len(v.Build) > 0 { + b = append(b, '+') + b = append(b, v.Build[0]...) + + for _, build := range v.Build[1:] { + b = append(b, '.') + b = append(b, build...) + } + } + + return string(b) +} + +// Equals checks if v is equal to o. +func (v Version) Equals(o Version) bool { + return (v.Compare(o) == 0) +} + +// EQ checks if v is equal to o. +func (v Version) EQ(o Version) bool { + return (v.Compare(o) == 0) +} + +// NE checks if v is not equal to o. +func (v Version) NE(o Version) bool { + return (v.Compare(o) != 0) +} + +// GT checks if v is greater than o. +func (v Version) GT(o Version) bool { + return (v.Compare(o) == 1) +} + +// GTE checks if v is greater than or equal to o. +func (v Version) GTE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// GE checks if v is greater than or equal to o. +func (v Version) GE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// LT checks if v is less than o. +func (v Version) LT(o Version) bool { + return (v.Compare(o) == -1) +} + +// LTE checks if v is less than or equal to o. +func (v Version) LTE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// LE checks if v is less than or equal to o. +func (v Version) LE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// Compare compares Versions v to o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v Version) Compare(o Version) int { + if v.Major != o.Major { + if v.Major > o.Major { + return 1 + } + return -1 + } + if v.Minor != o.Minor { + if v.Minor > o.Minor { + return 1 + } + return -1 + } + if v.Patch != o.Patch { + if v.Patch > o.Patch { + return 1 + } + return -1 + } + + // Quick comparison if a version has no prerelease versions + if len(v.Pre) == 0 && len(o.Pre) == 0 { + return 0 + } else if len(v.Pre) == 0 && len(o.Pre) > 0 { + return 1 + } else if len(v.Pre) > 0 && len(o.Pre) == 0 { + return -1 + } + + i := 0 + for ; i < len(v.Pre) && i < len(o.Pre); i++ { + if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { + continue + } else if comp == 1 { + return 1 + } else { + return -1 + } + } + + // If all pr versions are the equal but one has further prversion, this one greater + if i == len(v.Pre) && i == len(o.Pre) { + return 0 + } else if i == len(v.Pre) && i < len(o.Pre) { + return -1 + } else { + return 1 + } + +} + +// Validate validates v and returns error in case +func (v Version) Validate() error { + // Major, Minor, Patch already validated using uint64 + + for _, pre := range v.Pre { + if !pre.IsNum { //Numeric prerelease versions already uint64 + if len(pre.VersionStr) == 0 { + return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) + } + if !containsOnly(pre.VersionStr, alphanum) { + return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) + } + } + } + + for _, build := range v.Build { + if len(build) == 0 { + return fmt.Errorf("Build meta data can not be empty %q", build) + } + if !containsOnly(build, alphanum) { + return fmt.Errorf("Invalid character(s) found in build meta data %q", build) + } + } + + return nil +} + +// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error +func New(s string) (vp *Version, err error) { + v, err := Parse(s) + vp = &v + return +} + +// Make is an alias for Parse, parses version string and returns a validated Version or error +func Make(s string) (Version, error) { + return Parse(s) +} + +// ParseTolerant allows for certain version specifications that do not strictly adhere to semver +// 
specs to be parsed by this library. It does so by normalizing versions before passing them to +// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions +// with only major and minor components specified +func ParseTolerant(s string) (Version, error) { + s = strings.TrimSpace(s) + s = strings.TrimPrefix(s, "v") + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) < 3 { + if strings.ContainsAny(parts[len(parts)-1], "+-") { + return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") + } + for len(parts) < 3 { + parts = append(parts, "0") + } + s = strings.Join(parts, ".") + } + + return Parse(s) +} + +// Parse parses version string and returns a validated Version or error +func Parse(s string) (Version, error) { + if len(s) == 0 { + return Version{}, errors.New("Version string empty") + } + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) != 3 { + return Version{}, errors.New("No Major.Minor.Patch elements found") + } + + // Major + if !containsOnly(parts[0], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) + } + if hasLeadingZeroes(parts[0]) { + return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) + } + major, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return Version{}, err + } + + // Minor + if !containsOnly(parts[1], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) + } + if hasLeadingZeroes(parts[1]) { + return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) + } + minor, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return Version{}, err + } + + v := Version{} + v.Major = major + v.Minor = minor + + var build, prerelease []string + patchStr := parts[2] + + if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { + build = strings.Split(patchStr[buildIndex+1:], ".") + patchStr = patchStr[:buildIndex] + } + + if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { + prerelease = strings.Split(patchStr[preIndex+1:], ".") + patchStr = patchStr[:preIndex] + } + + if !containsOnly(patchStr, numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) + } + if hasLeadingZeroes(patchStr) { + return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) + } + patch, err := strconv.ParseUint(patchStr, 10, 64) + if err != nil { + return Version{}, err + } + + v.Patch = patch + + // Prerelease + for _, prstr := range prerelease { + parsedPR, err := NewPRVersion(prstr) + if err != nil { + return Version{}, err + } + v.Pre = append(v.Pre, parsedPR) + } + + // Build meta data + for _, str := range build { + if len(str) == 0 { + return Version{}, errors.New("Build meta data is empty") + } + if !containsOnly(str, alphanum) { + return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) + } + v.Build = append(v.Build, str) + } + + return v, nil +} + +// MustParse is like Parse but panics if the version cannot be parsed. 
+func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go new file mode 100644 index 000000000..e18f88082 --- /dev/null +++ b/vendor/github.com/blang/semver/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. 
+type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go new file mode 100644 index 000000000..eb4d80266 --- /dev/null +++ b/vendor/github.com/blang/semver/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/buildkite/agent/v3/LICENSE.txt b/vendor/github.com/buildkite/agent/v3/LICENSE.txt new file mode 100644 index 000000000..327a143d6 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014-2018 Buildkite Pty Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
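The vendored semver package is complete at this point: strict and tolerant parsing, comparison, wildcard-aware ranges, sorting, and SQL scanning. A minimal usage sketch of the functions added above, assuming the Range predicate type declared earlier in range.go; this is illustrative only and not part of the vendored sources:

package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Parse is strict; ParseTolerant additionally accepts a leading "v"
	// and short forms such as "1.2".
	v := semver.MustParse("1.2.3-beta.1+build.42")
	fmt.Println(v.Major, v.Minor, v.Patch) // 1 2 3

	// Ranges combine space-separated comparators with AND; wildcards
	// expand as documented on expandWildcardVersion, e.g. "1.2.x"
	// behaves like ">=1.2.0 <1.3.0".
	r := semver.MustParseRange(">=1.0.0 <2.0.0")
	fmt.Println(r(v)) // true

	// Versions implements sort.Interface, so Sort orders semantically:
	// 1.10.0 sorts after 1.2.0, unlike a lexicographic string sort.
	vs := []semver.Version{semver.MustParse("1.10.0"), semver.MustParse("1.2.0")}
	semver.Sort(vs)
	fmt.Println(vs) // [1.2.0 1.10.0]
}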
diff --git a/vendor/github.com/buildkite/agent/v3/api/agents.go b/vendor/github.com/buildkite/agent/v3/api/agents.go new file mode 100644 index 000000000..64bd26144 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/agents.go @@ -0,0 +1,69 @@ +package api + +import "context" + +// AgentRegisterRequest is a call to register on the Buildkite Agent API +type AgentRegisterRequest struct { + Name string `json:"name"` + Hostname string `json:"hostname"` + OS string `json:"os"` + Arch string `json:"arch"` + ScriptEvalEnabled bool `json:"script_eval_enabled"` + IgnoreInDispatches bool `json:"ignore_in_dispatches"` + Priority string `json:"priority,omitempty"` + Version string `json:"version"` + Build string `json:"build"` + Tags []string `json:"meta_data"` + PID int `json:"pid,omitempty"` + MachineID string `json:"machine_id,omitempty"` + Features []string `json:"features"` +} + +// AgentRegisterResponse is the response from the Buildkite Agent API +type AgentRegisterResponse struct { + UUID string `json:"id"` + Name string `json:"name"` + AccessToken string `json:"access_token"` + Endpoint string `json:"endpoint"` + PingInterval int `json:"ping_interval"` + JobStatusInterval int `json:"job_status_interval"` + HeartbeatInterval int `json:"heartbeat_interval"` + Tags []string `json:"meta_data"` +} + +// Registers the agent against the Buildkite Agent API. The client for this +// call must be authenticated using an Agent Registration Token +func (c *Client) Register(ctx context.Context, regReq *AgentRegisterRequest) (*AgentRegisterResponse, *Response, error) { + req, err := c.newRequest(ctx, "POST", "register", regReq) + if err != nil { + return nil, nil, err + } + + a := new(AgentRegisterResponse) + resp, err := c.doRequest(req, a) + if err != nil { + return nil, resp, err + } + + return a, resp, err +} + +// Connects the agent to the Buildkite Agent API +func (c *Client) Connect(ctx context.Context) (*Response, error) { + req, err := c.newRequest(ctx, "POST", "connect", nil) + if err != nil { + return nil, err + } + + return c.doRequest(req, nil) +} + +// Disconnects the agent to the Buildkite Agent API +func (c *Client) Disconnect(ctx context.Context) (*Response, error) { + req, err := c.newRequest(ctx, "POST", "disconnect", nil) + if err != nil { + return nil, err + } + + return c.doRequest(req, nil) +} diff --git a/vendor/github.com/buildkite/agent/v3/api/annotations.go b/vendor/github.com/buildkite/agent/v3/api/annotations.go new file mode 100644 index 000000000..31c7e9bea --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/annotations.go @@ -0,0 +1,38 @@ +package api + +import ( + "context" + "fmt" +) + +// Annotation represents a Buildkite Agent API Annotation +type Annotation struct { + Body string `json:"body,omitempty"` + Context string `json:"context,omitempty"` + Style string `json:"style,omitempty"` + Append bool `json:"append,omitempty"` +} + +// Annotate a build in the Buildkite UI +func (c *Client) Annotate(ctx context.Context, jobId string, annotation *Annotation) (*Response, error) { + u := fmt.Sprintf("jobs/%s/annotations", jobId) + + req, err := c.newRequest(ctx, "POST", u, annotation) + if err != nil { + return nil, err + } + + return c.doRequest(req, nil) +} + +// Remove an annotation from a build +func (c *Client) AnnotationRemove(ctx context.Context, jobId string, context string) (*Response, error) { + u := fmt.Sprintf("jobs/%s/annotations/%s", jobId, context) + + req, err := c.newRequest(ctx, "DELETE", u, nil) + if err != nil { + return nil, err + } + + 
return c.doRequest(req, nil) +} diff --git a/vendor/github.com/buildkite/agent/v3/api/artifacts.go b/vendor/github.com/buildkite/agent/v3/api/artifacts.go new file mode 100644 index 000000000..df2a84f86 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/artifacts.go @@ -0,0 +1,154 @@ +package api + +import ( + "context" + "fmt" + "time" +) + +// Artifact represents an artifact on the Buildkite Agent API +type Artifact struct { + // The ID of the artifact. The ID is assigned to it after a successful + // batch creation + ID string `json:"id"` + + // The path to the artifact relative to the working directory + Path string `json:"path"` + + // The absolute path to the artifact + AbsolutePath string `json:"absolute_path"` + + // The glob path used to find this artifact + GlobPath string `json:"glob_path"` + + // The size of the file in bytes + FileSize int64 `json:"file_size"` + + // A SHA-1 hash of the uploaded file + Sha1Sum string `json:"sha1sum"` + + // A SHA-2 256-bit hash of the uploaded file, possibly empty + Sha256Sum string `json:"sha256sum"` + + // ID of the job that created this artifact (from API) + JobID string `json:"job_id"` + + // UTC timestamp this artifact was considered created + CreatedAt time.Time `json:"created_at"` + + // The HTTP url to this artifact once it's been uploaded + URL string `json:"url,omitempty"` + + // The destination specified on the command line when this file was + // uploaded + UploadDestination string `json:"upload_destination,omitempty"` + + // Information on how to upload this artifact. + UploadInstructions *ArtifactUploadInstructions `json:"-"` + + // A specific Content-Type to use on upload + ContentType string `json:"-"` +} + +type ArtifactBatch struct { + ID string `json:"id"` + Artifacts []*Artifact `json:"artifacts"` + UploadDestination string `json:"upload_destination"` +} + +type ArtifactUploadInstructions struct { + Data map[string]string `json:"data"` + Action struct { + URL string `json:"url,omitempty"` + Method string `json:"method"` + Path string `json:"path"` + FileInput string `json:"file_input"` + } +} + +type ArtifactBatchCreateResponse struct { + ID string `json:"id"` + ArtifactIDs []string `json:"artifact_ids"` + UploadInstructions *ArtifactUploadInstructions `json:"upload_instructions"` +} + +// ArtifactSearchOptions specifies the optional parameters to the +// ArtifactsService.Search method. +type ArtifactSearchOptions struct { + Query string `url:"query,omitempty"` + Scope string `url:"scope,omitempty"` + State string `url:"state,omitempty"` + IncludeRetriedJobs bool `url:"include_retried_jobs,omitempty"` + IncludeDuplicates bool `url:"include_duplicates,omitempty"` +} + +type ArtifactBatchUpdateArtifact struct { + ID string `json:"id"` + State string `json:"state"` +} + +type ArtifactBatchUpdateRequest struct { + Artifacts []*ArtifactBatchUpdateArtifact `json:"artifacts"` +} + +// CreateArtifacts takes a slice of artifacts, and creates them on Buildkite as a batch. 
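A minimal sketch of how these batch types fit the CreateArtifacts and UpdateArtifacts methods that follow: create the batch, upload each file per the returned instructions, then report per-artifact states. The uploadAndFinish helper and the "finished" state name are assumptions for illustration, not part of the vendored API:

package main

import (
	"context"

	"github.com/buildkite/agent/v3/api"
)

// uploadAndFinish registers a batch of artifacts, uploads them, and then
// reports a final state for each one (error handling trimmed for brevity).
func uploadAndFinish(ctx context.Context, c *api.Client, jobID string, artifacts []*api.Artifact) error {
	batch := &api.ArtifactBatch{ID: api.NewUUID(), Artifacts: artifacts}
	created, _, err := c.CreateArtifacts(ctx, jobID, batch)
	if err != nil {
		return err
	}

	// ... upload each file using created.UploadInstructions ...

	states := make(map[string]string, len(created.ArtifactIDs))
	for _, id := range created.ArtifactIDs {
		states[id] = "finished" // assumed state name, for illustration
	}
	_, err = c.UpdateArtifacts(ctx, jobID, states)
	return err
}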
+func (c *Client) CreateArtifacts(ctx context.Context, jobId string, batch *ArtifactBatch) (*ArtifactBatchCreateResponse, *Response, error) { + u := fmt.Sprintf("jobs/%s/artifacts", jobId) + + req, err := c.newRequest(ctx, "POST", u, batch) + if err != nil { + return nil, nil, err + } + + createResponse := new(ArtifactBatchCreateResponse) + resp, err := c.doRequest(req, createResponse) + if err != nil { + return nil, resp, err + } + + return createResponse, resp, err +} + +// Updates a particular artifact +func (c *Client) UpdateArtifacts(ctx context.Context, jobId string, artifactStates map[string]string) (*Response, error) { + u := fmt.Sprintf("jobs/%s/artifacts", jobId) + payload := ArtifactBatchUpdateRequest{} + + for id, state := range artifactStates { + payload.Artifacts = append(payload.Artifacts, &ArtifactBatchUpdateArtifact{id, state}) + } + + req, err := c.newRequest(ctx, "PUT", u, payload) + if err != nil { + return nil, err + } + + resp, err := c.doRequest(req, nil) + if err != nil { + return resp, err + } + + return resp, err +} + +// SearchArtifacts searches Buildkite for a set of artifacts +func (c *Client) SearchArtifacts(ctx context.Context, buildId string, opt *ArtifactSearchOptions) ([]*Artifact, *Response, error) { + u := fmt.Sprintf("builds/%s/artifacts/search", buildId) + u, err := addOptions(u, opt) + if err != nil { + return nil, nil, err + } + + req, err := c.newRequest(ctx, "GET", u, nil) + if err != nil { + return nil, nil, err + } + + a := []*Artifact{} + resp, err := c.doRequest(req, &a) + if err != nil { + return nil, resp, err + } + + return a, resp, err +} diff --git a/vendor/github.com/buildkite/agent/v3/api/auth.go b/vendor/github.com/buildkite/agent/v3/api/auth.go new file mode 100644 index 000000000..1fb28da10 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/auth.go @@ -0,0 +1,37 @@ +package api + +import ( + "fmt" + "net/http" +) + +type canceler interface { + CancelRequest(*http.Request) +} + +// authenticatedTransport manages injection of the API token +type authenticatedTransport struct { + // The Token used for authentication. This can either the be + // organizations registration token, or the agents access token. + Token string + + // Delegate is the underlying HTTP transport + Delegate http.RoundTripper +} + +// RoundTrip invoked each time a request is made +func (t authenticatedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Token == "" { + return nil, fmt.Errorf("Invalid token, empty string supplied") + } + + req.Header.Set("Authorization", fmt.Sprintf("Token %s", t.Token)) + + return t.Delegate.RoundTrip(req) +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *authenticatedTransport) CancelRequest(req *http.Request) { + cancelableTransport := t.Delegate.(canceler) + cancelableTransport.CancelRequest(req) +} diff --git a/vendor/github.com/buildkite/agent/v3/api/chunks.go b/vendor/github.com/buildkite/agent/v3/api/chunks.go new file mode 100644 index 000000000..71a1a1b33 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/chunks.go @@ -0,0 +1,41 @@ +package api + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" +) + +// Chunk represents a Buildkite Agent API Chunk +type Chunk struct { + Data []byte + Sequence int + Offset int + Size int +} + +// Uploads the chunk to the Buildkite Agent API. This request sends the +// compressed log directly as a request body. 
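A minimal calling-side sketch for the chunk upload below: a log is sliced into sequential pieces whose size would normally come from Job.ChunksMaxSizeBytes. The uploadLog helper is hypothetical, not part of the vendored API; UploadChunk itself handles the gzip compression.

package main

import (
	"context"

	"github.com/buildkite/agent/v3/api"
)

// uploadLog streams a job log to the API in fixed-size chunks, tracking
// the sequence number and byte offset of each piece.
func uploadLog(ctx context.Context, c *api.Client, jobID string, log []byte, maxSize int) error {
	for seq, off := 0, 0; off < len(log); seq++ {
		end := off + maxSize
		if end > len(log) {
			end = len(log)
		}
		chunk := &api.Chunk{Data: log[off:end], Sequence: seq, Offset: off, Size: end - off}
		if _, err := c.UploadChunk(ctx, jobID, chunk); err != nil {
			return err
		}
		off = end
	}
	return nil
}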
+func (c *Client) UploadChunk(ctx context.Context, jobId string, chunk *Chunk) (*Response, error) { + // Create a compressed buffer of the log content + body := &bytes.Buffer{} + gzipper := gzip.NewWriter(body) + gzipper.Write(chunk.Data) + if err := gzipper.Close(); err != nil { + return nil, err + } + + // Pass most params as query + u := fmt.Sprintf("jobs/%s/chunks?sequence=%d&offset=%d&size=%d", jobId, chunk.Sequence, chunk.Offset, chunk.Size) + req, err := c.newFormRequest(ctx, "POST", u, body) + if err != nil { + return nil, err + } + + // Mark the request as a direct compressed log chunk + req.Header.Add("Content-Type", "text/plain") + req.Header.Add("Content-Encoding", "gzip") + + return c.doRequest(req, nil) +} diff --git a/vendor/github.com/buildkite/agent/v3/api/client.go b/vendor/github.com/buildkite/agent/v3/api/client.go new file mode 100644 index 000000000..b3c6d89a7 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/client.go @@ -0,0 +1,366 @@ +package api + +//go:generate go run github.com/rjeczalik/interfaces/cmd/interfacer@v0.3.0 -for github.com/buildkite/agent/v3/api.Client -as agent.APIClient -o ../agent/api.go + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "github.com/buildkite/agent/v3/logger" + "github.com/google/go-querystring/query" +) + +const ( + defaultEndpoint = "https://agent.buildkite.com/" + defaultUserAgent = "buildkite-agent/api" +) + +// Config is configuration for the API Client +type Config struct { + // Endpoint for API requests. Defaults to the public Buildkite Agent API. + // The URL should always be specified with a trailing slash. + Endpoint string + + // The authentication token to use, either a registration or access token + Token string + + // User agent used when communicating with the Buildkite Agent API. + UserAgent string + + // If true, only HTTP2 is disabled + DisableHTTP2 bool + + // If true, requests and responses will be dumped and set to the logger + DebugHTTP bool + + // The http client used, leave nil for the default + HTTPClient *http.Client +} + +// A Client manages communication with the Buildkite Agent API. +type Client struct { + // The client configuration + conf Config + + // HTTP client used to communicate with the API. + client *http.Client + + // The logger used + logger logger.Logger +} + +// NewClient returns a new Buildkite Agent API Client. 
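A minimal bootstrap sketch for the constructor below: build a console logger, construct a client from a registration token, register the agent, and promote to an access-token client via FromAgentRegisterResponse. The connect helper, agent name, and token are placeholders for illustration:

package main

import (
	"context"
	"os"

	"github.com/buildkite/agent/v3/api"
	"github.com/buildkite/agent/v3/logger"
)

// connect registers the agent and re-creates the client with the access
// token (and possibly new endpoint) the API returns.
func connect(ctx context.Context, regToken string) (*api.Client, error) {
	l := logger.NewConsoleLogger(logger.NewTextPrinter(os.Stderr), os.Exit)
	c := api.NewClient(l, api.Config{Token: regToken}) // Endpoint defaults to the public API

	reg, _, err := c.Register(ctx, &api.AgentRegisterRequest{Name: "example-agent"})
	if err != nil {
		return nil, err
	}
	return c.FromAgentRegisterResponse(reg), nil
}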
+func NewClient(l logger.Logger, conf Config) *Client { + if conf.Endpoint == "" { + conf.Endpoint = defaultEndpoint + } + + if conf.UserAgent == "" { + conf.UserAgent = defaultUserAgent + } + + httpClient := conf.HTTPClient + if conf.HTTPClient == nil { + t := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DisableCompression: false, + DisableKeepAlives: false, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 30 * time.Second, + } + + if conf.DisableHTTP2 { + t.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + + httpClient = &http.Client{ + Timeout: 60 * time.Second, + Transport: &authenticatedTransport{ + Token: conf.Token, + Delegate: t, + }, + } + } + + return &Client{ + logger: l, + client: httpClient, + conf: conf, + } +} + +// Config returns the internal configuration for the Client +func (c *Client) Config() Config { + return c.conf +} + +// FromAgentRegisterResponse returns a new instance using the access token and endpoint +// from the registration response +func (c *Client) FromAgentRegisterResponse(resp *AgentRegisterResponse) *Client { + conf := c.conf + + // Override the registration token with the access token + conf.Token = resp.AccessToken + + // If Buildkite told us to use a new Endpoint, respect that + if resp.Endpoint != "" { + conf.Endpoint = resp.Endpoint + } + + return NewClient(c.logger, conf) +} + +// FromPing returns a new instance using a new endpoint from a ping response +func (c *Client) FromPing(resp *Ping) *Client { + conf := c.conf + + // If Buildkite told us to use a new Endpoint, respect that + if resp.Endpoint != "" { + conf.Endpoint = resp.Endpoint + } + + return NewClient(c.logger, conf) +} + +type Header struct { + Name string + Value string +} + +// NewRequest creates an API request. A relative URL can be provided in urlStr, +// in which case it is resolved relative to the BaseURL of the Client. +// Relative URLs should always be specified without a preceding slash. If +// specified, the value pointed to by body is JSON encoded and included as the +// request body. +func (c *Client) newRequest( + ctx context.Context, + method, urlStr string, + body any, + headers ...Header, +) (*http.Request, error) { + u := joinURLPath(c.conf.Endpoint, urlStr) + + buf := new(bytes.Buffer) + if body != nil { + err := json.NewEncoder(buf).Encode(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequestWithContext(ctx, method, u, buf) + if err != nil { + return nil, err + } + + req.Header.Add("User-Agent", c.conf.UserAgent) + + // If our context has a timeout/deadline, tell the server how long is remaining. + // This may allow the server to configure its own timeouts accordingly. + if deadline, ok := ctx.Deadline(); ok { + ms := time.Until(deadline).Milliseconds() + if ms > 0 { + req.Header.Add("Buildkite-Timeout-Milliseconds", strconv.FormatInt(ms, 10)) + } + } + + for _, header := range headers { + req.Header.Add(header.Name, header.Value) + } + + if body != nil { + req.Header.Add("Content-Type", "application/json") + } + + return req, nil +} + +// NewFormRequest creates an multi-part form request. A relative URL can be +// provided in urlStr, in which case it is resolved relative to the UploadURL +// of the Client. Relative URLs should always be specified without a preceding +// slash. 
+func (c *Client) newFormRequest(ctx context.Context, method, urlStr string, body *bytes.Buffer) (*http.Request, error) {
+	u := joinURLPath(c.conf.Endpoint, urlStr)
+
+	req, err := http.NewRequestWithContext(ctx, method, u, body)
+	if err != nil {
+		return nil, err
+	}
+
+	if c.conf.UserAgent != "" {
+		req.Header.Add("User-Agent", c.conf.UserAgent)
+	}
+
+	return req, nil
+}
+
+// Response is a Buildkite Agent API response. This wraps the standard
+// http.Response.
+type Response struct {
+	*http.Response
+}
+
+// newResponse creates a new Response for the provided http.Response.
+func newResponse(r *http.Response) *Response {
+	response := &Response{Response: r}
+	return response
+}
+
+// doRequest sends an API request and returns the API response. The API
+// response is JSON decoded and stored in the value pointed to by v, or
+// returned as an error if an API error has occurred. If v implements the
+// io.Writer interface, the raw response body will be written to v, without
+// attempting to first decode it.
+func (c *Client) doRequest(req *http.Request, v any) (*Response, error) {
+	var err error
+
+	if c.conf.DebugHTTP {
+		// If the request is a multi-part form, then it's probably a
+		// file upload, in which case we don't want to spew the file
+		// contents out into the debug log (especially if it's been
+		// gzipped)
+		var requestDump []byte
+		if strings.Contains(req.Header.Get("Content-Type"), "multipart/form-data") {
+			requestDump, err = httputil.DumpRequestOut(req, false)
+		} else {
+			requestDump, err = httputil.DumpRequestOut(req, true)
+		}
+
+		if err != nil {
+			c.logger.Debug("ERR: %s\n%s", err, string(requestDump))
+		} else {
+			c.logger.Debug("%s", string(requestDump))
+		}
+	}
+
+	ts := time.Now()
+
+	c.logger.Debug("%s %s", req.Method, req.URL)
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	c.logger.WithFields(
+		logger.StringField("proto", resp.Proto),
+		logger.IntField("status", resp.StatusCode),
+		logger.DurationField("Δ", time.Since(ts)),
+	).Debug("↳ %s %s", req.Method, req.URL)
+
+	defer resp.Body.Close()
+	defer io.Copy(io.Discard, resp.Body)
+
+	response := newResponse(resp)
+
+	if c.conf.DebugHTTP {
+		responseDump, err := httputil.DumpResponse(resp, true)
+		if err != nil {
+			c.logger.Debug("\nERR: %s\n%s", err, string(responseDump))
+		} else {
+			c.logger.Debug("\n%s", string(responseDump))
+		}
+	}
+
+	err = checkResponse(resp)
+	if err != nil {
+		// even though there was an error, we still return the response
+		// in case the caller wants to inspect it further
+		return response, err
+	}
+
+	if v != nil {
+		if w, ok := v.(io.Writer); ok {
+			io.Copy(w, resp.Body)
+		} else {
+			if strings.Contains(req.Header.Get("Content-Type"), "application/msgpack") {
+				err = errors.New("Msgpack not supported")
+			} else {
+				err = json.NewDecoder(resp.Body).Decode(v)
+			}
+		}
+	}
+
+	return response, err
+}
+
+// ErrorResponse provides a message.
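Non-2xx responses surface as the *ErrorResponse values defined next, so callers can branch on specific status codes with IsErrHavingStatus. A minimal sketch; the jobGone helper is hypothetical:

package main

import (
	"context"
	"net/http"

	"github.com/buildkite/agent/v3/api"
)

// jobGone treats a 404 on the job state as terminal rather than an error
// worth retrying.
func jobGone(ctx context.Context, c *api.Client, jobID string) (bool, error) {
	_, _, err := c.GetJobState(ctx, jobID)
	if err == nil {
		return false, nil
	}
	if api.IsErrHavingStatus(err, http.StatusNotFound) {
		return true, nil
	}
	return false, err
}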
+type ErrorResponse struct { + Response *http.Response // HTTP response that caused this error + Message string `json:"message"` // error message +} + +func (r *ErrorResponse) Error() string { + s := fmt.Sprintf("%v %v: %s", + r.Response.Request.Method, r.Response.Request.URL, + r.Response.Status) + + if r.Message != "" { + s = fmt.Sprintf("%s: %v", s, r.Message) + } + + return s +} + +func IsErrHavingStatus(err error, code int) bool { + var apierr *ErrorResponse + return errors.As(err, &apierr) && apierr.Response.StatusCode == code +} + +func checkResponse(r *http.Response) error { + if c := r.StatusCode; 200 <= c && c <= 299 { + return nil + } + + errorResponse := &ErrorResponse{Response: r} + data, err := io.ReadAll(r.Body) + if err == nil && data != nil { + json.Unmarshal(data, errorResponse) + } + + return errorResponse +} + +// addOptions adds the parameters in opt as URL query parameters to s. opt must +// be a struct whose fields may contain "url" tags. +func addOptions(s string, opt any) (string, error) { + v := reflect.ValueOf(opt) + if v.Kind() == reflect.Ptr && v.IsNil() { + return s, nil + } + + u, err := url.Parse(s) + if err != nil { + return s, err + } + + qs, err := query.Values(opt) + if err != nil { + return s, err + } + + u.RawQuery = qs.Encode() + return u.String(), nil +} + +func joinURLPath(endpoint string, path string) string { + return strings.TrimRight(endpoint, "/") + "/" + strings.TrimLeft(path, "/") +} diff --git a/vendor/github.com/buildkite/agent/v3/api/doc.go b/vendor/github.com/buildkite/agent/v3/api/doc.go new file mode 100644 index 000000000..ebcb63fdc --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/doc.go @@ -0,0 +1,4 @@ +// Package api provides an API client for the Buildkite Pipelines API. +// +// It is intended for internal use by buildkite-agent only. +package api diff --git a/vendor/github.com/buildkite/agent/v3/api/header_times.go b/vendor/github.com/buildkite/agent/v3/api/header_times.go new file mode 100644 index 000000000..5302d7b66 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/header_times.go @@ -0,0 +1,29 @@ +package api + +import ( + "context" + "fmt" +) + +// HeaderTimes represents a set of header times that are associated with a job +// log. +type HeaderTimes struct { + Times map[string]string `json:"header_times"` +} + +// SaveHeaderTimes saves the header times to the job +func (c *Client) SaveHeaderTimes(ctx context.Context, jobId string, headerTimes *HeaderTimes) (*Response, error) { + u := fmt.Sprintf("jobs/%s/header_times", jobId) + + req, err := c.newRequest(ctx, "POST", u, headerTimes) + if err != nil { + return nil, err + } + + resp, err := c.doRequest(req, nil) + if err != nil { + return resp, err + } + + return resp, err +} diff --git a/vendor/github.com/buildkite/agent/v3/api/heartbeats.go b/vendor/github.com/buildkite/agent/v3/api/heartbeats.go new file mode 100644 index 000000000..e4bd7a39b --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/heartbeats.go @@ -0,0 +1,31 @@ +package api + +import ( + "context" + "time" +) + +// Heartbeat represents a Buildkite Agent API Heartbeat +type Heartbeat struct { + SentAt string `json:"sent_at"` + ReceivedAt string `json:"received_at,omitempty"` +} + +// Heartbeat notifies Buildkite that an agent is still connected +func (c *Client) Heartbeat(ctx context.Context) (*Heartbeat, *Response, error) { + // Include the current time in the heartbeat, and include the operating + // systems timezone. 
+ heartbeat := &Heartbeat{SentAt: time.Now().Format(time.RFC3339Nano)} + + req, err := c.newRequest(ctx, "POST", "heartbeat", &heartbeat) + if err != nil { + return nil, nil, err + } + + resp, err := c.doRequest(req, heartbeat) + if err != nil { + return nil, resp, err + } + + return heartbeat, resp, err +} diff --git a/vendor/github.com/buildkite/agent/v3/api/jobs.go b/vendor/github.com/buildkite/agent/v3/api/jobs.go new file mode 100644 index 000000000..4dee5af4c --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/jobs.go @@ -0,0 +1,127 @@ +package api + +import ( + "context" + "fmt" +) + +// Job represents a Buildkite Agent API Job +type Job struct { + ID string `json:"id,omitempty"` + Endpoint string `json:"endpoint"` + State string `json:"state,omitempty"` + Env map[string]string `json:"env,omitempty"` + ChunksMaxSizeBytes int `json:"chunks_max_size_bytes,omitempty"` + Token string `json:"token,omitempty"` + ExitStatus string `json:"exit_status,omitempty"` + Signal string `json:"signal,omitempty"` + SignalReason string `json:"signal_reason,omitempty"` + StartedAt string `json:"started_at,omitempty"` + FinishedAt string `json:"finished_at,omitempty"` + RunnableAt string `json:"runnable_at,omitempty"` + ChunksFailedCount int `json:"chunks_failed_count,omitempty"` +} + +type JobState struct { + State string `json:"state,omitempty"` +} + +type jobStartRequest struct { + StartedAt string `json:"started_at,omitempty"` +} + +type jobFinishRequest struct { + ExitStatus string `json:"exit_status,omitempty"` + Signal string `json:"signal,omitempty"` + SignalReason string `json:"signal_reason,omitempty"` + FinishedAt string `json:"finished_at,omitempty"` + ChunksFailedCount int `json:"chunks_failed_count"` +} + +// GetJobState returns the state of a given job +func (c *Client) GetJobState(ctx context.Context, id string) (*JobState, *Response, error) { + u := fmt.Sprintf("jobs/%s", id) + + req, err := c.newRequest(ctx, "GET", u, nil) + if err != nil { + return nil, nil, err + } + + s := new(JobState) + resp, err := c.doRequest(req, s) + if err != nil { + return nil, resp, err + } + + return s, resp, err +} + +// Acquires a job using its ID +func (c *Client) AcquireJob(ctx context.Context, id string, headers ...Header) (*Job, *Response, error) { + u := fmt.Sprintf("jobs/%s/acquire", id) + + req, err := c.newRequest(ctx, "PUT", u, nil, headers...) + if err != nil { + return nil, nil, err + } + + j := new(Job) + resp, err := c.doRequest(req, j) + if err != nil { + return nil, resp, err + } + + return j, resp, err +} + +// AcceptJob accepts the passed in job. 
Returns the job with its finalized set of +// environment variables (when a job is accepted, the agents environment is +// applied to the job) +func (c *Client) AcceptJob(ctx context.Context, job *Job) (*Job, *Response, error) { + u := fmt.Sprintf("jobs/%s/accept", job.ID) + + req, err := c.newRequest(ctx, "PUT", u, nil) + if err != nil { + return nil, nil, err + } + + j := new(Job) + resp, err := c.doRequest(req, j) + if err != nil { + return nil, resp, err + } + + return j, resp, err +} + +// StartJob starts the passed in job +func (c *Client) StartJob(ctx context.Context, job *Job) (*Response, error) { + u := fmt.Sprintf("jobs/%s/start", job.ID) + + req, err := c.newRequest(ctx, "PUT", u, &jobStartRequest{ + StartedAt: job.StartedAt, + }) + if err != nil { + return nil, err + } + + return c.doRequest(req, nil) +} + +// FinishJob finishes the passed in job +func (c *Client) FinishJob(ctx context.Context, job *Job) (*Response, error) { + u := fmt.Sprintf("jobs/%s/finish", job.ID) + + req, err := c.newRequest(ctx, "PUT", u, &jobFinishRequest{ + FinishedAt: job.FinishedAt, + ExitStatus: job.ExitStatus, + Signal: job.Signal, + SignalReason: job.SignalReason, + ChunksFailedCount: job.ChunksFailedCount, + }) + if err != nil { + return nil, err + } + + return c.doRequest(req, nil) +} diff --git a/vendor/github.com/buildkite/agent/v3/api/meta_data.go b/vendor/github.com/buildkite/agent/v3/api/meta_data.go new file mode 100644 index 000000000..384c2e748 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/meta_data.go @@ -0,0 +1,97 @@ +package api + +import ( + "context" + "errors" + "fmt" +) + +// MetaData represents a Buildkite Agent API MetaData +type MetaData struct { + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` +} + +// MetaDataExists represents a Buildkite Agent API MetaData Exists check +// response +type MetaDataExists struct { + Exists bool `json:"exists"` +} + +// Sets the meta data value +func (c *Client) SetMetaData(ctx context.Context, jobId string, metaData *MetaData) (*Response, error) { + u := fmt.Sprintf("jobs/%s/data/set", jobId) + + req, err := c.newRequest(ctx, "POST", u, metaData) + if err != nil { + return nil, err + } + + return c.doRequest(req, nil) +} + +// Gets the meta data value +func (c *Client) GetMetaData(ctx context.Context, scope, id, key string) (*MetaData, *Response, error) { + if scope != "job" && scope != "build" { + return nil, nil, errors.New("scope must either be job or build") + } + + u := fmt.Sprintf("%ss/%s/data/get", scope, id) + m := &MetaData{Key: key} + + req, err := c.newRequest(ctx, "POST", u, m) + if err != nil { + return nil, nil, err + } + + resp, err := c.doRequest(req, m) + if err != nil { + return nil, resp, err + } + + return m, resp, err +} + +// Returns true if the meta data key has been set, false if it hasn't. 
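A minimal sketch of the meta-data round trip across SetMetaData above and the ExistsMetaData and GetMetaData calls below; the shareVersion helper and the "release" key are placeholders for illustration:

package main

import (
	"context"
	"fmt"

	"github.com/buildkite/agent/v3/api"
)

// shareVersion sets a key from a job and reads it back in build scope,
// as a later step in the same build might.
func shareVersion(ctx context.Context, c *api.Client, jobID, buildID string) error {
	if _, err := c.SetMetaData(ctx, jobID, &api.MetaData{Key: "release", Value: "1.2.3"}); err != nil {
		return err
	}
	exists, _, err := c.ExistsMetaData(ctx, "build", buildID, "release")
	if err != nil {
		return err
	}
	if exists.Exists {
		md, _, err := c.GetMetaData(ctx, "build", buildID, "release")
		if err != nil {
			return err
		}
		fmt.Println("release is", md.Value)
	}
	return nil
}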
+func (c *Client) ExistsMetaData(ctx context.Context, scope, id, key string) (*MetaDataExists, *Response, error) { + if scope != "job" && scope != "build" { + return nil, nil, errors.New("scope must either be job or build") + } + + u := fmt.Sprintf("%ss/%s/data/exists", scope, id) + m := &MetaData{Key: key} + + req, err := c.newRequest(ctx, "POST", u, m) + if err != nil { + return nil, nil, err + } + + e := new(MetaDataExists) + resp, err := c.doRequest(req, e) + if err != nil { + return nil, resp, err + } + + return e, resp, err +} + +func (c *Client) MetaDataKeys(ctx context.Context, scope, id string) ([]string, *Response, error) { + if scope != "job" && scope != "build" { + return nil, nil, errors.New("scope must either be job or build") + } + + u := fmt.Sprintf("%ss/%s/data/keys", scope, id) + + req, err := c.newRequest(ctx, "POST", u, nil) + if err != nil { + return nil, nil, err + } + + keys := []string{} + resp, err := c.doRequest(req, &keys) + if err != nil { + return nil, resp, err + } + + return keys, resp, err +} diff --git a/vendor/github.com/buildkite/agent/v3/api/oidc.go b/vendor/github.com/buildkite/agent/v3/api/oidc.go new file mode 100644 index 000000000..a25f0302b --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/oidc.go @@ -0,0 +1,43 @@ +package api + +import ( + "context" + "fmt" +) + +type OIDCToken struct { + Token string `json:"token"` +} + +type OIDCTokenRequest struct { + Job string + Audience string + Lifetime int + Claims []string +} + +func (c *Client) OIDCToken(ctx context.Context, methodReq *OIDCTokenRequest) (*OIDCToken, *Response, error) { + m := &struct { + Audience string `json:"audience,omitempty"` + Lifetime int `json:"lifetime,omitempty"` + Claims []string `json:"claims,omitempty"` + }{ + Audience: methodReq.Audience, + Lifetime: methodReq.Lifetime, + Claims: methodReq.Claims, + } + + u := fmt.Sprintf("jobs/%s/oidc/tokens", methodReq.Job) + httpReq, err := c.newRequest(ctx, "POST", u, m) + if err != nil { + return nil, nil, err + } + + t := &OIDCToken{} + resp, err := c.doRequest(httpReq, t) + if err != nil { + return nil, resp, err + } + + return t, resp, nil +} diff --git a/vendor/github.com/buildkite/agent/v3/api/pings.go b/vendor/github.com/buildkite/agent/v3/api/pings.go new file mode 100644 index 000000000..de3e1bd17 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/pings.go @@ -0,0 +1,27 @@ +package api + +import "context" + +// Ping represents a Buildkite Agent API Ping +type Ping struct { + Action string `json:"action,omitempty"` + Message string `json:"message,omitempty"` + Job *Job `json:"job,omitempty"` + Endpoint string `json:"endpoint,omitempty"` +} + +// Pings the API and returns any work the client needs to perform +func (c *Client) Ping(ctx context.Context) (*Ping, *Response, error) { + req, err := c.newRequest(ctx, "GET", "ping", nil) + if err != nil { + return nil, nil, err + } + + ping := new(Ping) + resp, err := c.doRequest(req, ping) + if err != nil { + return nil, resp, err + } + + return ping, resp, err +} diff --git a/vendor/github.com/buildkite/agent/v3/api/pipelines.go b/vendor/github.com/buildkite/agent/v3/api/pipelines.go new file mode 100644 index 000000000..e585fb1cd --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/pipelines.go @@ -0,0 +1,57 @@ +package api + +import ( + "context" + "fmt" +) + +// PipelineChange represents a Buildkite Agent API PipelineChange +type PipelineChange struct { + // UUID identifies this pipeline change. 
We keep this constant during + // retry loops so that work is not repeated on the API server + UUID string `json:"uuid"` + Pipeline any `json:"pipeline"` + Replace bool `json:"replace,omitempty"` +} + +type PipelineUploadStatus struct { + State string `json:"state"` + Message string `json:"message"` +} + +// UploadPipeline uploads the pipeline to the Buildkite Agent API. It does not wait for the +// pipeline to finish processing but will instead return with a redirect to the location to check +// the pipeline's status. +func (c *Client) UploadPipeline( + ctx context.Context, + jobId string, + pipeline *PipelineChange, + headers ...Header, +) (*Response, error) { + u := fmt.Sprintf("jobs/%s/pipelines?async=true", jobId) + + req, err := c.newRequest(ctx, "POST", u, pipeline, headers...) + if err != nil { + return nil, err + } + + return c.doRequest(req, nil) +} + +func (c *Client) PipelineUploadStatus( + ctx context.Context, + jobId string, + uuid string, + headers ...Header, +) (*PipelineUploadStatus, *Response, error) { + u := fmt.Sprintf("jobs/%s/pipelines/%s", jobId, uuid) + + req, err := c.newRequest(ctx, "GET", u, nil, headers...) + if err != nil { + return nil, nil, err + } + + status := &PipelineUploadStatus{} + resp, err := c.doRequest(req, status) + return status, resp, err +} diff --git a/vendor/github.com/buildkite/agent/v3/api/retryable.go b/vendor/github.com/buildkite/agent/v3/api/retryable.go new file mode 100644 index 000000000..92e83d3e9 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/retryable.go @@ -0,0 +1,72 @@ +package api + +import ( + "io" + "net" + "net/http" + "net/url" + "strings" + "syscall" + + "golang.org/x/exp/slices" +) + +var retrableErrorSuffixes = []string{ + syscall.ECONNREFUSED.Error(), + syscall.ECONNRESET.Error(), + syscall.ETIMEDOUT.Error(), + "no such host", + "remote error: handshake failure", + io.ErrUnexpectedEOF.Error(), + io.EOF.Error(), +} + +var retryableStatuses = []int{ + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 +} + +// IsRetryableStatus returns true if the response's StatusCode is one that we should retry. +func IsRetryableStatus(r *Response) bool { + return r.StatusCode >= 400 && slices.Contains(retryableStatuses, r.StatusCode) +} + +// Looks at a bunch of connection related errors, and returns true if the error +// matches one of them. 
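Combined with IsRetryableStatus above, this predicate supports a retry loop along the lines of the following minimal sketch; the pingWithRetry helper and the five-attempt exponential backoff are illustrative assumptions, not the agent's actual retry policy:

package main

import (
	"context"
	"time"

	"github.com/buildkite/agent/v3/api"
)

// pingWithRetry retries on retryable transport errors or retryable HTTP
// statuses, backing off exponentially, and gives up on anything else.
func pingWithRetry(ctx context.Context, c *api.Client) (*api.Ping, error) {
	var lastErr error
	for attempt := 0; attempt < 5; attempt++ {
		ping, resp, err := c.Ping(ctx)
		if err == nil {
			return ping, nil
		}
		lastErr = err
		if (resp != nil && api.IsRetryableStatus(resp)) || api.IsRetryableError(err) {
			time.Sleep(time.Second << attempt)
			continue
		}
		return nil, err
	}
	return nil, lastErr
}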
+func IsRetryableError(err error) bool { + if neterr, ok := err.(net.Error); ok { + if neterr.Temporary() { + return true + } + } + + if neterr, ok := err.(net.Error); ok && neterr.Timeout() { + return true + } + + if urlerr, ok := err.(*url.Error); ok { + if strings.Contains(urlerr.Error(), "use of closed network connection") { + return true + } + + if neturlerr, ok := urlerr.Err.(net.Error); ok && neturlerr.Timeout() { + return true + } + } + + if strings.Contains(err.Error(), "request canceled while waiting for connection") { + return true + } + + s := err.Error() + for _, suffix := range retrableErrorSuffixes { + if strings.HasSuffix(s, suffix) { + return true + } + } + + return false +} diff --git a/vendor/github.com/buildkite/agent/v3/api/steps.go b/vendor/github.com/buildkite/agent/v3/api/steps.go new file mode 100644 index 000000000..a428b2e1a --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/steps.go @@ -0,0 +1,56 @@ +package api + +import ( + "context" + "fmt" +) + +// StepExportRequest represents a request for information about a step +type StepExportRequest struct { + Attribute string `json:"attribute,omitempty"` + Build string `json:"build_id,omitempty"` + Format string `json:"format,omitempty"` +} + +type StepExportResponse struct { + Output string `json:"output"` +} + +// StepExport gets an attribute from step +func (c *Client) StepExport(ctx context.Context, stepIdOrKey string, stepGetRequest *StepExportRequest) (*StepExportResponse, *Response, error) { + u := fmt.Sprintf("steps/%s/export", stepIdOrKey) + + req, err := c.newRequest(ctx, "POST", u, stepGetRequest) + if err != nil { + return nil, nil, err + } + + r := new(StepExportResponse) + resp, err := c.doRequest(req, r) + if err != nil { + return nil, resp, err + } + + return r, resp, err +} + +// StepUpdate represents a change request to a step +type StepUpdate struct { + IdempotencyUUID string `json:"idempotency_uuid,omitempty"` + Build string `json:"build_id,omitempty"` + Attribute string `json:"attribute,omitempty"` + Value string `json:"value,omitempty"` + Append bool `json:"append,omitempty"` +} + +// StepUpdate updates a step +func (c *Client) StepUpdate(ctx context.Context, stepIdOrKey string, stepUpdate *StepUpdate) (*Response, error) { + u := fmt.Sprintf("steps/%s", stepIdOrKey) + + req, err := c.newRequest(ctx, "PUT", u, stepUpdate) + if err != nil { + return nil, err + } + + return c.doRequest(req, nil) +} diff --git a/vendor/github.com/buildkite/agent/v3/api/uuid.go b/vendor/github.com/buildkite/agent/v3/api/uuid.go new file mode 100644 index 000000000..d98371520 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/api/uuid.go @@ -0,0 +1,7 @@ +package api + +import "github.com/pborman/uuid" + +func NewUUID() string { + return uuid.New() +} diff --git a/vendor/github.com/buildkite/agent/v3/logger/buffer.go b/vendor/github.com/buildkite/agent/v3/logger/buffer.go new file mode 100644 index 000000000..9aa3b4f8f --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/logger/buffer.go @@ -0,0 +1,46 @@ +package logger + +import ( + "fmt" +) + +// Buffer is a Logger implementation intended for testing; +// messages are stored internally. +type Buffer struct { + Messages []string +} + +// NewBuffer creates a new Buffer with Messages slice initialized. +// This makes it simpler to assert empty []string when no log messages +// have been sent; otherwise Messages would be nil. 
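A minimal test sketch using Buffer below; warnIfEmpty stands in for any component under test that accepts the Logger interface:

package example

import (
	"testing"

	"github.com/buildkite/agent/v3/logger"
)

// warnIfEmpty is a hypothetical component under test.
func warnIfEmpty(l logger.Logger, items []string) {
	if len(items) == 0 {
		l.Warn("no items to process")
	}
}

// TestWarnIfEmpty asserts on captured messages; Buffer prefixes each
// message with its level, e.g. "[warn] ...".
func TestWarnIfEmpty(t *testing.T) {
	l := logger.NewBuffer()
	warnIfEmpty(l, nil)

	want := "[warn] no items to process"
	if len(l.Messages) != 1 || l.Messages[0] != want {
		t.Errorf("got %v, want [%q]", l.Messages, want)
	}
}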
+func NewBuffer() *Buffer { + return &Buffer{ + Messages: make([]string, 0), + } +} + +func (b *Buffer) Debug(format string, v ...any) { + b.Messages = append(b.Messages, "[debug] "+fmt.Sprintf(format, v...)) +} +func (b *Buffer) Error(format string, v ...any) { + b.Messages = append(b.Messages, "[error] "+fmt.Sprintf(format, v...)) +} +func (b *Buffer) Fatal(format string, v ...any) { + b.Messages = append(b.Messages, "[fatal] "+fmt.Sprintf(format, v...)) +} +func (b *Buffer) Notice(format string, v ...any) { + b.Messages = append(b.Messages, "[notice] "+fmt.Sprintf(format, v...)) +} +func (b *Buffer) Warn(format string, v ...any) { + b.Messages = append(b.Messages, "[warn] "+fmt.Sprintf(format, v...)) +} +func (b *Buffer) Info(format string, v ...any) { + b.Messages = append(b.Messages, "[info] "+fmt.Sprintf(format, v...)) +} +func (b *Buffer) WithFields(fields ...Field) Logger { + return b +} +func (b *Buffer) SetLevel(level Level) {} +func (b *Buffer) Level() Level { + return 0 +} diff --git a/vendor/github.com/buildkite/agent/v3/logger/field.go b/vendor/github.com/buildkite/agent/v3/logger/field.go new file mode 100644 index 000000000..9622da13f --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/logger/field.go @@ -0,0 +1,65 @@ +package logger + +import ( + "fmt" + "time" +) + +type Field interface { + Key() string + String() string +} + +type Fields []Field + +func (f *Fields) Add(fields ...Field) { + *f = append(*f, fields...) +} + +func (f *Fields) Get(key string) []Field { + fields := []Field{} + for _, field := range *f { + if field.Key() == key { + fields = append(fields, field) + } + } + return fields +} + +type GenericField struct { + key string + value any + format string +} + +func (f GenericField) Key() string { + return f.key +} + +func (f GenericField) String() string { + return fmt.Sprintf(f.format, f.value) +} + +func StringField(key, value string) Field { + return GenericField{ + key: key, + value: value, + format: "%s", + } +} + +func IntField(key string, value int) Field { + return GenericField{ + key: key, + value: value, + format: "%d", + } +} + +func DurationField(key string, value time.Duration) Field { + return GenericField{ + key: key, + value: value, + format: "%v", + } +} diff --git a/vendor/github.com/buildkite/agent/v3/logger/init_windows.go b/vendor/github.com/buildkite/agent/v3/logger/init_windows.go new file mode 100644 index 000000000..168f2c2c4 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/logger/init_windows.go @@ -0,0 +1,31 @@ +//go:build windows +// +build windows + +package logger + +import ( + "fmt" + "os" + + "golang.org/x/sys/windows" +) + +// Windows 10 Build 16257 added support for ANSI color output if we enable them + +func init() { + var mode uint32 + stdout := windows.Handle(os.Stdout.Fd()) + + if err := windows.GetConsoleMode(stdout, &mode); err != nil { + return + } + + // See https://docs.microsoft.com/en-us/windows/console/getconsolemode + mode = mode | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + + if err := windows.SetConsoleMode(stdout, mode); err == nil { + windowsColors = true + } else { + fmt.Printf("Error setting console mode: %v\n", err) + } +} diff --git a/vendor/github.com/buildkite/agent/v3/logger/level.go b/vendor/github.com/buildkite/agent/v3/logger/level.go new file mode 100644 index 000000000..929a8f78b --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/logger/level.go @@ -0,0 +1,50 @@ +package logger + +import ( + "fmt" + "strings" +) + +type Level int + +const ( + DEBUG Level = iota + NOTICE + 
INFO + WARN + ERROR + FATAL +) + +var levelNames = []string{ + "DEBUG", + "NOTICE", + "INFO", + "WARN", + "ERROR", + "FATAL", +} + +func LevelFromString(s string) (Level, error) { + switch strings.ToLower(s) { + case "debug": + return DEBUG, nil + case "notice": + return NOTICE, nil + case "info": + return INFO, nil + case "warn", "warning": + return WARN, nil + case "error": + return ERROR, nil + case "fatal": + return FATAL, nil + default: + return -1, fmt.Errorf("invalid log level: %s. Valid levels are: %v", s, levelNames) + } +} + +// String returns the string representation of a logging level. +func (p Level) String() string { + return levelNames[p] +} diff --git a/vendor/github.com/buildkite/agent/v3/logger/log.go b/vendor/github.com/buildkite/agent/v3/logger/log.go new file mode 100644 index 000000000..0147ad232 --- /dev/null +++ b/vendor/github.com/buildkite/agent/v3/logger/log.go @@ -0,0 +1,268 @@ +// Package logger provides a logger abstraction for writing log messages in +// configurable formats to different outputs, such as a console, plain text +// file, or a JSON file. +// +// It is intended for internal use by buildkite-agent only. +package logger + +import ( + "fmt" + "io" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/crypto/ssh/terminal" +) + +const ( + nocolor = "0" + red = "31" + green = "38;5;48" + yellow = "33" + gray = "38;5;251" + graybold = "1;38;5;251" + lightgray = "38;5;243" + cyan = "1;36" +) + +const ( + DateFormat = "2006-01-02 15:04:05" +) + +var ( + mutex = sync.Mutex{} + windowsColors bool +) + +type Logger interface { + Debug(format string, v ...any) + Error(format string, v ...any) + Fatal(format string, v ...any) + Notice(format string, v ...any) + Warn(format string, v ...any) + Info(format string, v ...any) + + WithFields(fields ...Field) Logger + SetLevel(level Level) + Level() Level +} + +type ConsoleLogger struct { + level Level + exitFn func(int) + fields Fields + printer Printer +} + +func NewConsoleLogger(printer Printer, exitFn func(int)) Logger { + return &ConsoleLogger{ + level: DEBUG, + fields: Fields{}, + printer: printer, + exitFn: exitFn, + } +} + +// WithFields returns a copy of the logger with the provided fields +func (l *ConsoleLogger) WithFields(fields ...Field) Logger { + clone := *l + clone.fields.Add(fields...) 
+ return &clone +} + +// SetLevel sets the level in the logger +func (l *ConsoleLogger) SetLevel(level Level) { + l.level = level +} + +func (l *ConsoleLogger) Debug(format string, v ...any) { + if l.level == DEBUG { + l.printer.Print(DEBUG, fmt.Sprintf(format, v...), l.fields) + } +} + +func (l *ConsoleLogger) Error(format string, v ...any) { + l.printer.Print(ERROR, fmt.Sprintf(format, v...), l.fields) +} + +func (l *ConsoleLogger) Fatal(format string, v ...any) { + l.printer.Print(FATAL, fmt.Sprintf(format, v...), l.fields) + l.exitFn(1) +} + +func (l *ConsoleLogger) Notice(format string, v ...any) { + if l.level <= NOTICE { + l.printer.Print(NOTICE, fmt.Sprintf(format, v...), l.fields) + } +} + +func (l *ConsoleLogger) Info(format string, v ...any) { + if l.level <= INFO { + l.printer.Print(INFO, fmt.Sprintf(format, v...), l.fields) + } +} + +func (l *ConsoleLogger) Warn(format string, v ...any) { + if l.level <= WARN { + l.printer.Print(WARN, fmt.Sprintf(format, v...), l.fields) + } +} + +func (l *ConsoleLogger) Level() Level { + return l.level +} + +type Printer interface { + Print(level Level, msg string, fields Fields) +} + +type TextPrinter struct { + Colors bool + Writer io.Writer + + IsPrefixFn func(Field) bool + IsVisibleFn func(Field) bool +} + +func NewTextPrinter(w io.Writer) *TextPrinter { + return &TextPrinter{ + Writer: w, + Colors: ColorsSupported(), + } +} + +func (l *TextPrinter) Print(level Level, msg string, fields Fields) { + now := time.Now().Format(DateFormat) + + var line string + var prefix string + var fieldStrs []string + + if l.IsPrefixFn != nil { + for _, f := range fields { + // Skip invisible fields + if l.IsVisibleFn != nil && !l.IsVisibleFn(f) { + continue + } + // Allow some fields to be shown as prefixes + if l.IsPrefixFn(f) { + prefix += f.String() + } + } + } + + if l.Colors { + levelColor := green + messageColor := nocolor + fieldColor := graybold + + switch level { + case DEBUG: + levelColor = gray + messageColor = gray + case NOTICE: + levelColor = cyan + case WARN: + levelColor = yellow + case ERROR: + levelColor = red + case FATAL: + levelColor = red + messageColor = red + } + + if prefix != "" { + line = fmt.Sprintf("\x1b[%sm%s %-6s\x1b[0m \x1b[%sm%s\x1b[0m \x1b[%sm%s\x1b[0m", + levelColor, now, level, lightgray, prefix, messageColor, msg) + } else { + line = fmt.Sprintf("\x1b[%sm%s %-6s\x1b[0m \x1b[%sm%s\x1b[0m", + levelColor, now, level, messageColor, msg) + } + + for _, field := range fields { + if l.IsVisibleFn != nil && !l.IsVisibleFn(field) { + continue + } + if l.IsPrefixFn != nil && l.IsPrefixFn(field) { + continue + } + fieldStrs = append(fieldStrs, fmt.Sprintf("\x1b[%sm%s=\x1b[0m\x1b[%sm%s\x1b[0m", + fieldColor, field.Key(), messageColor, field.String())) + } + } else { + if prefix != "" { + line = fmt.Sprintf("%s %-6s %s %s", now, level, prefix, msg) + } else { + line = fmt.Sprintf("%s %-6s %s", now, level, msg) + } + + for _, field := range fields { + if l.IsVisibleFn != nil && !l.IsVisibleFn(field) { + continue + } + if l.IsPrefixFn != nil && l.IsPrefixFn(field) { + continue + } + fieldStrs = append(fieldStrs, fmt.Sprintf("%s=%s", field.Key(), field.String())) + } + } + + // Make sure we're only outputting a line one at a time + mutex.Lock() + fmt.Fprint(l.Writer, line) + if len(fields) > 0 { + fmt.Fprintf(l.Writer, " %s", strings.Join(fieldStrs, " ")) + } + fmt.Fprint(l.Writer, "\n") + mutex.Unlock() +} + +func ColorsSupported() bool { + // Color support for windows is set in init + if runtime.GOOS == "windows" && !windowsColors { 
+ return false + } + + // Colors can only be shown if STDOUT is a terminal + if terminal.IsTerminal(int(os.Stdout.Fd())) { + return true + } + + return false +} + +type JSONPrinter struct { + Writer io.Writer +} + +func NewJSONPrinter(w io.Writer) *JSONPrinter { + return &JSONPrinter{ + Writer: w, + } +} + +func (p *JSONPrinter) Print(level Level, msg string, fields Fields) { + var b strings.Builder + + b.WriteString(fmt.Sprintf(`"ts":%q,`, time.Now().Format(time.RFC3339))) + b.WriteString(fmt.Sprintf(`"level":%q,`, level.String())) + b.WriteString(fmt.Sprintf(`"msg":%q,`, msg)) + + for _, field := range fields { + b.WriteString(fmt.Sprintf("%q:%q,", field.Key(), field.String())) + } + + // Make sure we're only outputting a line one at a time + mutex.Lock() + fmt.Fprintf(p.Writer, "{%s}\n", strings.TrimSuffix(b.String(), ",")) + mutex.Unlock() +} + +var Discard = &ConsoleLogger{ + printer: &TextPrinter{ + Writer: io.Discard, + }, +} diff --git a/vendor/github.com/chrismellard/docker-credential-acr-env/LICENSE b/vendor/github.com/chrismellard/docker-credential-acr-env/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/chrismellard/docker-credential-acr-env/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/credhelper/helper.go b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/credhelper/helper.go new file mode 100644 index 000000000..2a7cbdf07 --- /dev/null +++ b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/credhelper/helper.go @@ -0,0 +1,85 @@ +/* +Copyright © 2020 Chris Mellard chris.mellard@icloud.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+package credhelper
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"regexp"
+
+	"github.com/Azure/go-autorest/autorest/azure/auth"
+	"github.com/chrismellard/docker-credential-acr-env/pkg/registry"
+	"github.com/chrismellard/docker-credential-acr-env/pkg/token"
+	"github.com/docker/docker-credential-helpers/credentials"
+)
+
+var acrRE = regexp.MustCompile(`.*\.azurecr\.io|.*\.azurecr\.cn|.*\.azurecr\.de|.*\.azurecr\.us`)
+
+const (
+	mcrHostname   = "mcr.microsoft.com"
+	tokenUsername = ""
+)
+
+type ACRCredHelper struct {
+}
+
+func NewACRCredentialsHelper() credentials.Helper {
+	return &ACRCredHelper{}
+}
+
+func (a ACRCredHelper) Add(_ *credentials.Credentials) error {
+	return errors.New("add is unimplemented")
+}
+
+func (a ACRCredHelper) Delete(_ string) error {
+	return errors.New("delete is unimplemented")
+}
+
+func isACRRegistry(input string) bool {
+	serverURL, err := url.Parse("https://" + input)
+	if err != nil {
+		return false
+	}
+	if serverURL.Hostname() == mcrHostname {
+		return true
+	}
+	matches := acrRE.FindStringSubmatch(serverURL.Hostname())
+	if len(matches) == 0 {
+		return false
+	}
+	return true
+}
+
+func (a ACRCredHelper) Get(serverURL string) (string, string, error) {
+	if !isACRRegistry(serverURL) {
+		return "", "", errors.New("serverURL does not refer to Azure Container Registry")
+	}
+
+	spToken, settings, err := token.GetServicePrincipalTokenFromEnvironment()
+	if err != nil {
+		return "", "", fmt.Errorf("failed to acquire sp token %w", err)
+	}
+	refreshToken, err := registry.GetRegistryRefreshTokenFromAADExchange(serverURL, spToken, settings.Values[auth.TenantID])
+	if err != nil {
+		return "", "", fmt.Errorf("failed to acquire refresh token %w", err)
+	}
+	return tokenUsername, refreshToken, nil
+}
+
+func (a ACRCredHelper) List() (map[string]string, error) {
+	return nil, errors.New("list is unimplemented")
+}
diff --git a/vendor/sigs.k8s.io/kustomize/api/resmap/idslice.go b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/const.go
similarity index 52%
rename from vendor/sigs.k8s.io/kustomize/api/resmap/idslice.go
rename to vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/const.go
index 8c25cbb2a..2f8ad5a0a 100644
--- a/vendor/sigs.k8s.io/kustomize/api/resmap/idslice.go
+++ b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/const.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2018 The Kubernetes Authors.
+Copyright © 2020 Chris Mellard chris.mellard@icloud.com
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,25 +13,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */
-
-package resmap
+package registry
 
 import (
-	"sort"
-
-	"sigs.k8s.io/kustomize/kyaml/resid"
+	"time"
 )
 
-// IdSlice implements the sort interface.
-type IdSlice []resid.ResId
-
-var _ sort.Interface = IdSlice{}
-
-func (a IdSlice) Len() int      { return len(a) }
-func (a IdSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a IdSlice) Less(i, j int) bool {
-	if !a[i].Gvk.Equals(a[j].Gvk) {
-		return a[i].Gvk.IsLessThan(a[j].Gvk)
-	}
-	return a[i].LegacySortString() < a[j].LegacySortString()
-}
+const (
+	secureScheme   = "https://"
+	defaultTimeOut = time.Duration(30) * time.Second
+)
diff --git a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/registry.go b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/registry.go
new file mode 100644
index 000000000..c1b30dde8
--- /dev/null
+++ b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/registry/registry.go
@@ -0,0 +1,63 @@
+/*
+Copyright © 2020 Chris Mellard chris.mellard@icloud.com
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+
+	"github.com/Azure/azure-sdk-for-go/services/preview/containerregistry/runtime/2019-08-15-preview/containerregistry"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/adal"
+)
+
+// GetRegistryRefreshTokenFromAADExchange retrieves an OAuth2 refresh token for the registry specified by serverURL
+func GetRegistryRefreshTokenFromAADExchange(serverURL string, principalToken *adal.ServicePrincipalToken, tenantID string) (string, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTimeOut)
+	defer cancel()
+
+	// If refreshing fails, don't try again, just fail.
+	principalToken.MaxMSIRefreshAttempts = 1
+
+	if err := principalToken.EnsureFreshWithContext(ctx); err != nil {
+		return "", fmt.Errorf("error refreshing sp token - %w", err)
+	}
+
+	registryName, err := getRegistryURL(serverURL)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse server URL - %w", err)
+	}
+	refreshTokenClient := containerregistry.NewRefreshTokensClient(registryName.String())
+	authorizer := autorest.NewBearerAuthorizer(principalToken)
+	refreshTokenClient.Authorizer = authorizer
+	rt, err := refreshTokenClient.GetFromExchange(ctx, "access_token", serverURL, tenantID, "", principalToken.Token().AccessToken)
+	if err != nil {
+		return "", fmt.Errorf("failed to get refresh token for container registry - %w", err)
+	}
+
+	return *rt.RefreshToken, nil
+}
+
+// getRegistryURL parses a serverURL and returns the registry name (i.e.
minus transport scheme) +func getRegistryURL(serverURL string) (*url.URL, error) { + sURL, err := url.Parse(secureScheme + serverURL) + if err != nil { + return &url.URL{}, fmt.Errorf("failed to parse server URL - %w", err) + } + + return sURL, nil +} diff --git a/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/token/token.go b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/token/token.go new file mode 100644 index 000000000..a76be1e4f --- /dev/null +++ b/vendor/github.com/chrismellard/docker-credential-acr-env/pkg/token/token.go @@ -0,0 +1,72 @@ +/* +Copyright © 2020 Chris Mellard chris.mellard@icloud.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package token + +import ( + "fmt" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure/auth" +) + +func GetServicePrincipalTokenFromEnvironment() (*adal.ServicePrincipalToken, auth.EnvironmentSettings, error) { + settings, err := auth.GetSettingsFromEnvironment() + if err != nil { + return &adal.ServicePrincipalToken{}, auth.EnvironmentSettings{}, fmt.Errorf("failed to get auth settings from environment - %w", err) + } + + spToken, err := getServicePrincipalToken(settings, settings.Environment.ResourceManagerEndpoint) + if err != nil { + return &adal.ServicePrincipalToken{}, auth.EnvironmentSettings{}, fmt.Errorf("failed to initialise sp token config %w", err) + } + + return spToken, settings, nil +} + +// getServicePrincipalToken retrieves an Azure AD OAuth2 token from the supplied environment settings for the specified resource +func getServicePrincipalToken(settings auth.EnvironmentSettings, resource string) (*adal.ServicePrincipalToken, error) { + + //1.Client Credentials + if _, e := settings.GetClientCredentials(); e == nil { + clientCredentialsConfig, err := settings.GetClientCredentials() + if err != nil { + return &adal.ServicePrincipalToken{}, fmt.Errorf("failed to get client credentials settings from environment - %w", err) + } + oAuthConfig, err := adal.NewOAuthConfig(settings.Environment.ActiveDirectoryEndpoint, clientCredentialsConfig.TenantID) + if err != nil { + return &adal.ServicePrincipalToken{}, fmt.Errorf("failed to initialise OAuthConfig - %w", err) + } + return adal.NewServicePrincipalToken(*oAuthConfig, clientCredentialsConfig.ClientID, clientCredentialsConfig.ClientSecret, clientCredentialsConfig.Resource) + } + + //2. Client Certificate + if _, e := settings.GetClientCertificate(); e == nil { + return &adal.ServicePrincipalToken{}, fmt.Errorf("authentication method currently unsupported") + } + + //3. Username Password + if _, e := settings.GetUsernamePassword(); e == nil { + return &adal.ServicePrincipalToken{}, fmt.Errorf("authentication method currently unsupported") + } + + // 4. 
MSI
+	msiEndpoint, err := adal.GetMSIEndpoint()
+	if err != nil {
+		return &adal.ServicePrincipalToken{}, fmt.Errorf("unable to determine MSIEndpoint %w", err)
+	}
+
+	return adal.NewServicePrincipalTokenFromMSI(msiEndpoint, resource)
+}
diff --git a/vendor/github.com/clbanning/mxj/v2/.travis.yml b/vendor/github.com/clbanning/mxj/v2/.travis.yml
new file mode 100644
index 000000000..9c8611554
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/.travis.yml
@@ -0,0 +1,4 @@
+language: go
+
+go:
+- 1.x
\ No newline at end of file
diff --git a/vendor/github.com/clbanning/mxj/v2/LICENSE b/vendor/github.com/clbanning/mxj/v2/LICENSE
new file mode 100644
index 000000000..1ada8807d
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2012-2021 Charles Banning . All rights reserved.
+
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/clbanning/mxj/v2/anyxml.go b/vendor/github.com/clbanning/mxj/v2/anyxml.go
new file mode 100644
index 000000000..63970ee24
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/anyxml.go
@@ -0,0 +1,201 @@
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"reflect"
+)
+
+const (
+	DefaultElementTag = "element"
+)
+
+// Encode arbitrary value as XML.
+//
+// Note: unmarshaling the resultant
+// XML may not return the original value, since tag labels may have been injected
+// to create the XML representation of the value.
+/*
+ Encode an arbitrary JSON object.
+	package main
+
+	import (
+		"encoding/json"
+		"fmt"
+		"github.com/clbanning/mxj"
+	)
+
+	func main() {
+		jsondata := []byte(`[
+			{ "somekey":"somevalue" },
+			"string",
+			3.14159265,
+			true
+		]`)
+		var i interface{}
+		err := json.Unmarshal(jsondata, &i)
+		if err != nil {
+			// do something
+		}
+		x, err := mxj.AnyXmlIndent(i, "", "  ", "mydoc")
+		if err != nil {
+			// do something else
+		}
+		fmt.Println(string(x))
+	}
+
+	output:
+		<mydoc>
+		  <somekey>somevalue</somekey>
+		  <element>string</element>
+		  <element>3.14159265</element>
+		  <element>true</element>
+		</mydoc>
+
+An extreme example is available in examples/goofy_map.go.
+*/
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXml( v, myRootTag, myElementTag).
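+//
+// For instance, a brief editorial sketch (not upstream godoc; the values and
+// tag names below are illustrative):
+//
+//	x, _ := AnyXml([]interface{}{"one", 2}, "list", "item")
+//	// x: <list><item>one</item><item>2</item></list>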
+func AnyXml(v interface{}, tags ...string) ([]byte, error) {
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte("<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte("<" + rt + "/>"), nil
+	}
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.Marshal(v)
+	}
+
+	var err error
+	s := new(bytes.Buffer)
+	p := new(pretty)
+
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		if _, err = s.WriteString("<" + rt + ">"); err != nil {
+			return nil, err
+		}
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					for tag, val := range m {
+						err = marshalMapToXmlIndent(false, s, tag, val, p)
+					}
+				} else {
+					err = marshalMapToXmlIndent(false, s, et, vv, p)
+				}
+			default:
+				err = marshalMapToXmlIndent(false, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if _, err = s.WriteString("</" + rt + ">"); err != nil {
+			return nil, err
+		}
+		b = s.Bytes()
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.Xml(rt)
+	default:
+		err = marshalMapToXmlIndent(false, s, rt, v, p)
+		b = s.Bytes()
+	}
+
+	return b, err
+}
+
+// Encode an arbitrary value as a pretty XML string.
+// Alternative values for DefaultRootTag and DefaultElementTag can be set as:
+// AnyXmlIndent( v, "", "  ", myRootTag, myElementTag).
+func AnyXmlIndent(v interface{}, prefix, indent string, tags ...string) ([]byte, error) {
+	var rt, et string
+	if len(tags) == 1 || len(tags) == 2 {
+		rt = tags[0]
+	} else {
+		rt = DefaultRootTag
+	}
+	if len(tags) == 2 {
+		et = tags[1]
+	} else {
+		et = DefaultElementTag
+	}
+
+	if v == nil {
+		if useGoXmlEmptyElemSyntax {
+			return []byte(prefix + "<" + rt + "></" + rt + ">"), nil
+		}
+		return []byte(prefix + "<" + rt + "/>"), nil
+	}
+	if reflect.TypeOf(v).Kind() == reflect.Struct {
+		return xml.MarshalIndent(v, prefix, indent)
+	}
+
+	var err error
+	s := new(bytes.Buffer)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	var b []byte
+	switch v.(type) {
+	case []interface{}:
+		if _, err = s.WriteString("<" + rt + ">\n"); err != nil {
+			return nil, err
+		}
+		p.Indent()
+		for _, vv := range v.([]interface{}) {
+			switch vv.(type) {
+			case map[string]interface{}:
+				m := vv.(map[string]interface{})
+				if len(m) == 1 {
+					for tag, val := range m {
+						err = marshalMapToXmlIndent(true, s, tag, val, p)
+					}
+				} else {
+					p.start = 1 // we're 1 tag in
+					err = marshalMapToXmlIndent(true, s, et, vv, p)
+					// *s += "\n"
+					if _, err = s.WriteString("\n"); err != nil {
+						return nil, err
+					}
+				}
+			default:
+				p.start = 0 // in case trailing p.start = 1
+				err = marshalMapToXmlIndent(true, s, et, vv, p)
+			}
+			if err != nil {
+				break
+			}
+		}
+		if _, err = s.WriteString(`</` + rt + `>`); err != nil {
+			return nil, err
+		}
+		b = s.Bytes()
+	case map[string]interface{}:
+		m := Map(v.(map[string]interface{}))
+		b, err = m.XmlIndent(prefix, indent, rt)
+	default:
+		err = marshalMapToXmlIndent(true, s, rt, v, p)
+		b = s.Bytes()
+	}
+
+	return b, err
+}
diff --git a/vendor/github.com/clbanning/mxj/v2/atomFeedString.xml b/vendor/github.com/clbanning/mxj/v2/atomFeedString.xml
new file mode 100644
index 000000000..474575a41
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/atomFeedString.xml
@@ -0,0 +1,54 @@
+
+Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub
+2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +rietveld: correct tab handling +2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + diff --git a/vendor/github.com/clbanning/mxj/v2/doc.go b/vendor/github.com/clbanning/mxj/v2/doc.go new file mode 100644 index 000000000..bede31265 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/doc.go @@ -0,0 +1,138 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. +// Copyright 2012-2019, Charles Banning. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file + +/* +Marshal/Unmarshal XML to/from map[string]interface{} values (and JSON); extract/modify values from maps by key or key-path, including wildcards. + +mxj supplants the legacy x2j and j2x packages. The subpackage x2j-wrapper is provided to facilitate migrating from the x2j package. The x2j and j2x subpackages provide similar functionality of the old packages but are not function-name compatible with them. + +Note: this library was designed for processing ad hoc anonymous messages. Bulk processing large data sets may be much more efficiently performed using the encoding/xml or encoding/json packages from Go's standard library directly. + +Related Packages: + checkxml: github.com/clbanning/checkxml provides functions for validating XML data. + +Notes: + 2020.05.01: v2.2 - optimize map to XML encoding for large XML docs. 
+ 2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq. + 2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq. + 2019.01.21: DecodeSimpleValuesAsMap - decode to map[:map["#text":]] rather than map[:]. + 2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc. + 2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps. + 2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package. + 2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing. + 2017.02.21: github.com/clbanning/checkxml provides functions for validating XML data. + 2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods. + 2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag(). + 2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc. + 2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix(). + 2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable. + 2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars(). + 2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf". + To cast them to float64, first set flag with CastNanInf(true). + 2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure. + 2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization. + 2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM). + 2015-12-02: NewMapXmlSeq() with mv.XmlSeq() & co. will try to preserve structure of XML doc when re-encoding. + 2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML. + +SUMMARY + + type Map map[string]interface{} + + Create a Map value, 'mv', from any map[string]interface{} value, 'v': + mv := Map(v) + + Unmarshal / marshal XML as a Map value, 'mv': + mv, err := NewMapXml(xmlValue) // unmarshal + xmlValue, err := mv.Xml() // marshal + + Unmarshal XML from an io.Reader as a Map value, 'mv': + mv, err := NewMapXmlReader(xmlReader) // repeated calls, as with an os.File Reader, will process stream + mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded + + Marshal Map value, 'mv', to an XML Writer (io.Writer): + err := mv.XmlWriter(xmlWriter) + raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter + + Also, for prettified output: + xmlValue, err := mv.XmlIndent(prefix, indent, ...) + err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...) + raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...) + + Bulk process XML with error handling (note: handlers must return a boolean value): + err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error)) + err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte)) + + Converting XML to JSON: see Examples for NewMapXml and HandleXmlReader. + + There are comparable functions and methods for JSON processing. 
+ + Arbitrary structure values can be decoded to / encoded from Map values: + mv, err := NewMapStruct(structVal) + err := mv.Struct(structPointer) + + To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON + or structure to a Map value, 'mv', or cast a map[string]interface{} value to a Map value, 'mv', then: + paths := mv.PathsForKey(key) + path := mv.PathForKeyShortest(key) + values, err := mv.ValuesForKey(key, subkeys) + values, err := mv.ValuesForPath(path, subkeys) // 'path' can be dot-notation with wildcards and indexed arrays. + count, err := mv.UpdateValuesForPath(newVal, path, subkeys) + + Get everything at once, irrespective of path depth: + leafnodes := mv.LeafNodes() + leafvalues := mv.LeafValues() + + A new Map with whatever keys are desired can be created from the current Map and then encoded in XML + or JSON. (Note: keys can use dot-notation. 'oldKey' can also use wildcards and indexed arrays.) + newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N") + newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go" + newXml, err := newMap.Xml() // for example + newJson, err := newMap.Json() // ditto + +XML PARSING CONVENTIONS + + Using NewMapXml() + + - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`, + to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or + `SetAttrPrefix()`.) + - If the element is a simple element and has attributes, the element value + is given the key `#text` for its `map[string]interface{}` representation. (See + the 'atomFeedString.xml' test data, below.) + - XML comments, directives, and process instructions are ignored. + - If CoerceKeysToLower() has been called, then the resultant keys will be lower case. + + Using NewMapXmlSeq() + + - Attributes are parsed to `map["#attr"]map[]map[string]interface{}`values + where the `` value has "#text" and "#seq" keys - the "#text" key holds the + value for ``. + - All elements, except for the root, have a "#seq" key. + - Comments, directives, and process instructions are unmarshalled into the Map using the + keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more + specifics.) + - Name space syntax is preserved: + - something parses to map["ns:key"]interface{}{"something"} + - xmlns:ns="http://myns.com/ns" parses to map["xmlns:ns"]interface{}{"http://myns.com/ns"} + + Both + + - By default, "Nan", "Inf", and "-Inf" values are not cast to float64. If you want them + to be cast, set a flag to cast them using CastNanInf(true). + +XML ENCODING CONVENTIONS + + - 'nil' Map values, which may represent 'null' JSON values, are encoded as "". + NOTE: the operation is not symmetric as "" elements are decoded as 'tag:""' Map values, + which, then, encode in JSON as '"tag":""' values.. + - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one. (Go + randomizes the walk through map[string]interface{} values.) If you plan to re-encode the + Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and + mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when + working with the Map representation. 
+ +*/ +package mxj diff --git a/vendor/github.com/clbanning/mxj/v2/escapechars.go b/vendor/github.com/clbanning/mxj/v2/escapechars.go new file mode 100644 index 000000000..eeb3d2501 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/escapechars.go @@ -0,0 +1,93 @@ +// Copyright 2016 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "bytes" +) + +var xmlEscapeChars bool + +// XMLEscapeChars(true) forces escaping invalid characters in attribute and element values. +// NOTE: this is brute force with NO interrogation of '&' being escaped already; if it is +// then '&' will be re-escaped as '&amp;'. +// +/* + The values are: + " " + ' ' + < < + > > + & & +*/ +// +// Note: if XMLEscapeCharsDecoder(true) has been called - or the default, 'false,' value +// has been toggled to 'true' - then XMLEscapeChars(true) is ignored. If XMLEscapeChars(true) +// has already been called before XMLEscapeCharsDecoder(true), XMLEscapeChars(false) is called +// to turn escape encoding on mv.Xml, etc., to prevent double escaping ampersands, '&'. +func XMLEscapeChars(b ...bool) { + var bb bool + if len(b) == 0 { + bb = !xmlEscapeChars + } else { + bb = b[0] + } + if bb == true && xmlEscapeCharsDecoder == false { + xmlEscapeChars = true + } else { + xmlEscapeChars = false + } +} + +// Scan for '&' first, since 's' may contain "&" that is parsed to "&amp;" +// - or "<" that is parsed to "&lt;". +var escapechars = [][2][]byte{ + {[]byte(`&`), []byte(`&`)}, + {[]byte(`<`), []byte(`<`)}, + {[]byte(`>`), []byte(`>`)}, + {[]byte(`"`), []byte(`"`)}, + {[]byte(`'`), []byte(`'`)}, +} + +func escapeChars(s string) string { + if len(s) == 0 { + return s + } + + b := []byte(s) + for _, v := range escapechars { + n := bytes.Count(b, v[0]) + if n == 0 { + continue + } + b = bytes.Replace(b, v[0], v[1], n) + } + return string(b) +} + +// per issue #84, escape CharData values from xml.Decoder + +var xmlEscapeCharsDecoder bool + +// XMLEscapeCharsDecoder(b ...bool) escapes XML characters in xml.CharData values +// returned by Decoder.Token. Thus, the internal Map values will contain escaped +// values, and you do not need to set XMLEscapeChars for proper encoding. +// +// By default, the Map values have the non-escaped values returned by Decoder.Token. +// XMLEscapeCharsDecoder(true) - or, XMLEscapeCharsDecoder() - will toggle escape +// encoding 'on.' +// +// Note: if XMLEscapeCharDecoder(true) is call then XMLEscapeChars(false) is +// called to prevent re-escaping the values on encoding using mv.Xml, etc. +func XMLEscapeCharsDecoder(b ...bool) { + if len(b) == 0 { + xmlEscapeCharsDecoder = !xmlEscapeCharsDecoder + } else { + xmlEscapeCharsDecoder = b[0] + } + if xmlEscapeCharsDecoder == true && xmlEscapeChars == true { + xmlEscapeChars = false + } +} diff --git a/vendor/github.com/clbanning/mxj/v2/exists.go b/vendor/github.com/clbanning/mxj/v2/exists.go new file mode 100644 index 000000000..07aeda43f --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/exists.go @@ -0,0 +1,9 @@ +package mxj + +// Checks whether the path exists. If err != nil then 'false' is returned +// along with the error encountered parsing either the "path" or "subkeys" +// argument. +func (mv Map) Exists(path string, subkeys ...string) (bool, error) { + v, err := mv.ValuesForPath(path, subkeys...) 
+ return (err == nil && len(v) > 0), err +} diff --git a/vendor/github.com/clbanning/mxj/v2/files.go b/vendor/github.com/clbanning/mxj/v2/files.go new file mode 100644 index 000000000..27e06e1e8 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files.go @@ -0,0 +1,287 @@ +package mxj + +import ( + "fmt" + "io" + "os" +) + +type Maps []Map + +func NewMaps() Maps { + return make(Maps, 0) +} + +type MapRaw struct { + M Map + R []byte +} + +// NewMapsFromXmlFile - creates an array from a file of JSON values. +func NewMapsFromJsonFile(name string) (Maps, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]Map, 0) + for { + m, raw, err := NewMapJsonReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw)) + } + if len(m) > 0 { + am = append(am, m) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// ReadMapsFromJsonFileRaw - creates an array of MapRaw from a file of JSON values. +func NewMapsFromJsonFileRaw(name string) ([]MapRaw, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]MapRaw, 0) + for { + mr := new(MapRaw) + mr.M, mr.R, err = NewMapJsonReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R)) + } + if len(mr.M) > 0 { + am = append(am, *mr) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// NewMapsFromXmlFile - creates an array from a file of XML values. +func NewMapsFromXmlFile(name string) (Maps, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]Map, 0) + for { + m, raw, err := NewMapXmlReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw)) + } + if len(m) > 0 { + am = append(am, m) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// NewMapsFromXmlFileRaw - creates an array of MapRaw from a file of XML values. +// NOTE: the slice with the raw XML is clean with no extra capacity - unlike NewMapXmlReaderRaw(). +// It is slow at parsing a file from disk and is intended for relatively small utility files. 
+func NewMapsFromXmlFileRaw(name string) ([]MapRaw, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]MapRaw, 0) + for { + mr := new(MapRaw) + mr.M, mr.R, err = NewMapXmlReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R)) + } + if len(mr.M) > 0 { + am = append(am, *mr) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// ------------------------ Maps writing ------------------------- +// These are handy-dandy methods for dumping configuration data, etc. + +// JsonString - analogous to mv.Json() +func (mvs Maps) JsonString(safeEncoding ...bool) (string, error) { + var s string + for _, v := range mvs { + j, err := v.Json() + if err != nil { + return s, err + } + s += string(j) + } + return s, nil +} + +// JsonStringIndent - analogous to mv.JsonIndent() +func (mvs Maps) JsonStringIndent(prefix, indent string, safeEncoding ...bool) (string, error) { + var s string + var haveFirst bool + for _, v := range mvs { + j, err := v.JsonIndent(prefix, indent) + if err != nil { + return s, err + } + if haveFirst { + s += "\n" + } else { + haveFirst = true + } + s += string(j) + } + return s, nil +} + +// XmlString - analogous to mv.Xml() +func (mvs Maps) XmlString() (string, error) { + var s string + for _, v := range mvs { + x, err := v.Xml() + if err != nil { + return s, err + } + s += string(x) + } + return s, nil +} + +// XmlStringIndent - analogous to mv.XmlIndent() +func (mvs Maps) XmlStringIndent(prefix, indent string) (string, error) { + var s string + for _, v := range mvs { + x, err := v.XmlIndent(prefix, indent) + if err != nil { + return s, err + } + s += string(x) + } + return s, nil +} + +// JsonFile - write Maps to named file as JSON +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use JsonWriter method. +func (mvs Maps) JsonFile(file string, safeEncoding ...bool) error { + var encoding bool + if len(safeEncoding) == 1 { + encoding = safeEncoding[0] + } + s, err := mvs.JsonString(encoding) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// JsonFileIndent - write Maps to named file as pretty JSON +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use JsonIndentWriter method. +func (mvs Maps) JsonFileIndent(file, prefix, indent string, safeEncoding ...bool) error { + var encoding bool + if len(safeEncoding) == 1 { + encoding = safeEncoding[0] + } + s, err := mvs.JsonStringIndent(prefix, indent, encoding) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// XmlFile - write Maps to named file as XML +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use XmlWriter method. 
+func (mvs Maps) XmlFile(file string) error { + s, err := mvs.XmlString() + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// XmlFileIndent - write Maps to named file as pretty XML +// Note: the file will be created,if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use XmlIndentWriter method. +func (mvs Maps) XmlFileIndent(file, prefix, indent string) error { + s, err := mvs.XmlStringIndent(prefix, indent) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} diff --git a/vendor/github.com/clbanning/mxj/v2/files_test.badjson b/vendor/github.com/clbanning/mxj/v2/files_test.badjson new file mode 100644 index 000000000..d18720044 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test.badjson @@ -0,0 +1,2 @@ +{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" } +{ "with":"some", "bad":JSON, "in":"it" } diff --git a/vendor/github.com/clbanning/mxj/v2/files_test.badxml b/vendor/github.com/clbanning/mxj/v2/files_test.badxml new file mode 100644 index 000000000..4736ef973 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test.badxml @@ -0,0 +1,9 @@ + + test + for files.go + + + some + doc + test case + diff --git a/vendor/github.com/clbanning/mxj/v2/files_test.json b/vendor/github.com/clbanning/mxj/v2/files_test.json new file mode 100644 index 000000000..e9a3ddf40 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test.json @@ -0,0 +1,2 @@ +{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" } +{ "with":"just", "two":2, "JSON":"values", "true":true } diff --git a/vendor/github.com/clbanning/mxj/v2/files_test.xml b/vendor/github.com/clbanning/mxj/v2/files_test.xml new file mode 100644 index 000000000..65cf021fb --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test.xml @@ -0,0 +1,9 @@ + + test + for files.go + + + some + doc + test case + diff --git a/vendor/github.com/clbanning/mxj/v2/files_test_dup.json b/vendor/github.com/clbanning/mxj/v2/files_test_dup.json new file mode 100644 index 000000000..2becb6a45 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test_dup.json @@ -0,0 +1 @@ +{"a":"test","file":"for","files_test.go":"case","this":"is"}{"JSON":"values","true":true,"two":2,"with":"just"} \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/v2/files_test_dup.xml b/vendor/github.com/clbanning/mxj/v2/files_test_dup.xml new file mode 100644 index 000000000..f68d22e28 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test_dup.xml @@ -0,0 +1 @@ +for files.gotestdoctest casesome \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/v2/files_test_indent.json b/vendor/github.com/clbanning/mxj/v2/files_test_indent.json new file mode 100644 index 000000000..6fde15634 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test_indent.json @@ -0,0 +1,12 @@ +{ + "a": "test", + "file": "for", + "files_test.go": "case", + "this": "is" +} +{ + "JSON": "values", + "true": true, + "two": 2, + "with": "just" +} \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/v2/files_test_indent.xml b/vendor/github.com/clbanning/mxj/v2/files_test_indent.xml new file mode 100644 index 000000000..8c91a1dc2 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test_indent.xml @@ -0,0 +1,8 @@ + + for files.go + test 
+ + doc + test case + some + \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/v2/gob.go b/vendor/github.com/clbanning/mxj/v2/gob.go new file mode 100644 index 000000000..d56c2fd6f --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/gob.go @@ -0,0 +1,35 @@ +// gob.go - Encode/Decode a Map into a gob object. + +package mxj + +import ( + "bytes" + "encoding/gob" +) + +// NewMapGob returns a Map value for a gob object that has been +// encoded from a map[string]interface{} (or compatible type) value. +// It is intended to provide symmetric handling of Maps that have +// been encoded using mv.Gob. +func NewMapGob(gobj []byte) (Map, error) { + m := make(map[string]interface{}, 0) + if len(gobj) == 0 { + return m, nil + } + r := bytes.NewReader(gobj) + dec := gob.NewDecoder(r) + if err := dec.Decode(&m); err != nil { + return m, err + } + return m, nil +} + +// Gob returns a gob-encoded value for the Map 'mv'. +func (mv Map) Gob() ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + if err := enc.Encode(map[string]interface{}(mv)); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/clbanning/mxj/v2/json.go b/vendor/github.com/clbanning/mxj/v2/json.go new file mode 100644 index 000000000..eb2c05a18 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/json.go @@ -0,0 +1,323 @@ +// Copyright 2012-2014 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "time" +) + +// ------------------------------ write JSON ----------------------- + +// Just a wrapper on json.Marshal. +// If option safeEncoding is'true' then safe encoding of '<', '>' and '&' +// is preserved. (see encoding/json#Marshal, encoding/json#Encode) +func (mv Map) Json(safeEncoding ...bool) ([]byte, error) { + var s bool + if len(safeEncoding) == 1 { + s = safeEncoding[0] + } + + b, err := json.Marshal(mv) + + if !s { + b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) + b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) + b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) + } + return b, err +} + +// Just a wrapper on json.MarshalIndent. +// If option safeEncoding is'true' then safe encoding of '<' , '>' and '&' +// is preserved. (see encoding/json#Marshal, encoding/json#Encode) +func (mv Map) JsonIndent(prefix, indent string, safeEncoding ...bool) ([]byte, error) { + var s bool + if len(safeEncoding) == 1 { + s = safeEncoding[0] + } + + b, err := json.MarshalIndent(mv, prefix, indent) + if !s { + b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) + b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) + b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) + } + return b, err +} + +// The following implementation is provided for symmetry with NewMapJsonReader[Raw] +// The names will also provide a key for the number of return arguments. + +// Writes the Map as JSON on the Writer. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonWriter(jsonWriter io.Writer, safeEncoding ...bool) error { + b, err := mv.Json(safeEncoding...) + if err != nil { + return err + } + + _, err = jsonWriter.Write(b) + return err +} + +// Writes the Map as JSON on the Writer. []byte is the raw JSON that was written. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. 
+func (mv Map) JsonWriterRaw(jsonWriter io.Writer, safeEncoding ...bool) ([]byte, error) { + b, err := mv.Json(safeEncoding...) + if err != nil { + return b, err + } + + _, err = jsonWriter.Write(b) + return b, err +} + +// Writes the Map as pretty JSON on the Writer. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonIndentWriter(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) error { + b, err := mv.JsonIndent(prefix, indent, safeEncoding...) + if err != nil { + return err + } + + _, err = jsonWriter.Write(b) + return err +} + +// Writes the Map as pretty JSON on the Writer. []byte is the raw JSON that was written. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonIndentWriterRaw(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) ([]byte, error) { + b, err := mv.JsonIndent(prefix, indent, safeEncoding...) + if err != nil { + return b, err + } + + _, err = jsonWriter.Write(b) + return b, err +} + +// --------------------------- read JSON ----------------------------- + +// Decode numericvalues as json.Number type Map values - see encoding/json#Number. +// NOTE: this is for decoding JSON into a Map with NewMapJson(), NewMapJsonReader(), +// etc.; it does not affect NewMapXml(), etc. The XML encoders mv.Xml() and mv.XmlIndent() +// do recognize json.Number types; a JSON object can be decoded to a Map with json.Number +// value types and the resulting Map can be correctly encoded into a XML object. +var JsonUseNumber bool + +// Just a wrapper on json.Unmarshal +// Converting JSON to XML is a simple as: +// ... +// mapVal, merr := mxj.NewMapJson(jsonVal) +// if merr != nil { +// // handle error +// } +// xmlVal, xerr := mapVal.Xml() +// if xerr != nil { +// // handle error +// } +// NOTE: as a special case, passing a list, e.g., [{"some-null-value":"", "a-non-null-value":"bar"}], +// will be interpreted as having the root key 'object' prepended - {"object":[ ... ]} - to unmarshal to a Map. +// See mxj/j2x/j2x_test.go. +func NewMapJson(jsonVal []byte) (Map, error) { + // empty or nil begets empty + if len(jsonVal) == 0 { + m := make(map[string]interface{}, 0) + return m, nil + } + // handle a goofy case ... + if jsonVal[0] == '[' { + jsonVal = []byte(`{"object":` + string(jsonVal) + `}`) + } + m := make(map[string]interface{}) + // err := json.Unmarshal(jsonVal, &m) + buf := bytes.NewReader(jsonVal) + dec := json.NewDecoder(buf) + if JsonUseNumber { + dec.UseNumber() + } + err := dec.Decode(&m) + return m, err +} + +// Retrieve a Map value from an io.Reader. +// NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an +// os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte +// value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal +// a JSON object. +func NewMapJsonReader(jsonReader io.Reader) (Map, error) { + jb, err := getJson(jsonReader) + if err != nil || len(*jb) == 0 { + return nil, err + } + + // Unmarshal the 'presumed' JSON string + return NewMapJson(*jb) +} + +// Retrieve a Map value and raw JSON - []byte - from an io.Reader. +// NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an +// os.File, there may be significant performance impact. 
If the io.Reader is wrapping a []byte +// value in-memory, however, such as http.Request.Body you CAN use it to efficiently unmarshal +// a JSON object and retrieve the raw JSON in a single call. +func NewMapJsonReaderRaw(jsonReader io.Reader) (Map, []byte, error) { + jb, err := getJson(jsonReader) + if err != nil || len(*jb) == 0 { + return nil, *jb, err + } + + // Unmarshal the 'presumed' JSON string + m, merr := NewMapJson(*jb) + return m, *jb, merr +} + +// Pull the next JSON string off the stream: just read from first '{' to its closing '}'. +// Returning a pointer to the slice saves 16 bytes - maybe unnecessary, but internal to package. +func getJson(rdr io.Reader) (*[]byte, error) { + bval := make([]byte, 1) + jb := make([]byte, 0) + var inQuote, inJson bool + var parenCnt int + var previous byte + + // scan the input for a matched set of {...} + // json.Unmarshal will handle syntax checking. + for { + _, err := rdr.Read(bval) + if err != nil { + if err == io.EOF && inJson && parenCnt > 0 { + return &jb, fmt.Errorf("no closing } for JSON string: %s", string(jb)) + } + return &jb, err + } + switch bval[0] { + case '{': + if !inQuote { + parenCnt++ + inJson = true + } + case '}': + if !inQuote { + parenCnt-- + } + if parenCnt < 0 { + return nil, fmt.Errorf("closing } without opening {: %s", string(jb)) + } + case '"': + if inQuote { + if previous == '\\' { + break + } + inQuote = false + } else { + inQuote = true + } + case '\n', '\r', '\t', ' ': + if !inQuote { + continue + } + } + if inJson { + jb = append(jb, bval[0]) + if parenCnt == 0 { + break + } + } + previous = bval[0] + } + + return &jb, nil +} + +// ------------------------------- JSON Reader handler via Map values ----------------------- + +// Default poll delay to keep Handler from spinning on an open stream +// like sitting on os.Stdin waiting for imput. +var jhandlerPollInterval = time.Duration(1e6) + +// While unnecessary, we make HandleJsonReader() have the same signature as HandleXmlReader(). +// This avoids treating one or other as a special case and discussing the underlying stdlib logic. + +// Bulk process JSON using handlers that process a Map value. +// 'rdr' is an io.Reader for the JSON (stream). +// 'mapHandler' is the Map processing handler. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. +// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'. +func HandleJsonReader(jsonReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error { + var n int + for { + m, merr := NewMapJsonReader(jsonReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m); !ok { + break + } + } else if merr != io.EOF { + <-time.After(jhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} + +// Bulk process JSON using handlers that process a Map value and the raw JSON. +// 'rdr' is an io.Reader for the JSON (stream). 
+// 'mapHandler' is the Map and raw JSON - []byte - processor. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error and raw JSON processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. +// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'. +func HandleJsonReaderRaw(jsonReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error { + var n int + for { + m, raw, merr := NewMapJsonReaderRaw(jsonReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr, raw); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m, raw); !ok { + break + } + } else if merr != io.EOF { + <-time.After(jhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} diff --git a/vendor/github.com/clbanning/mxj/v2/keyvalues.go b/vendor/github.com/clbanning/mxj/v2/keyvalues.go new file mode 100644 index 000000000..55620ca22 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/keyvalues.go @@ -0,0 +1,668 @@ +// Copyright 2012-2014 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// keyvalues.go: Extract values from an arbitrary XML doc. Tag path can include wildcard characters. + +package mxj + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +// ----------------------------- get everything FOR a single key ------------------------- + +const ( + minArraySize = 32 +) + +var defaultArraySize int = minArraySize + +// SetArraySize adjust the buffers for expected number of values to return from ValuesForKey() and ValuesForPath(). +// This can have the effect of significantly reducing memory allocation-copy functions for large data sets. +// Returns the initial buffer size. +func SetArraySize(size int) int { + if size > minArraySize { + defaultArraySize = size + } else { + defaultArraySize = minArraySize + } + return defaultArraySize +} + +// ValuesForKey return all values in Map, 'mv', associated with a 'key'. If len(returned_values) == 0, then no match. +// On error, the returned slice is 'nil'. NOTE: 'key' can be wildcard, "*". +// 'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list. +// - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them. +// - For attributes prefix the label with the attribute prefix character, by default a +// hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.) +// - If the 'key' refers to a list, then "key:value" could select a list member of the list. +// - The subkey can be wildcarded - "key:*" - to require that it's there with some value. +// - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an +// exclusion critera - e.g., "!author:William T. Gaddis". +// - If val contains ":" symbol, use SetFieldSeparator to a unused symbol, perhaps "|". 
+func (mv Map) ValuesForKey(key string, subkeys ...string) ([]interface{}, error) { + m := map[string]interface{}(mv) + var subKeyMap map[string]interface{} + if len(subkeys) > 0 { + var err error + subKeyMap, err = getSubKeyMap(subkeys...) + if err != nil { + return nil, err + } + } + + ret := make([]interface{}, 0, defaultArraySize) + var cnt int + hasKey(m, key, &ret, &cnt, subKeyMap) + return ret[:cnt], nil +} + +var KeyNotExistError = errors.New("Key does not exist") + +// ValueForKey is a wrapper on ValuesForKey. It returns the first member of []interface{}, if any. +// If there is no value, "nil, nil" is returned. +func (mv Map) ValueForKey(key string, subkeys ...string) (interface{}, error) { + vals, err := mv.ValuesForKey(key, subkeys...) + if err != nil { + return nil, err + } + if len(vals) == 0 { + return nil, KeyNotExistError + } + return vals[0], nil +} + +// hasKey - if the map 'key' exists append it to array +// if it doesn't do nothing except scan array and map values +func hasKey(iv interface{}, key string, ret *[]interface{}, cnt *int, subkeys map[string]interface{}) { + // func hasKey(iv interface{}, key string, ret *[]interface{}, subkeys map[string]interface{}) { + switch iv.(type) { + case map[string]interface{}: + vv := iv.(map[string]interface{}) + // see if the current value is of interest + if v, ok := vv[key]; ok { + switch v.(type) { + case map[string]interface{}: + if hasSubKeys(v, subkeys) { + *ret = append(*ret, v) + *cnt++ + } + case []interface{}: + for _, av := range v.([]interface{}) { + if hasSubKeys(av, subkeys) { + *ret = append(*ret, av) + *cnt++ + } + } + default: + if len(subkeys) == 0 { + *ret = append(*ret, v) + *cnt++ + } + } + } + + // wildcard case + if key == "*" { + for _, v := range vv { + switch v.(type) { + case map[string]interface{}: + if hasSubKeys(v, subkeys) { + *ret = append(*ret, v) + *cnt++ + } + case []interface{}: + for _, av := range v.([]interface{}) { + if hasSubKeys(av, subkeys) { + *ret = append(*ret, av) + *cnt++ + } + } + default: + if len(subkeys) == 0 { + *ret = append(*ret, v) + *cnt++ + } + } + } + } + + // scan the rest + for _, v := range vv { + hasKey(v, key, ret, cnt, subkeys) + } + case []interface{}: + for _, v := range iv.([]interface{}) { + hasKey(v, key, ret, cnt, subkeys) + } + } +} + +// ----------------------- get everything for a node in the Map --------------------------- + +// Allow indexed arrays in "path" specification. (Request from Abhijit Kadam - abhijitk100@gmail.com.) +// 2014.04.28 - implementation note. +// Implemented as a wrapper of (old)ValuesForPath() because we need look-ahead logic to handle expansion +// of wildcards and unindexed arrays. Embedding such logic into valuesForKeyPath() would have made the +// code much more complicated; this wrapper is straightforward, easy to debug, and doesn't add significant overhead. + +// ValuesForPatb retrieves all values for a path from the Map. If len(returned_values) == 0, then no match. +// On error, the returned array is 'nil'. +// 'path' is a dot-separated path of key values. +// - If a node in the path is '*', then everything beyond is walked. +// - 'path' can contain indexed array references, such as, "*.data[1]" and "msgs[2].data[0].field" - +// even "*[2].*[0].field". +// 'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list. +// - By default 'val' is of type string. "key:val:bool" and "key:val:float" to coerce them. 
+// - For attributes prefix the label with the attribute prefix character, by default a +// hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.) +// - If the 'path' refers to a list, then "tag:value" would return member of the list. +// - The subkey can be wildcarded - "key:*" - to require that it's there with some value. +// - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an +// exclusion critera - e.g., "!author:William T. Gaddis". +// - If val contains ":" symbol, use SetFieldSeparator to a unused symbol, perhaps "|". +func (mv Map) ValuesForPath(path string, subkeys ...string) ([]interface{}, error) { + // If there are no array indexes in path, use legacy ValuesForPath() logic. + if strings.Index(path, "[") < 0 { + return mv.oldValuesForPath(path, subkeys...) + } + + var subKeyMap map[string]interface{} + if len(subkeys) > 0 { + var err error + subKeyMap, err = getSubKeyMap(subkeys...) + if err != nil { + return nil, err + } + } + + keys, kerr := parsePath(path) + if kerr != nil { + return nil, kerr + } + + vals, verr := valuesForArray(keys, mv) + if verr != nil { + return nil, verr // Vals may be nil, but return empty array. + } + + // Need to handle subkeys ... only return members of vals that satisfy conditions. + retvals := make([]interface{}, 0) + for _, v := range vals { + if hasSubKeys(v, subKeyMap) { + retvals = append(retvals, v) + } + } + return retvals, nil +} + +func valuesForArray(keys []*key, m Map) ([]interface{}, error) { + var tmppath string + var haveFirst bool + var vals []interface{} + var verr error + + lastkey := len(keys) - 1 + for i := 0; i <= lastkey; i++ { + if !haveFirst { + tmppath = keys[i].name + haveFirst = true + } else { + tmppath += "." + keys[i].name + } + + // Look-ahead: explode wildcards and unindexed arrays. + // Need to handle un-indexed list recursively: + // e.g., path is "stuff.data[0]" rather than "stuff[0].data[0]". + // Need to treat it as "stuff[0].data[0]", "stuff[1].data[0]", ... + if !keys[i].isArray && i < lastkey && keys[i+1].isArray { + // Can't pass subkeys because we may not be at literal end of path. + vv, vverr := m.oldValuesForPath(tmppath) + if vverr != nil { + return nil, vverr + } + for _, v := range vv { + // See if we can walk the value. + am, ok := v.(map[string]interface{}) + if !ok { + continue + } + // Work the backend. + nvals, nvalserr := valuesForArray(keys[i+1:], Map(am)) + if nvalserr != nil { + return nil, nvalserr + } + vals = append(vals, nvals...) + } + break // have recursed the whole path - return + } + + if keys[i].isArray || i == lastkey { + // Don't pass subkeys because may not be at literal end of path. + vals, verr = m.oldValuesForPath(tmppath) + } else { + continue + } + if verr != nil { + return nil, verr + } + + if i == lastkey && !keys[i].isArray { + break + } + + // Now we're looking at an array - supposedly. + // Is index in range of vals? + if len(vals) <= keys[i].position { + vals = nil + break + } + + // Return the array member of interest, if at end of path. + if i == lastkey { + vals = vals[keys[i].position:(keys[i].position + 1)] + break + } + + // Extract the array member of interest. 
+ am := vals[keys[i].position:(keys[i].position + 1)] + + // must be a map[string]interface{} value so we can keep walking the path + amm, ok := am[0].(map[string]interface{}) + if !ok { + vals = nil + break + } + + m = Map(amm) + haveFirst = false + } + + return vals, nil +} + +type key struct { + name string + isArray bool + position int +} + +func parsePath(s string) ([]*key, error) { + keys := strings.Split(s, ".") + + ret := make([]*key, 0) + + for i := 0; i < len(keys); i++ { + if keys[i] == "" { + continue + } + + newkey := new(key) + if strings.Index(keys[i], "[") < 0 { + newkey.name = keys[i] + ret = append(ret, newkey) + continue + } + + p := strings.Split(keys[i], "[") + newkey.name = p[0] + p = strings.Split(p[1], "]") + if p[0] == "" { // no right bracket + return nil, fmt.Errorf("no right bracket on key index: %s", keys[i]) + } + // convert p[0] to a int value + pos, nerr := strconv.ParseInt(p[0], 10, 32) + if nerr != nil { + return nil, fmt.Errorf("cannot convert index to int value: %s", p[0]) + } + newkey.position = int(pos) + newkey.isArray = true + ret = append(ret, newkey) + } + + return ret, nil +} + +// legacy ValuesForPath() - now wrapped to handle special case of indexed arrays in 'path'. +func (mv Map) oldValuesForPath(path string, subkeys ...string) ([]interface{}, error) { + m := map[string]interface{}(mv) + var subKeyMap map[string]interface{} + if len(subkeys) > 0 { + var err error + subKeyMap, err = getSubKeyMap(subkeys...) + if err != nil { + return nil, err + } + } + + keys := strings.Split(path, ".") + if keys[len(keys)-1] == "" { + keys = keys[:len(keys)-1] + } + ivals := make([]interface{}, 0, defaultArraySize) + var cnt int + valuesForKeyPath(&ivals, &cnt, m, keys, subKeyMap) + return ivals[:cnt], nil +} + +func valuesForKeyPath(ret *[]interface{}, cnt *int, m interface{}, keys []string, subkeys map[string]interface{}) { + lenKeys := len(keys) + + // load 'm' values into 'ret' + // expand any lists + if lenKeys == 0 { + switch m.(type) { + case map[string]interface{}: + if subkeys != nil { + if ok := hasSubKeys(m, subkeys); !ok { + return + } + } + *ret = append(*ret, m) + *cnt++ + case []interface{}: + for i, v := range m.([]interface{}) { + if subkeys != nil { + if ok := hasSubKeys(v, subkeys); !ok { + continue // only load list members with subkeys + } + } + *ret = append(*ret, (m.([]interface{}))[i]) + *cnt++ + } + default: + if subkeys != nil { + return // must be map[string]interface{} if there are subkeys + } + *ret = append(*ret, m) + *cnt++ + } + return + } + + // key of interest + key := keys[0] + switch key { + case "*": // wildcard - scan all values + switch m.(type) { + case map[string]interface{}: + for _, v := range m.(map[string]interface{}) { + // valuesForKeyPath(ret, v, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) + } + case []interface{}: + for _, v := range m.([]interface{}) { + switch v.(type) { + // flatten out a list of maps - keys are processed + case map[string]interface{}: + for _, vv := range v.(map[string]interface{}) { + // valuesForKeyPath(ret, vv, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys) + } + default: + // valuesForKeyPath(ret, v, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) + } + } + } + default: // key - must be map[string]interface{} + switch m.(type) { + case map[string]interface{}: + if v, ok := m.(map[string]interface{})[key]; ok { + // valuesForKeyPath(ret, v, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) + } + 
case []interface{}: // may be buried in list + for _, v := range m.([]interface{}) { + switch v.(type) { + case map[string]interface{}: + if vv, ok := v.(map[string]interface{})[key]; ok { + // valuesForKeyPath(ret, vv, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys) + } + } + } + } + } +} + +// hasSubKeys() - interface{} equality works for string, float64, bool +// 'v' must be a map[string]interface{} value to have subkeys +// 'a' can have k:v pairs with v.(string) == "*", which is treated like a wildcard. +func hasSubKeys(v interface{}, subkeys map[string]interface{}) bool { + if len(subkeys) == 0 { + return true + } + + switch v.(type) { + case map[string]interface{}: + // do all subKey name:value pairs match? + mv := v.(map[string]interface{}) + for skey, sval := range subkeys { + isNotKey := false + if skey[:1] == "!" { // a NOT-key + skey = skey[1:] + isNotKey = true + } + vv, ok := mv[skey] + if !ok { // key doesn't exist + if isNotKey { // key not there, but that's what we want + if kv, ok := sval.(string); ok && kv == "*" { + continue + } + } + return false + } + // wildcard check + if kv, ok := sval.(string); ok && kv == "*" { + if isNotKey { // key is there, and we don't want it + return false + } + continue + } + switch sval.(type) { + case string: + if s, ok := vv.(string); ok && s == sval.(string) { + if isNotKey { + return false + } + continue + } + case bool: + if b, ok := vv.(bool); ok && b == sval.(bool) { + if isNotKey { + return false + } + continue + } + case float64: + if f, ok := vv.(float64); ok && f == sval.(float64) { + if isNotKey { + return false + } + continue + } + } + // key there but didn't match subkey value + if isNotKey { // that's what we want + continue + } + return false + } + // all subkeys matched + return true + } + + // not a map[string]interface{} value, can't have subkeys + return false +} + +// Generate map of key:value entries as map[string]string. +// 'kv' arguments are "name:value" pairs: attribute keys are designated with prepended hyphen, '-'. +// If len(kv) == 0, the return is (nil, nil). +func getSubKeyMap(kv ...string) (map[string]interface{}, error) { + if len(kv) == 0 { + return nil, nil + } + m := make(map[string]interface{}, 0) + for _, v := range kv { + vv := strings.Split(v, fieldSep) + switch len(vv) { + case 2: + m[vv[0]] = interface{}(vv[1]) + case 3: + switch vv[2] { + case "string", "char", "text": + m[vv[0]] = interface{}(vv[1]) + case "bool", "boolean": + // ParseBool treats "1"==true & "0"==false + b, err := strconv.ParseBool(vv[1]) + if err != nil { + return nil, fmt.Errorf("can't convert subkey value to bool: %s", vv[1]) + } + m[vv[0]] = interface{}(b) + case "float", "float64", "num", "number", "numeric": + f, err := strconv.ParseFloat(vv[1], 64) + if err != nil { + return nil, fmt.Errorf("can't convert subkey value to float: %s", vv[1]) + } + m[vv[0]] = interface{}(f) + default: + return nil, fmt.Errorf("unknown subkey conversion spec: %s", v) + } + default: + return nil, fmt.Errorf("unknown subkey spec: %s", v) + } + } + return m, nil +} + +// ------------------------------- END of valuesFor ... ---------------------------- + +// ----------------------- locate where a key value is in the tree ------------------- + +//----------------------------- find all paths to a key -------------------------------- + +// PathsForKey returns all paths through Map, 'mv', (in dot-notation) that terminate with the specified key. +// Results can be used with ValuesForPath. 
+func (mv Map) PathsForKey(key string) []string { + m := map[string]interface{}(mv) + breadbasket := make(map[string]bool, 0) + breadcrumbs := "" + + hasKeyPath(breadcrumbs, m, key, breadbasket) + if len(breadbasket) == 0 { + return nil + } + + // unpack map keys to return + res := make([]string, len(breadbasket)) + var i int + for k := range breadbasket { + res[i] = k + i++ + } + + return res +} + +// PathForKeyShortest extracts the shortest path from all possible paths - from PathsForKey() - in Map, 'mv'.. +// Paths are strings using dot-notation. +func (mv Map) PathForKeyShortest(key string) string { + paths := mv.PathsForKey(key) + + lp := len(paths) + if lp == 0 { + return "" + } + if lp == 1 { + return paths[0] + } + + shortest := paths[0] + shortestLen := len(strings.Split(shortest, ".")) + + for i := 1; i < len(paths); i++ { + vlen := len(strings.Split(paths[i], ".")) + if vlen < shortestLen { + shortest = paths[i] + shortestLen = vlen + } + } + + return shortest +} + +// hasKeyPath - if the map 'key' exists append it to KeyPath.path and increment KeyPath.depth +// This is really just a breadcrumber that saves all trails that hit the prescribed 'key'. +func hasKeyPath(crumbs string, iv interface{}, key string, basket map[string]bool) { + switch iv.(type) { + case map[string]interface{}: + vv := iv.(map[string]interface{}) + if _, ok := vv[key]; ok { + // create a new breadcrumb, intialized with the one we have + var nbc string + if crumbs == "" { + nbc = key + } else { + nbc = crumbs + "." + key + } + basket[nbc] = true + } + // walk on down the path, key could occur again at deeper node + for k, v := range vv { + // create a new breadcrumb, intialized with the one we have + var nbc string + if crumbs == "" { + nbc = k + } else { + nbc = crumbs + "." + k + } + hasKeyPath(nbc, v, key, basket) + } + case []interface{}: + // crumb-trail doesn't change, pass it on + for _, v := range iv.([]interface{}) { + hasKeyPath(crumbs, v, key, basket) + } + } +} + +var PathNotExistError = errors.New("Path does not exist") + +// ValueForPath wraps ValuesFor Path and returns the first value returned. +// If no value is found it returns 'nil' and PathNotExistError. +func (mv Map) ValueForPath(path string) (interface{}, error) { + vals, err := mv.ValuesForPath(path) + if err != nil { + return nil, err + } + if len(vals) == 0 { + return nil, PathNotExistError + } + return vals[0], nil +} + +// ValuesForPathString returns the first found value for the path as a string. +func (mv Map) ValueForPathString(path string) (string, error) { + vals, err := mv.ValuesForPath(path) + if err != nil { + return "", err + } + if len(vals) == 0 { + return "", errors.New("ValueForPath: path not found") + } + val := vals[0] + return fmt.Sprintf("%v", val), nil +} + +// ValueOrEmptyForPathString returns the first found value for the path as a string. +// If the path is not found then it returns an empty string. 
+func (mv Map) ValueOrEmptyForPathString(path string) string { + str, _ := mv.ValueForPathString(path) + return str +} diff --git a/vendor/github.com/clbanning/mxj/v2/leafnode.go b/vendor/github.com/clbanning/mxj/v2/leafnode.go new file mode 100644 index 000000000..cf413ebdd --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/leafnode.go @@ -0,0 +1,112 @@ +package mxj + +// leafnode.go - return leaf nodes with paths and values for the Map +// inspired by: https://groups.google.com/forum/#!topic/golang-nuts/3JhuVKRuBbw + +import ( + "strconv" + "strings" +) + +const ( + NoAttributes = true // suppress LeafNode values that are attributes +) + +// LeafNode - a terminal path value in a Map. +// For XML Map values it represents an attribute or simple element value - of type +// string unless Map was created using Cast flag. For JSON Map values it represents +// a string, numeric, boolean, or null value. +type LeafNode struct { + Path string // a dot-notation representation of the path with array subscripting + Value interface{} // the value at the path termination +} + +// LeafNodes - returns an array of all LeafNode values for the Map. +// The option no_attr argument suppresses attribute values (keys with prepended hyphen, '-') +// as well as the "#text" key for the associated simple element value. +// +// PrependAttrWithHypen(false) will result in attributes having .attr-name as +// terminal node in 'path' while the path for the element value, itself, will be +// the base path w/o "#text". +// +// LeafUseDotNotation(true) causes list members to be identified using ".N" syntax +// rather than "[N]" syntax. +func (mv Map) LeafNodes(no_attr ...bool) []LeafNode { + var a bool + if len(no_attr) == 1 { + a = no_attr[0] + } + + l := make([]LeafNode, 0) + getLeafNodes("", "", map[string]interface{}(mv), &l, a) + return l +} + +func getLeafNodes(path, node string, mv interface{}, l *[]LeafNode, noattr bool) { + // if stripping attributes, then also strip "#text" key + if !noattr || node != "#text" { + if path != "" && node[:1] != "[" { + path += "." + } + path += node + } + switch mv.(type) { + case map[string]interface{}: + for k, v := range mv.(map[string]interface{}) { + // if noattr && k[:1] == "-" { + if noattr && len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 { + continue + } + getLeafNodes(path, k, v, l, noattr) + } + case []interface{}: + for i, v := range mv.([]interface{}) { + if useDotNotation { + getLeafNodes(path, strconv.Itoa(i), v, l, noattr) + } else { + getLeafNodes(path, "["+strconv.Itoa(i)+"]", v, l, noattr) + } + } + default: + // can't walk any further, so create leaf + n := LeafNode{path, mv} + *l = append(*l, n) + } +} + +// LeafPaths - all paths that terminate in LeafNode values. +func (mv Map) LeafPaths(no_attr ...bool) []string { + ln := mv.LeafNodes() + ss := make([]string, len(ln)) + for i := 0; i < len(ln); i++ { + ss[i] = ln[i].Path + } + return ss +} + +// LeafValues - all terminal values in the Map. +func (mv Map) LeafValues(no_attr ...bool) []interface{} { + ln := mv.LeafNodes() + vv := make([]interface{}, len(ln)) + for i := 0; i < len(ln); i++ { + vv[i] = ln[i].Value + } + return vv +} + +// ====================== utilities ====================== + +// https://groups.google.com/forum/#!topic/golang-nuts/pj0C5IrZk4I +var useDotNotation bool + +// LeafUseDotNotation sets a flag that list members in LeafNode paths +// should be identified using ".N" syntax rather than the default "[N]" +// syntax. 
Calling LeafUseDotNotation with no arguments toggles the +// flag on/off; otherwise, the argument sets the flag value 'true'/'false'. +func LeafUseDotNotation(b ...bool) { + if len(b) == 0 { + useDotNotation = !useDotNotation + return + } + useDotNotation = b[0] +} diff --git a/vendor/github.com/clbanning/mxj/v2/misc.go b/vendor/github.com/clbanning/mxj/v2/misc.go new file mode 100644 index 000000000..5b4fab216 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/misc.go @@ -0,0 +1,86 @@ +// Copyright 2016 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// misc.go - mimic functions (+others) called out in: +// https://groups.google.com/forum/#!topic/golang-nuts/jm_aGsJNbdQ +// Primarily these methods let you retrive XML structure information. + +package mxj + +import ( + "fmt" + "sort" + "strings" +) + +// Return the root element of the Map. If there is not a single key in Map, +// then an error is returned. +func (mv Map) Root() (string, error) { + mm := map[string]interface{}(mv) + if len(mm) != 1 { + return "", fmt.Errorf("Map does not have singleton root. Len: %d.", len(mm)) + } + for k, _ := range mm { + return k, nil + } + return "", nil +} + +// If the path is an element with sub-elements, return a list of the sub-element +// keys. (The list is alphabeticly sorted.) NOTE: Map keys that are prefixed with +// '-', a hyphen, are considered attributes; see m.Attributes(path). +func (mv Map) Elements(path string) ([]string, error) { + e, err := mv.ValueForPath(path) + if err != nil { + return nil, err + } + switch e.(type) { + case map[string]interface{}: + ee := e.(map[string]interface{}) + elems := make([]string, len(ee)) + var i int + for k, _ := range ee { + if len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 { + continue // skip attributes + } + elems[i] = k + i++ + } + elems = elems[:i] + // alphabetic sort keeps things tidy + sort.Strings(elems) + return elems, nil + } + return nil, fmt.Errorf("no elements for path: %s", path) +} + +// If the path is an element with attributes, return a list of the attribute +// keys. (The list is alphabeticly sorted.) NOTE: Map keys that are not prefixed with +// '-', a hyphen, are not treated as attributes; see m.Elements(path). Also, if the +// attribute prefix is "" - SetAttrPrefix("") or PrependAttrWithHyphen(false) - then +// there are no identifiable attributes. +func (mv Map) Attributes(path string) ([]string, error) { + a, err := mv.ValueForPath(path) + if err != nil { + return nil, err + } + switch a.(type) { + case map[string]interface{}: + aa := a.(map[string]interface{}) + attrs := make([]string, len(aa)) + var i int + for k, _ := range aa { + if len(attrPrefix) == 0 || strings.Index(k, attrPrefix) != 0 { + continue // skip non-attributes + } + attrs[i] = k[len(attrPrefix):] + i++ + } + attrs = attrs[:i] + // alphabetic sort keeps things tidy + sort.Strings(attrs) + return attrs, nil + } + return nil, fmt.Errorf("no attributes for path: %s", path) +} diff --git a/vendor/github.com/clbanning/mxj/v2/mxj.go b/vendor/github.com/clbanning/mxj/v2/mxj.go new file mode 100644 index 000000000..f0592f06c --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/mxj.go @@ -0,0 +1,128 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. +// Copyright 2012-2014 Charles Banning. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "fmt" + "sort" +) + +const ( + Cast = true // for clarity - e.g., mxj.NewMapXml(doc, mxj.Cast) + SafeEncoding = true // ditto - e.g., mv.Json(mxj.SafeEncoding) +) + +type Map map[string]interface{} + +// Allocate a Map. +func New() Map { + m := make(map[string]interface{}, 0) + return m +} + +// Cast a Map to map[string]interface{} +func (mv Map) Old() map[string]interface{} { + return mv +} + +// Return a copy of mv as a newly allocated Map. If the Map only contains string, +// numeric, map[string]interface{}, and []interface{} values, then it can be thought +// of as a "deep copy." Copying a structure (or structure reference) value is subject +// to the noted restrictions. +// NOTE: If 'mv' includes structure values with, possibly, JSON encoding tags +// then only public fields of the structure are in the new Map - and with +// keys that conform to any encoding tag instructions. The structure itself will +// be represented as a map[string]interface{} value. +func (mv Map) Copy() (Map, error) { + // this is the poor-man's deep copy + // not efficient, but it works + j, jerr := mv.Json() + // must handle, we don't know how mv got built + if jerr != nil { + return nil, jerr + } + return NewMapJson(j) +} + +// --------------- StringIndent ... from x2j.WriteMap ------------- + +// Pretty print a Map. +func (mv Map) StringIndent(offset ...int) string { + return writeMap(map[string]interface{}(mv), true, true, offset...) +} + +// Pretty print a Map without the value type information - just key:value entries. +func (mv Map) StringIndentNoTypeInfo(offset ...int) string { + return writeMap(map[string]interface{}(mv), false, true, offset...) +} + +// writeMap - dumps the map[string]interface{} for examination. +// 'typeInfo' causes value type to be printed. +// 'offset' is initial indentation count; typically: Write(m). +func writeMap(m interface{}, typeInfo, root bool, offset ...int) string { + var indent int + if len(offset) == 1 { + indent = offset[0] + } + + var s string + switch m.(type) { + case []interface{}: + if typeInfo { + s += "[[]interface{}]" + } + for _, v := range m.([]interface{}) { + s += "\n" + for i := 0; i < indent; i++ { + s += " " + } + s += writeMap(v, typeInfo, false, indent+1) + } + case map[string]interface{}: + list := make([][2]string, len(m.(map[string]interface{}))) + var n int + for k, v := range m.(map[string]interface{}) { + list[n][0] = k + list[n][1] = writeMap(v, typeInfo, false, indent+1) + n++ + } + sort.Sort(mapList(list)) + for _, v := range list { + if root { + root = false + } else { + s += "\n" + } + for i := 0; i < indent; i++ { + s += " " + } + s += v[0] + " : " + v[1] + } + default: + if typeInfo { + s += fmt.Sprintf("[%T] %+v", m, m) + } else { + s += fmt.Sprintf("%+v", m) + } + } + return s +} + +// ======================== utility =============== + +type mapList [][2]string + +func (ml mapList) Len() int { + return len(ml) +} + +func (ml mapList) Swap(i, j int) { + ml[i], ml[j] = ml[j], ml[i] +} + +func (ml mapList) Less(i, j int) bool { + return ml[i][0] <= ml[j][0] +} diff --git a/vendor/github.com/clbanning/mxj/v2/newmap.go b/vendor/github.com/clbanning/mxj/v2/newmap.go new file mode 100644 index 000000000..b29394905 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/newmap.go @@ -0,0 +1,184 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. 
+// Copyright 2012-2014, 2018 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// remap.go - build a new Map from the current Map based on keyOld:keyNew mapppings +// keys can use dot-notation, keyOld can use wildcard, '*' +// +// Computational strategy - +// Using the key path - []string - traverse a new map[string]interface{} and +// insert the oldVal as the newVal when we arrive at the end of the path. +// If the type at the end is nil, then that is newVal +// If the type at the end is a singleton (string, float64, bool) an array is created. +// If the type at the end is an array, newVal is just appended. +// If the type at the end is a map, it is inserted if possible or the map value +// is converted into an array if necessary. + +package mxj + +import ( + "errors" + "strings" +) + +// (Map)NewMap - create a new Map from data in the current Map. +// 'keypairs' are key mappings "oldKey:newKey" and specify that the current value of 'oldKey' +// should be the value for 'newKey' in the returned Map. +// - 'oldKey' supports dot-notation as described for (Map)ValuesForPath() +// - 'newKey' supports dot-notation but with no wildcards, '*', or indexed arrays +// - "oldKey" is shorthand for the keypair value "oldKey:oldKey" +// - "oldKey:" and ":newKey" are invalid keypair values +// - if 'oldKey' does not exist in the current Map, it is not written to the new Map. +// "null" is not supported unless it is the current Map. +// - see newmap_test.go for several syntax examples +// - mv.NewMap() == mxj.New() +// +// NOTE: "examples/partial.go" shows how to create arbitrary sub-docs of an XML doc. +func (mv Map) NewMap(keypairs ...string) (Map, error) { + n := make(map[string]interface{}, 0) + if len(keypairs) == 0 { + return n, nil + } + + // loop through the pairs + var oldKey, newKey string + var path []string + for _, v := range keypairs { + if len(v) == 0 { + continue // just skip over empty keypair arguments + } + + // initialize oldKey, newKey and check + vv := strings.Split(v, ":") + if len(vv) > 2 { + return n, errors.New("oldKey:newKey keypair value not valid - " + v) + } + if len(vv) == 1 { + oldKey, newKey = vv[0], vv[0] + } else { + oldKey, newKey = vv[0], vv[1] + } + strings.TrimSpace(oldKey) + strings.TrimSpace(newKey) + if i := strings.Index(newKey, "*"); i > -1 { + return n, errors.New("newKey value cannot contain wildcard character - " + v) + } + if i := strings.Index(newKey, "["); i > -1 { + return n, errors.New("newKey value cannot contain indexed arrays - " + v) + } + if oldKey == "" || newKey == "" { + return n, errors.New("oldKey or newKey is not specified - " + v) + } + + // get oldKey value + oldVal, err := mv.ValuesForPath(oldKey) + if err != nil { + return n, err + } + if len(oldVal) == 0 { + continue // oldKey has no value, may not exist in mv + } + + // break down path + path = strings.Split(newKey, ".") + if path[len(path)-1] == "" { // ignore a trailing dot in newKey spec + path = path[:len(path)-1] + } + + addNewVal(&n, path, oldVal) + } + + return n, nil +} + +// navigate 'n' to end of path and add val +func addNewVal(n *map[string]interface{}, path []string, val []interface{}) { + // newVal - either singleton or array + var newVal interface{} + if len(val) == 1 { + newVal = val[0] // is type interface{} + } else { + newVal = interface{}(val) + } + + // walk to the position of interest, create it if necessary + m := (*n) // initialize map walker + var k string // key for 
m + lp := len(path) - 1 // when to stop looking + for i := 0; i < len(path); i++ { + k = path[i] + if i == lp { + break + } + var nm map[string]interface{} // holds position of next-map + switch m[k].(type) { + case nil: // need a map for next node in path, so go there + nm = make(map[string]interface{}, 0) + m[k] = interface{}(nm) + m = m[k].(map[string]interface{}) + case map[string]interface{}: + // OK - got somewhere to walk to, go there + m = m[k].(map[string]interface{}) + case []interface{}: + // add a map and nm points to new map unless there's already + // a map in the array, then nm points there + // The placement of the next value in the array is dependent + // on the sequence of members - could land on a map or a nil + // value first. TODO: how to test this. + a := make([]interface{}, 0) + var foundmap bool + for _, vv := range m[k].([]interface{}) { + switch vv.(type) { + case nil: // doesn't appear that this occurs, need a test case + if foundmap { // use the first one in array + a = append(a, vv) + continue + } + nm = make(map[string]interface{}, 0) + a = append(a, interface{}(nm)) + foundmap = true + case map[string]interface{}: + if foundmap { // use the first one in array + a = append(a, vv) + continue + } + nm = vv.(map[string]interface{}) + a = append(a, vv) + foundmap = true + default: + a = append(a, vv) + } + } + // no map found in array + if !foundmap { + nm = make(map[string]interface{}, 0) + a = append(a, interface{}(nm)) + } + m[k] = interface{}(a) // must insert in map + m = nm + default: // it's a string, float, bool, etc. + aa := make([]interface{}, 0) + nm = make(map[string]interface{}, 0) + aa = append(aa, m[k], nm) + m[k] = interface{}(aa) + m = nm + } + } + + // value is nil, array or a singleton of some kind + // initially m.(type) == map[string]interface{} + v := m[k] + switch v.(type) { + case nil: // initialized + m[k] = newVal + case []interface{}: + a := m[k].([]interface{}) + a = append(a, newVal) + m[k] = interface{}(a) + default: // v exists:string, float64, bool, map[string]interface, etc. + a := make([]interface{}, 0) + a = append(a, v, newVal) + m[k] = interface{}(a) + } +} diff --git a/vendor/github.com/clbanning/mxj/v2/readme.md b/vendor/github.com/clbanning/mxj/v2/readme.md new file mode 100644 index 000000000..323a747d4 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/readme.md @@ -0,0 +1,207 @@ +

mxj - to/from maps, XML and JSON

+Decode/encode XML to/from map[string]interface{} (or JSON) values, and extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use the mxj/x2j and mxj/j2x packages.
+

Installation

+Using go.mod: +
+go get github.com/clbanning/mxj/v2@v2.3.2
+
+ +
+import "github.com/clbanning/mxj/v2"
+
+ +... or just vendor the package. + +

Related Packages

+
+https://github.com/clbanning/checkxml provides functions for validating XML data.
+

Refactor Encoder - 2020.05.01

+Issue #70 highlighted that encoding large maps does not scale well, since the original logic used string append
+operations. Using bytes.Buffer results in linear scaling for very large XML docs. (Metrics based on MacBook Pro
+i7 w/ 16 GB.)
+
+	Nodes     m.XML() time
+	54809     12.53708ms
+	109780    32.403183ms
+	164678    59.826412ms
+	482598    109.358007ms
+

Refactor Decoder - 2015.11.15

+For over a year I've wanted to refactor the XML-to-map[string]interface{} decoder to make it more performant.
+I recently took the time to do that, since we were using github.com/clbanning/mxj in a production system that
+could be deployed on a Raspberry Pi. Now the decoder is comparable to the stdlib JSON-to-map[string]interface{}
+decoder in terms of its additional processing overhead relative to decoding to a structure value. As shown by:
+
+	BenchmarkNewMapXml-4          100000   18043 ns/op
+	BenchmarkNewStructXml-4       100000   14892 ns/op
+	BenchmarkNewMapJson-4         300000    4633 ns/op
+	BenchmarkNewStructJson-4      300000    3427 ns/op
+	BenchmarkNewMapXmlBooks-4      20000   82850 ns/op
+	BenchmarkNewStructXmlBooks-4   20000   67822 ns/op
+	BenchmarkNewMapJsonBooks-4    100000   17222 ns/op
+	BenchmarkNewStructJsonBooks-4 100000   15309 ns/op
+

Notices

+
+	2021.02.02: v2.5 - add XmlCheckIsValid toggle to force checking that the encoded XML is valid
+	2020.12.14: v2.4 - add XMLEscapeCharsDecoder to preserve XML escaped characters in Map values
+	2020.10.28: v2.3 - add TrimWhiteSpace option
+	2020.05.01: v2.2 - optimize map to XML encoding for large XML docs.
+	2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq.
+	2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq.
+	2019.01.21: DecodeSimpleValuesAsMap - decode to map[<tag>:map["#text":<value>]] rather than map[<tag>:<value>]
+	2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+	2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+	2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+	2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+	2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+	2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+	2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+	2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+	2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+	2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+	2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+	            To cast them to float64, first set flag with CastNanInf(true).
+	2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+	2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+	2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+	2015.12.02: XML decoding/encoding that preserves original structure of document. See NewMapXmlSeq()
+	            and mv.XmlSeq() / mv.XmlSeqIndent().
+	2015-05-20: New: mv.StringIndentNoTypeInfo().
+	            Also, alphabetically sort map[string]interface{} values by key to prettify output for mv.Xml(),
+	            mv.XmlIndent(), mv.StringIndent(), mv.StringIndentNoTypeInfo().
+	2014-11-09: IncludeTagSeqNum() adds "_seq" key with XML doc positional information.
+	            (NOTE: PreserveXmlList() is similar and will be here soon.)
+	2014-09-18: inspired by NYTimes fork, added PrependAttrWithHyphen() to allow stripping hyphen from attribute tag.
+	2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+	2014-04-28: ValuesForPath() and NewMap() now accept path with indexed array references.
+
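The 2015-05-20 entry refers to the StringIndent printers defined in mxj.go (below); the same file implements Copy() as a JSON round trip, so a copy shares no structure with the original. A minimal sketch with invented data:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv := mxj.Map{"doc": map[string]interface{}{"name": "alice"}}

		// Copy marshals to JSON and back, so the copy shares
		// no structure with the original Map
		cp, err := mv.Copy()
		if err != nil {
			panic(err)
		}
		cp["doc"].(map[string]interface{})["name"] = "bob"

		fmt.Println(mv.StringIndentNoTypeInfo()) // still shows name : alice
		fmt.Println(cp.StringIndentNoTypeInfo()) // shows name : bob
	}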

Basic Unmarshal XML to map[string]interface{}

+
type Map map[string]interface{}
+
+Create a `Map` value, 'mv', from any `map[string]interface{}` value, 'v':
+
mv := Map(v)
+
+Unmarshal / marshal XML as a `Map` value, 'mv':
+
mv, err := NewMapXml(xmlValue) // unmarshal
+xmlValue, err := mv.Xml()      // marshal
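Putting the two calls above together with the Json encoder from json.go, a minimal XML-to-JSON round trip might look like this (the input document is invented for illustration):

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		xmlValue := []byte(`<doc><name>alice</name><age>42</age></doc>`)

		mv, err := mxj.NewMapXml(xmlValue) // unmarshal the XML into a Map
		if err != nil {
			panic(err)
		}

		jsonValue, err := mv.Json() // re-encode the same Map as JSON
		if err != nil {
			panic(err)
		}
		fmt.Println(string(jsonValue)) // {"doc":{"age":"42","name":"alice"}}
	}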
+
+Unmarshal XML from an `io.Reader` as a `Map` value, 'mv':
+
mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded
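Per the 2016.02.12 notice above, the reader variants only surface an error once io.EOF is reached, so a stream of several documents can be drained in a loop; a sketch (the file name is hypothetical):

	package main

	import (
		"fmt"
		"io"
		"os"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		f, err := os.Open("docs.xml") // hypothetical file with several XML docs
		if err != nil {
			panic(err)
		}
		defer f.Close()

		for {
			mv, err := mxj.NewMapXmlReader(f)
			if err == io.EOF {
				break
			}
			if err != nil {
				panic(err)
			}
			fmt.Println(mv.StringIndent())
		}
	}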
+
+Marshal `Map` value, 'mv', to an XML Writer (`io.Writer`):
+
err := mv.XmlWriter(xmlWriter)
+raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter
+
+Also, for prettified output:
+
xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)
+
+Bulk process XML with error handling (note: handlers must return a boolean value):
+
err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))
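The JSON analogs of these bulk handlers - HandleJsonReader and HandleJsonReaderRaw - ship in this drop (json.go); a minimal sketch wiring up the two callbacks:

	package main

	import (
		"fmt"
		"strings"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		// two JSON objects back to back, as getJson() scans them off the stream
		rdr := strings.NewReader(`{"a":1} {"b":2}`)

		mapHandler := func(m mxj.Map) bool {
			fmt.Println("got:", m)
			return true // returning false stops the reader
		}
		errHandler := func(err error) bool {
			fmt.Println("err:", err)
			return true // returning false stops the reader and returns the error
		}

		if err := mxj.HandleJsonReader(rdr, mapHandler, errHandler); err != nil {
			panic(err)
		}
	}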
+
+Converting XML to JSON: see Examples for `NewMapXml` and `HandleXmlReader`.
+
+There are comparable functions and methods for JSON processing.
+
+Arbitrary structure values can be decoded to / encoded from `Map` values:
+
mv, err := NewMapStruct(structVal)
+err := mv.Struct(structPointer)
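Note that in this vendored copy NewMapStruct is deprecated and always returns an error (see struct.go); the Map-to-struct direction still works and is implemented as a JSON round trip, so encoding/json field tags apply. A sketch:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	type Person struct {
		Name string `json:"name"`
		Age  int    `json:"age"`
	}

	func main() {
		mv := mxj.Map{"name": "alice", "age": 42}

		var p Person
		// Struct marshals the Map to JSON and unmarshals into the pointer,
		// so ordinary encoding/json tags and rules apply
		if err := mv.Struct(&p); err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", p) // {Name:alice Age:42}
	}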
+ +

Extract / modify Map values

+To work with XML tag values, JSON key values, Map key values, or structure field values, first decode the XML,
+JSON, or structure to a `Map` value, 'mv', or cast a `map[string]interface{}` value to a `Map` value, 'mv'; then:
+
paths := mv.PathsForKey(key)
+path := mv.PathForKeyShortest(key)
+values, err := mv.ValuesForKey(key, subkeys)
+values, err := mv.ValuesForPath(path, subkeys)
+count, err := mv.UpdateValuesForPath(newVal, path, subkeys)
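A sketch of this path machinery with invented data, using the wildcard-free dot-notation, indexed-array, and subkey syntax documented in keyvalues.go and updatevalues.go; the expected results follow from the contracts described there:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv, err := mxj.NewMapJson([]byte(
			`{"doc":{"books":[{"title":"A","seq":"1"},{"title":"B","seq":"2"}]}}`))
		if err != nil {
			panic(err)
		}

		titles, _ := mv.ValuesForPath("doc.books.title")
		fmt.Println(titles) // [A B]

		// subkey filter: only list members with seq == "2"
		vals, _ := mv.ValuesForPath("doc.books", "seq:2")
		fmt.Println(vals) // [map[seq:2 title:B]]

		// indexed array reference
		first, _ := mv.ValuesForPath("doc.books[0].title")
		fmt.Println(first) // [A]

		// update the title of the member matching the subkey;
		// n should be 1 if the documented contract holds
		n, _ := mv.UpdateValuesForPath("title:C", "doc.books.title", "seq:2")
		fmt.Println(n)
	}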
+
+Get everything at once, irrespective of path depth:
+
leafnodes := mv.LeafNodes()
+leafvalues := mv.LeafValues()
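A sketch of the leaf-node walkers from leafnode.go, including the ".N" list notation toggled by LeafUseDotNotation:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv, err := mxj.NewMapJson([]byte(`{"doc":{"tags":["a","b"]}}`))
		if err != nil {
			panic(err)
		}

		for _, ln := range mv.LeafNodes() {
			fmt.Println(ln.Path, "=", ln.Value) // doc.tags[0] = a, then doc.tags[1] = b
		}

		mxj.LeafUseDotNotation(true) // switch list indexing to ".N"
		fmt.Println(mv.LeafPaths())  // [doc.tags.0 doc.tags.1]
	}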
+
+A new `Map` with whatever keys are desired can be created from the current `Map` and then encoded in XML
+or JSON. (Note: keys can use dot-notation.)
+
newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+newXml, err := newMap.Xml()   // for example
+newJson, err := newMap.Json() // ditto
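A small remapping sketch (newmap.go): 'oldKey' accepts the full ValuesForPath dot-notation, while 'newKey' must be a plain dotted path with no wildcards or indexes:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv, err := mxj.NewMapJson([]byte(`{"user":{"first":"alice","last":"b","id":7}}`))
		if err != nil {
			panic(err)
		}

		// pull two values into a new shape; "user.id" is simply dropped
		newMap, err := mv.NewMap("user.first:person.name", "user.last:person.surname")
		if err != nil {
			panic(err)
		}

		j, _ := newMap.Json()
		fmt.Println(string(j)) // {"person":{"name":"alice","surname":"b"}}
	}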
+ +

Usage

+
+The package is fairly well [self-documented with examples](http://godoc.org/github.com/clbanning/mxj).
+
+Also, the subdirectory "examples" contains a wide range of examples, several taken from golang-nuts discussions.
+
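This vendored copy also ships three small mutators not shown above - Remove, RenameKey, and SetValueForPath (remove.go, rename.go, set.go); RenameKey additionally relies on mv.Exists from elsewhere in the package. A sketch with invented data:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv, err := mxj.NewMapJson([]byte(`{"doc":{"name":"alice","tmp":1}}`))
		if err != nil {
			panic(err)
		}

		_ = mv.SetValueForPath(42, "doc.answer") // add a key under an existing parent
		_ = mv.Remove("doc.tmp")                 // drop a key by path

		if err := mv.RenameKey("doc.name", "title"); err != nil {
			panic(err)
		}

		j, _ := mv.Json()
		fmt.Println(string(j)) // {"doc":{"answer":42,"title":"alice"}}
	}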

XML parsing conventions

+
+Using NewMapXml()
+
+   - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+     to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+     `SetAttrPrefix()`.)
+   - If the element is a simple element and has attributes, the element value
+     is given the key `#text` for its `map[string]interface{}` representation. (See
+     the 'atomFeedString.xml' test data, below.)
+   - XML comments, directives, and process instructions are ignored.
+   - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+Using NewMapXmlSeq()
+
+   - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}` values
+     where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the
+     value for `<attr_label>`.
+   - All elements, except for the root, have a "#seq" key.
+   - Comments, directives, and process instructions are unmarshalled into the Map using the
+     keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+     specifics.)
+   - Name space syntax is preserved:
+      - `<ns:key>something</ns:key>` parses to `map["ns:key"]interface{}{"something"}`
+      - `xmlns:ns="http://myns.com/ns"` parses to `map["xmlns:ns"]interface{}{"http://myns.com/ns"}`
+
+Both
+
+   - By default, "NaN", "Inf", and "-Inf" values are not cast to float64. If you want them
+     to be cast, set a flag to cast them using CastNanInf(true).
+
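To make the attribute and #text conventions concrete, a sketch assuming the default hyphen prefix (i.e., no SetAttrPrefix override):

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv, err := mxj.NewMapXml([]byte(`<note seq="3">hello</note>`))
		if err != nil {
			panic(err)
		}

		// a simple element with an attribute gets "-seq" and "#text" keys
		fmt.Println(mv) // map[note:map[#text:hello -seq:3]]

		seq, _ := mv.ValueForPath("note.-seq")
		fmt.Println(seq) // 3
	}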

XML encoding conventions

+
+   - 'nil' `Map` values, which may represent 'null' JSON values, are encoded as `<tag/>`.
+     NOTE: the operation is not symmetric as `<tag/>` elements are decoded as `tag:""` `Map` values,
+     which, then, encode in JSON as `"tag":""` values.
+   - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one. (Go
+     randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+     Map value to XML and want the same sequencing of elements, look at NewMapXmlSeq() and
+     mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+     working with the Map representation.
+
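A sketch of the 'nil encodes as an empty element' convention; element order follows the alphabetic key sort noted in the notices above, though the exact whitespace of XmlIndent output is approximate here:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mv := mxj.Map{"doc": map[string]interface{}{"empty": nil, "v": "x"}}

		x, err := mv.XmlIndent("", "  ")
		if err != nil {
			panic(err)
		}
		fmt.Println(string(x))
		// <doc>
		//   <empty/>
		//   <v>x</v>
		// </doc>
	}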

Running "go test"

+
+Because there are no guarantees on the sequence in which map elements are retrieved, the tests have been
+written for visual verification in most cases. One advantage is that you can easily use the
+output from running "go test" as examples of calling the various functions and methods.
+

Motivation

+
+I make extensive use of JSON for messaging and typically unmarshal the messages into
+`map[string]interface{}` values. This is easily done using `json.Unmarshal` from the
+standard Go libraries. Unfortunately, many legacy solutions use structured
+XML messages; in those environments the applications would have to be refactored to
+interoperate with my components.
+
+The better solution is to just provide an alternative HTTP handler that receives
+XML messages and parses them into a `map[string]interface{}` value and then reuses
+all the JSON-based code. The Go `xml.Unmarshal()` function does not provide the same
+option of unmarshaling XML messages into `map[string]interface{}` values. So I wrote
+a couple of small functions to fill this gap and released them as the x2j package.
+
+Over the next year and a half additional features were added, and the companion j2x
+package was released to address XML encoding of arbitrary JSON and `map[string]interface{}`
+values. As part of a refactoring of our production system and looking at how we had been
+using the x2j and j2x packages, we found that we rarely performed direct XML-to-JSON or
+JSON-to-XML conversion and that working with the XML or JSON as `map[string]interface{}`
+values was the primary value. Thus, everything was refactored into the mxj package.
+
diff --git a/vendor/github.com/clbanning/mxj/v2/remove.go b/vendor/github.com/clbanning/mxj/v2/remove.go
new file mode 100644
index 000000000..8362ab17f
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/remove.go
@@ -0,0 +1,37 @@
+package mxj
+
+import "strings"
+
+// Remove deletes the value at the given dot-notation path.
+func (mv Map) Remove(path string) error {
+	m := map[string]interface{}(mv)
+	return remove(m, path)
+}
+
+func remove(m interface{}, path string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	lastKey := lastKey(path)
+	delete(val, lastKey)
+
+	return nil
+}
+
+// returns the last key of the path.
+// lastKey("a.b.c") would have returned "c"
+func lastKey(path string) string {
+	keys := strings.Split(path, ".")
+	key := keys[len(keys)-1]
+	return key
+}
+
+// returns the path without the last key
+// parentPath("a.b.c") would have returned "a.b"
+func parentPath(path string) string {
+	keys := strings.Split(path, ".")
+	parentPath := strings.Join(keys[0:len(keys)-1], ".")
+	return parentPath
+}
diff --git a/vendor/github.com/clbanning/mxj/v2/rename.go b/vendor/github.com/clbanning/mxj/v2/rename.go
new file mode 100644
index 000000000..4c655ed5d
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/rename.go
@@ -0,0 +1,61 @@
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// RenameKey renames a key in a Map.
+// It works only for nested maps.
+// It does not work when the key is inside a list.
+func (mv Map) RenameKey(path string, newName string) error {
+	var v bool
+	var err error
+	if v, err = mv.Exists(path); err == nil && !v {
+		return errors.New("RenameKey: path not found: " + path)
+	} else if err != nil {
+		return err
+	}
+	if v, err = mv.Exists(parentPath(path) + "." 
+ newName); err == nil && v { + return errors.New("RenameKey: key already exists: " + newName) + } else if err != nil { + return err + } + + m := map[string]interface{}(mv) + return renameKey(m, path, newName) +} + +func renameKey(m interface{}, path string, newName string) error { + val, err := prevValueByPath(m, path) + if err != nil { + return err + } + + oldName := lastKey(path) + val[newName] = val[oldName] + delete(val, oldName) + + return nil +} + +// returns a value which contains a last key in the path +// For example: prevValueByPath("a.b.c", {a{b{c: 3}}}) returns {c: 3} +func prevValueByPath(m interface{}, path string) (map[string]interface{}, error) { + keys := strings.Split(path, ".") + + switch mValue := m.(type) { + case map[string]interface{}: + for key, value := range mValue { + if key == keys[0] { + if len(keys) == 1 { + return mValue, nil + } else { + // keep looking for the full path to the key + return prevValueByPath(value, strings.Join(keys[1:], ".")) + } + } + } + } + return nil, errors.New("prevValueByPath: didn't find path – " + path) +} diff --git a/vendor/github.com/clbanning/mxj/v2/set.go b/vendor/github.com/clbanning/mxj/v2/set.go new file mode 100644 index 000000000..a297fc388 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/set.go @@ -0,0 +1,26 @@ +package mxj + +import ( + "strings" +) + +// Sets the value for the path +func (mv Map) SetValueForPath(value interface{}, path string) error { + pathAry := strings.Split(path, ".") + parentPathAry := pathAry[0 : len(pathAry)-1] + parentPath := strings.Join(parentPathAry, ".") + + val, err := mv.ValueForPath(parentPath) + if err != nil { + return err + } + if val == nil { + return nil // we just ignore the request if there's no val + } + + key := pathAry[len(pathAry)-1] + cVal := val.(map[string]interface{}) + cVal[key] = value + + return nil +} diff --git a/vendor/github.com/clbanning/mxj/v2/setfieldsep.go b/vendor/github.com/clbanning/mxj/v2/setfieldsep.go new file mode 100644 index 000000000..b70715ebc --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/setfieldsep.go @@ -0,0 +1,20 @@ +package mxj + +// Per: https://github.com/clbanning/mxj/issues/37#issuecomment-278651862 +var fieldSep string = ":" + +// SetFieldSeparator changes the default field separator, ":", for the +// newVal argument in mv.UpdateValuesForPath and the optional 'subkey' arguments +// in mv.ValuesForKey and mv.ValuesForPath. +// +// E.g., if the newVal value is "http://blah/blah", setting the field separator +// to "|" will allow the newVal specification, "|http://blah/blah" to parse +// properly. If called with no argument or an empty string value, the field +// separator is set to the default, ":". +func SetFieldSeparator(s ...string) { + if len(s) == 0 || s[0] == "" { + fieldSep = ":" // the default + return + } + fieldSep = s[0] +} diff --git a/vendor/github.com/clbanning/mxj/v2/songtext.xml b/vendor/github.com/clbanning/mxj/v2/songtext.xml new file mode 100644 index 000000000..8c0f2becb --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/songtext.xml @@ -0,0 +1,29 @@ + + help me! 
+
+
+
+	Henry was a renegade
+	Didn't like to play it safe
+	One component at a time
+	There's got to be a better way
+	Oh, people came from miles around
+	Searching for a steady job
+	Welcome to the Motor Town
+	Booming like an atom bomb
+
+
+	Oh, Henry was the end of the story
+	Then everything went wrong
+	And we'll return it to its former glory
+	But it just takes so long
+
+
+
+	It's going to take a long time
+	It's going to take it, but we'll make it one day
+	It's going to take a long time
+	It's going to take it, but we'll make it one day
+
+
diff --git a/vendor/github.com/clbanning/mxj/v2/strict.go b/vendor/github.com/clbanning/mxj/v2/strict.go
new file mode 100644
index 000000000..1e769560b
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/strict.go
@@ -0,0 +1,30 @@
+// Copyright 2016 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// strict.go actually addresses setting xml.Decoder attribute
+// values. This'll let you parse non-standard XML.
+
+package mxj
+
+import (
+	"encoding/xml"
+)
+
+// CustomDecoder can be used to specify xml.Decoder attribute
+// values, e.g., Strict:false, to be used. By default CustomDecoder
+// is nil. If CustomDecoder != nil, then the mxj.XmlCharsetReader variable is
+// ignored and must be set as part of the CustomDecoder value, if needed.
+//	Usage:
+//		mxj.CustomDecoder = &xml.Decoder{Strict:false}
+var CustomDecoder *xml.Decoder
+
+// useCustomDecoder copies over public attributes from CustomDecoder
+func useCustomDecoder(d *xml.Decoder) {
+	d.Strict = CustomDecoder.Strict
+	d.AutoClose = CustomDecoder.AutoClose
+	d.Entity = CustomDecoder.Entity
+	d.CharsetReader = CustomDecoder.CharsetReader
+	d.DefaultSpace = CustomDecoder.DefaultSpace
+}
+
diff --git a/vendor/github.com/clbanning/mxj/v2/struct.go b/vendor/github.com/clbanning/mxj/v2/struct.go
new file mode 100644
index 000000000..9be636cdc
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/struct.go
@@ -0,0 +1,54 @@
+// Copyright 2012-2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+
+	// "github.com/fatih/structs"
+)
+
+// Create a new Map value from a structure. Error returned if argument is not a structure.
+// Only public structure fields are decoded in the Map value. See github.com/fatih/structs#Map
+// for handling of "structs" tags.
+
+// DEPRECATED - import github.com/fatih/structs and cast the result of structs.Map to mxj.Map.
+//	import "github.com/fatih/structs"
+//	...
+//	   sm, err := structs.Map(<some struct>)
+//	   if err != nil {
+//	      // handle error
+//	   }
+//	   m := mxj.Map(sm)
+// Alternatively, uncomment the old source and import in struct.go.
+func NewMapStruct(structVal interface{}) (Map, error) {
+	return nil, errors.New("deprecated - see package documentation")
+	/*
+		if !structs.IsStruct(structVal) {
+			return nil, errors.New("NewMapStruct() error: argument is not type Struct")
+		}
+		return structs.Map(structVal), nil
+	*/
+}
+
+// Marshal a map[string]interface{} into a structure referenced by 'structPtr'. Error returned
+// if argument is not a pointer or if json.Unmarshal returns an error.
+//	json.Unmarshal structure encoding rules are followed to encode public structure fields.
+func (mv Map) Struct(structPtr interface{}) error {
+	// should check that we're getting a pointer.
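
For orientation, a minimal, hypothetical sketch (assuming the mxj/v2 import; the Person type is illustrative, not part of the package) of the JSON round-trip that Struct() performs:

	type Person struct {
		Name string `json:"name"`
		Age  int    `json:"age"`
	}
	var p Person
	err := mxj.Map{"name": "gopher", "age": 13}.Struct(&p)
	// on success p == Person{Name: "gopher", Age: 13} - the Map is pushed
	// through json.Marshal and then json.Unmarshal into the struct.
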
+	if reflect.ValueOf(structPtr).Kind() != reflect.Ptr {
+		return errors.New("mv.Struct() error: argument is not type Ptr")
+	}
+
+	m := map[string]interface{}(mv)
+	j, err := json.Marshal(m)
+	if err != nil {
+		return err
+	}
+
+	return json.Unmarshal(j, structPtr)
+}
diff --git a/vendor/github.com/clbanning/mxj/v2/updatevalues.go b/vendor/github.com/clbanning/mxj/v2/updatevalues.go
new file mode 100644
index 000000000..9e10d84e8
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/updatevalues.go
@@ -0,0 +1,258 @@
+// Copyright 2012-2014, 2017 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// updatevalues.go - modify a value based on path and possibly sub-keys
+// TODO(clb): handle simple elements with attributes and NewMapXmlSeq Map values.
+
+package mxj
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Update value based on path and possible sub-key values.
+// A count of the number of values changed and any error are returned.
+// If the count == 0, then no path (and subkeys) matched.
+//	'newVal' can be a Map or map[string]interface{} value with a single 'key' that is the key to be modified
+//	         or a string value "key:value[:type]" where type is "bool" or "num" to cast the value.
+//	'path' is a dot-notation list of keys to traverse; last key in path can be newVal key
+//	       NOTE: 'path' spec does not currently support indexed array references.
+//	'subkeys' are "key:value[:type]" entries that must match for path node
+//	          - For attributes prefix the label with the attribute prefix character, by default a
+//	            hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.)
+//	          - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+//	          - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//	            exclusion criterion - e.g., "!author:William T. Gaddis".
+//
+//	NOTES:
+//		1. Simple elements with attributes need a path terminated as ".#text" to modify the actual value.
+//		2. Values in Maps created using NewMapXmlSeq are map[string]interface{} values with a "#text" key.
+//		3. If values in 'newVal' or 'subkeys' args contain ":", use SetFieldSeparator to set an unused symbol,
+//		   perhaps "|".
+func (mv Map) UpdateValuesForPath(newVal interface{}, path string, subkeys ...string) (int, error) {
+	m := map[string]interface{}(mv)
+
+	// extract the subkeys
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+ if err != nil { + return 0, err + } + } + + // extract key and value from newVal + var key string + var val interface{} + switch newVal.(type) { + case map[string]interface{}, Map: + switch newVal.(type) { // "fallthrough is not permitted in type switch" (Spec) + case Map: + newVal = newVal.(Map).Old() + } + if len(newVal.(map[string]interface{})) != 1 { + return 0, fmt.Errorf("newVal map can only have len == 1 - %+v", newVal) + } + for key, val = range newVal.(map[string]interface{}) { + } + case string: // split it as a key:value pair + ss := strings.Split(newVal.(string), fieldSep) + n := len(ss) + if n < 2 || n > 3 { + return 0, fmt.Errorf("unknown newVal spec - %+v", newVal) + } + key = ss[0] + if n == 2 { + val = interface{}(ss[1]) + } else if n == 3 { + switch ss[2] { + case "bool", "boolean": + nv, err := strconv.ParseBool(ss[1]) + if err != nil { + return 0, fmt.Errorf("can't convert newVal to bool - %+v", newVal) + } + val = interface{}(nv) + case "num", "numeric", "float", "int": + nv, err := strconv.ParseFloat(ss[1], 64) + if err != nil { + return 0, fmt.Errorf("can't convert newVal to float64 - %+v", newVal) + } + val = interface{}(nv) + default: + return 0, fmt.Errorf("unknown type for newVal value - %+v", newVal) + } + } + default: + return 0, fmt.Errorf("invalid newVal type - %+v", newVal) + } + + // parse path + keys := strings.Split(path, ".") + + var count int + updateValuesForKeyPath(key, val, m, keys, subKeyMap, &count) + + return count, nil +} + +// navigate the path +func updateValuesForKeyPath(key string, value interface{}, m interface{}, keys []string, subkeys map[string]interface{}, cnt *int) { + // ----- at end node: looking at possible node to get 'key' ---- + if len(keys) == 1 { + updateValue(key, value, m, keys[0], subkeys, cnt) + return + } + + // ----- here we are navigating the path thru the penultimate node -------- + // key of interest is keys[0] - the next in the path + switch keys[0] { + case "*": // wildcard - scan all values + switch m.(type) { + case map[string]interface{}: + for _, v := range m.(map[string]interface{}) { + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + case []interface{}: + for _, v := range m.([]interface{}) { + switch v.(type) { + // flatten out a list of maps - keys are processed + case map[string]interface{}: + for _, vv := range v.(map[string]interface{}) { + updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt) + } + default: + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + } + } + default: // key - must be map[string]interface{} + switch m.(type) { + case map[string]interface{}: + if v, ok := m.(map[string]interface{})[keys[0]]; ok { + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + case []interface{}: // may be buried in list + for _, v := range m.([]interface{}) { + switch v.(type) { + case map[string]interface{}: + if vv, ok := v.(map[string]interface{})[keys[0]]; ok { + updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt) + } + } + } + } + } +} + +// change value if key and subkeys are present +func updateValue(key string, value interface{}, m interface{}, keys0 string, subkeys map[string]interface{}, cnt *int) { + // there are two possible options for the value of 'keys0': map[string]interface, []interface{} + // and 'key' is a key in the map or is a key in a map in a list. 
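
For orientation, a minimal sketch of the path-based update that UpdateValuesForPath above performs (illustrative, not part of the vendored file; assumes the mxj/v2 import, and the document is hypothetical):

	m, _ := mxj.NewMapXml([]byte(`<doc><tag>old</tag></doc>`))
	n, err := m.UpdateValuesForPath("tag:new", "doc.tag")
	// n == 1, err == nil; m now encodes as <doc><tag>new</tag></doc>
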
+ switch m.(type) { + case map[string]interface{}: // gotta have the last key + if keys0 == "*" { + for k := range m.(map[string]interface{}) { + updateValue(key, value, m, k, subkeys, cnt) + } + return + } + endVal, _ := m.(map[string]interface{})[keys0] + + // if newV key is the end of path, replace the value for path-end + // may be []interface{} - means replace just an entry w/ subkeys + // otherwise replace the keys0 value if subkeys are there + // NOTE: this will replace the subkeys, also + if key == keys0 { + switch endVal.(type) { + case map[string]interface{}: + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + } + case []interface{}: + // without subkeys can't select list member to modify + // so key:value spec is it ... + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + break + } + nv := make([]interface{}, 0) + var valmodified bool + for _, v := range endVal.([]interface{}) { + // check entry subkeys + if hasSubKeys(v, subkeys) { + // replace v with value + nv = append(nv, value) + valmodified = true + (*cnt)++ + continue + } + nv = append(nv, v) + } + if valmodified { + (m.(map[string]interface{}))[keys0] = interface{}(nv) + } + default: // anything else is a strict replacement + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + } + } + return + } + + // so value is for an element of endVal + // if endVal is a map then 'key' must be there w/ subkeys + // if endVal is a list then 'key' must be in a list member w/ subkeys + switch endVal.(type) { + case map[string]interface{}: + if !hasSubKeys(endVal, subkeys) { + return + } + if _, ok := (endVal.(map[string]interface{}))[key]; ok { + (endVal.(map[string]interface{}))[key] = value + (*cnt)++ + } + case []interface{}: // keys0 points to a list, check subkeys + for _, v := range endVal.([]interface{}) { + // got to be a map so we can replace value for 'key' + vv, vok := v.(map[string]interface{}) + if !vok { + continue + } + if _, ok := vv[key]; !ok { + continue + } + if !hasSubKeys(vv, subkeys) { + continue + } + vv[key] = value + (*cnt)++ + } + } + case []interface{}: // key may be in a list member + // don't need to handle keys0 == "*"; we're looking at everything, anyway. + for _, v := range m.([]interface{}) { + // only map values - we're looking for 'key' + mm, ok := v.(map[string]interface{}) + if !ok { + continue + } + if _, ok := mm[key]; !ok { + continue + } + if !hasSubKeys(mm, subkeys) { + continue + } + mm[key] = value + (*cnt)++ + } + } + + // return +} diff --git a/vendor/github.com/clbanning/mxj/v2/xml.go b/vendor/github.com/clbanning/mxj/v2/xml.go new file mode 100644 index 000000000..2ea1bc25a --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/xml.go @@ -0,0 +1,1414 @@ +// Copyright 2012-2016, 2018-2019 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// xml.go - basically the core of X2j for map[string]interface{} values. +// NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter +// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages. + +package mxj + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// ------------------- NewMapXml & NewMapXmlReader ... ------------------------- + +// If XmlCharsetReader != nil, it will be used to decode the XML, if required. 
+// Note: if CustomDecoder != nil, then XmlCharsetReader is ignored;
+// set the CustomDecoder attribute instead.
+//	import (
+//	   charset "code.google.com/p/go-charset/charset"
+//	   github.com/clbanning/mxj
+//	)
+//	...
+//	mxj.XmlCharsetReader = charset.NewReader
+//	m, merr := mxj.NewMapXml(xmlValue)
+var XmlCharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+// NewMapXml - convert an XML doc into a Map
+// (This is analogous to unmarshalling a JSON string to map[string]interface{} using json.Unmarshal().)
+//	If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+//
+//	Converting XML to JSON is as simple as:
+//		...
+//		mapVal, merr := mxj.NewMapXml(xmlVal)
+//		if merr != nil {
+//			// handle error
+//		}
+//		jsonVal, jerr := mapVal.Json()
+//		if jerr != nil {
+//			// handle error
+//		}
+//
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. The 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   3. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//	   5. If DisableTrimWhiteSpace(b bool) has been called, then all values will be trimmed or not. 'true' by default.
+func NewMapXml(xmlVal []byte, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlToMap(xmlVal, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value. Returns Map value.
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   3. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReader(xmlReader io.Reader, cast ...bool) (Map, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlReaderToMap(xmlReader, r)
+}
+
+// Get next XML doc from an io.Reader as a Map value. Returns Map value and slice with the raw XML.
+//	NOTES:
+//	   1. Declarations, directives, process instructions and comments are NOT parsed.
+//	   2. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	      using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	      See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	      data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	      you CAN use it to efficiently unmarshal an XML doc and retrieve the raw XML in a single call.
+//	   3. The 'raw' return value may be larger than the XML text value.
+//	   4. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other
+//	      extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	   5. If CoerceKeysToLower() has been called, then all key values will be lower case.
+//	   6. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+func NewMapXmlReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb) // see code at EOF
+
+	m, err := xmlReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	if err != nil {
+		return nil, b, err
+	}
+
+	return m, b, nil
+}
+
+// xmlReaderToMap() - parse an XML io.Reader to a map[string]interface{} value
+func xmlReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// xmlToMap - convert an XML doc into a map[string]interface{} value
+func xmlToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
+// PrependAttrWithHyphen. Prepend attribute tags with a hyphen.
+// Default is 'true'. (Not applicable to NewMapXmlSeq(), mv.XmlSeq(), etc.)
+//	Note:
+//		If 'false', unmarshaling and marshaling is not symmetric. Attributes will be
+//		marshal'd as <attr_label>attr</attr_label> and may be part of a list.
+func PrependAttrWithHyphen(v bool) {
+	if v {
+		attrPrefix = "-"
+		lenAttrPrefix = len(attrPrefix)
+		return
+	}
+	attrPrefix = ""
+	lenAttrPrefix = len(attrPrefix)
+}
+
+// Include sequence id with inner tags. - per Sean Murphy, murphysean84@gmail.com.
+var includeTagSeqNum bool
+
+// IncludeTagSeqNum - include a "_seq":N key:value pair with each inner tag, denoting
+// its position when parsed. This is of limited usefulness, since list values cannot
+// be tagged with "_seq" without changing their depth in the Map.
+// So THIS SHOULD BE USED WITH CAUTION - see the test cases. Here's a sample of what
+// you get.
+/*
+	<Obj c="la" h="da" x="dee">
+		<intObj id="3"/>
+		<intObj1 id="1"/>
+		<intObj id="2"/>
+		<StrObj>hello</StrObj>
+	</Obj>
+
+	parses as:
+
+	{
+	Obj:{
+		"-c":"la",
+		"-h":"da",
+		"-x":"dee",
+		"intObj":[
+			{
+				"-id"="3",
+				"_seq":"0" // if mxj.Cast is passed, then: "_seq":0
+			},
+			{
+				"-id"="2",
+				"_seq":"2"
+			}],
+		"intObj1":{
+			"-id":"1",
+			"_seq":"1"
+		},
+		"StrObj":{
+			"#text":"hello", // simple element value gets "#text" tag
+			"_seq":"3"
+		}
+	}
+	}
+*/
+func IncludeTagSeqNum(b ...bool) {
+	if len(b) == 0 {
+		includeTagSeqNum = !includeTagSeqNum
+	} else if len(b) == 1 {
+		includeTagSeqNum = b[0]
+	}
+}
+
+// all keys will be "lower case"
+var lowerCase bool
+
+// Coerce all tag values to keys in lower case. This is useful if you've got sources with variable
+// tag capitalization, and you want to use m.ValuesForKeys(), etc., with the key or path spec
+// in lower case.
+//	CoerceKeysToLower() will toggle the coercion flag true|false - on|off
+//	CoerceKeysToLower(true|false) will set the coercion flag on|off
+//
+//	NOTE: only recognized by NewMapXml, NewMapXmlReader, and NewMapXmlReaderRaw functions as well as
+//	the associated HandleXmlReader and HandleXmlReaderRaw.
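
A minimal sketch of key coercion in use (illustrative, not part of the vendored file; ValuesForPath is the path lookup referenced in setfieldsep.go). The CoerceKeysToLower function itself follows:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		mxj.CoerceKeysToLower(true) // normalize tag capitalization on decode
		m, err := mxj.NewMapXml([]byte(`<Doc><UserName>gopher</UserName></Doc>`))
		if err != nil {
			panic(err)
		}
		vals, _ := m.ValuesForPath("doc.username") // path spec in lower case
		fmt.Println(vals[0])                       // gopher
	}
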
+func CoerceKeysToLower(b ...bool) { + if len(b) == 0 { + lowerCase = !lowerCase + } else if len(b) == 1 { + lowerCase = b[0] + } +} + +// disableTrimWhiteSpace sets if the white space should be removed or not +var disableTrimWhiteSpace bool +var trimRunes = "\t\r\b\n " + +// DisableTrimWhiteSpace set if the white space should be trimmed or not. By default white space is always trimmed. If +// no argument is provided, trim white space will be disabled. +func DisableTrimWhiteSpace(b ...bool) { + if len(b) == 0 { + disableTrimWhiteSpace = true + } else { + disableTrimWhiteSpace = b[0] + } + + if disableTrimWhiteSpace { + trimRunes = "\t\r\b\n" + } else { + trimRunes = "\t\r\b\n " + } +} + +// 25jun16: Allow user to specify the "prefix" character for XML attribute key labels. +// We do this by replacing '`' constant with attrPrefix var, replacing useHyphen with attrPrefix = "", +// and adding a SetAttrPrefix(s string) function. + +var attrPrefix string = `-` // the default +var lenAttrPrefix int = 1 // the default + +// SetAttrPrefix changes the default, "-", to the specified value, s. +// SetAttrPrefix("") is the same as PrependAttrWithHyphen(false). +// (Not applicable for NewMapXmlSeq(), mv.XmlSeq(), etc.) +func SetAttrPrefix(s string) { + attrPrefix = s + lenAttrPrefix = len(attrPrefix) +} + +// 18jan17: Allows user to specify if the map keys should be in snake case instead +// of the default hyphenated notation. +var snakeCaseKeys bool + +// CoerceKeysToSnakeCase changes the default, false, to the specified value, b. +// Note: the attribute prefix will be a hyphen, '-', or what ever string value has +// been specified using SetAttrPrefix. +func CoerceKeysToSnakeCase(b ...bool) { + if len(b) == 0 { + snakeCaseKeys = !snakeCaseKeys + } else if len(b) == 1 { + snakeCaseKeys = b[0] + } +} + +// 10jan19: use of pull request #57 should be conditional - legacy code assumes +// numeric values are float64. +var castToInt bool + +// CastValuesToInt tries to coerce numeric valus to int64 or uint64 instead of the +// default float64. Repeated calls with no argument will toggle this on/off, or this +// handling will be set with the value of 'b'. +func CastValuesToInt(b ...bool) { + if len(b) == 0 { + castToInt = !castToInt + } else if len(b) == 1 { + castToInt = b[0] + } +} + +// 05feb17: support processing XMPP streams (issue #36) +var handleXMPPStreamTag bool + +// HandleXMPPStreamTag causes decoder to parse XMPP elements. +// If called with no argument, XMPP stream element handling is toggled on/off. +// (See xmppStream_test.go for example.) +// If called with NewMapXml, NewMapXmlReader, New MapXmlReaderRaw the "stream" +// element will be returned as: +// map["stream"]interface{}{map[-]interface{}}. +// If called with NewMapSeq, NewMapSeqReader, NewMapSeqReaderRaw the "stream" +// element will be returned as: +// map["stream:stream"]interface{}{map["#attr"]interface{}{map[string]interface{}}} +// where the "#attr" values have "#text" and "#seq" keys. (See NewMapXmlSeq.) +func HandleXMPPStreamTag(b ...bool) { + if len(b) == 0 { + handleXMPPStreamTag = !handleXMPPStreamTag + } else if len(b) == 1 { + handleXMPPStreamTag = b[0] + } +} + +// 21jan18 - decode all values as map["#text":value] (issue #56) +var decodeSimpleValuesAsMap bool + +// DecodeSimpleValuesAsMap forces all values to be decoded as map["#text":]. +// If called with no argument, the decoding is toggled on/off. +// +// By default the NewMapXml functions decode simple values without attributes as +// map[:]. 
This function causes simple values without attributes to be +// decoded the same as simple values with attributes - map[:map["#text":]]. +func DecodeSimpleValuesAsMap(b ...bool) { + if len(b) == 0 { + decodeSimpleValuesAsMap = !decodeSimpleValuesAsMap + } else if len(b) == 1 { + decodeSimpleValuesAsMap = b[0] + } +} + +// xmlToMapParser (2015.11.12) - load a 'clean' XML doc into a map[string]interface{} directly. +// A refactoring of xmlToTreeParser(), markDuplicate() and treeToMap() - here, all-in-one. +// We've removed the intermediate *node tree with the allocation and subsequent rescanning. +func xmlToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) { + if lowerCase { + skey = strings.ToLower(skey) + } + if snakeCaseKeys { + skey = strings.Replace(skey, "-", "_", -1) + } + + // NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'. + // Unless 'skey' is a simple element w/o attributes, in which case the xml.CharData value is the value. + var n, na map[string]interface{} + var seq int // for includeTagSeqNum + + // Allocate maps and load attributes, if any. + // NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through + // to get StartElement then recurse with skey==xml.StartElement.Name.Local + // where we begin allocating map[string]interface{} values 'n' and 'na'. + if skey != "" { + n = make(map[string]interface{}) // old n + na = make(map[string]interface{}) // old n.nodes + if len(a) > 0 { + for _, v := range a { + if snakeCaseKeys { + v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1) + } + var key string + key = attrPrefix + v.Name.Local + if lowerCase { + key = strings.ToLower(key) + } + if xmlEscapeCharsDecoder { // per issue#84 + v.Value = escapeChars(v.Value) + } + na[key] = cast(v.Value, r, key) + } + } + } + // Return XMPP message. + if handleXMPPStreamTag && skey == "stream" { + n[skey] = na + return n, nil + } + + for { + t, err := p.Token() + if err != nil { + if err != io.EOF { + return nil, errors.New("xml.Decoder.Token() - " + err.Error()) + } + return nil, err + } + switch t.(type) { + case xml.StartElement: + tt := t.(xml.StartElement) + + // First call to xmlToMapParser() doesn't pass xml.StartElement - the map key. + // So when the loop is first entered, the first token is the root tag along + // with any attributes, which we process here. + // + // Subsequent calls to xmlToMapParser() will pass in tag+attributes for + // processing before getting the next token which is the element value, + // which is done above. + if skey == "" { + return xmlToMapParser(tt.Name.Local, tt.Attr, p, r) + } + + // If not initializing the map, parse the element. + // len(nn) == 1, necessarily - it is just an 'n'. + nn, err := xmlToMapParser(tt.Name.Local, tt.Attr, p, r) + if err != nil { + return nil, err + } + + // The nn map[string]interface{} value is a na[nn_key] value. + // We need to see if nn_key already exists - means we're parsing a list. + // This may require converting na[nn_key] value into []interface{} type. + // First, extract the key:val for the map - it's a singleton. + // Note: + // * if CoerceKeysToLower() called, then key will be lower case. + // * if CoerceKeysToSnakeCase() called, then key will be converted to snake case. + var key string + var val interface{} + for key, val = range nn { + break + } + + // IncludeTagSeqNum requests that the element be augmented with a "_seq" sub-element. + // In theory, we don't need this if len(na) == 1. 
But, we don't know what might + // come next - we're only parsing forward. So if you ask for 'includeTagSeqNum' you + // get it on every element. (Personally, I never liked this, but I added it on request + // and did get a $50 Amazon gift card in return - now we support it for backwards compatibility!) + if includeTagSeqNum { + switch val.(type) { + case []interface{}: + // noop - There's no clean way to handle this w/o changing message structure. + case map[string]interface{}: + val.(map[string]interface{})["_seq"] = seq // will overwrite an "_seq" XML tag + seq++ + case interface{}: // a non-nil simple element: string, float64, bool + v := map[string]interface{}{"#text": val} + v["_seq"] = seq + seq++ + val = v + } + } + + // 'na' holding sub-elements of n. + // See if 'key' already exists. + // If 'key' exists, then this is a list, if not just add key:val to na. + if v, ok := na[key]; ok { + var a []interface{} + switch v.(type) { + case []interface{}: + a = v.([]interface{}) + default: // anything else - note: v.(type) != nil + a = []interface{}{v} + } + a = append(a, val) + na[key] = a + } else { + na[key] = val // save it as a singleton + } + case xml.EndElement: + // len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case. + if len(n) == 0 { + // If len(na)==0 we have an empty element == ""; + // it has no xml.Attr nor xml.CharData. + // Note: in original node-tree parser, val defaulted to ""; + // so we always had the default if len(node.nodes) == 0. + if len(na) > 0 { + n[skey] = na + } else { + n[skey] = "" // empty element + } + } else if len(n) == 1 && len(na) > 0 { + // it's a simple element w/ no attributes w/ subelements + for _, v := range n { + na["#text"] = v + } + n[skey] = na + } + return n, nil + case xml.CharData: + // clean up possible noise + tt := strings.Trim(string(t.(xml.CharData)), trimRunes) + if xmlEscapeCharsDecoder { // issue#84 + tt = escapeChars(tt) + } + if len(tt) > 0 { + if len(na) > 0 || decodeSimpleValuesAsMap { + na["#text"] = cast(tt, r, "#text") + } else if skey != "" { + n[skey] = cast(tt, r, skey) + } else { + // per Adrian (http://www.adrianlungu.com/) catch stray text + // in decoder stream - + // https://github.com/clbanning/mxj/pull/14#issuecomment-182816374 + // NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get + // a p.Token() decoding error when the BOM is UTF-16 or UTF-32. + continue + } + } + default: + // noop + } + } +} + +var castNanInf bool + +// Cast "Nan", "Inf", "-Inf" XML values to 'float64'. +// By default, these values will be decoded as 'string'. 
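
A minimal sketch of how the 'cast' argument and these package-level switches interact (illustrative, not part of the vendored file; assumes the mxj/v2 import). The CastNanInf function follows:

	package main

	import (
		"fmt"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		doc := []byte(`<doc><n>42</n><ok>true</ok><nan>NaN</nan></doc>`)
		m, _ := mxj.NewMapXml(doc, true) // cast == true
		d := m["doc"].(map[string]interface{})
		fmt.Printf("%T %T %T\n", d["n"], d["ok"], d["nan"])
		// float64 bool string - "NaN" stays a string unless CastNanInf(true),
		// and CastValuesToInt(true) would yield int64 for "42" instead.
	}
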
+func CastNanInf(b ...bool) {
+	if len(b) == 0 {
+		castNanInf = !castNanInf
+	} else if len(b) == 1 {
+		castNanInf = b[0]
+	}
+}
+
+// cast - try to cast string values to bool or float64
+// 't' is the tag key that can be checked for 'not-casting'
+func cast(s string, r bool, t string) interface{} {
+	if checkTagToSkip != nil && t != "" && checkTagToSkip(t) {
+		// call the check-function here with 't[0]'
+		// if 'true' return s
+		return s
+	}
+
+	if r {
+		// handle nan and inf
+		if !castNanInf {
+			switch strings.ToLower(s) {
+			case "nan", "inf", "-inf":
+				return s
+			}
+		}
+
+		// handle numeric strings ahead of boolean
+		if castToInt {
+			if f, err := strconv.ParseInt(s, 10, 64); err == nil {
+				return f
+			}
+			if f, err := strconv.ParseUint(s, 10, 64); err == nil {
+				return f
+			}
+		}
+
+		if castToFloat {
+			if f, err := strconv.ParseFloat(s, 64); err == nil {
+				return f
+			}
+		}
+
+		// ParseBool treats "1"==true & "0"==false, we've already scanned those
+		// values as float64. See if value has 't' or 'f' as initial screen to
+		// minimize calls to ParseBool; also, see if len(s) < 6.
+		if castToBool {
+			if len(s) > 0 && len(s) < 6 {
+				switch s[:1] {
+				case "t", "T", "f", "F":
+					if b, err := strconv.ParseBool(s); err == nil {
+						return b
+					}
+				}
+			}
+		}
+	}
+	return s
+}
+
+// pull request, #59
+var castToFloat = true
+
+// CastValuesToFloat can be used to skip casting to float64 when
+// "cast" argument is 'true' in NewMapXml, etc.
+// Default is true.
+func CastValuesToFloat(b ...bool) {
+	if len(b) == 0 {
+		castToFloat = !castToFloat
+	} else if len(b) == 1 {
+		castToFloat = b[0]
+	}
+}
+
+var castToBool = true
+
+// CastValuesToBool can be used to skip casting to bool when
+// "cast" argument is 'true' in NewMapXml, etc.
+// Default is true.
+func CastValuesToBool(b ...bool) {
+	if len(b) == 0 {
+		castToBool = !castToBool
+	} else if len(b) == 1 {
+		castToBool = b[0]
+	}
+}
+
+// checkTagToSkip - switch to address Issue #58
+
+var checkTagToSkip func(string) bool
+
+// SetCheckTagToSkipFunc registers a function to test whether the value
+// for a tag should be cast to bool or float64 when "cast" argument is 'true'.
+// (Dot tag path notation is not supported.)
+// NOTE: key may be "#text" if it's a simple element with attributes
+//	or "decodeSimpleValuesAsMap == true".
+// NOTE: does not apply to NewMapXmlSeq... functions.
+func SetCheckTagToSkipFunc(fn func(string) bool) {
+	checkTagToSkip = fn
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------
+
+const (
+	DefaultRootTag = "doc"
+)
+
+var useGoXmlEmptyElemSyntax bool
+
+// XmlGoEmptyElemSyntax() - <tag></tag> rather than <tag/>.
+//	Go's encoding/xml package marshals empty XML elements as <tag></tag>. By default this package
+//	encodes empty elements as <tag/>. If you're marshaling Map values that include structures
+//	(which are passed to xml.Marshal for encoding), this will let you conform to the standard package.
+func XmlGoEmptyElemSyntax() {
+	useGoXmlEmptyElemSyntax = true
+}
+
+// XmlDefaultEmptyElemSyntax() - <tag/> rather than <tag></tag>.
+// Return XML encoding for empty elements to the default package setting.
+// Reverses effect of XmlGoEmptyElemSyntax().
+func XmlDefaultEmptyElemSyntax() {
+	useGoXmlEmptyElemSyntax = false
+}
+
+// ------- issue #88 ----------
+// xmlCheckIsValid sets the switch to force decoding the encoded XML to
+// see if it is valid XML.
+var xmlCheckIsValid bool
+
+// XmlCheckIsValid forces the encoded XML to be checked for validity.
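
A minimal sketch of the validity check in use (illustrative, not part of the vendored file; a key containing a space yields an illegal tag, so the re-decode fails):

	mxj.XmlCheckIsValid(true) // re-decode everything mv.Xml() emits

	m := mxj.Map{"bad key": "value"}
	if _, err := m.Xml(); err != nil {
		fmt.Println("caught invalid XML:", err)
	}
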
+func XmlCheckIsValid(b ...bool) {
+	if len(b) == 1 {
+		xmlCheckIsValid = b[0]
+		return
+	}
+	xmlCheckIsValid = !xmlCheckIsValid
+}
+
+// Encode a Map as XML. The companion of NewMapXml().
+// The following rules apply.
+//    - The key label "#text" is treated as the value for a simple element with attributes.
+//    - Map keys that begin with a hyphen, '-', are interpreted as attributes.
+//      It is an error if the attribute doesn't have a []byte, string, number, or boolean value.
+//    - Map value type encoding:
+//          > string, bool, float64, int, int32, int64, float32: per "%v" formatting
+//          > []bool, []uint8: by casting to string
+//          > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//            value is "UNKNOWN"
+//    - Elements with only attribute values or are null are terminated using "/>".
+//    - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
+//      Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+//    - To encode empty elements in a syntax consistent with encoding/xml call XmlGoEmptyElemSyntax().
+// The attributes tag=value pairs are alphabetized by "tag". Also, when encoding map[string]interface{} values -
+// complex elements, etc. - the key:value pairs are alphabetized by key so the resulting tags will appear sorted.
+func (mv Map) Xml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	b := new(bytes.Buffer)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it's an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><bool>true</bool></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = marshalMapToXmlIndent(false, b, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = marshalMapToXmlIndent(false, b, rootTag[0], m, p)
+	} else {
+		err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p)
+	}
+done:
+	if xmlCheckIsValid {
+		d := xml.NewDecoder(bytes.NewReader(b.Bytes()))
+		for {
+			_, err = d.Token()
+			if err == io.EOF {
+				err = nil
+				break
+			} else if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return b.Bytes(), err
+}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw]
+// The names will also provide a key for the number of return arguments.
+
+// Writes the Map as XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// Writes the Map as XML on the Writer. []byte is the raw XML that was written.
+// See Xml() for encoding rules.
+/*
+func (mv Map) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// Writes the Map as pretty XML on the Writer.
+// See Xml() for encoding rules.
+func (mv Map) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+ if err != nil { + return err + } + + _, err = xmlWriter.Write(x) + return err +} + +// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written. +// See Xml() for encoding rules. +/* +func (mv Map) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) { + x, err := mv.XmlIndent(prefix, indent, rootTag...) + if err != nil { + return x, err + } + + _, err = xmlWriter.Write(x) + return x, err +} +*/ + +// -------------------- END: mv.Xml & mv.XmlWriter ------------------------------- + +// -------------- Handle XML stream by processing Map value -------------------- + +// Default poll delay to keep Handler from spinning on an open stream +// like sitting on os.Stdin waiting for imput. +var xhandlerPollInterval = time.Millisecond + +// Bulk process XML using handlers that process a Map value. +// 'rdr' is an io.Reader for XML (stream) +// 'mapHandler' is the Map processor. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. +// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'. +func HandleXmlReader(xmlReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error { + var n int + for { + m, merr := NewMapXmlReader(xmlReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m); !ok { + break + } + } else if merr != io.EOF { + time.Sleep(xhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} + +// Bulk process XML using handlers that process a Map value and the raw XML. +// 'rdr' is an io.Reader for XML (stream) +// 'mapHandler' is the Map and raw XML - []byte - processor. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error and raw XML processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. +// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'. +// See NewMapXmlReaderRaw for comment on performance associated with retrieving raw XML from a Reader. 
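
A minimal sketch of bulk processing with HandleXmlReader (illustrative, not part of the vendored file; the two back-to-back docs are hypothetical payloads). HandleXmlReaderRaw below works the same way but also hands the handlers the raw XML:

	package main

	import (
		"fmt"
		"strings"

		"github.com/clbanning/mxj/v2"
	)

	func main() {
		rdr := strings.NewReader(`<msg><id>1</id></msg><msg><id>2</id></msg>`)
		_ = mxj.HandleXmlReader(rdr,
			func(m mxj.Map) bool { // return false to stop reading
				fmt.Println(m)
				return true
			},
			func(err error) bool { // return false to terminate with the error
				fmt.Println("decode error:", err)
				return true
			})
	}
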
+func HandleXmlReaderRaw(xmlReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error { + var n int + for { + m, raw, merr := NewMapXmlReaderRaw(xmlReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr, raw); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m, raw); !ok { + break + } + } else if merr != io.EOF { + time.Sleep(xhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} + +// ----------------- END: Handle XML stream by processing Map value -------------- + +// -------- a hack of io.TeeReader ... need one that's an io.ByteReader for xml.NewDecoder() ---------- + +// This is a clone of io.TeeReader with the additional method t.ReadByte(). +// Thus, this TeeReader is also an io.ByteReader. +// This is necessary because xml.NewDecoder uses a ByteReader not a Reader. It appears to have been written +// with bufio.Reader or bytes.Reader in mind ... not a generic io.Reader, which doesn't have to have ReadByte().. +// If NewDecoder is passed a Reader that does not satisfy ByteReader() it wraps the Reader with +// bufio.NewReader and uses ReadByte rather than Read that runs the TeeReader pipe logic. + +type teeReader struct { + r io.Reader + w io.Writer + b []byte +} + +func myTeeReader(r io.Reader, w io.Writer) io.Reader { + b := make([]byte, 1) + return &teeReader{r, w, b} +} + +// need for io.Reader - but we don't use it ... +func (t *teeReader) Read(p []byte) (int, error) { + return 0, nil +} + +func (t *teeReader) ReadByte() (byte, error) { + n, err := t.r.Read(t.b) + if n > 0 { + if _, err := t.w.Write(t.b[:1]); err != nil { + return t.b[0], err + } + } + return t.b[0], err +} + +// For use with NewMapXmlReader & NewMapXmlSeqReader. +type byteReader struct { + r io.Reader + b []byte +} + +func myByteReader(r io.Reader) io.Reader { + b := make([]byte, 1) + return &byteReader{r, b} +} + +// Need for io.Reader interface ... +// Needed if reading a malformed http.Request.Body - issue #38. +func (b *byteReader) Read(p []byte) (int, error) { + return b.r.Read(p) +} + +func (b *byteReader) ReadByte() (byte, error) { + _, err := b.r.Read(b.b) + if len(b.b) > 0 { + return b.b[0], nil + } + var c byte + return c, err +} + +// ----------------------- END: io.TeeReader hack ----------------------------------- + +// ---------------------- XmlIndent - from j2x package ---------------------------- + +// Encode a map[string]interface{} as a pretty XML string. +// See Xml for encoding rules. 
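
A minimal sketch of pretty-printing a Map (illustrative, not part of the vendored file; the exact whitespace follows the prefix/indent arguments):

	m := mxj.Map{"doc": map[string]interface{}{"-id": "1", "name": "gopher"}}
	b, _ := m.XmlIndent("", "  ") // root tag taken from the single map key
	fmt.Println(string(b))
	// <doc id="1">
	//   <name>gopher</name>
	// </doc>
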
+func (mv Map) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) { + m := map[string]interface{}(mv) + + var err error + b := new(bytes.Buffer) + p := new(pretty) + p.indent = indent + p.padding = prefix + + if len(m) == 1 && len(rootTag) == 0 { + // this can extract the key for the single map element + // use it if it isn't a key for a list + for key, value := range m { + if _, ok := value.([]interface{}); ok { + err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p) + } else { + err = marshalMapToXmlIndent(true, b, key, value, p) + } + } + } else if len(rootTag) == 1 { + err = marshalMapToXmlIndent(true, b, rootTag[0], m, p) + } else { + err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p) + } + if xmlCheckIsValid { + d := xml.NewDecoder(bytes.NewReader(b.Bytes())) + for { + _, err = d.Token() + if err == io.EOF { + err = nil + break + } else if err != nil { + return nil, err + } + } + } + return b.Bytes(), err +} + +type pretty struct { + indent string + cnt int + padding string + mapDepth int + start int +} + +func (p *pretty) Indent() { + p.padding += p.indent + p.cnt++ +} + +func (p *pretty) Outdent() { + if p.cnt > 0 { + p.padding = p.padding[:len(p.padding)-len(p.indent)] + p.cnt-- + } +} + +// where the work actually happens +// returns an error if an attribute is not atomic +// NOTE: 01may20 - replaces mapToXmlIndent(); uses bytes.Buffer instead for string appends. +func marshalMapToXmlIndent(doIndent bool, b *bytes.Buffer, key string, value interface{}, pp *pretty) error { + var err error + var endTag bool + var isSimple bool + var elen int + p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start} + + // per issue #48, 18apr18 - try and coerce maps to map[string]interface{} + // Don't need for mapToXmlSeqIndent, since maps there are decoded by NewMapXmlSeq(). + if reflect.ValueOf(value).Kind() == reflect.Map { + switch value.(type) { + case map[string]interface{}: + default: + val := make(map[string]interface{}) + vv := reflect.ValueOf(value) + keys := vv.MapKeys() + for _, k := range keys { + val[fmt.Sprint(k)] = vv.MapIndex(k).Interface() + } + value = val + } + } + + // 14jul20. The following block of code has become something of a catch all for odd stuff + // that might be passed in as a result of casting an arbitrary map[] to an mxj.Map + // value and then call m.Xml or m.XmlIndent. See issue #71 (and #73) for such edge cases. 
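
A concrete case the coercion above covers, e.g. maps produced by YAML decoders (illustrative sketch, not part of the vendored file):

	m := mxj.Map{"a": map[interface{}]interface{}{"n": 1}}
	b, _ := m.Xml()
	// keys are stringified via fmt.Sprint, so b encodes as <a><n>1</n></a>
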
+	switch value.(type) {
+	// these types are handled during encoding
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, json.Number:
+	case []map[string]interface{}, []string, []float64, []bool, []int, []int32, []int64, []float32, []json.Number:
+	case []interface{}:
+	case nil:
+		value = ""
+	default:
+		// see if value is a struct, if so marshal using encoding/xml package
+		if reflect.ValueOf(value).Kind() == reflect.Struct {
+			if v, err := xml.Marshal(value); err != nil {
+				return err
+			} else {
+				value = string(v)
+			}
+		} else {
+			// coerce everything else into a string value
+			value = fmt.Sprint(value)
+		}
+	}
+
+	// start the XML tag with required indentation and padding
+	if doIndent {
+		switch value.(type) {
+		case []interface{}, []string:
+		default:
+			if _, err = b.WriteString(p.padding); err != nil {
+				return err
+			}
+		}
+	}
+	switch value.(type) {
+	case []interface{}:
+	default:
+		if _, err = b.WriteString(`<` + key); err != nil {
+			return err
+		}
+	}
+
+	switch value.(type) {
+	case map[string]interface{}:
+		vv := value.(map[string]interface{})
+		lenvv := len(vv)
+		// scan out attributes - attribute keys have prepended attrPrefix
+		attrlist := make([][2]string, len(vv))
+		var n int
+		var ss string
+		for k, v := range vv {
+			if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix {
+				switch v.(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(v.(string))
+					} else {
+						ss = v.(string)
+					}
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = ss
+				case float64, bool, int, int32, int64, float32, json.Number:
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = fmt.Sprintf("%v", v)
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(v.([]byte)))
+					} else {
+						ss = string(v.([]byte))
+					}
+					attrlist[n][0] = k[lenAttrPrefix:]
+					attrlist[n][1] = ss
+				default:
+					return fmt.Errorf("invalid attribute value for: %s:<%T>", k, v)
+				}
+				n++
+			}
+		}
+		if n > 0 {
+			attrlist = attrlist[:n]
+			sort.Sort(attrList(attrlist))
+			for _, v := range attrlist {
+				if _, err = b.WriteString(` ` + v[0] + `="` + v[1] + `"`); err != nil {
+					return err
+				}
+			}
+		}
+		// only attributes?
+		if n == lenvv {
+			if useGoXmlEmptyElemSyntax {
+				if _, err = b.WriteString("></" + key + ">"); err != nil {
+					return err
+				}
+			} else {
+				if _, err = b.WriteString(`/>`); err != nil {
+					return err
+				}
+			}
+			break
+		}
+
+		// simple element? Note: "#text" is an invalid XML tag.
+ isComplex := false + if v, ok := vv["#text"]; ok && n+1 == lenvv { + // just the value and attributes + switch v.(type) { + case string: + if xmlEscapeChars { + v = escapeChars(v.(string)) + } else { + v = v.(string) + } + case []byte: + if xmlEscapeChars { + v = escapeChars(string(v.([]byte))) + } else { + v = string(v.([]byte)) + } + } + if _, err = b.WriteString(">" + fmt.Sprintf("%v", v)); err != nil { + return err + } + endTag = true + elen = 1 + isSimple = true + break + } else if ok { + // need to handle when there are subelements in addition to the simple element value + // issue #90 + switch v.(type) { + case string: + if xmlEscapeChars { + v = escapeChars(v.(string)) + } else { + v = v.(string) + } + case []byte: + if xmlEscapeChars { + v = escapeChars(string(v.([]byte))) + } else { + v = string(v.([]byte)) + } + } + if _, err = b.WriteString(">" + fmt.Sprintf("%v", v)); err != nil { + return err + } + isComplex = true + } + + // close tag with possible attributes + if !isComplex { + if _, err = b.WriteString(">"); err != nil { + return err + } + } + if doIndent { + // *s += "\n" + if _, err = b.WriteString("\n"); err != nil { + return err + } + } + // something more complex + p.mapDepth++ + // extract the map k:v pairs and sort on key + elemlist := make([][2]interface{}, len(vv)) + n = 0 + for k, v := range vv { + if k == "#text" { + // simple element handled above + continue + } + if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix { + continue + } + elemlist[n][0] = k + elemlist[n][1] = v + n++ + } + elemlist = elemlist[:n] + sort.Sort(elemList(elemlist)) + var i int + for _, v := range elemlist { + switch v[1].(type) { + case []interface{}: + default: + if i == 0 && doIndent { + p.Indent() + } + } + i++ + if err := marshalMapToXmlIndent(doIndent, b, v[0].(string), v[1], p); err != nil { + return err + } + switch v[1].(type) { + case []interface{}: // handled in []interface{} case + default: + if doIndent { + p.Outdent() + } + } + i-- + } + p.mapDepth-- + endTag = true + elen = 1 // we do have some content ... + case []interface{}: + // special case - found during implementing Issue #23 + if len(value.([]interface{})) == 0 { + if doIndent { + if _, err = b.WriteString(p.padding + p.indent); err != nil { + return err + } + } + if _, err = b.WriteString("<" + key); err != nil { + return err + } + elen = 0 + endTag = true + break + } + for _, v := range value.([]interface{}) { + if doIndent { + p.Indent() + } + if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil { + return err + } + if doIndent { + p.Outdent() + } + } + return nil + case []string: + // This was added by https://github.com/slotix ... not a type that + // would be encountered if mv generated from NewMapXml, NewMapJson. + // Could be encountered in AnyXml(), so we'll let it stay, though + // it should be merged with case []interface{}, above. 
+		//quick fix for []string type
+		//[]string should be treated exactly as []interface{}
+		if len(value.([]string)) == 0 {
+			if doIndent {
+				if _, err = b.WriteString(p.padding + p.indent); err != nil {
+					return err
+				}
+			}
+			if _, err = b.WriteString("<" + key); err != nil {
+				return err
+			}
+			elen = 0
+			endTag = true
+			break
+		}
+		for _, v := range value.([]string) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			// *s += p.padding
+			if _, err = b.WriteString(p.padding); err != nil {
+				return err
+			}
+		}
+		if _, err = b.WriteString("<" + key); err != nil {
+			return err
+		}
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			v := value.(string)
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				// *s += ">" + v
+				if _, err = b.WriteString(">" + v); err != nil {
+					return err
+				}
+			}
+		case float64, bool, int, int32, int64, float32, json.Number:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v) // always > 0
+			if _, err = b.WriteString(">" + v); err != nil {
+				return err
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			v := string(value.([]byte))
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				// *s += ">" + v
+				if _, err = b.WriteString(">" + v); err != nil {
+					return err
+				}
+			}
+		default:
+			if _, err = b.WriteString(">"); err != nil {
+				return err
+			}
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				if _, err = b.WriteString(">UNKNOWN"); err != nil {
+					return err
+				}
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					if _, err = b.Write(v); err != nil {
+						return err
+					}
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag {
+		if doIndent {
+			if !isSimple {
+				if _, err = b.WriteString(p.padding); err != nil {
+					return err
+				}
+			}
+		}
+		if elen > 0 || useGoXmlEmptyElemSyntax {
+			if elen == 0 {
+				if _, err = b.WriteString(">"); err != nil {
+					return err
+				}
+			}
+			if _, err = b.WriteString("</" + key + ">"); err != nil {
+				return err
+			}
+		} else {
+			if _, err = b.WriteString(`/>`); err != nil {
+				return err
+			}
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			if _, err = b.WriteString("\n"); err != nil {
+				return err
+			}
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// ============================ sort interface implementation =================
+
+type attrList [][2]string
+
+func (a attrList) Len() int {
+	return len(a)
+}
+
+func (a attrList) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a attrList) Less(i, j int) bool {
+	return a[i][0] <= a[j][0]
+}
+
+type elemList [][2]interface{}
+
+func (e elemList) Len() int {
+	return len(e)
+}
+
+func (e elemList) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemList) Less(i, j int) bool {
+	return e[i][0].(string) <= e[j][0].(string)
+}
diff --git a/vendor/github.com/clbanning/mxj/v2/xmlseq.go b/vendor/github.com/clbanning/mxj/v2/xmlseq.go
new file mode 100644
index 000000000..80632bd3c
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/xmlseq.go
@@ -0,0 +1,877 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xmlseq.go - version of xml.go with sequence # injection on Decoding and sorting on Encoding.
+// Also, handles comments, directives and process instructions.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+)
+
+// MapSeq is like Map but contains sequencing indices to allow recovering the original order of
+// the XML elements when the map[string]interface{} is marshaled. Element attributes are
+// stored as a map["#attr"]map[<attr_label>]map[string]interface{}{"#text":<attr_val>, "#seq":<attr_index>}
+// value instead of denoting the keys with a prefix character. Also, comments, directives and
+// process instructions are preserved.
+type MapSeq map[string]interface{}
+
+// NoRoot is returned by NewXmlSeq, etc., when a comment, directive or procinstr element is parsed
+// in the XML data stream and the element is not contained in an XML object with a root element.
+var NoRoot = errors.New("no root key")
+var NO_ROOT = NoRoot // maintain backwards compatibility
+
+// ------------------- NewMapXmlSeq & NewMapXmlSeqReader ... -------------------------
+
+// NewMapXmlSeq converts an XML doc into a MapSeq value with elements id'd with a decoding sequence key
+// represented as map["#seq"]<int value>.
+// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+// NOTE: "#seq" key/value pairs are removed on encoding with msv.Xml() / msv.XmlIndent().
+//	• attributes are a map - map["#attr"]map["attr_key"]map[string]interface{}{"#text":<value>, "#seq":<int>}
+//	• all simple elements are decoded as map["#text"]interface{} with a "#seq" k:v pair, as well.
+//	• lists always decode as map["list_tag"][]map[string]interface{} where the array elements are maps that
+//	  include a "#seq" k:v pair based on sequence they are decoded. Thus, XML like:
+//	      <doc>
+//	         <ltag>value 1</ltag>
+//	         <newtag>value 2</newtag>
+//	         <ltag>value 3</ltag>
+//	      </doc>
+//	  is decoded as:
+//	    doc :
+//	      ltag :[[]interface{}]
+//	        [item: 0]
+//	          #seq :[int] 0
+//	          #text :[string] value 1
+//	        [item: 1]
+//	          #seq :[int] 2
+//	          #text :[string] value 3
+//	      newtag :
+//	        #seq :[int] 1
+//	        #text :[string] value 2
+//	  It will encode in proper sequence even though the MapSeq representation merges all "ltag" elements in an array.
+//	• comments - "<!--comment-->" - are decoded as map["#comment"]map["#text"]"cmnt_text" with a "#seq" k:v pair.
+//	• directives - "<!directive text>" - are decoded as map["#directive"]map["#text"]"directive_text" with a "#seq" k:v pair.
+//	• process instructions - "<?target inst?>" - are decoded as map["#procinst"]interface{} where the #procinst value
+//	  is of map[string]interface{} type with the following keys: #target, #inst, and #seq.
+//	• comments, directives, and procinsts that are NOT part of a document with a root key will be returned as
+//	  map[string]interface{} and the error value 'NoRoot'.
+//	• note: "<name space prefix>:<local name>" tags preserve the ":" notation rather than
+//	  stripping it as with NewMapXml().
+//	• Attribute keys for name space prefix declarations preserve "xmlns:" notation.
+//
+// ERRORS:
+//	1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment",
+//	   "#directive" or "#procinst" key.
+func NewMapXmlSeq(xmlVal []byte, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlSeqToMap(xmlVal, r)
+}
+
+// NewMapXmlSeqReader returns the next XML doc from an io.Reader as a MapSeq value.
+// NOTES:
+//	1.
The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//     extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//  2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//     re-encode the message in its original structure.
+//  3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+// ERRORS:
+//  1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment",
+//     "#directive" or "#procinst" key.
+func NewMapXmlSeqReader(xmlReader io.Reader, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlSeqReaderToMap(xmlReader, r)
+}
+
+// NewMapXmlSeqReaderRaw returns the next XML doc from an io.Reader as a MapSeq value.
+// Returns MapSeq value, slice with the raw XML, and any error.
+// NOTES:
+//  1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//     using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//     See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//     data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body,
+//     you CAN use it to efficiently unmarshal an XML doc and retrieve the raw XML in a single call.
+//  2. The 'raw' return value may be larger than the XML text value.
+//  3. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//     extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//  4. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//     re-encode the message in its original structure.
+//  5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+// ERRORS:
+//  1. If a NoRoot error, "no root key," is returned, check if the initial map key is a "#comment",
+//     "#directive" or "#procinst" key.
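Aside (not part of the vendored file): a minimal sketch of how the "#seq" annotations described above surface when decoding with NewMapXmlSeq. The sample document is an illustrative assumption; the import path and methods are the ones added in this diff.

package main

import (
	"fmt"

	"github.com/clbanning/mxj/v2"
)

func main() {
	doc := []byte(`<doc><ltag>value 1</ltag><newtag>value 2</newtag><ltag>value 3</ltag></doc>`)
	msv, err := mxj.NewMapXmlSeq(doc) // each element carries a "#seq" index
	if err != nil {
		panic(err)
	}
	// Both <ltag> elements merge into one []interface{} slice, but their
	// "#seq" values (0 and 2) still record the original document order.
	fmt.Println(msv.StringIndent())
}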
+func NewMapXmlSeqReaderRaw(xmlReader io.Reader, cast ...bool) (MapSeq, []byte, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + // create TeeReader so we can retrieve raw XML + buf := make([]byte, 0) + wb := bytes.NewBuffer(buf) + trdr := myTeeReader(xmlReader, wb) + + m, err := xmlSeqReaderToMap(trdr, r) + + // retrieve the raw XML that was decoded + b := wb.Bytes() + + // err may be NoRoot + return m, b, err +} + +// xmlSeqReaderToMap() - parse a XML io.Reader to a map[string]interface{} value +func xmlSeqReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) { + // parse the Reader + p := xml.NewDecoder(rdr) + if CustomDecoder != nil { + useCustomDecoder(p) + } else { + p.CharsetReader = XmlCharsetReader + } + return xmlSeqToMapParser("", nil, p, r) +} + +// xmlSeqToMap - convert a XML doc into map[string]interface{} value +func xmlSeqToMap(doc []byte, r bool) (map[string]interface{}, error) { + b := bytes.NewReader(doc) + p := xml.NewDecoder(b) + if CustomDecoder != nil { + useCustomDecoder(p) + } else { + p.CharsetReader = XmlCharsetReader + } + return xmlSeqToMapParser("", nil, p, r) +} + +// ===================================== where the work happens ============================= + +// xmlSeqToMapParser - load a 'clean' XML doc into a map[string]interface{} directly. +// Add #seq tag value for each element decoded - to be used for Encoding later. +func xmlSeqToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) { + if snakeCaseKeys { + skey = strings.Replace(skey, "-", "_", -1) + } + + // NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'. + var n, na map[string]interface{} + var seq int // for including seq num when decoding + + // Allocate maps and load attributes, if any. + // NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through + // to get StartElement then recurse with skey==xml.StartElement.Name.Local + // where we begin allocating map[string]interface{} values 'n' and 'na'. + if skey != "" { + // 'n' only needs one slot - save call to runtime•hashGrow() + // 'na' we don't know + n = make(map[string]interface{}, 1) + na = make(map[string]interface{}) + if len(a) > 0 { + // xml.Attr is decoded into: map["#attr"]map[]interface{} + // where interface{} is map[string]interface{}{"#text":, "#seq":} + aa := make(map[string]interface{}, len(a)) + for i, v := range a { + if snakeCaseKeys { + v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1) + } + if xmlEscapeCharsDecoder { // per issue#84 + v.Value = escapeChars(v.Value) + } + if len(v.Name.Space) > 0 { + aa[v.Name.Space+`:`+v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r, ""), "#seq": i} + } else { + aa[v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r, ""), "#seq": i} + } + } + na["#attr"] = aa + } + } + + // Return XMPP message. + if handleXMPPStreamTag && skey == "stream:stream" { + n[skey] = na + return n, nil + } + + for { + t, err := p.RawToken() + if err != nil { + if err != io.EOF { + return nil, errors.New("xml.Decoder.Token() - " + err.Error()) + } + return nil, err + } + switch t.(type) { + case xml.StartElement: + tt := t.(xml.StartElement) + + // First call to xmlSeqToMapParser() doesn't pass xml.StartElement - the map key. + // So when the loop is first entered, the first token is the root tag along + // with any attributes, which we process here. 
+ // + // Subsequent calls to xmlSeqToMapParser() will pass in tag+attributes for + // processing before getting the next token which is the element value, + // which is done above. + if skey == "" { + if len(tt.Name.Space) > 0 { + return xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r) + } else { + return xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r) + } + } + + // If not initializing the map, parse the element. + // len(nn) == 1, necessarily - it is just an 'n'. + var nn map[string]interface{} + if len(tt.Name.Space) > 0 { + nn, err = xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r) + } else { + nn, err = xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r) + } + if err != nil { + return nil, err + } + + // The nn map[string]interface{} value is a na[nn_key] value. + // We need to see if nn_key already exists - means we're parsing a list. + // This may require converting na[nn_key] value into []interface{} type. + // First, extract the key:val for the map - it's a singleton. + var key string + var val interface{} + for key, val = range nn { + break + } + + // add "#seq" k:v pair - + // Sequence number included even in list elements - this should allow us + // to properly resequence even something goofy like: + // item 1 + // item 2 + // item 3 + // where all the "list" subelements are decoded into an array. + switch val.(type) { + case map[string]interface{}: + val.(map[string]interface{})["#seq"] = seq + seq++ + case interface{}: // a non-nil simple element: string, float64, bool + v := map[string]interface{}{"#text": val, "#seq": seq} + seq++ + val = v + } + + // 'na' holding sub-elements of n. + // See if 'key' already exists. + // If 'key' exists, then this is a list, if not just add key:val to na. + if v, ok := na[key]; ok { + var a []interface{} + switch v.(type) { + case []interface{}: + a = v.([]interface{}) + default: // anything else - note: v.(type) != nil + a = []interface{}{v} + } + a = append(a, val) + na[key] = a + } else { + na[key] = val // save it as a singleton + } + case xml.EndElement: + if skey != "" { + tt := t.(xml.EndElement) + if snakeCaseKeys { + tt.Name.Local = strings.Replace(tt.Name.Local, "-", "_", -1) + } + var name string + if len(tt.Name.Space) > 0 { + name = tt.Name.Space + `:` + tt.Name.Local + } else { + name = tt.Name.Local + } + if skey != name { + return nil, fmt.Errorf("element %s not properly terminated, got %s at #%d", + skey, name, p.InputOffset()) + } + } + // len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case. + if len(n) == 0 { + // If len(na)==0 we have an empty element == ""; + // it has no xml.Attr nor xml.CharData. + // Empty element content will be map["etag"]map["#text"]"" + // after #seq injection - map["etag"]map["#seq"]seq - after return. + if len(na) > 0 { + n[skey] = na + } else { + n[skey] = "" // empty element + } + } + return n, nil + case xml.CharData: + // clean up possible noise + tt := strings.Trim(string(t.(xml.CharData)), trimRunes) + if xmlEscapeCharsDecoder { // issue#84 + tt = escapeChars(tt) + } + if skey == "" { + // per Adrian (http://www.adrianlungu.com/) catch stray text + // in decoder stream - + // https://github.com/clbanning/mxj/pull/14#issuecomment-182816374 + // NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get + // a p.Token() decoding error when the BOM is UTF-16 or UTF-32. 
+				continue
+			}
+			if len(tt) > 0 {
+				// every simple element is a #text and has #seq associated with it
+				na["#text"] = cast(tt, r, "")
+				na["#seq"] = seq
+				seq++
+			}
+		case xml.Comment:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#comment": string(t.(xml.Comment))}
+				return n, NoRoot
+			}
+			cm := make(map[string]interface{}, 2)
+			cm["#text"] = string(t.(xml.Comment))
+			cm["#seq"] = seq
+			seq++
+			na["#comment"] = cm
+		case xml.Directive:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#directive": string(t.(xml.Directive))}
+				return n, NoRoot
+			}
+			dm := make(map[string]interface{}, 2)
+			dm["#text"] = string(t.(xml.Directive))
+			dm["#seq"] = seq
+			seq++
+			na["#directive"] = dm
+		case xml.ProcInst:
+			if n == nil {
+				na = map[string]interface{}{"#target": t.(xml.ProcInst).Target, "#inst": string(t.(xml.ProcInst).Inst)}
+				n = map[string]interface{}{"#procinst": na}
+				return n, NoRoot
+			}
+			pm := make(map[string]interface{}, 3)
+			pm["#target"] = t.(xml.ProcInst).Target
+			pm["#inst"] = string(t.(xml.ProcInst).Inst)
+			pm["#seq"] = seq
+			seq++
+			na["#procinst"] = pm
+		default:
+			// noop - shouldn't ever get here, now, since we handle all token types
+		}
+	}
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// --------------------- mv.XmlSeq & mv.XmlSeqWriter -------------------------
+
+// Xml encodes a MapSeq as XML with elements sorted on #seq. The companion of NewMapXmlSeq().
+// The following rules apply.
+// - The "#seq" key value is used to sequence the subelements or attributes only.
+// - The "#attr" map key identifies the map of attribute map[string]interface{} values with "#text" key.
+// - The "#comment" map key identifies a comment in the value "#text" map entry - <!--comment-->.
+// - The "#directive" map key identifies a directive in the value "#text" map entry - <!directive>.
+// - The "#procinst" map key identifies a process instruction in the value "#target" and "#inst"
+//   map entries - <?target inst?>.
+// - Value type encoding:
+//     > string, bool, float64, int, int32, int64, float32: per "%v" formatting
+//     > []bool, []uint8: by casting to string
+//     > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//       value is "UNKNOWN"
+// - Elements with only attribute values or null values are terminated using "/>" unless XmlGoEmptyElemSyntax() is called.
+// - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
+//   Thus, `{ "key":"value" }` encodes as "<key>value</key>".
+func (mv MapSeq) Xml(rootTag ...string) ([]byte, error) { + m := map[string]interface{}(mv) + var err error + s := new(string) + p := new(pretty) // just a stub + + if len(m) == 1 && len(rootTag) == 0 { + for key, value := range m { + // if it's an array, see if all values are map[string]interface{} + // we force a new root tag if we'll end up with no key:value in the list + // so: key:[string_val, bool:true] --> string_valtrue + switch value.(type) { + case []interface{}: + for _, v := range value.([]interface{}) { + switch v.(type) { + case map[string]interface{}: // noop + default: // anything else + err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p) + goto done + } + } + } + err = mapToXmlSeqIndent(false, s, key, value, p) + } + } else if len(rootTag) == 1 { + err = mapToXmlSeqIndent(false, s, rootTag[0], m, p) + } else { + err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p) + } +done: + if xmlCheckIsValid { + d := xml.NewDecoder(bytes.NewReader([]byte(*s))) + for { + _, err = d.Token() + if err == io.EOF { + err = nil + break + } else if err != nil { + return nil, err + } + } + } + return []byte(*s), err +} + +// The following implementation is provided only for symmetry with NewMapXmlReader[Raw] +// The names will also provide a key for the number of return arguments. + +// XmlWriter Writes the MapSeq value as XML on the Writer. +// See MapSeq.Xml() for encoding rules. +func (mv MapSeq) XmlWriter(xmlWriter io.Writer, rootTag ...string) error { + x, err := mv.Xml(rootTag...) + if err != nil { + return err + } + + _, err = xmlWriter.Write(x) + return err +} + +// XmlWriteRaw writes the MapSeq value as XML on the Writer. []byte is the raw XML that was written. +// See Map.XmlSeq() for encoding rules. +/* +func (mv MapSeq) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) { + x, err := mv.Xml(rootTag...) + if err != nil { + return x, err + } + + _, err = xmlWriter.Write(x) + return x, err +} +*/ + +// XmlIndentWriter writes the MapSeq value as pretty XML on the Writer. +// See MapSeq.Xml() for encoding rules. +func (mv MapSeq) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error { + x, err := mv.XmlIndent(prefix, indent, rootTag...) + if err != nil { + return err + } + + _, err = xmlWriter.Write(x) + return err +} + +// XmlIndentWriterRaw writes the Map as pretty XML on the Writer. []byte is the raw XML that was written. +// See Map.XmlSeq() for encoding rules. +/* +func (mv MapSeq) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) { + x, err := mv.XmlSeqIndent(prefix, indent, rootTag...) + if err != nil { + return x, err + } + + _, err = xmlWriter.Write(x) + return x, err +} +*/ + +// -------------------- END: mv.Xml & mv.XmlWriter ------------------------------- + +// ---------------------- XmlSeqIndent ---------------------------- + +// XmlIndent encodes a map[string]interface{} as a pretty XML string. +// See MapSeq.XmlSeq() for encoding rules. 
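Aside (not part of the vendored file): an illustrative round trip through the encoder above. Decoding with NewMapXmlSeq and re-encoding with Xml() sorts elements on their "#seq" values, so the original element order survives the unordered map representation; the sample document is an assumption.

package main

import (
	"fmt"

	"github.com/clbanning/mxj/v2"
)

func main() {
	in := []byte(`<doc><b>2</b><a>1</a><c>3</c></doc>`)
	msv, err := mxj.NewMapXmlSeq(in)
	if err != nil {
		panic(err)
	}
	out, err := msv.Xml() // "#seq" pairs are stripped during encoding
	if err != nil {
		panic(err)
	}
	// element order preserved: <doc><b>2</b><a>1</a><c>3</c></doc>
	fmt.Println(string(out))
}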
+func (mv MapSeq) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlSeqIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+	}
+	if xmlCheckIsValid {
+		if _, err = NewMapXml([]byte(*s)); err != nil {
+			return nil, err
+		}
+		d := xml.NewDecoder(bytes.NewReader([]byte(*s)))
+		for {
+			_, err = d.Token()
+			if err == io.EOF {
+				err = nil
+				break
+			} else if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return []byte(*s), err
+}
+
+// where the work actually happens
+// returns an error if an attribute is not atomic
+func mapToXmlSeqIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
+	var endTag bool
+	var isSimple bool
+	var noEndTag bool
+	var elen int
+	var ss string
+	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
+
+	switch value.(type) {
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+		if doIndent {
+			*s += p.padding
+		}
+		if key != "#comment" && key != "#directive" && key != "#procinst" {
+			*s += `<` + key
+		}
+	}
+	switch value.(type) {
+	case map[string]interface{}:
+		val := value.(map[string]interface{})
+
+		if key == "#comment" {
+			*s += `<!--` + val["#text"].(string) + `-->`
+			noEndTag = true
+			break
+		}
+
+		if key == "#directive" {
+			*s += `<!` + val["#text"].(string) + `>`
+			noEndTag = true
+			break
+		}
+
+		if key == "#procinst" {
+			*s += `<?` + val["#target"].(string) + ` ` + val["#inst"].(string) + `?>`
+			noEndTag = true
+			break
+		}
+
+		haveAttrs := false
+		// process attributes first
+		if v, ok := val["#attr"].(map[string]interface{}); ok {
+			// First, unroll the map[string]interface{} into a []keyval array.
+			// Then sequence it.
+			kv := make([]keyval, len(v))
+			n := 0
+			for ak, av := range v {
+				kv[n] = keyval{ak, av}
+				n++
+			}
+			sort.Sort(elemListSeq(kv))
+			// Now encode the attributes in original decoding sequence, using keyval array.
+			for _, a := range kv {
+				vv := a.v.(map[string]interface{})
+				switch vv["#text"].(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(vv["#text"].(string))
+					} else {
+						ss = vv["#text"].(string)
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				case float64, bool, int, int32, int64, float32:
+					*s += ` ` + a.k + `="` + fmt.Sprintf("%v", vv["#text"]) + `"`
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(vv["#text"].([]byte)))
+					} else {
+						ss = string(vv["#text"].([]byte))
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				default:
+					return fmt.Errorf("invalid attribute value for: %s", a.k)
+				}
+			}
+			haveAttrs = true
+		}
+
+		// simple element?
+		// every map value has, at least, "#seq" and, perhaps, "#text" and/or "#attr"
+		_, seqOK := val["#seq"] // have key
+		if v, ok := val["#text"]; ok && ((len(val) == 3 && haveAttrs) || (len(val) == 2 && !haveAttrs)) && seqOK {
+			if stmp, ok := v.(string); ok && stmp != "" {
+				if xmlEscapeChars {
+					stmp = escapeChars(stmp)
+				}
+				*s += ">" + stmp
+				endTag = true
+				elen = 1
+			}
+			isSimple = true
+			break
+		} else if !ok && ((len(val) == 2 && haveAttrs) || (len(val) == 1 && !haveAttrs)) && seqOK {
+			// here no #text but have #seq or #seq+#attr
+			endTag = false
+			break
+		}
+
+		// we now need to sequence everything except attributes
+		// 'kv' will hold everything that needs to be written
+		kv := make([]keyval, 0)
+		for k, v := range val {
+			if k == "#attr" { // already processed
+				continue
+			}
+			if k == "#seq" { // ignore - just for sorting
+				continue
+			}
+			switch v.(type) {
+			case []interface{}:
+				// unwind the array as separate entries
+				for _, vv := range v.([]interface{}) {
+					kv = append(kv, keyval{k, vv})
+				}
+			default:
+				kv = append(kv, keyval{k, v})
+			}
+		}
+
+		// close tag with possible attributes
+		*s += ">"
+		if doIndent {
+			*s += "\n"
+		}
+		// something more complex
+		p.mapDepth++
+		sort.Sort(elemListSeq(kv))
+		i := 0
+		for _, v := range kv {
+			switch v.v.(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			if err := mapToXmlSeqIndent(doIndent, s, v.k, v.v, p); err != nil {
+				return err
+			}
+			switch v.v.(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content other than attrs
+	case []interface{}:
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlSeqIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			*s += p.padding
+		}
+		*s += "<" + key
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			if xmlEscapeChars {
+				ss = escapeChars(value.(string))
+			} else {
+				ss = value.(string)
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		case float64, bool, int, int32, int64, float32:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			if xmlEscapeChars {
+				ss = escapeChars(string(value.([]byte)))
+			} else {
+				ss = string(value.([]byte))
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		default:
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				*s += ">UNKNOWN"
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					*s += string(v)
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag && !noEndTag {
+		if doIndent {
+			if !isSimple {
+				*s += p.padding
+			}
+		}
+		switch value.(type) {
+		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+			if elen > 0 || useGoXmlEmptyElemSyntax {
+				if elen == 0 {
+					*s += ">"
+				}
+				*s += `</` + key + `>`
+			} else {
+				*s += `/>`
+			}
+		}
+	} else if !noEndTag {
+		if useGoXmlEmptyElemSyntax {
+			*s += `</` + key + `>`
+			// *s += "></" + key + ">"
+		} else {
+			*s += "/>"
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			*s += "\n"
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+//
the element sort implementation + +type keyval struct { + k string + v interface{} +} +type elemListSeq []keyval + +func (e elemListSeq) Len() int { + return len(e) +} + +func (e elemListSeq) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e elemListSeq) Less(i, j int) bool { + var iseq, jseq int + var fiseq, fjseq float64 + var ok bool + if iseq, ok = e[i].v.(map[string]interface{})["#seq"].(int); !ok { + if fiseq, ok = e[i].v.(map[string]interface{})["#seq"].(float64); ok { + iseq = int(fiseq) + } else { + iseq = 9999999 + } + } + + if jseq, ok = e[j].v.(map[string]interface{})["#seq"].(int); !ok { + if fjseq, ok = e[j].v.(map[string]interface{})["#seq"].(float64); ok { + jseq = int(fjseq) + } else { + jseq = 9999999 + } + } + + return iseq <= jseq +} + +// =============== https://groups.google.com/forum/#!topic/golang-nuts/lHPOHD-8qio + +// BeautifyXml (re)formats an XML doc similar to Map.XmlIndent(). +// It preserves comments, directives and process instructions, +func BeautifyXml(b []byte, prefix, indent string) ([]byte, error) { + x, err := NewMapXmlSeq(b) + if err != nil { + return nil, err + } + return x.XmlIndent(prefix, indent) +} diff --git a/vendor/github.com/clbanning/mxj/v2/xmlseq2.go b/vendor/github.com/clbanning/mxj/v2/xmlseq2.go new file mode 100644 index 000000000..467fd0769 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/xmlseq2.go @@ -0,0 +1,18 @@ +// Copyright 2012-2016, 2019 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +// ---------------- expose Map methods to MapSeq type --------------------------- + +// Pretty print a Map. +func (msv MapSeq) StringIndent(offset ...int) string { + return writeMap(map[string]interface{}(msv), true, true, offset...) +} + +// Pretty print a Map without the value type information - just key:value entries. +func (msv MapSeq) StringIndentNoTypeInfo(offset ...int) string { + return writeMap(map[string]interface{}(msv), false, true, offset...) +} + diff --git a/vendor/github.com/cloudflare/circl/LICENSE b/vendor/github.com/cloudflare/circl/LICENSE new file mode 100644 index 000000000..67edaa90a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/LICENSE @@ -0,0 +1,57 @@ +Copyright (c) 2019 Cloudflare. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Cloudflare nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +======================================================================== + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve.go new file mode 100644 index 000000000..f9057c2b8 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve.go @@ -0,0 +1,96 @@ +package x25519 + +import ( + fp "github.com/cloudflare/circl/math/fp25519" +) + +// ladderJoye calculates a fixed-point multiplication with the generator point. +// The algorithm is the right-to-left Joye's ladder as described +// in "How to precompute a ladder" in SAC'2017. +func ladderJoye(k *Key) { + w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved. + fp.SetOne(&w[1]) // x1 = 1 + fp.SetOne(&w[2]) // z1 = 1 + w[3] = fp.Elt{ // x2 = G-S + 0xbd, 0xaa, 0x2f, 0xc8, 0xfe, 0xe1, 0x94, 0x7e, + 0xf8, 0xed, 0xb2, 0x14, 0xae, 0x95, 0xf0, 0xbb, + 0xe2, 0x48, 0x5d, 0x23, 0xb9, 0xa0, 0xc7, 0xad, + 0x34, 0xab, 0x7c, 0xe2, 0xee, 0xcd, 0xae, 0x1e, + } + fp.SetOne(&w[4]) // z2 = 1 + + const n = 255 + const h = 3 + swap := uint(1) + for s := 0; s < n-h; s++ { + i := (s + h) / 8 + j := (s + h) % 8 + bit := uint((k[i] >> uint(j)) & 1) + copy(w[0][:], tableGenerator[s*Size:(s+1)*Size]) + diffAdd(&w, swap^bit) + swap = bit + } + for s := 0; s < h; s++ { + double(&w[1], &w[2]) + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +// ladderMontgomery calculates a generic scalar point multiplication +// The algorithm implemented is the left-to-right Montgomery's ladder. 
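Aside (not part of the vendored file): a toy illustration of the lazy conditional-swap pattern used by both ladders here (swap^bit in ladderJoye, move^bit in ladderMontgomery). It is not the library's field arithmetic, just the control idea: instead of swapping after every bit, the current scalar bit is XORed with the previous one, so the operands are physically exchanged only when consecutive bits differ, and always via a branch-free mask.

package main

import "fmt"

// cswap exchanges a and b iff c == 1, without data-dependent branches.
func cswap(a, b *uint64, c uint) {
	m := -uint64(c & 1) // all ones when c == 1, all zeros when c == 0
	t := m & (*a ^ *b)
	*a ^= t
	*b ^= t
}

func main() {
	x, y := uint64(10), uint64(20)
	k := byte(0b10110100) // stand-in for one scalar byte, processed MSB first
	swap := uint(0)
	for s := 7; s >= 0; s-- {
		bit := uint((k >> uint(s)) & 1)
		cswap(&x, &y, swap^bit) // swap only when the bit value changed
		swap = bit
		// ... a real ladder performs one step of point arithmetic here ...
	}
	cswap(&x, &y, swap) // settle any pending swap after the last bit
	fmt.Println(x, y)
}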
+func ladderMontgomery(k, xP *Key) { + w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved. + w[0] = *(*fp.Elt)(xP) // x1 = xP + fp.SetOne(&w[1]) // x2 = 1 + w[3] = *(*fp.Elt)(xP) // x3 = xP + fp.SetOne(&w[4]) // z3 = 1 + + move := uint(0) + for s := 255 - 1; s >= 0; s-- { + i := s / 8 + j := s % 8 + bit := uint((k[i] >> uint(j)) & 1) + ladderStep(&w, move^bit) + move = bit + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +func toAffine(k *[fp.Size]byte, x, z *fp.Elt) { + fp.Inv(z, z) + fp.Mul(x, x, z) + _ = fp.ToBytes(k[:], x) +} + +var lowOrderPoints = [5]fp.Elt{ + { /* (0,_,1) point of order 2 on Curve25519 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (1,_,1) point of order 4 on Curve25519 */ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (x,_,1) first point of order 8 on Curve25519 */ + 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, + 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, + 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, + 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00, + }, + { /* (x,_,1) second point of order 8 on Curve25519 */ + 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, + 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, + 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, + 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57, + }, + { /* (-1,_,1) a point of order 4 on the twist of Curve25519 */ + 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, + }, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go new file mode 100644 index 000000000..8a3d54c57 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go @@ -0,0 +1,30 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package x25519 + +import ( + fp "github.com/cloudflare/circl/math/fp25519" + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func double(x, z *fp.Elt) { doubleAmd64(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) } + +//go:noescape +func ladderStepAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func diffAddAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func doubleAmd64(x, z *fp.Elt) + +//go:noescape +func mulA24Amd64(z, x *fp.Elt) diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h new file mode 100644 index 000000000..8c1ae4d0f --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h @@ -0,0 +1,111 @@ +#define ladderStepLeg \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulLeg(b0,x2,z3) \ + integerMulLeg(b1,x3,z2) \ + reduceFromDoubleLeg(t0,b0) \ + reduceFromDoubleLeg(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrLeg(b0,t0) \ + integerSqrLeg(b1,t1) \ + reduceFromDoubleLeg(x3,b0) \ + reduceFromDoubleLeg(z3,b1) \ + integerMulLeg(b0,x1,z3) \ + reduceFromDoubleLeg(z3,b0) \ + 
integerSqrLeg(b0,x2) \ + integerSqrLeg(b1,z2) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z2) \ + integerMulLeg(b0,x2,z2) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) + +#define ladderStepBmi2Adx \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulAdx(b0,x2,z3) \ + integerMulAdx(b1,x3,z2) \ + reduceFromDoubleAdx(t0,b0) \ + reduceFromDoubleAdx(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrAdx(b0,t0) \ + integerSqrAdx(b1,t1) \ + reduceFromDoubleAdx(x3,b0) \ + reduceFromDoubleAdx(z3,b1) \ + integerMulAdx(b0,x1,z3) \ + reduceFromDoubleAdx(z3,b0) \ + integerSqrAdx(b0,x2) \ + integerSqrAdx(b1,z2) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z2) \ + integerMulAdx(b0,x2,z2) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) + +#define difAddLeg \ + addSub(x1,z1) \ + integerMulLeg(b0,z1,ui) \ + reduceFromDoubleLeg(z1,b0) \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + integerMulLeg(b0,x1,z2) \ + integerMulLeg(b1,z1,x2) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define difAddBmi2Adx \ + addSub(x1,z1) \ + integerMulAdx(b0,z1,ui) \ + reduceFromDoubleAdx(z1,b0) \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + integerMulAdx(b0,x1,z2) \ + integerMulAdx(b1,z1,x2) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) + +#define doubleLeg \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z1) \ + integerMulLeg(b0,x1,z1) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define doubleBmi2Adx \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z1) \ + integerMulAdx(b0,x1,z1) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s new file mode 100644 index 000000000..b7723185b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s @@ -0,0 +1,156 @@ +// +build amd64 + +#include "textflag.h" + +// Depends on circl/math/fp25519 package +#include "../../math/fp25519/fp_amd64.h" +#include "curve_amd64.h" + +// CTE_A24 is (A+2)/4 from Curve25519 +#define CTE_A24 121666 + +#define Size 32 + +// multiplyA24Leg multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R13, FLAGS +// Instr: x86_64, cmov +#define multiplyA24Leg(z,x) \ + MOVL $CTE_A24, AX; MULQ 0+x; MOVQ AX, R8; MOVQ DX, R9; \ + MOVL $CTE_A24, AX; MULQ 8+x; MOVQ AX, R12; MOVQ DX, R10; \ + MOVL $CTE_A24, AX; MULQ 16+x; MOVQ AX, R13; MOVQ DX, R11; \ + MOVL $CTE_A24, AX; MULQ 24+x; \ + ADDQ R12, R9; \ + ADCQ R13, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + MOVL $38, AX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \ + IMULQ AX, DX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ $0, 
R11; MOVQ R11, 24+z; \ + MOVQ $0, DX; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; MOVQ R8, 0+z; + +// multiplyA24Adx multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R12, FLAGS +// Instr: x86_64, cmov, bmi2 +#define multiplyA24Adx(z,x) \ + MOVQ $CTE_A24, DX; \ + MULXQ 0+x, R8, R10; \ + MULXQ 8+x, R9, R11; ADDQ R10, R9; \ + MULXQ 16+x, R10, AX; ADCQ R11, R10; \ + MULXQ 24+x, R11, R12; ADCQ AX, R11; \ + ;;;;;;;;;;;;;;;;;;;;; ADCQ $0, R12; \ + MOVL $38, DX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \ + IMULQ DX, R12; \ + ADDQ R12, R8; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ $0, R11; MOVQ R11, 24+z; \ + MOVQ $0, R12; \ + CMOVQCS DX, R12; \ + ADDQ R12, R8; MOVQ R8, 0+z; + +#define mulA24Legacy \ + multiplyA24Leg(0(DI),0(SI)) +#define mulA24Bmi2Adx \ + multiplyA24Adx(0(DI),0(SI)) + +// func mulA24Amd64(z, x *fp255.Elt) +TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx) + + +// func ladderStepAmd64(w *[5]fp255.Elt, b uint) +// ladderStepAmd64 calculates a point addition and doubling as follows: +// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-). +// work = (x1,x2,z2,x3,z3) are five fp255.Elt of 32 bytes. +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·ladderStepAmd64(SB),NOSPLIT,$192-16 + // Parameters + #define regWork DI + #define regMove SI + #define x1 0*Size(regWork) + #define x2 1*Size(regWork) + #define z2 2*Size(regWork) + #define x3 3*Size(regWork) + #define z3 4*Size(regWork) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regMove + CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx) + #undef regWork + #undef regMove + #undef x1 + #undef x2 + #undef z2 + #undef x3 + #undef z3 + #undef t0 + #undef t1 + #undef b0 + #undef b1 + +// func diffAddAmd64(w *[5]fp255.Elt, b uint) +// diffAddAmd64 calculates a differential point addition using a precomputed point. +// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2) +// w = (mu,x1,z1,x2,z2) are five fp.Elt, and +// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·diffAddAmd64(SB),NOSPLIT,$128-16 + // Parameters + #define regWork DI + #define regSwap SI + #define ui 0*Size(regWork) + #define x1 1*Size(regWork) + #define z1 2*Size(regWork) + #define x2 3*Size(regWork) + #define z2 4*Size(regWork) + // Local variables + #define b0 0*Size(SP) + #define b1 2*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regSwap + cswap(x1,x2,regSwap) + cswap(z1,z2,regSwap) + CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx) + #undef regWork + #undef regSwap + #undef ui + #undef x1 + #undef z1 + #undef x2 + #undef z2 + #undef b0 + #undef b1 + +// func doubleAmd64(x, z *fp255.Elt) +// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1). +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. 
+TEXT ·doubleAmd64(SB),NOSPLIT,$192-16 + // Parameters + #define x1 0(DI) + #define z1 0(SI) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ x+0(FP), DI + MOVQ z+8(FP), SI + CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx) + #undef x1 + #undef z1 + #undef t0 + #undef t1 + #undef b0 + #undef b1 diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go new file mode 100644 index 000000000..dae67ea37 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go @@ -0,0 +1,85 @@ +package x25519 + +import ( + "encoding/binary" + "math/bits" + + fp "github.com/cloudflare/circl/math/fp25519" +) + +func doubleGeneric(x, z *fp.Elt) { + t0, t1 := &fp.Elt{}, &fp.Elt{} + fp.AddSub(x, z) + fp.Sqr(x, x) + fp.Sqr(z, z) + fp.Sub(t0, x, z) + mulA24Generic(t1, t0) + fp.Add(t1, t1, z) + fp.Mul(x, x, z) + fp.Mul(z, t0, t1) +} + +func diffAddGeneric(w *[5]fp.Elt, b uint) { + mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4] + fp.Cswap(x1, x2, b) + fp.Cswap(z1, z2, b) + fp.AddSub(x1, z1) + fp.Mul(z1, z1, mu) + fp.AddSub(x1, z1) + fp.Sqr(x1, x1) + fp.Sqr(z1, z1) + fp.Mul(x1, x1, z2) + fp.Mul(z1, z1, x2) +} + +func ladderStepGeneric(w *[5]fp.Elt, b uint) { + x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4] + t0 := &fp.Elt{} + t1 := &fp.Elt{} + fp.AddSub(x2, z2) + fp.AddSub(x3, z3) + fp.Mul(t0, x2, z3) + fp.Mul(t1, x3, z2) + fp.AddSub(t0, t1) + fp.Cmov(x2, x3, b) + fp.Cmov(z2, z3, b) + fp.Sqr(x3, t0) + fp.Sqr(z3, t1) + fp.Mul(z3, x1, z3) + fp.Sqr(x2, x2) + fp.Sqr(z2, z2) + fp.Sub(t0, x2, z2) + mulA24Generic(t1, t0) + fp.Add(t1, t1, z2) + fp.Mul(x2, x2, z2) + fp.Mul(z2, t0, t1) +} + +func mulA24Generic(z, x *fp.Elt) { + const A24 = 121666 + const n = 8 + var xx [4]uint64 + for i := range xx { + xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n]) + } + + h0, l0 := bits.Mul64(xx[0], A24) + h1, l1 := bits.Mul64(xx[1], A24) + h2, l2 := bits.Mul64(xx[2], A24) + h3, l3 := bits.Mul64(xx[3], A24) + + var c3 uint64 + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + _, l4 = bits.Mul64(l4, 38) + l0, c0 = bits.Add64(l0, l4, 0) + xx[1], c1 = bits.Add64(l1, 0, c0) + xx[2], c2 = bits.Add64(l2, 0, c1) + xx[3], c3 = bits.Add64(l3, 0, c2) + xx[0], _ = bits.Add64(l0, (-c3)&38, 0) + for i := range xx { + binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i]) + } +} diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go new file mode 100644 index 000000000..07fab97d2 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go @@ -0,0 +1,11 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package x25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +func double(x, z *fp.Elt) { doubleGeneric(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) } diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/doc.go b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go new file mode 100644 index 000000000..3ce102d14 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go @@ -0,0 +1,19 @@ +/* +Package x25519 provides Diffie-Hellman functions as specified in RFC-7748. + +Validation of public keys. 
+ +The Diffie-Hellman function, as described in RFC-7748 [1], works for any +public key. However, if a different protocol requires contributory +behaviour [2,3], then the public keys must be validated against low-order +points [3,4]. To do that, the Shared function performs this validation +internally and returns false when the public key is invalid (i.e., it +is a low-order point). + +References: + - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt) + - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html) + - [3] Bernstein (https://cr.yp.to/ecdh.html#validate) + - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526) +*/ +package x25519 diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/key.go b/vendor/github.com/cloudflare/circl/dh/x25519/key.go new file mode 100644 index 000000000..c76f72ac7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/key.go @@ -0,0 +1,47 @@ +package x25519 + +import ( + "crypto/subtle" + + fp "github.com/cloudflare/circl/math/fp25519" +) + +// Size is the length in bytes of a X25519 key. +const Size = 32 + +// Key represents a X25519 key. +type Key [Size]byte + +func (k *Key) clamp(in *Key) *Key { + *k = *in + k[0] &= 248 + k[31] = (k[31] & 127) | 64 + return k +} + +// isValidPubKey verifies if the public key is not a low-order point. +func (k *Key) isValidPubKey() bool { + fp.Modp((*fp.Elt)(k)) + var isLowOrder int + for _, P := range lowOrderPoints { + isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:]) + } + return isLowOrder == 0 +} + +// KeyGen obtains a public key given a secret key. +func KeyGen(public, secret *Key) { + ladderJoye(public.clamp(secret)) +} + +// Shared calculates Alice's shared key from Alice's secret key and Bob's +// public key returning true on success. A failure case happens when the public +// key is a low-order point, thus the shared key is all-zeros and the function +// returns false. +func Shared(shared, secret, public *Key) bool { + validPk := *public + validPk[31] &= (1 << (255 % 8)) - 1 + ok := validPk.isValidPubKey() + ladderMontgomery(shared.clamp(secret), &validPk) + return ok +} diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/table.go b/vendor/github.com/cloudflare/circl/dh/x25519/table.go new file mode 100644 index 000000000..28c8c4ac0 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x25519/table.go @@ -0,0 +1,268 @@ +package x25519 + +import "github.com/cloudflare/circl/math/fp25519" + +// tableGenerator contains the set of points: +// +// t[i] = (xi+1)/(xi-1), +// +// where (xi,yi) = 2^iG and G is the generator point +// Size = (256)*(256/8) = 8192 bytes. 
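Aside (not part of the vendored file): a hedged usage sketch of the key.go API shown above. The use of crypto/rand is an assumption; any 32 random bytes serve as secret-key material, since clamp() normalizes them before the ladder runs.

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/circl/dh/x25519"
)

func main() {
	var aliceSecret, alicePublic, bobSecret, bobPublic x25519.Key
	if _, err := rand.Read(aliceSecret[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobSecret[:]); err != nil {
		panic(err)
	}

	// Derive public keys via the fixed-base Joye ladder.
	x25519.KeyGen(&alicePublic, &aliceSecret)
	x25519.KeyGen(&bobPublic, &bobSecret)

	// Each side computes the shared secret; ok is false iff the peer's
	// public key is one of the low-order points rejected above.
	var k1, k2 x25519.Key
	ok1 := x25519.Shared(&k1, &aliceSecret, &bobPublic)
	ok2 := x25519.Shared(&k2, &bobSecret, &alicePublic)
	fmt.Println(ok1 && ok2, k1 == k2) // true true
}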
+var tableGenerator = [256 * fp25519.Size]byte{ + /* (2^ 0)P */ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f, + /* (2^ 1)P */ 0x96, 0xfe, 0xaa, 0x16, 0xf4, 0x20, 0x82, 0x6b, 0x34, 0x6a, 0x56, 0x4f, 0x2b, 0xeb, 0xeb, 0x82, 0x0f, 0x95, 0xa5, 0x75, 0xb0, 0xa5, 0xa9, 0xd5, 0xf4, 0x88, 0x24, 0x4b, 0xcf, 0xb2, 0x42, 0x51, + /* (2^ 2)P */ 0x0c, 0x68, 0x69, 0x00, 0x75, 0xbc, 0xae, 0x6a, 0x41, 0x9c, 0xf9, 0xa0, 0x20, 0x78, 0xcf, 0x89, 0xf4, 0xd0, 0x56, 0x3b, 0x18, 0xd9, 0x58, 0x2a, 0xa4, 0x11, 0x60, 0xe3, 0x80, 0xca, 0x5a, 0x4b, + /* (2^ 3)P */ 0x5d, 0x74, 0x29, 0x8c, 0x34, 0x32, 0x91, 0x32, 0xd7, 0x2f, 0x64, 0xe1, 0x16, 0xe6, 0xa2, 0xf4, 0x34, 0xbc, 0x67, 0xff, 0x03, 0xbb, 0x45, 0x1e, 0x4a, 0x9b, 0x2a, 0xf4, 0xd0, 0x12, 0x69, 0x30, + /* (2^ 4)P */ 0x54, 0x71, 0xaf, 0xe6, 0x07, 0x65, 0x88, 0xff, 0x2f, 0xc8, 0xee, 0xdf, 0x13, 0x0e, 0xf5, 0x04, 0xce, 0xb5, 0xba, 0x2a, 0xe8, 0x2f, 0x51, 0xaa, 0x22, 0xf2, 0xd5, 0x68, 0x1a, 0x25, 0x4e, 0x17, + /* (2^ 5)P */ 0x98, 0x88, 0x02, 0x82, 0x0d, 0x70, 0x96, 0xcf, 0xc5, 0x02, 0x2c, 0x0a, 0x37, 0xe3, 0x43, 0x17, 0xaa, 0x6e, 0xe8, 0xb4, 0x98, 0xec, 0x9e, 0x37, 0x2e, 0x48, 0xe0, 0x51, 0x8a, 0x88, 0x59, 0x0c, + /* (2^ 6)P */ 0x89, 0xd1, 0xb5, 0x99, 0xd6, 0xf1, 0xcb, 0xfb, 0x84, 0xdc, 0x9f, 0x8e, 0xd5, 0xf0, 0xae, 0xac, 0x14, 0x76, 0x1f, 0x23, 0x06, 0x0d, 0xc2, 0xc1, 0x72, 0xf9, 0x74, 0xa2, 0x8d, 0x21, 0x38, 0x29, + /* (2^ 7)P */ 0x18, 0x7f, 0x1d, 0xff, 0xbe, 0x49, 0xaf, 0xf6, 0xc2, 0xc9, 0x7a, 0x38, 0x22, 0x1c, 0x54, 0xcc, 0x6b, 0xc5, 0x15, 0x40, 0xef, 0xc9, 0xfc, 0x96, 0xa9, 0x13, 0x09, 0x69, 0x7c, 0x62, 0xc1, 0x69, + /* (2^ 8)P */ 0x0e, 0xdb, 0x33, 0x47, 0x2f, 0xfd, 0x86, 0x7a, 0xe9, 0x7d, 0x08, 0x9e, 0xf2, 0xc4, 0xb8, 0xfd, 0x29, 0xa2, 0xa2, 0x8e, 0x1a, 0x4b, 0x5e, 0x09, 0x79, 0x7a, 0xb3, 0x29, 0xc8, 0xa7, 0xd7, 0x1a, + /* (2^ 9)P */ 0xc0, 0xa0, 0x7e, 0xd1, 0xca, 0x89, 0x2d, 0x34, 0x51, 0x20, 0xed, 0xcc, 0xa6, 0xdd, 0xbe, 0x67, 0x74, 0x2f, 0xb4, 0x2b, 0xbf, 0x31, 0xca, 0x19, 0xbb, 0xac, 0x80, 0x49, 0xc8, 0xb4, 0xf7, 0x3d, + /* (2^ 10)P */ 0x83, 0xd8, 0x0a, 0xc8, 0x4d, 0x44, 0xc6, 0xa8, 0x85, 0xab, 0xe3, 0x66, 0x03, 0x44, 0x1e, 0xb9, 0xd8, 0xf6, 0x64, 0x01, 0xa0, 0xcd, 0x15, 0xc2, 0x68, 0xe6, 0x47, 0xf2, 0x6e, 0x7c, 0x86, 0x3d, + /* (2^ 11)P */ 0x8c, 0x65, 0x3e, 0xcc, 0x2b, 0x58, 0xdd, 0xc7, 0x28, 0x55, 0x0e, 0xee, 0x48, 0x47, 0x2c, 0xfd, 0x71, 0x4f, 0x9f, 0xcc, 0x95, 0x9b, 0xfd, 0xa0, 0xdf, 0x5d, 0x67, 0xb0, 0x71, 0xd8, 0x29, 0x75, + /* (2^ 12)P */ 0x78, 0xbd, 0x3c, 0x2d, 0xb4, 0x68, 0xf5, 0xb8, 0x82, 0xda, 0xf3, 0x91, 0x1b, 0x01, 0x33, 0x12, 0x62, 0x3b, 0x7c, 0x4a, 0xcd, 0x6c, 0xce, 0x2d, 0x03, 0x86, 0x49, 0x9e, 0x8e, 0xfc, 0xe7, 0x75, + /* (2^ 13)P */ 0xec, 0xb6, 0xd0, 0xfc, 0xf1, 0x13, 0x4f, 0x2f, 0x45, 0x7a, 0xff, 0x29, 0x1f, 0xca, 0xa8, 0xf1, 0x9b, 0xe2, 0x81, 0x29, 0xa7, 0xc1, 0x49, 0xc2, 0x6a, 0xb5, 0x83, 0x8c, 0xbb, 0x0d, 0xbe, 0x6e, + /* (2^ 14)P */ 0x22, 0xb2, 0x0b, 0x17, 0x8d, 0xfa, 0x14, 0x71, 0x5f, 0x93, 0x93, 0xbf, 0xd5, 0xdc, 0xa2, 0x65, 0x9a, 0x97, 0x9c, 0xb5, 0x68, 0x1f, 0xc4, 0xbd, 0x89, 0x92, 0xce, 0xa2, 0x79, 0xef, 0x0e, 0x2f, + /* (2^ 15)P */ 0xce, 0x37, 0x3c, 0x08, 0x0c, 0xbf, 0xec, 0x42, 0x22, 0x63, 0x49, 0xec, 0x09, 0xbc, 0x30, 0x29, 0x0d, 0xac, 0xfe, 0x9c, 0xc1, 0xb0, 0x94, 0xf2, 0x80, 0xbb, 0xfa, 0xed, 0x4b, 0xaa, 0x80, 0x37, + /* (2^ 16)P */ 0x29, 0xd9, 0xea, 0x7c, 0x3e, 0x7d, 0xc1, 0x56, 0xc5, 0x22, 0x57, 0x2e, 0xeb, 0x4b, 0xcb, 0xe7, 0x5a, 0xe1, 0xbf, 0x2d, 0x73, 0x31, 0xe9, 0x0c, 0xf8, 0x52, 
0x10, 0x62, 0xc7, 0x83, 0xb8, 0x41, + /* (2^ 17)P */ 0x50, 0x53, 0xd2, 0xc3, 0xa0, 0x5c, 0xf7, 0xdb, 0x51, 0xe3, 0xb1, 0x6e, 0x08, 0xbe, 0x36, 0x29, 0x12, 0xb2, 0xa9, 0xb4, 0x3c, 0xe0, 0x36, 0xc9, 0xaa, 0x25, 0x22, 0x32, 0x82, 0xbf, 0x45, 0x1d, + /* (2^ 18)P */ 0xc5, 0x4c, 0x02, 0x6a, 0x03, 0xb1, 0x1a, 0xe8, 0x72, 0x9a, 0x4c, 0x30, 0x1c, 0x20, 0x12, 0xe2, 0xfc, 0xb1, 0x32, 0x68, 0xba, 0x3f, 0xd7, 0xc5, 0x81, 0x95, 0x83, 0x4d, 0x5a, 0xdb, 0xff, 0x20, + /* (2^ 19)P */ 0xad, 0x0f, 0x5d, 0xbe, 0x67, 0xd3, 0x83, 0xa2, 0x75, 0x44, 0x16, 0x8b, 0xca, 0x25, 0x2b, 0x6c, 0x2e, 0xf2, 0xaa, 0x7c, 0x46, 0x35, 0x49, 0x9d, 0x49, 0xff, 0x85, 0xee, 0x8e, 0x40, 0x66, 0x51, + /* (2^ 20)P */ 0x61, 0xe3, 0xb4, 0xfa, 0xa2, 0xba, 0x67, 0x3c, 0xef, 0x5c, 0xf3, 0x7e, 0xc6, 0x33, 0xe4, 0xb3, 0x1c, 0x9b, 0x15, 0x41, 0x92, 0x72, 0x59, 0x52, 0x33, 0xab, 0xb0, 0xd5, 0x92, 0x18, 0x62, 0x6a, + /* (2^ 21)P */ 0xcb, 0xcd, 0x55, 0x75, 0x38, 0x4a, 0xb7, 0x20, 0x3f, 0x92, 0x08, 0x12, 0x0e, 0xa1, 0x2a, 0x53, 0xd1, 0x1d, 0x28, 0x62, 0x77, 0x7b, 0xa1, 0xea, 0xbf, 0x44, 0x5c, 0xf0, 0x43, 0x34, 0xab, 0x61, + /* (2^ 22)P */ 0xf8, 0xde, 0x24, 0x23, 0x42, 0x6c, 0x7a, 0x25, 0x7f, 0xcf, 0xe3, 0x17, 0x10, 0x6c, 0x1c, 0x13, 0x57, 0xa2, 0x30, 0xf6, 0x39, 0x87, 0x75, 0x23, 0x80, 0x85, 0xa7, 0x01, 0x7a, 0x40, 0x5a, 0x29, + /* (2^ 23)P */ 0xd9, 0xa8, 0x5d, 0x6d, 0x24, 0x43, 0xc4, 0xf8, 0x5d, 0xfa, 0x52, 0x0c, 0x45, 0x75, 0xd7, 0x19, 0x3d, 0xf8, 0x1b, 0x73, 0x92, 0xfc, 0xfc, 0x2a, 0x00, 0x47, 0x2b, 0x1b, 0xe8, 0xc8, 0x10, 0x7d, + /* (2^ 24)P */ 0x0b, 0xa2, 0xba, 0x70, 0x1f, 0x27, 0xe0, 0xc8, 0x57, 0x39, 0xa6, 0x7c, 0x86, 0x48, 0x37, 0x99, 0xbb, 0xd4, 0x7e, 0xcb, 0xb3, 0xef, 0x12, 0x54, 0x75, 0x29, 0xe6, 0x73, 0x61, 0xd3, 0x96, 0x31, + /* (2^ 25)P */ 0xfc, 0xdf, 0xc7, 0x41, 0xd1, 0xca, 0x5b, 0xde, 0x48, 0xc8, 0x95, 0xb3, 0xd2, 0x8c, 0xcc, 0x47, 0xcb, 0xf3, 0x1a, 0xe1, 0x42, 0xd9, 0x4c, 0xa3, 0xc2, 0xce, 0x4e, 0xd0, 0xf2, 0xdb, 0x56, 0x02, + /* (2^ 26)P */ 0x7f, 0x66, 0x0e, 0x4b, 0xe9, 0xb7, 0x5a, 0x87, 0x10, 0x0d, 0x85, 0xc0, 0x83, 0xdd, 0xd4, 0xca, 0x9f, 0xc7, 0x72, 0x4e, 0x8f, 0x2e, 0xf1, 0x47, 0x9b, 0xb1, 0x85, 0x8c, 0xbb, 0x87, 0x1a, 0x5f, + /* (2^ 27)P */ 0xb8, 0x51, 0x7f, 0x43, 0xb6, 0xd0, 0xe9, 0x7a, 0x65, 0x90, 0x87, 0x18, 0x55, 0xce, 0xc7, 0x12, 0xee, 0x7a, 0xf7, 0x5c, 0xfe, 0x09, 0xde, 0x2a, 0x27, 0x56, 0x2c, 0x7d, 0x2f, 0x5a, 0xa0, 0x23, + /* (2^ 28)P */ 0x9a, 0x16, 0x7c, 0xf1, 0x28, 0xe1, 0x08, 0x59, 0x2d, 0x85, 0xd0, 0x8a, 0xdd, 0x98, 0x74, 0xf7, 0x64, 0x2f, 0x10, 0xab, 0xce, 0xc4, 0xb4, 0x74, 0x45, 0x98, 0x13, 0x10, 0xdd, 0xba, 0x3a, 0x18, + /* (2^ 29)P */ 0xac, 0xaa, 0x92, 0xaa, 0x8d, 0xba, 0x65, 0xb1, 0x05, 0x67, 0x38, 0x99, 0x95, 0xef, 0xc5, 0xd5, 0xd1, 0x40, 0xfc, 0xf8, 0x0c, 0x8f, 0x2f, 0xbe, 0x14, 0x45, 0x20, 0xee, 0x35, 0xe6, 0x01, 0x27, + /* (2^ 30)P */ 0x14, 0x65, 0x15, 0x20, 0x00, 0xa8, 0x9f, 0x62, 0xce, 0xc1, 0xa8, 0x64, 0x87, 0x86, 0x23, 0xf2, 0x0e, 0x06, 0x3f, 0x0b, 0xff, 0x4f, 0x89, 0x5b, 0xfa, 0xa3, 0x08, 0xf7, 0x4c, 0x94, 0xd9, 0x60, + /* (2^ 31)P */ 0x1f, 0x20, 0x7a, 0x1c, 0x1a, 0x00, 0xea, 0xae, 0x63, 0xce, 0xe2, 0x3e, 0x63, 0x6a, 0xf1, 0xeb, 0xe1, 0x07, 0x7a, 0x4c, 0x59, 0x09, 0x77, 0x6f, 0xcb, 0x08, 0x02, 0x0d, 0x15, 0x58, 0xb9, 0x79, + /* (2^ 32)P */ 0xe7, 0x10, 0xd4, 0x01, 0x53, 0x5e, 0xb5, 0x24, 0x4d, 0xc8, 0xfd, 0xf3, 0xdf, 0x4e, 0xa3, 0xe3, 0xd8, 0x32, 0x40, 0x90, 0xe4, 0x68, 0x87, 0xd8, 0xec, 0xae, 0x3a, 0x7b, 0x42, 0x84, 0x13, 0x13, + /* (2^ 33)P */ 0x14, 0x4f, 0x23, 0x86, 0x12, 0xe5, 0x05, 0x84, 0x29, 0xc5, 0xb4, 0xad, 0x39, 0x47, 0xdc, 0x14, 0xfd, 0x4f, 0x63, 0x50, 0xb2, 0xb5, 0xa2, 0xb8, 0x93, 0xff, 
0xa7, 0xd8, 0x4a, 0xa9, 0xe2, 0x2f, + /* (2^ 34)P */ 0xdd, 0xfa, 0x43, 0xe8, 0xef, 0x57, 0x5c, 0xec, 0x18, 0x99, 0xbb, 0xf0, 0x40, 0xce, 0x43, 0x28, 0x05, 0x63, 0x3d, 0xcf, 0xd6, 0x61, 0xb5, 0xa4, 0x7e, 0x77, 0xfb, 0xe8, 0xbd, 0x29, 0x36, 0x74, + /* (2^ 35)P */ 0x8f, 0x73, 0xaf, 0xbb, 0x46, 0xdd, 0x3e, 0x34, 0x51, 0xa6, 0x01, 0xb1, 0x28, 0x18, 0x98, 0xed, 0x7a, 0x79, 0x2c, 0x88, 0x0b, 0x76, 0x01, 0xa4, 0x30, 0x87, 0xc8, 0x8d, 0xe2, 0x23, 0xc2, 0x1f, + /* (2^ 36)P */ 0x0e, 0xba, 0x0f, 0xfc, 0x91, 0x4e, 0x60, 0x48, 0xa4, 0x6f, 0x2c, 0x05, 0x8f, 0xf7, 0x37, 0xb6, 0x9c, 0x23, 0xe9, 0x09, 0x3d, 0xac, 0xcc, 0x91, 0x7c, 0x68, 0x7a, 0x43, 0xd4, 0xee, 0xf7, 0x23, + /* (2^ 37)P */ 0x00, 0xd8, 0x9b, 0x8d, 0x11, 0xb1, 0x73, 0x51, 0xa7, 0xd4, 0x89, 0x31, 0xb6, 0x41, 0xd6, 0x29, 0x86, 0xc5, 0xbb, 0x88, 0x79, 0x17, 0xbf, 0xfd, 0xf5, 0x1d, 0xd8, 0xca, 0x4f, 0x89, 0x59, 0x29, + /* (2^ 38)P */ 0x99, 0xc8, 0xbb, 0xb4, 0xf3, 0x8e, 0xbc, 0xae, 0xb9, 0x92, 0x69, 0xb2, 0x5a, 0x99, 0x48, 0x41, 0xfb, 0x2c, 0xf9, 0x34, 0x01, 0x0b, 0xe2, 0x24, 0xe8, 0xde, 0x05, 0x4a, 0x89, 0x58, 0xd1, 0x40, + /* (2^ 39)P */ 0xf6, 0x76, 0xaf, 0x85, 0x11, 0x0b, 0xb0, 0x46, 0x79, 0x7a, 0x18, 0x73, 0x78, 0xc7, 0xba, 0x26, 0x5f, 0xff, 0x8f, 0xab, 0x95, 0xbf, 0xc0, 0x3d, 0xd7, 0x24, 0x55, 0x94, 0xd8, 0x8b, 0x60, 0x2a, + /* (2^ 40)P */ 0x02, 0x63, 0x44, 0xbd, 0x88, 0x95, 0x44, 0x26, 0x9c, 0x43, 0x88, 0x03, 0x1c, 0xc2, 0x4b, 0x7c, 0xb2, 0x11, 0xbd, 0x83, 0xf3, 0xa4, 0x98, 0x8e, 0xb9, 0x76, 0xd8, 0xc9, 0x7b, 0x8d, 0x21, 0x26, + /* (2^ 41)P */ 0x8a, 0x17, 0x7c, 0x99, 0x42, 0x15, 0x08, 0xe3, 0x6f, 0x60, 0xb6, 0x6f, 0xa8, 0x29, 0x2d, 0x3c, 0x74, 0x93, 0x27, 0xfa, 0x36, 0x77, 0x21, 0x5c, 0xfa, 0xb1, 0xfe, 0x4a, 0x73, 0x05, 0xde, 0x7d, + /* (2^ 42)P */ 0xab, 0x2b, 0xd4, 0x06, 0x39, 0x0e, 0xf1, 0x3b, 0x9c, 0x64, 0x80, 0x19, 0x3e, 0x80, 0xf7, 0xe4, 0x7a, 0xbf, 0x95, 0x95, 0xf8, 0x3b, 0x05, 0xe6, 0x30, 0x55, 0x24, 0xda, 0x38, 0xaf, 0x4f, 0x39, + /* (2^ 43)P */ 0xf4, 0x28, 0x69, 0x89, 0x58, 0xfb, 0x8e, 0x7a, 0x3c, 0x11, 0x6a, 0xcc, 0xe9, 0x78, 0xc7, 0xfb, 0x6f, 0x59, 0xaf, 0x30, 0xe3, 0x0c, 0x67, 0x72, 0xf7, 0x6c, 0x3d, 0x1d, 0xa8, 0x22, 0xf2, 0x48, + /* (2^ 44)P */ 0xa7, 0xca, 0x72, 0x0d, 0x41, 0xce, 0x1f, 0xf0, 0x95, 0x55, 0x3b, 0x21, 0xc7, 0xec, 0x20, 0x5a, 0x83, 0x14, 0xfa, 0xc1, 0x65, 0x11, 0xc2, 0x7b, 0x41, 0xa7, 0xa8, 0x1d, 0xe3, 0x9a, 0xf8, 0x07, + /* (2^ 45)P */ 0xf9, 0x0f, 0x83, 0xc6, 0xb4, 0xc2, 0xd2, 0x05, 0x93, 0x62, 0x31, 0xc6, 0x0f, 0x33, 0x3e, 0xd4, 0x04, 0xa9, 0xd3, 0x96, 0x0a, 0x59, 0xa5, 0xa5, 0xb6, 0x33, 0x53, 0xa6, 0x91, 0xdb, 0x5e, 0x70, + /* (2^ 46)P */ 0xf7, 0xa5, 0xb9, 0x0b, 0x5e, 0xe1, 0x8e, 0x04, 0x5d, 0xaf, 0x0a, 0x9e, 0xca, 0xcf, 0x40, 0x32, 0x0b, 0xa4, 0xc4, 0xed, 0xce, 0x71, 0x4b, 0x8f, 0x6d, 0x4a, 0x54, 0xde, 0xa3, 0x0d, 0x1c, 0x62, + /* (2^ 47)P */ 0x91, 0x40, 0x8c, 0xa0, 0x36, 0x28, 0x87, 0x92, 0x45, 0x14, 0xc9, 0x10, 0xb0, 0x75, 0x83, 0xce, 0x94, 0x63, 0x27, 0x4f, 0x52, 0xeb, 0x72, 0x8a, 0x35, 0x36, 0xc8, 0x7e, 0xfa, 0xfc, 0x67, 0x26, + /* (2^ 48)P */ 0x2a, 0x75, 0xe8, 0x45, 0x33, 0x17, 0x4c, 0x7f, 0xa5, 0x79, 0x70, 0xee, 0xfe, 0x47, 0x1b, 0x06, 0x34, 0xff, 0x86, 0x9f, 0xfa, 0x9a, 0xdd, 0x25, 0x9c, 0xc8, 0x5d, 0x42, 0xf5, 0xce, 0x80, 0x37, + /* (2^ 49)P */ 0xe9, 0xb4, 0x3b, 0x51, 0x5a, 0x03, 0x46, 0x1a, 0xda, 0x5a, 0x57, 0xac, 0x79, 0xf3, 0x1e, 0x3e, 0x50, 0x4b, 0xa2, 0x5f, 0x1c, 0x5f, 0x8c, 0xc7, 0x22, 0x9f, 0xfd, 0x34, 0x76, 0x96, 0x1a, 0x32, + /* (2^ 50)P */ 0xfa, 0x27, 0x6e, 0x82, 0xb8, 0x07, 0x67, 0x94, 0xd0, 0x6f, 0x50, 0x4c, 0xd6, 0x84, 0xca, 0x3d, 0x36, 0x14, 0xe9, 0x75, 0x80, 0x21, 0x89, 0xc1, 0x84, 0x84, 
0x3b, 0x9b, 0x16, 0x84, 0x92, 0x6d, + /* (2^ 51)P */ 0xdf, 0x2d, 0x3f, 0x38, 0x40, 0xe8, 0x67, 0x3a, 0x75, 0x9b, 0x4f, 0x0c, 0xa3, 0xc9, 0xee, 0x33, 0x47, 0xef, 0x83, 0xa7, 0x6f, 0xc8, 0xc7, 0x3e, 0xc4, 0xfb, 0xc9, 0xba, 0x9f, 0x44, 0xec, 0x26, + /* (2^ 52)P */ 0x7d, 0x9e, 0x9b, 0xa0, 0xcb, 0x38, 0x0f, 0x5c, 0x8c, 0x47, 0xa3, 0x62, 0xc7, 0x8c, 0x16, 0x81, 0x1c, 0x12, 0xfc, 0x06, 0xd3, 0xb0, 0x23, 0x3e, 0xdd, 0xdc, 0xef, 0xa5, 0xa0, 0x8a, 0x23, 0x5a, + /* (2^ 53)P */ 0xff, 0x43, 0xea, 0xc4, 0x21, 0x61, 0xa2, 0x1b, 0xb5, 0x32, 0x88, 0x7c, 0x7f, 0xc7, 0xf8, 0x36, 0x9a, 0xf9, 0xdc, 0x0a, 0x0b, 0xea, 0xfb, 0x88, 0xf9, 0xeb, 0x5b, 0xc2, 0x8e, 0x93, 0xa9, 0x5c, + /* (2^ 54)P */ 0xa0, 0xcd, 0xfc, 0x51, 0x5e, 0x6a, 0x43, 0xd5, 0x3b, 0x89, 0xcd, 0xc2, 0x97, 0x47, 0xbc, 0x1d, 0x08, 0x4a, 0x22, 0xd3, 0x65, 0x6a, 0x34, 0x19, 0x66, 0xf4, 0x9a, 0x9b, 0xe4, 0x34, 0x50, 0x0f, + /* (2^ 55)P */ 0x6e, 0xb9, 0xe0, 0xa1, 0x67, 0x39, 0x3c, 0xf2, 0x88, 0x4d, 0x7a, 0x86, 0xfa, 0x08, 0x8b, 0xe5, 0x79, 0x16, 0x34, 0xa7, 0xc6, 0xab, 0x2f, 0xfb, 0x46, 0x69, 0x02, 0xb6, 0x1e, 0x38, 0x75, 0x2a, + /* (2^ 56)P */ 0xac, 0x20, 0x94, 0xc1, 0xe4, 0x3b, 0x0a, 0xc8, 0xdc, 0xb6, 0xf2, 0x81, 0xc6, 0xf6, 0xb1, 0x66, 0x88, 0x33, 0xe9, 0x61, 0x67, 0x03, 0xf7, 0x7c, 0xc4, 0xa4, 0x60, 0xa6, 0xd8, 0xbb, 0xab, 0x25, + /* (2^ 57)P */ 0x98, 0x51, 0xfd, 0x14, 0xba, 0x12, 0xea, 0x91, 0xa9, 0xff, 0x3c, 0x4a, 0xfc, 0x50, 0x49, 0x68, 0x28, 0xad, 0xf5, 0x30, 0x21, 0x84, 0x26, 0xf8, 0x41, 0xa4, 0x01, 0x53, 0xf7, 0x88, 0xa9, 0x3e, + /* (2^ 58)P */ 0x6f, 0x8c, 0x5f, 0x69, 0x9a, 0x10, 0x78, 0xc9, 0xf3, 0xc3, 0x30, 0x05, 0x4a, 0xeb, 0x46, 0x17, 0x95, 0x99, 0x45, 0xb4, 0x77, 0x6d, 0x4d, 0x44, 0xc7, 0x5c, 0x4e, 0x05, 0x8c, 0x2b, 0x95, 0x75, + /* (2^ 59)P */ 0xaa, 0xd6, 0xf4, 0x15, 0x79, 0x3f, 0x70, 0xa3, 0xd8, 0x47, 0x26, 0x2f, 0x20, 0x46, 0xc3, 0x66, 0x4b, 0x64, 0x1d, 0x81, 0xdf, 0x69, 0x14, 0xd0, 0x1f, 0xd7, 0xa5, 0x81, 0x7d, 0xa4, 0xfe, 0x77, + /* (2^ 60)P */ 0x81, 0xa3, 0x7c, 0xf5, 0x9e, 0x52, 0xe9, 0xc5, 0x1a, 0x88, 0x2f, 0xce, 0xb9, 0xb4, 0xee, 0x6e, 0xd6, 0x9b, 0x00, 0xe8, 0x28, 0x1a, 0xe9, 0xb6, 0xec, 0x3f, 0xfc, 0x9a, 0x3e, 0xbe, 0x80, 0x4b, + /* (2^ 61)P */ 0xc5, 0xd2, 0xae, 0x26, 0xc5, 0x73, 0x37, 0x7e, 0x9d, 0xa4, 0xc9, 0x53, 0xb4, 0xfc, 0x4a, 0x1b, 0x4d, 0xb2, 0xff, 0xba, 0xd7, 0xbd, 0x20, 0xa9, 0x0e, 0x40, 0x2d, 0x12, 0x9f, 0x69, 0x54, 0x7c, + /* (2^ 62)P */ 0xc8, 0x4b, 0xa9, 0x4f, 0xe1, 0xc8, 0x46, 0xef, 0x5e, 0xed, 0x52, 0x29, 0xce, 0x74, 0xb0, 0xe0, 0xd5, 0x85, 0xd8, 0xdb, 0xe1, 0x50, 0xa4, 0xbe, 0x2c, 0x71, 0x0f, 0x32, 0x49, 0x86, 0xb6, 0x61, + /* (2^ 63)P */ 0xd1, 0xbd, 0xcc, 0x09, 0x73, 0x5f, 0x48, 0x8a, 0x2d, 0x1a, 0x4d, 0x7d, 0x0d, 0x32, 0x06, 0xbd, 0xf4, 0xbe, 0x2d, 0x32, 0x73, 0x29, 0x23, 0x25, 0x70, 0xf7, 0x17, 0x8c, 0x75, 0xc4, 0x5d, 0x44, + /* (2^ 64)P */ 0x3c, 0x93, 0xc8, 0x7c, 0x17, 0x34, 0x04, 0xdb, 0x9f, 0x05, 0xea, 0x75, 0x21, 0xe8, 0x6f, 0xed, 0x34, 0xdb, 0x53, 0xc0, 0xfd, 0xbe, 0xfe, 0x1e, 0x99, 0xaf, 0x5d, 0xc6, 0x67, 0xe8, 0xdb, 0x4a, + /* (2^ 65)P */ 0xdf, 0x09, 0x06, 0xa9, 0xa2, 0x71, 0xcd, 0x3a, 0x50, 0x40, 0xd0, 0x6d, 0x85, 0x91, 0xe9, 0xe5, 0x3c, 0xc2, 0x57, 0x81, 0x68, 0x9b, 0xc6, 0x1e, 0x4d, 0xfe, 0x5c, 0x88, 0xf6, 0x27, 0x74, 0x69, + /* (2^ 66)P */ 0x51, 0xa8, 0xe1, 0x65, 0x9b, 0x7b, 0xbe, 0xd7, 0xdd, 0x36, 0xc5, 0x22, 0xd5, 0x28, 0x3d, 0xa0, 0x45, 0xb6, 0xd2, 0x8f, 0x65, 0x9d, 0x39, 0x28, 0xe1, 0x41, 0x26, 0x7c, 0xe1, 0xb7, 0xe5, 0x49, + /* (2^ 67)P */ 0xa4, 0x57, 0x04, 0x70, 0x98, 0x3a, 0x8c, 0x6f, 0x78, 0x67, 0xbb, 0x5e, 0xa2, 0xf0, 0x78, 0x50, 0x0f, 0x96, 0x82, 0xc3, 0xcb, 0x3c, 0x3c, 0xd1, 0xb1, 0x84, 
0xdf, 0xa7, 0x58, 0x32, 0x00, 0x2e, + /* (2^ 68)P */ 0x1c, 0x6a, 0x29, 0xe6, 0x9b, 0xf3, 0xd1, 0x8a, 0xb2, 0xbf, 0x5f, 0x2a, 0x65, 0xaa, 0xee, 0xc1, 0xcb, 0xf3, 0x26, 0xfd, 0x73, 0x06, 0xee, 0x33, 0xcc, 0x2c, 0x9d, 0xa6, 0x73, 0x61, 0x25, 0x59, + /* (2^ 69)P */ 0x41, 0xfc, 0x18, 0x4e, 0xaa, 0x07, 0xea, 0x41, 0x1e, 0xa5, 0x87, 0x7c, 0x52, 0x19, 0xfc, 0xd9, 0x6f, 0xca, 0x31, 0x58, 0x80, 0xcb, 0xaa, 0xbd, 0x4f, 0x69, 0x16, 0xc9, 0x2d, 0x65, 0x5b, 0x44, + /* (2^ 70)P */ 0x15, 0x23, 0x17, 0xf2, 0xa7, 0xa3, 0x92, 0xce, 0x64, 0x99, 0x1b, 0xe1, 0x2d, 0x28, 0xdc, 0x1e, 0x4a, 0x31, 0x4c, 0xe0, 0xaf, 0x3a, 0x82, 0xa1, 0x86, 0xf5, 0x7c, 0x43, 0x94, 0x2d, 0x0a, 0x79, + /* (2^ 71)P */ 0x09, 0xe0, 0xf6, 0x93, 0xfb, 0x47, 0xc4, 0x71, 0x76, 0x52, 0x84, 0x22, 0x67, 0xa5, 0x22, 0x89, 0x69, 0x51, 0x4f, 0x20, 0x3b, 0x90, 0x70, 0xbf, 0xfe, 0x19, 0xa3, 0x1b, 0x89, 0x89, 0x7a, 0x2f, + /* (2^ 72)P */ 0x0c, 0x14, 0xe2, 0x77, 0xb5, 0x8e, 0xa0, 0x02, 0xf4, 0xdc, 0x7b, 0x42, 0xd4, 0x4e, 0x9a, 0xed, 0xd1, 0x3c, 0x32, 0xe4, 0x44, 0xec, 0x53, 0x52, 0x5b, 0x35, 0xe9, 0x14, 0x3c, 0x36, 0x88, 0x3e, + /* (2^ 73)P */ 0x8c, 0x0b, 0x11, 0x77, 0x42, 0xc1, 0x66, 0xaa, 0x90, 0x33, 0xa2, 0x10, 0x16, 0x39, 0xe0, 0x1a, 0xa2, 0xc2, 0x3f, 0xc9, 0x12, 0xbd, 0x30, 0x20, 0xab, 0xc7, 0x55, 0x95, 0x57, 0x41, 0xe1, 0x3e, + /* (2^ 74)P */ 0x41, 0x7d, 0x6e, 0x6d, 0x3a, 0xde, 0x14, 0x92, 0xfe, 0x7e, 0xf1, 0x07, 0x86, 0xd8, 0xcd, 0x3c, 0x17, 0x12, 0xe1, 0xf8, 0x88, 0x12, 0x4f, 0x67, 0xd0, 0x93, 0x9f, 0x32, 0x0f, 0x25, 0x82, 0x56, + /* (2^ 75)P */ 0x6e, 0x39, 0x2e, 0x6d, 0x13, 0x0b, 0xf0, 0x6c, 0xbf, 0xde, 0x14, 0x10, 0x6f, 0xf8, 0x4c, 0x6e, 0x83, 0x4e, 0xcc, 0xbf, 0xb5, 0xb1, 0x30, 0x59, 0xb6, 0x16, 0xba, 0x8a, 0xb4, 0x69, 0x70, 0x04, + /* (2^ 76)P */ 0x93, 0x07, 0xb2, 0x69, 0xab, 0xe4, 0x4c, 0x0d, 0x9e, 0xfb, 0xd0, 0x97, 0x1a, 0xb9, 0x4d, 0xb2, 0x1d, 0xd0, 0x00, 0x4e, 0xf5, 0x50, 0xfa, 0xcd, 0xb5, 0xdd, 0x8b, 0x36, 0x85, 0x10, 0x1b, 0x22, + /* (2^ 77)P */ 0xd2, 0xd8, 0xe3, 0xb1, 0x68, 0x94, 0xe5, 0xe7, 0x93, 0x2f, 0x12, 0xbd, 0x63, 0x65, 0xc5, 0x53, 0x09, 0x3f, 0x66, 0xe0, 0x03, 0xa9, 0xe8, 0xee, 0x42, 0x3d, 0xbe, 0xcb, 0x62, 0xa6, 0xef, 0x61, + /* (2^ 78)P */ 0x2a, 0xab, 0x6e, 0xde, 0xdd, 0xdd, 0xf8, 0x2c, 0x31, 0xf2, 0x35, 0x14, 0xd5, 0x0a, 0xf8, 0x9b, 0x73, 0x49, 0xf0, 0xc9, 0xce, 0xda, 0xea, 0x5d, 0x27, 0x9b, 0xd2, 0x41, 0x5d, 0x5b, 0x27, 0x29, + /* (2^ 79)P */ 0x4f, 0xf1, 0xeb, 0x95, 0x08, 0x0f, 0xde, 0xcf, 0xa7, 0x05, 0x49, 0x05, 0x6b, 0xb9, 0xaa, 0xb9, 0xfd, 0x20, 0xc4, 0xa1, 0xd9, 0x0d, 0xe8, 0xca, 0xc7, 0xbb, 0x73, 0x16, 0x2f, 0xbf, 0x63, 0x0a, + /* (2^ 80)P */ 0x8c, 0xbc, 0x8f, 0x95, 0x11, 0x6e, 0x2f, 0x09, 0xad, 0x2f, 0x82, 0x04, 0xe8, 0x81, 0x2a, 0x67, 0x17, 0x25, 0xd5, 0x60, 0x15, 0x35, 0xc8, 0xca, 0xf8, 0x92, 0xf1, 0xc8, 0x22, 0x77, 0x3f, 0x6f, + /* (2^ 81)P */ 0xb7, 0x94, 0xe8, 0xc2, 0xcc, 0x90, 0xba, 0xf8, 0x0d, 0x9f, 0xff, 0x38, 0xa4, 0x57, 0x75, 0x2c, 0x59, 0x23, 0xe5, 0x5a, 0x85, 0x1d, 0x4d, 0x89, 0x69, 0x3d, 0x74, 0x7b, 0x15, 0x22, 0xe1, 0x68, + /* (2^ 82)P */ 0xf3, 0x19, 0xb9, 0xcf, 0x70, 0x55, 0x7e, 0xd8, 0xb9, 0x8d, 0x79, 0x95, 0xcd, 0xde, 0x2c, 0x3f, 0xce, 0xa2, 0xc0, 0x10, 0x47, 0x15, 0x21, 0x21, 0xb2, 0xc5, 0x6d, 0x24, 0x15, 0xa1, 0x66, 0x3c, + /* (2^ 83)P */ 0x72, 0xcb, 0x4e, 0x29, 0x62, 0xc5, 0xed, 0xcb, 0x16, 0x0b, 0x28, 0x6a, 0xc3, 0x43, 0x71, 0xba, 0x67, 0x8b, 0x07, 0xd4, 0xef, 0xc2, 0x10, 0x96, 0x1e, 0x4b, 0x6a, 0x94, 0x5d, 0x73, 0x44, 0x61, + /* (2^ 84)P */ 0x50, 0x33, 0x5b, 0xd7, 0x1e, 0x11, 0x6f, 0x53, 0x1b, 0xd8, 0x41, 0x20, 0x8c, 0xdb, 0x11, 0x02, 0x3c, 0x41, 0x10, 0x0e, 0x00, 0xb1, 0x3c, 0xf9, 0x76, 0x88, 
0x9e, 0x03, 0x3c, 0xfd, 0x9d, 0x14, + /* (2^ 85)P */ 0x5b, 0x15, 0x63, 0x6b, 0xe4, 0xdd, 0x79, 0xd4, 0x76, 0x79, 0x83, 0x3c, 0xe9, 0x15, 0x6e, 0xb6, 0x38, 0xe0, 0x13, 0x1f, 0x3b, 0xe4, 0xfd, 0xda, 0x35, 0x0b, 0x4b, 0x2e, 0x1a, 0xda, 0xaf, 0x5f, + /* (2^ 86)P */ 0x81, 0x75, 0x19, 0x17, 0xdf, 0xbb, 0x00, 0x36, 0xc2, 0xd2, 0x3c, 0xbe, 0x0b, 0x05, 0x72, 0x39, 0x86, 0xbe, 0xd5, 0xbd, 0x6d, 0x90, 0x38, 0x59, 0x0f, 0x86, 0x9b, 0x3f, 0xe4, 0xe5, 0xfc, 0x34, + /* (2^ 87)P */ 0x02, 0x4d, 0xd1, 0x42, 0xcd, 0xa4, 0xa8, 0x75, 0x65, 0xdf, 0x41, 0x34, 0xc5, 0xab, 0x8d, 0x82, 0xd3, 0x31, 0xe1, 0xd2, 0xed, 0xab, 0xdc, 0x33, 0x5f, 0xd2, 0x14, 0xb8, 0x6f, 0xd7, 0xba, 0x3e, + /* (2^ 88)P */ 0x0f, 0xe1, 0x70, 0x6f, 0x56, 0x6f, 0x90, 0xd4, 0x5a, 0x0f, 0x69, 0x51, 0xaa, 0xf7, 0x12, 0x5d, 0xf2, 0xfc, 0xce, 0x76, 0x6e, 0xb1, 0xad, 0x45, 0x99, 0x29, 0x23, 0xad, 0xae, 0x68, 0xf7, 0x01, + /* (2^ 89)P */ 0xbd, 0xfe, 0x48, 0x62, 0x7b, 0xc7, 0x6c, 0x2b, 0xfd, 0xaf, 0x3a, 0xec, 0x28, 0x06, 0xd3, 0x3c, 0x6a, 0x48, 0xef, 0xd4, 0x80, 0x0b, 0x1c, 0xce, 0x23, 0x6c, 0xf6, 0xa6, 0x2e, 0xff, 0x3b, 0x4c, + /* (2^ 90)P */ 0x5f, 0xeb, 0xea, 0x4a, 0x09, 0xc4, 0x2e, 0x3f, 0xa7, 0x2c, 0x37, 0x6e, 0x28, 0x9b, 0xb1, 0x61, 0x1d, 0x70, 0x2a, 0xde, 0x66, 0xa9, 0xef, 0x5e, 0xef, 0xe3, 0x55, 0xde, 0x65, 0x05, 0xb2, 0x23, + /* (2^ 91)P */ 0x57, 0x85, 0xd5, 0x79, 0x52, 0xca, 0x01, 0xe3, 0x4f, 0x87, 0xc2, 0x27, 0xce, 0xd4, 0xb2, 0x07, 0x67, 0x1d, 0xcf, 0x9d, 0x8a, 0xcd, 0x32, 0xa5, 0x56, 0xff, 0x2b, 0x3f, 0xe2, 0xfe, 0x52, 0x2a, + /* (2^ 92)P */ 0x3d, 0x66, 0xd8, 0x7c, 0xb3, 0xef, 0x24, 0x86, 0x94, 0x75, 0xbd, 0xff, 0x20, 0xac, 0xc7, 0xbb, 0x45, 0x74, 0xd3, 0x82, 0x9c, 0x5e, 0xb8, 0x57, 0x66, 0xec, 0xa6, 0x86, 0xcb, 0x52, 0x30, 0x7b, + /* (2^ 93)P */ 0x1e, 0xe9, 0x25, 0x25, 0xad, 0xf0, 0x82, 0x34, 0xa0, 0xdc, 0x8e, 0xd2, 0x43, 0x80, 0xb6, 0x2c, 0x3a, 0x00, 0x1b, 0x2e, 0x05, 0x6d, 0x4f, 0xaf, 0x0a, 0x1b, 0x78, 0x29, 0x25, 0x8c, 0x5f, 0x18, + /* (2^ 94)P */ 0xd6, 0xe0, 0x0c, 0xd8, 0x5b, 0xde, 0x41, 0xaa, 0xd6, 0xe9, 0x53, 0x68, 0x41, 0xb2, 0x07, 0x94, 0x3a, 0x4c, 0x7f, 0x35, 0x6e, 0xc3, 0x3e, 0x56, 0xce, 0x7b, 0x29, 0x0e, 0xdd, 0xb8, 0xc4, 0x4c, + /* (2^ 95)P */ 0x0e, 0x73, 0xb8, 0xff, 0x52, 0x1a, 0xfc, 0xa2, 0x37, 0x8e, 0x05, 0x67, 0x6e, 0xf1, 0x11, 0x18, 0xe1, 0x4e, 0xdf, 0xcd, 0x66, 0xa3, 0xf9, 0x10, 0x99, 0xf0, 0xb9, 0xa0, 0xc4, 0xa0, 0xf4, 0x72, + /* (2^ 96)P */ 0xa7, 0x4e, 0x3f, 0x66, 0x6f, 0xc0, 0x16, 0x8c, 0xba, 0x0f, 0x97, 0x4e, 0xf7, 0x3a, 0x3b, 0x69, 0x45, 0xc3, 0x9e, 0xd6, 0xf1, 0xe7, 0x02, 0x21, 0x89, 0x80, 0x8a, 0x96, 0xbc, 0x3c, 0xa5, 0x0b, + /* (2^ 97)P */ 0x37, 0x55, 0xa1, 0xfe, 0xc7, 0x9d, 0x3d, 0xca, 0x93, 0x64, 0x53, 0x51, 0xbb, 0x24, 0x68, 0x4c, 0xb1, 0x06, 0x40, 0x84, 0x14, 0x63, 0x88, 0xb9, 0x60, 0xcc, 0x54, 0xb4, 0x2a, 0xa7, 0xd2, 0x40, + /* (2^ 98)P */ 0x75, 0x09, 0x57, 0x12, 0xb7, 0xa1, 0x36, 0x59, 0x57, 0xa6, 0xbd, 0xde, 0x48, 0xd6, 0xb9, 0x91, 0xea, 0x30, 0x43, 0xb6, 0x4b, 0x09, 0x44, 0x33, 0xd0, 0x51, 0xee, 0x12, 0x0d, 0xa1, 0x6b, 0x00, + /* (2^ 99)P */ 0x58, 0x5d, 0xde, 0xf5, 0x68, 0x84, 0x22, 0x19, 0xb0, 0x05, 0xcc, 0x38, 0x4c, 0x2f, 0xb1, 0x0e, 0x90, 0x19, 0x60, 0xd5, 0x9d, 0x9f, 0x03, 0xa1, 0x0b, 0x0e, 0xff, 0x4f, 0xce, 0xd4, 0x02, 0x45, + /* (2^100)P */ 0x89, 0xc1, 0x37, 0x68, 0x10, 0x54, 0x20, 0xeb, 0x3c, 0xb9, 0xd3, 0x6d, 0x4c, 0x54, 0xf6, 0xd0, 0x4f, 0xd7, 0x16, 0xc4, 0x64, 0x70, 0x72, 0x40, 0xf0, 0x2e, 0x50, 0x4b, 0x11, 0xc6, 0x15, 0x6e, + /* (2^101)P */ 0x6b, 0xa7, 0xb1, 0xcf, 0x98, 0xa3, 0xf2, 0x4d, 0xb1, 0xf6, 0xf2, 0x19, 0x74, 0x6c, 0x25, 0x11, 0x43, 0x60, 0x6e, 0x06, 0x62, 0x79, 0x49, 0x4a, 0x44, 0x5b, 
0x35, 0x41, 0xab, 0x3a, 0x5b, 0x70, + /* (2^102)P */ 0xd8, 0xb1, 0x97, 0xd7, 0x36, 0xf5, 0x5e, 0x36, 0xdb, 0xf0, 0xdd, 0x22, 0xd6, 0x6b, 0x07, 0x00, 0x88, 0x5a, 0x57, 0xe0, 0xb0, 0x33, 0xbf, 0x3b, 0x4d, 0xca, 0xe4, 0xc8, 0x05, 0xaa, 0x77, 0x37, + /* (2^103)P */ 0x5f, 0xdb, 0x78, 0x55, 0xc8, 0x45, 0x27, 0x39, 0xe2, 0x5a, 0xae, 0xdb, 0x49, 0x41, 0xda, 0x6f, 0x67, 0x98, 0xdc, 0x8a, 0x0b, 0xb0, 0xf0, 0xb1, 0xa3, 0x1d, 0x6f, 0xd3, 0x37, 0x34, 0x96, 0x09, + /* (2^104)P */ 0x53, 0x38, 0xdc, 0xa5, 0x90, 0x4e, 0x82, 0x7e, 0xbd, 0x5c, 0x13, 0x1f, 0x64, 0xf6, 0xb5, 0xcc, 0xcc, 0x8f, 0xce, 0x87, 0x6c, 0xd8, 0x36, 0x67, 0x9f, 0x24, 0x04, 0x66, 0xe2, 0x3c, 0x5f, 0x62, + /* (2^105)P */ 0x3f, 0xf6, 0x02, 0x95, 0x05, 0xc8, 0x8a, 0xaf, 0x69, 0x14, 0x35, 0x2e, 0x0a, 0xe7, 0x05, 0x0c, 0x05, 0x63, 0x4b, 0x76, 0x9c, 0x2e, 0x29, 0x35, 0xc3, 0x3a, 0xe2, 0xc7, 0x60, 0x43, 0x39, 0x1a, + /* (2^106)P */ 0x64, 0x32, 0x18, 0x51, 0x32, 0xd5, 0xc6, 0xd5, 0x4f, 0xb7, 0xc2, 0x43, 0xbd, 0x5a, 0x06, 0x62, 0x9b, 0x3f, 0x97, 0x3b, 0xd0, 0xf5, 0xfb, 0xb5, 0x5e, 0x6e, 0x20, 0x61, 0x36, 0xda, 0xa3, 0x13, + /* (2^107)P */ 0xe5, 0x94, 0x5d, 0x72, 0x37, 0x58, 0xbd, 0xc6, 0xc5, 0x16, 0x50, 0x20, 0x12, 0x09, 0xe3, 0x18, 0x68, 0x3c, 0x03, 0x70, 0x15, 0xce, 0x88, 0x20, 0x87, 0x79, 0x83, 0x5c, 0x49, 0x1f, 0xba, 0x7f, + /* (2^108)P */ 0x9d, 0x07, 0xf9, 0xf2, 0x23, 0x74, 0x8c, 0x5a, 0xc5, 0x3f, 0x02, 0x34, 0x7b, 0x15, 0x35, 0x17, 0x51, 0xb3, 0xfa, 0xd2, 0x9a, 0xb4, 0xf9, 0xe4, 0x3c, 0xe3, 0x78, 0xc8, 0x72, 0xff, 0x91, 0x66, + /* (2^109)P */ 0x3e, 0xff, 0x5e, 0xdc, 0xde, 0x2a, 0x2c, 0x12, 0xf4, 0x6c, 0x95, 0xd8, 0xf1, 0x4b, 0xdd, 0xf8, 0xda, 0x5b, 0x9e, 0x9e, 0x5d, 0x20, 0x86, 0xeb, 0x43, 0xc7, 0x75, 0xd9, 0xb9, 0x92, 0x9b, 0x04, + /* (2^110)P */ 0x5a, 0xc0, 0xf6, 0xb0, 0x30, 0x97, 0x37, 0xa5, 0x53, 0xa5, 0xf3, 0xc6, 0xac, 0xff, 0xa0, 0x72, 0x6d, 0xcd, 0x0d, 0xb2, 0x34, 0x2c, 0x03, 0xb0, 0x4a, 0x16, 0xd5, 0x88, 0xbc, 0x9d, 0x0e, 0x47, + /* (2^111)P */ 0x47, 0xc0, 0x37, 0xa2, 0x0c, 0xf1, 0x9c, 0xb1, 0xa2, 0x81, 0x6c, 0x1f, 0x71, 0x66, 0x54, 0xb6, 0x43, 0x0b, 0xd8, 0x6d, 0xd1, 0x1b, 0x32, 0xb3, 0x8e, 0xbe, 0x5f, 0x0c, 0x60, 0x4f, 0xc1, 0x48, + /* (2^112)P */ 0x03, 0xc8, 0xa6, 0x4a, 0x26, 0x1c, 0x45, 0x66, 0xa6, 0x7d, 0xfa, 0xa4, 0x04, 0x39, 0x6e, 0xb6, 0x95, 0x83, 0x12, 0xb3, 0xb0, 0x19, 0x5f, 0xd4, 0x10, 0xbc, 0xc9, 0xc3, 0x27, 0x26, 0x60, 0x31, + /* (2^113)P */ 0x0d, 0xe1, 0xe4, 0x32, 0x48, 0xdc, 0x20, 0x31, 0xf7, 0x17, 0xc7, 0x56, 0x67, 0xc4, 0x20, 0xeb, 0x94, 0x02, 0x28, 0x67, 0x3f, 0x2e, 0xf5, 0x00, 0x09, 0xc5, 0x30, 0x47, 0xc1, 0x4f, 0x6d, 0x56, + /* (2^114)P */ 0x06, 0x72, 0x83, 0xfd, 0x40, 0x5d, 0x3a, 0x7e, 0x7a, 0x54, 0x59, 0x71, 0xdc, 0x26, 0xe9, 0xc1, 0x95, 0x60, 0x8d, 0xa6, 0xfb, 0x30, 0x67, 0x21, 0xa7, 0xce, 0x69, 0x3f, 0x84, 0xc3, 0xe8, 0x22, + /* (2^115)P */ 0x2b, 0x4b, 0x0e, 0x93, 0xe8, 0x74, 0xd0, 0x33, 0x16, 0x58, 0xd1, 0x84, 0x0e, 0x35, 0xe4, 0xb6, 0x65, 0x23, 0xba, 0xd6, 0x6a, 0xc2, 0x34, 0x55, 0xf3, 0xf3, 0xf1, 0x89, 0x2f, 0xc1, 0x73, 0x77, + /* (2^116)P */ 0xaa, 0x62, 0x79, 0xa5, 0x4d, 0x40, 0xba, 0x8c, 0x56, 0xce, 0x99, 0x19, 0xa8, 0x97, 0x98, 0x5b, 0xfc, 0x92, 0x16, 0x12, 0x2f, 0x86, 0x8e, 0x50, 0x91, 0xc2, 0x93, 0xa0, 0x7f, 0x90, 0x81, 0x3a, + /* (2^117)P */ 0x10, 0xa5, 0x25, 0x47, 0xff, 0xd0, 0xde, 0x0d, 0x03, 0xc5, 0x3f, 0x67, 0x10, 0xcc, 0xd8, 0x10, 0x89, 0x4e, 0x1f, 0x9f, 0x1c, 0x15, 0x9d, 0x5b, 0x4c, 0xa4, 0x09, 0xcb, 0xd5, 0xc1, 0xa5, 0x32, + /* (2^118)P */ 0xfb, 0x41, 0x05, 0xb9, 0x42, 0xa4, 0x0a, 0x1e, 0xdb, 0x85, 0xb4, 0xc1, 0x7c, 0xeb, 0x85, 0x5f, 0xe5, 0xf2, 0x9d, 0x8a, 0xce, 0x95, 0xe5, 0xbe, 0x36, 0x22, 
0x42, 0x22, 0xc7, 0x96, 0xe4, 0x25, + /* (2^119)P */ 0xb9, 0xe5, 0x0f, 0xcd, 0x46, 0x3c, 0xdf, 0x5e, 0x88, 0x33, 0xa4, 0xd2, 0x7e, 0x5a, 0xe7, 0x34, 0x52, 0xe3, 0x61, 0xd7, 0x11, 0xde, 0x88, 0xe4, 0x5c, 0x54, 0x85, 0xa0, 0x01, 0x8a, 0x87, 0x0e, + /* (2^120)P */ 0x04, 0xbb, 0x21, 0xe0, 0x77, 0x3c, 0x49, 0xba, 0x9a, 0x89, 0xdf, 0xc7, 0x43, 0x18, 0x4d, 0x2b, 0x67, 0x0d, 0xe8, 0x7a, 0x48, 0x7a, 0xa3, 0x9e, 0x94, 0x17, 0xe4, 0x11, 0x80, 0x95, 0xa9, 0x67, + /* (2^121)P */ 0x65, 0xb0, 0x97, 0x66, 0x1a, 0x05, 0x58, 0x4b, 0xd4, 0xa6, 0x6b, 0x8d, 0x7d, 0x3f, 0xe3, 0x47, 0xc1, 0x46, 0xca, 0x83, 0xd4, 0xa8, 0x4d, 0xbb, 0x0d, 0xdb, 0xc2, 0x81, 0xa1, 0xca, 0xbe, 0x68, + /* (2^122)P */ 0xa5, 0x9a, 0x98, 0x0b, 0xe9, 0x80, 0x89, 0x8d, 0x9b, 0xc9, 0x93, 0x2c, 0x4a, 0xb1, 0x5e, 0xf9, 0xa2, 0x73, 0x6e, 0x79, 0xc4, 0xc7, 0xc6, 0x51, 0x69, 0xb5, 0xef, 0xb5, 0x63, 0x83, 0x22, 0x6e, + /* (2^123)P */ 0xc8, 0x24, 0xd6, 0x2d, 0xb0, 0xc0, 0xbb, 0xc6, 0xee, 0x70, 0x81, 0xec, 0x7d, 0xb4, 0x7e, 0x77, 0xa9, 0xaf, 0xcf, 0x04, 0xa0, 0x15, 0xde, 0x3c, 0x9b, 0xbf, 0x60, 0x71, 0x08, 0xbc, 0xc6, 0x1d, + /* (2^124)P */ 0x02, 0x40, 0xc3, 0xee, 0x43, 0xe0, 0x07, 0x2e, 0x7f, 0xdc, 0x68, 0x7a, 0x67, 0xfc, 0xe9, 0x18, 0x9a, 0x5b, 0xd1, 0x8b, 0x18, 0x03, 0xda, 0xd8, 0x53, 0x82, 0x56, 0x00, 0xbb, 0xc3, 0xfb, 0x48, + /* (2^125)P */ 0xe1, 0x4c, 0x65, 0xfb, 0x4c, 0x7d, 0x54, 0x57, 0xad, 0xe2, 0x58, 0xa0, 0x82, 0x5b, 0x56, 0xd3, 0x78, 0x44, 0x15, 0xbf, 0x0b, 0xaf, 0x3e, 0xf6, 0x18, 0xbb, 0xdf, 0x14, 0xf1, 0x1e, 0x53, 0x47, + /* (2^126)P */ 0x87, 0xc5, 0x78, 0x42, 0x0a, 0x63, 0xec, 0xe1, 0xf3, 0x83, 0x8e, 0xca, 0x46, 0xd5, 0x07, 0x55, 0x2b, 0x0c, 0xdc, 0x3a, 0xc6, 0x35, 0xe1, 0x85, 0x4e, 0x84, 0x82, 0x56, 0xa8, 0xef, 0xa7, 0x0a, + /* (2^127)P */ 0x15, 0xf6, 0xe1, 0xb3, 0xa8, 0x1b, 0x69, 0x72, 0xfa, 0x3f, 0xbe, 0x1f, 0x70, 0xe9, 0xb4, 0x32, 0x68, 0x78, 0xbb, 0x39, 0x2e, 0xd9, 0xb6, 0x97, 0xe8, 0x39, 0x2e, 0xa0, 0xde, 0x53, 0xfe, 0x2c, + /* (2^128)P */ 0xb0, 0x52, 0xcd, 0x85, 0xcd, 0x92, 0x73, 0x68, 0x31, 0x98, 0xe2, 0x10, 0xc9, 0x66, 0xff, 0x27, 0x06, 0x2d, 0x83, 0xa9, 0x56, 0x45, 0x13, 0x97, 0xa0, 0xf8, 0x84, 0x0a, 0x36, 0xb0, 0x9b, 0x26, + /* (2^129)P */ 0x5c, 0xf8, 0x43, 0x76, 0x45, 0x55, 0x6e, 0x70, 0x1b, 0x7d, 0x59, 0x9b, 0x8c, 0xa4, 0x34, 0x37, 0x72, 0xa4, 0xef, 0xc6, 0xe8, 0x91, 0xee, 0x7a, 0xe0, 0xd9, 0xa9, 0x98, 0xc1, 0xab, 0xd6, 0x5c, + /* (2^130)P */ 0x1a, 0xe4, 0x3c, 0xcb, 0x06, 0xde, 0x04, 0x0e, 0x38, 0xe1, 0x02, 0x34, 0x89, 0xeb, 0xc6, 0xd8, 0x72, 0x37, 0x6e, 0x68, 0xbb, 0x59, 0x46, 0x90, 0xc8, 0xa8, 0x6b, 0x74, 0x71, 0xc3, 0x15, 0x72, + /* (2^131)P */ 0xd9, 0xa2, 0xe4, 0xea, 0x7e, 0xa9, 0x12, 0xfd, 0xc5, 0xf2, 0x94, 0x63, 0x51, 0xb7, 0x14, 0x95, 0x94, 0xf2, 0x08, 0x92, 0x80, 0xd5, 0x6f, 0x26, 0xb9, 0x26, 0x9a, 0x61, 0x85, 0x70, 0x84, 0x5c, + /* (2^132)P */ 0xea, 0x94, 0xd6, 0xfe, 0x10, 0x54, 0x98, 0x52, 0x54, 0xd2, 0x2e, 0x4a, 0x93, 0x5b, 0x90, 0x3c, 0x67, 0xe4, 0x3b, 0x2d, 0x69, 0x47, 0xbb, 0x10, 0xe1, 0xe9, 0xe5, 0x69, 0x2d, 0x3d, 0x3b, 0x06, + /* (2^133)P */ 0xeb, 0x7d, 0xa5, 0xdd, 0xee, 0x26, 0x27, 0x47, 0x91, 0x18, 0xf4, 0x10, 0xae, 0xc4, 0xb6, 0xef, 0x14, 0x76, 0x30, 0x7b, 0x91, 0x41, 0x16, 0x2b, 0x7c, 0x5b, 0xf4, 0xc4, 0x4f, 0x55, 0x7c, 0x11, + /* (2^134)P */ 0x12, 0x88, 0x9d, 0x8f, 0x11, 0xf3, 0x7c, 0xc0, 0x39, 0x79, 0x01, 0x50, 0x20, 0xd8, 0xdb, 0x01, 0x27, 0x28, 0x1b, 0x17, 0xf4, 0x03, 0xe8, 0xd7, 0xea, 0x25, 0xd2, 0x87, 0x74, 0xe8, 0x15, 0x10, + /* (2^135)P */ 0x4d, 0xcc, 0x3a, 0xd2, 0xfe, 0xe3, 0x8d, 0xc5, 0x2d, 0xbe, 0xa7, 0x94, 0xc2, 0x91, 0xdb, 0x50, 0x57, 0xf4, 0x9c, 0x1c, 0x3d, 0xd4, 0x94, 0x0b, 0x4a, 0x52, 
0x37, 0x6e, 0xfa, 0x40, 0x16, 0x6b, + /* (2^136)P */ 0x09, 0x0d, 0xda, 0x5f, 0x6c, 0x34, 0x2f, 0x69, 0x51, 0x31, 0x4d, 0xfa, 0x59, 0x1c, 0x0b, 0x20, 0x96, 0xa2, 0x77, 0x07, 0x76, 0x6f, 0xc4, 0xb8, 0xcf, 0xfb, 0xfd, 0x3f, 0x5f, 0x39, 0x38, 0x4b, + /* (2^137)P */ 0x71, 0xd6, 0x54, 0xbe, 0x00, 0x5e, 0xd2, 0x18, 0xa6, 0xab, 0xc8, 0xbe, 0x82, 0x05, 0xd5, 0x60, 0x82, 0xb9, 0x78, 0x3b, 0x26, 0x8f, 0xad, 0x87, 0x32, 0x04, 0xda, 0x9c, 0x4e, 0xf6, 0xfd, 0x50, + /* (2^138)P */ 0xf0, 0xdc, 0x78, 0xc5, 0xaa, 0x67, 0xf5, 0x90, 0x3b, 0x13, 0xa3, 0xf2, 0x0e, 0x9b, 0x1e, 0xef, 0x71, 0xde, 0xd9, 0x42, 0x92, 0xba, 0xeb, 0x0e, 0xc7, 0x01, 0x31, 0xf0, 0x9b, 0x3c, 0x47, 0x15, + /* (2^139)P */ 0x95, 0x80, 0xb7, 0x56, 0xae, 0xe8, 0x77, 0x7c, 0x8e, 0x07, 0x6f, 0x6e, 0x66, 0xe7, 0x78, 0xb6, 0x1f, 0xba, 0x48, 0x53, 0x61, 0xb9, 0xa0, 0x2d, 0x0b, 0x3f, 0x73, 0xff, 0xc1, 0x31, 0xf9, 0x7c, + /* (2^140)P */ 0x6c, 0x36, 0x0a, 0x0a, 0xf5, 0x57, 0xb3, 0x26, 0x32, 0xd7, 0x87, 0x2b, 0xf4, 0x8c, 0x70, 0xe9, 0xc0, 0xb2, 0x1c, 0xf9, 0xa5, 0xee, 0x3a, 0xc1, 0x4c, 0xbb, 0x43, 0x11, 0x99, 0x0c, 0xd9, 0x35, + /* (2^141)P */ 0xdc, 0xd9, 0xa0, 0xa9, 0x04, 0xc4, 0xc1, 0x47, 0x51, 0xd2, 0x72, 0x19, 0x45, 0x58, 0x9e, 0x65, 0x31, 0x8c, 0xb3, 0x73, 0xc4, 0xa8, 0x75, 0x38, 0x24, 0x1f, 0x56, 0x79, 0xd3, 0x9e, 0xbd, 0x1f, + /* (2^142)P */ 0x8d, 0xc2, 0x1e, 0xd4, 0x6f, 0xbc, 0xfa, 0x11, 0xca, 0x2d, 0x2a, 0xcd, 0xe3, 0xdf, 0xf8, 0x7e, 0x95, 0x45, 0x40, 0x8c, 0x5d, 0x3b, 0xe7, 0x72, 0x27, 0x2f, 0xb7, 0x54, 0x49, 0xfa, 0x35, 0x61, + /* (2^143)P */ 0x9c, 0xb6, 0x24, 0xde, 0xa2, 0x32, 0xfc, 0xcc, 0x88, 0x5d, 0x09, 0x1f, 0x8c, 0x69, 0x55, 0x3f, 0x29, 0xf9, 0xc3, 0x5a, 0xed, 0x50, 0x33, 0xbe, 0xeb, 0x7e, 0x47, 0xca, 0x06, 0xf8, 0x9b, 0x5e, + /* (2^144)P */ 0x68, 0x9f, 0x30, 0x3c, 0xb6, 0x8f, 0xce, 0xe9, 0xf4, 0xf9, 0xe1, 0x65, 0x35, 0xf6, 0x76, 0x53, 0xf1, 0x93, 0x63, 0x5a, 0xb3, 0xcf, 0xaf, 0xd1, 0x06, 0x35, 0x62, 0xe5, 0xed, 0xa1, 0x32, 0x66, + /* (2^145)P */ 0x4c, 0xed, 0x2d, 0x0c, 0x39, 0x6c, 0x7d, 0x0b, 0x1f, 0xcb, 0x04, 0xdf, 0x81, 0x32, 0xcb, 0x56, 0xc7, 0xc3, 0xec, 0x49, 0x12, 0x5a, 0x30, 0x66, 0x2a, 0xa7, 0x8c, 0xa3, 0x60, 0x8b, 0x58, 0x5d, + /* (2^146)P */ 0x2d, 0xf4, 0xe5, 0xe8, 0x78, 0xbf, 0xec, 0xa6, 0xec, 0x3e, 0x8a, 0x3c, 0x4b, 0xb4, 0xee, 0x86, 0x04, 0x16, 0xd2, 0xfb, 0x48, 0x9c, 0x21, 0xec, 0x31, 0x67, 0xc3, 0x17, 0xf5, 0x1a, 0xaf, 0x1a, + /* (2^147)P */ 0xe7, 0xbd, 0x69, 0x67, 0x83, 0xa2, 0x06, 0xc3, 0xdb, 0x2a, 0x1e, 0x2b, 0x62, 0x80, 0x82, 0x20, 0xa6, 0x94, 0xff, 0xfb, 0x1f, 0xf5, 0x27, 0x80, 0x6b, 0xf2, 0x24, 0x11, 0xce, 0xa1, 0xcf, 0x76, + /* (2^148)P */ 0xb6, 0xab, 0x22, 0x24, 0x56, 0x00, 0xeb, 0x18, 0xc3, 0x29, 0x8c, 0x8f, 0xd5, 0xc4, 0x77, 0xf3, 0x1a, 0x56, 0x31, 0xf5, 0x07, 0xc2, 0xbb, 0x4d, 0x27, 0x8a, 0x12, 0x82, 0xf0, 0xb7, 0x53, 0x02, + /* (2^149)P */ 0xe0, 0x17, 0x2c, 0xb6, 0x1c, 0x09, 0x1f, 0x3d, 0xa9, 0x28, 0x46, 0xd6, 0xab, 0xe1, 0x60, 0x48, 0x53, 0x42, 0x9d, 0x30, 0x36, 0x74, 0xd1, 0x52, 0x76, 0xe5, 0xfa, 0x3e, 0xe1, 0x97, 0x6f, 0x35, + /* (2^150)P */ 0x5b, 0x53, 0x50, 0xa1, 0x1a, 0xe1, 0x51, 0xd3, 0xcc, 0x78, 0xd8, 0x1d, 0xbb, 0x45, 0x6b, 0x3e, 0x98, 0x2c, 0xd9, 0xbe, 0x28, 0x61, 0x77, 0x0c, 0xb8, 0x85, 0x28, 0x03, 0x93, 0xae, 0x34, 0x1d, + /* (2^151)P */ 0xc3, 0xa4, 0x5b, 0xa8, 0x8c, 0x48, 0xa0, 0x4b, 0xce, 0xe6, 0x9c, 0x3c, 0xc3, 0x48, 0x53, 0x98, 0x70, 0xa7, 0xbd, 0x97, 0x6f, 0x4c, 0x12, 0x66, 0x4a, 0x12, 0x54, 0x06, 0x29, 0xa0, 0x81, 0x0f, + /* (2^152)P */ 0xfd, 0x86, 0x9b, 0x56, 0xa6, 0x9c, 0xd0, 0x9e, 0x2d, 0x9a, 0xaf, 0x18, 0xfd, 0x09, 0x10, 0x81, 0x0a, 0xc2, 0xd8, 0x93, 0x3f, 0xd0, 0x08, 0xff, 0x6b, 0xf2, 
0xae, 0x9f, 0x19, 0x48, 0xa1, 0x52, + /* (2^153)P */ 0x73, 0x1b, 0x8d, 0x2d, 0xdc, 0xf9, 0x03, 0x3e, 0x70, 0x1a, 0x96, 0x73, 0x18, 0x80, 0x05, 0x42, 0x70, 0x59, 0xa3, 0x41, 0xf0, 0x87, 0xd9, 0xc0, 0x49, 0xd5, 0xc0, 0xa1, 0x15, 0x1f, 0xaa, 0x07, + /* (2^154)P */ 0x24, 0x72, 0xd2, 0x8c, 0xe0, 0x6c, 0xd4, 0xdf, 0x39, 0x42, 0x4e, 0x93, 0x4f, 0x02, 0x0a, 0x6d, 0x59, 0x7b, 0x89, 0x99, 0x63, 0x7a, 0x8a, 0x80, 0xa2, 0x95, 0x3d, 0xe1, 0xe9, 0x56, 0x45, 0x0a, + /* (2^155)P */ 0x45, 0x30, 0xc1, 0xe9, 0x1f, 0x99, 0x1a, 0xd2, 0xb8, 0x51, 0x77, 0xfe, 0x48, 0x85, 0x0e, 0x9b, 0x35, 0x00, 0xf3, 0x4b, 0xcb, 0x43, 0xa6, 0x5d, 0x21, 0xf7, 0x40, 0x39, 0xd6, 0x28, 0xdb, 0x77, + /* (2^156)P */ 0x11, 0x90, 0xdc, 0x4a, 0x61, 0xeb, 0x5e, 0xfc, 0xeb, 0x11, 0xc4, 0xe8, 0x9a, 0x41, 0x29, 0x52, 0x74, 0xcf, 0x1d, 0x7d, 0x78, 0xe7, 0xc3, 0x9e, 0xb5, 0x4c, 0x6e, 0x21, 0x3e, 0x05, 0x0d, 0x34, + /* (2^157)P */ 0xb4, 0xf2, 0x8d, 0xb4, 0x39, 0xaf, 0xc7, 0xca, 0x94, 0x0a, 0xa1, 0x71, 0x28, 0xec, 0xfa, 0xc0, 0xed, 0x75, 0xa5, 0x5c, 0x24, 0x69, 0x0a, 0x14, 0x4c, 0x3a, 0x27, 0x34, 0x71, 0xc3, 0xf1, 0x0c, + /* (2^158)P */ 0xa5, 0xb8, 0x24, 0xc2, 0x6a, 0x30, 0xee, 0xc8, 0xb0, 0x30, 0x49, 0xcb, 0x7c, 0xee, 0xea, 0x57, 0x4f, 0xe7, 0xcb, 0xaa, 0xbd, 0x06, 0xe8, 0xa1, 0x7d, 0x65, 0xeb, 0x2e, 0x74, 0x62, 0x9a, 0x7d, + /* (2^159)P */ 0x30, 0x48, 0x6c, 0x54, 0xef, 0xb6, 0xb6, 0x9e, 0x2e, 0x6e, 0xb3, 0xdd, 0x1f, 0xca, 0x5c, 0x88, 0x05, 0x71, 0x0d, 0xef, 0x83, 0xf3, 0xb9, 0xe6, 0x12, 0x04, 0x2e, 0x9d, 0xef, 0x4f, 0x65, 0x58, + /* (2^160)P */ 0x26, 0x8e, 0x0e, 0xbe, 0xff, 0xc4, 0x05, 0xa9, 0x6e, 0x81, 0x31, 0x9b, 0xdf, 0xe5, 0x2d, 0x94, 0xe1, 0x88, 0x2e, 0x80, 0x3f, 0x72, 0x7d, 0x49, 0x8d, 0x40, 0x2f, 0x60, 0xea, 0x4d, 0x68, 0x30, + /* (2^161)P */ 0x34, 0xcb, 0xe6, 0xa3, 0x78, 0xa2, 0xe5, 0x21, 0xc4, 0x1d, 0x15, 0x5b, 0x6f, 0x6e, 0xfb, 0xae, 0x15, 0xca, 0x77, 0x9d, 0x04, 0x8e, 0x0b, 0xb3, 0x81, 0x89, 0xb9, 0x53, 0xcf, 0xc9, 0xc3, 0x28, + /* (2^162)P */ 0x2a, 0xdd, 0x6c, 0x55, 0x21, 0xb7, 0x7f, 0x28, 0x74, 0x22, 0x02, 0x97, 0xa8, 0x7c, 0x31, 0x0d, 0x58, 0x32, 0x54, 0x3a, 0x42, 0xc7, 0x68, 0x74, 0x2f, 0x64, 0xb5, 0x4e, 0x46, 0x11, 0x7f, 0x4a, + /* (2^163)P */ 0xa6, 0x3a, 0x19, 0x4d, 0x77, 0xa4, 0x37, 0xa2, 0xa1, 0x29, 0x21, 0xa9, 0x6e, 0x98, 0x65, 0xd8, 0x88, 0x1a, 0x7c, 0xf8, 0xec, 0x15, 0xc5, 0x24, 0xeb, 0xf5, 0x39, 0x5f, 0x57, 0x03, 0x40, 0x60, + /* (2^164)P */ 0x27, 0x9b, 0x0a, 0x57, 0x89, 0xf1, 0xb9, 0x47, 0x78, 0x4b, 0x5e, 0x46, 0xde, 0xce, 0x98, 0x2b, 0x20, 0x5c, 0xb8, 0xdb, 0x51, 0xf5, 0x6d, 0x02, 0x01, 0x19, 0xe2, 0x47, 0x10, 0xd9, 0xfc, 0x74, + /* (2^165)P */ 0xa3, 0xbf, 0xc1, 0x23, 0x0a, 0xa9, 0xe2, 0x13, 0xf6, 0x19, 0x85, 0x47, 0x4e, 0x07, 0xb0, 0x0c, 0x44, 0xcf, 0xf6, 0x3a, 0xbe, 0xcb, 0xf1, 0x5f, 0xbe, 0x2d, 0x81, 0xbe, 0x38, 0x54, 0xfe, 0x67, + /* (2^166)P */ 0xb0, 0x05, 0x0f, 0xa4, 0x4f, 0xf6, 0x3c, 0xd1, 0x87, 0x37, 0x28, 0x32, 0x2f, 0xfb, 0x4d, 0x05, 0xea, 0x2a, 0x0d, 0x7f, 0x5b, 0x91, 0x73, 0x41, 0x4e, 0x0d, 0x61, 0x1f, 0x4f, 0x14, 0x2f, 0x48, + /* (2^167)P */ 0x34, 0x82, 0x7f, 0xb4, 0x01, 0x02, 0x21, 0xf6, 0x90, 0xb9, 0x70, 0x9e, 0x92, 0xe1, 0x0a, 0x5d, 0x7c, 0x56, 0x49, 0xb0, 0x55, 0xf4, 0xd7, 0xdc, 0x01, 0x6f, 0x91, 0xf0, 0xf1, 0xd0, 0x93, 0x7e, + /* (2^168)P */ 0xfa, 0xb4, 0x7d, 0x8a, 0xf1, 0xcb, 0x79, 0xdd, 0x2f, 0xc6, 0x74, 0x6f, 0xbf, 0x91, 0x83, 0xbe, 0xbd, 0x91, 0x82, 0x4b, 0xd1, 0x45, 0x71, 0x02, 0x05, 0x17, 0xbf, 0x2c, 0xea, 0x73, 0x5a, 0x58, + /* (2^169)P */ 0xb2, 0x0d, 0x8a, 0x92, 0x3e, 0xa0, 0x5c, 0x48, 0xe7, 0x57, 0x28, 0x74, 0xa5, 0x01, 0xfc, 0x10, 0xa7, 0x51, 0xd5, 0xd6, 0xdb, 0x2e, 0x48, 0x2f, 0x8a, 0xdb, 
0x8f, 0x04, 0xb5, 0x33, 0x04, 0x0f, + /* (2^170)P */ 0x47, 0x62, 0xdc, 0xd7, 0x8d, 0x2e, 0xda, 0x60, 0x9a, 0x81, 0xd4, 0x8c, 0xd3, 0xc9, 0xb4, 0x88, 0x97, 0x66, 0xf6, 0x01, 0xc0, 0x3a, 0x03, 0x13, 0x75, 0x7d, 0x36, 0x3b, 0xfe, 0x24, 0x3b, 0x27, + /* (2^171)P */ 0xd4, 0xb9, 0xb3, 0x31, 0x6a, 0xf6, 0xe8, 0xc6, 0xd5, 0x49, 0xdf, 0x94, 0xa4, 0x14, 0x15, 0x28, 0xa7, 0x3d, 0xb2, 0xc8, 0xdf, 0x6f, 0x72, 0xd1, 0x48, 0xe5, 0xde, 0x03, 0xd1, 0xe7, 0x3a, 0x4b, + /* (2^172)P */ 0x7e, 0x9d, 0x4b, 0xce, 0x19, 0x6e, 0x25, 0xc6, 0x1c, 0xc6, 0xe3, 0x86, 0xf1, 0x5c, 0x5c, 0xff, 0x45, 0xc1, 0x8e, 0x4b, 0xa3, 0x3c, 0xc6, 0xac, 0x74, 0x65, 0xe6, 0xfe, 0x88, 0x18, 0x62, 0x74, + /* (2^173)P */ 0x1e, 0x0a, 0x29, 0x45, 0x96, 0x40, 0x6f, 0x95, 0x2e, 0x96, 0x3a, 0x26, 0xe3, 0xf8, 0x0b, 0xef, 0x7b, 0x64, 0xc2, 0x5e, 0xeb, 0x50, 0x6a, 0xed, 0x02, 0x75, 0xca, 0x9d, 0x3a, 0x28, 0x94, 0x06, + /* (2^174)P */ 0xd1, 0xdc, 0xa2, 0x43, 0x36, 0x96, 0x9b, 0x76, 0x53, 0x53, 0xfc, 0x09, 0xea, 0xc8, 0xb7, 0x42, 0xab, 0x7e, 0x39, 0x13, 0xee, 0x2a, 0x00, 0x4f, 0x3a, 0xd6, 0xb7, 0x19, 0x2c, 0x5e, 0x00, 0x63, + /* (2^175)P */ 0xea, 0x3b, 0x02, 0x63, 0xda, 0x36, 0x67, 0xca, 0xb7, 0x99, 0x2a, 0xb1, 0x6d, 0x7f, 0x6c, 0x96, 0xe1, 0xc5, 0x37, 0xc5, 0x90, 0x93, 0xe0, 0xac, 0xee, 0x89, 0xaa, 0xa1, 0x63, 0x60, 0x69, 0x0b, + /* (2^176)P */ 0xe5, 0x56, 0x8c, 0x28, 0x97, 0x3e, 0xb0, 0xeb, 0xe8, 0x8b, 0x8c, 0x93, 0x9f, 0x9f, 0x2a, 0x43, 0x71, 0x7f, 0x71, 0x5b, 0x3d, 0xa9, 0xa5, 0xa6, 0x97, 0x9d, 0x8f, 0xe1, 0xc3, 0xb4, 0x5f, 0x1a, + /* (2^177)P */ 0xce, 0xcd, 0x60, 0x1c, 0xad, 0xe7, 0x94, 0x1c, 0xa0, 0xc4, 0x02, 0xfc, 0x43, 0x2a, 0x20, 0xee, 0x20, 0x6a, 0xc4, 0x67, 0xd8, 0xe4, 0xaf, 0x8d, 0x58, 0x7b, 0xc2, 0x8a, 0x3c, 0x26, 0x10, 0x0a, + /* (2^178)P */ 0x4a, 0x2a, 0x43, 0xe4, 0xdf, 0xa9, 0xde, 0xd0, 0xc5, 0x77, 0x92, 0xbe, 0x7b, 0xf8, 0x6a, 0x85, 0x1a, 0xc7, 0x12, 0xc2, 0xac, 0x72, 0x84, 0xce, 0x91, 0x1e, 0xbb, 0x9b, 0x6d, 0x1b, 0x15, 0x6f, + /* (2^179)P */ 0x6a, 0xd5, 0xee, 0x7c, 0x52, 0x6c, 0x77, 0x26, 0xec, 0xfa, 0xf8, 0xfb, 0xb7, 0x1c, 0x21, 0x7d, 0xcc, 0x09, 0x46, 0xfd, 0xa6, 0x66, 0xae, 0x37, 0x42, 0x0c, 0x77, 0xd2, 0x02, 0xb7, 0x81, 0x1f, + /* (2^180)P */ 0x92, 0x83, 0xc5, 0xea, 0x57, 0xb0, 0xb0, 0x2f, 0x9d, 0x4e, 0x74, 0x29, 0xfe, 0x89, 0xdd, 0xe1, 0xf8, 0xb4, 0xbe, 0x17, 0xeb, 0xf8, 0x64, 0xc9, 0x1e, 0xd4, 0xa2, 0xc9, 0x73, 0x10, 0x57, 0x29, + /* (2^181)P */ 0x54, 0xe2, 0xc0, 0x81, 0x89, 0xa1, 0x48, 0xa9, 0x30, 0x28, 0xb2, 0x65, 0x9b, 0x36, 0xf6, 0x2d, 0xc6, 0xd3, 0xcf, 0x5f, 0xd7, 0xb2, 0x3e, 0xa3, 0x1f, 0xa0, 0x99, 0x41, 0xec, 0xd6, 0x8c, 0x07, + /* (2^182)P */ 0x2f, 0x0d, 0x90, 0xad, 0x41, 0x4a, 0x58, 0x4a, 0x52, 0x4c, 0xc7, 0xe2, 0x78, 0x2b, 0x14, 0x32, 0x78, 0xc9, 0x31, 0x84, 0x33, 0xe8, 0xc4, 0x68, 0xc2, 0x9f, 0x68, 0x08, 0x90, 0xea, 0x69, 0x7f, + /* (2^183)P */ 0x65, 0x82, 0xa3, 0x46, 0x1e, 0xc8, 0xf2, 0x52, 0xfd, 0x32, 0xa8, 0x04, 0x2d, 0x07, 0x78, 0xfd, 0x94, 0x9e, 0x35, 0x25, 0xfa, 0xd5, 0xd7, 0x8c, 0xd2, 0x29, 0xcc, 0x54, 0x74, 0x1b, 0xe7, 0x4d, + /* (2^184)P */ 0xc9, 0x6a, 0xda, 0x1e, 0xad, 0x60, 0xeb, 0x42, 0x3a, 0x9c, 0xc0, 0xdb, 0xdf, 0x37, 0xad, 0x0a, 0x91, 0xc1, 0x3c, 0xe3, 0x71, 0x4b, 0x00, 0x81, 0x3c, 0x80, 0x22, 0x51, 0x34, 0xbe, 0xe6, 0x44, + /* (2^185)P */ 0xdb, 0x20, 0x19, 0xba, 0x88, 0x83, 0xfe, 0x03, 0x08, 0xb0, 0x0d, 0x15, 0x32, 0x7c, 0xd5, 0xf5, 0x29, 0x0c, 0xf6, 0x1a, 0x28, 0xc4, 0xc8, 0x49, 0xee, 0x1a, 0x70, 0xde, 0x18, 0xb5, 0xed, 0x21, + /* (2^186)P */ 0x99, 0xdc, 0x06, 0x8f, 0x41, 0x3e, 0xb6, 0x7f, 0xb8, 0xd7, 0x66, 0xc1, 0x99, 0x0d, 0x46, 0xa4, 0x83, 0x0a, 0x52, 0xce, 0x48, 0x52, 0xdd, 0x24, 0x58, 0x83, 
0x92, 0x2b, 0x71, 0xad, 0xc3, 0x5e, + /* (2^187)P */ 0x0f, 0x93, 0x17, 0xbd, 0x5f, 0x2a, 0x02, 0x15, 0xe3, 0x70, 0x25, 0xd8, 0x77, 0x4a, 0xf6, 0xa4, 0x12, 0x37, 0x78, 0x15, 0x69, 0x8d, 0xbc, 0x12, 0xbb, 0x0a, 0x62, 0xfc, 0xc0, 0x94, 0x81, 0x49, + /* (2^188)P */ 0x82, 0x6c, 0x68, 0x55, 0xd2, 0xd9, 0xa2, 0x38, 0xf0, 0x21, 0x3e, 0x19, 0xd9, 0x6b, 0x5c, 0x78, 0x84, 0x54, 0x4a, 0xb2, 0x1a, 0xc8, 0xd5, 0xe4, 0x89, 0x09, 0xe2, 0xb2, 0x60, 0x78, 0x30, 0x56, + /* (2^189)P */ 0xc4, 0x74, 0x4d, 0x8b, 0xf7, 0x55, 0x9d, 0x42, 0x31, 0x01, 0x35, 0x43, 0x46, 0x83, 0xf1, 0x22, 0xff, 0x1f, 0xc7, 0x98, 0x45, 0xc2, 0x60, 0x1e, 0xef, 0x83, 0x99, 0x97, 0x14, 0xf0, 0xf2, 0x59, + /* (2^190)P */ 0x44, 0x4a, 0x49, 0xeb, 0x56, 0x7d, 0xa4, 0x46, 0x8e, 0xa1, 0x36, 0xd6, 0x54, 0xa8, 0x22, 0x3e, 0x3b, 0x1c, 0x49, 0x74, 0x52, 0xe1, 0x46, 0xb3, 0xe7, 0xcd, 0x90, 0x53, 0x4e, 0xfd, 0xea, 0x2c, + /* (2^191)P */ 0x75, 0x66, 0x0d, 0xbe, 0x38, 0x85, 0x8a, 0xba, 0x23, 0x8e, 0x81, 0x50, 0xbb, 0x74, 0x90, 0x4b, 0xc3, 0x04, 0xd3, 0x85, 0x90, 0xb8, 0xda, 0xcb, 0xc4, 0x92, 0x61, 0xe5, 0xe0, 0x4f, 0xa2, 0x61, + /* (2^192)P */ 0xcb, 0x5b, 0x52, 0xdb, 0xe6, 0x15, 0x76, 0xcb, 0xca, 0xe4, 0x67, 0xa5, 0x35, 0x8c, 0x7d, 0xdd, 0x69, 0xdd, 0xfc, 0xca, 0x3a, 0x15, 0xb4, 0xe6, 0x66, 0x97, 0x3c, 0x7f, 0x09, 0x8e, 0x66, 0x2d, + /* (2^193)P */ 0xf0, 0x5e, 0xe5, 0x5c, 0x26, 0x7e, 0x7e, 0xa5, 0x67, 0xb9, 0xd4, 0x7c, 0x52, 0x4e, 0x9f, 0x5d, 0xe5, 0xd1, 0x2f, 0x49, 0x06, 0x36, 0xc8, 0xfb, 0xae, 0xf7, 0xc3, 0xb7, 0xbe, 0x52, 0x0d, 0x09, + /* (2^194)P */ 0x7c, 0x4d, 0x7b, 0x1e, 0x5a, 0x51, 0xb9, 0x09, 0xc0, 0x44, 0xda, 0x99, 0x25, 0x6a, 0x26, 0x1f, 0x04, 0x55, 0xc5, 0xe2, 0x48, 0x95, 0xc4, 0xa1, 0xcc, 0x15, 0x6f, 0x12, 0x87, 0x42, 0xf0, 0x7e, + /* (2^195)P */ 0x15, 0xef, 0x30, 0xbd, 0x9d, 0x65, 0xd1, 0xfe, 0x7b, 0x27, 0xe0, 0xc4, 0xee, 0xb9, 0x4a, 0x8b, 0x91, 0x32, 0xdf, 0xa5, 0x36, 0x62, 0x4d, 0x88, 0x88, 0xf7, 0x5c, 0xbf, 0xa6, 0x6e, 0xd9, 0x1f, + /* (2^196)P */ 0x9a, 0x0d, 0x19, 0x1f, 0x98, 0x61, 0xa1, 0x42, 0xc1, 0x52, 0x60, 0x7e, 0x50, 0x49, 0xd8, 0x61, 0xd5, 0x2c, 0x5a, 0x28, 0xbf, 0x13, 0xe1, 0x9f, 0xd8, 0x85, 0xad, 0xdb, 0x76, 0xd6, 0x22, 0x7c, + /* (2^197)P */ 0x7d, 0xd2, 0xfb, 0x2b, 0xed, 0x70, 0xe7, 0x82, 0xa5, 0xf5, 0x96, 0xe9, 0xec, 0xb2, 0x05, 0x4c, 0x50, 0x01, 0x90, 0xb0, 0xc2, 0xa9, 0x40, 0xcd, 0x64, 0xbf, 0xd9, 0x13, 0x92, 0x31, 0x95, 0x58, + /* (2^198)P */ 0x08, 0x2e, 0xea, 0x3f, 0x70, 0x5d, 0xcc, 0xe7, 0x8c, 0x18, 0xe2, 0x58, 0x12, 0x49, 0x0c, 0xb5, 0xf0, 0x5b, 0x20, 0x48, 0xaa, 0x0b, 0xe3, 0xcc, 0x62, 0x2d, 0xa3, 0xcf, 0x9c, 0x65, 0x7c, 0x53, + /* (2^199)P */ 0x88, 0xc0, 0xcf, 0x98, 0x3a, 0x62, 0xb6, 0x37, 0xa4, 0xac, 0xd6, 0xa4, 0x1f, 0xed, 0x9b, 0xfe, 0xb0, 0xd1, 0xa8, 0x56, 0x8e, 0x9b, 0xd2, 0x04, 0x75, 0x95, 0x51, 0x0b, 0xc4, 0x71, 0x5f, 0x72, + /* (2^200)P */ 0xe6, 0x9c, 0x33, 0xd0, 0x9c, 0xf8, 0xc7, 0x28, 0x8b, 0xc1, 0xdd, 0x69, 0x44, 0xb1, 0x67, 0x83, 0x2c, 0x65, 0xa1, 0xa6, 0x83, 0xda, 0x3a, 0x88, 0x17, 0x6c, 0x4d, 0x03, 0x74, 0x19, 0x5f, 0x58, + /* (2^201)P */ 0x88, 0x91, 0xb1, 0xf1, 0x66, 0xb2, 0xcf, 0x89, 0x17, 0x52, 0xc3, 0xe7, 0x63, 0x48, 0x3b, 0xe6, 0x6a, 0x52, 0xc0, 0xb4, 0xa6, 0x9d, 0x8c, 0xd8, 0x35, 0x46, 0x95, 0xf0, 0x9d, 0x5c, 0x03, 0x3e, + /* (2^202)P */ 0x9d, 0xde, 0x45, 0xfb, 0x12, 0x54, 0x9d, 0xdd, 0x0d, 0xf4, 0xcf, 0xe4, 0x32, 0x45, 0x68, 0xdd, 0x1c, 0x67, 0x1d, 0x15, 0x9b, 0x99, 0x5c, 0x4b, 0x90, 0xf6, 0xe7, 0x11, 0xc8, 0x2c, 0x8c, 0x2d, + /* (2^203)P */ 0x40, 0x5d, 0x05, 0x90, 0x1d, 0xbe, 0x54, 0x7f, 0x40, 0xaf, 0x4a, 0x46, 0xdf, 0xc5, 0x64, 0xa4, 0xbe, 0x17, 0xe9, 0xf0, 0x24, 0x96, 0x97, 0x33, 0x30, 0x6b, 
0x35, 0x27, 0xc5, 0x8d, 0x01, 0x2c, + /* (2^204)P */ 0xd4, 0xb3, 0x30, 0xe3, 0x24, 0x50, 0x41, 0xa5, 0xd3, 0x52, 0x16, 0x69, 0x96, 0x3d, 0xff, 0x73, 0xf1, 0x59, 0x9b, 0xef, 0xc4, 0x42, 0xec, 0x94, 0x5a, 0x8e, 0xd0, 0x18, 0x16, 0x20, 0x47, 0x07, + /* (2^205)P */ 0x53, 0x1c, 0x41, 0xca, 0x8a, 0xa4, 0x6c, 0x4d, 0x19, 0x61, 0xa6, 0xcf, 0x2f, 0x5f, 0x41, 0x66, 0xff, 0x27, 0xe2, 0x51, 0x00, 0xd4, 0x4d, 0x9c, 0xeb, 0xf7, 0x02, 0x9a, 0xc0, 0x0b, 0x81, 0x59, + /* (2^206)P */ 0x1d, 0x10, 0xdc, 0xb3, 0x71, 0xb1, 0x7e, 0x2a, 0x8e, 0xf6, 0xfe, 0x9f, 0xb9, 0x5a, 0x1c, 0x44, 0xea, 0x59, 0xb3, 0x93, 0x9b, 0x5c, 0x02, 0x32, 0x2f, 0x11, 0x9d, 0x1e, 0xa7, 0xe0, 0x8c, 0x5e, + /* (2^207)P */ 0xfd, 0x03, 0x95, 0x42, 0x92, 0xcb, 0xcc, 0xbf, 0x55, 0x5d, 0x09, 0x2f, 0x75, 0xba, 0x71, 0xd2, 0x1e, 0x09, 0x2d, 0x97, 0x5e, 0xad, 0x5e, 0x34, 0xba, 0x03, 0x31, 0xa8, 0x11, 0xdf, 0xc8, 0x18, + /* (2^208)P */ 0x4c, 0x0f, 0xed, 0x9a, 0x9a, 0x94, 0xcd, 0x90, 0x7e, 0xe3, 0x60, 0x66, 0xcb, 0xf4, 0xd1, 0xc5, 0x0b, 0x2e, 0xc5, 0x56, 0x2d, 0xc5, 0xca, 0xb8, 0x0d, 0x8e, 0x80, 0xc5, 0x00, 0xe4, 0x42, 0x6e, + /* (2^209)P */ 0x23, 0xfd, 0xae, 0xee, 0x66, 0x69, 0xb4, 0xa3, 0xca, 0xcd, 0x9e, 0xe3, 0x0b, 0x1f, 0x4f, 0x0c, 0x1d, 0xa5, 0x83, 0xd6, 0xc9, 0xc8, 0x9d, 0x18, 0x1b, 0x35, 0x09, 0x4c, 0x05, 0x7f, 0xf2, 0x51, + /* (2^210)P */ 0x82, 0x06, 0x32, 0x2a, 0xcd, 0x7c, 0x48, 0x4c, 0x96, 0x1c, 0xdf, 0xb3, 0x5b, 0xa9, 0x7e, 0x58, 0xe8, 0xb8, 0x5c, 0x55, 0x9e, 0xf7, 0xcc, 0xc8, 0x3d, 0xd7, 0x06, 0xa2, 0x29, 0xc8, 0x7d, 0x54, + /* (2^211)P */ 0x06, 0x9b, 0xc3, 0x80, 0xcd, 0xa6, 0x22, 0xb8, 0xc6, 0xd4, 0x00, 0x20, 0x73, 0x54, 0x6d, 0xe9, 0x4d, 0x3b, 0x46, 0x91, 0x6f, 0x5b, 0x53, 0x28, 0x1d, 0x6e, 0x48, 0xe2, 0x60, 0x46, 0x8f, 0x22, + /* (2^212)P */ 0xbf, 0x3a, 0x8d, 0xde, 0x38, 0x95, 0x79, 0x98, 0x6e, 0xca, 0xeb, 0x45, 0x00, 0x33, 0xd8, 0x8c, 0x38, 0xe7, 0x21, 0x82, 0x00, 0x2a, 0x95, 0x79, 0xbb, 0xd2, 0x5c, 0x53, 0xa7, 0xe1, 0x22, 0x43, + /* (2^213)P */ 0x1c, 0x80, 0xd1, 0x19, 0x18, 0xc1, 0x14, 0xb1, 0xc7, 0x5e, 0x3f, 0x4f, 0xd8, 0xe4, 0x16, 0x20, 0x4c, 0x0f, 0x26, 0x09, 0xf4, 0x2d, 0x0e, 0xdd, 0x66, 0x72, 0x5f, 0xae, 0xc0, 0x62, 0xc3, 0x5e, + /* (2^214)P */ 0xee, 0xb4, 0xb2, 0xb8, 0x18, 0x2b, 0x46, 0xc0, 0xfb, 0x1a, 0x4d, 0x27, 0x50, 0xd9, 0xc8, 0x7c, 0xd2, 0x02, 0x6b, 0x43, 0x05, 0x71, 0x5f, 0xf2, 0xd3, 0xcc, 0xf9, 0xbf, 0xdc, 0xf8, 0xbb, 0x43, + /* (2^215)P */ 0xdf, 0xe9, 0x39, 0xa0, 0x67, 0x17, 0xad, 0xb6, 0x83, 0x35, 0x9d, 0xf6, 0xa8, 0x4d, 0x71, 0xb0, 0xf5, 0x31, 0x29, 0xb4, 0x18, 0xfa, 0x55, 0x5e, 0x61, 0x09, 0xc6, 0x33, 0x8f, 0x55, 0xd5, 0x4e, + /* (2^216)P */ 0xdd, 0xa5, 0x47, 0xc6, 0x01, 0x79, 0xe3, 0x1f, 0x57, 0xd3, 0x81, 0x80, 0x1f, 0xdf, 0x3d, 0x59, 0xa6, 0xd7, 0x3f, 0x81, 0xfd, 0xa4, 0x49, 0x02, 0x61, 0xaf, 0x9c, 0x4e, 0x27, 0xca, 0xac, 0x69, + /* (2^217)P */ 0xc9, 0x21, 0x07, 0x33, 0xea, 0xa3, 0x7b, 0x04, 0xa0, 0x1e, 0x7e, 0x0e, 0xc2, 0x3f, 0x42, 0x83, 0x60, 0x4a, 0x31, 0x01, 0xaf, 0xc0, 0xf4, 0x1d, 0x27, 0x95, 0x28, 0x89, 0xab, 0x2d, 0xa6, 0x09, + /* (2^218)P */ 0x00, 0xcb, 0xc6, 0x9c, 0xa4, 0x25, 0xb3, 0xa5, 0xb6, 0x6c, 0xb5, 0x54, 0xc6, 0x5d, 0x4b, 0xe9, 0xa0, 0x94, 0xc9, 0xad, 0x79, 0x87, 0xe2, 0x3b, 0xad, 0x4a, 0x3a, 0xba, 0xf8, 0xe8, 0x96, 0x42, + /* (2^219)P */ 0xab, 0x1e, 0x45, 0x1e, 0x76, 0x89, 0x86, 0x32, 0x4a, 0x59, 0x59, 0xff, 0x8b, 0x59, 0x4d, 0x2e, 0x4a, 0x08, 0xa7, 0xd7, 0x53, 0x68, 0xb9, 0x49, 0xa8, 0x20, 0x14, 0x60, 0x19, 0xa3, 0x80, 0x49, + /* (2^220)P */ 0x42, 0x2c, 0x55, 0x2f, 0xe1, 0xb9, 0x65, 0x95, 0x96, 0xfe, 0x00, 0x71, 0xdb, 0x18, 0x53, 0x8a, 0xd7, 0xd0, 0xad, 0x43, 0x4d, 0x0b, 0xc9, 0x05, 0xda, 0x4e, 
0x5d, 0x6a, 0xd6, 0x4c, 0x8b, 0x53, + /* (2^221)P */ 0x9f, 0x03, 0x9f, 0xe8, 0xc3, 0x4f, 0xe9, 0xf4, 0x45, 0x80, 0x61, 0x6f, 0xf2, 0x9a, 0x2c, 0x59, 0x50, 0x95, 0x4b, 0xfd, 0xb5, 0x6e, 0xa3, 0x08, 0x19, 0x14, 0xed, 0xc2, 0xf6, 0xfa, 0xff, 0x25, + /* (2^222)P */ 0x54, 0xd3, 0x79, 0xcc, 0x59, 0x44, 0x43, 0x34, 0x6b, 0x47, 0xd5, 0xb1, 0xb4, 0xbf, 0xec, 0xee, 0x99, 0x5d, 0x61, 0x61, 0xa0, 0x34, 0xeb, 0xdd, 0x73, 0xb7, 0x64, 0xeb, 0xcc, 0xce, 0x29, 0x51, + /* (2^223)P */ 0x20, 0x35, 0x99, 0x94, 0x58, 0x21, 0x43, 0xee, 0x3b, 0x0b, 0x4c, 0xf1, 0x7c, 0x9c, 0x2f, 0x77, 0xd5, 0xda, 0xbe, 0x06, 0xe3, 0xfc, 0xe2, 0xd2, 0x97, 0x6a, 0xf0, 0x46, 0xb5, 0x42, 0x5f, 0x71, + /* (2^224)P */ 0x1a, 0x5f, 0x5b, 0xda, 0xce, 0xcd, 0x4e, 0x43, 0xa9, 0x41, 0x97, 0xa4, 0x15, 0x71, 0xa1, 0x0d, 0x2e, 0xad, 0xed, 0x73, 0x7c, 0xd7, 0x0b, 0x68, 0x41, 0x90, 0xdd, 0x4e, 0x35, 0x02, 0x7c, 0x48, + /* (2^225)P */ 0xc4, 0xd9, 0x0e, 0xa7, 0xf3, 0xef, 0xef, 0xb8, 0x02, 0xe3, 0x57, 0xe8, 0xa3, 0x2a, 0xa3, 0x56, 0xa0, 0xa5, 0xa2, 0x48, 0xbd, 0x68, 0x3a, 0xdf, 0x44, 0xc4, 0x76, 0x31, 0xb7, 0x50, 0xf6, 0x07, + /* (2^226)P */ 0xb1, 0xcc, 0xe0, 0x26, 0x16, 0x9b, 0x8b, 0xe3, 0x36, 0xfb, 0x09, 0x8b, 0xc1, 0x53, 0xe0, 0x79, 0x64, 0x49, 0xf9, 0xc9, 0x19, 0x03, 0xd9, 0x56, 0xc4, 0xf5, 0x9f, 0xac, 0xe7, 0x41, 0xa9, 0x1c, + /* (2^227)P */ 0xbb, 0xa0, 0x2f, 0x16, 0x29, 0xdf, 0xc4, 0x49, 0x05, 0x33, 0xb3, 0x82, 0x32, 0xcf, 0x88, 0x84, 0x7d, 0x43, 0xbb, 0xca, 0x14, 0xda, 0xdf, 0x95, 0x86, 0xad, 0xd5, 0x64, 0x82, 0xf7, 0x91, 0x33, + /* (2^228)P */ 0x5d, 0x09, 0xb5, 0xe2, 0x6a, 0xe0, 0x9a, 0x72, 0x46, 0xa9, 0x59, 0x32, 0xd7, 0x58, 0x8a, 0xd5, 0xed, 0x21, 0x39, 0xd1, 0x62, 0x42, 0x83, 0xe9, 0x92, 0xb5, 0x4b, 0xa5, 0xfa, 0xda, 0xfe, 0x27, + /* (2^229)P */ 0xbb, 0x48, 0xad, 0x29, 0xb8, 0xc5, 0x9d, 0xa9, 0x60, 0xe2, 0x9e, 0x49, 0x42, 0x57, 0x02, 0x5f, 0xfd, 0x13, 0x75, 0x5d, 0xcd, 0x8e, 0x2c, 0x80, 0x38, 0xd9, 0x6d, 0x3f, 0xef, 0xb3, 0xce, 0x78, + /* (2^230)P */ 0x94, 0x5d, 0x13, 0x8a, 0x4f, 0xf4, 0x42, 0xc3, 0xa3, 0xdd, 0x8c, 0x82, 0x44, 0xdb, 0x9e, 0x7b, 0xe7, 0xcf, 0x37, 0x05, 0x1a, 0xd1, 0x36, 0x94, 0xc8, 0xb4, 0x1a, 0xec, 0x64, 0xb1, 0x64, 0x50, + /* (2^231)P */ 0xfc, 0xb2, 0x7e, 0xd3, 0xcf, 0xec, 0x20, 0x70, 0xfc, 0x25, 0x0d, 0xd9, 0x3e, 0xea, 0x31, 0x1f, 0x34, 0xbb, 0xa1, 0xdf, 0x7b, 0x0d, 0x93, 0x1b, 0x44, 0x30, 0x11, 0x48, 0x7a, 0x46, 0x44, 0x53, + /* (2^232)P */ 0xfb, 0x6d, 0x5e, 0xf2, 0x70, 0x31, 0x07, 0x70, 0xc8, 0x4c, 0x11, 0x50, 0x1a, 0xdc, 0x85, 0xe3, 0x00, 0x4f, 0xfc, 0xc8, 0x8a, 0x69, 0x48, 0x23, 0xd8, 0x40, 0xdd, 0x84, 0x52, 0xa5, 0x77, 0x2a, + /* (2^233)P */ 0xe4, 0x6c, 0x8c, 0xc9, 0xe0, 0xaf, 0x06, 0xfe, 0xe4, 0xd6, 0xdf, 0xdd, 0x96, 0xdf, 0x35, 0xc2, 0xd3, 0x1e, 0xbf, 0x33, 0x1e, 0xd0, 0x28, 0x14, 0xaf, 0xbd, 0x00, 0x93, 0xec, 0x68, 0x57, 0x78, + /* (2^234)P */ 0x3b, 0xb6, 0xde, 0x91, 0x7a, 0xe5, 0x02, 0x97, 0x80, 0x8b, 0xce, 0xe5, 0xbf, 0xb8, 0xbd, 0x61, 0xac, 0x58, 0x1d, 0x3d, 0x6f, 0x42, 0x5b, 0x64, 0xbc, 0x57, 0xa5, 0x27, 0x22, 0xa8, 0x04, 0x48, + /* (2^235)P */ 0x01, 0x26, 0x4d, 0xb4, 0x8a, 0x04, 0x57, 0x8e, 0x35, 0x69, 0x3a, 0x4b, 0x1a, 0x50, 0xd6, 0x68, 0x93, 0xc2, 0xe1, 0xf9, 0xc3, 0x9e, 0x9c, 0xc3, 0xe2, 0x63, 0xde, 0xd4, 0x57, 0xf2, 0x72, 0x41, + /* (2^236)P */ 0x01, 0x64, 0x0c, 0x33, 0x50, 0xb4, 0x68, 0xd3, 0x91, 0x23, 0x8f, 0x41, 0x17, 0x30, 0x0d, 0x04, 0x0d, 0xd9, 0xb7, 0x90, 0x60, 0xbb, 0x34, 0x2c, 0x1f, 0xd5, 0xdf, 0x8f, 0x22, 0x49, 0xf6, 0x16, + /* (2^237)P */ 0xf5, 0x8e, 0x92, 0x2b, 0x8e, 0x81, 0xa6, 0xbe, 0x72, 0x1e, 0xc1, 0xcd, 0x91, 0xcf, 0x8c, 0xe2, 0xcd, 0x36, 0x7a, 0xe7, 0x68, 0xaa, 0x4a, 0x59, 0x0f, 0xfd, 
0x7f, 0x6c, 0x80, 0x34, 0x30, 0x31, + /* (2^238)P */ 0x65, 0xbd, 0x49, 0x22, 0xac, 0x27, 0x9d, 0x8a, 0x12, 0x95, 0x8e, 0x01, 0x64, 0xb4, 0xa3, 0x19, 0xc7, 0x7e, 0xb3, 0x52, 0xf3, 0xcf, 0x6c, 0xc2, 0x21, 0x7b, 0x79, 0x1d, 0x34, 0x68, 0x6f, 0x05, + /* (2^239)P */ 0x27, 0x23, 0xfd, 0x7e, 0x75, 0xd6, 0x79, 0x5e, 0x15, 0xfe, 0x3a, 0x55, 0xb6, 0xbc, 0xbd, 0xfa, 0x60, 0x5a, 0xaf, 0x6e, 0x2c, 0x22, 0xe7, 0xd3, 0x3b, 0x74, 0xae, 0x4d, 0x6d, 0xc7, 0x46, 0x70, + /* (2^240)P */ 0x55, 0x4a, 0x8d, 0xb1, 0x72, 0xe8, 0x0b, 0x66, 0x96, 0x14, 0x4e, 0x57, 0x18, 0x25, 0x99, 0x19, 0xbb, 0xdc, 0x2b, 0x30, 0x3a, 0x05, 0x03, 0xc1, 0x8e, 0x8e, 0x21, 0x0b, 0x80, 0xe9, 0xd8, 0x3e, + /* (2^241)P */ 0x3e, 0xe0, 0x75, 0xfa, 0x39, 0x92, 0x0b, 0x7b, 0x83, 0xc0, 0x33, 0x46, 0x68, 0xfb, 0xe9, 0xef, 0x93, 0x77, 0x1a, 0x39, 0xbe, 0x5f, 0xa3, 0x98, 0x34, 0xfe, 0xd0, 0xe2, 0x0f, 0x51, 0x65, 0x60, + /* (2^242)P */ 0x0c, 0xad, 0xab, 0x48, 0x85, 0x66, 0xcb, 0x55, 0x27, 0xe5, 0x87, 0xda, 0x48, 0x45, 0x58, 0xb4, 0xdd, 0xc1, 0x07, 0x01, 0xea, 0xec, 0x43, 0x2c, 0x35, 0xde, 0x72, 0x93, 0x80, 0x28, 0x60, 0x52, + /* (2^243)P */ 0x1f, 0x3b, 0x21, 0xf9, 0x6a, 0xc5, 0x15, 0x34, 0xdb, 0x98, 0x7e, 0x01, 0x4d, 0x1a, 0xee, 0x5b, 0x9b, 0x70, 0xcf, 0xb5, 0x05, 0xb1, 0xf6, 0x13, 0xb6, 0x9a, 0xb2, 0x82, 0x34, 0x0e, 0xf2, 0x5f, + /* (2^244)P */ 0x90, 0x6c, 0x2e, 0xcc, 0x75, 0x9c, 0xa2, 0x0a, 0x06, 0xe2, 0x70, 0x3a, 0xca, 0x73, 0x7d, 0xfc, 0x15, 0xc5, 0xb5, 0xc4, 0x8f, 0xc3, 0x9f, 0x89, 0x07, 0xc2, 0xff, 0x24, 0xb1, 0x86, 0x03, 0x25, + /* (2^245)P */ 0x56, 0x2b, 0x3d, 0xae, 0xd5, 0x28, 0xea, 0x54, 0xce, 0x60, 0xde, 0xd6, 0x9d, 0x14, 0x13, 0x99, 0xc1, 0xd6, 0x06, 0x8f, 0xc5, 0x4f, 0x69, 0x16, 0xc7, 0x8f, 0x01, 0xeb, 0x75, 0x39, 0xb2, 0x46, + /* (2^246)P */ 0xe2, 0xb4, 0xb7, 0xb4, 0x0f, 0x6a, 0x0a, 0x47, 0xde, 0x53, 0x72, 0x8f, 0x5a, 0x47, 0x92, 0x5d, 0xdb, 0x3a, 0xbd, 0x2f, 0xb5, 0xe5, 0xee, 0xab, 0x68, 0x69, 0x80, 0xa0, 0x01, 0x08, 0xa2, 0x7f, + /* (2^247)P */ 0xd2, 0x14, 0x77, 0x9f, 0xf1, 0xfa, 0xf3, 0x76, 0xc3, 0x60, 0x46, 0x2f, 0xc1, 0x40, 0xe8, 0xb3, 0x4e, 0x74, 0x12, 0xf2, 0x8d, 0xcd, 0xb4, 0x0f, 0xd2, 0x2d, 0x3a, 0x1d, 0x25, 0x5a, 0x06, 0x4b, + /* (2^248)P */ 0x4a, 0xcd, 0x77, 0x3d, 0x38, 0xde, 0xeb, 0x5c, 0xb1, 0x9c, 0x2c, 0x88, 0xdf, 0x39, 0xdf, 0x6a, 0x59, 0xf7, 0x9a, 0xb0, 0x2e, 0x24, 0xdd, 0xa2, 0x22, 0x64, 0x5f, 0x0e, 0xe5, 0xc0, 0x47, 0x31, + /* (2^249)P */ 0xdb, 0x50, 0x13, 0x1d, 0x10, 0xa5, 0x4c, 0x16, 0x62, 0xc9, 0x3f, 0xc3, 0x79, 0x34, 0xd1, 0xf8, 0x08, 0xda, 0xe5, 0x13, 0x4d, 0xce, 0x40, 0xe6, 0xba, 0xf8, 0x61, 0x50, 0xc4, 0xe0, 0xde, 0x4b, + /* (2^250)P */ 0xc9, 0xb1, 0xed, 0xa4, 0xc1, 0x6d, 0xc4, 0xd7, 0x8a, 0xd9, 0x7f, 0x43, 0xb6, 0xd7, 0x14, 0x55, 0x0b, 0xc0, 0xa1, 0xb2, 0x6b, 0x2f, 0x94, 0x58, 0x0e, 0x71, 0x70, 0x1d, 0xab, 0xb2, 0xff, 0x2d, + /* (2^251)P */ 0x68, 0x6d, 0x8b, 0xc1, 0x2f, 0xcf, 0xdf, 0xcc, 0x67, 0x61, 0x80, 0xb7, 0xa8, 0xcb, 0xeb, 0xa8, 0xe3, 0x37, 0x29, 0x5e, 0xf9, 0x97, 0x06, 0x98, 0x8c, 0x6e, 0x12, 0xd0, 0x1c, 0xba, 0xfb, 0x02, + /* (2^252)P */ 0x65, 0x45, 0xff, 0xad, 0x60, 0xc3, 0x98, 0xcb, 0x19, 0x15, 0xdb, 0x4b, 0xd2, 0x01, 0x71, 0x44, 0xd5, 0x15, 0xfb, 0x75, 0x74, 0xc8, 0xc4, 0x98, 0x7d, 0xa2, 0x22, 0x6e, 0x6d, 0xc7, 0xf8, 0x05, + /* (2^253)P */ 0x94, 0xf4, 0xb9, 0xfe, 0xdf, 0xe5, 0x69, 0xab, 0x75, 0x6b, 0x40, 0x18, 0x9d, 0xc7, 0x09, 0xae, 0x1d, 0x2d, 0xa4, 0x94, 0xfb, 0x45, 0x9b, 0x19, 0x84, 0xfa, 0x2a, 0xae, 0xeb, 0x0a, 0x71, 0x79, + /* (2^254)P */ 0xdf, 0xd2, 0x34, 0xf3, 0xa7, 0xed, 0xad, 0xa6, 0xb4, 0x57, 0x2a, 0xaf, 0x51, 0x9c, 0xde, 0x7b, 0xa8, 0xea, 0xdc, 0x86, 0x4f, 0xc6, 0x8f, 0xa9, 0x7b, 0xd0, 
0x0e, 0xc2, 0x35, 0x03, 0xbe, 0x6b, + /* (2^255)P */ 0x44, 0x43, 0x98, 0x53, 0xbe, 0xdc, 0x7f, 0x66, 0xa8, 0x49, 0x59, 0x00, 0x1c, 0xbc, 0x72, 0x07, 0x8e, 0xd6, 0xbe, 0x4e, 0x9f, 0xa4, 0x07, 0xba, 0xbf, 0x30, 0xdf, 0xba, 0x85, 0xb0, 0xa7, 0x1f, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve.go b/vendor/github.com/cloudflare/circl/dh/x448/curve.go new file mode 100644 index 000000000..d59564e4b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve.go @@ -0,0 +1,104 @@ +package x448 + +import ( + fp "github.com/cloudflare/circl/math/fp448" +) + +// ladderJoye calculates a fixed-point multiplication with the generator point. +// The algorithm is the right-to-left Joye's ladder as described +// in "How to precompute a ladder" in SAC'2017. +func ladderJoye(k *Key) { + w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved. + w[1] = fp.Elt{ // x1 = S + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } + fp.SetOne(&w[2]) // z1 = 1 + w[3] = fp.Elt{ // x2 = G-S + 0x20, 0x27, 0x9d, 0xc9, 0x7d, 0x19, 0xb1, 0xac, + 0xf8, 0xba, 0x69, 0x1c, 0xff, 0x33, 0xac, 0x23, + 0x51, 0x1b, 0xce, 0x3a, 0x64, 0x65, 0xbd, 0xf1, + 0x23, 0xf8, 0xc1, 0x84, 0x9d, 0x45, 0x54, 0x29, + 0x67, 0xb9, 0x81, 0x1c, 0x03, 0xd1, 0xcd, 0xda, + 0x7b, 0xeb, 0xff, 0x1a, 0x88, 0x03, 0xcf, 0x3a, + 0x42, 0x44, 0x32, 0x01, 0x25, 0xb7, 0xfa, 0xf0, + } + fp.SetOne(&w[4]) // z2 = 1 + + const n = 448 + const h = 2 + swap := uint(1) + for s := 0; s < n-h; s++ { + i := (s + h) / 8 + j := (s + h) % 8 + bit := uint((k[i] >> uint(j)) & 1) + copy(w[0][:], tableGenerator[s*Size:(s+1)*Size]) + diffAdd(&w, swap^bit) + swap = bit + } + for s := 0; s < h; s++ { + double(&w[1], &w[2]) + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +// ladderMontgomery calculates a generic scalar point multiplication +// The algorithm implemented is the left-to-right Montgomery's ladder. +func ladderMontgomery(k, xP *Key) { + w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved. 
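+ // Note: w[2] (z2) is never assigned below, so it stays zero and (x2,z2) = (1,0)
+ // is the projective encoding of the point at infinity, while (x3,z3) = (xP,1)
+ // encodes xP. The ladder preserves the invariant (x3,z3)-(x2,z2) = xP while it
+ // scans the 448 scalar bits from most to least significant.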
+ w[0] = *(*fp.Elt)(xP) // x1 = xP + fp.SetOne(&w[1]) // x2 = 1 + w[3] = *(*fp.Elt)(xP) // x3 = xP + fp.SetOne(&w[4]) // z3 = 1 + + move := uint(0) + for s := 448 - 1; s >= 0; s-- { + i := s / 8 + j := s % 8 + bit := uint((k[i] >> uint(j)) & 1) + ladderStep(&w, move^bit) + move = bit + } + toAffine((*[fp.Size]byte)(k), &w[1], &w[2]) +} + +func toAffine(k *[fp.Size]byte, x, z *fp.Elt) { + fp.Inv(z, z) + fp.Mul(x, x, z) + _ = fp.ToBytes(k[:], x) +} + +var lowOrderPoints = [3]fp.Elt{ + { /* (0,_,1) point of order 2 on Curve448 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (1,_,1) a point of order 4 on the twist of Curve448 */ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { /* (-1,_,1) point of order 4 on Curve448 */ + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go new file mode 100644 index 000000000..a06226661 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go @@ -0,0 +1,30 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package x448 + +import ( + fp "github.com/cloudflare/circl/math/fp448" + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func double(x, z *fp.Elt) { doubleAmd64(x, z) } +func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) } +func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) } +func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) } + +//go:noescape +func doubleAmd64(x, z *fp.Elt) + +//go:noescape +func diffAddAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func ladderStepAmd64(w *[5]fp.Elt, b uint) + +//go:noescape +func mulA24Amd64(z, x *fp.Elt) diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h new file mode 100644 index 000000000..8c1ae4d0f --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h @@ -0,0 +1,111 @@ +#define ladderStepLeg \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulLeg(b0,x2,z3) \ + integerMulLeg(b1,x3,z2) \ + reduceFromDoubleLeg(t0,b0) \ + reduceFromDoubleLeg(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrLeg(b0,t0) \ + integerSqrLeg(b1,t1) \ + reduceFromDoubleLeg(x3,b0) \ + reduceFromDoubleLeg(z3,b1) \ + integerMulLeg(b0,x1,z3) \ + reduceFromDoubleLeg(z3,b0) \ + integerSqrLeg(b0,x2) \ + integerSqrLeg(b1,z2) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z2) \ + 
integerMulLeg(b0,x2,z2) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x2,b0) \ + reduceFromDoubleLeg(z2,b1) + +#define ladderStepBmi2Adx \ + addSub(x2,z2) \ + addSub(x3,z3) \ + integerMulAdx(b0,x2,z3) \ + integerMulAdx(b1,x3,z2) \ + reduceFromDoubleAdx(t0,b0) \ + reduceFromDoubleAdx(t1,b1) \ + addSub(t0,t1) \ + cselect(x2,x3,regMove) \ + cselect(z2,z3,regMove) \ + integerSqrAdx(b0,t0) \ + integerSqrAdx(b1,t1) \ + reduceFromDoubleAdx(x3,b0) \ + reduceFromDoubleAdx(z3,b1) \ + integerMulAdx(b0,x1,z3) \ + reduceFromDoubleAdx(z3,b0) \ + integerSqrAdx(b0,x2) \ + integerSqrAdx(b1,z2) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) \ + subtraction(t0,x2,z2) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z2) \ + integerMulAdx(b0,x2,z2) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x2,b0) \ + reduceFromDoubleAdx(z2,b1) + +#define difAddLeg \ + addSub(x1,z1) \ + integerMulLeg(b0,z1,ui) \ + reduceFromDoubleLeg(z1,b0) \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + integerMulLeg(b0,x1,z2) \ + integerMulLeg(b1,z1,x2) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define difAddBmi2Adx \ + addSub(x1,z1) \ + integerMulAdx(b0,z1,ui) \ + reduceFromDoubleAdx(z1,b0) \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + integerMulAdx(b0,x1,z2) \ + integerMulAdx(b1,z1,x2) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) + +#define doubleLeg \ + addSub(x1,z1) \ + integerSqrLeg(b0,x1) \ + integerSqrLeg(b1,z1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Leg(t1,t0) \ + additionLeg(t1,t1,z1) \ + integerMulLeg(b0,x1,z1) \ + integerMulLeg(b1,t0,t1) \ + reduceFromDoubleLeg(x1,b0) \ + reduceFromDoubleLeg(z1,b1) + +#define doubleBmi2Adx \ + addSub(x1,z1) \ + integerSqrAdx(b0,x1) \ + integerSqrAdx(b1,z1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) \ + subtraction(t0,x1,z1) \ + multiplyA24Adx(t1,t0) \ + additionAdx(t1,t1,z1) \ + integerMulAdx(b0,x1,z1) \ + integerMulAdx(b1,t0,t1) \ + reduceFromDoubleAdx(x1,b0) \ + reduceFromDoubleAdx(z1,b1) diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s new file mode 100644 index 000000000..810aa9e64 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s @@ -0,0 +1,193 @@ +// +build amd64 + +#include "textflag.h" + +// Depends on circl/math/fp448 package +#include "../../math/fp448/fp_amd64.h" +#include "curve_amd64.h" + +// CTE_A24 is (A+2)/4 from Curve448 +#define CTE_A24 39082 + +#define Size 56 + +// multiplyA24Leg multiplies x times CTE_A24 and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, cmov, adx +#define multiplyA24Leg(z,x) \ + MOVQ $CTE_A24, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \ + MOVQ DX, AX; \ + SHLQ $32, AX; \ + ADDQ DX, R8; MOVQ $0, DX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ AX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + 
ADCQ $0, R14; \
+ ADCQ $0, DX; \
+ MOVQ DX, AX; \
+ SHLQ $32, AX; \
+ ADDQ DX, R8; \
+ ADCQ $0, R9; \
+ ADCQ $0, R10; \
+ ADCQ AX, R11; \
+ ADCQ $0, R12; \
+ ADCQ $0, R13; \
+ ADCQ $0, R14; \
+ MOVQ R8, 0+z; \
+ MOVQ R9, 8+z; \
+ MOVQ R10, 16+z; \
+ MOVQ R11, 24+z; \
+ MOVQ R12, 32+z; \
+ MOVQ R13, 40+z; \
+ MOVQ R14, 48+z;
+
+// multiplyA24Adx multiplies x times CTE_A24 and stores in z
+// Uses: AX, DX, R8-R14, FLAGS
+// Instr: x86_64, bmi2
+#define multiplyA24Adx(z,x) \
+ MOVQ $CTE_A24, DX; \
+ MULXQ 0+x, R8, R9; \
+ MULXQ 8+x, AX, R10; ADDQ AX, R9; \
+ MULXQ 16+x, AX, R11; ADCQ AX, R10; \
+ MULXQ 24+x, AX, R12; ADCQ AX, R11; \
+ MULXQ 32+x, AX, R13; ADCQ AX, R12; \
+ MULXQ 40+x, AX, R14; ADCQ AX, R13; \
+ MULXQ 48+x, AX, DX; ADCQ AX, R14; \
+ ;;;;;;;;;;;;;;;;;;;; ADCQ $0, DX; \
+ MOVQ DX, AX; \
+ SHLQ $32, AX; \
+ ADDQ DX, R8; MOVQ $0, DX; \
+ ADCQ $0, R9; \
+ ADCQ $0, R10; \
+ ADCQ AX, R11; \
+ ADCQ $0, R12; \
+ ADCQ $0, R13; \
+ ADCQ $0, R14; \
+ ADCQ $0, DX; \
+ MOVQ DX, AX; \
+ SHLQ $32, AX; \
+ ADDQ DX, R8; \
+ ADCQ $0, R9; \
+ ADCQ $0, R10; \
+ ADCQ AX, R11; \
+ ADCQ $0, R12; \
+ ADCQ $0, R13; \
+ ADCQ $0, R14; \
+ MOVQ R8, 0+z; \
+ MOVQ R9, 8+z; \
+ MOVQ R10, 16+z; \
+ MOVQ R11, 24+z; \
+ MOVQ R12, 32+z; \
+ MOVQ R13, 40+z; \
+ MOVQ R14, 48+z;
+
+#define mulA24Legacy \
+ multiplyA24Leg(0(DI),0(SI))
+#define mulA24Bmi2Adx \
+ multiplyA24Adx(0(DI),0(SI))
+
+// func mulA24Amd64(z, x *fp448.Elt)
+TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16
+ MOVQ z+0(FP), DI
+ MOVQ x+8(FP), SI
+ CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx)
+
+// func ladderStepAmd64(w *[5]fp448.Elt, b uint)
+// ladderStepAmd64 calculates a point addition and doubling as follows:
+// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-).
+// w = {x1,x2,z2,x3,z3} are five fp448.Elt of 56 bytes.
+// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
+// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
+TEXT ·ladderStepAmd64(SB),NOSPLIT,$336-16
+ // Parameters
+ #define regWork DI
+ #define regMove SI
+ #define x1 0*Size(regWork)
+ #define x2 1*Size(regWork)
+ #define z2 2*Size(regWork)
+ #define x3 3*Size(regWork)
+ #define z3 4*Size(regWork)
+ // Local variables
+ #define t0 0*Size(SP)
+ #define t1 1*Size(SP)
+ #define b0 2*Size(SP)
+ #define b1 4*Size(SP)
+ MOVQ w+0(FP), regWork
+ MOVQ b+8(FP), regMove
+ CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx)
+ #undef regWork
+ #undef regMove
+ #undef x1
+ #undef x2
+ #undef z2
+ #undef x3
+ #undef z3
+ #undef t0
+ #undef t1
+ #undef b0
+ #undef b1
+
+// func diffAddAmd64(w *[5]fp.Elt, b uint)
+// diffAddAmd64 calculates a differential point addition using a precomputed point.
+// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2)
+// w = {mu,x1,z1,x2,z2} are five fp448.Elt of 56 bytes, and
+// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
+// This is Equation 7 at https://eprint.iacr.org/2017/264.
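+// The two cswap calls at the start of the routine conditionally swap
+// (x1,z1) with (x2,z2) according to the swap bit, using a constant-time
+// mask so the bit does not influence timing or memory access patterns.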
+TEXT ·diffAddAmd64(SB),NOSPLIT,$224-16 + // Parameters + #define regWork DI + #define regSwap SI + #define ui 0*Size(regWork) + #define x1 1*Size(regWork) + #define z1 2*Size(regWork) + #define x2 3*Size(regWork) + #define z2 4*Size(regWork) + // Local variables + #define b0 0*Size(SP) + #define b1 2*Size(SP) + MOVQ w+0(FP), regWork + MOVQ b+8(FP), regSwap + cswap(x1,x2,regSwap) + cswap(z1,z2,regSwap) + CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx) + #undef regWork + #undef regSwap + #undef ui + #undef x1 + #undef z1 + #undef x2 + #undef z2 + #undef b0 + #undef b1 + +// func doubleAmd64(x, z *fp448.Elt) +// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1). +// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and +// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes. +TEXT ·doubleAmd64(SB),NOSPLIT,$336-16 + // Parameters + #define x1 0(DI) + #define z1 0(SI) + // Local variables + #define t0 0*Size(SP) + #define t1 1*Size(SP) + #define b0 2*Size(SP) + #define b1 4*Size(SP) + MOVQ x+0(FP), DI + MOVQ z+8(FP), SI + CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx) + #undef x1 + #undef z1 + #undef t0 + #undef t1 + #undef b0 + #undef b1 diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go new file mode 100644 index 000000000..b0b65ccf7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go @@ -0,0 +1,100 @@ +package x448 + +import ( + "encoding/binary" + "math/bits" + + "github.com/cloudflare/circl/math/fp448" +) + +func doubleGeneric(x, z *fp448.Elt) { + t0, t1 := &fp448.Elt{}, &fp448.Elt{} + fp448.AddSub(x, z) + fp448.Sqr(x, x) + fp448.Sqr(z, z) + fp448.Sub(t0, x, z) + mulA24Generic(t1, t0) + fp448.Add(t1, t1, z) + fp448.Mul(x, x, z) + fp448.Mul(z, t0, t1) +} + +func diffAddGeneric(w *[5]fp448.Elt, b uint) { + mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4] + fp448.Cswap(x1, x2, b) + fp448.Cswap(z1, z2, b) + fp448.AddSub(x1, z1) + fp448.Mul(z1, z1, mu) + fp448.AddSub(x1, z1) + fp448.Sqr(x1, x1) + fp448.Sqr(z1, z1) + fp448.Mul(x1, x1, z2) + fp448.Mul(z1, z1, x2) +} + +func ladderStepGeneric(w *[5]fp448.Elt, b uint) { + x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4] + t0 := &fp448.Elt{} + t1 := &fp448.Elt{} + fp448.AddSub(x2, z2) + fp448.AddSub(x3, z3) + fp448.Mul(t0, x2, z3) + fp448.Mul(t1, x3, z2) + fp448.AddSub(t0, t1) + fp448.Cmov(x2, x3, b) + fp448.Cmov(z2, z3, b) + fp448.Sqr(x3, t0) + fp448.Sqr(z3, t1) + fp448.Mul(z3, x1, z3) + fp448.Sqr(x2, x2) + fp448.Sqr(z2, z2) + fp448.Sub(t0, x2, z2) + mulA24Generic(t1, t0) + fp448.Add(t1, t1, z2) + fp448.Mul(x2, x2, z2) + fp448.Mul(z2, t0, t1) +} + +func mulA24Generic(z, x *fp448.Elt) { + const A24 = 39082 + const n = 8 + var xx [7]uint64 + for i := range xx { + xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n]) + } + h0, l0 := bits.Mul64(xx[0], A24) + h1, l1 := bits.Mul64(xx[1], A24) + h2, l2 := bits.Mul64(xx[2], A24) + h3, l3 := bits.Mul64(xx[3], A24) + h4, l4 := bits.Mul64(xx[4], A24) + h5, l5 := bits.Mul64(xx[5], A24) + h6, l6 := bits.Mul64(xx[6], A24) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, c3 := bits.Add64(h3, l4, c2) + l5, c4 := bits.Add64(h4, l5, c3) + l6, c5 := bits.Add64(h5, l6, c4) + l7, _ := bits.Add64(h6, 0, c5) + + l0, c0 = bits.Add64(l0, l7, 0) + l1, c1 = bits.Add64(l1, 0, c0) + l2, c2 = bits.Add64(l2, 0, c1) + l3, c3 = bits.Add64(l3, l7<<32, c2) + l4, c4 = bits.Add64(l4, 0, c3) + l5, c5 = bits.Add64(l5, 0, c4) + l6, l7 = 
bits.Add64(l6, 0, c5)
+
+ xx[0], c0 = bits.Add64(l0, l7, 0)
+ xx[1], c1 = bits.Add64(l1, 0, c0)
+ xx[2], c2 = bits.Add64(l2, 0, c1)
+ xx[3], c3 = bits.Add64(l3, l7<<32, c2)
+ xx[4], c4 = bits.Add64(l4, 0, c3)
+ xx[5], c5 = bits.Add64(l5, 0, c4)
+ xx[6], _ = bits.Add64(l6, 0, c5)
+
+ for i := range xx {
+ binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i])
+ }
+}
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go
new file mode 100644
index 000000000..3755b7c83
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go
@@ -0,0 +1,11 @@
+//go:build !amd64 || purego
+// +build !amd64 purego
+
+package x448
+
+import fp "github.com/cloudflare/circl/math/fp448"
+
+func double(x, z *fp.Elt) { doubleGeneric(x, z) }
+func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) }
+func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) }
+func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/doc.go b/vendor/github.com/cloudflare/circl/dh/x448/doc.go
new file mode 100644
index 000000000..c02904fed
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x448/doc.go
@@ -0,0 +1,19 @@
+/*
+Package x448 provides Diffie-Hellman functions as specified in RFC-7748.
+
+Validation of public keys.
+
+The Diffie-Hellman function, as described in RFC-7748 [1], works for any
+public key. However, if a different protocol requires contributory
+behaviour [2,3], then the public keys must be validated against low-order
+points [3,4]. To do that, the Shared function performs this validation
+internally and returns false when the public key is invalid (i.e., it
+is a low-order point).
+
+References:
+  - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt)
+  - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html)
+  - [3] Bernstein (https://cr.yp.to/ecdh.html#validate)
+  - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526)
+*/
+package x448
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/key.go b/vendor/github.com/cloudflare/circl/dh/x448/key.go
new file mode 100644
index 000000000..2fdde5116
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/dh/x448/key.go
@@ -0,0 +1,46 @@
+package x448
+
+import (
+ "crypto/subtle"
+
+ fp "github.com/cloudflare/circl/math/fp448"
+)
+
+// Size is the length in bytes of an X448 key.
+const Size = 56
+
+// Key represents an X448 key.
+type Key [Size]byte
+
+func (k *Key) clamp(in *Key) *Key {
+ *k = *in
+ k[0] &= 252
+ k[55] |= 128
+ return k
+}
+
+// isValidPubKey reports whether the public key is valid, i.e., not a
+// low-order point.
+func (k *Key) isValidPubKey() bool {
+ fp.Modp((*fp.Elt)(k))
+ var isLowOrder int
+ for _, P := range lowOrderPoints {
+ isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
+ }
+ return isLowOrder == 0
+}
+
+// KeyGen obtains a public key given a secret key.
+func KeyGen(public, secret *Key) {
+ ladderJoye(public.clamp(secret))
+}
+
+// Shared calculates Alice's shared key from Alice's secret key and Bob's
+// public key, returning true on success. It fails when the public key is a
+// low-order point; in that case the shared key is set to all zeros and the
+// function returns false.
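+//
+// A minimal usage sketch (from the caller's side; it assumes crypto/rand as
+// the source of secret keys, with error handling omitted):
+//
+//	var skA, pkA, skB, pkB, ssA, ssB x448.Key
+//	_, _ = rand.Read(skA[:])
+//	_, _ = rand.Read(skB[:])
+//	x448.KeyGen(&pkA, &skA)
+//	x448.KeyGen(&pkB, &skB)
+//	okA := x448.Shared(&ssA, &skA, &pkB)
+//	okB := x448.Shared(&ssB, &skB, &pkA)
+//	// If okA && okB, then ssA == ssB.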
+func Shared(shared, secret, public *Key) bool { + validPk := *public + ok := validPk.isValidPubKey() + ladderMontgomery(shared.clamp(secret), &validPk) + return ok +} diff --git a/vendor/github.com/cloudflare/circl/dh/x448/table.go b/vendor/github.com/cloudflare/circl/dh/x448/table.go new file mode 100644 index 000000000..eef53c30f --- /dev/null +++ b/vendor/github.com/cloudflare/circl/dh/x448/table.go @@ -0,0 +1,460 @@ +package x448 + +import fp "github.com/cloudflare/circl/math/fp448" + +// tableGenerator contains the set of points: +// +// t[i] = (xi+1)/(xi-1), +// +// where (xi,yi) = 2^iG and G is the generator point +// Size = (448)*(448/8) = 25088 bytes. +var tableGenerator = [448 * fp.Size]byte{ + /* (2^ 0)P */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, + /* (2^ 1)P */ 0x37, 0xfa, 0xaa, 0x0d, 0x86, 0xa6, 0x24, 0xe9, 0x6c, 0x95, 0x08, 0x34, 0xba, 0x1a, 0x81, 0x3a, 0xae, 0x01, 0xa5, 0xa7, 0x05, 0x85, 0x96, 0x00, 0x06, 0x5a, 0xd7, 0xff, 0xee, 0x8e, 0x8f, 0x94, 0xd2, 0xdc, 0xd7, 0xfc, 0xe7, 0xe5, 0x99, 0x1d, 0x05, 0x46, 0x43, 0xe8, 0xbc, 0x12, 0xb7, 0xeb, 0x30, 0x5e, 0x7a, 0x85, 0x68, 0xed, 0x9d, 0x28, + /* (2^ 2)P */ 0xf1, 0x7d, 0x08, 0x2b, 0x32, 0x4a, 0x62, 0x80, 0x36, 0xe7, 0xa4, 0x76, 0x5a, 0x2a, 0x1e, 0xf7, 0x9e, 0x3c, 0x40, 0x46, 0x9a, 0x1b, 0x61, 0xc1, 0xbf, 0x1a, 0x1b, 0xae, 0x91, 0x80, 0xa3, 0x76, 0x6c, 0xd4, 0x8f, 0xa4, 0xee, 0x26, 0x39, 0x23, 0xa4, 0x80, 0xf4, 0x66, 0x92, 0xe4, 0xe1, 0x18, 0x76, 0xc5, 0xe2, 0x19, 0x87, 0xd5, 0xc3, 0xe8, + /* (2^ 3)P */ 0xfb, 0xc9, 0xf0, 0x07, 0xf2, 0x93, 0xd8, 0x50, 0x36, 0xed, 0xfb, 0xbd, 0xb2, 0xd3, 0xfc, 0xdf, 0xd5, 0x2a, 0x6e, 0x26, 0x09, 0xce, 0xd4, 0x07, 0x64, 0x9f, 0x40, 0x74, 0xad, 0x98, 0x2f, 0x1c, 0xb6, 0xdc, 0x2d, 0x42, 0xff, 0xbf, 0x97, 0xd8, 0xdb, 0xef, 0x99, 0xca, 0x73, 0x99, 0x1a, 0x04, 0x3b, 0x56, 0x2c, 0x1f, 0x87, 0x9d, 0x9f, 0x03, + /* (2^ 4)P */ 0x4c, 0x35, 0x97, 0xf7, 0x81, 0x2c, 0x84, 0xa6, 0xe0, 0xcb, 0xce, 0x37, 0x4c, 0x21, 0x1c, 0x67, 0xfa, 0xab, 0x18, 0x4d, 0xef, 0xd0, 0xf0, 0x44, 0xa9, 0xfb, 0xc0, 0x8e, 0xda, 0x57, 0xa1, 0xd8, 0xeb, 0x87, 0xf4, 0x17, 0xea, 0x66, 0x0f, 0x16, 0xea, 0xcd, 0x5f, 0x3e, 0x88, 0xea, 0x09, 0x68, 0x40, 0xdf, 0x43, 0xcc, 0x54, 0x61, 0x58, 0xaa, + /* (2^ 5)P */ 0x8d, 0xe7, 0x59, 0xd7, 0x5e, 0x63, 0x37, 0xa7, 0x3f, 0xd1, 0x49, 0x85, 0x01, 0xdd, 0x5e, 0xb3, 0xe6, 0x29, 0xcb, 0x25, 0x93, 0xdd, 0x08, 0x96, 0x83, 0x52, 0x76, 0x85, 0xf5, 0x5d, 0x02, 0xbf, 0xe9, 0x6d, 0x15, 0x27, 0xc1, 0x09, 0xd1, 0x14, 0x4d, 0x6e, 0xe8, 0xaf, 0x59, 0x58, 0x34, 0x9d, 0x2a, 0x99, 0x85, 0x26, 0xbe, 0x4b, 0x1e, 0xb9, + /* (2^ 6)P */ 0x8d, 0xce, 0x94, 0xe2, 0x18, 0x56, 0x0d, 0x82, 0x8e, 0xdf, 0x85, 0x01, 0x8f, 0x93, 0x3c, 0xc6, 0xbd, 0x61, 0xfb, 0xf4, 0x22, 0xc5, 0x16, 0x87, 0xd1, 0xb1, 0x9e, 0x09, 0xc5, 0x83, 0x2e, 0x4a, 0x07, 0x88, 0xee, 0xe0, 0x29, 0x8d, 0x2e, 0x1f, 0x88, 0xad, 0xfd, 0x18, 0x93, 0xb7, 0xed, 0x42, 0x86, 0x78, 0xf0, 0xb8, 0x70, 0xbe, 0x01, 0x67, + /* (2^ 7)P */ 0xdf, 0x62, 0x2d, 0x94, 0xc7, 0x35, 0x23, 0xda, 0x27, 0xbb, 0x2b, 0xdb, 0x30, 0x80, 0x68, 0x16, 0xa3, 0xae, 0xd7, 0xd2, 0xa7, 0x7c, 0xbf, 0x6a, 0x1d, 0x83, 0xde, 0x96, 0x0a, 0x43, 0xb6, 0x30, 0x37, 0xd6, 0xee, 0x63, 0x59, 0x9a, 0xbf, 0xa3, 0x30, 0x6c, 0xaf, 0x0c, 0xee, 0x3d, 0xcb, 0x35, 0x4b, 0x55, 0x5f, 0x84, 0x85, 0xcb, 0x4f, 0x1e, + /* (2^ 8)P */ 0x9d, 
0x04, 0x68, 0x89, 0xa4, 0xa9, 0x0d, 0x87, 0xc1, 0x70, 0xf1, 0xeb, 0xfb, 0x47, 0x0a, 0xf0, 0xde, 0x67, 0xb7, 0x94, 0xcd, 0x36, 0x43, 0xa5, 0x49, 0x43, 0x67, 0xc3, 0xee, 0x3c, 0x6b, 0xec, 0xd0, 0x1a, 0xf4, 0xad, 0xef, 0x06, 0x4a, 0xe8, 0x46, 0x24, 0xd7, 0x93, 0xbf, 0xf0, 0xe3, 0x81, 0x61, 0xec, 0xea, 0x64, 0xfe, 0x67, 0xeb, 0xc7, + /* (2^ 9)P */ 0x95, 0x45, 0x79, 0xcf, 0x2c, 0xfd, 0x9b, 0xfe, 0x84, 0x46, 0x4b, 0x8f, 0xa1, 0xcf, 0xc3, 0x04, 0x94, 0x78, 0xdb, 0xc9, 0xa6, 0x01, 0x75, 0xa4, 0xb4, 0x93, 0x72, 0x43, 0xa7, 0x7d, 0xda, 0x31, 0x38, 0x54, 0xab, 0x4e, 0x3f, 0x89, 0xa6, 0xab, 0x57, 0xc0, 0x16, 0x65, 0xdb, 0x92, 0x96, 0xe4, 0xc8, 0xae, 0xe7, 0x4c, 0x7a, 0xeb, 0xbb, 0x5a, + /* (2^ 10)P */ 0xbe, 0xfe, 0x86, 0xc3, 0x97, 0xe0, 0x6a, 0x18, 0x20, 0x21, 0xca, 0x22, 0x55, 0xa1, 0xeb, 0xf5, 0x74, 0xe5, 0xc9, 0x59, 0xa7, 0x92, 0x65, 0x15, 0x08, 0x71, 0xd1, 0x09, 0x7e, 0x83, 0xfc, 0xbc, 0x5a, 0x93, 0x38, 0x0d, 0x43, 0x42, 0xfd, 0x76, 0x30, 0xe8, 0x63, 0x60, 0x09, 0x8d, 0x6c, 0xd3, 0xf8, 0x56, 0x3d, 0x68, 0x47, 0xab, 0xa0, 0x1d, + /* (2^ 11)P */ 0x38, 0x50, 0x1c, 0xb1, 0xac, 0x88, 0x8f, 0x38, 0xe3, 0x69, 0xe6, 0xfc, 0x4f, 0x8f, 0xe1, 0x9b, 0xb1, 0x1a, 0x09, 0x39, 0x19, 0xdf, 0xcd, 0x98, 0x7b, 0x64, 0x42, 0xf6, 0x11, 0xea, 0xc7, 0xe8, 0x92, 0x65, 0x00, 0x2c, 0x75, 0xb5, 0x94, 0x1e, 0x5b, 0xa6, 0x66, 0x81, 0x77, 0xf3, 0x39, 0x94, 0xac, 0xbd, 0xe4, 0x2a, 0x66, 0x84, 0x9c, 0x60, + /* (2^ 12)P */ 0xb5, 0xb6, 0xd9, 0x03, 0x67, 0xa4, 0xa8, 0x0a, 0x4a, 0x2b, 0x9d, 0xfa, 0x13, 0xe1, 0x99, 0x25, 0x4a, 0x5c, 0x67, 0xb9, 0xb2, 0xb7, 0xdd, 0x1e, 0xaf, 0xeb, 0x63, 0x41, 0xb6, 0xb9, 0xa0, 0x87, 0x0a, 0xe0, 0x06, 0x07, 0xaa, 0x97, 0xf8, 0xf9, 0x38, 0x4f, 0xdf, 0x0c, 0x40, 0x7c, 0xc3, 0x98, 0xa9, 0x74, 0xf1, 0x5d, 0xda, 0xd1, 0xc0, 0x0a, + /* (2^ 13)P */ 0xf2, 0x0a, 0xab, 0xab, 0x94, 0x50, 0xf0, 0xa3, 0x6f, 0xc6, 0x66, 0xba, 0xa6, 0xdc, 0x44, 0xdd, 0xd6, 0x08, 0xf4, 0xd3, 0xed, 0xb1, 0x40, 0x93, 0xee, 0xf6, 0xb8, 0x8e, 0xb4, 0x7c, 0xb9, 0x82, 0xc9, 0x9d, 0x45, 0x3b, 0x8e, 0x10, 0xcb, 0x70, 0x1e, 0xba, 0x3c, 0x62, 0x50, 0xda, 0xa9, 0x93, 0xb5, 0xd7, 0xd0, 0x6f, 0x29, 0x52, 0x95, 0xae, + /* (2^ 14)P */ 0x14, 0x68, 0x69, 0x23, 0xa8, 0x44, 0x87, 0x9e, 0x22, 0x91, 0xe8, 0x92, 0xdf, 0xf7, 0xae, 0xba, 0x1c, 0x96, 0xe1, 0xc3, 0x94, 0xed, 0x6c, 0x95, 0xae, 0x96, 0xa7, 0x15, 0x9f, 0xf1, 0x17, 0x11, 0x92, 0x42, 0xd5, 0xcd, 0x18, 0xe7, 0xa9, 0xb5, 0x2f, 0xcd, 0xde, 0x6c, 0xc9, 0x7d, 0xfc, 0x7e, 0xbd, 0x7f, 0x10, 0x3d, 0x01, 0x00, 0x8d, 0x95, + /* (2^ 15)P */ 0x3b, 0x76, 0x72, 0xae, 0xaf, 0x84, 0xf2, 0xf7, 0xd1, 0x6d, 0x13, 0x9c, 0x47, 0xe1, 0xb7, 0xa3, 0x19, 0x16, 0xee, 0x75, 0x45, 0xf6, 0x1a, 0x7b, 0x78, 0x49, 0x79, 0x05, 0x86, 0xf0, 0x7f, 0x9f, 0xfc, 0xc4, 0xbd, 0x86, 0xf3, 0x41, 0xa7, 0xfe, 0x01, 0xd5, 0x67, 0x16, 0x10, 0x5b, 0xa5, 0x16, 0xf3, 0x7f, 0x60, 0xce, 0xd2, 0x0c, 0x8e, 0x4b, + /* (2^ 16)P */ 0x4a, 0x07, 0x99, 0x4a, 0x0f, 0x74, 0x91, 0x14, 0x68, 0xb9, 0x48, 0xb7, 0x44, 0x77, 0x9b, 0x4a, 0xe0, 0x68, 0x0e, 0x43, 0x4d, 0x98, 0x98, 0xbf, 0xa8, 0x3a, 0xb7, 0x6d, 0x2a, 0x9a, 0x77, 0x5f, 0x62, 0xf5, 0x6b, 0x4a, 0xb7, 0x7d, 0xe5, 0x09, 0x6b, 0xc0, 0x8b, 0x9c, 0x88, 0x37, 0x33, 0xf2, 0x41, 0xac, 0x22, 0x1f, 0xcf, 0x3b, 0x82, 0x34, + /* (2^ 17)P */ 0x00, 0xc3, 0x78, 0x42, 0x32, 0x2e, 0xdc, 0xda, 0xb1, 0x96, 0x21, 0xa4, 0xe4, 0xbb, 0xe9, 0x9d, 0xbb, 0x0f, 0x93, 0xed, 0x26, 0x3d, 0xb5, 0xdb, 0x94, 0x31, 0x37, 0x07, 0xa2, 0xb2, 0xd5, 0x99, 0x0d, 0x93, 0xe1, 0xce, 0x3f, 0x0b, 0x96, 0x82, 0x47, 0xfe, 0x60, 0x6f, 0x8f, 0x61, 0x88, 0xd7, 0x05, 0x95, 0x0b, 0x46, 0x06, 0xb7, 0x32, 0x06, + /* (2^ 18)P */ 0x44, 0xf5, 0x34, 0xdf, 0x2f, 
0x9c, 0x5d, 0x9f, 0x53, 0x5c, 0x42, 0x8f, 0xc9, 0xdc, 0xd8, 0x40, 0xa2, 0xe7, 0x6a, 0x4a, 0x05, 0xf7, 0x86, 0x77, 0x2b, 0xae, 0x37, 0xed, 0x48, 0xfb, 0xf7, 0x62, 0x7c, 0x17, 0x59, 0x92, 0x41, 0x61, 0x93, 0x38, 0x30, 0xd1, 0xef, 0x54, 0x54, 0x03, 0x17, 0x57, 0x91, 0x15, 0x11, 0x33, 0xb5, 0xfa, 0xfb, 0x17, + /* (2^ 19)P */ 0x29, 0xbb, 0xd4, 0xb4, 0x9c, 0xf1, 0x72, 0x94, 0xce, 0x6a, 0x29, 0xa8, 0x89, 0x18, 0x19, 0xf7, 0xb7, 0xcc, 0xee, 0x9a, 0x02, 0xe3, 0xc0, 0xb1, 0xe0, 0xee, 0x83, 0x78, 0xb4, 0x9e, 0x07, 0x87, 0xdf, 0xb0, 0x82, 0x26, 0x4e, 0xa4, 0x0c, 0x33, 0xaf, 0x40, 0x59, 0xb6, 0xdd, 0x52, 0x45, 0xf0, 0xb4, 0xf6, 0xe8, 0x4e, 0x4e, 0x79, 0x1a, 0x5d, + /* (2^ 20)P */ 0x27, 0x33, 0x4d, 0x4c, 0x6b, 0x4f, 0x75, 0xb1, 0xbc, 0x1f, 0xab, 0x5b, 0x2b, 0xf0, 0x1c, 0x57, 0x86, 0xdd, 0xfd, 0x60, 0xb0, 0x8c, 0xe7, 0x9a, 0xe5, 0x5c, 0xeb, 0x11, 0x3a, 0xda, 0x22, 0x25, 0x99, 0x06, 0x8d, 0xf4, 0xaf, 0x29, 0x7a, 0xc9, 0xe5, 0xd2, 0x16, 0x9e, 0xd4, 0x63, 0x1d, 0x64, 0xa6, 0x47, 0x96, 0x37, 0x6f, 0x93, 0x2c, 0xcc, + /* (2^ 21)P */ 0xc1, 0x94, 0x74, 0x86, 0x75, 0xf2, 0x91, 0x58, 0x23, 0x85, 0x63, 0x76, 0x54, 0xc7, 0xb4, 0x8c, 0xbc, 0x4e, 0xc4, 0xa7, 0xba, 0xa0, 0x55, 0x26, 0x71, 0xd5, 0x33, 0x72, 0xc9, 0xad, 0x1e, 0xf9, 0x5d, 0x78, 0x70, 0x93, 0x4e, 0x85, 0xfc, 0x39, 0x06, 0x73, 0x76, 0xff, 0xe8, 0x64, 0x69, 0x42, 0x45, 0xb2, 0x69, 0xb5, 0x32, 0xe7, 0x2c, 0xde, + /* (2^ 22)P */ 0xde, 0x16, 0xd8, 0x33, 0x49, 0x32, 0xe9, 0x0e, 0x3a, 0x60, 0xee, 0x2e, 0x24, 0x75, 0xe3, 0x9c, 0x92, 0x07, 0xdb, 0xad, 0x92, 0xf5, 0x11, 0xdf, 0xdb, 0xb0, 0x17, 0x5c, 0xd6, 0x1a, 0x70, 0x00, 0xb7, 0xe2, 0x18, 0xec, 0xdc, 0xc2, 0x02, 0x93, 0xb3, 0xc8, 0x3f, 0x4f, 0x1b, 0x96, 0xe6, 0x33, 0x8c, 0xfb, 0xcc, 0xa5, 0x4e, 0xe8, 0xe7, 0x11, + /* (2^ 23)P */ 0x05, 0x7a, 0x74, 0x52, 0xf8, 0xdf, 0x0d, 0x7c, 0x6a, 0x1a, 0x4e, 0x9a, 0x02, 0x1d, 0xae, 0x77, 0xf8, 0x8e, 0xf9, 0xa2, 0x38, 0x54, 0x50, 0xb2, 0x2c, 0x08, 0x9d, 0x9b, 0x9f, 0xfb, 0x2b, 0x06, 0xde, 0x9d, 0xc2, 0x03, 0x0b, 0x22, 0x2b, 0x10, 0x5b, 0x3a, 0x73, 0x29, 0x8e, 0x3e, 0x37, 0x08, 0x2c, 0x3b, 0xf8, 0x80, 0xc1, 0x66, 0x1e, 0x98, + /* (2^ 24)P */ 0xd8, 0xd6, 0x3e, 0xcd, 0x63, 0x8c, 0x2b, 0x41, 0x81, 0xc0, 0x0c, 0x06, 0x87, 0xd6, 0xe7, 0x92, 0xfe, 0xf1, 0x0c, 0x4a, 0x84, 0x5b, 0xaf, 0x40, 0x53, 0x6f, 0x60, 0xd6, 0x6b, 0x76, 0x4b, 0xc2, 0xad, 0xc9, 0xb6, 0xb6, 0x6a, 0xa2, 0xb3, 0xf5, 0xf5, 0xc2, 0x55, 0x83, 0xb2, 0xd3, 0xe9, 0x41, 0x6c, 0x63, 0x51, 0xb8, 0x81, 0x74, 0xc8, 0x2c, + /* (2^ 25)P */ 0xb2, 0xaf, 0x1c, 0xee, 0x07, 0xb0, 0x58, 0xa8, 0x2c, 0x6a, 0xc9, 0x2d, 0x62, 0x28, 0x75, 0x0c, 0x40, 0xb6, 0x11, 0x33, 0x96, 0x80, 0x28, 0x6d, 0xd5, 0x9e, 0x87, 0x90, 0x01, 0x66, 0x1d, 0x1c, 0xf8, 0xb4, 0x92, 0xac, 0x38, 0x18, 0x05, 0xc2, 0x4c, 0x4b, 0x54, 0x7d, 0x80, 0x46, 0x87, 0x2d, 0x99, 0x8e, 0x70, 0x80, 0x69, 0x71, 0x8b, 0xed, + /* (2^ 26)P */ 0x37, 0xa7, 0x6b, 0x71, 0x36, 0x75, 0x8e, 0xff, 0x0f, 0x42, 0xda, 0x5a, 0x46, 0xa6, 0x97, 0x79, 0x7e, 0x30, 0xb3, 0x8f, 0xc7, 0x3a, 0xa0, 0xcb, 0x1d, 0x9c, 0x78, 0x77, 0x36, 0xc2, 0xe7, 0xf4, 0x2f, 0x29, 0x07, 0xb1, 0x07, 0xfd, 0xed, 0x1b, 0x39, 0x77, 0x06, 0x38, 0x77, 0x0f, 0x50, 0x31, 0x12, 0xbf, 0x92, 0xbf, 0x72, 0x79, 0x54, 0xa9, + /* (2^ 27)P */ 0xbd, 0x4d, 0x46, 0x6b, 0x1a, 0x80, 0x46, 0x2d, 0xed, 0xfd, 0x64, 0x6d, 0x94, 0xbc, 0x4a, 0x6e, 0x0c, 0x12, 0xf6, 0x12, 0xab, 0x54, 0x88, 0xd3, 0x85, 0xac, 0x51, 0xae, 0x6f, 0xca, 0xc4, 0xb7, 0xec, 0x22, 0x54, 0x6d, 0x80, 0xb2, 0x1c, 0x63, 0x33, 0x76, 0x6b, 0x8e, 0x6d, 0x59, 0xcd, 0x73, 0x92, 0x5f, 0xff, 0xad, 0x10, 0x35, 0x70, 0x5f, + /* (2^ 28)P */ 0xb3, 0x84, 0xde, 0xc8, 0x04, 0x43, 0x63, 0xfa, 0x29, 
0xd9, 0xf0, 0x69, 0x65, 0x5a, 0x0c, 0xe8, 0x2e, 0x0b, 0xfe, 0xb0, 0x7a, 0x42, 0xb3, 0xc3, 0xfc, 0xe6, 0xb8, 0x92, 0x29, 0xae, 0xed, 0xec, 0xd5, 0xe8, 0x4a, 0xa1, 0xbd, 0x3b, 0xd3, 0xc0, 0x07, 0xab, 0x65, 0x65, 0x35, 0x9a, 0xa6, 0x5e, 0x78, 0x18, 0x76, 0x1c, 0x15, 0x49, 0xe6, 0x75, + /* (2^ 29)P */ 0x45, 0xb3, 0x92, 0xa9, 0xc3, 0xb8, 0x11, 0x68, 0x64, 0x3a, 0x83, 0x5d, 0xa8, 0x94, 0x6a, 0x9d, 0xaa, 0x27, 0x9f, 0x98, 0x5d, 0xc0, 0x29, 0xf0, 0xc0, 0x4b, 0x14, 0x3c, 0x05, 0xe7, 0xf8, 0xbd, 0x38, 0x22, 0x96, 0x75, 0x65, 0x5e, 0x0d, 0x3f, 0xbb, 0x6f, 0xe8, 0x3f, 0x96, 0x76, 0x9f, 0xba, 0xd9, 0x44, 0x92, 0x96, 0x22, 0xe7, 0x52, 0xe7, + /* (2^ 30)P */ 0xf4, 0xa3, 0x95, 0x90, 0x47, 0xdf, 0x7d, 0xdc, 0xf4, 0x13, 0x87, 0x67, 0x7d, 0x4f, 0x9d, 0xa0, 0x00, 0x46, 0x72, 0x08, 0xc3, 0xa2, 0x7a, 0x3e, 0xe7, 0x6d, 0x52, 0x7c, 0x11, 0x36, 0x50, 0x83, 0x89, 0x64, 0xcb, 0x1f, 0x08, 0x83, 0x46, 0xcb, 0xac, 0xa6, 0xd8, 0x9c, 0x1b, 0xe8, 0x05, 0x47, 0xc7, 0x26, 0x06, 0x83, 0x39, 0xe9, 0xb1, 0x1c, + /* (2^ 31)P */ 0x11, 0xe8, 0xc8, 0x42, 0xbf, 0x30, 0x9c, 0xa3, 0xf1, 0x85, 0x96, 0x95, 0x4f, 0x4f, 0x52, 0xa2, 0xf5, 0x8b, 0x68, 0x24, 0x16, 0xac, 0x9b, 0xa9, 0x27, 0x28, 0x0e, 0x84, 0x03, 0x46, 0x22, 0x5f, 0xf7, 0x0d, 0xa6, 0x85, 0x88, 0xc1, 0x45, 0x4b, 0x85, 0x1a, 0x10, 0x7f, 0xc9, 0x94, 0x20, 0xb0, 0x04, 0x28, 0x12, 0x30, 0xb9, 0xe6, 0x40, 0x6b, + /* (2^ 32)P */ 0xac, 0x1b, 0x57, 0xb6, 0x42, 0xdb, 0x81, 0x8d, 0x76, 0xfd, 0x9b, 0x1c, 0x29, 0x30, 0xd5, 0x3a, 0xcc, 0x53, 0xd9, 0x26, 0x7a, 0x0f, 0x9c, 0x2e, 0x79, 0xf5, 0x62, 0xeb, 0x61, 0x9d, 0x9b, 0x80, 0x39, 0xcd, 0x60, 0x2e, 0x1f, 0x08, 0x22, 0xbc, 0x19, 0xb3, 0x2a, 0x43, 0x44, 0xf2, 0x4e, 0x66, 0xf4, 0x36, 0xa6, 0xa7, 0xbc, 0xa4, 0x15, 0x7e, + /* (2^ 33)P */ 0xc1, 0x90, 0x8a, 0xde, 0xff, 0x78, 0xc3, 0x73, 0x16, 0xee, 0x76, 0xa0, 0x84, 0x60, 0x8d, 0xe6, 0x82, 0x0f, 0xde, 0x4e, 0xc5, 0x99, 0x34, 0x06, 0x90, 0x44, 0x55, 0xf8, 0x91, 0xd8, 0xe1, 0xe4, 0x2c, 0x8a, 0xde, 0x94, 0x1e, 0x78, 0x25, 0x3d, 0xfd, 0xd8, 0x59, 0x7d, 0xaf, 0x6e, 0xbe, 0x96, 0xbe, 0x3c, 0x16, 0x23, 0x0f, 0x4c, 0xa4, 0x28, + /* (2^ 34)P */ 0xba, 0x11, 0x35, 0x57, 0x03, 0xb6, 0xf4, 0x24, 0x89, 0xb8, 0x5a, 0x0d, 0x50, 0x9c, 0xaa, 0x51, 0x7f, 0xa4, 0x0e, 0xfc, 0x71, 0xb3, 0x3b, 0xf1, 0x96, 0x50, 0x23, 0x15, 0xf5, 0xf5, 0xd4, 0x23, 0xdc, 0x8b, 0x26, 0x9e, 0xae, 0xb7, 0x50, 0xcd, 0xc4, 0x25, 0xf6, 0x75, 0x40, 0x9c, 0x37, 0x79, 0x33, 0x60, 0xd4, 0x4b, 0x13, 0x32, 0xee, 0xe2, + /* (2^ 35)P */ 0x43, 0xb8, 0x56, 0x59, 0xf0, 0x68, 0x23, 0xb3, 0xea, 0x70, 0x58, 0x4c, 0x1e, 0x5a, 0x16, 0x54, 0x03, 0xb2, 0xf4, 0x73, 0xb6, 0xd9, 0x5c, 0x9c, 0x6f, 0xcf, 0x82, 0x2e, 0x54, 0x15, 0x46, 0x2c, 0xa3, 0xda, 0x4e, 0x87, 0xf5, 0x2b, 0xba, 0x91, 0xa3, 0xa0, 0x89, 0xba, 0x48, 0x2b, 0xfa, 0x64, 0x02, 0x7f, 0x78, 0x03, 0xd1, 0xe8, 0x3b, 0xe9, + /* (2^ 36)P */ 0x15, 0xa4, 0x71, 0xd4, 0x0c, 0x24, 0xe9, 0x07, 0xa1, 0x43, 0xf4, 0x7f, 0xbb, 0xa2, 0xa6, 0x6b, 0xfa, 0xb7, 0xea, 0x58, 0xd1, 0x96, 0xb0, 0x24, 0x5c, 0xc7, 0x37, 0x4e, 0x60, 0x0f, 0x40, 0xf2, 0x2f, 0x44, 0x70, 0xea, 0x80, 0x63, 0xfe, 0xfc, 0x46, 0x59, 0x12, 0x27, 0xb5, 0x27, 0xfd, 0xb7, 0x73, 0x0b, 0xca, 0x8b, 0xc2, 0xd3, 0x71, 0x08, + /* (2^ 37)P */ 0x26, 0x0e, 0xd7, 0x52, 0x6f, 0xf1, 0xf2, 0x9d, 0xb8, 0x3d, 0xbd, 0xd4, 0x75, 0x97, 0xd8, 0xbf, 0xa8, 0x86, 0x96, 0xa5, 0x80, 0xa0, 0x45, 0x75, 0xf6, 0x77, 0x71, 0xdb, 0x77, 0x96, 0x55, 0x99, 0x31, 0xd0, 0x4f, 0x34, 0xf4, 0x35, 0x39, 0x41, 0xd3, 0x7d, 0xf7, 0xe2, 0x74, 0xde, 0xbe, 0x5b, 0x1f, 0x39, 0x10, 0x21, 0xa3, 0x4d, 0x3b, 0xc8, + /* (2^ 38)P */ 0x04, 0x00, 0x2a, 0x45, 0xb2, 0xaf, 0x9b, 0x18, 0x6a, 0xeb, 0x96, 0x28, 0xa4, 
0x77, 0xd0, 0x13, 0xcf, 0x17, 0x65, 0xe8, 0xc5, 0x81, 0x28, 0xad, 0x39, 0x7a, 0x0b, 0xaa, 0x55, 0x2b, 0xf3, 0xfc, 0x86, 0x40, 0xad, 0x0d, 0x1e, 0x28, 0xa2, 0x2d, 0xc5, 0xd6, 0x04, 0x15, 0xa2, 0x30, 0x3d, 0x12, 0x8e, 0xd6, 0xb5, 0xf7, 0x69, 0xbb, 0x84, 0x20, + /* (2^ 39)P */ 0xd7, 0x7a, 0x77, 0x2c, 0xfb, 0x81, 0x80, 0xe9, 0x1e, 0xc6, 0x36, 0x31, 0x79, 0xc3, 0x7c, 0xa9, 0x57, 0x6b, 0xb5, 0x70, 0xfb, 0xe4, 0xa1, 0xff, 0xfd, 0x21, 0xa5, 0x7c, 0xfa, 0x44, 0xba, 0x0d, 0x96, 0x3d, 0xc4, 0x5c, 0x39, 0x52, 0x87, 0xd7, 0x22, 0x0f, 0x52, 0x88, 0x91, 0x87, 0x96, 0xac, 0xfa, 0x3b, 0xdf, 0xdc, 0x83, 0x8c, 0x99, 0x29, + /* (2^ 40)P */ 0x98, 0x6b, 0x3a, 0x8d, 0x83, 0x17, 0xe1, 0x62, 0xd8, 0x80, 0x4c, 0x97, 0xce, 0x6b, 0xaa, 0x10, 0xa7, 0xc4, 0xe9, 0xeb, 0xa5, 0xfb, 0xc9, 0xdd, 0x2d, 0xeb, 0xfc, 0x9a, 0x71, 0xcd, 0x68, 0x6e, 0xc0, 0x35, 0x64, 0x62, 0x1b, 0x95, 0x12, 0xe8, 0x53, 0xec, 0xf0, 0xf4, 0x86, 0x86, 0x78, 0x18, 0xc4, 0xc6, 0xbc, 0x5a, 0x59, 0x8f, 0x7c, 0x7e, + /* (2^ 41)P */ 0x7f, 0xd7, 0x1e, 0xc5, 0x83, 0xdc, 0x1f, 0xbe, 0x0b, 0xcf, 0x2e, 0x01, 0x01, 0xed, 0xac, 0x17, 0x3b, 0xed, 0xa4, 0x30, 0x96, 0x0e, 0x14, 0x7e, 0x19, 0x2b, 0xa5, 0x67, 0x1e, 0xb3, 0x34, 0x03, 0xa8, 0xbb, 0x0a, 0x7d, 0x08, 0x2d, 0xd5, 0x53, 0x19, 0x6f, 0x13, 0xd5, 0xc0, 0x90, 0x8a, 0xcc, 0xc9, 0x5c, 0xab, 0x24, 0xd7, 0x03, 0xf6, 0x57, + /* (2^ 42)P */ 0x49, 0xcb, 0xb4, 0x96, 0x5f, 0xa6, 0xf8, 0x71, 0x6f, 0x59, 0xad, 0x05, 0x24, 0x2d, 0xaf, 0x67, 0xa8, 0xbe, 0x95, 0xdf, 0x0d, 0x28, 0x5a, 0x7f, 0x6e, 0x87, 0x8c, 0x6e, 0x67, 0x0c, 0xf4, 0xe0, 0x1c, 0x30, 0xc2, 0x66, 0xae, 0x20, 0xa1, 0x34, 0xec, 0x9c, 0xbc, 0xae, 0x3d, 0xa1, 0x28, 0x28, 0x95, 0x1d, 0xc9, 0x3a, 0xa8, 0xfd, 0xfc, 0xa1, + /* (2^ 43)P */ 0xe2, 0x2b, 0x9d, 0xed, 0x02, 0x99, 0x67, 0xbb, 0x2e, 0x16, 0x62, 0x05, 0x70, 0xc7, 0x27, 0xb9, 0x1c, 0x3f, 0xf2, 0x11, 0x01, 0xd8, 0x51, 0xa4, 0x18, 0x92, 0xa9, 0x5d, 0xfb, 0xa9, 0xe4, 0x42, 0xba, 0x38, 0x34, 0x1a, 0x4a, 0xc5, 0x6a, 0x37, 0xde, 0xa7, 0x0c, 0xb4, 0x7e, 0x7f, 0xde, 0xa6, 0xee, 0xcd, 0x55, 0x57, 0x05, 0x06, 0xfd, 0x5d, + /* (2^ 44)P */ 0x2f, 0x32, 0xcf, 0x2e, 0x2c, 0x7b, 0xbe, 0x9a, 0x0c, 0x57, 0x35, 0xf8, 0x87, 0xda, 0x9c, 0xec, 0x48, 0xf2, 0xbb, 0xe2, 0xda, 0x10, 0x58, 0x20, 0xc6, 0xd3, 0x87, 0xe9, 0xc7, 0x26, 0xd1, 0x9a, 0x46, 0x87, 0x90, 0xda, 0xdc, 0xde, 0xc3, 0xb3, 0xf2, 0xe8, 0x6f, 0x4a, 0xe6, 0xe8, 0x9d, 0x98, 0x36, 0x20, 0x03, 0x47, 0x15, 0x3f, 0x64, 0x59, + /* (2^ 45)P */ 0xd4, 0x71, 0x49, 0x0a, 0x67, 0x97, 0xaa, 0x3f, 0xf4, 0x1b, 0x3a, 0x6e, 0x5e, 0x17, 0xcc, 0x0a, 0x8f, 0x81, 0x6a, 0x41, 0x38, 0x77, 0x40, 0x8a, 0x11, 0x42, 0x62, 0xd2, 0x50, 0x32, 0x79, 0x78, 0x28, 0xc2, 0x2e, 0x10, 0x01, 0x94, 0x30, 0x4f, 0x7f, 0x18, 0x17, 0x56, 0x85, 0x4e, 0xad, 0xf7, 0xcb, 0x87, 0x3c, 0x3f, 0x50, 0x2c, 0xc0, 0xba, + /* (2^ 46)P */ 0xbc, 0x30, 0x8e, 0x65, 0x8e, 0x57, 0x5b, 0x38, 0x7a, 0xd4, 0x95, 0x52, 0x7a, 0x32, 0x59, 0x69, 0xcd, 0x9d, 0x47, 0x34, 0x5b, 0x55, 0xa5, 0x24, 0x60, 0xdd, 0xc0, 0xc1, 0x62, 0x73, 0x44, 0xae, 0x4c, 0x9c, 0x65, 0x55, 0x1b, 0x9d, 0x8a, 0x29, 0xb0, 0x1a, 0x52, 0xa8, 0xf1, 0xe6, 0x9a, 0xb3, 0xf6, 0xa3, 0xc9, 0x0a, 0x70, 0x7d, 0x0f, 0xee, + /* (2^ 47)P */ 0x77, 0xd3, 0xe5, 0x8e, 0xfa, 0x00, 0xeb, 0x1b, 0x7f, 0xdc, 0x68, 0x3f, 0x92, 0xbd, 0xb7, 0x0b, 0xb7, 0xb5, 0x24, 0xdf, 0xc5, 0x67, 0x53, 0xd4, 0x36, 0x79, 0xc4, 0x7b, 0x57, 0xbc, 0x99, 0x97, 0x60, 0xef, 0xe4, 0x01, 0xa1, 0xa7, 0xaa, 0x12, 0x36, 0x29, 0xb1, 0x03, 0xc2, 0x83, 0x1c, 0x2b, 0x83, 0xef, 0x2e, 0x2c, 0x23, 0x92, 0xfd, 0xd1, + /* (2^ 48)P */ 0x94, 0xef, 0x03, 0x59, 0xfa, 0x8a, 0x18, 0x76, 0xee, 0x58, 0x08, 0x4d, 0x44, 0xce, 0xf1, 0x52, 0x33, 
0x49, 0xf6, 0x69, 0x71, 0xe3, 0xa9, 0xbc, 0x86, 0xe3, 0x43, 0xde, 0x33, 0x7b, 0x90, 0x8b, 0x3e, 0x7d, 0xd5, 0x4a, 0xf0, 0x23, 0x99, 0xa6, 0xea, 0x5f, 0x08, 0xe5, 0xb9, 0x49, 0x8b, 0x0d, 0x6a, 0x21, 0xab, 0x07, 0x62, 0xcd, 0xc4, 0xbe, + /* (2^ 49)P */ 0x61, 0xbf, 0x70, 0x14, 0xfa, 0x4e, 0x9e, 0x7c, 0x0c, 0xf8, 0xb2, 0x48, 0x71, 0x62, 0x83, 0xd6, 0xd1, 0xdc, 0x9c, 0x29, 0x66, 0xb1, 0x34, 0x9c, 0x8d, 0xe6, 0x88, 0xaf, 0xbe, 0xdc, 0x4d, 0xeb, 0xb0, 0xe7, 0x28, 0xae, 0xb2, 0x05, 0x56, 0xc6, 0x0e, 0x10, 0x26, 0xab, 0x2c, 0x59, 0x72, 0x03, 0x66, 0xfe, 0x8f, 0x2c, 0x51, 0x2d, 0xdc, 0xae, + /* (2^ 50)P */ 0xdc, 0x63, 0xf1, 0x8b, 0x5c, 0x65, 0x0b, 0xf1, 0xa6, 0x22, 0xe2, 0xd9, 0xdb, 0x49, 0xb1, 0x3c, 0x47, 0xc2, 0xfe, 0xac, 0x86, 0x07, 0x52, 0xec, 0xb0, 0x08, 0x69, 0xfb, 0xd1, 0x06, 0xdc, 0x48, 0x5c, 0x3d, 0xb2, 0x4d, 0xb8, 0x1a, 0x4e, 0xda, 0xb9, 0xc1, 0x2b, 0xab, 0x4b, 0x62, 0x81, 0x21, 0x9a, 0xfc, 0x3d, 0x39, 0x83, 0x11, 0x36, 0xeb, + /* (2^ 51)P */ 0x94, 0xf3, 0x17, 0xef, 0xf9, 0x60, 0x54, 0xc3, 0xd7, 0x27, 0x35, 0xc5, 0x98, 0x5e, 0xf6, 0x63, 0x6c, 0xa0, 0x4a, 0xd3, 0xa3, 0x98, 0xd9, 0x42, 0xe3, 0xf1, 0xf8, 0x81, 0x96, 0xa9, 0xea, 0x6d, 0x4b, 0x8e, 0x33, 0xca, 0x94, 0x0d, 0xa0, 0xf7, 0xbb, 0x64, 0xa3, 0x36, 0x6f, 0xdc, 0x5a, 0x94, 0x42, 0xca, 0x06, 0xb2, 0x2b, 0x9a, 0x9f, 0x71, + /* (2^ 52)P */ 0xec, 0xdb, 0xa6, 0x1f, 0xdf, 0x15, 0x36, 0xa3, 0xda, 0x8a, 0x7a, 0xb6, 0xa7, 0xe3, 0xaf, 0x52, 0xe0, 0x8d, 0xe8, 0xf2, 0x44, 0x20, 0xeb, 0xa1, 0x20, 0xc4, 0x65, 0x3c, 0x7c, 0x6c, 0x49, 0xed, 0x2f, 0x66, 0x23, 0x68, 0x61, 0x91, 0x40, 0x9f, 0x50, 0x19, 0xd1, 0x84, 0xa7, 0xe2, 0xed, 0x34, 0x37, 0xe3, 0xe4, 0x11, 0x7f, 0x87, 0x55, 0x0f, + /* (2^ 53)P */ 0xb3, 0xa1, 0x0f, 0xb0, 0x48, 0xc0, 0x4d, 0x96, 0xa7, 0xcf, 0x5a, 0x81, 0xb8, 0x4a, 0x46, 0xef, 0x0a, 0xd3, 0x40, 0x7e, 0x02, 0xe3, 0x63, 0xaa, 0x50, 0xd1, 0x2a, 0x37, 0x22, 0x4a, 0x7f, 0x4f, 0xb6, 0xf9, 0x01, 0x82, 0x78, 0x3d, 0x93, 0x14, 0x11, 0x8a, 0x90, 0x60, 0xcd, 0x45, 0x4e, 0x7b, 0x42, 0xb9, 0x3e, 0x6e, 0x68, 0x1f, 0x36, 0x41, + /* (2^ 54)P */ 0x13, 0x73, 0x0e, 0x4f, 0x79, 0x93, 0x9e, 0x29, 0x70, 0x7b, 0x4a, 0x59, 0x1a, 0x9a, 0xf4, 0x55, 0x08, 0xf0, 0xdb, 0x17, 0x58, 0xec, 0x64, 0xad, 0x7f, 0x29, 0xeb, 0x3f, 0x85, 0x4e, 0x60, 0x28, 0x98, 0x1f, 0x73, 0x4e, 0xe6, 0xa8, 0xab, 0xd5, 0xd6, 0xfc, 0xa1, 0x36, 0x6d, 0x15, 0xc6, 0x13, 0x83, 0xa0, 0xc2, 0x6e, 0xd9, 0xdb, 0xc9, 0xcc, + /* (2^ 55)P */ 0xff, 0xd8, 0x52, 0xa3, 0xdc, 0x99, 0xcf, 0x3e, 0x19, 0xb3, 0x68, 0xd0, 0xb5, 0x0d, 0xb8, 0xee, 0x3f, 0xef, 0x6e, 0xc0, 0x38, 0x28, 0x44, 0x92, 0x78, 0x91, 0x1a, 0x08, 0x78, 0x6c, 0x65, 0x24, 0xf3, 0xa2, 0x3d, 0xf2, 0xe5, 0x79, 0x62, 0x69, 0x29, 0xf4, 0x22, 0xc5, 0xdb, 0x6a, 0xae, 0xf4, 0x44, 0xa3, 0x6f, 0xc7, 0x86, 0xab, 0xef, 0xef, + /* (2^ 56)P */ 0xbf, 0x54, 0x9a, 0x09, 0x5d, 0x17, 0xd0, 0xde, 0xfb, 0xf5, 0xca, 0xff, 0x13, 0x20, 0x88, 0x82, 0x3a, 0xe2, 0xd0, 0x3b, 0xfb, 0x05, 0x76, 0xd1, 0xc0, 0x02, 0x71, 0x3b, 0x94, 0xe8, 0xc9, 0x84, 0xcf, 0xa4, 0xe9, 0x28, 0x7b, 0xf5, 0x09, 0xc3, 0x2b, 0x22, 0x40, 0xf1, 0x68, 0x24, 0x24, 0x7d, 0x9f, 0x6e, 0xcd, 0xfe, 0xb0, 0x19, 0x61, 0xf5, + /* (2^ 57)P */ 0xe8, 0x63, 0x51, 0xb3, 0x95, 0x6b, 0x7b, 0x74, 0x92, 0x52, 0x45, 0xa4, 0xed, 0xea, 0x0e, 0x0d, 0x2b, 0x01, 0x1e, 0x2c, 0xbc, 0x91, 0x06, 0x69, 0xdb, 0x1f, 0xb5, 0x77, 0x1d, 0x56, 0xf5, 0xb4, 0x02, 0x80, 0x49, 0x56, 0x12, 0xce, 0x86, 0x05, 0xc9, 0xd9, 0xae, 0xf3, 0x6d, 0xe6, 0x3f, 0x40, 0x52, 0xe9, 0x49, 0x2b, 0x31, 0x06, 0x86, 0x14, + /* (2^ 58)P */ 0xf5, 0x09, 0x3b, 0xd2, 0xff, 0xdf, 0x11, 0xa5, 0x1c, 0x99, 0xe8, 0x1b, 0xa4, 0x2c, 0x7d, 0x8e, 0xc8, 0xf7, 0x03, 0x46, 0xfa, 
0xb6, 0xde, 0x73, 0x91, 0x7e, 0x5a, 0x7a, 0xd7, 0x9a, 0x5b, 0x80, 0x24, 0x62, 0x5e, 0x92, 0xf1, 0xa3, 0x45, 0xa3, 0x43, 0x92, 0x8a, 0x2a, 0x5b, 0x0c, 0xb4, 0xc8, 0xad, 0x1c, 0xb6, 0x6c, 0x5e, 0x81, 0x18, 0x91, + /* (2^ 59)P */ 0x96, 0xb3, 0xca, 0x2b, 0xe3, 0x7a, 0x59, 0x72, 0x17, 0x74, 0x29, 0x21, 0xe7, 0x78, 0x07, 0xad, 0xda, 0xb6, 0xcd, 0xf9, 0x27, 0x4d, 0xc8, 0xf2, 0x98, 0x22, 0xca, 0xf2, 0x33, 0x74, 0x7a, 0xdd, 0x1e, 0x71, 0xec, 0xe3, 0x3f, 0xe2, 0xa2, 0xd2, 0x38, 0x75, 0xb0, 0xd0, 0x0a, 0xcf, 0x7d, 0x36, 0xdc, 0x49, 0x38, 0x25, 0x34, 0x4f, 0x20, 0x9a, + /* (2^ 60)P */ 0x2b, 0x6e, 0x04, 0x0d, 0x4f, 0x3d, 0x3b, 0x24, 0xf6, 0x4e, 0x5e, 0x0a, 0xbd, 0x48, 0x96, 0xba, 0x81, 0x8f, 0x39, 0x82, 0x13, 0xe6, 0x72, 0xf3, 0x0f, 0xb6, 0x94, 0xf4, 0xc5, 0x90, 0x74, 0x91, 0xa8, 0xf2, 0xc9, 0xca, 0x9a, 0x4d, 0x98, 0xf2, 0xdf, 0x52, 0x4e, 0x97, 0x2f, 0xeb, 0x84, 0xd3, 0xaf, 0xc2, 0xcc, 0xfb, 0x4c, 0x26, 0x4b, 0xe4, + /* (2^ 61)P */ 0x12, 0x9e, 0xfb, 0x9d, 0x78, 0x79, 0x99, 0xdd, 0xb3, 0x0b, 0x2e, 0x56, 0x41, 0x8e, 0x3f, 0x39, 0xb8, 0x97, 0x89, 0x53, 0x9b, 0x8a, 0x3c, 0x40, 0x9d, 0xa4, 0x6c, 0x2e, 0x31, 0x71, 0xc6, 0x0a, 0x41, 0xd4, 0x95, 0x06, 0x5e, 0xc1, 0xab, 0xc2, 0x14, 0xc4, 0xc7, 0x15, 0x08, 0x3a, 0xad, 0x7a, 0xb4, 0x62, 0xa3, 0x0c, 0x90, 0xf4, 0x47, 0x08, + /* (2^ 62)P */ 0x7f, 0xec, 0x09, 0x82, 0xf5, 0x94, 0x09, 0x93, 0x32, 0xd3, 0xdc, 0x56, 0x80, 0x7b, 0x5b, 0x22, 0x80, 0x6a, 0x96, 0x72, 0xb1, 0xc2, 0xd9, 0xa1, 0x8b, 0x66, 0x42, 0x16, 0xe2, 0x07, 0xb3, 0x2d, 0xf1, 0x75, 0x35, 0x72, 0xc7, 0x98, 0xbe, 0x63, 0x3b, 0x20, 0x75, 0x05, 0xc1, 0x3e, 0x31, 0x5a, 0xf7, 0xaa, 0xae, 0x4b, 0xdb, 0x1d, 0xd0, 0x74, + /* (2^ 63)P */ 0x36, 0x5c, 0x74, 0xe6, 0x5d, 0x59, 0x3f, 0x15, 0x4b, 0x4d, 0x4e, 0x67, 0x41, 0xfe, 0x98, 0x1f, 0x49, 0x76, 0x91, 0x0f, 0x9b, 0xf4, 0xaf, 0x86, 0xaf, 0x66, 0x19, 0xed, 0x46, 0xf1, 0x05, 0x9a, 0xcc, 0xd1, 0x14, 0x1f, 0x82, 0x12, 0x8e, 0xe6, 0xf4, 0xc3, 0x42, 0x5c, 0x4e, 0x33, 0x93, 0xbe, 0x30, 0xe7, 0x64, 0xa9, 0x35, 0x00, 0x4d, 0xf9, + /* (2^ 64)P */ 0x1f, 0xc1, 0x1e, 0xb7, 0xe3, 0x7c, 0xfa, 0xa3, 0x6b, 0x76, 0xaf, 0x9c, 0x05, 0x85, 0x4a, 0xa9, 0xfb, 0xe3, 0x7e, 0xf2, 0x49, 0x56, 0xdc, 0x2f, 0x57, 0x10, 0xba, 0x37, 0xb2, 0x62, 0xf5, 0x6b, 0xe5, 0x8f, 0x0a, 0x87, 0xd1, 0x6a, 0xcb, 0x9d, 0x07, 0xd0, 0xf6, 0x38, 0x99, 0x2c, 0x61, 0x4a, 0x4e, 0xd8, 0xd2, 0x88, 0x29, 0x99, 0x11, 0x95, + /* (2^ 65)P */ 0x6f, 0xdc, 0xd5, 0xd6, 0xd6, 0xa7, 0x4c, 0x46, 0x93, 0x65, 0x62, 0x23, 0x95, 0x32, 0x9c, 0xde, 0x40, 0x41, 0x68, 0x2c, 0x18, 0x4e, 0x5a, 0x8c, 0xc0, 0xc5, 0xc5, 0xea, 0x5c, 0x45, 0x0f, 0x60, 0x78, 0x39, 0xb6, 0x36, 0x23, 0x12, 0xbc, 0x21, 0x9a, 0xf8, 0x91, 0xac, 0xc4, 0x70, 0xdf, 0x85, 0x8e, 0x3c, 0xec, 0x22, 0x04, 0x98, 0xa8, 0xaa, + /* (2^ 66)P */ 0xcc, 0x52, 0x10, 0x5b, 0x4b, 0x6c, 0xc5, 0xfa, 0x3e, 0xd4, 0xf8, 0x1c, 0x04, 0x14, 0x48, 0x33, 0xd9, 0xfc, 0x5f, 0xb0, 0xa5, 0x48, 0x8c, 0x45, 0x8a, 0xee, 0x3e, 0xa7, 0xc1, 0x2e, 0x34, 0xca, 0xf6, 0xc9, 0xeb, 0x10, 0xbb, 0xe1, 0x59, 0x84, 0x25, 0xe8, 0x81, 0x70, 0xc0, 0x09, 0x42, 0xa7, 0x3b, 0x0d, 0x33, 0x00, 0xb5, 0x77, 0xbe, 0x25, + /* (2^ 67)P */ 0xcd, 0x1f, 0xbc, 0x7d, 0xef, 0xe5, 0xca, 0x91, 0xaf, 0xa9, 0x59, 0x6a, 0x09, 0xca, 0xd6, 0x1b, 0x3d, 0x55, 0xde, 0xa2, 0x6a, 0x80, 0xd6, 0x95, 0x47, 0xe4, 0x5f, 0x68, 0x54, 0x08, 0xdf, 0x29, 0xba, 0x2a, 0x02, 0x84, 0xe8, 0xe9, 0x00, 0x77, 0x99, 0x36, 0x03, 0xf6, 0x4a, 0x3e, 0x21, 0x81, 0x7d, 0xb8, 0xa4, 0x8a, 0xa2, 0x05, 0xef, 0xbc, + /* (2^ 68)P */ 0x7c, 0x59, 0x5f, 0x66, 0xd9, 0xb7, 0x83, 0x43, 0x8a, 0xa1, 0x8d, 0x51, 0x70, 0xba, 0xf2, 0x9b, 0x95, 0xc0, 0x4b, 0x4c, 0xa0, 0x14, 0xd3, 0xa4, 0x5d, 
0x4a, 0x37, 0x36, 0x97, 0x31, 0x1e, 0x12, 0xe7, 0xbb, 0x08, 0x67, 0xa5, 0x23, 0xd7, 0xfb, 0x97, 0xd8, 0x6a, 0x03, 0xb1, 0xf8, 0x7f, 0xda, 0x58, 0xd9, 0x3f, 0x73, 0x4a, 0x53, 0xe1, 0x7b, + /* (2^ 69)P */ 0x55, 0x83, 0x98, 0x78, 0x6c, 0x56, 0x5e, 0xed, 0xf7, 0x23, 0x3e, 0x4c, 0x7d, 0x09, 0x2d, 0x09, 0x9c, 0x58, 0x8b, 0x32, 0xca, 0xfe, 0xbf, 0x47, 0x03, 0xeb, 0x4d, 0xe7, 0xeb, 0x9c, 0x83, 0x05, 0x68, 0xaa, 0x80, 0x89, 0x44, 0xf9, 0xd4, 0xdc, 0xdb, 0xb1, 0xdb, 0x77, 0xac, 0xf9, 0x2a, 0xae, 0x35, 0xac, 0x74, 0xb5, 0x95, 0x62, 0x18, 0x85, + /* (2^ 70)P */ 0xab, 0x82, 0x7e, 0x10, 0xd7, 0xe6, 0x57, 0xd1, 0x66, 0x12, 0x31, 0x9c, 0x9c, 0xa6, 0x27, 0x59, 0x71, 0x2e, 0xeb, 0xa0, 0x68, 0xc5, 0x87, 0x51, 0xf4, 0xca, 0x3f, 0x98, 0x56, 0xb0, 0x89, 0xb1, 0xc7, 0x7b, 0x46, 0xb3, 0xae, 0x36, 0xf2, 0xee, 0x15, 0x1a, 0x60, 0xf4, 0x50, 0x76, 0x4f, 0xc4, 0x53, 0x0d, 0x36, 0x4d, 0x31, 0xb1, 0x20, 0x51, + /* (2^ 71)P */ 0xf7, 0x1d, 0x8c, 0x1b, 0x5e, 0xe5, 0x02, 0x6f, 0xc5, 0xa5, 0xe0, 0x5f, 0xc6, 0xb6, 0x63, 0x43, 0xaf, 0x3c, 0x19, 0x6c, 0xf4, 0xaf, 0xa4, 0x33, 0xb1, 0x0a, 0x37, 0x3d, 0xd9, 0x4d, 0xe2, 0x29, 0x24, 0x26, 0x94, 0x7c, 0x02, 0xe4, 0xe2, 0xf2, 0xbe, 0xbd, 0xac, 0x1b, 0x48, 0xb8, 0xdd, 0xe9, 0x0d, 0x9a, 0x50, 0x1a, 0x98, 0x71, 0x6e, 0xdc, + /* (2^ 72)P */ 0x9f, 0x40, 0xb1, 0xb3, 0x66, 0x28, 0x6c, 0xfe, 0xa6, 0x7d, 0xf8, 0x3e, 0xb8, 0xf3, 0xde, 0x52, 0x76, 0x52, 0xa3, 0x92, 0x98, 0x23, 0xab, 0x4f, 0x88, 0x97, 0xfc, 0x22, 0xe1, 0x6b, 0x67, 0xcd, 0x13, 0x95, 0xda, 0x65, 0xdd, 0x3b, 0x67, 0x3f, 0x5f, 0x4c, 0xf2, 0x8a, 0xad, 0x98, 0xa7, 0x94, 0x24, 0x45, 0x87, 0x11, 0x7c, 0x75, 0x79, 0x85, + /* (2^ 73)P */ 0x70, 0xbf, 0xf9, 0x3b, 0xa9, 0x44, 0x57, 0x72, 0x96, 0xc9, 0xa4, 0x98, 0x65, 0xbf, 0x87, 0xb3, 0x3a, 0x39, 0x12, 0xde, 0xe5, 0x39, 0x01, 0x4f, 0xf7, 0xc0, 0x71, 0x52, 0x36, 0x85, 0xb3, 0x18, 0xf8, 0x14, 0xc0, 0x6d, 0xae, 0x9e, 0x4f, 0xb0, 0x72, 0x87, 0xac, 0x5c, 0xd1, 0x6c, 0x41, 0x6c, 0x90, 0x9d, 0x22, 0x81, 0xe4, 0x2b, 0xea, 0xe5, + /* (2^ 74)P */ 0xfc, 0xea, 0x1a, 0x65, 0xd9, 0x49, 0x6a, 0x39, 0xb5, 0x96, 0x72, 0x7b, 0x32, 0xf1, 0xd0, 0xe9, 0x45, 0xd9, 0x31, 0x55, 0xc7, 0x34, 0xe9, 0x5a, 0xec, 0x73, 0x0b, 0x03, 0xc4, 0xb3, 0xe6, 0xc9, 0x5e, 0x0a, 0x17, 0xfe, 0x53, 0x66, 0x7f, 0x21, 0x18, 0x74, 0x54, 0x1b, 0xc9, 0x49, 0x16, 0xd2, 0x48, 0xaf, 0x5b, 0x47, 0x7b, 0xeb, 0xaa, 0xc9, + /* (2^ 75)P */ 0x47, 0x04, 0xf5, 0x5a, 0x87, 0x77, 0x9e, 0x21, 0x34, 0x4e, 0x83, 0x88, 0xaf, 0x02, 0x1d, 0xb0, 0x5a, 0x1d, 0x1d, 0x7d, 0x8d, 0x2c, 0xd3, 0x8d, 0x63, 0xa9, 0x45, 0xfb, 0x15, 0x6d, 0x86, 0x45, 0xcd, 0x38, 0x0e, 0xf7, 0x37, 0x79, 0xed, 0x6d, 0x5a, 0xbc, 0x32, 0xcc, 0x66, 0xf1, 0x3a, 0xb2, 0x87, 0x6f, 0x70, 0x71, 0xd9, 0xf2, 0xfa, 0x7b, + /* (2^ 76)P */ 0x68, 0x07, 0xdc, 0x61, 0x40, 0xe4, 0xec, 0x32, 0xc8, 0xbe, 0x66, 0x30, 0x54, 0x80, 0xfd, 0x13, 0x7a, 0xef, 0xae, 0xed, 0x2e, 0x00, 0x6d, 0x3f, 0xbd, 0xfc, 0x91, 0x24, 0x53, 0x7f, 0x63, 0x9d, 0x2e, 0xe3, 0x76, 0xe0, 0xf3, 0xe1, 0x8f, 0x7a, 0xc4, 0x77, 0x0c, 0x91, 0xc0, 0xc2, 0x18, 0x6b, 0x04, 0xad, 0xb6, 0x70, 0x9a, 0x64, 0xc5, 0x82, + /* (2^ 77)P */ 0x7f, 0xea, 0x13, 0xd8, 0x9e, 0xfc, 0x5b, 0x06, 0xb5, 0x4f, 0xda, 0x38, 0xe0, 0x9c, 0xd2, 0x3a, 0xc1, 0x1c, 0x62, 0x70, 0x7f, 0xc6, 0x24, 0x0a, 0x47, 0x04, 0x01, 0xc4, 0x55, 0x09, 0xd1, 0x7a, 0x07, 0xba, 0xa3, 0x80, 0x4f, 0xc1, 0x65, 0x36, 0x6d, 0xc0, 0x10, 0xcf, 0x94, 0xa9, 0xa2, 0x01, 0x44, 0xd1, 0xf9, 0x1c, 0x4c, 0xfb, 0xf8, 0x99, + /* (2^ 78)P */ 0x6c, 0xb9, 0x6b, 0xee, 0x43, 0x5b, 0xb9, 0xbb, 0xee, 0x2e, 0x52, 0xc1, 0xc6, 0xb9, 0x61, 0xd2, 0x93, 0xa5, 0xaf, 0x52, 0xf4, 0xa4, 0x1a, 0x51, 0x61, 0xa7, 0xcb, 0x9e, 0xbb, 
0x56, 0x65, 0xe2, 0xbf, 0x75, 0xb9, 0x9c, 0x50, 0x96, 0x60, 0x81, 0x74, 0x47, 0xc0, 0x04, 0x88, 0x71, 0x76, 0x39, 0x9a, 0xa7, 0xb1, 0x4e, 0x43, 0x15, 0xe0, 0xbb, + /* (2^ 79)P */ 0xbb, 0xce, 0xe2, 0xbb, 0xf9, 0x17, 0x0f, 0x82, 0x40, 0xad, 0x73, 0xe3, 0xeb, 0x3b, 0x06, 0x1a, 0xcf, 0x8e, 0x6e, 0x28, 0xb8, 0x26, 0xd9, 0x5b, 0xb7, 0xb3, 0xcf, 0xb4, 0x6a, 0x1c, 0xbf, 0x7f, 0xb8, 0xb5, 0x79, 0xcf, 0x45, 0x68, 0x7d, 0xc5, 0xeb, 0xf3, 0xbe, 0x39, 0x40, 0xfc, 0x07, 0x90, 0x7a, 0x62, 0xad, 0x86, 0x08, 0x71, 0x25, 0xe1, + /* (2^ 80)P */ 0x9b, 0x46, 0xac, 0xef, 0xc1, 0x4e, 0xa1, 0x97, 0x95, 0x76, 0xf9, 0x1b, 0xc2, 0xb2, 0x6a, 0x41, 0xea, 0x80, 0x3d, 0xe9, 0x08, 0x52, 0x5a, 0xe3, 0xf2, 0x08, 0xc5, 0xea, 0x39, 0x3f, 0x44, 0x71, 0x4d, 0xea, 0x0d, 0x05, 0x23, 0xe4, 0x2e, 0x3c, 0x89, 0xfe, 0x12, 0x8a, 0x95, 0x42, 0x0a, 0x68, 0xea, 0x5a, 0x28, 0x06, 0x9e, 0xe3, 0x5f, 0xe0, + /* (2^ 81)P */ 0x00, 0x61, 0x6c, 0x98, 0x9b, 0xe7, 0xb9, 0x06, 0x1c, 0xc5, 0x1b, 0xed, 0xbe, 0xc8, 0xb3, 0xea, 0x87, 0xf0, 0xc4, 0x24, 0x7d, 0xbb, 0x5d, 0xa4, 0x1d, 0x7a, 0x16, 0x00, 0x55, 0x94, 0x67, 0x78, 0xbd, 0x58, 0x02, 0x82, 0x90, 0x53, 0x76, 0xd4, 0x72, 0x99, 0x51, 0x6f, 0x7b, 0xcf, 0x80, 0x30, 0x31, 0x3b, 0x01, 0xc7, 0xc1, 0xef, 0xe6, 0x42, + /* (2^ 82)P */ 0xe2, 0x35, 0xaf, 0x4b, 0x79, 0xc6, 0x12, 0x24, 0x99, 0xc0, 0x68, 0xb0, 0x43, 0x3e, 0xe5, 0xef, 0xe2, 0x29, 0xea, 0xb8, 0xb3, 0xbc, 0x6a, 0x53, 0x2c, 0x69, 0x18, 0x5a, 0xf9, 0x15, 0xae, 0x66, 0x58, 0x18, 0xd3, 0x2d, 0x4b, 0x00, 0xfd, 0x84, 0xab, 0x4f, 0xae, 0x70, 0x6b, 0x9e, 0x9a, 0xdf, 0x83, 0xfd, 0x2e, 0x3c, 0xcf, 0xf8, 0x88, 0x5b, + /* (2^ 83)P */ 0xa4, 0x90, 0x31, 0x85, 0x13, 0xcd, 0xdf, 0x64, 0xc9, 0xa1, 0x0b, 0xe7, 0xb6, 0x73, 0x8a, 0x1b, 0x22, 0x78, 0x4c, 0xd4, 0xae, 0x48, 0x18, 0x00, 0x00, 0xa8, 0x9f, 0x06, 0xf9, 0xfb, 0x2d, 0xc3, 0xb1, 0x2a, 0xbc, 0x13, 0x99, 0x57, 0xaf, 0xf0, 0x8d, 0x61, 0x54, 0x29, 0xd5, 0xf2, 0x72, 0x00, 0x96, 0xd1, 0x85, 0x12, 0x8a, 0xf0, 0x23, 0xfb, + /* (2^ 84)P */ 0x69, 0xc7, 0xdb, 0xd9, 0x92, 0x75, 0x08, 0x9b, 0xeb, 0xa5, 0x93, 0xd1, 0x1a, 0xf4, 0xf5, 0xaf, 0xe6, 0xc4, 0x4a, 0x0d, 0x35, 0x26, 0x39, 0x9d, 0xd3, 0x17, 0x3e, 0xae, 0x2d, 0xbf, 0x73, 0x9f, 0xb7, 0x74, 0x91, 0xd1, 0xd8, 0x5c, 0x14, 0xf9, 0x75, 0xdf, 0xeb, 0xc2, 0x22, 0xd8, 0x14, 0x8d, 0x86, 0x23, 0x4d, 0xd1, 0x2d, 0xdb, 0x6b, 0x42, + /* (2^ 85)P */ 0x8c, 0xda, 0xc6, 0xf8, 0x71, 0xba, 0x2b, 0x06, 0x78, 0xae, 0xcc, 0x3a, 0xe3, 0xe3, 0xa1, 0x8b, 0xe2, 0x34, 0x6d, 0x28, 0x9e, 0x46, 0x13, 0x4d, 0x9e, 0xa6, 0x73, 0x49, 0x65, 0x79, 0x88, 0xb9, 0x3a, 0xd1, 0x6d, 0x2f, 0x48, 0x2b, 0x0a, 0x7f, 0x58, 0x20, 0x37, 0xf4, 0x0e, 0xbb, 0x4a, 0x95, 0x58, 0x0c, 0x88, 0x30, 0xc4, 0x74, 0xdd, 0xfd, + /* (2^ 86)P */ 0x6d, 0x13, 0x4e, 0x89, 0x2d, 0xa9, 0xa3, 0xed, 0x09, 0xe3, 0x0e, 0x71, 0x3e, 0x4a, 0xab, 0x90, 0xde, 0x03, 0xeb, 0x56, 0x46, 0x60, 0x06, 0xf5, 0x71, 0xe5, 0xee, 0x9b, 0xef, 0xff, 0xc4, 0x2c, 0x9f, 0x37, 0x48, 0x45, 0x94, 0x12, 0x41, 0x81, 0x15, 0x70, 0x91, 0x99, 0x5e, 0x56, 0x6b, 0xf4, 0xa6, 0xc9, 0xf5, 0x69, 0x9d, 0x78, 0x37, 0x57, + /* (2^ 87)P */ 0xf3, 0x51, 0x57, 0x7e, 0x43, 0x6f, 0xc6, 0x67, 0x59, 0x0c, 0xcf, 0x94, 0xe6, 0x3d, 0xb5, 0x07, 0xc9, 0x77, 0x48, 0xc9, 0x68, 0x0d, 0x98, 0x36, 0x62, 0x35, 0x38, 0x1c, 0xf5, 0xc5, 0xec, 0x66, 0x78, 0xfe, 0x47, 0xab, 0x26, 0xd6, 0x44, 0xb6, 0x06, 0x0f, 0x89, 0xe3, 0x19, 0x40, 0x1a, 0xe7, 0xd8, 0x65, 0x55, 0xf7, 0x1a, 0xfc, 0xa3, 0x0e, + /* (2^ 88)P */ 0x0e, 0x30, 0xa6, 0xb7, 0x58, 0x60, 0x62, 0x2a, 0x6c, 0x13, 0xa8, 0x14, 0x9b, 0xb8, 0xf2, 0x70, 0xd8, 0xb1, 0x71, 0x88, 0x8c, 0x18, 0x31, 0x25, 0x93, 0x90, 0xb4, 0xc7, 0x49, 0xd8, 0xd4, 0xdb, 0x1e, 
0x1e, 0x7f, 0xaa, 0xba, 0xc9, 0xf2, 0x5d, 0xa9, 0x3a, 0x43, 0xb4, 0x5c, 0xee, 0x7b, 0xc7, 0x97, 0xb7, 0x66, 0xd7, 0x23, 0xd9, 0x22, 0x59, + /* (2^ 89)P */ 0x28, 0x19, 0xa6, 0xf9, 0x89, 0x20, 0x78, 0xd4, 0x6d, 0xcb, 0x79, 0x8f, 0x61, 0x6f, 0xb2, 0x5c, 0x4f, 0xa6, 0x54, 0x84, 0x95, 0x24, 0x36, 0x64, 0xcb, 0x39, 0xe7, 0x8f, 0x97, 0x9c, 0x5c, 0x3c, 0xfb, 0x51, 0x11, 0x01, 0x17, 0xdb, 0xc9, 0x9b, 0x51, 0x03, 0x9a, 0xe9, 0xe5, 0x24, 0x1e, 0xf5, 0xda, 0xe0, 0x48, 0x02, 0x23, 0xd0, 0x2c, 0x81, + /* (2^ 90)P */ 0x42, 0x1b, 0xe4, 0x91, 0x85, 0x2a, 0x0c, 0xd2, 0x28, 0x66, 0x57, 0x9e, 0x33, 0x8d, 0x25, 0x71, 0x10, 0x65, 0x76, 0xa2, 0x8c, 0x21, 0x86, 0x81, 0x15, 0xc2, 0x27, 0xeb, 0x54, 0x2d, 0x4f, 0x6c, 0xe6, 0xd6, 0x24, 0x9c, 0x1a, 0x12, 0xb8, 0x81, 0xe2, 0x0a, 0xf3, 0xd3, 0xf0, 0xd3, 0xe1, 0x74, 0x1f, 0x9b, 0x11, 0x47, 0xd0, 0xcf, 0xb6, 0x54, + /* (2^ 91)P */ 0x26, 0x45, 0xa2, 0x10, 0xd4, 0x2d, 0xae, 0xc0, 0xb0, 0xe8, 0x86, 0xb3, 0xc7, 0xea, 0x70, 0x87, 0x61, 0xb5, 0xa5, 0x55, 0xbe, 0x88, 0x1d, 0x7a, 0xd9, 0x6f, 0xeb, 0x83, 0xe2, 0x44, 0x7f, 0x98, 0x04, 0xd6, 0x50, 0x9d, 0xa7, 0x86, 0x66, 0x09, 0x63, 0xe1, 0xed, 0x72, 0xb1, 0xe4, 0x1d, 0x3a, 0xfd, 0x47, 0xce, 0x1c, 0xaa, 0x3b, 0x8f, 0x1b, + /* (2^ 92)P */ 0xf4, 0x3c, 0x4a, 0xb6, 0xc2, 0x9c, 0xe0, 0x2e, 0xb7, 0x38, 0xea, 0x61, 0x35, 0x97, 0x10, 0x90, 0xae, 0x22, 0x48, 0xb3, 0xa9, 0xc6, 0x7a, 0xbb, 0x23, 0xf2, 0xf8, 0x1b, 0xa7, 0xa1, 0x79, 0xcc, 0xc4, 0xf8, 0x08, 0x76, 0x8a, 0x5a, 0x1c, 0x1b, 0xc5, 0x33, 0x91, 0xa9, 0xb8, 0xb9, 0xd3, 0xf8, 0x49, 0xcd, 0xe5, 0x82, 0x43, 0xf7, 0xca, 0x68, + /* (2^ 93)P */ 0x38, 0xba, 0xae, 0x44, 0xfe, 0x57, 0x64, 0x56, 0x7c, 0x0e, 0x9c, 0xca, 0xff, 0xa9, 0x82, 0xbb, 0x38, 0x4a, 0xa7, 0xf7, 0x47, 0xab, 0xbe, 0x6d, 0x23, 0x0b, 0x8a, 0xed, 0xc2, 0xb9, 0x8f, 0xf1, 0xec, 0x91, 0x44, 0x73, 0x64, 0xba, 0xd5, 0x8f, 0x37, 0x38, 0x0d, 0xd5, 0xf8, 0x73, 0x57, 0xb6, 0xc2, 0x45, 0xdc, 0x25, 0xb2, 0xb6, 0xea, 0xd9, + /* (2^ 94)P */ 0xbf, 0xe9, 0x1a, 0x40, 0x4d, 0xcc, 0xe6, 0x1d, 0x70, 0x1a, 0x65, 0xcc, 0x34, 0x2c, 0x37, 0x2c, 0x2d, 0x6b, 0x6d, 0xe5, 0x2f, 0x19, 0x9e, 0xe4, 0xe1, 0xaa, 0xd4, 0xab, 0x54, 0xf4, 0xa8, 0xe4, 0x69, 0x2d, 0x8e, 0x4d, 0xd7, 0xac, 0xb0, 0x5b, 0xfe, 0xe3, 0x26, 0x07, 0xc3, 0xf8, 0x1b, 0x43, 0xa8, 0x1d, 0x64, 0xa5, 0x25, 0x88, 0xbb, 0x77, + /* (2^ 95)P */ 0x92, 0xcd, 0x6e, 0xa0, 0x79, 0x04, 0x18, 0xf4, 0x11, 0x58, 0x48, 0xb5, 0x3c, 0x7b, 0xd1, 0xcc, 0xd3, 0x14, 0x2c, 0xa0, 0xdd, 0x04, 0x44, 0x11, 0xb3, 0x6d, 0x2f, 0x0d, 0xf5, 0x2a, 0x75, 0x5d, 0x1d, 0xda, 0x86, 0x8d, 0x7d, 0x6b, 0x32, 0x68, 0xb6, 0x6c, 0x64, 0x9e, 0xde, 0x80, 0x88, 0xce, 0x08, 0xbf, 0x0b, 0xe5, 0x8e, 0x4f, 0x1d, 0xfb, + /* (2^ 96)P */ 0xaf, 0xe8, 0x85, 0xbf, 0x7f, 0x37, 0x8d, 0x66, 0x7c, 0xd5, 0xd3, 0x96, 0xa5, 0x81, 0x67, 0x95, 0xff, 0x48, 0xde, 0xde, 0xd7, 0x7a, 0x46, 0x34, 0xb1, 0x13, 0x70, 0x29, 0xed, 0x87, 0x90, 0xb0, 0x40, 0x2c, 0xa6, 0x43, 0x6e, 0xb6, 0xbc, 0x48, 0x8a, 0xc1, 0xae, 0xb8, 0xd4, 0xe2, 0xc0, 0x32, 0xb2, 0xa6, 0x2a, 0x8f, 0xb5, 0x16, 0x9e, 0xc3, + /* (2^ 97)P */ 0xff, 0x4d, 0xd2, 0xd6, 0x74, 0xef, 0x2c, 0x96, 0xc1, 0x11, 0xa8, 0xb8, 0xfe, 0x94, 0x87, 0x3e, 0xa0, 0xfb, 0x57, 0xa3, 0xfc, 0x7a, 0x7e, 0x6a, 0x59, 0x6c, 0x54, 0xbb, 0xbb, 0xa2, 0x25, 0x38, 0x1b, 0xdf, 0x5d, 0x7b, 0x94, 0x14, 0xde, 0x07, 0x6e, 0xd3, 0xab, 0x02, 0x26, 0x74, 0x16, 0x12, 0xdf, 0x2e, 0x2a, 0xa7, 0xb0, 0xe8, 0x29, 0xc0, + /* (2^ 98)P */ 0x6a, 0x38, 0x0b, 0xd3, 0xba, 0x45, 0x23, 0xe0, 0x04, 0x3b, 0x83, 0x39, 0xc5, 0x11, 0xe6, 0xcf, 0x39, 0x0a, 0xb3, 0xb0, 0x3b, 0x27, 0x29, 0x63, 0x1c, 0xf3, 0x00, 0xe6, 0xd2, 0x55, 0x21, 0x1f, 0x84, 0x97, 0x9f, 0x01, 0x49, 
0x43, 0x30, 0x5f, 0xe0, 0x1d, 0x24, 0xc4, 0x4e, 0xa0, 0x2b, 0x0b, 0x12, 0x55, 0xc3, 0x27, 0xae, 0x08, 0x83, 0x7c, + /* (2^ 99)P */ 0x5d, 0x1a, 0xb7, 0xa9, 0xf5, 0xfd, 0xec, 0xad, 0xb7, 0x87, 0x02, 0x5f, 0x0d, 0x30, 0x4d, 0xe2, 0x65, 0x87, 0xa4, 0x41, 0x45, 0x1d, 0x67, 0xe0, 0x30, 0x5c, 0x13, 0x87, 0xf6, 0x2e, 0x08, 0xc1, 0xc7, 0x12, 0x45, 0xc8, 0x9b, 0xad, 0xb8, 0xd5, 0x57, 0xbb, 0x5c, 0x48, 0x3a, 0xe1, 0x91, 0x5e, 0xf6, 0x4d, 0x8a, 0x63, 0x75, 0x69, 0x0c, 0x01, + /* (2^100)P */ 0x8f, 0x53, 0x2d, 0xa0, 0x71, 0x3d, 0xfc, 0x45, 0x10, 0x96, 0xcf, 0x56, 0xf9, 0xbb, 0x40, 0x3c, 0x86, 0x52, 0x76, 0xbe, 0x84, 0xf9, 0xa6, 0x9d, 0x3d, 0x27, 0xbe, 0xb4, 0x00, 0x49, 0x94, 0xf5, 0x5d, 0xe1, 0x62, 0x85, 0x66, 0xe5, 0xb8, 0x20, 0x2c, 0x09, 0x7d, 0x9d, 0x3d, 0x6e, 0x74, 0x39, 0xab, 0xad, 0xa0, 0x90, 0x97, 0x5f, 0xbb, 0xa7, + /* (2^101)P */ 0xdb, 0x2d, 0x99, 0x08, 0x16, 0x46, 0x83, 0x7a, 0xa8, 0xea, 0x3d, 0x28, 0x5b, 0x49, 0xfc, 0xb9, 0x6d, 0x00, 0x9e, 0x54, 0x4f, 0x47, 0x64, 0x9b, 0x58, 0x4d, 0x07, 0x0c, 0x6f, 0x29, 0x56, 0x0b, 0x00, 0x14, 0x85, 0x96, 0x41, 0x04, 0xb9, 0x5c, 0xa4, 0xf6, 0x16, 0x73, 0x6a, 0xc7, 0x62, 0x0c, 0x65, 0x2f, 0x93, 0xbf, 0xf7, 0xb9, 0xb7, 0xf1, + /* (2^102)P */ 0xeb, 0x6d, 0xb3, 0x46, 0x32, 0xd2, 0xcb, 0x08, 0x94, 0x14, 0xbf, 0x3f, 0xc5, 0xcb, 0x5f, 0x9f, 0x8a, 0x89, 0x0c, 0x1b, 0x45, 0xad, 0x4c, 0x50, 0xb4, 0xe1, 0xa0, 0x6b, 0x11, 0x92, 0xaf, 0x1f, 0x00, 0xcc, 0xe5, 0x13, 0x7e, 0xe4, 0x2e, 0xa0, 0x57, 0xf3, 0xa7, 0x84, 0x79, 0x7a, 0xc2, 0xb7, 0xb7, 0xfc, 0x5d, 0xa5, 0xa9, 0x64, 0xcc, 0xd8, + /* (2^103)P */ 0xa9, 0xc4, 0x12, 0x8b, 0x34, 0x78, 0x3e, 0x38, 0xfd, 0x3f, 0x87, 0xfa, 0x88, 0x94, 0xd5, 0xd9, 0x7f, 0xeb, 0x58, 0xff, 0xb9, 0x45, 0xdb, 0xa1, 0xed, 0x22, 0x28, 0x1d, 0x00, 0x6d, 0x79, 0x85, 0x7a, 0x75, 0x5d, 0xf0, 0xb1, 0x9e, 0x47, 0x28, 0x8c, 0x62, 0xdf, 0xfb, 0x4c, 0x7b, 0xc5, 0x1a, 0x42, 0x95, 0xef, 0x9a, 0xb7, 0x27, 0x7e, 0xda, + /* (2^104)P */ 0xca, 0xd5, 0xc0, 0x17, 0xa1, 0x66, 0x79, 0x9c, 0x2a, 0xb7, 0x0a, 0xfe, 0x62, 0xe4, 0x26, 0x78, 0x90, 0xa7, 0xcb, 0xb0, 0x4f, 0x6d, 0xf9, 0x8f, 0xf7, 0x7d, 0xac, 0xb8, 0x78, 0x1f, 0x41, 0xea, 0x97, 0x1e, 0x62, 0x97, 0x43, 0x80, 0x58, 0x80, 0xb6, 0x69, 0x7d, 0xee, 0x16, 0xd2, 0xa1, 0x81, 0xd7, 0xb1, 0x27, 0x03, 0x48, 0xda, 0xab, 0xec, + /* (2^105)P */ 0x5b, 0xed, 0x40, 0x8e, 0x8c, 0xc1, 0x66, 0x90, 0x7f, 0x0c, 0xb2, 0xfc, 0xbd, 0x16, 0xac, 0x7d, 0x4c, 0x6a, 0xf9, 0xae, 0xe7, 0x4e, 0x11, 0x12, 0xe9, 0xbe, 0x17, 0x09, 0xc6, 0xc1, 0x5e, 0xb5, 0x7b, 0x50, 0x5c, 0x27, 0xfb, 0x80, 0xab, 0x01, 0xfa, 0x5b, 0x9b, 0x75, 0x16, 0x6e, 0xb2, 0x5c, 0x8c, 0x2f, 0xa5, 0x6a, 0x1a, 0x68, 0xa6, 0x90, + /* (2^106)P */ 0x75, 0xfe, 0xb6, 0x96, 0x96, 0x87, 0x4c, 0x12, 0xa9, 0xd1, 0xd8, 0x03, 0xa3, 0xc1, 0x15, 0x96, 0xe8, 0xa0, 0x75, 0x82, 0xa0, 0x6d, 0xea, 0x54, 0xdc, 0x5f, 0x0d, 0x7e, 0xf6, 0x70, 0xb5, 0xdc, 0x7a, 0xf6, 0xc4, 0xd4, 0x21, 0x49, 0xf5, 0xd4, 0x14, 0x6d, 0x48, 0x1d, 0x7c, 0x99, 0x42, 0xdf, 0x78, 0x6b, 0x9d, 0xb9, 0x30, 0x3c, 0xd0, 0x29, + /* (2^107)P */ 0x85, 0xd6, 0xd8, 0xf3, 0x91, 0x74, 0xdd, 0xbd, 0x72, 0x96, 0x10, 0xe4, 0x76, 0x02, 0x5a, 0x72, 0x67, 0xd3, 0x17, 0x72, 0x14, 0x9a, 0x20, 0x5b, 0x0f, 0x8d, 0xed, 0x6d, 0x4e, 0xe3, 0xd9, 0x82, 0xc2, 0x99, 0xee, 0x39, 0x61, 0x69, 0x8a, 0x24, 0x01, 0x92, 0x15, 0xe7, 0xfc, 0xf9, 0x4d, 0xac, 0xf1, 0x30, 0x49, 0x01, 0x0b, 0x6e, 0x0f, 0x20, + /* (2^108)P */ 0xd8, 0x25, 0x94, 0x5e, 0x43, 0x29, 0xf5, 0xcc, 0xe8, 0xe3, 0x55, 0x41, 0x3c, 0x9f, 0x58, 0x5b, 0x00, 0xeb, 0xc5, 0xdf, 0xcf, 0xfb, 0xfd, 0x6e, 0x92, 0xec, 0x99, 0x30, 0xd6, 0x05, 0xdd, 0x80, 0x7a, 0x5d, 0x6d, 0x16, 0x85, 0xd8, 0x9d, 0x43, 0x65, 
0xd8, 0x2c, 0x33, 0x2f, 0x5c, 0x41, 0xea, 0xb7, 0x95, 0x77, 0xf2, 0x9e, 0x59, 0x09, 0xe8, + /* (2^109)P */ 0x00, 0xa0, 0x03, 0x80, 0xcd, 0x60, 0xe5, 0x17, 0xd4, 0x15, 0x99, 0xdd, 0x4f, 0xbf, 0x66, 0xb8, 0xc0, 0xf5, 0xf9, 0xfc, 0x6d, 0x42, 0x18, 0x34, 0x1c, 0x7d, 0x5b, 0xb5, 0x09, 0xd0, 0x99, 0x57, 0x81, 0x0b, 0x62, 0xb3, 0xa2, 0xf9, 0x0b, 0xae, 0x95, 0xb8, 0xc2, 0x3b, 0x0d, 0x5b, 0x00, 0xf1, 0xed, 0xbc, 0x05, 0x9d, 0x61, 0xbc, 0x73, 0x9d, + /* (2^110)P */ 0xd4, 0xdb, 0x29, 0xe5, 0x85, 0xe9, 0xc6, 0x89, 0x2a, 0xa8, 0x54, 0xab, 0xb3, 0x7f, 0x88, 0xc0, 0x4d, 0xe0, 0xd1, 0x74, 0x6e, 0xa3, 0xa7, 0x39, 0xd5, 0xcc, 0xa1, 0x8a, 0xcb, 0x5b, 0x34, 0xad, 0x92, 0xb4, 0xd8, 0xd5, 0x17, 0xf6, 0x77, 0x18, 0x9e, 0xaf, 0x45, 0x3b, 0x03, 0xe2, 0xf8, 0x52, 0x60, 0xdc, 0x15, 0x20, 0x9e, 0xdf, 0xd8, 0x5d, + /* (2^111)P */ 0x02, 0xc1, 0xac, 0x1a, 0x15, 0x8e, 0x6c, 0xf5, 0x1e, 0x1e, 0xba, 0x7e, 0xc2, 0xda, 0x7d, 0x02, 0xda, 0x43, 0xae, 0x04, 0x70, 0x28, 0x54, 0x78, 0x94, 0xf5, 0x4f, 0x07, 0x84, 0x8f, 0xed, 0xaa, 0xc0, 0xb8, 0xcd, 0x7f, 0x7e, 0x33, 0xa3, 0xbe, 0x21, 0x29, 0xc8, 0x56, 0x34, 0xc0, 0x76, 0x87, 0x8f, 0xc7, 0x73, 0x58, 0x90, 0x16, 0xfc, 0xd6, + /* (2^112)P */ 0xb8, 0x3f, 0xe1, 0xdf, 0x3a, 0x91, 0x25, 0x0c, 0xf6, 0x47, 0xa8, 0x89, 0xc4, 0xc6, 0x61, 0xec, 0x86, 0x2c, 0xfd, 0xbe, 0xa4, 0x6f, 0xc2, 0xd4, 0x46, 0x19, 0x70, 0x5d, 0x09, 0x02, 0x86, 0xd3, 0x4b, 0xe9, 0x16, 0x7b, 0xf0, 0x0d, 0x6c, 0xff, 0x91, 0x05, 0xbf, 0x55, 0xb4, 0x00, 0x8d, 0xe5, 0x6d, 0x68, 0x20, 0x90, 0x12, 0xb5, 0x5c, 0x32, + /* (2^113)P */ 0x80, 0x45, 0xc8, 0x51, 0x87, 0xba, 0x1c, 0x5c, 0xcf, 0x5f, 0x4b, 0x3c, 0x9e, 0x3b, 0x36, 0xd2, 0x26, 0xa2, 0x7f, 0xab, 0xb7, 0xbf, 0xda, 0x68, 0x23, 0x8f, 0xc3, 0xa0, 0xfd, 0xad, 0xf1, 0x56, 0x3b, 0xd0, 0x75, 0x2b, 0x44, 0x61, 0xd8, 0xf4, 0xf1, 0x05, 0x49, 0x53, 0x07, 0xee, 0x47, 0xef, 0xc0, 0x7c, 0x9d, 0xe4, 0x15, 0x88, 0xc5, 0x47, + /* (2^114)P */ 0x2d, 0xb5, 0x09, 0x80, 0xb9, 0xd3, 0xd8, 0xfe, 0x4c, 0xd2, 0xa6, 0x6e, 0xd3, 0x75, 0xcf, 0xb0, 0x99, 0xcb, 0x50, 0x8d, 0xe9, 0x67, 0x9b, 0x20, 0xe8, 0x57, 0xd8, 0x14, 0x85, 0x73, 0x6a, 0x74, 0xe0, 0x99, 0xf0, 0x6b, 0x6e, 0x59, 0x30, 0x31, 0x33, 0x96, 0x5f, 0xa1, 0x0c, 0x1b, 0xf4, 0xca, 0x09, 0xe1, 0x9b, 0xb5, 0xcf, 0x6d, 0x0b, 0xeb, + /* (2^115)P */ 0x1a, 0xde, 0x50, 0xa9, 0xac, 0x3e, 0x10, 0x43, 0x4f, 0x82, 0x4f, 0xc0, 0xfe, 0x3f, 0x33, 0xd2, 0x64, 0x86, 0x50, 0xa9, 0x51, 0x76, 0x5e, 0x50, 0x97, 0x6c, 0x73, 0x8d, 0x77, 0xa3, 0x75, 0x03, 0xbc, 0xc9, 0xfb, 0x50, 0xd9, 0x6d, 0x16, 0xad, 0x5d, 0x32, 0x3d, 0xac, 0x44, 0xdf, 0x51, 0xf7, 0x19, 0xd4, 0x0b, 0x57, 0x78, 0x0b, 0x81, 0x4e, + /* (2^116)P */ 0x32, 0x24, 0xf1, 0x6c, 0x55, 0x62, 0x1d, 0xb3, 0x1f, 0xda, 0xfa, 0x6a, 0x8f, 0x98, 0x01, 0x16, 0xde, 0x44, 0x50, 0x0d, 0x2e, 0x6c, 0x0b, 0xa2, 0xd3, 0x74, 0x0e, 0xa9, 0xbf, 0x8d, 0xa9, 0xc8, 0xc8, 0x2f, 0x62, 0xc1, 0x35, 0x5e, 0xfd, 0x3a, 0xb3, 0x83, 0x2d, 0xee, 0x4e, 0xfd, 0x5c, 0x5e, 0xad, 0x85, 0xa5, 0x10, 0xb5, 0x4f, 0x34, 0xa7, + /* (2^117)P */ 0xd1, 0x58, 0x6f, 0xe6, 0x54, 0x2c, 0xc2, 0xcd, 0xcf, 0x83, 0xdc, 0x88, 0x0c, 0xb9, 0xb4, 0x62, 0x18, 0x89, 0x65, 0x28, 0xe9, 0x72, 0x4b, 0x65, 0xcf, 0xd6, 0x90, 0x88, 0xd7, 0x76, 0x17, 0x4f, 0x74, 0x64, 0x1e, 0xcb, 0xd3, 0xf5, 0x4b, 0xaa, 0x2e, 0x4d, 0x2d, 0x7c, 0x13, 0x1f, 0xfd, 0xd9, 0x60, 0x83, 0x7e, 0xda, 0x64, 0x1c, 0xdc, 0x9f, + /* (2^118)P */ 0xad, 0xef, 0xac, 0x1b, 0xc1, 0x30, 0x5a, 0x15, 0xc9, 0x1f, 0xac, 0xf1, 0xca, 0x44, 0x95, 0x95, 0xea, 0xf2, 0x22, 0xe7, 0x8d, 0x25, 0xf0, 0xff, 0xd8, 0x71, 0xf7, 0xf8, 0x8f, 0x8f, 0xcd, 0xf4, 0x1e, 0xfe, 0x6c, 0x68, 0x04, 0xb8, 0x78, 0xa1, 0x5f, 0xa6, 0x5d, 0x5e, 0xf9, 
0x8d, 0xea, 0x80, 0xcb, 0xf3, 0x17, 0xa6, 0x03, 0xc9, 0x38, 0xd5, + /* (2^119)P */ 0x79, 0x14, 0x31, 0xc3, 0x38, 0xe5, 0xaa, 0xbf, 0x17, 0xa3, 0x04, 0x4e, 0x80, 0x59, 0x9c, 0x9f, 0x19, 0x39, 0xe4, 0x2d, 0x23, 0x54, 0x4a, 0x7f, 0x3e, 0xf3, 0xd9, 0xc7, 0xba, 0x6c, 0x8f, 0x6b, 0xfa, 0x34, 0xb5, 0x23, 0x17, 0x1d, 0xff, 0x1d, 0xea, 0x1f, 0xd7, 0xba, 0x61, 0xb2, 0xe0, 0x38, 0x6a, 0xe9, 0xcf, 0x48, 0x5d, 0x6a, 0x10, 0x9c, + /* (2^120)P */ 0xc8, 0xbb, 0x13, 0x1c, 0x3f, 0x3c, 0x34, 0xfd, 0xac, 0x37, 0x52, 0x44, 0x25, 0xa8, 0xde, 0x1d, 0x63, 0xf4, 0x81, 0x9a, 0xbe, 0x0b, 0x74, 0x2e, 0xc8, 0x51, 0x16, 0xd3, 0xac, 0x4a, 0xaf, 0xe2, 0x5f, 0x3a, 0x89, 0x32, 0xd1, 0x9b, 0x7c, 0x90, 0x0d, 0xac, 0xdc, 0x8b, 0x73, 0x45, 0x45, 0x97, 0xb1, 0x90, 0x2c, 0x1b, 0x31, 0xca, 0xb1, 0x94, + /* (2^121)P */ 0x07, 0x28, 0xdd, 0x10, 0x14, 0xa5, 0x95, 0x7e, 0xf3, 0xe4, 0xd4, 0x14, 0xb4, 0x7e, 0x76, 0xdb, 0x42, 0xd6, 0x94, 0x3e, 0xeb, 0x44, 0x64, 0x88, 0x0d, 0xec, 0xc1, 0x21, 0xf0, 0x79, 0xe0, 0x83, 0x67, 0x55, 0x53, 0xc2, 0xf6, 0xc5, 0xc5, 0x89, 0x39, 0xe8, 0x42, 0xd0, 0x17, 0xbd, 0xff, 0x35, 0x59, 0x0e, 0xc3, 0x06, 0x86, 0xd4, 0x64, 0xcf, + /* (2^122)P */ 0x91, 0xa8, 0xdb, 0x57, 0x9b, 0xe2, 0x96, 0x31, 0x10, 0x6e, 0xd7, 0x9a, 0x97, 0xb3, 0xab, 0xb5, 0x15, 0x66, 0xbe, 0xcc, 0x6d, 0x9a, 0xac, 0x06, 0xb3, 0x0d, 0xaa, 0x4b, 0x9c, 0x96, 0x79, 0x6c, 0x34, 0xee, 0x9e, 0x53, 0x4d, 0x6e, 0xbd, 0x88, 0x02, 0xbf, 0x50, 0x54, 0x12, 0x5d, 0x01, 0x02, 0x46, 0xc6, 0x74, 0x02, 0x8c, 0x24, 0xae, 0xb1, + /* (2^123)P */ 0xf5, 0x22, 0xea, 0xac, 0x7d, 0x9c, 0x33, 0x8a, 0xa5, 0x36, 0x79, 0x6a, 0x4f, 0xa4, 0xdc, 0xa5, 0x73, 0x64, 0xc4, 0x6f, 0x43, 0x02, 0x3b, 0x94, 0x66, 0xd2, 0x4b, 0x4f, 0xf6, 0x45, 0x33, 0x5d, 0x10, 0x33, 0x18, 0x1e, 0xa3, 0xfc, 0xf7, 0xd2, 0xb8, 0xc8, 0xa7, 0xe0, 0x76, 0x8a, 0xcd, 0xff, 0x4f, 0x99, 0x34, 0x47, 0x84, 0x91, 0x96, 0x9f, + /* (2^124)P */ 0x8a, 0x48, 0x3b, 0x48, 0x4a, 0xbc, 0xac, 0xe2, 0x80, 0xd6, 0xd2, 0x35, 0xde, 0xd0, 0x56, 0x42, 0x33, 0xb3, 0x56, 0x5a, 0xcd, 0xb8, 0x3d, 0xb5, 0x25, 0xc1, 0xed, 0xff, 0x87, 0x0b, 0x79, 0xff, 0xf2, 0x62, 0xe1, 0x76, 0xc6, 0xa2, 0x0f, 0xa8, 0x9b, 0x0d, 0xcc, 0x3f, 0x3d, 0x35, 0x27, 0x8d, 0x0b, 0x74, 0xb0, 0xc3, 0x78, 0x8c, 0xcc, 0xc8, + /* (2^125)P */ 0xfc, 0x9a, 0x0c, 0xa8, 0x49, 0x42, 0xb8, 0xdf, 0xcf, 0xb3, 0x19, 0xa6, 0x64, 0x57, 0xfe, 0xe8, 0xf8, 0xa6, 0x4b, 0x86, 0xa1, 0xd5, 0x83, 0x7f, 0x14, 0x99, 0x18, 0x0c, 0x7d, 0x5b, 0xf7, 0x3d, 0xf9, 0x4b, 0x79, 0xb1, 0x86, 0x30, 0xb4, 0x5e, 0x6a, 0xe8, 0x9d, 0xfa, 0x8a, 0x41, 0xc4, 0x30, 0xfc, 0x56, 0x74, 0x14, 0x42, 0xc8, 0x96, 0x0e, + /* (2^126)P */ 0xdf, 0x66, 0xec, 0xbc, 0x44, 0xdb, 0x19, 0xce, 0xd4, 0xb5, 0x49, 0x40, 0x07, 0x49, 0xe0, 0x3a, 0x61, 0x10, 0xfb, 0x7d, 0xba, 0xb1, 0xe0, 0x28, 0x5b, 0x99, 0x59, 0x96, 0xa2, 0xee, 0xe0, 0x23, 0x37, 0x39, 0x1f, 0xe6, 0x57, 0x9f, 0xf8, 0xf8, 0xdc, 0x74, 0xf6, 0x8f, 0x4f, 0x5e, 0x51, 0xa4, 0x12, 0xac, 0xbe, 0xe4, 0xf3, 0xd1, 0xf0, 0x24, + /* (2^127)P */ 0x1e, 0x3e, 0x9a, 0x5f, 0xdf, 0x9f, 0xd6, 0x4e, 0x8a, 0x28, 0xc3, 0xcd, 0x96, 0x9d, 0x57, 0xc7, 0x61, 0x81, 0x90, 0xff, 0xae, 0xb1, 0x4f, 0xc2, 0x96, 0x8b, 0x1a, 0x18, 0xf4, 0x50, 0xcb, 0x31, 0xe1, 0x57, 0xf4, 0x90, 0xa8, 0xea, 0xac, 0xe7, 0x61, 0x98, 0xb6, 0x15, 0xc1, 0x7b, 0x29, 0xa4, 0xc3, 0x18, 0xef, 0xb9, 0xd8, 0xdf, 0xf6, 0xac, + /* (2^128)P */ 0xca, 0xa8, 0x6c, 0xf1, 0xb4, 0xca, 0xfe, 0x31, 0xee, 0x48, 0x38, 0x8b, 0x0e, 0xbb, 0x7a, 0x30, 0xaa, 0xf9, 0xee, 0x27, 0x53, 0x24, 0xdc, 0x2e, 0x15, 0xa6, 0x48, 0x8f, 0xa0, 0x7e, 0xf1, 0xdc, 0x93, 0x87, 0x39, 0xeb, 0x7f, 0x38, 0x92, 0x92, 0x4c, 0x29, 0xe9, 0x57, 0xd8, 0x59, 0xfc, 0xe9, 0x9c, 
0x44, 0xc0, 0x65, 0xcf, 0xac, 0x4b, 0xdc, + /* (2^129)P */ 0xa3, 0xd0, 0x37, 0x8f, 0x86, 0x2f, 0xc6, 0x47, 0x55, 0x46, 0x65, 0x26, 0x4b, 0x91, 0xe2, 0x18, 0x5c, 0x4f, 0x23, 0xc1, 0x37, 0x29, 0xb9, 0xc1, 0x27, 0xc5, 0x3c, 0xbf, 0x7e, 0x23, 0xdb, 0x73, 0x99, 0xbd, 0x1b, 0xb2, 0x31, 0x68, 0x3a, 0xad, 0xb7, 0xb0, 0x10, 0xc5, 0xe5, 0x11, 0x51, 0xba, 0xa7, 0x60, 0x66, 0x54, 0xf0, 0x08, 0xd7, 0x69, + /* (2^130)P */ 0x89, 0x41, 0x79, 0xcc, 0xeb, 0x0a, 0xf5, 0x4b, 0xa3, 0x4c, 0xce, 0x52, 0xb0, 0xa7, 0xe4, 0x41, 0x75, 0x7d, 0x04, 0xbb, 0x09, 0x4c, 0x50, 0x9f, 0xdf, 0xea, 0x74, 0x61, 0x02, 0xad, 0xb4, 0x9d, 0xb7, 0x05, 0xb9, 0xea, 0xeb, 0x91, 0x35, 0xe7, 0x49, 0xea, 0xd3, 0x4f, 0x3c, 0x60, 0x21, 0x7a, 0xde, 0xc7, 0xe2, 0x5a, 0xee, 0x8e, 0x93, 0xc7, + /* (2^131)P */ 0x00, 0xe8, 0xed, 0xd0, 0xb3, 0x0d, 0xaf, 0xb2, 0xde, 0x2c, 0xf6, 0x00, 0xe2, 0xea, 0x6d, 0xf8, 0x0e, 0xd9, 0x67, 0x59, 0xa9, 0x50, 0xbb, 0x17, 0x8f, 0xff, 0xb1, 0x9f, 0x17, 0xb6, 0xf2, 0xb5, 0xba, 0x80, 0xf7, 0x0f, 0xba, 0xd5, 0x09, 0x43, 0xaa, 0x4e, 0x3a, 0x67, 0x6a, 0x89, 0x9b, 0x18, 0x65, 0x35, 0xf8, 0x3a, 0x49, 0x91, 0x30, 0x51, + /* (2^132)P */ 0x8d, 0x25, 0xe9, 0x0e, 0x7d, 0x50, 0x76, 0xe4, 0x58, 0x7e, 0xb9, 0x33, 0xe6, 0x65, 0x90, 0xc2, 0x50, 0x9d, 0x50, 0x2e, 0x11, 0xad, 0xd5, 0x43, 0x52, 0x32, 0x41, 0x4f, 0x7b, 0xb6, 0xa0, 0xec, 0x81, 0x75, 0x36, 0x7c, 0x77, 0x85, 0x59, 0x70, 0xe4, 0xf9, 0xef, 0x66, 0x8d, 0x35, 0xc8, 0x2a, 0x6e, 0x5b, 0xc6, 0x0d, 0x0b, 0x29, 0x60, 0x68, + /* (2^133)P */ 0xf8, 0xce, 0xb0, 0x3a, 0x56, 0x7d, 0x51, 0x9a, 0x25, 0x73, 0xea, 0xdd, 0xe4, 0xe0, 0x0e, 0xf0, 0x07, 0xc0, 0x31, 0x00, 0x73, 0x35, 0xd0, 0x39, 0xc4, 0x9b, 0xb7, 0x95, 0xe0, 0x62, 0x70, 0x36, 0x0b, 0xcb, 0xa0, 0x42, 0xde, 0x51, 0xcf, 0x41, 0xe0, 0xb8, 0xb4, 0xc0, 0xe5, 0x46, 0x99, 0x9f, 0x02, 0x7f, 0x14, 0x8c, 0xc1, 0x4e, 0xef, 0xe8, + /* (2^134)P */ 0x10, 0x01, 0x57, 0x0a, 0xbe, 0x8b, 0x18, 0xc8, 0xca, 0x00, 0x28, 0x77, 0x4a, 0x9a, 0xc7, 0x55, 0x2a, 0xcc, 0x0c, 0x7b, 0xb9, 0xe9, 0xc8, 0x97, 0x7c, 0x02, 0xe3, 0x09, 0x2f, 0x62, 0x30, 0xb8, 0x40, 0x09, 0x65, 0xe9, 0x55, 0x63, 0xb5, 0x07, 0xca, 0x9f, 0x00, 0xdf, 0x9d, 0x5c, 0xc7, 0xee, 0x57, 0xa5, 0x90, 0x15, 0x1e, 0x22, 0xa0, 0x12, + /* (2^135)P */ 0x71, 0x2d, 0xc9, 0xef, 0x27, 0xb9, 0xd8, 0x12, 0x43, 0x6b, 0xa8, 0xce, 0x3b, 0x6d, 0x6e, 0x91, 0x43, 0x23, 0xbc, 0x32, 0xb3, 0xbf, 0xe1, 0xc7, 0x39, 0xcf, 0x7c, 0x42, 0x4c, 0xb1, 0x30, 0xe2, 0xdd, 0x69, 0x06, 0xe5, 0xea, 0xf0, 0x2a, 0x16, 0x50, 0x71, 0xca, 0x92, 0xdf, 0xc1, 0xcc, 0xec, 0xe6, 0x54, 0x07, 0xf3, 0x18, 0x8d, 0xd8, 0x29, + /* (2^136)P */ 0x98, 0x51, 0x48, 0x8f, 0xfa, 0x2e, 0x5e, 0x67, 0xb0, 0xc6, 0x17, 0x12, 0xb6, 0x7d, 0xc9, 0xad, 0x81, 0x11, 0xad, 0x0c, 0x1c, 0x2d, 0x45, 0xdf, 0xac, 0x66, 0xbd, 0x08, 0x6f, 0x7c, 0xc7, 0x06, 0x6e, 0x19, 0x08, 0x39, 0x64, 0xd7, 0xe4, 0xd1, 0x11, 0x5f, 0x1c, 0xf4, 0x67, 0xc3, 0x88, 0x6a, 0xe6, 0x07, 0xa3, 0x83, 0xd7, 0xfd, 0x2a, 0xf9, + /* (2^137)P */ 0x87, 0xed, 0xeb, 0xd9, 0xdf, 0xff, 0x43, 0x8b, 0xaa, 0x20, 0x58, 0xb0, 0xb4, 0x6b, 0x14, 0xb8, 0x02, 0xc5, 0x40, 0x20, 0x22, 0xbb, 0xf7, 0xb4, 0xf3, 0x05, 0x1e, 0x4d, 0x94, 0xff, 0xe3, 0xc5, 0x22, 0x82, 0xfe, 0xaf, 0x90, 0x42, 0x98, 0x6b, 0x76, 0x8b, 0x3e, 0x89, 0x3f, 0x42, 0x2a, 0xa7, 0x26, 0x00, 0xda, 0x5c, 0xa2, 0x2b, 0xec, 0xdd, + /* (2^138)P */ 0x5c, 0x21, 0x16, 0x0d, 0x46, 0xb8, 0xd0, 0xa7, 0x88, 0xe7, 0x25, 0xcb, 0x3e, 0x50, 0x73, 0x61, 0xe7, 0xaf, 0x5a, 0x3f, 0x47, 0x8b, 0x3d, 0x97, 0x79, 0x2c, 0xe6, 0x6d, 0x95, 0x74, 0x65, 0x70, 0x36, 0xfd, 0xd1, 0x9e, 0x13, 0x18, 0x63, 0xb1, 0x2d, 0x0b, 0xb5, 0x36, 0x3e, 0xe7, 0x35, 0x42, 0x3b, 0xe6, 0x1f, 0x4d, 0x9d, 
0x59, 0xa2, 0x43, + /* (2^139)P */ 0x8c, 0x0c, 0x7c, 0x24, 0x9e, 0xe0, 0xf8, 0x05, 0x1c, 0x9e, 0x1f, 0x31, 0xc0, 0x70, 0xb3, 0xfb, 0x4e, 0xf8, 0x0a, 0x57, 0xb7, 0x49, 0xb5, 0x73, 0xa1, 0x5f, 0x9b, 0x6a, 0x07, 0x6c, 0x87, 0x71, 0x87, 0xd4, 0xbe, 0x98, 0x1e, 0x98, 0xee, 0x52, 0xc1, 0x7b, 0x95, 0x0f, 0x28, 0x32, 0x36, 0x28, 0xd0, 0x3a, 0x0f, 0x7d, 0x2a, 0xa9, 0x62, 0xb9, + /* (2^140)P */ 0x97, 0xe6, 0x18, 0x77, 0xf9, 0x34, 0xac, 0xbc, 0xe0, 0x62, 0x9f, 0x42, 0xde, 0xbd, 0x2f, 0xf7, 0x1f, 0xb7, 0x14, 0x52, 0x8a, 0x79, 0xb2, 0x3f, 0xd2, 0x95, 0x71, 0x01, 0xe8, 0xaf, 0x8c, 0xa4, 0xa4, 0xa7, 0x27, 0xf3, 0x5c, 0xdf, 0x3e, 0x57, 0x7a, 0xf1, 0x76, 0x49, 0xe6, 0x42, 0x3f, 0x8f, 0x1e, 0x63, 0x4a, 0x65, 0xb5, 0x41, 0xf5, 0x02, + /* (2^141)P */ 0x72, 0x85, 0xc5, 0x0b, 0xe1, 0x47, 0x64, 0x02, 0xc5, 0x4d, 0x81, 0x69, 0xb2, 0xcf, 0x0f, 0x6c, 0xd4, 0x6d, 0xd0, 0xc7, 0xb4, 0x1c, 0xd0, 0x32, 0x59, 0x89, 0xe2, 0xe0, 0x96, 0x8b, 0x12, 0x98, 0xbf, 0x63, 0x7a, 0x4c, 0x76, 0x7e, 0x58, 0x17, 0x8f, 0x5b, 0x0a, 0x59, 0x65, 0x75, 0xbc, 0x61, 0x1f, 0xbe, 0xc5, 0x6e, 0x0a, 0x57, 0x52, 0x70, + /* (2^142)P */ 0x92, 0x1c, 0x77, 0xbb, 0x62, 0x02, 0x6c, 0x25, 0x9c, 0x66, 0x07, 0x83, 0xab, 0xcc, 0x80, 0x5d, 0xd2, 0x76, 0x0c, 0xa4, 0xc5, 0xb4, 0x8a, 0x68, 0x23, 0x31, 0x32, 0x29, 0x8a, 0x47, 0x92, 0x12, 0x80, 0xb3, 0xfa, 0x18, 0xe4, 0x8d, 0xc0, 0x4d, 0xfe, 0x97, 0x5f, 0x72, 0x41, 0xb5, 0x5c, 0x7a, 0xbd, 0xf0, 0xcf, 0x5e, 0x97, 0xaa, 0x64, 0x32, + /* (2^143)P */ 0x35, 0x3f, 0x75, 0xc1, 0x7a, 0x75, 0x7e, 0xa9, 0xc6, 0x0b, 0x4e, 0x32, 0x62, 0xec, 0xe3, 0x5c, 0xfb, 0x01, 0x43, 0xb6, 0xd4, 0x5b, 0x75, 0xd2, 0xee, 0x7f, 0x5d, 0x23, 0x2b, 0xb3, 0x54, 0x34, 0x4c, 0xd3, 0xb4, 0x32, 0x84, 0x81, 0xb5, 0x09, 0x76, 0x19, 0xda, 0x58, 0xda, 0x7c, 0xdb, 0x2e, 0xdd, 0x4c, 0x8e, 0xdd, 0x5d, 0x89, 0x10, 0x10, + /* (2^144)P */ 0x57, 0x25, 0x6a, 0x08, 0x37, 0x92, 0xa8, 0xdf, 0x24, 0xef, 0x8f, 0x33, 0x34, 0x52, 0xa4, 0x4c, 0xf0, 0x77, 0x9f, 0x69, 0x77, 0xd5, 0x8f, 0xd2, 0x9a, 0xb3, 0xb6, 0x1d, 0x2d, 0xa6, 0xf7, 0x1f, 0xda, 0xd7, 0xcb, 0x75, 0x11, 0xc3, 0x6b, 0xc0, 0x38, 0xb1, 0xd5, 0x2d, 0x96, 0x84, 0x16, 0xfa, 0x26, 0xb9, 0xcc, 0x3f, 0x16, 0x47, 0x23, 0x74, + /* (2^145)P */ 0x9b, 0x61, 0x2a, 0x1c, 0xdd, 0x39, 0xa5, 0xfa, 0x1c, 0x7d, 0x63, 0x50, 0xca, 0xe6, 0x9d, 0xfa, 0xb7, 0xc4, 0x4c, 0x6a, 0x97, 0x5f, 0x36, 0x4e, 0x47, 0xdd, 0x17, 0xf7, 0xf9, 0x19, 0xce, 0x75, 0x17, 0xad, 0xce, 0x2a, 0xf3, 0xfe, 0x27, 0x8f, 0x3e, 0x48, 0xc0, 0x60, 0x87, 0x24, 0x19, 0xae, 0x59, 0xe4, 0x5a, 0x00, 0x2a, 0xba, 0xa2, 0x1f, + /* (2^146)P */ 0x26, 0x88, 0x42, 0x60, 0x9f, 0x6e, 0x2c, 0x7c, 0x39, 0x0f, 0x47, 0x6a, 0x0e, 0x02, 0xbb, 0x4b, 0x34, 0x29, 0x55, 0x18, 0x36, 0xcf, 0x3b, 0x47, 0xf1, 0x2e, 0xfc, 0x6e, 0x94, 0xff, 0xe8, 0x6b, 0x06, 0xd2, 0xba, 0x77, 0x5e, 0x60, 0xd7, 0x19, 0xef, 0x02, 0x9d, 0x3a, 0xc2, 0xb7, 0xa9, 0xd8, 0x57, 0xee, 0x7e, 0x2b, 0xf2, 0x6d, 0x28, 0xda, + /* (2^147)P */ 0xdf, 0xd9, 0x92, 0x11, 0x98, 0x23, 0xe2, 0x45, 0x2f, 0x74, 0x70, 0xee, 0x0e, 0x55, 0x65, 0x79, 0x86, 0x38, 0x17, 0x92, 0x85, 0x87, 0x99, 0x50, 0xd9, 0x7c, 0xdb, 0xa1, 0x10, 0xec, 0x30, 0xb7, 0x40, 0xa3, 0x23, 0x9b, 0x0e, 0x27, 0x49, 0x29, 0x03, 0x94, 0xff, 0x53, 0xdc, 0xd7, 0xed, 0x49, 0xa9, 0x5a, 0x3b, 0xee, 0xd7, 0xc7, 0x65, 0xaf, + /* (2^148)P */ 0xa0, 0xbd, 0xbe, 0x03, 0xee, 0x0c, 0xbe, 0x32, 0x00, 0x7b, 0x52, 0xcb, 0x92, 0x29, 0xbf, 0xa0, 0xc6, 0xd9, 0xd2, 0xd6, 0x15, 0xe8, 0x3a, 0x75, 0x61, 0x65, 0x56, 0xae, 0xad, 0x3c, 0x2a, 0x64, 0x14, 0x3f, 0x8e, 0xc1, 0x2d, 0x0c, 0x8d, 0x20, 0xdb, 0x58, 0x4b, 0xe5, 0x40, 0x15, 0x4b, 0xdc, 0xa8, 0xbd, 0xef, 0x08, 0xa7, 0xd1, 0xf4, 0xb0, + /* 
(2^149)P */ 0xa9, 0x0f, 0x05, 0x94, 0x66, 0xac, 0x1f, 0x65, 0x3f, 0xe1, 0xb8, 0xe1, 0x34, 0x5e, 0x1d, 0x8f, 0xe3, 0x93, 0x03, 0x15, 0xff, 0xb6, 0x65, 0xb6, 0x6e, 0xc0, 0x2f, 0xd4, 0x2e, 0xb9, 0x2c, 0x13, 0x3c, 0x99, 0x1c, 0xb5, 0x87, 0xba, 0x79, 0xcb, 0xf0, 0x18, 0x06, 0x86, 0x04, 0x14, 0x25, 0x09, 0xcd, 0x1c, 0x14, 0xda, 0x35, 0xd0, 0x38, 0x3b, + /* (2^150)P */ 0x1b, 0x04, 0xa3, 0x27, 0xb4, 0xd3, 0x37, 0x48, 0x1e, 0x8f, 0x69, 0xd3, 0x5a, 0x2f, 0x20, 0x02, 0x36, 0xbe, 0x06, 0x7b, 0x6b, 0x6c, 0x12, 0x5b, 0x80, 0x74, 0x44, 0xe6, 0xf8, 0xf5, 0x95, 0x59, 0x29, 0xab, 0x51, 0x47, 0x83, 0x28, 0xe0, 0xad, 0xde, 0xaa, 0xd3, 0xb1, 0x1a, 0xcb, 0xa3, 0xcd, 0x8b, 0x6a, 0xb1, 0xa7, 0x0a, 0xd1, 0xf9, 0xbe, + /* (2^151)P */ 0xce, 0x2f, 0x85, 0xca, 0x74, 0x6d, 0x49, 0xb8, 0xce, 0x80, 0x44, 0xe0, 0xda, 0x5b, 0xcf, 0x2f, 0x79, 0x74, 0xfe, 0xb4, 0x2c, 0x99, 0x20, 0x6e, 0x09, 0x04, 0xfb, 0x6d, 0x57, 0x5b, 0x95, 0x0c, 0x45, 0xda, 0x4f, 0x7f, 0x63, 0xcc, 0x85, 0x5a, 0x67, 0x50, 0x68, 0x71, 0xb4, 0x67, 0xb1, 0x2e, 0xc1, 0x1c, 0xdc, 0xff, 0x2a, 0x7c, 0x10, 0x5e, + /* (2^152)P */ 0xa6, 0xde, 0xf3, 0xd4, 0x22, 0x30, 0x24, 0x9e, 0x0b, 0x30, 0x54, 0x59, 0x7e, 0xa2, 0xeb, 0x89, 0x54, 0x65, 0x3e, 0x40, 0xd1, 0xde, 0xe6, 0xee, 0x4d, 0xbf, 0x5e, 0x40, 0x1d, 0xee, 0x4f, 0x68, 0xd9, 0xa7, 0x2f, 0xb3, 0x64, 0xb3, 0xf5, 0xc8, 0xd3, 0xaa, 0x70, 0x70, 0x3d, 0xef, 0xd3, 0x95, 0x54, 0xdb, 0x3e, 0x94, 0x95, 0x92, 0x1f, 0x45, + /* (2^153)P */ 0x22, 0x80, 0x1d, 0x9d, 0x96, 0xa5, 0x78, 0x6f, 0xe0, 0x1e, 0x1b, 0x66, 0x42, 0xc8, 0xae, 0x9e, 0x46, 0x45, 0x08, 0x41, 0xdf, 0x80, 0xae, 0x6f, 0xdb, 0x15, 0x5a, 0x21, 0x31, 0x7a, 0xd0, 0xf2, 0x54, 0x15, 0x88, 0xd3, 0x0f, 0x7f, 0x14, 0x5a, 0x14, 0x97, 0xab, 0xf4, 0x58, 0x6a, 0x9f, 0xea, 0x74, 0xe5, 0x6b, 0x90, 0x59, 0x2b, 0x48, 0xd9, + /* (2^154)P */ 0x12, 0x24, 0x04, 0xf5, 0x50, 0xc2, 0x8c, 0xb0, 0x7c, 0x46, 0x98, 0xd5, 0x24, 0xad, 0xf6, 0x72, 0xdc, 0x82, 0x1a, 0x60, 0xc1, 0xeb, 0x48, 0xef, 0x7f, 0x6e, 0xe6, 0xcc, 0xdb, 0x7b, 0xae, 0xbe, 0x5e, 0x1e, 0x5c, 0xe6, 0x0a, 0x70, 0xdf, 0xa4, 0xa3, 0x85, 0x1b, 0x1b, 0x7f, 0x72, 0xb9, 0x96, 0x6f, 0xdc, 0x03, 0x76, 0x66, 0xfb, 0xa0, 0x33, + /* (2^155)P */ 0x37, 0x40, 0xbb, 0xbc, 0x68, 0x58, 0x86, 0xca, 0xbb, 0xa5, 0x24, 0x76, 0x3d, 0x48, 0xd1, 0xad, 0xb4, 0xa8, 0xcf, 0xc3, 0xb6, 0xa8, 0xba, 0x1a, 0x3a, 0xbe, 0x33, 0x75, 0x04, 0x5c, 0x13, 0x8c, 0x0d, 0x70, 0x8d, 0xa6, 0x4e, 0x2a, 0xeb, 0x17, 0x3c, 0x22, 0xdd, 0x3e, 0x96, 0x40, 0x11, 0x9e, 0x4e, 0xae, 0x3d, 0xf8, 0x91, 0xd7, 0x50, 0xc8, + /* (2^156)P */ 0xd8, 0xca, 0xde, 0x19, 0xcf, 0x00, 0xe4, 0x73, 0x18, 0x7f, 0x9b, 0x9f, 0xf4, 0x5b, 0x49, 0x49, 0x99, 0xdc, 0xa4, 0x46, 0x21, 0xb5, 0xd7, 0x3e, 0xb7, 0x47, 0x1b, 0xa9, 0x9f, 0x4c, 0x69, 0x7d, 0xec, 0x33, 0xd6, 0x1c, 0x51, 0x7f, 0x47, 0x74, 0x7a, 0x6c, 0xf3, 0xd2, 0x2e, 0xbf, 0xdf, 0x6c, 0x9e, 0x77, 0x3b, 0x34, 0xf6, 0x73, 0x80, 0xed, + /* (2^157)P */ 0x16, 0xfb, 0x16, 0xc3, 0xc2, 0x83, 0xe4, 0xf4, 0x03, 0x7f, 0x52, 0xb0, 0x67, 0x51, 0x7b, 0x24, 0x5a, 0x51, 0xd3, 0xb6, 0x4e, 0x59, 0x76, 0xcd, 0x08, 0x7b, 0x1d, 0x7a, 0x9c, 0x65, 0xae, 0xce, 0xaa, 0xd2, 0x1c, 0x85, 0x66, 0x68, 0x06, 0x15, 0xa8, 0x06, 0xe6, 0x16, 0x37, 0xf4, 0x49, 0x9e, 0x0f, 0x50, 0x37, 0xb1, 0xb2, 0x93, 0x70, 0x43, + /* (2^158)P */ 0x18, 0x3a, 0x16, 0xe5, 0x8d, 0xc8, 0x35, 0xd6, 0x7b, 0x09, 0xec, 0x61, 0x5f, 0x5c, 0x2a, 0x19, 0x96, 0x2e, 0xc3, 0xfd, 0xab, 0xe6, 0x23, 0xae, 0xab, 0xc5, 0xcb, 0xb9, 0x7b, 0x2d, 0x34, 0x51, 0xb9, 0x41, 0x9e, 0x7d, 0xca, 0xda, 0x25, 0x45, 0x14, 0xb0, 0xc7, 0x4d, 0x26, 0x2b, 0xfe, 0x43, 0xb0, 0x21, 0x5e, 0xfa, 0xdc, 0x7c, 0xf9, 0x5a, + /* (2^159)P */ 0x94, 0xad, 
0x42, 0x17, 0xf5, 0xcd, 0x1c, 0x0d, 0xf6, 0x41, 0xd2, 0x55, 0xbb, 0x50, 0xf1, 0xc6, 0xbc, 0xa6, 0xc5, 0x3a, 0xfd, 0x9b, 0x75, 0x3e, 0xf6, 0x1a, 0xa7, 0xb2, 0x6e, 0x64, 0x12, 0xdc, 0x3c, 0xe5, 0xf6, 0xfc, 0x3b, 0xfa, 0x43, 0x81, 0xd4, 0xa5, 0xee, 0xf5, 0x9c, 0x47, 0x2f, 0xd0, 0x9c, 0xde, 0xa1, 0x48, 0x91, 0x9a, 0x34, 0xc1, + /* (2^160)P */ 0x37, 0x1b, 0xb3, 0x88, 0xc9, 0x98, 0x4e, 0xfb, 0x84, 0x4f, 0x2b, 0x0a, 0xb6, 0x8f, 0x35, 0x15, 0xcd, 0x61, 0x7a, 0x5f, 0x5c, 0xa0, 0xca, 0x23, 0xa0, 0x93, 0x1f, 0xcc, 0x3c, 0x39, 0x3a, 0x24, 0xa7, 0x49, 0xad, 0x8d, 0x59, 0xcc, 0x94, 0x5a, 0x16, 0xf5, 0x70, 0xe8, 0x52, 0x1e, 0xee, 0x20, 0x30, 0x17, 0x7e, 0xf0, 0x4c, 0x93, 0x06, 0x5a, + /* (2^161)P */ 0x81, 0xba, 0x3b, 0xd7, 0x3e, 0xb4, 0x32, 0x3a, 0x22, 0x39, 0x2a, 0xfc, 0x19, 0xd9, 0xd2, 0xf6, 0xc5, 0x79, 0x6c, 0x0e, 0xde, 0xda, 0x01, 0xff, 0x52, 0xfb, 0xb6, 0x95, 0x4e, 0x7a, 0x10, 0xb8, 0x06, 0x86, 0x3c, 0xcd, 0x56, 0xd6, 0x15, 0xbf, 0x6e, 0x3e, 0x4f, 0x35, 0x5e, 0xca, 0xbc, 0xa5, 0x95, 0xa2, 0xdf, 0x2d, 0x1d, 0xaf, 0x59, 0xf9, + /* (2^162)P */ 0x69, 0xe5, 0xe2, 0xfa, 0xc9, 0x7f, 0xdd, 0x09, 0xf5, 0x6b, 0x4e, 0x2e, 0xbe, 0xb4, 0xbf, 0x3e, 0xb2, 0xf2, 0x81, 0x30, 0xe1, 0x07, 0xa8, 0x0d, 0x2b, 0xd2, 0x5a, 0x55, 0xbe, 0x4b, 0x86, 0x5d, 0xb0, 0x5e, 0x7c, 0x8f, 0xc1, 0x3c, 0x81, 0x4c, 0xf7, 0x6d, 0x7d, 0xe6, 0x4f, 0x8a, 0x85, 0xc2, 0x2f, 0x28, 0xef, 0x8c, 0x69, 0xc2, 0xc2, 0x1a, + /* (2^163)P */ 0xd9, 0xe4, 0x0e, 0x1e, 0xc2, 0xf7, 0x2f, 0x9f, 0xa1, 0x40, 0xfe, 0x46, 0x16, 0xaf, 0x2e, 0xd1, 0xec, 0x15, 0x9b, 0x61, 0x92, 0xce, 0xfc, 0x10, 0x43, 0x1d, 0x00, 0xf6, 0xbe, 0x20, 0x80, 0x80, 0x6f, 0x3c, 0x16, 0x94, 0x59, 0xba, 0x03, 0x53, 0x6e, 0xb6, 0xdd, 0x25, 0x7b, 0x86, 0xbf, 0x96, 0xf4, 0x2f, 0xa1, 0x96, 0x8d, 0xf9, 0xb3, 0x29, + /* (2^164)P */ 0x3b, 0x04, 0x60, 0x6e, 0xce, 0xab, 0xd2, 0x63, 0x18, 0x53, 0x88, 0x16, 0x4a, 0x6a, 0xab, 0x72, 0x03, 0x68, 0xa5, 0xd4, 0x0d, 0xb2, 0x82, 0x81, 0x1f, 0x2b, 0x5c, 0x75, 0xe8, 0xd2, 0x1d, 0x7f, 0xe7, 0x1b, 0x35, 0x02, 0xde, 0xec, 0xbd, 0xcb, 0xc7, 0x01, 0xd3, 0x95, 0x61, 0xfe, 0xb2, 0x7a, 0x66, 0x09, 0x4c, 0x6d, 0xfd, 0x39, 0xf7, 0x52, + /* (2^165)P */ 0x42, 0xc1, 0x5f, 0xf8, 0x35, 0x52, 0xc1, 0xfe, 0xc5, 0x11, 0x80, 0x1c, 0x11, 0x46, 0x31, 0x11, 0xbe, 0xd0, 0xc4, 0xb6, 0x07, 0x13, 0x38, 0xa0, 0x8d, 0x65, 0xf0, 0x56, 0x9e, 0x16, 0xbf, 0x9d, 0xcd, 0x51, 0x34, 0xf9, 0x08, 0x48, 0x7b, 0x76, 0x0c, 0x7b, 0x30, 0x07, 0xa8, 0x76, 0xaf, 0xa3, 0x29, 0x38, 0xb0, 0x58, 0xde, 0x72, 0x4b, 0x45, + /* (2^166)P */ 0xd4, 0x16, 0xa7, 0xc0, 0xb4, 0x9f, 0xdf, 0x1a, 0x37, 0xc8, 0x35, 0xed, 0xc5, 0x85, 0x74, 0x64, 0x09, 0x22, 0xef, 0xe9, 0x0c, 0xaf, 0x12, 0x4c, 0x9e, 0xf8, 0x47, 0x56, 0xe0, 0x7f, 0x4e, 0x24, 0x6b, 0x0c, 0xe7, 0xad, 0xc6, 0x47, 0x1d, 0xa4, 0x0d, 0x86, 0x89, 0x65, 0xe8, 0x5f, 0x71, 0xc7, 0xe9, 0xcd, 0xec, 0x6c, 0x62, 0xc7, 0xe3, 0xb3, + /* (2^167)P */ 0xb5, 0xea, 0x86, 0xe3, 0x15, 0x18, 0x3f, 0x6d, 0x7b, 0x05, 0x95, 0x15, 0x53, 0x26, 0x1c, 0xeb, 0xbe, 0x7e, 0x16, 0x42, 0x4b, 0xa2, 0x3d, 0xdd, 0x0e, 0xff, 0xba, 0x67, 0xb5, 0xae, 0x7a, 0x17, 0xde, 0x23, 0xad, 0x14, 0xcc, 0xd7, 0xaf, 0x57, 0x01, 0xe0, 0xdd, 0x48, 0xdd, 0xd7, 0xe3, 0xdf, 0xe9, 0x2d, 0xda, 0x67, 0xa4, 0x9f, 0x29, 0x04, + /* (2^168)P */ 0x16, 0x53, 0xe6, 0x9c, 0x4e, 0xe5, 0x1e, 0x70, 0x81, 0x25, 0x02, 0x9b, 0x47, 0x6d, 0xd2, 0x08, 0x73, 0xbe, 0x0a, 0xf1, 0x7b, 0xeb, 0x24, 0xeb, 0x38, 0x23, 0x5c, 0xb6, 0x3e, 0xce, 0x1e, 0xe3, 0xbc, 0x82, 0x35, 0x1f, 0xaf, 0x3a, 0x3a, 0xe5, 0x4e, 0xc1, 0xca, 0xbf, 0x47, 0xb4, 0xbb, 0xbc, 0x5f, 0xea, 0xc6, 0xca, 0xf3, 0xa0, 0xa2, 0x73, + /* (2^169)P */ 0xef, 0xa4, 0x7a, 0x4e, 0xe4, 0xc7, 
0xb6, 0x43, 0x2e, 0xa5, 0xe4, 0xa5, 0xba, 0x1e, 0xa5, 0xfe, 0x9e, 0xce, 0xa9, 0x80, 0x04, 0xcb, 0x4f, 0xd8, 0x74, 0x05, 0x48, 0xfa, 0x99, 0x11, 0x5d, 0x97, 0x3b, 0x07, 0x0d, 0xdd, 0xe6, 0xb1, 0x74, 0x87, 0x1a, 0xd3, 0x26, 0xb7, 0x8f, 0xe1, 0x63, 0x3d, 0xec, 0x53, 0x93, 0xb0, 0x81, 0x78, 0x34, 0xa4, + /* (2^170)P */ 0xe1, 0xe7, 0xd4, 0x58, 0x9d, 0x0e, 0x8b, 0x65, 0x66, 0x37, 0x16, 0x48, 0x6f, 0xaa, 0x42, 0x37, 0x77, 0xad, 0xb1, 0x56, 0x48, 0xdf, 0x65, 0x36, 0x30, 0xb8, 0x00, 0x12, 0xd8, 0x32, 0x28, 0x7f, 0xc1, 0x71, 0xeb, 0x93, 0x0f, 0x48, 0x04, 0xe1, 0x5a, 0x6a, 0x96, 0xc1, 0xca, 0x89, 0x6d, 0x1b, 0x82, 0x4c, 0x18, 0x6d, 0x55, 0x4b, 0xea, 0xfd, + /* (2^171)P */ 0x62, 0x1a, 0x53, 0xb4, 0xb1, 0xbe, 0x6f, 0x15, 0x18, 0x88, 0xd4, 0x66, 0x61, 0xc7, 0x12, 0x69, 0x02, 0xbd, 0x03, 0x23, 0x2b, 0xef, 0xf9, 0x54, 0xa4, 0x85, 0xa8, 0xe3, 0xb7, 0xbd, 0xa9, 0xa3, 0xf3, 0x2a, 0xdd, 0xf1, 0xd4, 0x03, 0x0f, 0xa9, 0xa1, 0xd8, 0xa3, 0xcd, 0xb2, 0x71, 0x90, 0x4b, 0x35, 0x62, 0xf2, 0x2f, 0xce, 0x67, 0x1f, 0xaa, + /* (2^172)P */ 0x9e, 0x1e, 0xcd, 0x43, 0x7e, 0x87, 0x37, 0x94, 0x3a, 0x97, 0x4c, 0x7e, 0xee, 0xc9, 0x37, 0x85, 0xf1, 0xd9, 0x4f, 0xbf, 0xf9, 0x6f, 0x39, 0x9a, 0x39, 0x87, 0x2e, 0x25, 0x84, 0x42, 0xc3, 0x80, 0xcb, 0x07, 0x22, 0xae, 0x30, 0xd5, 0x50, 0xa1, 0x23, 0xcc, 0x31, 0x81, 0x9d, 0xf1, 0x30, 0xd9, 0x2b, 0x73, 0x41, 0x16, 0x50, 0xab, 0x2d, 0xa2, + /* (2^173)P */ 0xa4, 0x69, 0x4f, 0xa1, 0x4e, 0xb9, 0xbf, 0x14, 0xe8, 0x2b, 0x04, 0x93, 0xb7, 0x6e, 0x9f, 0x7d, 0x73, 0x0a, 0xc5, 0x14, 0xb8, 0xde, 0x8c, 0xc1, 0xfe, 0xc0, 0xa7, 0xa4, 0xcc, 0x42, 0x42, 0x81, 0x15, 0x65, 0x8a, 0x80, 0xb9, 0xde, 0x1f, 0x60, 0x33, 0x0e, 0xcb, 0xfc, 0xe0, 0xdb, 0x83, 0xa1, 0xe5, 0xd0, 0x16, 0x86, 0x2c, 0xe2, 0x87, 0xed, + /* (2^174)P */ 0x7a, 0xc0, 0xeb, 0x6b, 0xf6, 0x0d, 0x4c, 0x6d, 0x1e, 0xdb, 0xab, 0xe7, 0x19, 0x45, 0xc6, 0xe3, 0xb2, 0x06, 0xbb, 0xbc, 0x70, 0x99, 0x83, 0x33, 0xeb, 0x28, 0xc8, 0x77, 0xf6, 0x4d, 0x01, 0xb7, 0x59, 0xa0, 0xd2, 0xb3, 0x2a, 0x72, 0x30, 0xe7, 0x11, 0x39, 0xb6, 0x41, 0x29, 0x65, 0x5a, 0x14, 0xb9, 0x86, 0x08, 0xe0, 0x7d, 0x32, 0x8c, 0xf0, + /* (2^175)P */ 0x5c, 0x11, 0x30, 0x9e, 0x05, 0x27, 0xf5, 0x45, 0x0f, 0xb3, 0xc9, 0x75, 0xc3, 0xd7, 0xe1, 0x82, 0x3b, 0x8e, 0x87, 0x23, 0x00, 0x15, 0x19, 0x07, 0xd9, 0x21, 0x53, 0xc7, 0xf1, 0xa3, 0xbf, 0x70, 0x64, 0x15, 0x18, 0xca, 0x23, 0x9e, 0xd3, 0x08, 0xc3, 0x2a, 0x8b, 0xe5, 0x83, 0x04, 0x89, 0x14, 0xfd, 0x28, 0x25, 0x1c, 0xe3, 0x26, 0xa7, 0x22, + /* (2^176)P */ 0xdc, 0xd4, 0x75, 0x60, 0x99, 0x94, 0xea, 0x09, 0x8e, 0x8a, 0x3c, 0x1b, 0xf9, 0xbd, 0x33, 0x0d, 0x51, 0x3d, 0x12, 0x6f, 0x4e, 0x72, 0xe0, 0x17, 0x20, 0xe9, 0x75, 0xe6, 0x3a, 0xb2, 0x13, 0x83, 0x4e, 0x7a, 0x08, 0x9e, 0xd1, 0x04, 0x5f, 0x6b, 0x42, 0x0b, 0x76, 0x2a, 0x2d, 0x77, 0x53, 0x6c, 0x65, 0x6d, 0x8e, 0x25, 0x3c, 0xb6, 0x8b, 0x69, + /* (2^177)P */ 0xb9, 0x49, 0x28, 0xd0, 0xdc, 0x6c, 0x8f, 0x4c, 0xc9, 0x14, 0x8a, 0x38, 0xa3, 0xcb, 0xc4, 0x9d, 0x53, 0xcf, 0xe9, 0xe3, 0xcf, 0xe0, 0xb1, 0xf2, 0x1b, 0x4c, 0x7f, 0x83, 0x2a, 0x7a, 0xe9, 0x8b, 0x3b, 0x86, 0x61, 0x30, 0xe9, 0x99, 0xbd, 0xba, 0x19, 0x6e, 0x65, 0x2a, 0x12, 0x3e, 0x9c, 0xa8, 0xaf, 0xc3, 0xcf, 0xf8, 0x1f, 0x77, 0x86, 0xea, + /* (2^178)P */ 0x30, 0xde, 0xe7, 0xff, 0x54, 0xf7, 0xa2, 0x59, 0xf6, 0x0b, 0xfb, 0x7a, 0xf2, 0x39, 0xf0, 0xdb, 0x39, 0xbc, 0xf0, 0xfa, 0x60, 0xeb, 0x6b, 0x4f, 0x47, 0x17, 0xc8, 0x00, 0x65, 0x6d, 0x25, 0x1c, 0xd0, 0x48, 0x56, 0x53, 0x45, 0x11, 0x30, 0x02, 0x49, 0x20, 0x27, 0xac, 0xf2, 0x4c, 0xac, 0x64, 0x3d, 0x52, 0xb8, 0x89, 0xe0, 0x93, 0x16, 0x0f, + /* (2^179)P */ 0x84, 0x09, 0xba, 0x40, 0xb2, 0x2f, 0xa3, 0xa8, 0xc2, 0xba, 
0x46, 0x33, 0x05, 0x9d, 0x62, 0xad, 0xa1, 0x3c, 0x33, 0xef, 0x0d, 0xeb, 0xf0, 0x77, 0x11, 0x5a, 0xb0, 0x21, 0x9c, 0xdf, 0x55, 0x24, 0x25, 0x35, 0x51, 0x61, 0x92, 0xf0, 0xb1, 0xce, 0xf5, 0xd4, 0x7b, 0x6c, 0x21, 0x9d, 0x56, 0x52, 0xf8, 0xa1, 0x4c, 0xe9, 0x27, 0x55, 0xac, 0x91, + /* (2^180)P */ 0x03, 0x3e, 0x30, 0xd2, 0x0a, 0xfa, 0x7d, 0x82, 0x3d, 0x1f, 0x8b, 0xcb, 0xb6, 0x04, 0x5c, 0xcc, 0x8b, 0xda, 0xe2, 0x68, 0x74, 0x08, 0x8c, 0x44, 0x83, 0x57, 0x6d, 0x6f, 0x80, 0xb0, 0x7e, 0xa9, 0x82, 0x91, 0x7b, 0x4c, 0x37, 0x97, 0xd1, 0x63, 0xd1, 0xbd, 0x45, 0xe6, 0x8a, 0x86, 0xd6, 0x89, 0x54, 0xfd, 0xd2, 0xb1, 0xd7, 0x54, 0xad, 0xaf, + /* (2^181)P */ 0x8b, 0x33, 0x62, 0x49, 0x9f, 0x63, 0xf9, 0x87, 0x42, 0x58, 0xbf, 0xb3, 0xe6, 0x68, 0x02, 0x60, 0x5c, 0x76, 0x62, 0xf7, 0x61, 0xd7, 0x36, 0x31, 0xf7, 0x9c, 0xb5, 0xe5, 0x13, 0x6c, 0xea, 0x78, 0xae, 0xcf, 0xde, 0xbf, 0xb6, 0xeb, 0x4f, 0xc8, 0x2a, 0xb4, 0x9a, 0x9f, 0xf3, 0xd1, 0x6a, 0xec, 0x0c, 0xbd, 0x85, 0x98, 0x40, 0x06, 0x1c, 0x2a, + /* (2^182)P */ 0x74, 0x3b, 0xe7, 0x81, 0xd5, 0xae, 0x54, 0x56, 0x03, 0xe8, 0x97, 0x16, 0x76, 0xcf, 0x24, 0x96, 0x96, 0x5b, 0xcc, 0x09, 0xab, 0x23, 0x6f, 0x54, 0xae, 0x8f, 0xe4, 0x12, 0xcb, 0xfd, 0xbc, 0xac, 0x93, 0x45, 0x3d, 0x68, 0x08, 0x22, 0x59, 0xc6, 0xf0, 0x47, 0x19, 0x8c, 0x79, 0x93, 0x1e, 0x0e, 0x30, 0xb0, 0x94, 0xfb, 0x17, 0x1d, 0x5a, 0x12, + /* (2^183)P */ 0x85, 0xff, 0x40, 0x18, 0x85, 0xff, 0x44, 0x37, 0x69, 0x23, 0x4d, 0x34, 0xe1, 0xeb, 0xa3, 0x1b, 0x55, 0x40, 0xc1, 0x64, 0xf4, 0xd4, 0x13, 0x0a, 0x9f, 0xb9, 0x19, 0xfc, 0x88, 0x7d, 0xc0, 0x72, 0xcf, 0x69, 0x2f, 0xd2, 0x0c, 0x82, 0x0f, 0xda, 0x08, 0xba, 0x0f, 0xaa, 0x3b, 0xe9, 0xe5, 0x83, 0x7a, 0x06, 0xe8, 0x1b, 0x38, 0x43, 0xc3, 0x54, + /* (2^184)P */ 0x14, 0xaa, 0xb3, 0x6e, 0xe6, 0x28, 0xee, 0xc5, 0x22, 0x6c, 0x7c, 0xf9, 0xa8, 0x71, 0xcc, 0xfe, 0x68, 0x7e, 0xd3, 0xb8, 0x37, 0x96, 0xca, 0x0b, 0xd9, 0xb6, 0x06, 0xa9, 0xf6, 0x71, 0xe8, 0x31, 0xf7, 0xd8, 0xf1, 0x5d, 0xab, 0xb9, 0xf0, 0x5c, 0x98, 0xcf, 0x22, 0xa2, 0x2a, 0xf6, 0xd0, 0x59, 0xf0, 0x9d, 0xd9, 0x6a, 0x4f, 0x59, 0x57, 0xad, + /* (2^185)P */ 0xd7, 0x2b, 0x3d, 0x38, 0x4c, 0x2e, 0x23, 0x4d, 0x49, 0xa2, 0x62, 0x62, 0xf9, 0x0f, 0xde, 0x08, 0xf3, 0x86, 0x71, 0xb6, 0xc7, 0xf9, 0x85, 0x9c, 0x33, 0xa1, 0xcf, 0x16, 0xaa, 0x60, 0xb9, 0xb7, 0xea, 0xed, 0x01, 0x1c, 0x59, 0xdb, 0x3f, 0x3f, 0x97, 0x2e, 0xf0, 0x09, 0x9f, 0x10, 0x85, 0x5f, 0x53, 0x39, 0xf3, 0x13, 0x40, 0x56, 0x95, 0xf9, + /* (2^186)P */ 0xb4, 0xe3, 0xda, 0xc6, 0x1f, 0x78, 0x8e, 0xac, 0xd4, 0x20, 0x1d, 0xa0, 0xbf, 0x4c, 0x09, 0x16, 0xa7, 0x30, 0xb5, 0x8d, 0x9e, 0xa1, 0x5f, 0x6d, 0x52, 0xf4, 0x71, 0xb6, 0x32, 0x2d, 0x21, 0x51, 0xc6, 0xfc, 0x2f, 0x08, 0xf4, 0x13, 0x6c, 0x55, 0xba, 0x72, 0x81, 0x24, 0x49, 0x0e, 0x4f, 0x06, 0x36, 0x39, 0x6a, 0xc5, 0x81, 0xfc, 0xeb, 0xb2, + /* (2^187)P */ 0x7d, 0x8d, 0xc8, 0x6c, 0xea, 0xb4, 0xb9, 0xe8, 0x40, 0xc9, 0x69, 0xc9, 0x30, 0x05, 0xfd, 0x34, 0x46, 0xfd, 0x94, 0x05, 0x16, 0xf5, 0x4b, 0x13, 0x3d, 0x24, 0x1a, 0xd6, 0x64, 0x2b, 0x9c, 0xe2, 0xa5, 0xd9, 0x98, 0xe0, 0xe8, 0xf4, 0xbc, 0x2c, 0xbd, 0xa2, 0x56, 0xe3, 0x9e, 0x14, 0xdb, 0xbf, 0x05, 0xbf, 0x9a, 0x13, 0x5d, 0xf7, 0x91, 0xa3, + /* (2^188)P */ 0x8b, 0xcb, 0x27, 0xf3, 0x15, 0x26, 0x05, 0x40, 0x0f, 0xa6, 0x15, 0x13, 0x71, 0x95, 0xa2, 0xc6, 0x38, 0x04, 0x67, 0xf8, 0x9a, 0x83, 0x06, 0xaa, 0x25, 0x36, 0x72, 0x01, 0x6f, 0x74, 0x5f, 0xe5, 0x6e, 0x44, 0x99, 0xce, 0x13, 0xbc, 0x82, 0xc2, 0x0d, 0xa4, 0x98, 0x50, 0x38, 0xf3, 0xa2, 0xc5, 0xe5, 0x24, 0x1f, 0x6f, 0x56, 0x3e, 0x07, 0xb2, + /* (2^189)P */ 0xbd, 0x0f, 0x32, 0x60, 0x07, 0xb1, 0xd7, 0x0b, 0x11, 0x07, 0x57, 0x02, 0x89, 0xe8, 
0x8b, 0xe8, 0x5a, 0x1f, 0xee, 0x54, 0x6b, 0xff, 0xb3, 0x04, 0x07, 0x57, 0x13, 0x0b, 0x94, 0xa8, 0x4d, 0x81, 0xe2, 0x17, 0x16, 0x45, 0xd4, 0x4b, 0xf7, 0x7e, 0x64, 0x66, 0x20, 0xe8, 0x0b, 0x26, 0xfd, 0xa9, 0x8a, 0x47, 0x52, 0x89, 0x14, 0xd0, 0xd1, 0xa1, + /* (2^190)P */ 0xdc, 0x03, 0xe6, 0x20, 0x44, 0x47, 0x8f, 0x04, 0x16, 0x24, 0x22, 0xc1, 0x55, 0x5c, 0xbe, 0x43, 0xc3, 0x92, 0xc5, 0x54, 0x3d, 0x5d, 0xd1, 0x05, 0x9c, 0xc6, 0x7c, 0xbf, 0x23, 0x84, 0x1a, 0xba, 0x4f, 0x1f, 0xfc, 0xa1, 0xae, 0x1a, 0x64, 0x02, 0x51, 0xf1, 0xcb, 0x7a, 0x20, 0xce, 0xb2, 0x34, 0x3c, 0xca, 0xe0, 0xe4, 0xba, 0x22, 0xd4, 0x7b, + /* (2^191)P */ 0xca, 0xfd, 0xca, 0xd7, 0xde, 0x61, 0xae, 0xf0, 0x79, 0x0c, 0x20, 0xab, 0xbc, 0x6f, 0x4d, 0x61, 0xf0, 0xc7, 0x9c, 0x8d, 0x4b, 0x52, 0xf3, 0xb9, 0x48, 0x63, 0x0b, 0xb6, 0xd2, 0x25, 0x9a, 0x96, 0x72, 0xc1, 0x6b, 0x0c, 0xb5, 0xfb, 0x71, 0xaa, 0xad, 0x47, 0x5b, 0xe7, 0xc0, 0x0a, 0x55, 0xb2, 0xd4, 0x16, 0x2f, 0xb1, 0x01, 0xfd, 0xce, 0x27, + /* (2^192)P */ 0x64, 0x11, 0x4b, 0xab, 0x57, 0x09, 0xc6, 0x49, 0x4a, 0x37, 0xc3, 0x36, 0xc4, 0x7b, 0x81, 0x1f, 0x42, 0xed, 0xbb, 0xe0, 0xa0, 0x8d, 0x51, 0xe6, 0xca, 0x8b, 0xb9, 0xcd, 0x99, 0x2d, 0x91, 0x53, 0xa9, 0x47, 0xcb, 0x32, 0xc7, 0xa4, 0x92, 0xec, 0x46, 0x74, 0x44, 0x6d, 0x71, 0x9f, 0x6d, 0x0c, 0x69, 0xa4, 0xf8, 0xbe, 0x9f, 0x7f, 0xa0, 0xd7, + /* (2^193)P */ 0x5f, 0x33, 0xb6, 0x91, 0xc8, 0xa5, 0x3f, 0x5d, 0x7f, 0x38, 0x6e, 0x74, 0x20, 0x4a, 0xd6, 0x2b, 0x98, 0x2a, 0x41, 0x4b, 0x83, 0x64, 0x0b, 0x92, 0x7a, 0x06, 0x1e, 0xc6, 0x2c, 0xf6, 0xe4, 0x91, 0xe5, 0xb1, 0x2e, 0x6e, 0x4e, 0xa8, 0xc8, 0x14, 0x32, 0x57, 0x44, 0x1c, 0xe4, 0xb9, 0x7f, 0x54, 0x51, 0x08, 0x81, 0xaa, 0x4e, 0xce, 0xa1, 0x5d, + /* (2^194)P */ 0x5c, 0xd5, 0x9b, 0x5e, 0x7c, 0xb5, 0xb1, 0x52, 0x73, 0x00, 0x41, 0x56, 0x79, 0x08, 0x7e, 0x07, 0x28, 0x06, 0xa6, 0xfb, 0x7f, 0x69, 0xbd, 0x7a, 0x3c, 0xae, 0x9f, 0x39, 0xbb, 0x54, 0xa2, 0x79, 0xb9, 0x0e, 0x7f, 0xbb, 0xe0, 0xe6, 0xb7, 0x27, 0x64, 0x38, 0x45, 0xdb, 0x84, 0xe4, 0x61, 0x72, 0x3f, 0xe2, 0x24, 0xfe, 0x7a, 0x31, 0x9a, 0xc9, + /* (2^195)P */ 0xa1, 0xd2, 0xa4, 0xee, 0x24, 0x96, 0xe5, 0x5b, 0x79, 0x78, 0x3c, 0x7b, 0x82, 0x3b, 0x8b, 0x58, 0x0b, 0xa3, 0x63, 0x2d, 0xbc, 0x75, 0x46, 0xe8, 0x83, 0x1a, 0xc0, 0x2a, 0x92, 0x61, 0xa8, 0x75, 0x37, 0x3c, 0xbf, 0x0f, 0xef, 0x8f, 0x6c, 0x97, 0x75, 0x10, 0x05, 0x7a, 0xde, 0x23, 0xe8, 0x2a, 0x35, 0xeb, 0x41, 0x64, 0x7d, 0xcf, 0xe0, 0x52, + /* (2^196)P */ 0x4a, 0xd0, 0x49, 0x93, 0xae, 0xf3, 0x24, 0x8c, 0xe1, 0x09, 0x98, 0x45, 0xd8, 0xb9, 0xfe, 0x8e, 0x8c, 0xa8, 0x2c, 0xc9, 0x9f, 0xce, 0x01, 0xdc, 0x38, 0x11, 0xab, 0x85, 0xb9, 0xe8, 0x00, 0x51, 0xfd, 0x82, 0xe1, 0x9b, 0x4e, 0xfc, 0xb5, 0x2a, 0x0f, 0x8b, 0xda, 0x4e, 0x02, 0xca, 0xcc, 0xe3, 0x91, 0xc4, 0xe0, 0xcf, 0x7b, 0xd6, 0xe6, 0x6a, + /* (2^197)P */ 0xfe, 0x11, 0xd7, 0xaa, 0xe3, 0x0c, 0x52, 0x2e, 0x04, 0xe0, 0xe0, 0x61, 0xc8, 0x05, 0xd7, 0x31, 0x4c, 0xc3, 0x9b, 0x2d, 0xce, 0x59, 0xbe, 0x12, 0xb7, 0x30, 0x21, 0xfc, 0x81, 0xb8, 0x5e, 0x57, 0x73, 0xd0, 0xad, 0x8e, 0x9e, 0xe4, 0xeb, 0xcd, 0xcf, 0xd2, 0x0f, 0x01, 0x35, 0x16, 0xed, 0x7a, 0x43, 0x8e, 0x42, 0xdc, 0xea, 0x4c, 0xa8, 0x7c, + /* (2^198)P */ 0x37, 0x26, 0xcc, 0x76, 0x0b, 0xe5, 0x76, 0xdd, 0x3e, 0x19, 0x3c, 0xc4, 0x6c, 0x7f, 0xd0, 0x03, 0xc1, 0xb8, 0x59, 0x82, 0xca, 0x36, 0xc1, 0xe4, 0xc8, 0xb2, 0x83, 0x69, 0x9c, 0xc5, 0x9d, 0x12, 0x82, 0x1c, 0xea, 0xb2, 0x84, 0x9f, 0xf3, 0x52, 0x6b, 0xbb, 0xd8, 0x81, 0x56, 0x83, 0x04, 0x66, 0x05, 0x22, 0x49, 0x37, 0x93, 0xb1, 0xfd, 0xd5, + /* (2^199)P */ 0xaf, 0x96, 0xbf, 0x03, 0xbe, 0xe6, 0x5d, 0x78, 0x19, 0xba, 0x37, 0x46, 0x0a, 0x2b, 0x52, 0x7c, 0xd8, 0x51, 
0x9e, 0x3d, 0x29, 0x42, 0xdb, 0x0e, 0x31, 0x20, 0x94, 0xf8, 0x43, 0x9a, 0x2d, 0x22, 0xd3, 0xe3, 0xa1, 0x79, 0x68, 0xfb, 0x2d, 0x7e, 0xd6, 0x79, 0xda, 0x0b, 0xc6, 0x5b, 0x76, 0x68, 0xf0, 0xfe, 0x72, 0x59, 0xbb, 0xa1, 0x9c, 0x74, + /* (2^200)P */ 0x0a, 0xd9, 0xec, 0xc5, 0xbd, 0xf0, 0xda, 0xcf, 0x82, 0xab, 0x46, 0xc5, 0x32, 0x13, 0xdc, 0x5b, 0xac, 0xc3, 0x53, 0x9a, 0x7f, 0xef, 0xa5, 0x40, 0x5a, 0x1f, 0xc1, 0x12, 0x91, 0x54, 0x83, 0x6a, 0xb0, 0x9a, 0x85, 0x4d, 0xbf, 0x36, 0x8e, 0xd3, 0xa2, 0x2b, 0xe5, 0xd6, 0xc6, 0xe1, 0x58, 0x5b, 0x82, 0x9b, 0xc8, 0xf2, 0x03, 0xba, 0xf5, 0x92, + /* (2^201)P */ 0xfb, 0x21, 0x7e, 0xde, 0xe7, 0xb4, 0xc0, 0x56, 0x86, 0x3a, 0x5b, 0x78, 0xf8, 0xf0, 0xf4, 0xe7, 0x5c, 0x00, 0xd2, 0xd7, 0xd6, 0xf8, 0x75, 0x5e, 0x0f, 0x3e, 0xd1, 0x4b, 0x77, 0xd8, 0xad, 0xb0, 0xc9, 0x8b, 0x59, 0x7d, 0x30, 0x76, 0x64, 0x7a, 0x76, 0xd9, 0x51, 0x69, 0xfc, 0xbd, 0x8e, 0xb5, 0x55, 0xe0, 0xd2, 0x07, 0x15, 0xa9, 0xf7, 0xa4, + /* (2^202)P */ 0xaa, 0x2d, 0x2f, 0x2b, 0x3c, 0x15, 0xdd, 0xcd, 0xe9, 0x28, 0x82, 0x4f, 0xa2, 0xaa, 0x31, 0x48, 0xcc, 0xfa, 0x07, 0x73, 0x8a, 0x34, 0x74, 0x0d, 0xab, 0x1a, 0xca, 0xd2, 0xbf, 0x3a, 0xdb, 0x1a, 0x5f, 0x50, 0x62, 0xf4, 0x6b, 0x83, 0x38, 0x43, 0x96, 0xee, 0x6b, 0x39, 0x1e, 0xf0, 0x17, 0x80, 0x1e, 0x9b, 0xed, 0x2b, 0x2f, 0xcc, 0x65, 0xf7, + /* (2^203)P */ 0x03, 0xb3, 0x23, 0x9c, 0x0d, 0xd1, 0xeb, 0x7e, 0x34, 0x17, 0x8a, 0x4c, 0xde, 0x54, 0x39, 0xc4, 0x11, 0x82, 0xd3, 0xa4, 0x00, 0x32, 0x95, 0x9c, 0xa6, 0x64, 0x76, 0x6e, 0xd6, 0x53, 0x27, 0xb4, 0x6a, 0x14, 0x8c, 0x54, 0xf6, 0x58, 0x9e, 0x22, 0x4a, 0x55, 0x18, 0x77, 0xd0, 0x08, 0x6b, 0x19, 0x8a, 0xb5, 0xe7, 0x19, 0xb8, 0x60, 0x92, 0xb1, + /* (2^204)P */ 0x66, 0xec, 0xf3, 0x12, 0xde, 0x67, 0x7f, 0xd4, 0x5b, 0xf6, 0x70, 0x64, 0x0a, 0xb5, 0xc2, 0xf9, 0xb3, 0x64, 0xab, 0x56, 0x46, 0xc7, 0x93, 0xc2, 0x8b, 0x2d, 0xd0, 0xd6, 0x39, 0x3b, 0x1f, 0xcd, 0xb3, 0xac, 0xcc, 0x2c, 0x27, 0x6a, 0xbc, 0xb3, 0x4b, 0xa8, 0x3c, 0x69, 0x20, 0xe2, 0x18, 0x35, 0x17, 0xe1, 0x8a, 0xd3, 0x11, 0x74, 0xaa, 0x4d, + /* (2^205)P */ 0x96, 0xc4, 0x16, 0x7e, 0xfd, 0xf5, 0xd0, 0x7d, 0x1f, 0x32, 0x1b, 0xdb, 0xa6, 0xfd, 0x51, 0x75, 0x4d, 0xd7, 0x00, 0xe5, 0x7f, 0x58, 0x5b, 0xeb, 0x4b, 0x6a, 0x78, 0xfe, 0xe5, 0xd6, 0x8f, 0x99, 0x17, 0xca, 0x96, 0x45, 0xf7, 0x52, 0xdf, 0x84, 0x06, 0x77, 0xb9, 0x05, 0x63, 0x5d, 0xe9, 0x91, 0xb1, 0x4b, 0x82, 0x5a, 0xdb, 0xd7, 0xca, 0x69, + /* (2^206)P */ 0x02, 0xd3, 0x38, 0x38, 0x87, 0xea, 0xbd, 0x9f, 0x11, 0xca, 0xf3, 0x21, 0xf1, 0x9b, 0x35, 0x97, 0x98, 0xff, 0x8e, 0x6d, 0x3d, 0xd6, 0xb2, 0xfa, 0x68, 0xcb, 0x7e, 0x62, 0x85, 0xbb, 0xc7, 0x5d, 0xee, 0x32, 0x30, 0x2e, 0x71, 0x96, 0x63, 0x43, 0x98, 0xc4, 0xa7, 0xde, 0x60, 0xb2, 0xd9, 0x43, 0x4a, 0xfa, 0x97, 0x2d, 0x5f, 0x21, 0xd4, 0xfe, + /* (2^207)P */ 0x3b, 0x20, 0x29, 0x07, 0x07, 0xb5, 0x78, 0xc3, 0xc7, 0xab, 0x56, 0xba, 0x40, 0xde, 0x1d, 0xcf, 0xc3, 0x00, 0x56, 0x21, 0x0c, 0xc8, 0x42, 0xd9, 0x0e, 0xcd, 0x02, 0x7c, 0x07, 0xb9, 0x11, 0xd7, 0x96, 0xaf, 0xff, 0xad, 0xc5, 0xba, 0x30, 0x6d, 0x82, 0x3a, 0xbf, 0xef, 0x7b, 0xf7, 0x0a, 0x74, 0xbd, 0x31, 0x0c, 0xe4, 0xec, 0x1a, 0xe5, 0xc5, + /* (2^208)P */ 0xcc, 0xf2, 0x28, 0x16, 0x12, 0xbf, 0xef, 0x85, 0xbc, 0xf7, 0xcb, 0x9f, 0xdb, 0xa8, 0xb2, 0x49, 0x53, 0x48, 0xa8, 0x24, 0xa8, 0x68, 0x8d, 0xbb, 0x21, 0x0a, 0x5a, 0xbd, 0xb2, 0x91, 0x61, 0x47, 0xc4, 0x43, 0x08, 0xa6, 0x19, 0xef, 0x8e, 0x88, 0x39, 0xc6, 0x33, 0x30, 0xf3, 0x0e, 0xc5, 0x92, 0x66, 0xd6, 0xfe, 0xc5, 0x12, 0xd9, 0x4c, 0x2d, + /* (2^209)P */ 0x30, 0x34, 0x07, 0xbf, 0x9c, 0x5a, 0x4e, 0x65, 0xf1, 0x39, 0x35, 0x38, 0xae, 0x7b, 0x55, 0xac, 0x6a, 0x92, 0x24, 0x7e, 0x50, 0xd3, 
0xba, 0x78, 0x51, 0xfe, 0x4d, 0x32, 0x05, 0x11, 0xf5, 0x52, 0xf1, 0x31, 0x45, 0x39, 0x98, 0x7b, 0x28, 0x56, 0xc3, 0x5d, 0x4f, 0x07, 0x6f, 0x84, 0xb8, 0x1a, 0x58, 0x0b, 0xc4, 0x7c, 0xc4, 0x8d, 0x32, 0x8e, + /* (2^210)P */ 0x7e, 0xaf, 0x98, 0xce, 0xc5, 0x2b, 0x9d, 0xf6, 0xfa, 0x2c, 0xb6, 0x2a, 0x5a, 0x1d, 0xc0, 0x24, 0x8d, 0xa4, 0xce, 0xb1, 0x12, 0x01, 0xf9, 0x79, 0xc6, 0x79, 0x38, 0x0c, 0xd4, 0x07, 0xc9, 0xf7, 0x37, 0xa1, 0x0b, 0xfe, 0x72, 0xec, 0x5d, 0xd6, 0xb0, 0x1c, 0x70, 0xbe, 0x70, 0x01, 0x13, 0xe0, 0x86, 0x95, 0xc7, 0x2e, 0x12, 0x3b, 0xe6, 0xa6, + /* (2^211)P */ 0x24, 0x82, 0x67, 0xe0, 0x14, 0x7b, 0x56, 0x08, 0x38, 0x44, 0xdb, 0xa0, 0x3a, 0x05, 0x47, 0xb2, 0xc0, 0xac, 0xd1, 0xcc, 0x3f, 0x82, 0xb8, 0x8a, 0x88, 0xbc, 0xf5, 0x33, 0xa1, 0x35, 0x0f, 0xf6, 0xe2, 0xef, 0x6c, 0xf7, 0x37, 0x9e, 0xe8, 0x10, 0xca, 0xb0, 0x8e, 0x80, 0x86, 0x00, 0x23, 0xd0, 0x4a, 0x76, 0x9f, 0xf7, 0x2c, 0x52, 0x15, 0x0e, + /* (2^212)P */ 0x5e, 0x49, 0xe1, 0x2c, 0x9a, 0x01, 0x76, 0xa6, 0xb3, 0x07, 0x5b, 0xa4, 0x07, 0xef, 0x1d, 0xc3, 0x6a, 0xbb, 0x64, 0xbe, 0x71, 0x15, 0x6e, 0x32, 0x31, 0x46, 0x9a, 0x9e, 0x8f, 0x45, 0x73, 0xce, 0x0b, 0x94, 0x1a, 0x52, 0x07, 0xf4, 0x50, 0x30, 0x49, 0x53, 0x50, 0xfb, 0x71, 0x1f, 0x5a, 0x03, 0xa9, 0x76, 0xf2, 0x8f, 0x42, 0xff, 0xed, 0xed, + /* (2^213)P */ 0xed, 0x08, 0xdb, 0x91, 0x1c, 0xee, 0xa2, 0xb4, 0x47, 0xa2, 0xfa, 0xcb, 0x03, 0xd1, 0xff, 0x8c, 0xad, 0x64, 0x50, 0x61, 0xcd, 0xfc, 0x88, 0xa0, 0x31, 0x95, 0x30, 0xb9, 0x58, 0xdd, 0xd7, 0x43, 0xe4, 0x46, 0xc2, 0x16, 0xd9, 0x72, 0x4a, 0x56, 0x51, 0x70, 0x85, 0xf1, 0xa1, 0x80, 0x40, 0xd5, 0xba, 0x67, 0x81, 0xda, 0xcd, 0x03, 0xea, 0x51, + /* (2^214)P */ 0x42, 0x50, 0xf0, 0xef, 0x37, 0x61, 0x72, 0x85, 0xe1, 0xf1, 0xff, 0x6f, 0x3d, 0xe8, 0x7b, 0x21, 0x5c, 0xe5, 0x50, 0x03, 0xde, 0x00, 0xc1, 0xf7, 0x3a, 0x55, 0x12, 0x1c, 0x9e, 0x1e, 0xce, 0xd1, 0x2f, 0xaf, 0x05, 0x70, 0x5b, 0x47, 0xf2, 0x04, 0x7a, 0x89, 0xbc, 0x78, 0xa6, 0x65, 0x6c, 0xaa, 0x3c, 0xa2, 0x3c, 0x8b, 0x5c, 0xa9, 0x22, 0x48, + /* (2^215)P */ 0x7e, 0x8c, 0x8f, 0x2f, 0x60, 0xe3, 0x5a, 0x94, 0xd4, 0xce, 0xdd, 0x9d, 0x83, 0x3b, 0x77, 0x78, 0x43, 0x1d, 0xfd, 0x8f, 0xc8, 0xe8, 0x02, 0x90, 0xab, 0xf6, 0xc9, 0xfc, 0xf1, 0x63, 0xaa, 0x5f, 0x42, 0xf1, 0x78, 0x34, 0x64, 0x16, 0x75, 0x9c, 0x7d, 0xd0, 0xe4, 0x74, 0x5a, 0xa8, 0xfb, 0xcb, 0xac, 0x20, 0xa3, 0xc2, 0xa6, 0x20, 0xf8, 0x1b, + /* (2^216)P */ 0x00, 0x4f, 0x1e, 0x56, 0xb5, 0x34, 0xb2, 0x87, 0x31, 0xe5, 0xee, 0x8d, 0xf1, 0x41, 0x67, 0xb7, 0x67, 0x3a, 0x54, 0x86, 0x5c, 0xf0, 0x0b, 0x37, 0x2f, 0x1b, 0x92, 0x5d, 0x58, 0x93, 0xdc, 0xd8, 0x58, 0xcc, 0x9e, 0x67, 0xd0, 0x97, 0x3a, 0xaf, 0x49, 0x39, 0x2d, 0x3b, 0xd8, 0x98, 0xfb, 0x76, 0x6b, 0xe7, 0xaf, 0xc3, 0x45, 0x44, 0x53, 0x94, + /* (2^217)P */ 0x30, 0xbd, 0x90, 0x75, 0xd3, 0xbd, 0x3b, 0x58, 0x27, 0x14, 0x9f, 0x6b, 0xd4, 0x31, 0x99, 0xcd, 0xde, 0x3a, 0x21, 0x1e, 0xb4, 0x02, 0xe4, 0x33, 0x04, 0x02, 0xb0, 0x50, 0x66, 0x68, 0x90, 0xdd, 0x7b, 0x69, 0x31, 0xd9, 0xcf, 0x68, 0x73, 0xf1, 0x60, 0xdd, 0xc8, 0x1d, 0x5d, 0xe3, 0xd6, 0x5b, 0x2a, 0xa4, 0xea, 0xc4, 0x3f, 0x08, 0xcd, 0x9c, + /* (2^218)P */ 0x6b, 0x1a, 0xbf, 0x55, 0xc1, 0x1b, 0x0c, 0x05, 0x09, 0xdf, 0xf5, 0x5e, 0xa3, 0x77, 0x95, 0xe9, 0xdf, 0x19, 0xdd, 0xc7, 0x94, 0xcb, 0x06, 0x73, 0xd0, 0x88, 0x02, 0x33, 0x94, 0xca, 0x7a, 0x2f, 0x8e, 0x3d, 0x72, 0x61, 0x2d, 0x4d, 0xa6, 0x61, 0x1f, 0x32, 0x5e, 0x87, 0x53, 0x36, 0x11, 0x15, 0x20, 0xb3, 0x5a, 0x57, 0x51, 0x93, 0x20, 0xd8, + /* (2^219)P */ 0xb7, 0x56, 0xf4, 0xab, 0x7d, 0x0c, 0xfb, 0x99, 0x1a, 0x30, 0x29, 0xb0, 0x75, 0x2a, 0xf8, 0x53, 0x71, 0x23, 0xbd, 0xa7, 0xd8, 0x0a, 0xe2, 0x27, 0x65, 0xe9, 
0x74, 0x26, 0x98, 0x4a, 0x69, 0x19, 0xb2, 0x4d, 0x0a, 0x17, 0x98, 0xb2, 0xa9, 0x57, 0x4e, 0xf6, 0x86, 0xc8, 0x01, 0xa4, 0xc6, 0x98, 0xad, 0x5a, 0x90, 0x2c, 0x05, 0x46, 0x64, 0xb7, + /* (2^220)P */ 0x7b, 0x91, 0xdf, 0xfc, 0xf8, 0x1c, 0x8c, 0x15, 0x9e, 0xf7, 0xd5, 0xa8, 0xe8, 0xe7, 0xe3, 0xa3, 0xb0, 0x04, 0x74, 0xfa, 0x78, 0xfb, 0x26, 0xbf, 0x67, 0x42, 0xf9, 0x8c, 0x9b, 0xb4, 0x69, 0x5b, 0x02, 0x13, 0x6d, 0x09, 0x6c, 0xd6, 0x99, 0x61, 0x7b, 0x89, 0x4a, 0x67, 0x75, 0xa3, 0x98, 0x13, 0x23, 0x1d, 0x18, 0x24, 0x0e, 0xef, 0x41, 0x79, + /* (2^221)P */ 0x86, 0x33, 0xab, 0x08, 0xcb, 0xbf, 0x1e, 0x76, 0x3c, 0x0b, 0xbd, 0x30, 0xdb, 0xe9, 0xa3, 0x35, 0x87, 0x1b, 0xe9, 0x07, 0x00, 0x66, 0x7f, 0x3b, 0x35, 0x0c, 0x8a, 0x3f, 0x61, 0xbc, 0xe0, 0xae, 0xf6, 0xcc, 0x54, 0xe1, 0x72, 0x36, 0x2d, 0xee, 0x93, 0x24, 0xf8, 0xd7, 0xc5, 0xf9, 0xcb, 0xb0, 0xe5, 0x88, 0x0d, 0x23, 0x4b, 0x76, 0x15, 0xa2, + /* (2^222)P */ 0x37, 0xdb, 0x83, 0xd5, 0x6d, 0x06, 0x24, 0x37, 0x1b, 0x15, 0x85, 0x15, 0xe2, 0xc0, 0x4e, 0x02, 0xa9, 0x6d, 0x0a, 0x3a, 0x94, 0x4a, 0x6f, 0x49, 0x00, 0x01, 0x72, 0xbb, 0x60, 0x14, 0x35, 0xae, 0xb4, 0xc6, 0x01, 0x0a, 0x00, 0x9e, 0xc3, 0x58, 0xc5, 0xd1, 0x5e, 0x30, 0x73, 0x96, 0x24, 0x85, 0x9d, 0xf0, 0xf9, 0xec, 0x09, 0xd3, 0xe7, 0x70, + /* (2^223)P */ 0xf3, 0xbd, 0x96, 0x87, 0xe9, 0x71, 0xbd, 0xd6, 0xa2, 0x45, 0xeb, 0x0a, 0xcd, 0x2c, 0xf1, 0x72, 0xa6, 0x31, 0xa9, 0x6f, 0x09, 0xa1, 0x5e, 0xdd, 0xc8, 0x8d, 0x0d, 0xbc, 0x5a, 0x8d, 0xb1, 0x2c, 0x9a, 0xcc, 0x37, 0x74, 0xc2, 0xa9, 0x4e, 0xd6, 0xc0, 0x3c, 0xa0, 0x23, 0xb0, 0xa0, 0x77, 0x14, 0x80, 0x45, 0x71, 0x6a, 0x2d, 0x41, 0xc3, 0x82, + /* (2^224)P */ 0x37, 0x44, 0xec, 0x8a, 0x3e, 0xc1, 0x0c, 0xa9, 0x12, 0x9c, 0x08, 0x88, 0xcb, 0xd9, 0xf8, 0xba, 0x00, 0xd6, 0xc3, 0xdf, 0xef, 0x7a, 0x44, 0x7e, 0x25, 0x69, 0xc9, 0xc1, 0x46, 0xe5, 0x20, 0x9e, 0xcc, 0x0b, 0x05, 0x3e, 0xf4, 0x78, 0x43, 0x0c, 0xa6, 0x2f, 0xc1, 0xfa, 0x70, 0xb2, 0x3c, 0x31, 0x7a, 0x63, 0x58, 0xab, 0x17, 0xcf, 0x4c, 0x4f, + /* (2^225)P */ 0x2b, 0x08, 0x31, 0x59, 0x75, 0x8b, 0xec, 0x0a, 0xa9, 0x79, 0x70, 0xdd, 0xf1, 0x11, 0xc3, 0x11, 0x1f, 0xab, 0x37, 0xaa, 0x26, 0xea, 0x53, 0xc4, 0x79, 0xa7, 0x91, 0x00, 0xaa, 0x08, 0x42, 0xeb, 0x8b, 0x8b, 0xe8, 0xc3, 0x2f, 0xb8, 0x78, 0x90, 0x38, 0x0e, 0x8a, 0x42, 0x0c, 0x0f, 0xbf, 0x3e, 0xf8, 0xd8, 0x07, 0xcf, 0x6a, 0x34, 0xc9, 0xfa, + /* (2^226)P */ 0x11, 0xe0, 0x76, 0x4d, 0x23, 0xc5, 0xa6, 0xcc, 0x9f, 0x9a, 0x2a, 0xde, 0x3a, 0xb5, 0x92, 0x39, 0x19, 0x8a, 0xf1, 0x8d, 0xf9, 0x4d, 0xc9, 0xb4, 0x39, 0x9f, 0x57, 0xd8, 0x72, 0xab, 0x1d, 0x61, 0x6a, 0xb2, 0xff, 0x52, 0xba, 0x54, 0x0e, 0xfb, 0x83, 0x30, 0x8a, 0xf7, 0x3b, 0xf4, 0xd8, 0xae, 0x1a, 0x94, 0x3a, 0xec, 0x63, 0xfe, 0x6e, 0x7c, + /* (2^227)P */ 0xdc, 0x70, 0x8e, 0x55, 0x44, 0xbf, 0xd2, 0x6a, 0xa0, 0x14, 0x61, 0x89, 0xd5, 0x55, 0x45, 0x3c, 0xf6, 0x40, 0x0d, 0x83, 0x85, 0x44, 0xb4, 0x62, 0x56, 0xfe, 0x60, 0xd7, 0x07, 0x1d, 0x47, 0x30, 0x3b, 0x73, 0xa4, 0xb5, 0xb7, 0xea, 0xac, 0xda, 0xf1, 0x17, 0xaa, 0x60, 0xdf, 0xe9, 0x84, 0xda, 0x31, 0x32, 0x61, 0xbf, 0xd0, 0x7e, 0x8a, 0x02, + /* (2^228)P */ 0xb9, 0x51, 0xb3, 0x89, 0x21, 0x5d, 0xa2, 0xfe, 0x79, 0x2a, 0xb3, 0x2a, 0x3b, 0xe6, 0x6f, 0x2b, 0x22, 0x03, 0xea, 0x7b, 0x1f, 0xaf, 0x85, 0xc3, 0x38, 0x55, 0x5b, 0x8e, 0xb4, 0xaa, 0x77, 0xfe, 0x03, 0x6e, 0xda, 0x91, 0x24, 0x0c, 0x48, 0x39, 0x27, 0x43, 0x16, 0xd2, 0x0a, 0x0d, 0x43, 0xa3, 0x0e, 0xca, 0x45, 0xd1, 0x7f, 0xf5, 0xd3, 0x16, + /* (2^229)P */ 0x3d, 0x32, 0x9b, 0x38, 0xf8, 0x06, 0x93, 0x78, 0x5b, 0x50, 0x2b, 0x06, 0xd8, 0x66, 0xfe, 0xab, 0x9b, 0x58, 0xc7, 0xd1, 0x4d, 0xd5, 0xf8, 0x3b, 0x10, 0x7e, 0x85, 0xde, 0x58, 0x4e, 
0xdf, 0x53, 0xd9, 0x58, 0xe0, 0x15, 0x81, 0x9f, 0x1a, 0x78, 0xfc, 0x9f, 0x10, 0xc2, 0x23, 0xd6, 0x78, 0xd1, 0x9d, 0xd2, 0xd5, 0x1c, 0x53, 0xe2, 0xc9, 0x76, + /* (2^230)P */ 0x98, 0x1e, 0x38, 0x7b, 0x71, 0x18, 0x4b, 0x15, 0xaf, 0xa1, 0xa6, 0x98, 0xcb, 0x26, 0xa3, 0xc8, 0x07, 0x46, 0xda, 0x3b, 0x70, 0x65, 0xec, 0x7a, 0x2b, 0x34, 0x94, 0xa8, 0xb6, 0x14, 0xf8, 0x1a, 0xce, 0xf7, 0xc8, 0x60, 0xf3, 0x88, 0xf4, 0x33, 0x60, 0x7b, 0xd1, 0x02, 0xe7, 0xda, 0x00, 0x4a, 0xea, 0xd2, 0xfd, 0x88, 0xd2, 0x99, 0x28, 0xf3, + /* (2^231)P */ 0x28, 0x24, 0x1d, 0x26, 0xc2, 0xeb, 0x8b, 0x3b, 0xb4, 0x6b, 0xbe, 0x6b, 0x77, 0xff, 0xf3, 0x21, 0x3b, 0x26, 0x6a, 0x8c, 0x8e, 0x2a, 0x44, 0xa8, 0x01, 0x2b, 0x71, 0xea, 0x64, 0x30, 0xfd, 0xfd, 0x95, 0xcb, 0x39, 0x38, 0x48, 0xfa, 0x96, 0x97, 0x8c, 0x2f, 0x33, 0xca, 0x03, 0xe6, 0xd7, 0x94, 0x55, 0x6c, 0xc3, 0xb3, 0xa8, 0xf7, 0xae, 0x8c, + /* (2^232)P */ 0xea, 0x62, 0x8a, 0xb4, 0xeb, 0x74, 0xf7, 0xb8, 0xae, 0xc5, 0x20, 0x71, 0x06, 0xd6, 0x7c, 0x62, 0x9b, 0x69, 0x74, 0xef, 0xa7, 0x6d, 0xd6, 0x8c, 0x37, 0xb9, 0xbf, 0xcf, 0xeb, 0xe4, 0x2f, 0x04, 0x02, 0x21, 0x7d, 0x75, 0x6b, 0x92, 0x48, 0xf8, 0x70, 0xad, 0x69, 0xe2, 0xea, 0x0e, 0x88, 0x67, 0x72, 0xcc, 0x2d, 0x10, 0xce, 0x2d, 0xcf, 0x65, + /* (2^233)P */ 0x49, 0xf3, 0x57, 0x64, 0xe5, 0x5c, 0xc5, 0x65, 0x49, 0x97, 0xc4, 0x8a, 0xcc, 0xa9, 0xca, 0x94, 0x7b, 0x86, 0x88, 0xb6, 0x51, 0x27, 0x69, 0xa5, 0x0f, 0x8b, 0x06, 0x59, 0xa0, 0x94, 0xef, 0x63, 0x1a, 0x01, 0x9e, 0x4f, 0xd2, 0x5a, 0x93, 0xc0, 0x7c, 0xe6, 0x61, 0x77, 0xb6, 0xf5, 0x40, 0xd9, 0x98, 0x43, 0x5b, 0x56, 0x68, 0xe9, 0x37, 0x8f, + /* (2^234)P */ 0xee, 0x87, 0xd2, 0x05, 0x1b, 0x39, 0x89, 0x10, 0x07, 0x6d, 0xe8, 0xfd, 0x8b, 0x4d, 0xb2, 0xa7, 0x7b, 0x1e, 0xa0, 0x6c, 0x0d, 0x3d, 0x3d, 0x49, 0xba, 0x61, 0x36, 0x1f, 0xc2, 0x84, 0x4a, 0xcc, 0x87, 0xa9, 0x1b, 0x23, 0x04, 0xe2, 0x3e, 0x97, 0xe1, 0xdb, 0xd5, 0x5a, 0xe8, 0x41, 0x6b, 0xe5, 0x5a, 0xa1, 0x99, 0xe5, 0x7b, 0xa7, 0xe0, 0x3b, + /* (2^235)P */ 0xea, 0xa3, 0x6a, 0xdd, 0x77, 0x7f, 0x77, 0x41, 0xc5, 0x6a, 0xe4, 0xaf, 0x11, 0x5f, 0x88, 0xa5, 0x10, 0xee, 0xd0, 0x8c, 0x0c, 0xb4, 0xa5, 0x2a, 0xd0, 0xd8, 0x1d, 0x47, 0x06, 0xc0, 0xd5, 0xce, 0x51, 0x54, 0x9b, 0x2b, 0xe6, 0x2f, 0xe7, 0xe7, 0x31, 0x5f, 0x5c, 0x23, 0x81, 0x3e, 0x03, 0x93, 0xaa, 0x2d, 0x71, 0x84, 0xa0, 0x89, 0x32, 0xa6, + /* (2^236)P */ 0x55, 0xa3, 0x13, 0x92, 0x4e, 0x93, 0x7d, 0xec, 0xca, 0x57, 0xfb, 0x37, 0xae, 0xd2, 0x18, 0x2e, 0x54, 0x05, 0x6c, 0xd1, 0x28, 0xca, 0x90, 0x40, 0x82, 0x2e, 0x79, 0xc6, 0x5a, 0xc7, 0xdd, 0x84, 0x93, 0xdf, 0x15, 0xb8, 0x1f, 0xb1, 0xf9, 0xaf, 0x2c, 0xe5, 0x32, 0xcd, 0xc2, 0x99, 0x6d, 0xac, 0x85, 0x5c, 0x63, 0xd3, 0xe2, 0xff, 0x24, 0xda, + /* (2^237)P */ 0x2d, 0x8d, 0xfd, 0x65, 0xcc, 0xe5, 0x02, 0xa0, 0xe5, 0xb9, 0xec, 0x59, 0x09, 0x50, 0x27, 0xb7, 0x3d, 0x2a, 0x79, 0xb2, 0x76, 0x5d, 0x64, 0x95, 0xf8, 0xc5, 0xaf, 0x8a, 0x62, 0x11, 0x5c, 0x56, 0x1c, 0x05, 0x64, 0x9e, 0x5e, 0xbd, 0x54, 0x04, 0xe6, 0x9e, 0xab, 0xe6, 0x22, 0x7e, 0x42, 0x54, 0xb5, 0xa5, 0xd0, 0x8d, 0x28, 0x6b, 0x0f, 0x0b, + /* (2^238)P */ 0x2d, 0xb2, 0x8c, 0x59, 0x10, 0x37, 0x84, 0x3b, 0x9b, 0x65, 0x1b, 0x0f, 0x10, 0xf9, 0xea, 0x60, 0x1b, 0x02, 0xf5, 0xee, 0x8b, 0xe6, 0x32, 0x7d, 0x10, 0x7f, 0x5f, 0x8c, 0x72, 0x09, 0x4e, 0x1f, 0x29, 0xff, 0x65, 0xcb, 0x3e, 0x3a, 0xd2, 0x96, 0x50, 0x1e, 0xea, 0x64, 0x99, 0xb5, 0x4c, 0x7a, 0x69, 0xb8, 0x95, 0xae, 0x48, 0xc0, 0x7c, 0xb1, + /* (2^239)P */ 0xcd, 0x7c, 0x4f, 0x3e, 0xea, 0xf3, 0x90, 0xcb, 0x12, 0x76, 0xd1, 0x17, 0xdc, 0x0d, 0x13, 0x0f, 0xfd, 0x4d, 0xb5, 0x1f, 0xe4, 0xdd, 0xf2, 0x4d, 0x58, 0xea, 0xa5, 0x66, 0x92, 0xcf, 0xe5, 0x54, 0xea, 0x9b, 
0x35, 0x83, 0x1a, 0x44, 0x8e, 0x62, 0x73, 0x45, 0x98, 0xa3, 0x89, 0x95, 0x52, 0x93, 0x1a, 0x8d, 0x63, 0x0f, 0xc2, 0x57, 0x3c, 0xb1, + /* (2^240)P */ 0x72, 0xb4, 0xdf, 0x51, 0xb7, 0xf6, 0x52, 0xa2, 0x14, 0x56, 0xe5, 0x0a, 0x2e, 0x75, 0x81, 0x02, 0xee, 0x93, 0x48, 0x0a, 0x92, 0x4e, 0x0c, 0x0f, 0xdf, 0x09, 0x89, 0x99, 0xf6, 0xf9, 0x22, 0xa2, 0x32, 0xf8, 0xb0, 0x76, 0x0c, 0xb2, 0x4d, 0x6e, 0xbe, 0x83, 0x35, 0x61, 0x44, 0xd2, 0x58, 0xc7, 0xdd, 0x14, 0xcf, 0xc3, 0x4b, 0x7c, 0x07, 0xee, + /* (2^241)P */ 0x8b, 0x03, 0xee, 0xcb, 0xa7, 0x2e, 0x28, 0xbd, 0x97, 0xd1, 0x4c, 0x2b, 0xd1, 0x92, 0x67, 0x5b, 0x5a, 0x12, 0xbf, 0x29, 0x17, 0xfc, 0x50, 0x09, 0x74, 0x76, 0xa2, 0xd4, 0x82, 0xfd, 0x2c, 0x0c, 0x90, 0xf7, 0xe7, 0xe5, 0x9a, 0x2c, 0x16, 0x40, 0xb9, 0x6c, 0xd9, 0xe0, 0x22, 0x9e, 0xf8, 0xdd, 0x73, 0xe4, 0x7b, 0x9e, 0xbe, 0x4f, 0x66, 0x22, + /* (2^242)P */ 0xa4, 0x10, 0xbe, 0xb8, 0x83, 0x3a, 0x77, 0x8e, 0xea, 0x0a, 0xc4, 0x97, 0x3e, 0xb6, 0x6c, 0x81, 0xd7, 0x65, 0xd9, 0xf7, 0xae, 0xe6, 0xbe, 0xab, 0x59, 0x81, 0x29, 0x4b, 0xff, 0xe1, 0x0f, 0xc3, 0x2b, 0xad, 0x4b, 0xef, 0xc4, 0x50, 0x9f, 0x88, 0x31, 0xf2, 0xde, 0x80, 0xd6, 0xf4, 0x20, 0x9c, 0x77, 0x9b, 0xbe, 0xbe, 0x08, 0xf5, 0xf0, 0x95, + /* (2^243)P */ 0x0e, 0x7c, 0x7b, 0x7c, 0xb3, 0xd8, 0x83, 0xfc, 0x8c, 0x75, 0x51, 0x74, 0x1b, 0xe1, 0x6d, 0x11, 0x05, 0x46, 0x24, 0x0d, 0xa4, 0x2b, 0x32, 0xfd, 0x2c, 0x4e, 0x21, 0xdf, 0x39, 0x6b, 0x96, 0xfc, 0xff, 0x92, 0xfc, 0x35, 0x0d, 0x9a, 0x4b, 0xc0, 0x70, 0x46, 0x32, 0x7d, 0xc0, 0xc4, 0x04, 0xe0, 0x2d, 0x83, 0xa7, 0x00, 0xc7, 0xcb, 0xb4, 0x8f, + /* (2^244)P */ 0xa9, 0x5a, 0x7f, 0x0e, 0xdd, 0x2c, 0x85, 0xaa, 0x4d, 0xac, 0xde, 0xb3, 0xb6, 0xaf, 0xe6, 0xd1, 0x06, 0x7b, 0x2c, 0xa4, 0x01, 0x19, 0x22, 0x7d, 0x78, 0xf0, 0x3a, 0xea, 0x89, 0xfe, 0x21, 0x61, 0x6d, 0xb8, 0xfe, 0xa5, 0x2a, 0xab, 0x0d, 0x7b, 0x51, 0x39, 0xb6, 0xde, 0xbc, 0xf0, 0xc5, 0x48, 0xd7, 0x09, 0x82, 0x6e, 0x66, 0x75, 0xc5, 0xcd, + /* (2^245)P */ 0xee, 0xdf, 0x2b, 0x6c, 0xa8, 0xde, 0x61, 0xe1, 0x27, 0xfa, 0x2a, 0x0f, 0x68, 0xe7, 0x7a, 0x9b, 0x13, 0xe9, 0x56, 0xd2, 0x1c, 0x3d, 0x2f, 0x3c, 0x7a, 0xf6, 0x6f, 0x45, 0xee, 0xe8, 0xf4, 0xa0, 0xa6, 0xe8, 0xa5, 0x27, 0xee, 0xf2, 0x85, 0xa9, 0xd5, 0x0e, 0xa9, 0x26, 0x60, 0xfe, 0xee, 0xc7, 0x59, 0x99, 0x5e, 0xa3, 0xdf, 0x23, 0x36, 0xd5, + /* (2^246)P */ 0x15, 0x66, 0x6f, 0xd5, 0x78, 0xa4, 0x0a, 0xf7, 0xb1, 0xe8, 0x75, 0x6b, 0x48, 0x7d, 0xa6, 0x4d, 0x3d, 0x36, 0x9b, 0xc7, 0xcc, 0x68, 0x9a, 0xfe, 0x2f, 0x39, 0x2a, 0x51, 0x31, 0x39, 0x7d, 0x73, 0x6f, 0xc8, 0x74, 0x72, 0x6f, 0x6e, 0xda, 0x5f, 0xad, 0x48, 0xc8, 0x40, 0xe1, 0x06, 0x01, 0x36, 0xa1, 0x88, 0xc8, 0x99, 0x9c, 0xd1, 0x11, 0x8f, + /* (2^247)P */ 0xab, 0xc5, 0xcb, 0xcf, 0xbd, 0x73, 0x21, 0xd0, 0x82, 0xb1, 0x2e, 0x2d, 0xd4, 0x36, 0x1b, 0xed, 0xa9, 0x8a, 0x26, 0x79, 0xc4, 0x17, 0xae, 0xe5, 0x09, 0x0a, 0x0c, 0xa4, 0x21, 0xa0, 0x6e, 0xdd, 0x62, 0x8e, 0x44, 0x62, 0xcc, 0x50, 0xff, 0x93, 0xb3, 0x9a, 0x72, 0x8c, 0x3f, 0xa1, 0xa6, 0x4d, 0x87, 0xd5, 0x1c, 0x5a, 0xc0, 0x0b, 0x1a, 0xd6, + /* (2^248)P */ 0x67, 0x36, 0x6a, 0x1f, 0x96, 0xe5, 0x80, 0x20, 0xa9, 0xe8, 0x0b, 0x0e, 0x21, 0x29, 0x3f, 0xc8, 0x0a, 0x6d, 0x27, 0x47, 0xca, 0xd9, 0x05, 0x55, 0xbf, 0x11, 0xcf, 0x31, 0x7a, 0x37, 0xc7, 0x90, 0xa9, 0xf4, 0x07, 0x5e, 0xd5, 0xc3, 0x92, 0xaa, 0x95, 0xc8, 0x23, 0x2a, 0x53, 0x45, 0xe3, 0x3a, 0x24, 0xe9, 0x67, 0x97, 0x3a, 0x82, 0xf9, 0xa6, + /* (2^249)P */ 0x92, 0x9e, 0x6d, 0x82, 0x67, 0xe9, 0xf9, 0x17, 0x96, 0x2c, 0xa7, 0xd3, 0x89, 0xf9, 0xdb, 0xd8, 0x20, 0xc6, 0x2e, 0xec, 0x4a, 0x76, 0x64, 0xbf, 0x27, 0x40, 0xe2, 0xb4, 0xdf, 0x1f, 0xa0, 0xef, 0x07, 0x80, 0xfb, 0x8e, 0x12, 0xf8, 
0xb8, 0xe1, 0xc6, 0xdf, 0x7c, 0x69, 0x35, 0x5a, 0xe1, 0x8e, 0x5d, 0x69, 0x84, 0x56, 0xb6, 0x31, 0x1c, 0x0b, + /* (2^250)P */ 0xd6, 0x94, 0x5c, 0xef, 0xbb, 0x46, 0x45, 0x44, 0x5b, 0xa1, 0xae, 0x03, 0x65, 0xdd, 0xb5, 0x66, 0x88, 0x35, 0x29, 0x95, 0x16, 0x54, 0xa6, 0xf5, 0xc9, 0x78, 0x34, 0xe6, 0x0f, 0xc4, 0x2b, 0x5b, 0x79, 0x51, 0x68, 0x48, 0x3a, 0x26, 0x87, 0x05, 0x70, 0xaf, 0x8b, 0xa6, 0xc7, 0x2e, 0xb3, 0xa9, 0x10, 0x01, 0xb0, 0xb9, 0x31, 0xfd, 0xdc, 0x80, + /* (2^251)P */ 0x25, 0xf2, 0xad, 0xd6, 0x75, 0xa3, 0x04, 0x05, 0x64, 0x8a, 0x97, 0x60, 0x27, 0x2a, 0xe5, 0x6d, 0xb0, 0x73, 0xf4, 0x07, 0x2a, 0x9d, 0xe9, 0x46, 0xb4, 0x1c, 0x51, 0xf8, 0x63, 0x98, 0x7e, 0xe5, 0x13, 0x51, 0xed, 0x98, 0x65, 0x98, 0x4f, 0x8f, 0xe7, 0x7e, 0x72, 0xd7, 0x64, 0x11, 0x2f, 0xcd, 0x12, 0xf8, 0xc4, 0x63, 0x52, 0x0f, 0x7f, 0xc4, + /* (2^252)P */ 0x5c, 0xd9, 0x85, 0x63, 0xc7, 0x8a, 0x65, 0x9a, 0x25, 0x83, 0x31, 0x73, 0x49, 0xf0, 0x93, 0x96, 0x70, 0x67, 0x6d, 0xb1, 0xff, 0x95, 0x54, 0xe4, 0xf8, 0x15, 0x6c, 0x5f, 0xbd, 0xf6, 0x0f, 0x38, 0x7b, 0x68, 0x7d, 0xd9, 0x3d, 0xf0, 0xa9, 0xa0, 0xe4, 0xd1, 0xb6, 0x34, 0x6d, 0x14, 0x16, 0xc2, 0x4c, 0x30, 0x0e, 0x67, 0xd3, 0xbe, 0x2e, 0xc0, + /* (2^253)P */ 0x06, 0x6b, 0x52, 0xc8, 0x14, 0xcd, 0xae, 0x03, 0x93, 0xea, 0xc1, 0xf2, 0xf6, 0x8b, 0xc5, 0xb6, 0xdc, 0x82, 0x42, 0x29, 0x94, 0xe0, 0x25, 0x6c, 0x3f, 0x9f, 0x5d, 0xe4, 0x96, 0xf6, 0x8e, 0x3f, 0xf9, 0x72, 0xc4, 0x77, 0x60, 0x8b, 0xa4, 0xf9, 0xa8, 0xc3, 0x0a, 0x81, 0xb1, 0x97, 0x70, 0x18, 0xab, 0xea, 0x37, 0x8a, 0x08, 0xc7, 0xe2, 0x95, + /* (2^254)P */ 0x94, 0x49, 0xd9, 0x5f, 0x76, 0x72, 0x82, 0xad, 0x2d, 0x50, 0x1a, 0x7a, 0x5b, 0xe6, 0x95, 0x1e, 0x95, 0x65, 0x87, 0x1c, 0x52, 0xd7, 0x44, 0xe6, 0x9b, 0x56, 0xcd, 0x6f, 0x05, 0xff, 0x67, 0xc5, 0xdb, 0xa2, 0xac, 0xe4, 0xa2, 0x28, 0x63, 0x5f, 0xfb, 0x0c, 0x3b, 0xf1, 0x87, 0xc3, 0x36, 0x78, 0x3f, 0x77, 0xfa, 0x50, 0x85, 0xf9, 0xd7, 0x82, + /* (2^255)P */ 0x64, 0xc0, 0xe0, 0xd8, 0x2d, 0xed, 0xcb, 0x6a, 0xfd, 0xcd, 0xbc, 0x7e, 0x9f, 0xc8, 0x85, 0xe9, 0xc1, 0x7c, 0x0f, 0xe5, 0x18, 0xea, 0xd4, 0x51, 0xad, 0x59, 0x13, 0x75, 0xd9, 0x3d, 0xd4, 0x8a, 0xb2, 0xbe, 0x78, 0x52, 0x2b, 0x52, 0x94, 0x37, 0x41, 0xd6, 0xb4, 0xb6, 0x45, 0x20, 0x76, 0xe0, 0x1f, 0x31, 0xdb, 0xb1, 0xa1, 0x43, 0xf0, 0x18, + /* (2^256)P */ 0x74, 0xa9, 0xa4, 0xa9, 0xdd, 0x6e, 0x3e, 0x68, 0xe5, 0xc3, 0x2e, 0x92, 0x17, 0xa4, 0xcb, 0x80, 0xb1, 0xf0, 0x06, 0x93, 0xef, 0xe6, 0x00, 0xe6, 0x3b, 0xb1, 0x32, 0x65, 0x7b, 0x83, 0xb6, 0x8a, 0x49, 0x1b, 0x14, 0x89, 0xee, 0xba, 0xf5, 0x6a, 0x8d, 0x36, 0xef, 0xb0, 0xd8, 0xb2, 0x16, 0x99, 0x17, 0x35, 0x02, 0x16, 0x55, 0x58, 0xdd, 0x82, + /* (2^257)P */ 0x36, 0x95, 0xe8, 0xf4, 0x36, 0x42, 0xbb, 0xc5, 0x3e, 0xfa, 0x30, 0x84, 0x9e, 0x59, 0xfd, 0xd2, 0x95, 0x42, 0xf8, 0x64, 0xd9, 0xb9, 0x0e, 0x9f, 0xfa, 0xd0, 0x7b, 0x20, 0x31, 0x77, 0x48, 0x29, 0x4d, 0xd0, 0x32, 0x57, 0x56, 0x30, 0xa6, 0x17, 0x53, 0x04, 0xbf, 0x08, 0x28, 0xec, 0xb8, 0x46, 0xc1, 0x03, 0x89, 0xdc, 0xed, 0xa0, 0x35, 0x53, + /* (2^258)P */ 0xc5, 0x7f, 0x9e, 0xd8, 0xc5, 0xba, 0x5f, 0x68, 0xc8, 0x23, 0x75, 0xea, 0x0d, 0xd9, 0x5a, 0xfd, 0x61, 0x1a, 0xa3, 0x2e, 0x45, 0x63, 0x14, 0x55, 0x86, 0x21, 0x29, 0xbe, 0xef, 0x5e, 0x50, 0xe5, 0x18, 0x59, 0xe7, 0xe3, 0xce, 0x4d, 0x8c, 0x15, 0x8f, 0x89, 0x66, 0x44, 0x52, 0x3d, 0xfa, 0xc7, 0x9a, 0x59, 0x90, 0x8e, 0xc0, 0x06, 0x3f, 0xc9, + /* (2^259)P */ 0x8e, 0x04, 0xd9, 0x16, 0x50, 0x1d, 0x8c, 0x9f, 0xd5, 0xe3, 0xce, 0xfd, 0x47, 0x04, 0x27, 0x4d, 0xc2, 0xfa, 0x71, 0xd9, 0x0b, 0xb8, 0x65, 0xf4, 0x11, 0xf3, 0x08, 0xee, 0x81, 0xc8, 0x67, 0x99, 0x0b, 0x8d, 0x77, 0xa3, 0x4f, 0xb5, 0x9b, 0xdb, 0x26, 0xf1, 
0x97, 0xeb, 0x04, 0x54, 0xeb, 0x80, 0x08, 0x1d, 0x1d, 0xf6, 0x3d, 0x1f, 0x5a, 0xb8, + /* (2^260)P */ 0xb7, 0x9c, 0x9d, 0xee, 0xb9, 0x5c, 0xad, 0x0d, 0x9e, 0xfd, 0x60, 0x3c, 0x27, 0x4e, 0xa2, 0x95, 0xfb, 0x64, 0x7e, 0x79, 0x64, 0x87, 0x10, 0xb4, 0x73, 0xe0, 0x9d, 0x46, 0x4d, 0x3d, 0xee, 0x83, 0xe4, 0x16, 0x88, 0x97, 0xe6, 0x4d, 0xba, 0x70, 0xb6, 0x96, 0x7b, 0xff, 0x4b, 0xc8, 0xcf, 0x72, 0x83, 0x3e, 0x5b, 0x24, 0x2e, 0x57, 0xf1, 0x82, + /* (2^261)P */ 0x30, 0x71, 0x40, 0x51, 0x4f, 0x44, 0xbb, 0xc7, 0xf0, 0x54, 0x6e, 0x9d, 0xeb, 0x15, 0xad, 0xf8, 0x61, 0x43, 0x5a, 0xef, 0xc0, 0xb1, 0x57, 0xae, 0x03, 0x40, 0xe8, 0x68, 0x6f, 0x03, 0x20, 0x4f, 0x8a, 0x51, 0x2a, 0x9e, 0xd2, 0x45, 0xaf, 0xb4, 0xf5, 0xd4, 0x95, 0x7f, 0x3d, 0x3d, 0xb7, 0xb6, 0x28, 0xc5, 0x08, 0x8b, 0x44, 0xd6, 0x3f, 0xe7, + /* (2^262)P */ 0xa9, 0x52, 0x04, 0x67, 0xcb, 0x20, 0x63, 0xf8, 0x18, 0x01, 0x44, 0x21, 0x6a, 0x8a, 0x83, 0x48, 0xd4, 0xaf, 0x23, 0x0f, 0x35, 0x8d, 0xe5, 0x5a, 0xc4, 0x7c, 0x55, 0x46, 0x19, 0x5f, 0x35, 0xe0, 0x5d, 0x97, 0x4c, 0x2d, 0x04, 0xed, 0x59, 0xd4, 0xb0, 0xb2, 0xc6, 0xe3, 0x51, 0xe1, 0x38, 0xc6, 0x30, 0x49, 0x8f, 0xae, 0x61, 0x64, 0xce, 0xa8, + /* (2^263)P */ 0x9b, 0x64, 0x83, 0x3c, 0xd3, 0xdf, 0xb9, 0x27, 0xe7, 0x5b, 0x7f, 0xeb, 0xf3, 0x26, 0xcf, 0xb1, 0x8f, 0xaf, 0x26, 0xc8, 0x48, 0xce, 0xa1, 0xac, 0x7d, 0x10, 0x34, 0x28, 0xe1, 0x1f, 0x69, 0x03, 0x64, 0x77, 0x61, 0xdd, 0x4a, 0x9b, 0x18, 0x47, 0xf8, 0xca, 0x63, 0xc9, 0x03, 0x2d, 0x20, 0x2a, 0x69, 0x6e, 0x42, 0xd0, 0xe7, 0xaa, 0xb5, 0xf3, + /* (2^264)P */ 0xea, 0x31, 0x0c, 0x57, 0x0f, 0x3e, 0xe3, 0x35, 0xd8, 0x30, 0xa5, 0x6f, 0xdd, 0x95, 0x43, 0xc6, 0x66, 0x07, 0x4f, 0x34, 0xc3, 0x7e, 0x04, 0x10, 0x2d, 0xc4, 0x1c, 0x94, 0x52, 0x2e, 0x5b, 0x9a, 0x65, 0x2f, 0x91, 0xaa, 0x4f, 0x3c, 0xdc, 0x23, 0x18, 0xe1, 0x4f, 0x85, 0xcd, 0xf4, 0x8c, 0x51, 0xf7, 0xab, 0x4f, 0xdc, 0x15, 0x5c, 0x9e, 0xc5, + /* (2^265)P */ 0x54, 0x57, 0x23, 0x17, 0xe7, 0x82, 0x2f, 0x04, 0x7d, 0xfe, 0xe7, 0x1f, 0xa2, 0x57, 0x79, 0xe9, 0x58, 0x9b, 0xbe, 0xc6, 0x16, 0x4a, 0x17, 0x50, 0x90, 0x4a, 0x34, 0x70, 0x87, 0x37, 0x01, 0x26, 0xd8, 0xa3, 0x5f, 0x07, 0x7c, 0xd0, 0x7d, 0x05, 0x8a, 0x93, 0x51, 0x2f, 0x99, 0xea, 0xcf, 0x00, 0xd8, 0xc7, 0xe6, 0x9b, 0x8c, 0x62, 0x45, 0x87, + /* (2^266)P */ 0xc3, 0xfd, 0x29, 0x66, 0xe7, 0x30, 0x29, 0x77, 0xe0, 0x0d, 0x63, 0x5b, 0xe6, 0x90, 0x1a, 0x1e, 0x99, 0xc2, 0xa7, 0xab, 0xff, 0xa7, 0xbd, 0x79, 0x01, 0x97, 0xfd, 0x27, 0x1b, 0x43, 0x2b, 0xe6, 0xfe, 0x5e, 0xf1, 0xb9, 0x35, 0x38, 0x08, 0x25, 0x55, 0x90, 0x68, 0x2e, 0xc3, 0x67, 0x39, 0x9f, 0x2b, 0x2c, 0x70, 0x48, 0x8c, 0x47, 0xee, 0x56, + /* (2^267)P */ 0xf7, 0x32, 0x70, 0xb5, 0xe6, 0x42, 0xfd, 0x0a, 0x39, 0x9b, 0x07, 0xfe, 0x0e, 0xf4, 0x47, 0xba, 0x6a, 0x3f, 0xf5, 0x2c, 0x15, 0xf3, 0x60, 0x3f, 0xb1, 0x83, 0x7b, 0x2e, 0x34, 0x58, 0x1a, 0x6e, 0x4a, 0x49, 0x05, 0x45, 0xca, 0xdb, 0x00, 0x01, 0x0c, 0x42, 0x5e, 0x60, 0x40, 0x5f, 0xd9, 0xc7, 0x3a, 0x9e, 0x1c, 0x8d, 0xab, 0x11, 0x55, 0x65, + /* (2^268)P */ 0x87, 0x40, 0xb7, 0x0d, 0xaa, 0x34, 0x89, 0x90, 0x75, 0x6d, 0xa2, 0xfe, 0x3b, 0x6d, 0x5c, 0x39, 0x98, 0x10, 0x9e, 0x15, 0xc5, 0x35, 0xa2, 0x27, 0x23, 0x0a, 0x2d, 0x60, 0xe2, 0xa8, 0x7f, 0x3e, 0x77, 0x8f, 0xcc, 0x44, 0xcc, 0x30, 0x28, 0xe2, 0xf0, 0x04, 0x8c, 0xee, 0xe4, 0x5f, 0x68, 0x8c, 0xdf, 0x70, 0xbf, 0x31, 0xee, 0x2a, 0xfc, 0xce, + /* (2^269)P */ 0x92, 0xf2, 0xa0, 0xd9, 0x58, 0x3b, 0x7c, 0x1a, 0x99, 0x46, 0x59, 0x54, 0x60, 0x06, 0x8d, 0x5e, 0xf0, 0x22, 0xa1, 0xed, 0x92, 0x8a, 0x4d, 0x76, 0x95, 0x05, 0x0b, 0xff, 0xfc, 0x9a, 0xd1, 0xcc, 0x05, 0xb9, 0x5e, 0x99, 0xe8, 0x2a, 0x76, 0x7b, 0xfd, 0xa6, 0xe2, 0xd1, 0x1a, 0xd6, 
0x76, 0x9f, 0x2f, 0x0e, 0xd1, 0xa8, 0x77, 0x5a, 0x40, 0x5a, + /* (2^270)P */ 0xff, 0xf9, 0x3f, 0xa9, 0xa6, 0x6c, 0x6d, 0x03, 0x8b, 0xa7, 0x10, 0x5d, 0x3f, 0xec, 0x3e, 0x1c, 0x0b, 0x6b, 0xa2, 0x6a, 0x22, 0xa9, 0x28, 0xd0, 0x66, 0xc9, 0xc2, 0x3d, 0x47, 0x20, 0x7d, 0xa6, 0x1d, 0xd8, 0x25, 0xb5, 0xf2, 0xf9, 0x70, 0x19, 0x6b, 0xf8, 0x43, 0x36, 0xc5, 0x1f, 0xe4, 0x5a, 0x4c, 0x13, 0xe4, 0x6d, 0x08, 0x0b, 0x1d, 0xb1, + /* (2^271)P */ 0x3f, 0x20, 0x9b, 0xfb, 0xec, 0x7d, 0x31, 0xc5, 0xfc, 0x88, 0x0b, 0x30, 0xed, 0x36, 0xc0, 0x63, 0xb1, 0x7d, 0x10, 0xda, 0xb6, 0x2e, 0xad, 0xf3, 0xec, 0x94, 0xe7, 0xec, 0xb5, 0x9c, 0xfe, 0xf5, 0x35, 0xf0, 0xa2, 0x2d, 0x7f, 0xca, 0x6b, 0x67, 0x1a, 0xf6, 0xb3, 0xda, 0x09, 0x2a, 0xaa, 0xdf, 0xb1, 0xca, 0x9b, 0xfb, 0xeb, 0xb3, 0xcd, 0xc0, + /* (2^272)P */ 0xcd, 0x4d, 0x89, 0x00, 0xa4, 0x3b, 0x48, 0xf0, 0x76, 0x91, 0x35, 0xa5, 0xf8, 0xc9, 0xb6, 0x46, 0xbc, 0xf6, 0x9a, 0x45, 0x47, 0x17, 0x96, 0x80, 0x5b, 0x3a, 0x28, 0x33, 0xf9, 0x5a, 0xef, 0x43, 0x07, 0xfe, 0x3b, 0xf4, 0x8e, 0x19, 0xce, 0xd2, 0x94, 0x4b, 0x6d, 0x8e, 0x67, 0x20, 0xc7, 0x4f, 0x2f, 0x59, 0x8e, 0xe1, 0xa1, 0xa9, 0xf9, 0x0e, + /* (2^273)P */ 0xdc, 0x7b, 0xb5, 0x50, 0x2e, 0xe9, 0x7e, 0x8b, 0x78, 0xa1, 0x38, 0x96, 0x22, 0xc3, 0x61, 0x67, 0x6d, 0xc8, 0x58, 0xed, 0x41, 0x1d, 0x5d, 0x86, 0x98, 0x7f, 0x2f, 0x1b, 0x8d, 0x3e, 0xaa, 0xc1, 0xd2, 0x0a, 0xf3, 0xbf, 0x95, 0x04, 0xf3, 0x10, 0x3c, 0x2b, 0x7f, 0x90, 0x46, 0x04, 0xaa, 0x6a, 0xa9, 0x35, 0x76, 0xac, 0x49, 0xb5, 0x00, 0x45, + /* (2^274)P */ 0xb1, 0x93, 0x79, 0x84, 0x4a, 0x2a, 0x30, 0x78, 0x16, 0xaa, 0xc5, 0x74, 0x06, 0xce, 0xa5, 0xa7, 0x32, 0x86, 0xe0, 0xf9, 0x10, 0xd2, 0x58, 0x76, 0xfb, 0x66, 0x49, 0x76, 0x3a, 0x90, 0xba, 0xb5, 0xcc, 0x99, 0xcd, 0x09, 0xc1, 0x9a, 0x74, 0x23, 0xdf, 0x0c, 0xfe, 0x99, 0x52, 0x80, 0xa3, 0x7c, 0x1c, 0x71, 0x5f, 0x2c, 0x49, 0x57, 0xf4, 0xf9, + /* (2^275)P */ 0x6d, 0xbf, 0x52, 0xe6, 0x25, 0x98, 0xed, 0xcf, 0xe3, 0xbc, 0x08, 0xa2, 0x1a, 0x90, 0xae, 0xa0, 0xbf, 0x07, 0x15, 0xad, 0x0a, 0x9f, 0x3e, 0x47, 0x44, 0xc2, 0x10, 0x46, 0xa6, 0x7a, 0x9e, 0x2f, 0x57, 0xbc, 0xe2, 0xf0, 0x1d, 0xd6, 0x9a, 0x06, 0xed, 0xfc, 0x54, 0x95, 0x92, 0x15, 0xa2, 0xf7, 0x8d, 0x6b, 0xef, 0xb2, 0x05, 0xed, 0x5c, 0x63, + /* (2^276)P */ 0xbc, 0x0b, 0x27, 0x3a, 0x3a, 0xf8, 0xe1, 0x48, 0x02, 0x7e, 0x27, 0xe6, 0x81, 0x62, 0x07, 0x73, 0x74, 0xe5, 0x52, 0xd7, 0xf8, 0x26, 0xca, 0x93, 0x4d, 0x3e, 0x9b, 0x55, 0x09, 0x8e, 0xe3, 0xd7, 0xa6, 0xe3, 0xb6, 0x2a, 0xa9, 0xb3, 0xb0, 0xa0, 0x8c, 0x01, 0xbb, 0x07, 0x90, 0x78, 0x6d, 0x6d, 0xe9, 0xf0, 0x7a, 0x90, 0xbd, 0xdc, 0x0c, 0x36, + /* (2^277)P */ 0x7f, 0x20, 0x12, 0x0f, 0x40, 0x00, 0x53, 0xd8, 0x0c, 0x27, 0x47, 0x47, 0x22, 0x80, 0xfb, 0x62, 0xe4, 0xa7, 0xf7, 0xbd, 0x42, 0xa5, 0xc3, 0x2b, 0xb2, 0x7f, 0x50, 0xcc, 0xe2, 0xfb, 0xd5, 0xc0, 0x63, 0xdd, 0x24, 0x5f, 0x7c, 0x08, 0x91, 0xbf, 0x6e, 0x47, 0x44, 0xd4, 0x6a, 0xc0, 0xc3, 0x09, 0x39, 0x27, 0xdd, 0xc7, 0xca, 0x06, 0x29, 0x55, + /* (2^278)P */ 0x76, 0x28, 0x58, 0xb0, 0xd2, 0xf3, 0x0f, 0x04, 0xe9, 0xc9, 0xab, 0x66, 0x5b, 0x75, 0x51, 0xdc, 0xe5, 0x8f, 0xe8, 0x1f, 0xdb, 0x03, 0x0f, 0xb0, 0x7d, 0xf9, 0x20, 0x64, 0x89, 0xe9, 0xdc, 0xe6, 0x24, 0xc3, 0xd5, 0xd2, 0x41, 0xa6, 0xe4, 0xe3, 0xc4, 0x79, 0x7c, 0x0f, 0xa1, 0x61, 0x2f, 0xda, 0xa4, 0xc9, 0xfd, 0xad, 0x5c, 0x65, 0x6a, 0xf3, + /* (2^279)P */ 0xd5, 0xab, 0x72, 0x7a, 0x3b, 0x59, 0xea, 0xcf, 0xd5, 0x17, 0xd2, 0xb2, 0x5f, 0x2d, 0xab, 0xad, 0x9e, 0x88, 0x64, 0x55, 0x96, 0x6e, 0xf3, 0x44, 0xa9, 0x11, 0xf5, 0xf8, 0x3a, 0xf1, 0xcd, 0x79, 0x4c, 0x99, 0x6d, 0x23, 0x6a, 0xa0, 0xc2, 0x1a, 0x19, 0x45, 0xb5, 0xd8, 0x95, 0x2f, 0x49, 0xe9, 0x46, 0x39, 
0x26, 0x60, 0x04, 0x15, 0x8b, 0xcc, + /* (2^280)P */ 0x66, 0x0c, 0xf0, 0x54, 0x41, 0x02, 0x91, 0xab, 0xe5, 0x85, 0x8a, 0x44, 0xa6, 0x34, 0x96, 0x32, 0xc0, 0xdf, 0x6c, 0x41, 0x39, 0xd4, 0xc6, 0xe1, 0xe3, 0x81, 0xb0, 0x4c, 0x34, 0x4f, 0xe5, 0xf4, 0x35, 0x46, 0x1f, 0xeb, 0x75, 0xfd, 0x43, 0x37, 0x50, 0x99, 0xab, 0xad, 0xb7, 0x8c, 0xa1, 0x57, 0xcb, 0xe6, 0xce, 0x16, 0x2e, 0x85, 0xcc, 0xf9, + /* (2^281)P */ 0x63, 0xd1, 0x3f, 0x9e, 0xa2, 0x17, 0x2e, 0x1d, 0x3e, 0xce, 0x48, 0x2d, 0xbb, 0x8f, 0x69, 0xc9, 0xa6, 0x3d, 0x4e, 0xfe, 0x09, 0x56, 0xb3, 0x02, 0x5f, 0x99, 0x97, 0x0c, 0x54, 0xda, 0x32, 0x97, 0x9b, 0xf4, 0x95, 0xf1, 0xad, 0xe3, 0x2b, 0x04, 0xa7, 0x9b, 0x3f, 0xbb, 0xe7, 0x87, 0x2e, 0x1f, 0x8b, 0x4b, 0x7a, 0xa4, 0x43, 0x0c, 0x0f, 0x35, + /* (2^282)P */ 0x05, 0xdc, 0xe0, 0x2c, 0xa1, 0xc1, 0xd0, 0xf1, 0x1f, 0x4e, 0xc0, 0x6c, 0x35, 0x7b, 0xca, 0x8f, 0x8b, 0x02, 0xb1, 0xf7, 0xd6, 0x2e, 0xe7, 0x93, 0x80, 0x85, 0x18, 0x88, 0x19, 0xb9, 0xb4, 0x4a, 0xbc, 0xeb, 0x5a, 0x78, 0x38, 0xed, 0xc6, 0x27, 0x2a, 0x74, 0x76, 0xf0, 0x1b, 0x79, 0x92, 0x2f, 0xd2, 0x81, 0x98, 0xdf, 0xa9, 0x50, 0x19, 0xeb, + /* (2^283)P */ 0xb5, 0xe7, 0xb4, 0x11, 0x3a, 0x81, 0xb6, 0xb4, 0xf8, 0xa2, 0xb3, 0x6c, 0xfc, 0x9d, 0xe0, 0xc0, 0xe0, 0x59, 0x7f, 0x05, 0x37, 0xef, 0x2c, 0xa9, 0x3a, 0x24, 0xac, 0x7b, 0x25, 0xa0, 0x55, 0xd2, 0x44, 0x82, 0x82, 0x6e, 0x64, 0xa3, 0x58, 0xc8, 0x67, 0xae, 0x26, 0xa7, 0x0f, 0x42, 0x63, 0xe1, 0x93, 0x01, 0x52, 0x19, 0xaf, 0x49, 0x3e, 0x33, + /* (2^284)P */ 0x05, 0x85, 0xe6, 0x66, 0xaf, 0x5f, 0xdf, 0xbf, 0x9d, 0x24, 0x62, 0x60, 0x90, 0xe2, 0x4c, 0x7d, 0x4e, 0xc3, 0x74, 0x5d, 0x4f, 0x53, 0xf3, 0x63, 0x13, 0xf4, 0x74, 0x28, 0x6b, 0x7d, 0x57, 0x0c, 0x9d, 0x84, 0xa7, 0x1a, 0xff, 0xa0, 0x79, 0xdf, 0xfc, 0x65, 0x98, 0x8e, 0x22, 0x0d, 0x62, 0x7e, 0xf2, 0x34, 0x60, 0x83, 0x05, 0x14, 0xb1, 0xc1, + /* (2^285)P */ 0x64, 0x22, 0xcc, 0xdf, 0x5c, 0xbc, 0x88, 0x68, 0x4c, 0xd9, 0xbc, 0x0e, 0xc9, 0x8b, 0xb4, 0x23, 0x52, 0xad, 0xb0, 0xb3, 0xf1, 0x17, 0xd8, 0x15, 0x04, 0x6b, 0x99, 0xf0, 0xc4, 0x7d, 0x48, 0x22, 0x4a, 0xf8, 0x6f, 0xaa, 0x88, 0x0d, 0xc5, 0x5e, 0xa9, 0x1c, 0x61, 0x3d, 0x95, 0xa9, 0x7b, 0x6a, 0x79, 0x33, 0x0a, 0x2b, 0x99, 0xe3, 0x4e, 0x48, + /* (2^286)P */ 0x6b, 0x9b, 0x6a, 0x2a, 0xf1, 0x60, 0x31, 0xb4, 0x73, 0xd1, 0x87, 0x45, 0x9c, 0x15, 0x58, 0x4b, 0x91, 0x6d, 0x94, 0x1c, 0x41, 0x11, 0x4a, 0x83, 0xec, 0xaf, 0x65, 0xbc, 0x34, 0xaa, 0x26, 0xe2, 0xaf, 0xed, 0x46, 0x05, 0x4e, 0xdb, 0xc6, 0x4e, 0x10, 0x28, 0x4e, 0x72, 0xe5, 0x31, 0xa3, 0x20, 0xd7, 0xb1, 0x96, 0x64, 0xf6, 0xce, 0x08, 0x08, + /* (2^287)P */ 0x16, 0xa9, 0x5c, 0x9f, 0x9a, 0xb4, 0xb8, 0xc8, 0x32, 0x78, 0xc0, 0x3a, 0xd9, 0x5f, 0x94, 0xac, 0x3a, 0x42, 0x1f, 0x43, 0xd6, 0x80, 0x47, 0x2c, 0xdc, 0x76, 0x27, 0xfa, 0x50, 0xe5, 0xa1, 0xe4, 0xc3, 0xcb, 0x61, 0x31, 0xe1, 0x2e, 0xde, 0x81, 0x3b, 0x77, 0x1c, 0x39, 0x3c, 0xdb, 0xda, 0x87, 0x4b, 0x84, 0x12, 0xeb, 0xdd, 0x54, 0xbf, 0xe7, + /* (2^288)P */ 0xbf, 0xcb, 0x73, 0x21, 0x3d, 0x7e, 0x13, 0x8c, 0xa6, 0x34, 0x21, 0x2b, 0xa5, 0xe4, 0x9f, 0x8e, 0x9c, 0x01, 0x9c, 0x43, 0xd9, 0xc7, 0xb9, 0xf1, 0xbe, 0x7f, 0x45, 0x51, 0x97, 0xa1, 0x8e, 0x01, 0xf8, 0xbd, 0xd2, 0xbf, 0x81, 0x3a, 0x8b, 0xab, 0xe4, 0x89, 0xb7, 0xbd, 0xf2, 0xcd, 0xa9, 0x8a, 0x8a, 0xde, 0xfb, 0x8a, 0x55, 0x12, 0x7b, 0x17, + /* (2^289)P */ 0x1b, 0x95, 0x58, 0x4d, 0xe6, 0x51, 0x31, 0x52, 0x1c, 0xd8, 0x15, 0x84, 0xb1, 0x0d, 0x36, 0x25, 0x88, 0x91, 0x46, 0x71, 0x42, 0x56, 0xe2, 0x90, 0x08, 0x9e, 0x77, 0x1b, 0xee, 0x22, 0x3f, 0xec, 0xee, 0x8c, 0x7b, 0x2e, 0x79, 0xc4, 0x6c, 0x07, 0xa1, 0x7e, 0x52, 0xf5, 0x26, 0x5c, 0x84, 0x2a, 0x50, 0x6e, 0x82, 0xb3, 0x76, 0xda, 
0x35, 0x16, + /* (2^290)P */ 0x0a, 0x6f, 0x99, 0x87, 0xc0, 0x7d, 0x8a, 0xb2, 0xca, 0xae, 0xe8, 0x65, 0x98, 0x0f, 0xb3, 0x44, 0xe1, 0xdc, 0x52, 0x79, 0x75, 0xec, 0x8f, 0x95, 0x87, 0x45, 0xd1, 0x32, 0x18, 0x55, 0x15, 0xce, 0x64, 0x9b, 0x08, 0x4f, 0x2c, 0xea, 0xba, 0x1c, 0x57, 0x06, 0x63, 0xc8, 0xb1, 0xfd, 0xc5, 0x67, 0xe7, 0x1f, 0x87, 0x9e, 0xde, 0x72, 0x7d, 0xec, + /* (2^291)P */ 0x36, 0x8b, 0x4d, 0x2c, 0xc2, 0x46, 0xe8, 0x96, 0xac, 0x0b, 0x8c, 0xc5, 0x09, 0x10, 0xfc, 0xf2, 0xda, 0xea, 0x22, 0xb2, 0xd3, 0x89, 0xeb, 0xb2, 0x85, 0x0f, 0xff, 0x59, 0x50, 0x2c, 0x99, 0x5a, 0x1f, 0xec, 0x2a, 0x6f, 0xec, 0xcf, 0xe9, 0xce, 0x12, 0x6b, 0x19, 0xd8, 0xde, 0x9b, 0xce, 0x0e, 0x6a, 0xaa, 0xe1, 0x32, 0xea, 0x4c, 0xfe, 0x92, + /* (2^292)P */ 0x5f, 0x17, 0x70, 0x53, 0x26, 0x03, 0x0b, 0xab, 0xd1, 0xc1, 0x42, 0x0b, 0xab, 0x2b, 0x3d, 0x31, 0xa4, 0xd5, 0x2b, 0x5e, 0x00, 0xd5, 0x9a, 0x22, 0x34, 0xe0, 0x53, 0x3f, 0x59, 0x7f, 0x2c, 0x6d, 0x72, 0x9a, 0xa4, 0xbe, 0x3d, 0x42, 0x05, 0x1b, 0xf2, 0x7f, 0x88, 0x56, 0xd1, 0x7c, 0x7d, 0x6b, 0x9f, 0x43, 0xfe, 0x65, 0x19, 0xae, 0x9c, 0x4c, + /* (2^293)P */ 0xf3, 0x7c, 0x20, 0xa9, 0xfc, 0xf2, 0xf2, 0x3b, 0x3c, 0x57, 0x41, 0x94, 0xe5, 0xcc, 0x6a, 0x37, 0x5d, 0x09, 0xf2, 0xab, 0xc2, 0xca, 0x60, 0x38, 0x6b, 0x7a, 0xe1, 0x78, 0x2b, 0xc1, 0x1d, 0xe8, 0xfd, 0xbc, 0x3d, 0x5c, 0xa2, 0xdb, 0x49, 0x20, 0x79, 0xe6, 0x1b, 0x9b, 0x65, 0xd9, 0x6d, 0xec, 0x57, 0x1d, 0xd2, 0xe9, 0x90, 0xeb, 0x43, 0x7b, + /* (2^294)P */ 0x2a, 0x8b, 0x2e, 0x19, 0x18, 0x10, 0xb8, 0x83, 0xe7, 0x7d, 0x2d, 0x9a, 0x3a, 0xe5, 0xd1, 0xe4, 0x7c, 0x38, 0xe5, 0x59, 0x2a, 0x6e, 0xd9, 0x01, 0x29, 0x3d, 0x23, 0xf7, 0x52, 0xba, 0x61, 0x04, 0x9a, 0xde, 0xc4, 0x31, 0x50, 0xeb, 0x1b, 0xaa, 0xde, 0x39, 0x58, 0xd8, 0x1b, 0x1e, 0xfc, 0x57, 0x9a, 0x28, 0x43, 0x9e, 0x97, 0x5e, 0xaa, 0xa3, + /* (2^295)P */ 0x97, 0x0a, 0x74, 0xc4, 0x39, 0x99, 0x6b, 0x40, 0xc7, 0x3e, 0x8c, 0xa7, 0xb1, 0x4e, 0x9a, 0x59, 0x6e, 0x1c, 0xfe, 0xfc, 0x2a, 0x5e, 0x73, 0x2b, 0x8c, 0xa9, 0x71, 0xf5, 0xda, 0x6b, 0x15, 0xab, 0xf7, 0xbe, 0x2a, 0x44, 0x5f, 0xba, 0xae, 0x67, 0x93, 0xc5, 0x86, 0xc1, 0xb8, 0xdf, 0xdc, 0xcb, 0xd7, 0xff, 0xb1, 0x71, 0x7c, 0x6f, 0x88, 0xf8, + /* (2^296)P */ 0x3f, 0x89, 0xb1, 0xbf, 0x24, 0x16, 0xac, 0x56, 0xfe, 0xdf, 0x94, 0x71, 0xbf, 0xd6, 0x57, 0x0c, 0xb4, 0x77, 0x37, 0xaa, 0x2a, 0x70, 0x76, 0x49, 0xaf, 0x0c, 0x97, 0x8e, 0x78, 0x2a, 0x67, 0xc9, 0x3b, 0x3d, 0x5b, 0x01, 0x2f, 0xda, 0xd5, 0xa8, 0xde, 0x02, 0xa9, 0xac, 0x76, 0x00, 0x0b, 0x46, 0xc6, 0x2d, 0xdc, 0x08, 0xf4, 0x10, 0x2c, 0xbe, + /* (2^297)P */ 0xcb, 0x07, 0xf9, 0x91, 0xc6, 0xd5, 0x3e, 0x54, 0x63, 0xae, 0xfc, 0x10, 0xbe, 0x3a, 0x20, 0x73, 0x4e, 0x65, 0x0e, 0x2d, 0x86, 0x77, 0x83, 0x9d, 0xe2, 0x0a, 0xe9, 0xac, 0x22, 0x52, 0x76, 0xd4, 0x6e, 0xfa, 0xe0, 0x09, 0xef, 0x78, 0x82, 0x9f, 0x26, 0xf9, 0x06, 0xb5, 0xe7, 0x05, 0x0e, 0xf2, 0x46, 0x72, 0x93, 0xd3, 0x24, 0xbd, 0x87, 0x60, + /* (2^298)P */ 0x14, 0x55, 0x84, 0x7b, 0x6c, 0x60, 0x80, 0x73, 0x8c, 0xbe, 0x2d, 0xd6, 0x69, 0xd6, 0x17, 0x26, 0x44, 0x9f, 0x88, 0xa2, 0x39, 0x7c, 0x89, 0xbc, 0x6d, 0x9e, 0x46, 0xb6, 0x68, 0x66, 0xea, 0xdc, 0x31, 0xd6, 0x21, 0x51, 0x9f, 0x28, 0x28, 0xaf, 0x9e, 0x47, 0x2c, 0x4c, 0x8f, 0xf3, 0xaf, 0x1f, 0xe4, 0xab, 0xac, 0xe9, 0x0c, 0x91, 0x3a, 0x61, + /* (2^299)P */ 0xb0, 0x37, 0x55, 0x4b, 0xe9, 0xc3, 0xb1, 0xce, 0x42, 0xe6, 0xc5, 0x11, 0x7f, 0x2c, 0x11, 0xfc, 0x4e, 0x71, 0x17, 0x00, 0x74, 0x7f, 0xbf, 0x07, 0x4d, 0xfd, 0x40, 0xb2, 0x87, 0xb0, 0xef, 0x1f, 0x35, 0x2c, 0x2d, 0xd7, 0xe1, 0xe4, 0xad, 0x0e, 0x7f, 0x63, 0x66, 0x62, 0x23, 0x41, 0xf6, 0xc1, 0x14, 0xa6, 0xd7, 0xa9, 0x11, 0x56, 0x9d, 0x1b, + /* 
(2^300)P */ 0x02, 0x82, 0x42, 0x18, 0x4f, 0x1b, 0xc9, 0x5d, 0x78, 0x5f, 0xee, 0xed, 0x01, 0x49, 0x8f, 0xf2, 0xa0, 0xe2, 0x6e, 0xbb, 0x6b, 0x04, 0x8d, 0xb2, 0x41, 0xae, 0xc8, 0x1b, 0x59, 0x34, 0xb8, 0x2a, 0xdb, 0x1f, 0xd2, 0x52, 0xdf, 0x3f, 0x35, 0x00, 0x8b, 0x61, 0xbc, 0x97, 0xa0, 0xc4, 0x77, 0xd1, 0xe4, 0x2c, 0x59, 0x68, 0xff, 0x30, 0xf2, 0xe2, + /* (2^301)P */ 0x79, 0x08, 0xb1, 0xdb, 0x55, 0xae, 0xd0, 0xed, 0xda, 0xa0, 0xec, 0x6c, 0xae, 0x68, 0xf2, 0x0b, 0x61, 0xb3, 0xf5, 0x21, 0x69, 0x87, 0x0b, 0x03, 0xea, 0x8a, 0x15, 0xd9, 0x7e, 0xca, 0xf7, 0xcd, 0xf3, 0x33, 0xb3, 0x4c, 0x5b, 0x23, 0x4e, 0x6f, 0x90, 0xad, 0x91, 0x4b, 0x4f, 0x46, 0x37, 0xe5, 0xe8, 0xb7, 0xeb, 0xd5, 0xca, 0x34, 0x4e, 0x23, + /* (2^302)P */ 0x09, 0x02, 0xdd, 0xfd, 0x70, 0xac, 0x56, 0x80, 0x36, 0x5e, 0x49, 0xd0, 0x3f, 0xc2, 0xe0, 0xba, 0x46, 0x7f, 0x5c, 0xf7, 0xc5, 0xbd, 0xd5, 0x55, 0x7d, 0x3f, 0xd5, 0x7d, 0x06, 0xdf, 0x27, 0x20, 0x4f, 0xe9, 0x30, 0xec, 0x1b, 0xa0, 0x0c, 0xd4, 0x2c, 0xe1, 0x2b, 0x65, 0x73, 0xea, 0x75, 0x35, 0xe8, 0xe6, 0x56, 0xd6, 0x07, 0x15, 0x99, 0xdf, + /* (2^303)P */ 0x4e, 0x10, 0xb7, 0xd0, 0x63, 0x8c, 0xcf, 0x16, 0x00, 0x7c, 0x58, 0xdf, 0x86, 0xdc, 0x4e, 0xca, 0x9c, 0x40, 0x5a, 0x42, 0xfd, 0xec, 0x98, 0xa4, 0x42, 0x53, 0xae, 0x16, 0x9d, 0xfd, 0x75, 0x5a, 0x12, 0x56, 0x1e, 0xc6, 0x57, 0xcc, 0x79, 0x27, 0x96, 0x00, 0xcf, 0x80, 0x4f, 0x8a, 0x36, 0x5c, 0xbb, 0xe9, 0x12, 0xdb, 0xb6, 0x2b, 0xad, 0x96, + /* (2^304)P */ 0x92, 0x32, 0x1f, 0xfd, 0xc6, 0x02, 0x94, 0x08, 0x1b, 0x60, 0x6a, 0x9f, 0x8b, 0xd6, 0xc8, 0xad, 0xd5, 0x1b, 0x27, 0x4e, 0xa4, 0x4d, 0x4a, 0x00, 0x10, 0x5f, 0x86, 0x11, 0xf5, 0xe3, 0x14, 0x32, 0x43, 0xee, 0xb9, 0xc7, 0xab, 0xf4, 0x6f, 0xe5, 0x66, 0x0c, 0x06, 0x0d, 0x96, 0x79, 0x28, 0xaf, 0x45, 0x2b, 0x56, 0xbe, 0xe4, 0x4a, 0x52, 0xd6, + /* (2^305)P */ 0x15, 0x16, 0x69, 0xef, 0x60, 0xca, 0x82, 0x25, 0x0f, 0xc6, 0x30, 0xa0, 0x0a, 0xd1, 0x83, 0x29, 0xcd, 0xb6, 0x89, 0x6c, 0xf5, 0xb2, 0x08, 0x38, 0xe6, 0xca, 0x6b, 0x19, 0x93, 0xc6, 0x5f, 0x75, 0x8e, 0x60, 0x34, 0x23, 0xc4, 0x13, 0x17, 0x69, 0x55, 0xcc, 0x72, 0x9c, 0x2b, 0x6c, 0x80, 0xf4, 0x4b, 0x8b, 0xb6, 0x97, 0x65, 0x07, 0xb6, 0xfb, + /* (2^306)P */ 0x01, 0x99, 0x74, 0x28, 0xa6, 0x67, 0xa3, 0xe5, 0x25, 0xfb, 0xdf, 0x82, 0x93, 0xe7, 0x35, 0x74, 0xce, 0xe3, 0x15, 0x1c, 0x1d, 0x79, 0x52, 0x84, 0x08, 0x04, 0x2f, 0x5c, 0xb8, 0xcd, 0x7f, 0x89, 0xb0, 0x39, 0x93, 0x63, 0xc9, 0x5d, 0x06, 0x01, 0x59, 0xf7, 0x7e, 0xf1, 0x4c, 0x3d, 0x12, 0x8d, 0x69, 0x1d, 0xb7, 0x21, 0x5e, 0x88, 0x82, 0xa2, + /* (2^307)P */ 0x8e, 0x69, 0xaf, 0x9a, 0x41, 0x0d, 0x9d, 0xcf, 0x8e, 0x8d, 0x5c, 0x51, 0x6e, 0xde, 0x0e, 0x48, 0x23, 0x89, 0xe5, 0x37, 0x80, 0xd6, 0x9d, 0x72, 0x32, 0x26, 0x38, 0x2d, 0x63, 0xa0, 0xfa, 0xd3, 0x40, 0xc0, 0x8c, 0x68, 0x6f, 0x2b, 0x1e, 0x9a, 0x39, 0x51, 0x78, 0x74, 0x9a, 0x7b, 0x4a, 0x8f, 0x0c, 0xa0, 0x88, 0x60, 0xa5, 0x21, 0xcd, 0xc7, + /* (2^308)P */ 0x3a, 0x7f, 0x73, 0x14, 0xbf, 0x89, 0x6a, 0x4c, 0x09, 0x5d, 0xf2, 0x93, 0x20, 0x2d, 0xc4, 0x29, 0x86, 0x06, 0x95, 0xab, 0x22, 0x76, 0x4c, 0x54, 0xe1, 0x7e, 0x80, 0x6d, 0xab, 0x29, 0x61, 0x87, 0x77, 0xf6, 0xc0, 0x3e, 0xda, 0xab, 0x65, 0x7e, 0x39, 0x12, 0xa1, 0x6b, 0x42, 0xf7, 0xc5, 0x97, 0x77, 0xec, 0x6f, 0x22, 0xbe, 0x44, 0xc7, 0x03, + /* (2^309)P */ 0xa5, 0x23, 0x90, 0x41, 0xa3, 0xc5, 0x3e, 0xe0, 0xa5, 0x32, 0x49, 0x1f, 0x39, 0x78, 0xb1, 0xd8, 0x24, 0xea, 0xd4, 0x87, 0x53, 0x42, 0x51, 0xf4, 0xd9, 0x46, 0x25, 0x2f, 0x62, 0xa9, 0x90, 0x9a, 0x4a, 0x25, 0x8a, 0xd2, 0x10, 0xe7, 0x3c, 0xbc, 0x58, 0x8d, 0x16, 0x14, 0x96, 0xa4, 0x6f, 0xf8, 0x12, 0x69, 0x91, 0x73, 0xe2, 0xfa, 0xf4, 0x57, + /* (2^310)P */ 0x51, 0x45, 
0x3f, 0x96, 0xdc, 0x97, 0x38, 0xa6, 0x01, 0x63, 0x09, 0xea, 0xc2, 0x13, 0x30, 0xb0, 0x00, 0xb8, 0x0a, 0xce, 0xd1, 0x8f, 0x3e, 0x69, 0x62, 0x46, 0x33, 0x9c, 0xbf, 0x4b, 0xcb, 0x0c, 0x90, 0x1c, 0x45, 0xcf, 0x37, 0x5b, 0xf7, 0x4b, 0x5e, 0x95, 0xc3, 0x28, 0x9f, 0x08, 0x83, 0x53, 0x74, 0xab, 0x0c, 0xb4, 0xc0, 0xa1, 0xbc, 0x89, + /* (2^311)P */ 0x06, 0xb1, 0x51, 0x15, 0x65, 0x60, 0x21, 0x17, 0x7a, 0x20, 0x65, 0xee, 0x12, 0x35, 0x4d, 0x46, 0xf4, 0xf8, 0xd0, 0xb1, 0xca, 0x09, 0x30, 0x08, 0x89, 0x23, 0x3b, 0xe7, 0xab, 0x8b, 0x77, 0xa6, 0xad, 0x25, 0xdd, 0xea, 0x3c, 0x7d, 0xa5, 0x24, 0xb3, 0xe8, 0xfa, 0xfb, 0xc9, 0xf2, 0x71, 0xe9, 0xfa, 0xf2, 0xdc, 0x54, 0xdd, 0x55, 0x2e, 0x2f, + /* (2^312)P */ 0x7f, 0x96, 0x96, 0xfb, 0x52, 0x86, 0xcf, 0xea, 0x62, 0x18, 0xf1, 0x53, 0x1f, 0x61, 0x2a, 0x9f, 0x8c, 0x51, 0xca, 0x2c, 0xde, 0x6d, 0xce, 0xab, 0x58, 0x32, 0x0b, 0x33, 0x9b, 0x99, 0xb4, 0x5c, 0x88, 0x2a, 0x76, 0xcc, 0x3e, 0x54, 0x1e, 0x9d, 0xa2, 0x89, 0xe4, 0x19, 0xba, 0x80, 0xc8, 0x39, 0x32, 0x7f, 0x0f, 0xc7, 0x84, 0xbb, 0x43, 0x56, + /* (2^313)P */ 0x9b, 0x07, 0xb4, 0x42, 0xa9, 0xa0, 0x78, 0x4f, 0x28, 0x70, 0x2b, 0x7e, 0x61, 0xe0, 0xdd, 0x02, 0x98, 0xfc, 0xed, 0x31, 0x80, 0xf1, 0x15, 0x52, 0x89, 0x23, 0xcd, 0x5d, 0x2b, 0xc5, 0x19, 0x32, 0xfb, 0x70, 0x50, 0x7a, 0x97, 0x6b, 0x42, 0xdb, 0xca, 0xdb, 0xc4, 0x59, 0x99, 0xe0, 0x12, 0x1f, 0x17, 0xba, 0x8b, 0xf0, 0xc4, 0x38, 0x5d, 0x27, + /* (2^314)P */ 0x29, 0x1d, 0xdc, 0x2b, 0xf6, 0x5b, 0x04, 0x61, 0x36, 0x76, 0xa0, 0x56, 0x36, 0x6e, 0xd7, 0x24, 0x4d, 0xe7, 0xef, 0x44, 0xd2, 0xd5, 0x07, 0xcd, 0xc4, 0x9d, 0x80, 0x48, 0xc3, 0x38, 0xcf, 0xd8, 0xa3, 0xdd, 0xb2, 0x5e, 0xb5, 0x70, 0x15, 0xbb, 0x36, 0x85, 0x8a, 0xd7, 0xfb, 0x56, 0x94, 0x73, 0x9c, 0x81, 0xbe, 0xb1, 0x44, 0x28, 0xf1, 0x37, + /* (2^315)P */ 0xbf, 0xcf, 0x5c, 0xd2, 0xe2, 0xea, 0xc2, 0xcd, 0x70, 0x7a, 0x9d, 0xcb, 0x81, 0xc1, 0xe9, 0xf1, 0x56, 0x71, 0x52, 0xf7, 0x1b, 0x87, 0xc6, 0xd8, 0xcc, 0xb2, 0x69, 0xf3, 0xb0, 0xbd, 0xba, 0x83, 0x12, 0x26, 0xc4, 0xce, 0x72, 0xde, 0x3b, 0x21, 0x28, 0x9e, 0x5a, 0x94, 0xf5, 0x04, 0xa3, 0xc8, 0x0f, 0x5e, 0xbc, 0x71, 0xf9, 0x0d, 0xce, 0xf5, + /* (2^316)P */ 0x93, 0x97, 0x00, 0x85, 0xf4, 0xb4, 0x40, 0xec, 0xd9, 0x2b, 0x6c, 0xd6, 0x63, 0x9e, 0x93, 0x0a, 0x5a, 0xf4, 0xa7, 0x9a, 0xe3, 0x3c, 0xf0, 0x55, 0xd1, 0x96, 0x6c, 0xf5, 0x2a, 0xce, 0xd7, 0x95, 0x72, 0xbf, 0xc5, 0x0c, 0xce, 0x79, 0xa2, 0x0a, 0x78, 0xe0, 0x72, 0xd0, 0x66, 0x28, 0x05, 0x75, 0xd3, 0x23, 0x09, 0x91, 0xed, 0x7e, 0xc4, 0xbc, + /* (2^317)P */ 0x77, 0xc2, 0x9a, 0xf7, 0xa6, 0xe6, 0x18, 0xb4, 0xe7, 0xf6, 0xda, 0xec, 0x44, 0x6d, 0xfb, 0x08, 0xee, 0x65, 0xa8, 0x92, 0x85, 0x1f, 0xba, 0x38, 0x93, 0x20, 0x5c, 0x4d, 0xd2, 0x18, 0x0f, 0x24, 0xbe, 0x1a, 0x96, 0x44, 0x7d, 0xeb, 0xb3, 0xda, 0x95, 0xf4, 0xaf, 0x6c, 0x06, 0x0f, 0x47, 0x37, 0xc8, 0x77, 0x63, 0xe1, 0x29, 0xef, 0xff, 0xa5, + /* (2^318)P */ 0x16, 0x12, 0xd9, 0x47, 0x90, 0x22, 0x9b, 0x05, 0xf2, 0xa5, 0x9a, 0xae, 0x83, 0x98, 0xb5, 0xac, 0xab, 0x29, 0xaa, 0xdc, 0x5f, 0xde, 0xcd, 0xf7, 0x42, 0xad, 0x3b, 0x96, 0xd6, 0x3e, 0x6e, 0x52, 0x47, 0xb1, 0xab, 0x51, 0xde, 0x49, 0x7c, 0x87, 0x8d, 0x86, 0xe2, 0x70, 0x13, 0x21, 0x51, 0x1c, 0x0c, 0x25, 0xc1, 0xb0, 0xe6, 0x19, 0xcf, 0x12, + /* (2^319)P */ 0xf0, 0xbc, 0x97, 0x8f, 0x4b, 0x2f, 0xd1, 0x1f, 0x8c, 0x57, 0xed, 0x3c, 0xf4, 0x26, 0x19, 0xbb, 0x60, 0xca, 0x24, 0xc5, 0xd9, 0x97, 0xe2, 0x5f, 0x76, 0x49, 0x39, 0x7e, 0x2d, 0x12, 0x21, 0x98, 0xda, 0xe6, 0xdb, 0xd2, 0xd8, 0x9f, 0x18, 0xd8, 0x83, 0x6c, 0xba, 0x89, 0x8d, 0x29, 0xfa, 0x46, 0x33, 0x8c, 0x28, 0xdf, 0x6a, 0xb3, 0x69, 0x28, + /* (2^320)P */ 0x86, 0x17, 0xbc, 0xd6, 0x7c, 0xba, 
0x1e, 0x83, 0xbb, 0x84, 0xb5, 0x8c, 0xad, 0xdf, 0xa1, 0x24, 0x81, 0x70, 0x40, 0x0f, 0xad, 0xad, 0x3b, 0x23, 0xd0, 0x93, 0xa0, 0x49, 0x5c, 0x4b, 0x51, 0xbe, 0x20, 0x49, 0x4e, 0xda, 0x2d, 0xd3, 0xad, 0x1b, 0x74, 0x08, 0x41, 0xf0, 0xef, 0x19, 0xe9, 0x45, 0x5d, 0x02, 0xae, 0x26, 0x25, 0xd9, 0xd1, 0xc2, + /* (2^321)P */ 0x48, 0x81, 0x3e, 0xb2, 0x83, 0xf8, 0x4d, 0xb3, 0xd0, 0x4c, 0x75, 0xb3, 0xa0, 0x52, 0x26, 0xf2, 0xaf, 0x5d, 0x36, 0x70, 0x72, 0xd6, 0xb7, 0x88, 0x08, 0x69, 0xbd, 0x15, 0x25, 0xb1, 0x45, 0x1b, 0xb7, 0x0b, 0x5f, 0x71, 0x5d, 0x83, 0x49, 0xb9, 0x84, 0x3b, 0x7c, 0xc1, 0x50, 0x93, 0x05, 0x53, 0xe0, 0x61, 0xea, 0xc1, 0xef, 0xdb, 0x82, 0x97, + /* (2^322)P */ 0x00, 0xd5, 0xc3, 0x3a, 0x4d, 0x8a, 0x23, 0x7a, 0xef, 0xff, 0x37, 0xef, 0xf3, 0xbc, 0xa9, 0xb6, 0xae, 0xd7, 0x3a, 0x7b, 0xfd, 0x3e, 0x8e, 0x9b, 0xab, 0x44, 0x54, 0x60, 0x28, 0x6c, 0xbf, 0x15, 0x24, 0x4a, 0x56, 0x60, 0x7f, 0xa9, 0x7a, 0x28, 0x59, 0x2c, 0x8a, 0xd1, 0x7d, 0x6b, 0x00, 0xfd, 0xa5, 0xad, 0xbc, 0x19, 0x3f, 0xcb, 0x73, 0xe0, + /* (2^323)P */ 0xcf, 0x9e, 0x66, 0x06, 0x4d, 0x2b, 0xf5, 0x9c, 0xc2, 0x9d, 0x9e, 0xed, 0x5a, 0x5c, 0x2d, 0x00, 0xbf, 0x29, 0x90, 0x88, 0xe4, 0x5d, 0xfd, 0xe2, 0xf0, 0x38, 0xec, 0x4d, 0x26, 0xea, 0x54, 0xf0, 0x3c, 0x84, 0x10, 0x6a, 0xf9, 0x66, 0x9c, 0xe7, 0x21, 0xfd, 0x0f, 0xc7, 0x13, 0x50, 0x81, 0xb6, 0x50, 0xf9, 0x04, 0x7f, 0xa4, 0x37, 0x85, 0x14, + /* (2^324)P */ 0xdb, 0x87, 0x49, 0xc7, 0xa8, 0x39, 0x0c, 0x32, 0x98, 0x0c, 0xb9, 0x1a, 0x1b, 0x4d, 0xe0, 0x8a, 0x9a, 0x8e, 0x8f, 0xab, 0x5a, 0x17, 0x3d, 0x04, 0x21, 0xce, 0x3e, 0x2c, 0xf9, 0xa3, 0x97, 0xe4, 0x77, 0x95, 0x0e, 0xb6, 0xa5, 0x15, 0xad, 0x3a, 0x1e, 0x46, 0x53, 0x17, 0x09, 0x83, 0x71, 0x4e, 0x86, 0x38, 0xd5, 0x23, 0x44, 0x16, 0x8d, 0xc8, + /* (2^325)P */ 0x05, 0x5e, 0x99, 0x08, 0xbb, 0xc3, 0xc0, 0xb7, 0x6c, 0x12, 0xf2, 0xf3, 0xf4, 0x7c, 0x6a, 0x4d, 0x9e, 0xeb, 0x3d, 0xb9, 0x63, 0x94, 0xce, 0x81, 0xd8, 0x11, 0xcb, 0x55, 0x69, 0x4a, 0x20, 0x0b, 0x4c, 0x2e, 0x14, 0xb8, 0xd4, 0x6a, 0x7c, 0xf0, 0xed, 0xfc, 0x8f, 0xef, 0xa0, 0xeb, 0x6c, 0x01, 0xe2, 0xdc, 0x10, 0x22, 0xa2, 0x01, 0x85, 0x64, + /* (2^326)P */ 0x58, 0xe1, 0x9c, 0x27, 0x55, 0xc6, 0x25, 0xa6, 0x7d, 0x67, 0x88, 0x65, 0x99, 0x6c, 0xcb, 0xdb, 0x27, 0x4f, 0x44, 0x29, 0xf5, 0x4a, 0x23, 0x10, 0xbc, 0x03, 0x3f, 0x36, 0x1e, 0xef, 0xb0, 0xba, 0x75, 0xe8, 0x74, 0x5f, 0x69, 0x3e, 0x26, 0x40, 0xb4, 0x2f, 0xdc, 0x43, 0xbf, 0xa1, 0x8b, 0xbd, 0xca, 0x6e, 0xc1, 0x6e, 0x21, 0x79, 0xa0, 0xd0, + /* (2^327)P */ 0x78, 0x93, 0x4a, 0x2d, 0x22, 0x6e, 0x6e, 0x7d, 0x74, 0xd2, 0x66, 0x58, 0xce, 0x7b, 0x1d, 0x97, 0xb1, 0xf2, 0xda, 0x1c, 0x79, 0xfb, 0xba, 0xd1, 0xc0, 0xc5, 0x6e, 0xc9, 0x11, 0x89, 0xd2, 0x41, 0x8d, 0x70, 0xb9, 0xcc, 0xea, 0x6a, 0xb3, 0x45, 0xb6, 0x05, 0x2e, 0xf2, 0x17, 0xf1, 0x27, 0xb8, 0xed, 0x06, 0x1f, 0xdb, 0x9d, 0x1f, 0x69, 0x28, + /* (2^328)P */ 0x93, 0x12, 0xa8, 0x11, 0xe1, 0x92, 0x30, 0x8d, 0xac, 0xe1, 0x1c, 0x60, 0x7c, 0xed, 0x2d, 0x2e, 0xd3, 0x03, 0x5c, 0x9c, 0xc5, 0xbd, 0x64, 0x4a, 0x8c, 0xba, 0x76, 0xfe, 0xc6, 0xc1, 0xea, 0xc2, 0x4f, 0xbe, 0x70, 0x3d, 0x64, 0xcf, 0x8e, 0x18, 0xcb, 0xcd, 0x57, 0xa7, 0xf7, 0x36, 0xa9, 0x6b, 0x3e, 0xb8, 0x69, 0xee, 0x47, 0xa2, 0x7e, 0xb2, + /* (2^329)P */ 0x96, 0xaf, 0x3a, 0xf5, 0xed, 0xcd, 0xaf, 0xf7, 0x82, 0xaf, 0x59, 0x62, 0x0b, 0x36, 0x85, 0xf9, 0xaf, 0xd6, 0x38, 0xff, 0x87, 0x2e, 0x1d, 0x6c, 0x8b, 0xaf, 0x3b, 0xdf, 0x28, 0xa2, 0xd6, 0x4d, 0x80, 0x92, 0xc3, 0x0f, 0x34, 0xa8, 0xae, 0x69, 0x5d, 0x7b, 0x9d, 0xbc, 0xf5, 0xfd, 0x1d, 0xb1, 0x96, 0x55, 0x86, 0xe1, 0x5c, 0xb6, 0xac, 0xb9, + /* (2^330)P */ 0x50, 0x9e, 0x37, 0x28, 0x7d, 0xa8, 0x33, 0x63, 0xda, 0x3f, 
0x20, 0x98, 0x0e, 0x09, 0xa8, 0x77, 0x3b, 0x7a, 0xfc, 0x16, 0x85, 0x44, 0x64, 0x77, 0x65, 0x68, 0x92, 0x41, 0xc6, 0x1f, 0xdf, 0x27, 0xf9, 0xec, 0xa0, 0x61, 0x22, 0xea, 0x19, 0xe7, 0x75, 0x8b, 0x4e, 0xe5, 0x0f, 0xb7, 0xf7, 0xd2, 0x53, 0xf4, 0xdd, 0x4a, 0xaa, 0x78, 0x40, 0xb7, + /* (2^331)P */ 0xd4, 0x89, 0xe3, 0x79, 0xba, 0xb6, 0xc3, 0xda, 0xe6, 0x78, 0x65, 0x7d, 0x6e, 0x22, 0x62, 0xb1, 0x3d, 0xea, 0x90, 0x84, 0x30, 0x5e, 0xd4, 0x39, 0x84, 0x78, 0xd9, 0x75, 0xd6, 0xce, 0x2a, 0x11, 0x29, 0x69, 0xa4, 0x5e, 0xaa, 0x2a, 0x98, 0x5a, 0xe5, 0x91, 0x8f, 0xb2, 0xfb, 0xda, 0x97, 0xe8, 0x83, 0x6f, 0x04, 0xb9, 0x5d, 0xaf, 0xe1, 0x9b, + /* (2^332)P */ 0x8b, 0xe4, 0xe1, 0x48, 0x9c, 0xc4, 0x83, 0x89, 0xdf, 0x65, 0xd3, 0x35, 0x55, 0x13, 0xf4, 0x1f, 0x36, 0x92, 0x33, 0x38, 0xcb, 0xed, 0x15, 0xe6, 0x60, 0x2d, 0x25, 0xf5, 0x36, 0x60, 0x3a, 0x37, 0x9b, 0x71, 0x9d, 0x42, 0xb0, 0x14, 0xc8, 0xba, 0x62, 0xa3, 0x49, 0xb0, 0x88, 0xc1, 0x72, 0x73, 0xdd, 0x62, 0x40, 0xa9, 0x62, 0x88, 0x99, 0xca, + /* (2^333)P */ 0x47, 0x7b, 0xea, 0xda, 0x46, 0x2f, 0x45, 0xc6, 0xe3, 0xb4, 0x4d, 0x8d, 0xac, 0x0b, 0x54, 0x22, 0x06, 0x31, 0x16, 0x66, 0x3e, 0xe4, 0x38, 0x12, 0xcd, 0xf3, 0xe7, 0x99, 0x37, 0xd9, 0x62, 0x24, 0x4b, 0x05, 0xf2, 0x58, 0xe6, 0x29, 0x4b, 0x0d, 0xf6, 0xc1, 0xba, 0xa0, 0x1e, 0x0f, 0xcb, 0x1f, 0xc6, 0x2b, 0x19, 0xfc, 0x82, 0x01, 0xd0, 0x86, + /* (2^334)P */ 0xa2, 0xae, 0x77, 0x20, 0xfb, 0xa8, 0x18, 0xb4, 0x61, 0xef, 0xe8, 0x52, 0x79, 0xbb, 0x86, 0x90, 0x5d, 0x2e, 0x76, 0xed, 0x66, 0x60, 0x5d, 0x00, 0xb5, 0xa4, 0x00, 0x40, 0x89, 0xec, 0xd1, 0xd2, 0x0d, 0x26, 0xb9, 0x30, 0xb2, 0xd2, 0xb8, 0xe8, 0x0e, 0x56, 0xf9, 0x67, 0x94, 0x2e, 0x62, 0xe1, 0x79, 0x48, 0x2b, 0xa9, 0xfa, 0xea, 0xdb, 0x28, + /* (2^335)P */ 0x35, 0xf1, 0xb0, 0x43, 0xbd, 0x27, 0xef, 0x18, 0x44, 0xa2, 0x04, 0xb4, 0x69, 0xa1, 0x97, 0x1f, 0x8c, 0x04, 0x82, 0x9b, 0x00, 0x6d, 0xf8, 0xbf, 0x7d, 0xc1, 0x5b, 0xab, 0xe8, 0xb2, 0x34, 0xbd, 0xaf, 0x7f, 0xb2, 0x0d, 0xf3, 0xed, 0xfc, 0x5b, 0x50, 0xee, 0xe7, 0x4a, 0x20, 0xd9, 0xf5, 0xc6, 0x9a, 0x97, 0x6d, 0x07, 0x2f, 0xb9, 0x31, 0x02, + /* (2^336)P */ 0xf9, 0x54, 0x4a, 0xc5, 0x61, 0x7e, 0x1d, 0xa6, 0x0e, 0x1a, 0xa8, 0xd3, 0x8c, 0x36, 0x7d, 0xf1, 0x06, 0xb1, 0xac, 0x93, 0xcd, 0xe9, 0x8f, 0x61, 0x6c, 0x5d, 0x03, 0x23, 0xdf, 0x85, 0x53, 0x39, 0x63, 0x5e, 0xeb, 0xf3, 0xd3, 0xd3, 0x75, 0x97, 0x9b, 0x62, 0x9b, 0x01, 0xb3, 0x19, 0xd8, 0x2b, 0x36, 0xf2, 0x2c, 0x2c, 0x6f, 0x36, 0xc6, 0x3c, + /* (2^337)P */ 0x05, 0x74, 0x43, 0x10, 0xb6, 0xb0, 0xf8, 0xbf, 0x02, 0x46, 0x9a, 0xee, 0xc1, 0xaf, 0xc1, 0xe5, 0x5a, 0x2e, 0xbb, 0xe1, 0xdc, 0xc6, 0xce, 0x51, 0x29, 0x50, 0xbf, 0x1b, 0xde, 0xff, 0xba, 0x4d, 0x8d, 0x8b, 0x7e, 0xe7, 0xbd, 0x5b, 0x8f, 0xbe, 0xe3, 0x75, 0x71, 0xff, 0x37, 0x05, 0x5a, 0x10, 0xeb, 0x54, 0x7e, 0x44, 0x72, 0x2c, 0xd4, 0xfc, + /* (2^338)P */ 0x03, 0x12, 0x1c, 0xb2, 0x08, 0x90, 0xa1, 0x2d, 0x50, 0xa0, 0xad, 0x7f, 0x8d, 0xa6, 0x97, 0xc1, 0xbd, 0xdc, 0xc3, 0xa7, 0xad, 0x31, 0xdf, 0xb8, 0x03, 0x84, 0xc3, 0xb9, 0x29, 0x3d, 0x92, 0x2e, 0xc3, 0x90, 0x07, 0xe8, 0xa7, 0xc7, 0xbc, 0x61, 0xe9, 0x3e, 0xa0, 0x35, 0xda, 0x1d, 0xab, 0x48, 0xfe, 0x50, 0xc9, 0x25, 0x59, 0x23, 0x69, 0x3f, + /* (2^339)P */ 0x8e, 0x91, 0xab, 0x6b, 0x91, 0x4f, 0x89, 0x76, 0x67, 0xad, 0xb2, 0x65, 0x9d, 0xad, 0x02, 0x36, 0xdc, 0xac, 0x96, 0x93, 0x97, 0x21, 0x14, 0xd0, 0xe8, 0x11, 0x60, 0x1e, 0xeb, 0x96, 0x06, 0xf2, 0x53, 0xf2, 0x6d, 0xb7, 0x93, 0x6f, 0x26, 0x91, 0x23, 0xe3, 0x34, 0x04, 0x92, 0x91, 0x37, 0x08, 0x50, 0xd6, 0x28, 0x09, 0x27, 0xa1, 0x0c, 0x00, + /* (2^340)P */ 0x1f, 0xbb, 0x21, 0x26, 0x33, 0xcb, 0xa4, 0xd1, 0xee, 0x85, 0xf9, 0xd9, 0x3c, 0x90, 
0xc3, 0xd1, 0x26, 0xa2, 0x25, 0x93, 0x43, 0x61, 0xed, 0x91, 0x6e, 0x54, 0x03, 0x2e, 0x42, 0x9d, 0xf7, 0xa6, 0x02, 0x0f, 0x2f, 0x9c, 0x7a, 0x8d, 0x12, 0xc2, 0x18, 0xfc, 0x41, 0xff, 0x85, 0x26, 0x1a, 0x44, 0x55, 0x0b, 0x89, 0xab, 0x6f, 0x62, 0x33, 0x8c, + /* (2^341)P */ 0xe0, 0x3c, 0x5d, 0x70, 0x64, 0x87, 0x81, 0x35, 0xf2, 0x37, 0xa6, 0x24, 0x3e, 0xe0, 0x62, 0xd5, 0x71, 0xe7, 0x93, 0xfb, 0xac, 0xc3, 0xe7, 0xc7, 0x04, 0xe2, 0x70, 0xd3, 0x29, 0x5b, 0x21, 0xbf, 0xf4, 0x26, 0x5d, 0xf3, 0x95, 0xb4, 0x2a, 0x6a, 0x07, 0x55, 0xa6, 0x4b, 0x3b, 0x15, 0xf2, 0x25, 0x8a, 0x95, 0x3f, 0x63, 0x2f, 0x7a, 0x23, 0x96, + /* (2^342)P */ 0x0d, 0x3d, 0xd9, 0x13, 0xa7, 0xb3, 0x5e, 0x67, 0xf7, 0x02, 0x23, 0xee, 0x84, 0xff, 0x99, 0xda, 0xb9, 0x53, 0xf8, 0xf0, 0x0e, 0x39, 0x2f, 0x3c, 0x64, 0x34, 0xe3, 0x09, 0xfd, 0x2b, 0x33, 0xc7, 0xfe, 0x62, 0x2b, 0x84, 0xdf, 0x2b, 0xd2, 0x7c, 0x26, 0x01, 0x70, 0x66, 0x5b, 0x85, 0xc2, 0xbe, 0x88, 0x37, 0xf1, 0x30, 0xac, 0xb8, 0x76, 0xa3, + /* (2^343)P */ 0x6e, 0x01, 0xf0, 0x55, 0x35, 0xe4, 0xbd, 0x43, 0x62, 0x9d, 0xd6, 0x11, 0xef, 0x6f, 0xb8, 0x8c, 0xaa, 0x98, 0x87, 0xc6, 0x6d, 0xc4, 0xcc, 0x74, 0x92, 0x53, 0x4a, 0xdf, 0xe4, 0x08, 0x89, 0x17, 0xd0, 0x0f, 0xf4, 0x00, 0x60, 0x78, 0x08, 0x44, 0xb5, 0xda, 0x18, 0xed, 0x98, 0xc8, 0x61, 0x3d, 0x39, 0xdb, 0xcf, 0x1d, 0x49, 0x40, 0x65, 0x75, + /* (2^344)P */ 0x8e, 0x10, 0xae, 0x5f, 0x06, 0xd2, 0x95, 0xfd, 0x20, 0x16, 0x49, 0x5b, 0x57, 0xbe, 0x22, 0x8b, 0x43, 0xfb, 0xe6, 0xcc, 0x26, 0xa5, 0x5d, 0xd3, 0x68, 0xc5, 0xf9, 0x5a, 0x86, 0x24, 0x87, 0x27, 0x05, 0xfd, 0xe2, 0xff, 0xb3, 0xa3, 0x7b, 0x37, 0x59, 0xc5, 0x4e, 0x14, 0x94, 0xf9, 0x3b, 0xcb, 0x7c, 0xed, 0xca, 0x1d, 0xb2, 0xac, 0x05, 0x4a, + /* (2^345)P */ 0xf4, 0xd1, 0x81, 0xeb, 0x89, 0xbf, 0xfe, 0x1e, 0x41, 0x92, 0x29, 0xee, 0xe1, 0x43, 0xf5, 0x86, 0x1d, 0x2f, 0xbb, 0x1e, 0x84, 0x5d, 0x7b, 0x8d, 0xd5, 0xda, 0xee, 0x1e, 0x8a, 0xd0, 0x27, 0xf2, 0x60, 0x51, 0x59, 0x82, 0xf4, 0x84, 0x2b, 0x5b, 0x14, 0x2d, 0x81, 0x82, 0x3e, 0x2b, 0xb4, 0x6d, 0x51, 0x4f, 0xc5, 0xcb, 0xbf, 0x74, 0xe3, 0xb4, + /* (2^346)P */ 0x19, 0x2f, 0x22, 0xb3, 0x04, 0x5f, 0x81, 0xca, 0x05, 0x60, 0xb9, 0xaa, 0xee, 0x0e, 0x2f, 0x48, 0x38, 0xf9, 0x91, 0xb4, 0x66, 0xe4, 0x57, 0x28, 0x54, 0x10, 0xe9, 0x61, 0x9d, 0xd4, 0x90, 0x75, 0xb1, 0x39, 0x23, 0xb6, 0xfc, 0x82, 0xe0, 0xfa, 0xbb, 0x5c, 0x6e, 0xc3, 0x44, 0x13, 0x00, 0x83, 0x55, 0x9e, 0x8e, 0x10, 0x61, 0x81, 0x91, 0x04, + /* (2^347)P */ 0x5f, 0x2a, 0xd7, 0x81, 0xd9, 0x9c, 0xbb, 0x79, 0xbc, 0x62, 0x56, 0x98, 0x03, 0x5a, 0x18, 0x85, 0x2a, 0x9c, 0xd0, 0xfb, 0xd2, 0xb1, 0xaf, 0xef, 0x0d, 0x24, 0xc5, 0xfa, 0x39, 0xbb, 0x6b, 0xed, 0xa4, 0xdf, 0xe4, 0x87, 0xcd, 0x41, 0xd3, 0x72, 0x32, 0xc6, 0x28, 0x21, 0xb1, 0xba, 0x8b, 0xa3, 0x91, 0x79, 0x76, 0x22, 0x25, 0x10, 0x61, 0xd1, + /* (2^348)P */ 0x73, 0xb5, 0x32, 0x97, 0xdd, 0xeb, 0xdd, 0x22, 0x22, 0xf1, 0x33, 0x3c, 0x77, 0x56, 0x7d, 0x6b, 0x48, 0x2b, 0x05, 0x81, 0x03, 0x03, 0x91, 0x9a, 0xe3, 0x5e, 0xd4, 0xee, 0x3f, 0xf8, 0xbb, 0x50, 0x21, 0x32, 0x4c, 0x4a, 0x58, 0x49, 0xde, 0x0c, 0xde, 0x30, 0x82, 0x3d, 0x92, 0xf0, 0x6c, 0xcc, 0x32, 0x3e, 0xd2, 0x78, 0x8a, 0x6e, 0x2c, 0xd0, + /* (2^349)P */ 0xf0, 0xf7, 0xa1, 0x0b, 0xc1, 0x74, 0x85, 0xa8, 0xe9, 0xdd, 0x48, 0xa1, 0xc0, 0x16, 0xd8, 0x2b, 0x61, 0x08, 0xc2, 0x2b, 0x30, 0x26, 0x79, 0xce, 0x9e, 0xfd, 0x39, 0xd7, 0x81, 0xa4, 0x63, 0x8c, 0xd5, 0x74, 0xa0, 0x88, 0xfa, 0x03, 0x30, 0xe9, 0x7f, 0x2b, 0xc6, 0x02, 0xc9, 0x5e, 0xe4, 0xd5, 0x4d, 0x92, 0xd0, 0xf6, 0xf2, 0x5b, 0x79, 0x08, + /* (2^350)P */ 0x34, 0x89, 0x81, 0x43, 0xd1, 0x94, 0x2c, 0x10, 0x54, 0x9b, 0xa0, 0xe5, 0x44, 0xe8, 0xc2, 0x2f, 0x3e, 0x0e, 
0x74, 0xae, 0xba, 0xe2, 0xac, 0x85, 0x6b, 0xd3, 0x5c, 0x97, 0xf7, 0x90, 0xf1, 0x12, 0xc0, 0x03, 0xc8, 0x1f, 0x37, 0x72, 0x8c, 0x9b, 0x9c, 0x17, 0x96, 0x9d, 0xc7, 0xbf, 0xa3, 0x3f, 0x44, 0x3d, 0x87, 0x81, 0xbd, 0x81, 0xa6, 0x5f, + /* (2^351)P */ 0xe4, 0xff, 0x78, 0x62, 0x82, 0x5b, 0x76, 0x58, 0xf5, 0x5b, 0xa6, 0xc4, 0x53, 0x11, 0x3b, 0x7b, 0xaa, 0x67, 0xf8, 0xea, 0x3b, 0x5d, 0x9a, 0x2e, 0x04, 0xeb, 0x4a, 0x24, 0xfb, 0x56, 0xf0, 0xa8, 0xd4, 0x14, 0xed, 0x0f, 0xfd, 0xc5, 0x26, 0x17, 0x2a, 0xf0, 0xb9, 0x13, 0x8c, 0xbd, 0x65, 0x14, 0x24, 0x95, 0x27, 0x12, 0x63, 0x2a, 0x09, 0x18, + /* (2^352)P */ 0xe1, 0x5c, 0xe7, 0xe0, 0x00, 0x6a, 0x96, 0xf2, 0x49, 0x6a, 0x39, 0xa5, 0xe0, 0x17, 0x79, 0x4a, 0x63, 0x07, 0x62, 0x09, 0x61, 0x1b, 0x6e, 0xa9, 0xb5, 0x62, 0xb7, 0xde, 0xdf, 0x80, 0x4c, 0x5a, 0x99, 0x73, 0x59, 0x9d, 0xfb, 0xb1, 0x5e, 0xbe, 0xb8, 0xb7, 0x63, 0x93, 0xe8, 0xad, 0x5e, 0x1f, 0xae, 0x59, 0x1c, 0xcd, 0xb4, 0xc2, 0xb3, 0x8a, + /* (2^353)P */ 0x78, 0x53, 0xa1, 0x4c, 0x70, 0x9c, 0x63, 0x7e, 0xb3, 0x12, 0x40, 0x5f, 0xbb, 0x23, 0xa7, 0xf7, 0x77, 0x96, 0x5b, 0x4d, 0x91, 0x10, 0x52, 0x85, 0x9e, 0xa5, 0x38, 0x0b, 0xfd, 0x25, 0x01, 0x4b, 0xfa, 0x4d, 0xd3, 0x3f, 0x78, 0x74, 0x42, 0xff, 0x62, 0x2d, 0x27, 0xdc, 0x9d, 0xd1, 0x29, 0x76, 0x2e, 0x78, 0xb3, 0x35, 0xfa, 0x15, 0xd5, 0x38, + /* (2^354)P */ 0x8b, 0xc7, 0x43, 0xce, 0xf0, 0x5e, 0xf1, 0x0d, 0x02, 0x38, 0xe8, 0x82, 0xc9, 0x25, 0xad, 0x2d, 0x27, 0xa4, 0x54, 0x18, 0xb2, 0x30, 0x73, 0xa4, 0x41, 0x08, 0xe4, 0x86, 0xe6, 0x8c, 0xe9, 0x2a, 0x34, 0xb3, 0xd6, 0x61, 0x8f, 0x66, 0x26, 0x08, 0xb6, 0x06, 0x33, 0xaa, 0x12, 0xac, 0x72, 0xec, 0x2e, 0x52, 0xa3, 0x25, 0x3e, 0xd7, 0x62, 0xe8, + /* (2^355)P */ 0xc4, 0xbb, 0x89, 0xc8, 0x40, 0xcc, 0x84, 0xec, 0x4a, 0xd9, 0xc4, 0x55, 0x78, 0x00, 0xcf, 0xd8, 0xe9, 0x24, 0x59, 0xdc, 0x5e, 0xf0, 0x66, 0xa1, 0x83, 0xae, 0x97, 0x18, 0xc5, 0x54, 0x27, 0xa2, 0x21, 0x52, 0x03, 0x31, 0x5b, 0x11, 0x67, 0xf6, 0x12, 0x00, 0x87, 0x2f, 0xff, 0x59, 0x70, 0x8f, 0x6d, 0x71, 0xab, 0xab, 0x24, 0xb8, 0xba, 0x35, + /* (2^356)P */ 0x69, 0x43, 0xa7, 0x14, 0x06, 0x96, 0xe9, 0xc2, 0xe3, 0x2b, 0x45, 0x22, 0xc0, 0xd0, 0x2f, 0x34, 0xd1, 0x01, 0x99, 0xfc, 0x99, 0x38, 0xa1, 0x25, 0x2e, 0x59, 0x6c, 0x27, 0xc9, 0xeb, 0x7b, 0xdc, 0x4e, 0x26, 0x68, 0xba, 0xfa, 0xec, 0x02, 0x05, 0x64, 0x80, 0x30, 0x20, 0x5c, 0x26, 0x7f, 0xaf, 0x95, 0x17, 0x3d, 0x5c, 0x9e, 0x96, 0x96, 0xaf, + /* (2^357)P */ 0xa6, 0xba, 0x21, 0x29, 0x32, 0xe2, 0x98, 0xde, 0x9b, 0x6d, 0x0b, 0x44, 0x91, 0xa8, 0x3e, 0xd4, 0xb8, 0x04, 0x6c, 0xf6, 0x04, 0x39, 0xbd, 0x52, 0x05, 0x15, 0x27, 0x78, 0x8e, 0x55, 0xac, 0x79, 0xc5, 0xe6, 0x00, 0x7f, 0x90, 0xa2, 0xdd, 0x07, 0x13, 0xe0, 0x24, 0x70, 0x5c, 0x0f, 0x4d, 0xa9, 0xf9, 0xae, 0xcb, 0x34, 0x10, 0x9d, 0x89, 0x9d, + /* (2^358)P */ 0x12, 0xe0, 0xb3, 0x9f, 0xc4, 0x96, 0x1d, 0xcf, 0xed, 0x99, 0x64, 0x28, 0x8d, 0xc7, 0x31, 0x82, 0xee, 0x5e, 0x75, 0x48, 0xff, 0x3a, 0xf2, 0x09, 0x34, 0x03, 0x93, 0x52, 0x19, 0xb2, 0xc5, 0x81, 0x93, 0x45, 0x5e, 0x59, 0x21, 0x2b, 0xec, 0x89, 0xba, 0x36, 0x6e, 0xf9, 0x82, 0x75, 0x7e, 0x82, 0x3f, 0xaa, 0xe2, 0xe3, 0x3b, 0x94, 0xfd, 0x98, + /* (2^359)P */ 0x7c, 0xdb, 0x75, 0x31, 0x61, 0xfb, 0x15, 0x28, 0x94, 0xd7, 0xc3, 0x5a, 0xa9, 0xa1, 0x0a, 0x66, 0x0f, 0x2b, 0x13, 0x3e, 0x42, 0xb5, 0x28, 0x3a, 0xca, 0x83, 0xf3, 0x61, 0x22, 0xf4, 0x40, 0xc5, 0xdf, 0xe7, 0x31, 0x9f, 0x7e, 0x51, 0x75, 0x06, 0x9d, 0x51, 0xc8, 0xe7, 0x9f, 0xc3, 0x71, 0x4f, 0x3d, 0x5b, 0xfb, 0xe9, 0x8e, 0x08, 0x40, 0x8e, + /* (2^360)P */ 0xf7, 0x31, 0xad, 0x50, 0x5d, 0x25, 0x93, 0x73, 0x68, 0xf6, 0x7c, 0x89, 0x5a, 0x3d, 0x9f, 0x9b, 0x05, 0x82, 0xe7, 0x70, 0x4b, 0x19, 
0xaa, 0xcf, 0xff, 0xde, 0x50, 0x8f, 0x2f, 0x69, 0xd3, 0xf0, 0x99, 0x51, 0x6b, 0x9d, 0xb6, 0x56, 0x6f, 0xf8, 0x4c, 0x74, 0x8b, 0x4c, 0x91, 0xf9, 0xa9, 0xb1, 0x3e, 0x07, 0xdf, 0x0b, 0x27, 0x8a, 0xb1, 0xed, + /* (2^361)P */ 0xfb, 0x67, 0xd9, 0x48, 0xd2, 0xe4, 0x44, 0x9b, 0x43, 0x15, 0x8a, 0xeb, 0x00, 0x53, 0xad, 0x25, 0xc7, 0x7e, 0x19, 0x30, 0x87, 0xb7, 0xd5, 0x5f, 0x04, 0xf8, 0xaa, 0xdd, 0x57, 0xae, 0x34, 0x75, 0xe2, 0x84, 0x4b, 0x54, 0x60, 0x37, 0x95, 0xe4, 0xd3, 0xec, 0xac, 0xef, 0x47, 0x31, 0xa3, 0xc8, 0x31, 0x22, 0xdb, 0x26, 0xe7, 0x6a, 0xb5, 0xad, + /* (2^362)P */ 0x44, 0x09, 0x5c, 0x95, 0xe4, 0x72, 0x3c, 0x1a, 0xd1, 0xac, 0x42, 0x51, 0x99, 0x6f, 0xfa, 0x1f, 0xf2, 0x22, 0xbe, 0xff, 0x7b, 0x66, 0xf5, 0x6c, 0xb3, 0x66, 0xc7, 0x4d, 0x78, 0x31, 0x83, 0x80, 0xf5, 0x41, 0xe9, 0x7f, 0xbe, 0xf7, 0x23, 0x49, 0x6b, 0x84, 0x4e, 0x7e, 0x47, 0x07, 0x6e, 0x74, 0xdf, 0xe5, 0x9d, 0x9e, 0x56, 0x2a, 0xc0, 0xbc, + /* (2^363)P */ 0xac, 0x10, 0x80, 0x8c, 0x7c, 0xfa, 0x83, 0xdf, 0xb3, 0xd0, 0xc4, 0xbe, 0xfb, 0x9f, 0xac, 0xc9, 0xc3, 0x40, 0x95, 0x0b, 0x09, 0x23, 0xda, 0x63, 0x67, 0xcf, 0xe7, 0x9f, 0x7d, 0x7b, 0x6b, 0xe2, 0xe6, 0x6d, 0xdb, 0x87, 0x9e, 0xa6, 0xff, 0x6d, 0xab, 0xbd, 0xfb, 0x54, 0x84, 0x68, 0xcf, 0x89, 0xf1, 0xd0, 0xe2, 0x85, 0x61, 0xdc, 0x22, 0xd1, + /* (2^364)P */ 0xa8, 0x48, 0xfb, 0x8c, 0x6a, 0x63, 0x01, 0x72, 0x43, 0x43, 0xeb, 0x21, 0xa3, 0x00, 0x8a, 0xc0, 0x87, 0x51, 0x9e, 0x86, 0x75, 0x16, 0x79, 0xf9, 0x6b, 0x11, 0x80, 0x62, 0xc2, 0x9d, 0xb8, 0x8c, 0x30, 0x8e, 0x8d, 0x03, 0x52, 0x7e, 0x31, 0x59, 0x38, 0xf9, 0x25, 0xc7, 0x0f, 0xc7, 0xa8, 0x2b, 0x5c, 0x80, 0xfa, 0x90, 0xa2, 0x63, 0xca, 0xe7, + /* (2^365)P */ 0xf1, 0x5d, 0xb5, 0xd9, 0x20, 0x10, 0x7d, 0x0f, 0xc5, 0x50, 0x46, 0x07, 0xff, 0x02, 0x75, 0x2b, 0x4a, 0xf3, 0x39, 0x91, 0x72, 0xb7, 0xd5, 0xcc, 0x38, 0xb8, 0xe7, 0x36, 0x26, 0x5e, 0x11, 0x97, 0x25, 0xfb, 0x49, 0x68, 0xdc, 0xb4, 0x46, 0x87, 0x5c, 0xc2, 0x7f, 0xaa, 0x7d, 0x36, 0x23, 0xa6, 0xc6, 0x53, 0xec, 0xbc, 0x57, 0x47, 0xc1, 0x2b, + /* (2^366)P */ 0x25, 0x5d, 0x7d, 0x95, 0xda, 0x0b, 0x8f, 0x78, 0x1e, 0x19, 0x09, 0xfa, 0x67, 0xe0, 0xa0, 0x17, 0x24, 0x76, 0x6c, 0x30, 0x1f, 0x62, 0x3d, 0xbe, 0x45, 0x70, 0xcc, 0xb6, 0x1e, 0x68, 0x06, 0x25, 0x68, 0x16, 0x1a, 0x33, 0x3f, 0x90, 0xc7, 0x78, 0x2d, 0x98, 0x3c, 0x2f, 0xb9, 0x2d, 0x94, 0x0b, 0xfb, 0x49, 0x56, 0x30, 0xd7, 0xc1, 0xe6, 0x48, + /* (2^367)P */ 0x7a, 0xd1, 0xe0, 0x8e, 0x67, 0xfc, 0x0b, 0x50, 0x1f, 0x84, 0x98, 0xfa, 0xaf, 0xae, 0x2e, 0x31, 0x27, 0xcf, 0x3f, 0xf2, 0x6e, 0x8d, 0x81, 0x8f, 0xd2, 0x5f, 0xde, 0xd3, 0x5e, 0xe9, 0xe7, 0x13, 0x48, 0x83, 0x5a, 0x4e, 0x84, 0xd1, 0x58, 0xcf, 0x6b, 0x84, 0xdf, 0x13, 0x1d, 0x91, 0x85, 0xe8, 0xcb, 0x29, 0x79, 0xd2, 0xca, 0xac, 0x6a, 0x93, + /* (2^368)P */ 0x53, 0x82, 0xce, 0x61, 0x96, 0x88, 0x6f, 0xe1, 0x4a, 0x4c, 0x1e, 0x30, 0x73, 0xe8, 0x74, 0xde, 0x40, 0x2b, 0xe0, 0xc4, 0xb5, 0xd8, 0x7c, 0x15, 0xe7, 0xe1, 0xb1, 0xe0, 0xd6, 0x88, 0xb1, 0x6a, 0x57, 0x19, 0x6a, 0x22, 0x66, 0x57, 0xf6, 0x8d, 0xfd, 0xc0, 0xf2, 0xa3, 0x03, 0x56, 0xfb, 0x2e, 0x75, 0x5e, 0xc7, 0x8e, 0x22, 0x96, 0x5c, 0x06, + /* (2^369)P */ 0x98, 0x7e, 0xbf, 0x3e, 0xbf, 0x24, 0x9d, 0x15, 0xd3, 0xf6, 0xd3, 0xd2, 0xf0, 0x11, 0xf2, 0xdb, 0x36, 0x23, 0x38, 0xf7, 0x1d, 0x71, 0x20, 0xd2, 0x54, 0x7f, 0x1e, 0x24, 0x8f, 0xe2, 0xaa, 0xf7, 0x3f, 0x6b, 0x41, 0x4e, 0xdc, 0x0e, 0xec, 0xe8, 0x35, 0x0a, 0x08, 0x6d, 0x89, 0x5b, 0x32, 0x91, 0x01, 0xb6, 0xe0, 0x2c, 0xc6, 0xa1, 0xbe, 0xb4, + /* (2^370)P */ 0x29, 0xf2, 0x1e, 0x1c, 0xdc, 0x68, 0x8a, 0x43, 0x87, 0x2c, 0x48, 0xb3, 0x9e, 0xed, 0xd2, 0x82, 0x46, 0xac, 0x2f, 0xef, 0x93, 0x34, 0x37, 0xca, 0x64, 0x8d, 
0xc9, 0x06, 0x90, 0xbb, 0x78, 0x0a, 0x3c, 0x4c, 0xcf, 0x35, 0x7a, 0x0f, 0xf7, 0xa7, 0xf4, 0x2f, 0x45, 0x69, 0x3f, 0xa9, 0x5d, 0xce, 0x7b, 0x8a, 0x84, 0xc3, 0xae, 0xf4, 0xda, 0xd5, + /* (2^371)P */ 0xca, 0xba, 0x95, 0x43, 0x05, 0x7b, 0x06, 0xd9, 0x5c, 0x0a, 0x18, 0x5f, 0x6a, 0x6a, 0xce, 0xc0, 0x3d, 0x95, 0x51, 0x0e, 0x1a, 0xbe, 0x85, 0x7a, 0xf2, 0x69, 0xec, 0xc0, 0x8c, 0xca, 0xa3, 0x32, 0x0a, 0x76, 0x50, 0xc6, 0x76, 0x61, 0x00, 0x89, 0xbf, 0x6e, 0x0f, 0x48, 0x90, 0x31, 0x93, 0xec, 0x34, 0x70, 0xf0, 0xc3, 0x8d, 0xf0, 0x0f, 0xb5, + /* (2^372)P */ 0xbe, 0x23, 0xe2, 0x18, 0x99, 0xf1, 0xed, 0x8a, 0xf6, 0xc9, 0xac, 0xb8, 0x1e, 0x9a, 0x3c, 0x15, 0xae, 0xd7, 0x6d, 0xb3, 0x04, 0xee, 0x5b, 0x0d, 0x1e, 0x79, 0xb7, 0xf9, 0xf9, 0x8d, 0xad, 0xf9, 0x8f, 0x5a, 0x6a, 0x7b, 0xd7, 0x9b, 0xca, 0x62, 0xfe, 0x9c, 0xc0, 0x6f, 0x6d, 0x9d, 0x76, 0xa3, 0x69, 0xb9, 0x4c, 0xa1, 0xc4, 0x0c, 0x76, 0xaa, + /* (2^373)P */ 0x1c, 0x06, 0xfe, 0x3f, 0x45, 0x70, 0xcd, 0x97, 0xa9, 0xa2, 0xb1, 0xd3, 0xf2, 0xa5, 0x0c, 0x49, 0x2c, 0x75, 0x73, 0x1f, 0xcf, 0x00, 0xaf, 0xd5, 0x2e, 0xde, 0x0d, 0x8f, 0x8f, 0x7c, 0xc4, 0x58, 0xce, 0xd4, 0xf6, 0x24, 0x19, 0x2e, 0xd8, 0xc5, 0x1d, 0x1a, 0x3f, 0xb8, 0x4f, 0xbc, 0x7d, 0xbd, 0x68, 0xe3, 0x81, 0x98, 0x1b, 0xa8, 0xc9, 0xd9, + /* (2^374)P */ 0x39, 0x95, 0x78, 0x24, 0x6c, 0x38, 0xe4, 0xe7, 0xd0, 0x8d, 0xb9, 0x38, 0x71, 0x5e, 0xc1, 0x62, 0x80, 0xcc, 0xcb, 0x8c, 0x97, 0xca, 0xf8, 0xb9, 0xd9, 0x9c, 0xce, 0x72, 0x7b, 0x70, 0xee, 0x5f, 0xea, 0xa2, 0xdf, 0xa9, 0x14, 0x10, 0xf9, 0x6e, 0x59, 0x9f, 0x9c, 0xe0, 0x0c, 0xb2, 0x07, 0x97, 0xcd, 0xd2, 0x89, 0x16, 0xfd, 0x9c, 0xa8, 0xa5, + /* (2^375)P */ 0x5a, 0x61, 0xf1, 0x59, 0x7c, 0x38, 0xda, 0xe2, 0x85, 0x99, 0x68, 0xe9, 0xc9, 0xf7, 0x32, 0x7e, 0xc4, 0xca, 0xb7, 0x11, 0x08, 0x69, 0x2b, 0x66, 0x02, 0xf7, 0x2e, 0x18, 0xc3, 0x8e, 0xe1, 0xf9, 0xc5, 0x19, 0x9a, 0x0a, 0x9c, 0x07, 0xba, 0xc7, 0x9c, 0x03, 0x34, 0x89, 0x99, 0x67, 0x0b, 0x16, 0x4b, 0x07, 0x36, 0x16, 0x36, 0x2c, 0xe2, 0xa1, + /* (2^376)P */ 0x70, 0x10, 0x91, 0x27, 0xa8, 0x24, 0x8e, 0x29, 0x04, 0x6f, 0x79, 0x1f, 0xd3, 0xa5, 0x68, 0xd3, 0x0b, 0x7d, 0x56, 0x4d, 0x14, 0x57, 0x7b, 0x2e, 0x00, 0x9f, 0x9a, 0xfd, 0x6c, 0x63, 0x18, 0x81, 0xdb, 0x9d, 0xb7, 0xd7, 0xa4, 0x1e, 0xe8, 0x40, 0xf1, 0x4c, 0xa3, 0x01, 0xd5, 0x4b, 0x75, 0xea, 0xdd, 0x97, 0xfd, 0x5b, 0xb2, 0x66, 0x6a, 0x24, + /* (2^377)P */ 0x72, 0x11, 0xfe, 0x73, 0x1b, 0xd3, 0xea, 0x7f, 0x93, 0x15, 0x15, 0x05, 0xfe, 0x40, 0xe8, 0x28, 0xd8, 0x50, 0x47, 0x66, 0xfa, 0xb7, 0xb5, 0x04, 0xba, 0x35, 0x1e, 0x32, 0x9f, 0x5f, 0x32, 0xba, 0x3d, 0xd1, 0xed, 0x9a, 0x76, 0xca, 0xa3, 0x3e, 0x77, 0xd8, 0xd8, 0x7c, 0x5f, 0x68, 0x42, 0xb5, 0x86, 0x7f, 0x3b, 0xc9, 0xc1, 0x89, 0x64, 0xda, + /* (2^378)P */ 0xd5, 0xd4, 0x17, 0x31, 0xfc, 0x6a, 0xfd, 0xb8, 0xe8, 0xe5, 0x3e, 0x39, 0x06, 0xe4, 0xd1, 0x90, 0x2a, 0xca, 0xf6, 0x54, 0x6c, 0x1b, 0x2f, 0x49, 0x97, 0xb1, 0x2a, 0x82, 0x43, 0x3d, 0x1f, 0x8b, 0xe2, 0x47, 0xc5, 0x24, 0xa8, 0xd5, 0x53, 0x29, 0x7d, 0xc6, 0x87, 0xa6, 0x25, 0x3a, 0x64, 0xdd, 0x71, 0x08, 0x9e, 0xcd, 0xe9, 0x45, 0xc7, 0xba, + /* (2^379)P */ 0x37, 0x72, 0x6d, 0x13, 0x7a, 0x8d, 0x04, 0x31, 0xe6, 0xe3, 0x9e, 0x36, 0x71, 0x3e, 0xc0, 0x1e, 0xe3, 0x71, 0xd3, 0x49, 0x4e, 0x4a, 0x36, 0x42, 0x68, 0x68, 0x61, 0xc7, 0x3c, 0xdb, 0x81, 0x49, 0xf7, 0x91, 0x4d, 0xea, 0x4c, 0x4f, 0x98, 0xc6, 0x7e, 0x60, 0x84, 0x4b, 0x6a, 0x37, 0xbb, 0x52, 0xf7, 0xce, 0x02, 0xe4, 0xad, 0xd1, 0x3c, 0xa7, + /* (2^380)P */ 0x51, 0x06, 0x2d, 0xf8, 0x08, 0xe8, 0xf1, 0x0c, 0xe5, 0xa9, 0xac, 0x29, 0x73, 0x3b, 0xed, 0x98, 0x5f, 0x55, 0x08, 0x38, 0x51, 0x44, 0x36, 0x5d, 0xea, 0xc3, 0xb8, 0x0e, 0xa0, 0x4f, 
0xd2, 0x79, 0xe9, 0x98, 0xc3, 0xf5, 0x00, 0xb9, 0x26, 0x27, 0x42, 0xa8, 0x07, 0xc1, 0x12, 0x31, 0xc1, 0xc3, 0x3c, 0x3b, 0x7a, 0x72, 0x97, 0xc2, 0x70, 0x3a, + /* (2^381)P */ 0xf4, 0xb2, 0xba, 0x32, 0xbc, 0xa9, 0x2f, 0x87, 0xc7, 0x3c, 0x45, 0xcd, 0xae, 0xe2, 0x13, 0x6d, 0x3a, 0xf2, 0xf5, 0x66, 0x97, 0x29, 0xaf, 0x53, 0x9f, 0xda, 0xea, 0x14, 0xdf, 0x04, 0x98, 0x19, 0x95, 0x9e, 0x2a, 0x00, 0x5c, 0x9d, 0x1d, 0xf0, 0x39, 0x23, 0xff, 0xfc, 0xca, 0x36, 0xb7, 0xde, 0xdf, 0x37, 0x78, 0x52, 0x21, 0xfa, 0x19, 0x10, + /* (2^382)P */ 0x50, 0x20, 0x73, 0x74, 0x62, 0x21, 0xf2, 0xf7, 0x9b, 0x66, 0x85, 0x34, 0x74, 0xd4, 0x9d, 0x60, 0xd7, 0xbc, 0xc8, 0x46, 0x3b, 0xb8, 0x80, 0x42, 0x15, 0x0a, 0x6c, 0x35, 0x1a, 0x69, 0xf0, 0x1d, 0x4b, 0x29, 0x54, 0x5a, 0x9a, 0x48, 0xec, 0x9f, 0x37, 0x74, 0x91, 0xd0, 0xd1, 0x9e, 0x00, 0xc2, 0x76, 0x56, 0xd6, 0xa0, 0x15, 0x14, 0x83, 0x59, + /* (2^383)P */ 0xc2, 0xf8, 0x22, 0x20, 0x23, 0x07, 0xbd, 0x1d, 0x6f, 0x1e, 0x8c, 0x56, 0x06, 0x6a, 0x4b, 0x9f, 0xe2, 0xa9, 0x92, 0x46, 0x4b, 0x46, 0x59, 0xd7, 0xe1, 0xda, 0x14, 0x98, 0x07, 0x65, 0x7e, 0x28, 0x20, 0xf2, 0x9d, 0x4f, 0x36, 0x5c, 0x92, 0xe0, 0x9d, 0xfe, 0x3e, 0xda, 0xe4, 0x47, 0x19, 0x3c, 0x00, 0x7f, 0x22, 0xf2, 0x9e, 0x51, 0xae, 0x4d, + /* (2^384)P */ 0xbe, 0x8c, 0x1b, 0x10, 0xb6, 0xad, 0xcc, 0xcc, 0xd8, 0x5e, 0x21, 0xa6, 0xfb, 0xf1, 0xf6, 0xbd, 0x0a, 0x24, 0x67, 0xb4, 0x57, 0x7a, 0xbc, 0xe8, 0xe9, 0xff, 0xee, 0x0a, 0x1f, 0xee, 0xbd, 0xc8, 0x44, 0xed, 0x2b, 0xbb, 0x55, 0x1f, 0xdd, 0x7c, 0xb3, 0xeb, 0x3f, 0x63, 0xa1, 0x28, 0x91, 0x21, 0xab, 0x71, 0xc6, 0x4c, 0xd0, 0xe9, 0xb0, 0x21, + /* (2^385)P */ 0xad, 0xc9, 0x77, 0x2b, 0xee, 0x89, 0xa4, 0x7b, 0xfd, 0xf9, 0xf6, 0x14, 0xe4, 0xed, 0x1a, 0x16, 0x9b, 0x78, 0x41, 0x43, 0xa8, 0x83, 0x72, 0x06, 0x2e, 0x7c, 0xdf, 0xeb, 0x7e, 0xdd, 0xd7, 0x8b, 0xea, 0x9a, 0x2b, 0x03, 0xba, 0x57, 0xf3, 0xf1, 0xd9, 0xe5, 0x09, 0xc5, 0x98, 0x61, 0x1c, 0x51, 0x6d, 0x5d, 0x6e, 0xfb, 0x5e, 0x95, 0x9f, 0xb5, + /* (2^386)P */ 0x23, 0xe2, 0x1e, 0x95, 0xa3, 0x5e, 0x42, 0x10, 0xc7, 0xc3, 0x70, 0xbf, 0x4b, 0x6b, 0x83, 0x36, 0x93, 0xb7, 0x68, 0x47, 0x88, 0x3a, 0x10, 0x88, 0x48, 0x7f, 0x8c, 0xae, 0x54, 0x10, 0x02, 0xa4, 0x52, 0x8f, 0x8d, 0xf7, 0x26, 0x4f, 0x50, 0xc3, 0x6a, 0xe2, 0x4e, 0x3b, 0x4c, 0xb9, 0x8a, 0x14, 0x15, 0x6d, 0x21, 0x29, 0xb3, 0x6e, 0x4e, 0xd0, + /* (2^387)P */ 0x4c, 0x8a, 0x18, 0x3f, 0xb7, 0x20, 0xfd, 0x3e, 0x54, 0xca, 0x68, 0x3c, 0xea, 0x6f, 0xf4, 0x6b, 0xa2, 0xbd, 0x01, 0xbd, 0xfe, 0x08, 0xa8, 0xd8, 0xc2, 0x20, 0x36, 0x05, 0xcd, 0xe9, 0xf3, 0x9e, 0xfa, 0x85, 0x66, 0x8f, 0x4b, 0x1d, 0x8c, 0x64, 0x4f, 0xb8, 0xc6, 0x0f, 0x5b, 0x57, 0xd8, 0x24, 0x19, 0x5a, 0x14, 0x4b, 0x92, 0xd3, 0x96, 0xbc, + /* (2^388)P */ 0xa9, 0x3f, 0xc9, 0x6c, 0xca, 0x64, 0x1e, 0x6f, 0xdf, 0x65, 0x7f, 0x9a, 0x47, 0x6b, 0x8a, 0x60, 0x31, 0xa6, 0x06, 0xac, 0x69, 0x30, 0xe6, 0xea, 0x63, 0x42, 0x26, 0x5f, 0xdb, 0xd0, 0xf2, 0x8e, 0x34, 0x0a, 0x3a, 0xeb, 0xf3, 0x79, 0xc8, 0xb7, 0x60, 0x56, 0x5c, 0x37, 0x95, 0x71, 0xf8, 0x7f, 0x49, 0x3e, 0x9e, 0x01, 0x26, 0x1e, 0x80, 0x9f, + /* (2^389)P */ 0xf8, 0x16, 0x9a, 0xaa, 0xb0, 0x28, 0xb5, 0x8e, 0xd0, 0x60, 0xe5, 0x26, 0xa9, 0x47, 0xc4, 0x5c, 0xa9, 0x39, 0xfe, 0x0a, 0xd8, 0x07, 0x2b, 0xb3, 0xce, 0xf1, 0xea, 0x1a, 0xf4, 0x7b, 0x98, 0x31, 0x3d, 0x13, 0x29, 0x80, 0xe8, 0x0d, 0xcf, 0x56, 0x39, 0x86, 0x50, 0x0c, 0xb3, 0x18, 0xf4, 0xc5, 0xca, 0xf2, 0x6f, 0xcd, 0x8d, 0xd5, 0x02, 0xb0, + /* (2^390)P */ 0xbf, 0x39, 0x3f, 0xac, 0x6d, 0x1a, 0x6a, 0xe4, 0x42, 0x24, 0xd6, 0x41, 0x9d, 0xb9, 0x5b, 0x46, 0x73, 0x93, 0x76, 0xaa, 0xb7, 0x37, 0x36, 0xa6, 0x09, 0xe5, 0x04, 0x3b, 0x66, 0xc4, 0x29, 0x3e, 0x41, 0xc2, 
0xcb, 0xe5, 0x17, 0xd7, 0x34, 0x67, 0x1d, 0x2c, 0x12, 0xec, 0x24, 0x7a, 0x40, 0xa2, 0x45, 0x41, 0xf0, 0x75, 0xed, 0x43, 0x30, 0xc9, + /* (2^391)P */ 0x80, 0xf6, 0x47, 0x5b, 0xad, 0x54, 0x02, 0xbc, 0xdd, 0xa4, 0xb2, 0xd7, 0x42, 0x95, 0xf2, 0x0d, 0x1b, 0xef, 0x37, 0xa7, 0xb4, 0x34, 0x04, 0x08, 0x71, 0x1b, 0xd3, 0xdf, 0xa1, 0xf0, 0x2b, 0xfa, 0xc0, 0x1f, 0xf3, 0x44, 0xb5, 0xc6, 0x47, 0x3d, 0x65, 0x67, 0x45, 0x4d, 0x2f, 0xde, 0x52, 0x73, 0xfc, 0x30, 0x01, 0x6b, 0xc1, 0x03, 0xd8, 0xd7, + /* (2^392)P */ 0x1c, 0x67, 0x55, 0x3e, 0x01, 0x17, 0x0f, 0x3e, 0xe5, 0x34, 0x58, 0xfc, 0xcb, 0x71, 0x24, 0x74, 0x5d, 0x36, 0x1e, 0x89, 0x2a, 0x63, 0xf8, 0xf8, 0x9f, 0x50, 0x9f, 0x32, 0x92, 0x29, 0xd8, 0x1a, 0xec, 0x76, 0x57, 0x6c, 0x67, 0x12, 0x6a, 0x6e, 0xef, 0x97, 0x1f, 0xc3, 0x77, 0x60, 0x3c, 0x22, 0xcb, 0xc7, 0x04, 0x1a, 0x89, 0x2d, 0x10, 0xa6, + /* (2^393)P */ 0x12, 0xf5, 0xa9, 0x26, 0x16, 0xd9, 0x3c, 0x65, 0x5d, 0x83, 0xab, 0xd1, 0x70, 0x6b, 0x1c, 0xdb, 0xe7, 0x86, 0x0d, 0xfb, 0xe7, 0xf8, 0x2a, 0x58, 0x6e, 0x7a, 0x66, 0x13, 0x53, 0x3a, 0x6f, 0x8d, 0x43, 0x5f, 0x14, 0x23, 0x14, 0xff, 0x3d, 0x52, 0x7f, 0xee, 0xbd, 0x7a, 0x34, 0x8b, 0x35, 0x24, 0xc3, 0x7a, 0xdb, 0xcf, 0x22, 0x74, 0x9a, 0x8f, + /* (2^394)P */ 0xdb, 0x20, 0xfc, 0xe5, 0x39, 0x4e, 0x7d, 0x78, 0xee, 0x0b, 0xbf, 0x1d, 0x80, 0xd4, 0x05, 0x4f, 0xb9, 0xd7, 0x4e, 0x94, 0x88, 0x9a, 0x50, 0x78, 0x1a, 0x70, 0x8c, 0xcc, 0x25, 0xb6, 0x61, 0x09, 0xdc, 0x7b, 0xea, 0x3f, 0x7f, 0xea, 0x2a, 0x0d, 0x47, 0x1c, 0x8e, 0xa6, 0x5b, 0xd2, 0xa3, 0x61, 0x93, 0x3c, 0x68, 0x9f, 0x8b, 0xea, 0xb0, 0xcb, + /* (2^395)P */ 0xff, 0x54, 0x02, 0x19, 0xae, 0x8b, 0x4c, 0x2c, 0x3a, 0xe0, 0xe4, 0xac, 0x87, 0xf7, 0x51, 0x45, 0x41, 0x43, 0xdc, 0xaa, 0xcd, 0xcb, 0xdc, 0x40, 0xe3, 0x44, 0x3b, 0x1d, 0x9e, 0x3d, 0xb9, 0x82, 0xcc, 0x7a, 0xc5, 0x12, 0xf8, 0x1e, 0xdd, 0xdb, 0x8d, 0xb0, 0x2a, 0xe8, 0xe6, 0x6c, 0x94, 0x3b, 0xb7, 0x2d, 0xba, 0x79, 0x3b, 0xb5, 0x86, 0xfb, + /* (2^396)P */ 0x82, 0x88, 0x13, 0xdd, 0x6c, 0xcd, 0x85, 0x2b, 0x90, 0x86, 0xb7, 0xac, 0x16, 0xa6, 0x6e, 0x6a, 0x94, 0xd8, 0x1e, 0x4e, 0x41, 0x0f, 0xce, 0x81, 0x6a, 0xa8, 0x26, 0x56, 0x43, 0x52, 0x52, 0xe6, 0xff, 0x88, 0xcf, 0x47, 0x05, 0x1d, 0xff, 0xf3, 0xa0, 0x10, 0xb2, 0x97, 0x87, 0xeb, 0x47, 0xbb, 0xfa, 0x1f, 0xe8, 0x4c, 0xce, 0xc4, 0xcd, 0x93, + /* (2^397)P */ 0xf4, 0x11, 0xf5, 0x8d, 0x89, 0x29, 0x79, 0xb3, 0x59, 0x0b, 0x29, 0x7d, 0x9c, 0x12, 0x4a, 0x65, 0x72, 0x3a, 0xf9, 0xec, 0x37, 0x18, 0x86, 0xef, 0x44, 0x07, 0x25, 0x74, 0x76, 0x53, 0xed, 0x51, 0x01, 0xc6, 0x28, 0xc5, 0xc3, 0x4a, 0x0f, 0x99, 0xec, 0xc8, 0x40, 0x5a, 0x83, 0x30, 0x79, 0xa2, 0x3e, 0x63, 0x09, 0x2d, 0x6f, 0x23, 0x54, 0x1c, + /* (2^398)P */ 0x5c, 0x6f, 0x3b, 0x1c, 0x30, 0x77, 0x7e, 0x87, 0x66, 0x83, 0x2e, 0x7e, 0x85, 0x50, 0xfd, 0xa0, 0x7a, 0xc2, 0xf5, 0x0f, 0xc1, 0x64, 0xe7, 0x0b, 0xbd, 0x59, 0xa7, 0xe7, 0x65, 0x53, 0xc3, 0xf5, 0x55, 0x5b, 0xe1, 0x82, 0x30, 0x5a, 0x61, 0xcd, 0xa0, 0x89, 0x32, 0xdb, 0x87, 0xfc, 0x21, 0x8a, 0xab, 0x6d, 0x82, 0xa8, 0x42, 0x81, 0x4f, 0xf2, + /* (2^399)P */ 0xb3, 0xeb, 0x88, 0x18, 0xf6, 0x56, 0x96, 0xbf, 0xba, 0x5d, 0x71, 0xa1, 0x5a, 0xd1, 0x04, 0x7b, 0xd5, 0x46, 0x01, 0x74, 0xfe, 0x15, 0x25, 0xb7, 0xff, 0x0c, 0x24, 0x47, 0xac, 0xfd, 0xab, 0x47, 0x32, 0xe1, 0x6a, 0x4e, 0xca, 0xcf, 0x7f, 0xdd, 0xf8, 0xd2, 0x4b, 0x3b, 0xf5, 0x17, 0xba, 0xba, 0x8b, 0xa1, 0xec, 0x28, 0x3f, 0x97, 0xab, 0x2a, + /* (2^400)P */ 0x51, 0x38, 0xc9, 0x5e, 0xc6, 0xb3, 0x64, 0xf2, 0x24, 0x4d, 0x04, 0x7d, 0xc8, 0x39, 0x0c, 0x4a, 0xc9, 0x73, 0x74, 0x1b, 0x5c, 0xb2, 0xc5, 0x41, 0x62, 0xa0, 0x4c, 0x6d, 0x8d, 0x91, 0x9a, 0x7b, 0x88, 0xab, 0x9c, 0x7e, 0x23, 0xdb, 
0x6f, 0xb5, 0x72, 0xd6, 0x47, 0x40, 0xef, 0x22, 0x58, 0x62, 0x19, 0x6c, 0x38, 0xba, 0x5b, 0x00, 0x30, 0x9f, + /* (2^401)P */ 0x65, 0xbb, 0x3b, 0x9b, 0xe9, 0xae, 0xbf, 0xbe, 0xe4, 0x13, 0x95, 0xf3, 0xe3, 0x77, 0xcb, 0xe4, 0x9a, 0x22, 0xb5, 0x4a, 0x08, 0x9d, 0xb3, 0x9e, 0x27, 0xe0, 0x15, 0x6c, 0x9f, 0x7e, 0x9a, 0x5e, 0x15, 0x45, 0x25, 0x8d, 0x01, 0x0a, 0xd2, 0x2b, 0xbd, 0x48, 0x06, 0x0d, 0x18, 0x97, 0x4b, 0xdc, 0xbc, 0xf0, 0xcd, 0xb2, 0x52, 0x3c, 0xac, 0xf5, + /* (2^402)P */ 0x3e, 0xed, 0x47, 0x6b, 0x5c, 0xf6, 0x76, 0xd0, 0xe9, 0x15, 0xa3, 0xcb, 0x36, 0x00, 0x21, 0xa3, 0x79, 0x20, 0xa5, 0x3e, 0x88, 0x03, 0xcb, 0x7e, 0x63, 0xbb, 0xed, 0xa9, 0x13, 0x35, 0x16, 0xaf, 0x2e, 0xb4, 0x70, 0x14, 0x93, 0xfb, 0xc4, 0x9b, 0xd8, 0xb1, 0xbe, 0x43, 0xd1, 0x85, 0xb8, 0x97, 0xef, 0xea, 0x88, 0xa1, 0x25, 0x52, 0x62, 0x75, + /* (2^403)P */ 0x8e, 0x4f, 0xaa, 0x23, 0x62, 0x7e, 0x2b, 0x37, 0x89, 0x00, 0x11, 0x30, 0xc5, 0x33, 0x4a, 0x89, 0x8a, 0xe2, 0xfc, 0x5c, 0x6a, 0x75, 0xe5, 0xf7, 0x02, 0x4a, 0x9b, 0xf7, 0xb5, 0x6a, 0x85, 0x31, 0xd3, 0x5a, 0xcf, 0xc3, 0xf8, 0xde, 0x2f, 0xcf, 0xb5, 0x24, 0xf4, 0xe3, 0xa1, 0xad, 0x42, 0xae, 0x09, 0xb9, 0x2e, 0x04, 0x2d, 0x01, 0x22, 0x3f, + /* (2^404)P */ 0x41, 0x16, 0xfb, 0x7d, 0x50, 0xfd, 0xb5, 0xba, 0x88, 0x24, 0xba, 0xfd, 0x3d, 0xb2, 0x90, 0x15, 0xb7, 0xfa, 0xa2, 0xe1, 0x4c, 0x7d, 0xb9, 0xc6, 0xff, 0x81, 0x57, 0xb6, 0xc2, 0x9e, 0xcb, 0xc4, 0x35, 0xbd, 0x01, 0xb7, 0xaa, 0xce, 0xd0, 0xe9, 0xb5, 0xd6, 0x72, 0xbf, 0xd2, 0xee, 0xc7, 0xac, 0x94, 0xff, 0x29, 0x57, 0x02, 0x49, 0x09, 0xad, + /* (2^405)P */ 0x27, 0xa5, 0x78, 0x1b, 0xbf, 0x6b, 0xaf, 0x0b, 0x8c, 0xd9, 0xa8, 0x37, 0xb0, 0x67, 0x18, 0xb6, 0xc7, 0x05, 0x8a, 0x67, 0x03, 0x30, 0x62, 0x6e, 0x56, 0x82, 0xa9, 0x54, 0x3e, 0x0c, 0x4e, 0x07, 0xe1, 0x5a, 0x38, 0xed, 0xfa, 0xc8, 0x55, 0x6b, 0x08, 0xa3, 0x6b, 0x64, 0x2a, 0x15, 0xd6, 0x39, 0x6f, 0x47, 0x99, 0x42, 0x3f, 0x33, 0x84, 0x8f, + /* (2^406)P */ 0xbc, 0x45, 0x29, 0x81, 0x0e, 0xa4, 0xc5, 0x72, 0x3a, 0x10, 0xe1, 0xc4, 0x1e, 0xda, 0xc3, 0xfe, 0xb0, 0xce, 0xd2, 0x13, 0x34, 0x67, 0x21, 0xc6, 0x7e, 0xf9, 0x8c, 0xff, 0x39, 0x50, 0xae, 0x92, 0x60, 0x35, 0x2f, 0x8b, 0x6e, 0xc9, 0xc1, 0x27, 0x3a, 0x94, 0x66, 0x3e, 0x26, 0x84, 0x93, 0xc8, 0x6c, 0xcf, 0xd2, 0x03, 0xa1, 0x10, 0xcf, 0xb7, + /* (2^407)P */ 0x64, 0xda, 0x19, 0xf6, 0xc5, 0x73, 0x17, 0x44, 0x88, 0x81, 0x07, 0x0d, 0x34, 0xb2, 0x75, 0xf9, 0xd9, 0xe2, 0xe0, 0x8b, 0x71, 0xcf, 0x72, 0x34, 0x83, 0xb4, 0xce, 0xfc, 0xd7, 0x29, 0x09, 0x5a, 0x98, 0xbf, 0x14, 0xac, 0x77, 0x55, 0x38, 0x47, 0x5b, 0x0f, 0x40, 0x24, 0xe5, 0xa5, 0xa6, 0xac, 0x2d, 0xa6, 0xff, 0x9c, 0x73, 0xfe, 0x5c, 0x7e, + /* (2^408)P */ 0x1e, 0x33, 0xcc, 0x68, 0xb2, 0xbc, 0x8c, 0x93, 0xaf, 0xcc, 0x38, 0xf8, 0xd9, 0x16, 0x72, 0x50, 0xac, 0xd9, 0xb5, 0x0b, 0x9a, 0xbe, 0x46, 0x7a, 0xf1, 0xee, 0xf1, 0xad, 0xec, 0x5b, 0x59, 0x27, 0x9c, 0x05, 0xa3, 0x87, 0xe0, 0x37, 0x2c, 0x83, 0xce, 0xb3, 0x65, 0x09, 0x8e, 0xc3, 0x9c, 0xbf, 0x6a, 0xa2, 0x00, 0xcc, 0x12, 0x36, 0xc5, 0x95, + /* (2^409)P */ 0x36, 0x11, 0x02, 0x14, 0x9c, 0x3c, 0xeb, 0x2f, 0x23, 0x5b, 0x6b, 0x2b, 0x08, 0x54, 0x53, 0xac, 0xb2, 0xa3, 0xe0, 0x26, 0x62, 0x3c, 0xe4, 0xe1, 0x81, 0xee, 0x13, 0x3e, 0xa4, 0x97, 0xef, 0xf9, 0x92, 0x27, 0x01, 0xce, 0x54, 0x8b, 0x3e, 0x31, 0xbe, 0xa7, 0x88, 0xcf, 0x47, 0x99, 0x3c, 0x10, 0x6f, 0x60, 0xb3, 0x06, 0x4e, 0xee, 0x1b, 0xf0, + /* (2^410)P */ 0x59, 0x49, 0x66, 0xcf, 0x22, 0xe6, 0xf6, 0x73, 0xfe, 0xa3, 0x1c, 0x09, 0xfa, 0x5f, 0x65, 0xa8, 0xf0, 0x82, 0xc2, 0xef, 0x16, 0x63, 0x6e, 0x79, 0x69, 0x51, 0x39, 0x07, 0x65, 0xc4, 0x81, 0xec, 0x73, 0x0f, 0x15, 0x93, 0xe1, 0x30, 0x33, 0xe9, 0x37, 0x86, 
0x42, 0x4c, 0x1f, 0x9b, 0xad, 0xee, 0x3f, 0xf1, 0x2a, 0x8e, 0x6a, 0xa3, 0xc8, 0x35, + /* (2^411)P */ 0x1e, 0x49, 0xf1, 0xdd, 0xd2, 0x9c, 0x8e, 0x78, 0xb2, 0x06, 0xe4, 0x6a, 0xab, 0x3a, 0xdc, 0xcd, 0xf4, 0xeb, 0xe1, 0xe7, 0x2f, 0xaa, 0xeb, 0x40, 0x31, 0x9f, 0xb9, 0xab, 0x13, 0xa9, 0x78, 0xbf, 0x38, 0x89, 0x0e, 0x85, 0x14, 0x8b, 0x46, 0x76, 0x14, 0xda, 0xcf, 0x33, 0xc8, 0x79, 0xd3, 0xd5, 0xa3, 0x6a, 0x69, 0x45, 0x70, 0x34, 0xc3, 0xe9, + /* (2^412)P */ 0x5e, 0xe7, 0x78, 0xe9, 0x24, 0xcc, 0xe9, 0xf4, 0xc8, 0x6b, 0xe0, 0xfb, 0x3a, 0xbe, 0xcc, 0x42, 0x4a, 0x00, 0x22, 0xf8, 0xe6, 0x32, 0xbe, 0x6d, 0x18, 0x55, 0x60, 0xe9, 0x72, 0x69, 0x50, 0x56, 0xca, 0x04, 0x18, 0x38, 0xa1, 0xee, 0xd8, 0x38, 0x3c, 0xa7, 0x70, 0xe2, 0xb9, 0x4c, 0xa0, 0xc8, 0x89, 0x72, 0xcf, 0x49, 0x7f, 0xdf, 0xbc, 0x67, + /* (2^413)P */ 0x1d, 0x17, 0xcb, 0x0b, 0xbd, 0xb2, 0x36, 0xe3, 0xa8, 0x99, 0x31, 0xb6, 0x26, 0x9c, 0x0c, 0x74, 0xaf, 0x4d, 0x24, 0x61, 0xcf, 0x31, 0x7b, 0xed, 0xdd, 0xc3, 0xf6, 0x32, 0x70, 0xfe, 0x17, 0xf6, 0x51, 0x37, 0x65, 0xce, 0x5d, 0xaf, 0xa5, 0x2f, 0x2a, 0xfe, 0x00, 0x71, 0x7c, 0x50, 0xbe, 0x21, 0xc7, 0xed, 0xc6, 0xfc, 0x67, 0xcf, 0x9c, 0xdd, + /* (2^414)P */ 0x26, 0x3e, 0xf8, 0xbb, 0xd0, 0xb1, 0x01, 0xd8, 0xeb, 0x0b, 0x62, 0x87, 0x35, 0x4c, 0xde, 0xca, 0x99, 0x9c, 0x6d, 0xf7, 0xb6, 0xf0, 0x57, 0x0a, 0x52, 0x29, 0x6a, 0x3f, 0x26, 0x31, 0x04, 0x07, 0x2a, 0xc9, 0xfa, 0x9b, 0x0e, 0x62, 0x8e, 0x72, 0xf2, 0xad, 0xce, 0xb6, 0x35, 0x7a, 0xc1, 0xae, 0x35, 0xc7, 0xa3, 0x14, 0xcf, 0x0c, 0x28, 0xb7, + /* (2^415)P */ 0xa6, 0xf1, 0x32, 0x3a, 0x20, 0xd2, 0x24, 0x97, 0xcf, 0x5d, 0x37, 0x99, 0xaf, 0x33, 0x7a, 0x5b, 0x7a, 0xcc, 0x4e, 0x41, 0x38, 0xb1, 0x4e, 0xad, 0xc9, 0xd9, 0x71, 0x7e, 0xb2, 0xf5, 0xd5, 0x01, 0x6c, 0x4d, 0xfd, 0xa1, 0xda, 0x03, 0x38, 0x9b, 0x3d, 0x92, 0x92, 0xf2, 0xca, 0xbf, 0x1f, 0x24, 0xa4, 0xbb, 0x30, 0x6a, 0x74, 0x56, 0xc8, 0xce, + /* (2^416)P */ 0x27, 0xf4, 0xed, 0xc9, 0xc3, 0xb1, 0x79, 0x85, 0xbe, 0xf6, 0xeb, 0xf3, 0x55, 0xc7, 0xaa, 0xa6, 0xe9, 0x07, 0x5d, 0xf4, 0xeb, 0xa6, 0x81, 0xe3, 0x0e, 0xcf, 0xa3, 0xc1, 0xef, 0xe7, 0x34, 0xb2, 0x03, 0x73, 0x8a, 0x91, 0xf1, 0xad, 0x05, 0xc7, 0x0b, 0x43, 0x99, 0x12, 0x31, 0xc8, 0xc7, 0xc5, 0xa4, 0x3d, 0xcd, 0xe5, 0x4e, 0x6d, 0x24, 0xdd, + /* (2^417)P */ 0x61, 0x54, 0xd0, 0x95, 0x2c, 0x45, 0x75, 0xac, 0xb5, 0x1a, 0x9d, 0x11, 0xeb, 0xed, 0x6b, 0x57, 0xa3, 0xe6, 0xcd, 0x77, 0xd4, 0x83, 0x8e, 0x39, 0xf1, 0x0f, 0x98, 0xcb, 0x40, 0x02, 0x6e, 0x10, 0x82, 0x9e, 0xb4, 0x93, 0x76, 0xd7, 0x97, 0xa3, 0x53, 0x12, 0x86, 0xc6, 0x15, 0x78, 0x73, 0x93, 0xe7, 0x7f, 0xcf, 0x1f, 0xbf, 0xcd, 0xd2, 0x7a, + /* (2^418)P */ 0xc2, 0x21, 0xdc, 0xd5, 0x69, 0xff, 0xca, 0x49, 0x3a, 0xe1, 0xc3, 0x69, 0x41, 0x56, 0xc1, 0x76, 0x63, 0x24, 0xbd, 0x64, 0x1b, 0x3d, 0x92, 0xf9, 0x13, 0x04, 0x25, 0xeb, 0x27, 0xa6, 0xef, 0x39, 0x3a, 0x80, 0xe0, 0xf8, 0x27, 0xee, 0xc9, 0x49, 0x77, 0xef, 0x3f, 0x29, 0x3d, 0x5e, 0xe6, 0x66, 0x83, 0xd1, 0xf6, 0xfe, 0x9d, 0xbc, 0xf1, 0x96, + /* (2^419)P */ 0x6b, 0xc6, 0x99, 0x26, 0x3c, 0xf3, 0x63, 0xf9, 0xc7, 0x29, 0x8c, 0x52, 0x62, 0x2d, 0xdc, 0x8a, 0x66, 0xce, 0x2c, 0xa7, 0xe4, 0xf0, 0xd7, 0x37, 0x17, 0x1e, 0xe4, 0xa3, 0x53, 0x7b, 0x29, 0x8e, 0x60, 0x99, 0xf9, 0x0c, 0x7c, 0x6f, 0xa2, 0xcc, 0x9f, 0x80, 0xdd, 0x5e, 0x46, 0xaa, 0x0d, 0x6c, 0xc9, 0x6c, 0xf7, 0x78, 0x5b, 0x38, 0xe3, 0x24, + /* (2^420)P */ 0x4b, 0x75, 0x6a, 0x2f, 0x08, 0xe1, 0x72, 0x76, 0xab, 0x82, 0x96, 0xdf, 0x3b, 0x1f, 0x9b, 0xd8, 0xed, 0xdb, 0xcd, 0x15, 0x09, 0x5a, 0x1e, 0xb7, 0xc5, 0x26, 0x72, 0x07, 0x0c, 0x50, 0xcd, 0x3b, 0x4d, 0x3f, 0xa2, 0x67, 0xc2, 0x02, 0x61, 0x2e, 0x68, 0xe9, 0x6f, 0xf0, 0x21, 0x2a, 
0xa7, 0x3b, 0x88, 0x04, 0x11, 0x64, 0x49, 0x0d, 0xb4, 0x46, + /* (2^421)P */ 0x63, 0x85, 0xf3, 0xc5, 0x2b, 0x5a, 0x9f, 0xf0, 0x17, 0xcb, 0x45, 0x0a, 0xf3, 0x6e, 0x7e, 0xb0, 0x7c, 0xbc, 0xf0, 0x4f, 0x3a, 0xb0, 0xbc, 0x36, 0x36, 0x52, 0x51, 0xcb, 0xfe, 0x9a, 0xcb, 0xe8, 0x7e, 0x4b, 0x06, 0x7f, 0xaa, 0x35, 0xc8, 0x0e, 0x7a, 0x30, 0xa3, 0xb1, 0x09, 0xbb, 0x86, 0x4c, 0xbe, 0xb8, 0xbd, 0xe0, 0x32, 0xa5, 0xd4, 0xf7, + /* (2^422)P */ 0x7d, 0x50, 0x37, 0x68, 0x4e, 0x22, 0xb2, 0x2c, 0xd5, 0x0f, 0x2b, 0x6d, 0xb1, 0x51, 0xf2, 0x82, 0xe9, 0x98, 0x7c, 0x50, 0xc7, 0x96, 0x7e, 0x0e, 0xdc, 0xb1, 0x0e, 0xb2, 0x63, 0x8c, 0x30, 0x37, 0x72, 0x21, 0x9c, 0x61, 0xc2, 0xa7, 0x33, 0xd9, 0xb2, 0x63, 0x93, 0xd1, 0x6b, 0x6a, 0x73, 0xa5, 0x58, 0x80, 0xff, 0x04, 0xc7, 0x83, 0x21, 0x29, + /* (2^423)P */ 0x29, 0x04, 0xbc, 0x99, 0x39, 0xc9, 0x58, 0xc9, 0x6b, 0x17, 0xe8, 0x90, 0xb3, 0xe6, 0xa9, 0xb6, 0x28, 0x9b, 0xcb, 0x3b, 0x28, 0x90, 0x68, 0x71, 0xff, 0xcf, 0x08, 0x78, 0xc9, 0x8d, 0xa8, 0x4e, 0x43, 0xd1, 0x1c, 0x9e, 0xa4, 0xe3, 0xdf, 0xbf, 0x92, 0xf4, 0xf9, 0x41, 0xba, 0x4d, 0x1c, 0xf9, 0xdd, 0x74, 0x76, 0x1c, 0x6e, 0x3e, 0x94, 0x87, + /* (2^424)P */ 0xe4, 0xda, 0xc5, 0xd7, 0xfb, 0x87, 0xc5, 0x4d, 0x6b, 0x19, 0xaa, 0xb9, 0xbc, 0x8c, 0xf2, 0x8a, 0xd8, 0x5d, 0xdb, 0x4d, 0xef, 0xa6, 0xf2, 0x65, 0xf1, 0x22, 0x9c, 0xf1, 0x46, 0x30, 0x71, 0x7c, 0xe4, 0x53, 0x8e, 0x55, 0x2e, 0x9c, 0x9a, 0x31, 0x2a, 0xc3, 0xab, 0x0f, 0xde, 0xe4, 0xbe, 0xd8, 0x96, 0x50, 0x6e, 0x0c, 0x54, 0x49, 0xe6, 0xec, + /* (2^425)P */ 0x3c, 0x1d, 0x5a, 0xa5, 0xda, 0xad, 0xdd, 0xc2, 0xae, 0xac, 0x6f, 0x86, 0x75, 0x31, 0x91, 0x64, 0x45, 0x9d, 0xa4, 0xf0, 0x81, 0xf1, 0x0e, 0xba, 0x74, 0xaf, 0x7b, 0xcd, 0x6f, 0xfe, 0xac, 0x4e, 0xdb, 0x4e, 0x45, 0x35, 0x36, 0xc5, 0xc0, 0x6c, 0x3d, 0x64, 0xf4, 0xd8, 0x07, 0x62, 0xd1, 0xec, 0xf3, 0xfc, 0x93, 0xc9, 0x28, 0x0c, 0x2c, 0xf3, + /* (2^426)P */ 0x0c, 0x69, 0x2b, 0x5c, 0xb6, 0x41, 0x69, 0xf1, 0xa4, 0xf1, 0x5b, 0x75, 0x4c, 0x42, 0x8b, 0x47, 0xeb, 0x69, 0xfb, 0xa8, 0xe6, 0xf9, 0x7b, 0x48, 0x50, 0xaf, 0xd3, 0xda, 0xb2, 0x35, 0x10, 0xb5, 0x5b, 0x40, 0x90, 0x39, 0xc9, 0x07, 0x06, 0x73, 0x26, 0x20, 0x95, 0x01, 0xa4, 0x2d, 0xf0, 0xe7, 0x2e, 0x00, 0x7d, 0x41, 0x09, 0x68, 0x13, 0xc4, + /* (2^427)P */ 0xbe, 0x38, 0x78, 0xcf, 0xc9, 0x4f, 0x36, 0xca, 0x09, 0x61, 0x31, 0x3c, 0x57, 0x2e, 0xec, 0x17, 0xa4, 0x7d, 0x19, 0x2b, 0x9b, 0x5b, 0xbe, 0x8f, 0xd6, 0xc5, 0x2f, 0x86, 0xf2, 0x64, 0x76, 0x17, 0x00, 0x6e, 0x1a, 0x8c, 0x67, 0x1b, 0x68, 0xeb, 0x15, 0xa2, 0xd6, 0x09, 0x91, 0xdd, 0x23, 0x0d, 0x98, 0xb2, 0x10, 0x19, 0x55, 0x9b, 0x63, 0xf2, + /* (2^428)P */ 0x51, 0x1f, 0x93, 0xea, 0x2a, 0x3a, 0xfa, 0x41, 0xc0, 0x57, 0xfb, 0x74, 0xa6, 0x65, 0x09, 0x56, 0x14, 0xb6, 0x12, 0xaa, 0xb3, 0x1a, 0x8d, 0x3b, 0x76, 0x91, 0x7a, 0x23, 0x56, 0x9c, 0x6a, 0xc0, 0xe0, 0x3c, 0x3f, 0xb5, 0x1a, 0xf4, 0x57, 0x71, 0x93, 0x2b, 0xb1, 0xa7, 0x70, 0x57, 0x22, 0x80, 0xf5, 0xb8, 0x07, 0x77, 0x87, 0x0c, 0xbe, 0x83, + /* (2^429)P */ 0x07, 0x9b, 0x0e, 0x52, 0x38, 0x63, 0x13, 0x86, 0x6a, 0xa6, 0xb4, 0xd2, 0x60, 0x68, 0x9a, 0x99, 0x82, 0x0a, 0x04, 0x5f, 0x89, 0x7a, 0x1a, 0x2a, 0xae, 0x2d, 0x35, 0x0c, 0x1e, 0xad, 0xef, 0x4f, 0x9a, 0xfc, 0xc8, 0xd9, 0xcf, 0x9d, 0x48, 0x71, 0xa5, 0x55, 0x79, 0x73, 0x39, 0x1b, 0xd8, 0x73, 0xec, 0x9b, 0x03, 0x16, 0xd8, 0x82, 0xf7, 0x67, + /* (2^430)P */ 0x52, 0x67, 0x42, 0x21, 0xc9, 0x40, 0x78, 0x82, 0x2b, 0x95, 0x2d, 0x20, 0x92, 0xd1, 0xe2, 0x61, 0x25, 0xb0, 0xc6, 0x9c, 0x20, 0x59, 0x8e, 0x28, 0x6f, 0xf3, 0xfd, 0xd3, 0xc1, 0x32, 0x43, 0xc9, 0xa6, 0x08, 0x7a, 0x77, 0x9c, 0x4c, 0x8c, 0x33, 0x71, 0x13, 0x69, 0xe3, 0x52, 0x30, 0xa7, 0xf5, 0x07, 0x67, 
0xac, 0xad, 0x46, 0x8a, 0x26, 0x25, + /* (2^431)P */ 0xda, 0x86, 0xc4, 0xa2, 0x71, 0x56, 0xdd, 0xd2, 0x48, 0xd3, 0xde, 0x42, 0x63, 0x01, 0xa7, 0x2c, 0x92, 0x83, 0x6f, 0x2e, 0xd8, 0x1e, 0x3f, 0xc1, 0xc5, 0x42, 0x4e, 0x34, 0x19, 0x54, 0x6e, 0x35, 0x2c, 0x51, 0x2e, 0xfd, 0x0f, 0x9a, 0x45, 0x66, 0x5e, 0x4a, 0x83, 0xda, 0x0a, 0x53, 0x68, 0x63, 0xfa, 0xce, 0x47, 0x20, 0xd3, 0x34, 0xba, 0x0d, + /* (2^432)P */ 0xd0, 0xe9, 0x64, 0xa4, 0x61, 0x4b, 0x86, 0xe5, 0x93, 0x6f, 0xda, 0x0e, 0x31, 0x7e, 0x6e, 0xe3, 0xc6, 0x73, 0xd8, 0xa3, 0x08, 0x57, 0x52, 0xcd, 0x51, 0x63, 0x1d, 0x9f, 0x93, 0x00, 0x62, 0x91, 0x26, 0x21, 0xa7, 0xdd, 0x25, 0x0f, 0x09, 0x0d, 0x35, 0xad, 0xcf, 0x11, 0x8e, 0x6e, 0xe8, 0xae, 0x1d, 0x95, 0xcb, 0x88, 0xf8, 0x70, 0x7b, 0x91, + /* (2^433)P */ 0x0c, 0x19, 0x5c, 0xd9, 0x8d, 0xda, 0x9d, 0x2c, 0x90, 0x54, 0x65, 0xe8, 0xb6, 0x35, 0x50, 0xae, 0xea, 0xae, 0x43, 0xb7, 0x1e, 0x99, 0x8b, 0x4c, 0x36, 0x4e, 0xe4, 0x1e, 0xc4, 0x64, 0x43, 0xb6, 0xeb, 0xd4, 0xe9, 0x60, 0x22, 0xee, 0xcf, 0xb8, 0x52, 0x1b, 0xf0, 0x04, 0xce, 0xbc, 0x2b, 0xf0, 0xbe, 0xcd, 0x44, 0x74, 0x1e, 0x1f, 0x63, 0xf9, + /* (2^434)P */ 0xe1, 0x3f, 0x95, 0x94, 0xb2, 0xb6, 0x31, 0xa9, 0x1b, 0xdb, 0xfd, 0x0e, 0xdb, 0xdd, 0x1a, 0x22, 0x78, 0x60, 0x9f, 0x75, 0x5f, 0x93, 0x06, 0x0c, 0xd8, 0xbb, 0xa2, 0x85, 0x2b, 0x5e, 0xc0, 0x9b, 0xa8, 0x5d, 0xaf, 0x93, 0x91, 0x91, 0x47, 0x41, 0x1a, 0xfc, 0xb4, 0x51, 0x85, 0xad, 0x69, 0x4d, 0x73, 0x69, 0xd5, 0x4e, 0x82, 0xfb, 0x66, 0xcb, + /* (2^435)P */ 0x7c, 0xbe, 0xc7, 0x51, 0xc4, 0x74, 0x6e, 0xab, 0xfd, 0x41, 0x4f, 0x76, 0x4f, 0x24, 0x03, 0xd6, 0x2a, 0xb7, 0x42, 0xb4, 0xda, 0x41, 0x2c, 0x82, 0x48, 0x4c, 0x7f, 0x6f, 0x25, 0x5d, 0x36, 0xd4, 0x69, 0xf5, 0xef, 0x02, 0x81, 0xea, 0x6f, 0x19, 0x69, 0xe8, 0x6f, 0x5b, 0x2f, 0x14, 0x0e, 0x6f, 0x89, 0xb4, 0xb5, 0xd8, 0xae, 0xef, 0x7b, 0x87, + /* (2^436)P */ 0xe9, 0x91, 0xa0, 0x8b, 0xc9, 0xe0, 0x01, 0x90, 0x37, 0xc1, 0x6f, 0xdc, 0x5e, 0xf7, 0xbf, 0x43, 0x00, 0xaa, 0x10, 0x76, 0x76, 0x18, 0x6e, 0x19, 0x1e, 0x94, 0x50, 0x11, 0x0a, 0xd1, 0xe2, 0xdb, 0x08, 0x21, 0xa0, 0x1f, 0xdb, 0x54, 0xfe, 0xea, 0x6e, 0xa3, 0x68, 0x56, 0x87, 0x0b, 0x22, 0x4e, 0x66, 0xf3, 0x82, 0x82, 0x00, 0xcd, 0xd4, 0x12, + /* (2^437)P */ 0x25, 0x8e, 0x24, 0x77, 0x64, 0x4c, 0xe0, 0xf8, 0x18, 0xc0, 0xdc, 0xc7, 0x1b, 0x35, 0x65, 0xde, 0x67, 0x41, 0x5e, 0x6f, 0x90, 0x82, 0xa7, 0x2e, 0x6d, 0xf1, 0x47, 0xb4, 0x92, 0x9c, 0xfd, 0x6a, 0x9a, 0x41, 0x36, 0x20, 0x24, 0x58, 0xc3, 0x59, 0x07, 0x9a, 0xfa, 0x9f, 0x03, 0xcb, 0xc7, 0x69, 0x37, 0x60, 0xe1, 0xab, 0x13, 0x72, 0xee, 0xa2, + /* (2^438)P */ 0x74, 0x78, 0xfb, 0x13, 0xcb, 0x8e, 0x37, 0x1a, 0xf6, 0x1d, 0x17, 0x83, 0x06, 0xd4, 0x27, 0x06, 0x21, 0xe8, 0xda, 0xdf, 0x6b, 0xf3, 0x83, 0x6b, 0x34, 0x8a, 0x8c, 0xee, 0x01, 0x05, 0x5b, 0xed, 0xd3, 0x1b, 0xc9, 0x64, 0x83, 0xc9, 0x49, 0xc2, 0x57, 0x1b, 0xdd, 0xcf, 0xf1, 0x9d, 0x63, 0xee, 0x1c, 0x0d, 0xa0, 0x0a, 0x73, 0x1f, 0x5b, 0x32, + /* (2^439)P */ 0x29, 0xce, 0x1e, 0xc0, 0x6a, 0xf5, 0xeb, 0x99, 0x5a, 0x39, 0x23, 0xe9, 0xdd, 0xac, 0x44, 0x88, 0xbc, 0x80, 0x22, 0xde, 0x2c, 0xcb, 0xa8, 0x3b, 0xff, 0xf7, 0x6f, 0xc7, 0x71, 0x72, 0xa8, 0xa3, 0xf6, 0x4d, 0xc6, 0x75, 0xda, 0x80, 0xdc, 0xd9, 0x30, 0xd9, 0x07, 0x50, 0x5a, 0x54, 0x7d, 0xda, 0x39, 0x6f, 0x78, 0x94, 0xbf, 0x25, 0x98, 0xdc, + /* (2^440)P */ 0x01, 0x26, 0x62, 0x44, 0xfb, 0x0f, 0x11, 0x72, 0x73, 0x0a, 0x16, 0xc7, 0x16, 0x9c, 0x9b, 0x37, 0xd8, 0xff, 0x4f, 0xfe, 0x57, 0xdb, 0xae, 0xef, 0x7d, 0x94, 0x30, 0x04, 0x70, 0x83, 0xde, 0x3c, 0xd4, 0xb5, 0x70, 0xda, 0xa7, 0x55, 0xc8, 0x19, 0xe1, 0x36, 0x15, 0x61, 0xe7, 0x3b, 0x7d, 0x85, 0xbb, 0xf3, 0x42, 0x5a, 0x94, 0xf4, 
0x53, 0x2a, + /* (2^441)P */ 0x14, 0x60, 0xa6, 0x0b, 0x83, 0xe1, 0x23, 0x77, 0xc0, 0xce, 0x50, 0xed, 0x35, 0x8d, 0x98, 0x99, 0x7d, 0xf5, 0x8d, 0xce, 0x94, 0x25, 0xc8, 0x0f, 0x6d, 0xfa, 0x4a, 0xa4, 0x3a, 0x1f, 0x66, 0xfb, 0x5a, 0x64, 0xaf, 0x8b, 0x54, 0x54, 0x44, 0x3f, 0x5b, 0x88, 0x61, 0xe4, 0x48, 0x45, 0x26, 0x20, 0xbe, 0x0d, 0x06, 0xbb, 0x65, 0x59, 0xe1, 0x36, + /* (2^442)P */ 0xb7, 0x98, 0xce, 0xa3, 0xe3, 0xee, 0x11, 0x1b, 0x9e, 0x24, 0x59, 0x75, 0x31, 0x37, 0x44, 0x6f, 0x6b, 0x9e, 0xec, 0xb7, 0x44, 0x01, 0x7e, 0xab, 0xbb, 0x69, 0x5d, 0x11, 0xb0, 0x30, 0x64, 0xea, 0x91, 0xb4, 0x7a, 0x8c, 0x02, 0x4c, 0xb9, 0x10, 0xa7, 0xc7, 0x79, 0xe6, 0xdc, 0x77, 0xe3, 0xc8, 0xef, 0x3e, 0xf9, 0x38, 0x81, 0xce, 0x9a, 0xb2, + /* (2^443)P */ 0x91, 0x12, 0x76, 0xd0, 0x10, 0xb4, 0xaf, 0xe1, 0x89, 0x3a, 0x93, 0x6b, 0x5c, 0x19, 0x5f, 0x24, 0xed, 0x04, 0x92, 0xc7, 0xf0, 0x00, 0x08, 0xc1, 0x92, 0xff, 0x90, 0xdb, 0xb2, 0xbf, 0xdf, 0x49, 0xcd, 0xbd, 0x5c, 0x6e, 0xbf, 0x16, 0xbb, 0x61, 0xf9, 0x20, 0x33, 0x35, 0x93, 0x11, 0xbc, 0x59, 0x69, 0xce, 0x18, 0x9f, 0xf8, 0x7b, 0xa1, 0x6e, + /* (2^444)P */ 0xa1, 0xf4, 0xaf, 0xad, 0xf8, 0xe6, 0x99, 0xd2, 0xa1, 0x4d, 0xde, 0x56, 0xc9, 0x7b, 0x0b, 0x11, 0x3e, 0xbf, 0x89, 0x1a, 0x9a, 0x90, 0xe5, 0xe2, 0xa6, 0x37, 0x88, 0xa1, 0x68, 0x59, 0xae, 0x8c, 0xec, 0x02, 0x14, 0x8d, 0xb7, 0x2e, 0x25, 0x75, 0x7f, 0x76, 0x1a, 0xd3, 0x4d, 0xad, 0x8a, 0x00, 0x6c, 0x96, 0x49, 0xa4, 0xc3, 0x2e, 0x5c, 0x7b, + /* (2^445)P */ 0x26, 0x53, 0xf7, 0xda, 0xa8, 0x01, 0x14, 0xb1, 0x63, 0xe3, 0xc3, 0x89, 0x88, 0xb0, 0x85, 0x40, 0x2b, 0x26, 0x9a, 0x10, 0x1a, 0x70, 0x33, 0xf4, 0x50, 0x9d, 0x4d, 0xd8, 0x64, 0xc6, 0x0f, 0xe1, 0x17, 0xc8, 0x10, 0x4b, 0xfc, 0xa0, 0xc9, 0xba, 0x2c, 0x98, 0x09, 0xf5, 0x84, 0xb6, 0x7c, 0x4e, 0xa3, 0xe3, 0x81, 0x1b, 0x32, 0x60, 0x02, 0xdd, + /* (2^446)P */ 0xa3, 0xe5, 0x86, 0xd4, 0x43, 0xa8, 0xd1, 0x98, 0x9d, 0x9d, 0xdb, 0x04, 0xcf, 0x6e, 0x35, 0x05, 0x30, 0x53, 0x3b, 0xbc, 0x90, 0x00, 0x4a, 0xc5, 0x40, 0x2a, 0x0f, 0xde, 0x1a, 0xd7, 0x36, 0x27, 0x44, 0x62, 0xa6, 0xac, 0x9d, 0xd2, 0x70, 0x69, 0x14, 0x39, 0x9b, 0xd1, 0xc3, 0x0a, 0x3a, 0x82, 0x0e, 0xf1, 0x94, 0xd7, 0x42, 0x94, 0xd5, 0x7d, + /* (2^447)P */ 0x04, 0xc0, 0x6e, 0x12, 0x90, 0x70, 0xf9, 0xdf, 0xf7, 0xc9, 0x86, 0xc0, 0xe6, 0x92, 0x8b, 0x0a, 0xa1, 0xc1, 0x3b, 0xcc, 0x33, 0xb7, 0xf0, 0xeb, 0x51, 0x50, 0x80, 0x20, 0x69, 0x1c, 0x4f, 0x89, 0x05, 0x1e, 0xe4, 0x7a, 0x0a, 0xc2, 0xf0, 0xf5, 0x78, 0x91, 0x76, 0x34, 0x45, 0xdc, 0x24, 0x53, 0x24, 0x98, 0xe2, 0x73, 0x6f, 0xe6, 0x46, 0x67, +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go new file mode 100644 index 000000000..b6b236e5d --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go @@ -0,0 +1,71 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +var ( + // genX is the x-coordinate of the generator of Goldilocks curve. + genX = fp.Elt{ + 0x5e, 0xc0, 0x0c, 0xc7, 0x2b, 0xa8, 0x26, 0x26, + 0x8e, 0x93, 0x00, 0x8b, 0xe1, 0x80, 0x3b, 0x43, + 0x11, 0x65, 0xb6, 0x2a, 0xf7, 0x1a, 0xae, 0x12, + 0x64, 0xa4, 0xd3, 0xa3, 0x24, 0xe3, 0x6d, 0xea, + 0x67, 0x17, 0x0f, 0x47, 0x70, 0x65, 0x14, 0x9e, + 0xda, 0x36, 0xbf, 0x22, 0xa6, 0x15, 0x1d, 0x22, + 0xed, 0x0d, 0xed, 0x6b, 0xc6, 0x70, 0x19, 0x4f, + } + // genY is the y-coordinate of the generator of Goldilocks curve. 
+ genY = fp.Elt{ + 0x14, 0xfa, 0x30, 0xf2, 0x5b, 0x79, 0x08, 0x98, + 0xad, 0xc8, 0xd7, 0x4e, 0x2c, 0x13, 0xbd, 0xfd, + 0xc4, 0x39, 0x7c, 0xe6, 0x1c, 0xff, 0xd3, 0x3a, + 0xd7, 0xc2, 0xa0, 0x05, 0x1e, 0x9c, 0x78, 0x87, + 0x40, 0x98, 0xa3, 0x6c, 0x73, 0x73, 0xea, 0x4b, + 0x62, 0xc7, 0xc9, 0x56, 0x37, 0x20, 0x76, 0x88, + 0x24, 0xbc, 0xb6, 0x6e, 0x71, 0x46, 0x3f, 0x69, + } + // paramD is -39081 in Fp. + paramD = fp.Elt{ + 0x56, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } + // order is 2^446-0x8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d, + // which is the number of points in the prime subgroup. + order = Scalar{ + 0xf3, 0x44, 0x58, 0xab, 0x92, 0xc2, 0x78, 0x23, + 0x55, 0x8f, 0xc5, 0x8d, 0x72, 0xc2, 0x6c, 0x21, + 0x90, 0x36, 0xd6, 0xae, 0x49, 0xdb, 0x4e, 0xc4, + 0xe9, 0x23, 0xca, 0x7c, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, + } + // residue448 is 2^448 mod order. + residue448 = [4]uint64{ + 0x721cf5b5529eec34, 0x7a4cf635c8e9c2ab, 0xeec492d944a725bf, 0x20cd77058, + } + // invFour is 1/4 mod order. + invFour = Scalar{ + 0x3d, 0x11, 0xd6, 0xaa, 0xa4, 0x30, 0xde, 0x48, + 0xd5, 0x63, 0x71, 0xa3, 0x9c, 0x30, 0x5b, 0x08, + 0xa4, 0x8d, 0xb5, 0x6b, 0xd2, 0xb6, 0x13, 0x71, + 0xfa, 0x88, 0x32, 0xdf, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, + } + // paramDTwist is -39082 in Fp. The D parameter of the twist curve. + paramDTwist = fp.Elt{ + 0x55, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } +) diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go new file mode 100644 index 000000000..5a939100d --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go @@ -0,0 +1,80 @@ +// Package goldilocks provides elliptic curve operations over the goldilocks curve. +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +// Curve is the Goldilocks curve x^2+y^2=1-39081x^2y^2. +type Curve struct{} + +// Identity returns the identity point. +func (Curve) Identity() *Point { + return &Point{ + y: fp.One(), + z: fp.One(), + } +} + +// IsOnCurve returns true if the point lies on the curve.
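+// It verifies the two extended-coordinate relations x^2+y^2 = z^2 + d*t^2 and x*y = t*z, where t = ta*tb.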
+func (Curve) IsOnCurve(P *Point) bool { + x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + rhs, lhs := &fp.Elt{}, &fp.Elt{} + fp.Mul(t, &P.ta, &P.tb) // t = ta*tb + fp.Sqr(x2, &P.x) // x^2 + fp.Sqr(y2, &P.y) // y^2 + fp.Sqr(z2, &P.z) // z^2 + fp.Sqr(t2, t) // t^2 + fp.Add(lhs, x2, y2) // x^2 + y^2 + fp.Mul(rhs, t2, &paramD) // dt^2 + fp.Add(rhs, rhs, z2) // z^2 + dt^2 + fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2) + eq0 := fp.IsZero(lhs) + + fp.Mul(lhs, &P.x, &P.y) // xy + fp.Mul(rhs, t, &P.z) // tz + fp.Sub(lhs, lhs, rhs) // xy - tz + eq1 := fp.IsZero(lhs) + return eq0 && eq1 +} + +// Generator returns the generator point. +func (Curve) Generator() *Point { + return &Point{ + x: genX, + y: genY, + z: fp.One(), + ta: genX, + tb: genY, + } +} + +// Order returns the number of points in the prime subgroup. +func (Curve) Order() Scalar { return order } + +// Double returns 2P. +func (Curve) Double(P *Point) *Point { R := *P; R.Double(); return &R } + +// Add returns P+Q. +func (Curve) Add(P, Q *Point) *Point { R := *P; R.Add(Q); return &R } + +// ScalarMult returns kP. This function runs in constant time. +func (e Curve) ScalarMult(k *Scalar, P *Point) *Point { + k4 := &Scalar{} + k4.divBy4(k) + return e.pull(twistCurve{}.ScalarMult(k4, e.push(P))) +} + +// ScalarBaseMult returns kG where G is the generator point. This function runs in constant time. +func (e Curve) ScalarBaseMult(k *Scalar) *Point { + k4 := &Scalar{} + k4.divBy4(k) + return e.pull(twistCurve{}.ScalarBaseMult(k4)) +} + +// CombinedMult returns mG+nP, where G is the generator point. This function is non-constant time. +func (e Curve) CombinedMult(m, n *Scalar, P *Point) *Point { + m4 := &Scalar{} + n4 := &Scalar{} + m4.divBy4(m) + n4.divBy4(n) + return e.pull(twistCurve{}.CombinedMult(m4, n4, twistCurve{}.pull(P))) +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go new file mode 100644 index 000000000..b1daab851 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go @@ -0,0 +1,52 @@ +package goldilocks + +import fp "github.com/cloudflare/circl/math/fp448" + +func (Curve) pull(P *twistPoint) *Point { return twistCurve{}.push(P) } +func (twistCurve) pull(P *Point) *twistPoint { return Curve{}.push(P) } + +// push sends a point on the Goldilocks curve to a point on the twist curve. +func (Curve) push(P *Point) *twistPoint { + Q := &twistPoint{} + Px, Py, Pz := &P.x, &P.y, &P.z + a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + *d = *a // D = A + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, a) // (x+y)^2-A + fp.Sub(e, e, b) // E = (x+y)^2-A-B + fp.Add(h, b, d) // H = B+D + fp.Sub(g, b, d) // G = B-D + fp.Sub(f, c, h) // F = C-H + fp.Mul(&Q.z, f, g) // Z = F * G + fp.Mul(&Q.x, e, f) // X = E * F + fp.Mul(&Q.y, g, h) // Y = G * H, T = E * H + return Q +} + +// push sends a point on the twist curve to a point on the Goldilocks curve.
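+// It mirrors Curve.push above; the only difference is the sign of D (D = A there, D = -A here), reflecting the a=+1 versus a=-1 curve coefficient.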
+func (twistCurve) push(P *twistPoint) *Point { + Q := &Point{} + Px, Py, Pz := &P.x, &P.y, &P.z + a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Neg(d, a) // D = -A + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, a) // (x+y)^2-A + fp.Sub(e, e, b) // E = (x+y)^2-A-B + fp.Add(h, b, d) // H = B+D + fp.Sub(g, b, d) // G = B-D + fp.Sub(f, c, h) // F = C-H + fp.Mul(&Q.z, f, g) // Z = F * G + fp.Mul(&Q.x, e, f) // X = E * F + fp.Mul(&Q.y, g, h) // Y = G * H, T = E * H + return Q +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go new file mode 100644 index 000000000..11f73de05 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go @@ -0,0 +1,171 @@ +package goldilocks + +import ( + "errors" + "fmt" + + fp "github.com/cloudflare/circl/math/fp448" +) + +// Point is a point on the Goldilocks Curve. +type Point struct{ x, y, z, ta, tb fp.Elt } + +func (P Point) String() string { + return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb) +} + +// FromAffine creates a point from affine coordinates. +func FromAffine(x, y *fp.Elt) (*Point, error) { + P := &Point{ + x: *x, + y: *y, + z: fp.One(), + ta: *x, + tb: *y, + } + if !(Curve{}).IsOnCurve(P) { + return P, errors.New("point not on curve") + } + return P, nil +} + +// isLessThan returns true if 0 <= x < y, and assumes that slices are of the +// same length and are interpreted in little-endian order. +func isLessThan(x, y []byte) bool { + i := len(x) - 1 + for i > 0 && x[i] == y[i] { + i-- + } + return x[i] < y[i] +} + +// FromBytes returns a point from the input buffer. +func FromBytes(in []byte) (*Point, error) { + if len(in) < fp.Size+1 { + return nil, errors.New("wrong input length") + } + err := errors.New("invalid decoding") + P := &Point{} + signX := in[fp.Size] >> 7 + copy(P.y[:], in[:fp.Size]) + p := fp.P() + if !isLessThan(P.y[:], p[:]) { + return nil, err + } + + u, v := &fp.Elt{}, &fp.Elt{} + one := fp.One() + fp.Sqr(u, &P.y) // u = y^2 + fp.Mul(v, u, &paramD) // v = dy^2 + fp.Sub(u, u, &one) // u = y^2-1 + fp.Sub(v, v, &one) // v = dy^2-1 + isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v) + if !isQR { + return nil, err + } + fp.Modp(&P.x) // x = x mod p + if fp.IsZero(&P.x) && signX == 1 { + return nil, err + } + if signX != (P.x[0] & 1) { + fp.Neg(&P.x, &P.x) + } + P.ta = P.x + P.tb = P.y + P.z = fp.One() + return P, nil +} + +// IsIdentity returns true if P is the identity Point. +func (P *Point) IsIdentity() bool { + return fp.IsZero(&P.x) && !fp.IsZero(&P.y) && !fp.IsZero(&P.z) && P.y == P.z +} + +// IsEqual returns true if P is equivalent to Q. +func (P *Point) IsEqual(Q *Point) bool { + l, r := &fp.Elt{}, &fp.Elt{} + fp.Mul(l, &P.x, &Q.z) + fp.Mul(r, &Q.x, &P.z) + fp.Sub(l, l, r) + b := fp.IsZero(l) + fp.Mul(l, &P.y, &Q.z) + fp.Mul(r, &Q.y, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + fp.Mul(l, &P.ta, &P.tb) + fp.Mul(l, l, &Q.z) + fp.Mul(r, &Q.ta, &Q.tb) + fp.Mul(r, r, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + return b +} + +// Neg obtains the inverse of the Point. +func (P *Point) Neg() { fp.Neg(&P.x, &P.x); fp.Neg(&P.ta, &P.ta) } + +// ToAffine returns the x,y affine coordinates of P.
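+// Note that it normalizes P in place (z is set to one) and returns copies of the affine x and y.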
+func (P *Point) ToAffine() (x, y fp.Elt) { + fp.Inv(&P.z, &P.z) // 1/z + fp.Mul(&P.x, &P.x, &P.z) // x/z + fp.Mul(&P.y, &P.y, &P.z) // y/z + fp.Modp(&P.x) + fp.Modp(&P.y) + fp.SetOne(&P.z) + P.ta = P.x + P.tb = P.y + return P.x, P.y +} + +// ToBytes stores P into a slice of bytes. +func (P *Point) ToBytes(out []byte) error { + if len(out) < fp.Size+1 { + return errors.New("invalid decoding") + } + x, y := P.ToAffine() + out[fp.Size] = (x[0] & 1) << 7 + return fp.ToBytes(out[:fp.Size], &y) +} + +// MarshalBinary encodes the receiver into a binary form and returns the result. +func (P *Point) MarshalBinary() (data []byte, err error) { + data = make([]byte, fp.Size+1) + err = P.ToBytes(data[:fp.Size+1]) + return data, err +} + +// UnmarshalBinary must be able to decode the form generated by MarshalBinary. +func (P *Point) UnmarshalBinary(data []byte) error { Q, err := FromBytes(data); *P = *Q; return err } + +// Double sets P = 2P. +func (P *Point) Double() { P.Add(P) } + +// Add sets P = P+Q. +func (P *Point) Add(Q *Point) { + // This is formula (5) from "Twisted Edwards Curves Revisited" by + // Hisil H., Wong K.KH., Carter G., Dawson E. (2008) + // https://doi.org/10.1007/978-3-540-89255-7_20 + x1, y1, z1, ta1, tb1 := &P.x, &P.y, &P.z, &P.ta, &P.tb + x2, y2, z2, ta2, tb2 := &Q.x, &Q.y, &Q.z, &Q.ta, &Q.tb + x3, y3, z3, E, H := &P.x, &P.y, &P.z, &P.ta, &P.tb + A, B, C, D := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + t1, t2, F, G := C, D, &fp.Elt{}, &fp.Elt{} + fp.Mul(t1, ta1, tb1) // t1 = ta1*tb1 + fp.Mul(t2, ta2, tb2) // t2 = ta2*tb2 + fp.Mul(A, x1, x2) // A = x1*x2 + fp.Mul(B, y1, y2) // B = y1*y2 + fp.Mul(C, t1, t2) // t1*t2 + fp.Mul(C, C, &paramD) // C = d*t1*t2 + fp.Mul(D, z1, z2) // D = z1*z2 + fp.Add(F, x1, y1) // x1+y1 + fp.Add(E, x2, y2) // x2+y2 + fp.Mul(E, E, F) // (x1+y1)*(x2+y2) + fp.Sub(E, E, A) // (x1+y1)*(x2+y2)-A + fp.Sub(E, E, B) // E = (x1+y1)*(x2+y2)-A-B + fp.Sub(F, D, C) // F = D-C + fp.Add(G, D, C) // G = D+C + fp.Sub(H, B, A) // H = B-A + fp.Mul(z3, F, G) // Z = F * G + fp.Mul(x3, E, F) // X = E * F + fp.Mul(y3, G, H) // Y = G * H, T = E * H +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go new file mode 100644 index 000000000..f98117b25 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go @@ -0,0 +1,203 @@ +package goldilocks + +import ( + "encoding/binary" + "math/bits" +) + +// ScalarSize is the size (in bytes) of scalars. +const ScalarSize = 56 // 448 / 8 + +// _N is the number of 64-bit words to store scalars. +const _N = 7 // 448 / 64 + +// Scalar represents a positive integer stored in little-endian order.
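+// The arithmetic methods below (Add, Sub, Mul, Red, Neg) reduce their results modulo the group order.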
+type Scalar [ScalarSize]byte + +type scalar64 [_N]uint64 + +func (z *scalar64) fromScalar(x *Scalar) { + z[0] = binary.LittleEndian.Uint64(x[0*8 : 1*8]) + z[1] = binary.LittleEndian.Uint64(x[1*8 : 2*8]) + z[2] = binary.LittleEndian.Uint64(x[2*8 : 3*8]) + z[3] = binary.LittleEndian.Uint64(x[3*8 : 4*8]) + z[4] = binary.LittleEndian.Uint64(x[4*8 : 5*8]) + z[5] = binary.LittleEndian.Uint64(x[5*8 : 6*8]) + z[6] = binary.LittleEndian.Uint64(x[6*8 : 7*8]) +} + +func (z *scalar64) toScalar(x *Scalar) { + binary.LittleEndian.PutUint64(x[0*8:1*8], z[0]) + binary.LittleEndian.PutUint64(x[1*8:2*8], z[1]) + binary.LittleEndian.PutUint64(x[2*8:3*8], z[2]) + binary.LittleEndian.PutUint64(x[3*8:4*8], z[3]) + binary.LittleEndian.PutUint64(x[4*8:5*8], z[4]) + binary.LittleEndian.PutUint64(x[5*8:6*8], z[5]) + binary.LittleEndian.PutUint64(x[6*8:7*8], z[6]) +} + +// add calculates z = x + y. Assumes len(z) > max(len(x),len(y)). +func add(z, x, y []uint64) uint64 { + l, L, zz := len(x), len(y), y + if l > L { + l, L, zz = L, l, x + } + c := uint64(0) + for i := 0; i < l; i++ { + z[i], c = bits.Add64(x[i], y[i], c) + } + for i := l; i < L; i++ { + z[i], c = bits.Add64(zz[i], 0, c) + } + return c +} + +// sub calculates z = x - y. Assumes len(z) > max(len(x),len(y)). +func sub(z, x, y []uint64) uint64 { + l, L, zz := len(x), len(y), y + if l > L { + l, L, zz = L, l, x + } + c := uint64(0) + for i := 0; i < l; i++ { + z[i], c = bits.Sub64(x[i], y[i], c) + } + for i := l; i < L; i++ { + z[i], c = bits.Sub64(zz[i], 0, c) + } + return c +} + +// mulWord calculates z = x * y. Assumes len(z) >= len(x)+1. +func mulWord(z, x []uint64, y uint64) { + for i := range z { + z[i] = 0 + } + carry := uint64(0) + for i := range x { + hi, lo := bits.Mul64(x[i], y) + lo, cc := bits.Add64(lo, z[i], 0) + hi, _ = bits.Add64(hi, 0, cc) + z[i], cc = bits.Add64(lo, carry, 0) + carry, _ = bits.Add64(hi, 0, cc) + } + z[len(x)] = carry +} + +// Cmov moves x into z if b=1. +func (z *scalar64) Cmov(b uint64, x *scalar64) { + m := uint64(0) - b + for i := range z { + z[i] = (z[i] &^ m) | (x[i] & m) + } +} + +// leftShift shifts to the left the words of z returning the more significant word. +func (z *scalar64) leftShift(low uint64) uint64 { + high := z[_N-1] + for i := _N - 1; i > 0; i-- { + z[i] = z[i-1] + } + z[0] = low + return high +} + +// reduceOneWord calculates z = z + 2^448*x such that the result fits in a Scalar. +func (z *scalar64) reduceOneWord(x uint64) { + prod := (&scalar64{})[:] + mulWord(prod, residue448[:], x) + cc := add(z[:], z[:], prod) + mulWord(prod, residue448[:], cc) + add(z[:], z[:], prod) +} + +// modOrder reduces z mod order. +func (z *scalar64) modOrder() { + var o64, x scalar64 + o64.fromScalar(&order) + // Performs: while (z >= order) { z = z-order } + // At most 8 (eight) iterations reduce 3 bits by subtracting. + for i := 0; i < 8; i++ { + c := sub(x[:], z[:], o64[:]) // (c || x) = z-order + z.Cmov(1-c, &x) // if c != 0 { z = x } + } +} + +// FromBytes stores z = x mod order, where x is a number stored in little-endian order. +func (z *Scalar) FromBytes(x []byte) { + n := len(x) + nCeil := (n + 7) >> 3 + for i := range z { + z[i] = 0 + } + if nCeil < _N { + copy(z[:], x) + return + } + copy(z[:], x[8*(nCeil-_N):]) + var z64 scalar64 + z64.fromScalar(z) + for i := nCeil - _N - 1; i >= 0; i-- { + low := binary.LittleEndian.Uint64(x[8*i:]) + high := z64.leftShift(low) + z64.reduceOneWord(high) + } + z64.modOrder() + z64.toScalar(z) +} + +// divBy4 calculates z = x/4 mod order. 
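+// Curve.ScalarMult depends on this: pushing a point through the degree-4 isogeny to the twist and pulling it back amounts to multiplication by 4, so the scalar is divided by 4 beforehand to compensate.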
+func (z *Scalar) divBy4(x *Scalar) { z.Mul(x, &invFour) } + +// Red reduces z mod order. +func (z *Scalar) Red() { var t scalar64; t.fromScalar(z); t.modOrder(); t.toScalar(z) } + +// Neg calculates z = -z mod order. +func (z *Scalar) Neg() { z.Sub(&order, z) } + +// Add calculates z = x+y mod order. +func (z *Scalar) Add(x, y *Scalar) { + var z64, x64, y64, t scalar64 + x64.fromScalar(x) + y64.fromScalar(y) + c := add(z64[:], x64[:], y64[:]) + add(t[:], z64[:], residue448[:]) + z64.Cmov(c, &t) + z64.modOrder() + z64.toScalar(z) +} + +// Sub calculates z = x-y mod order. +func (z *Scalar) Sub(x, y *Scalar) { + var z64, x64, y64, t scalar64 + x64.fromScalar(x) + y64.fromScalar(y) + c := sub(z64[:], x64[:], y64[:]) + sub(t[:], z64[:], residue448[:]) + z64.Cmov(c, &t) + z64.modOrder() + z64.toScalar(z) +} + +// Mul calculates z = x*y mod order. +func (z *Scalar) Mul(x, y *Scalar) { + var z64, x64, y64 scalar64 + prod := (&[_N + 1]uint64{})[:] + x64.fromScalar(x) + y64.fromScalar(y) + mulWord(prod, x64[:], y64[_N-1]) + copy(z64[:], prod[:_N]) + z64.reduceOneWord(prod[_N]) + for i := _N - 2; i >= 0; i-- { + h := z64.leftShift(0) + z64.reduceOneWord(h) + mulWord(prod, x64[:], y64[i]) + c := add(z64[:], z64[:], prod[:_N]) + z64.reduceOneWord(prod[_N] + c) + } + z64.modOrder() + z64.toScalar(z) +} + +// IsZero returns true if z=0. +func (z *Scalar) IsZero() bool { z.Red(); return *z == Scalar{} } diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go new file mode 100644 index 000000000..8cd4e333b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go @@ -0,0 +1,138 @@ +package goldilocks + +import ( + "crypto/subtle" + "math/bits" + + "github.com/cloudflare/circl/internal/conv" + "github.com/cloudflare/circl/math" + fp "github.com/cloudflare/circl/math/fp448" +) + +// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogenous to Goldilocks. +type twistCurve struct{} + +// Identity returns the identity point. +func (twistCurve) Identity() *twistPoint { + return &twistPoint{ + y: fp.One(), + z: fp.One(), + } +} + +// subYDiv16 updates x = (x - y) / 16. +func subYDiv16(x *scalar64, y int64) { + s := uint64(y >> 63) + x0, b0 := bits.Sub64((*x)[0], uint64(y), 0) + x1, b1 := bits.Sub64((*x)[1], s, b0) + x2, b2 := bits.Sub64((*x)[2], s, b1) + x3, b3 := bits.Sub64((*x)[3], s, b2) + x4, b4 := bits.Sub64((*x)[4], s, b3) + x5, b5 := bits.Sub64((*x)[5], s, b4) + x6, _ := bits.Sub64((*x)[6], s, b5) + x[0] = (x0 >> 4) | (x1 << 60) + x[1] = (x1 >> 4) | (x2 << 60) + x[2] = (x2 >> 4) | (x3 << 60) + x[3] = (x3 >> 4) | (x4 << 60) + x[4] = (x4 >> 4) | (x5 << 60) + x[5] = (x5 >> 4) | (x6 << 60) + x[6] = (x6 >> 4) +} + +func recodeScalar(d *[113]int8, k *Scalar) { + var k64 scalar64 + k64.fromScalar(k) + for i := 0; i < 112; i++ { + d[i] = int8((k64[0] & 0x1f) - 16) + subYDiv16(&k64, int64(d[i])) + } + d[112] = int8(k64[0]) +} + +// ScalarMult returns kP.
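+// The scalar is first recoded by recodeScalar into 113 signed digits in [-16,16) (a signed radix-32 representation); each digit then selects one of the eight precomputed odd multiples of P using constant-time cmov/cneg, so the sequence of operations is independent of k.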
+func (e twistCurve) ScalarMult(k *Scalar, P *twistPoint) *twistPoint {
+	var TabP [8]preTwistPointProy
+	var S preTwistPointProy
+	var d [113]int8
+
+	// If k is zero, replace it with the group order, an equivalent scalar
+	// for which the recoding below is well defined.
+	var isZero int
+	if k.IsZero() {
+		isZero = 1
+	}
+	subtle.ConstantTimeCopy(isZero, k[:], order[:])
+
+	// Force k to be odd; if k is even, use -k and negate the result at the end.
+	minusK := *k
+	isEven := 1 - int(k[0]&0x1)
+	minusK.Neg()
+	subtle.ConstantTimeCopy(isEven, k[:], minusK[:])
+	recodeScalar(&d, k)
+
+	P.oddMultiples(TabP[:])
+	Q := e.Identity()
+	for i := 112; i >= 0; i-- {
+		Q.Double()
+		Q.Double()
+		Q.Double()
+		Q.Double()
+		mask := d[i] >> 7
+		absDi := (d[i] + mask) ^ mask
+		inx := int32((absDi - 1) >> 1)
+		sig := int((d[i] >> 7) & 0x1)
+		for j := range TabP {
+			S.cmov(&TabP[j], uint(subtle.ConstantTimeEq(inx, int32(j))))
+		}
+		S.cneg(sig)
+		Q.mixAdd(&S)
+	}
+	Q.cneg(uint(isEven))
+	return Q
+}
+
+const (
+	omegaFix = 7
+	omegaVar = 5
+)
+
+// CombinedMult returns mG+nP. It runs in variable time, which suits
+// verification, where the scalars are public.
+func (e twistCurve) CombinedMult(m, n *Scalar, P *twistPoint) *twistPoint {
+	nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m[:]), omegaFix)
+	nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n[:]), omegaVar)
+
+	if len(nafFix) > len(nafVar) {
+		nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...)
+	} else if len(nafFix) < len(nafVar) {
+		nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...)
+	}
+
+	var TabQ [1 << (omegaVar - 2)]preTwistPointProy
+	P.oddMultiples(TabQ[:])
+	Q := e.Identity()
+	for i := len(nafFix) - 1; i >= 0; i-- {
+		Q.Double()
+		// Generator point
+		if nafFix[i] != 0 {
+			idxM := absolute(nafFix[i]) >> 1
+			R := tabVerif[idxM]
+			if nafFix[i] < 0 {
+				R.neg()
+			}
+			Q.mixAddZ1(&R)
+		}
+		// Variable input point
+		if nafVar[i] != 0 {
+			idxN := absolute(nafVar[i]) >> 1
+			S := TabQ[idxN]
+			if nafVar[i] < 0 {
+				S.neg()
+			}
+			Q.mixAdd(&S)
+		}
+	}
+	return Q
+}
+
+// absolute returns the absolute value of x.
+func absolute(x int32) int32 {
+	mask := x >> 31
+	return (x + mask) ^ mask
+}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go
new file mode 100644
index 000000000..c55db77b0
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go
@@ -0,0 +1,135 @@
+package goldilocks
+
+import (
+	"fmt"
+
+	fp "github.com/cloudflare/circl/math/fp448"
+)
+
+type twistPoint struct{ x, y, z, ta, tb fp.Elt }
+
+type preTwistPointAffine struct{ addYX, subYX, dt2 fp.Elt }
+
+type preTwistPointProy struct {
+	preTwistPointAffine
+	z2 fp.Elt
+}
+
+func (P *twistPoint) String() string {
+	return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb)
+}
+
+// cneg conditionally negates the point if b=1.
+func (P *twistPoint) cneg(b uint) {
+	t := &fp.Elt{}
+	fp.Neg(t, &P.x)
+	fp.Cmov(&P.x, t, b)
+	fp.Neg(t, &P.ta)
+	fp.Cmov(&P.ta, t, b)
+}
+
+// Double updates P with 2P.
+func (P *twistPoint) Double() {
+	// This is formula (7) from "Twisted Edwards Curves Revisited" by
+	// Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
+	// https://doi.org/10.1007/978-3-540-89255-7_20
+	Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
+	a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb
+	fp.Add(e, Px, Py) // x+y
+	fp.Sqr(a, Px)     // A = x^2
+	fp.Sqr(b, Py)     // B = y^2
+	fp.Sqr(c, Pz)     // z^2
+	fp.Add(c, c, c)   // C = 2*z^2
+	fp.Add(h, a, b)   // H = A+B
+	fp.Sqr(e, e)      // (x+y)^2
+	fp.Sub(e, e, h)   // E = (x+y)^2-A-B
+	fp.Sub(g, b, a)   // G = B-A
+	fp.Sub(f, c, g)   // F = C-G
+	fp.Mul(Pz, f, g)  // Z = F * G
+	fp.Mul(Px, e, f)  // X = E * F
+	fp.Mul(Py, g, h)  // Y = G * H, T = E * H
+}
+
+// mixAddZ1 calculates P = P+Q, where Q is a precomputed point with Z_Q = 1.
+func (P *twistPoint) mixAddZ1(Q *preTwistPointAffine) {
+	fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 (z2=1)
+	P.coreAddition(Q)
+}
+
+// coreAddition calculates P=P+Q for curves with A=-1.
+func (P *twistPoint) coreAddition(Q *preTwistPointAffine) {
+	// This is the formula following (5) from "Twisted Edwards Curves Revisited" by
+	// Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
+	// https://doi.org/10.1007/978-3-540-89255-7_20
+	Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
+	addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2
+	a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb
+	fp.Mul(c, Pta, Ptb)  // t1 = ta*tb
+	fp.Sub(h, Py, Px)    // y1-x1
+	fp.Add(b, Py, Px)    // y1+x1
+	fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2)
+	fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2)
+	fp.Mul(c, c, dt2)    // C = 2*D*t1*t2
+	fp.Sub(e, b, a)      // E = B-A
+	fp.Add(h, b, a)      // H = B+A
+	fp.Sub(f, d, c)      // F = D-C
+	fp.Add(g, d, c)      // G = D+C
+	fp.Mul(Pz, f, g)     // Z = F * G
+	fp.Mul(Px, e, f)     // X = E * F
+	fp.Mul(Py, g, h)     // Y = G * H, T = E * H
+}
+
+func (P *preTwistPointAffine) neg() {
+	P.addYX, P.subYX = P.subYX, P.addYX
+	fp.Neg(&P.dt2, &P.dt2)
+}
+
+func (P *preTwistPointAffine) cneg(b int) {
+	t := &fp.Elt{}
+	fp.Cswap(&P.addYX, &P.subYX, uint(b))
+	fp.Neg(t, &P.dt2)
+	fp.Cmov(&P.dt2, t, uint(b))
+}
+
+func (P *preTwistPointAffine) cmov(Q *preTwistPointAffine, b uint) {
+	fp.Cmov(&P.addYX, &Q.addYX, b)
+	fp.Cmov(&P.subYX, &Q.subYX, b)
+	fp.Cmov(&P.dt2, &Q.dt2, b)
+}
+
+// mixAdd calculates P = P+Q, where Q is a precomputed point with Z_Q != 1.
+func (P *twistPoint) mixAdd(Q *preTwistPointProy) {
+	fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2
+	P.coreAddition(&Q.preTwistPointAffine)
+}
+
+// oddMultiples calculates T[i] = (2*i+1)P for 0 <= i < len(T).
+// Note that on return P holds (2*len(T)-1)P.
+func (P *twistPoint) oddMultiples(T []preTwistPointProy) {
+	if n := len(T); n > 0 {
+		T[0].FromTwistPoint(P)
+		_2P := *P
+		_2P.Double()
+		R := &preTwistPointProy{}
+		R.FromTwistPoint(&_2P)
+		for i := 1; i < n; i++ {
+			P.mixAdd(R)
+			T[i].FromTwistPoint(P)
+		}
+	}
+}
+
+// cmov conditionally moves Q into P if b=1.
+func (P *preTwistPointProy) cmov(Q *preTwistPointProy, b uint) {
+	P.preTwistPointAffine.cmov(&Q.preTwistPointAffine, b)
+	fp.Cmov(&P.z2, &Q.z2, b)
+}
+
+// FromTwistPoint precomputes some coordinates of Q for mixed addition.
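+// It stores addYX = y+x, subYX = y-x, dt2 = 2*paramDTwist*ta*tb, and z2 = 2*z,
+// which are exactly the operands consumed by coreAddition.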
+func (P *preTwistPointProy) FromTwistPoint(Q *twistPoint) {
+	fp.Add(&P.addYX, &Q.y, &Q.x)          // addYX = X + Y
+	fp.Sub(&P.subYX, &Q.y, &Q.x)          // subYX = Y - X
+	fp.Mul(&P.dt2, &Q.ta, &Q.tb)          // T = ta*tb
+	fp.Mul(&P.dt2, &P.dt2, &paramDTwist)  // D*T
+	fp.Add(&P.dt2, &P.dt2, &P.dt2)        // dt2 = 2*D*T
+	fp.Add(&P.z2, &Q.z, &Q.z)             // z2 = 2*Z
+}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go
new file mode 100644
index 000000000..ed432e02c
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go
@@ -0,0 +1,216 @@
+package goldilocks
+
+import fp "github.com/cloudflare/circl/math/fp448"
+
+var tabFixMult = [fxV][fx2w1]preTwistPointAffine{
+	{
+		{
+			addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
+			subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
+			dt2:   fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55},
+		},
+		{
+			addYX: fp.Elt{0xca, 0xd8, 0x7d, 0x86, 0x1a, 0xef, 0xad, 0x11, 0xe3, 0x27, 0x41, 0x7e, 0x7f, 0x3e, 0xa9, 0xd2, 0xb5, 0x4e, 0x50, 0xe0, 0x77, 0x91, 0xc2, 0x13, 0x52, 0x73, 0x41, 0x09, 0xa6, 0x57, 0x9a, 0xc8, 0xa8, 0x90, 0x9d, 0x26, 0x14, 0xbb, 0xa1, 0x2a, 0xf7, 0x45, 0x43, 0x4e, 0xea, 0x35, 0x62, 0xe1, 0x08, 0x85, 0x46, 0xb8, 0x24, 0x05, 0x2d, 0xab},
+			subYX: fp.Elt{0x9b, 0xe6, 0xd3, 0xe5, 0xfe, 0x50, 0x36, 0x3c, 0x3c, 0x6d, 0x74, 0x1d, 0x74, 0xc0, 0xde, 0x5b, 0x45, 0x27, 0xe5, 0x12, 0xee, 0x63, 0x35, 0x6b, 0x13, 0xe2, 0x41, 0x6b, 0x3a, 0x05, 0x2b, 0xb1, 0x89, 0x26, 0xb6, 0xc6, 0xd1, 0x84, 0xff, 0x0e, 0x9b, 0xa3, 0xfb, 0x21, 0x36, 0x6b, 0x01, 0xf7, 0x9f, 0x7c, 0xeb, 0xf5, 0x18, 0x7a, 0x2a, 0x70},
+			dt2:   fp.Elt{0x09, 0xad, 0x99, 0x1a, 0x38, 0xd3, 0xdf, 0x22, 0x37, 0x32, 0x61, 0x8b, 0xf3, 0x19, 0x48, 0x08, 0xe8, 0x49, 0xb6, 0x4a, 0xa7, 0xed, 0xa4, 0xa2, 0xee, 0x86, 0xd7, 0x31, 0x5e, 0xce, 0x95, 0x76, 0x86, 0x42, 0x1c, 0x9d, 0x07, 0x14, 0x8c, 0x34, 0x18, 0x9c, 0x6d, 0x3a, 0xdf, 0xa9, 0xe8, 0x36, 0x7e, 0xe4, 0x95, 0xbe, 0xb5, 0x09, 0xf8, 0x9c},
+		},
+		{
+			addYX: fp.Elt{0x51, 0xdb, 0x49, 0xa8, 0x9f, 0xe3, 0xd7, 0xec, 0x0d, 0x0f, 0x49, 0xe8, 0xb6, 0xc5, 0x0f, 0x5a, 0x1c, 0xce, 0x54, 0x0d, 0xb1, 0x8d, 0x5b, 0xbf, 0xf4, 0xaa, 0x34, 0x77, 0xc4, 0x5d, 0x59, 0xb6, 0xc5, 0x0e, 0x5a, 0xd8, 0x5b, 0x30, 0xc2, 0x1d, 0xec, 0x85, 0x1c, 0x42, 0xbe, 0x24, 0x2e, 0x50, 0x55, 0x44, 0xb2, 0x3a, 0x01, 0xaa, 0x98, 0xfb},
+			subYX: fp.Elt{0xe7, 0x29, 0xb7, 0xd0, 0xaa, 0x4f, 0x32, 0x53, 0x56, 0xde, 0xbc, 0xd1, 0x92, 0x5d, 0x19, 0xbe, 0xa3, 0xe3, 0x75, 0x48, 0xe0, 0x7a, 0x1b, 0x54, 0x7a, 0xb7, 0x41, 0x77, 0x84, 0x38, 0xdd, 0x14, 0x9f, 0xca, 0x3f, 0xa3, 0xc8, 0xa7, 0x04, 0x70, 0xf1, 0x4d, 0x3d, 0xb3, 0x84, 0x79, 0xcb, 0xdb,
0xe4, 0xc5, 0x42, 0x9b, 0x57, 0x19, 0xf1, 0x2d}, + dt2: fp.Elt{0x20, 0xb4, 0x94, 0x9e, 0xdf, 0x31, 0x44, 0x0b, 0xc9, 0x7b, 0x75, 0x40, 0x9d, 0xd1, 0x96, 0x39, 0x70, 0x71, 0x15, 0xc8, 0x93, 0xd5, 0xc5, 0xe5, 0xba, 0xfe, 0xee, 0x08, 0x6a, 0x98, 0x0a, 0x1b, 0xb2, 0xaa, 0x3a, 0xf4, 0xa4, 0x79, 0xf9, 0x8e, 0x4d, 0x65, 0x10, 0x9b, 0x3a, 0x6e, 0x7c, 0x87, 0x94, 0x92, 0x11, 0x65, 0xbf, 0x1a, 0x09, 0xde}, + }, + { + addYX: fp.Elt{0xf3, 0x84, 0x76, 0x77, 0xa5, 0x6b, 0x27, 0x3b, 0x83, 0x3d, 0xdf, 0xa0, 0xeb, 0x32, 0x6d, 0x58, 0x81, 0x57, 0x64, 0xc2, 0x21, 0x7c, 0x9b, 0xea, 0xe6, 0xb0, 0x93, 0xf9, 0xe7, 0xc3, 0xed, 0x5a, 0x8e, 0xe2, 0xb4, 0x72, 0x76, 0x66, 0x0f, 0x22, 0x29, 0x94, 0x3e, 0x63, 0x48, 0x5e, 0x80, 0xcb, 0xac, 0xfa, 0x95, 0xb6, 0x4b, 0xc4, 0x95, 0x33}, + subYX: fp.Elt{0x0c, 0x55, 0xd1, 0x5e, 0x5f, 0xbf, 0xbf, 0xe2, 0x4c, 0xfc, 0x37, 0x4a, 0xc4, 0xb1, 0xf4, 0x83, 0x61, 0x93, 0x60, 0x8e, 0x9f, 0x31, 0xf0, 0xa0, 0x41, 0xff, 0x1d, 0xe2, 0x7f, 0xca, 0x40, 0xd6, 0x88, 0xe8, 0x91, 0x61, 0xe2, 0x11, 0x18, 0x83, 0xf3, 0x25, 0x2f, 0x3f, 0x49, 0x40, 0xd4, 0x83, 0xe2, 0xd7, 0x74, 0x6a, 0x16, 0x86, 0x4e, 0xab}, + dt2: fp.Elt{0xdd, 0x58, 0x65, 0xd8, 0x9f, 0xdd, 0x70, 0x7f, 0x0f, 0xec, 0xbd, 0x5c, 0x5c, 0x9b, 0x7e, 0x1b, 0x9f, 0x79, 0x36, 0x1f, 0xfd, 0x79, 0x10, 0x1c, 0x52, 0xf3, 0x22, 0xa4, 0x1f, 0x71, 0x6e, 0x63, 0x14, 0xf4, 0xa7, 0x3e, 0xbe, 0xad, 0x43, 0x30, 0x38, 0x8c, 0x29, 0xc6, 0xcf, 0x50, 0x75, 0x21, 0xe5, 0x78, 0xfd, 0xb0, 0x9a, 0xc4, 0x6d, 0xd4}, + }, + }, + { + { + addYX: fp.Elt{0x7a, 0xa1, 0x38, 0xa6, 0xfd, 0x0e, 0x96, 0xd5, 0x26, 0x76, 0x86, 0x70, 0x80, 0x30, 0xa6, 0x67, 0xeb, 0xf4, 0x39, 0xdb, 0x22, 0xf5, 0x9f, 0x98, 0xe4, 0xb5, 0x3a, 0x0c, 0x59, 0xbf, 0x85, 0xc6, 0xf0, 0x0b, 0x1c, 0x41, 0x38, 0x09, 0x01, 0xdb, 0xd6, 0x3c, 0xb7, 0xf1, 0x08, 0x6b, 0x4b, 0x9e, 0x63, 0x53, 0x83, 0xd3, 0xab, 0xa3, 0x72, 0x0d}, + subYX: fp.Elt{0x84, 0x68, 0x25, 0xe8, 0xe9, 0x8f, 0x91, 0xbf, 0xf7, 0xa4, 0x30, 0xae, 0xea, 0x9f, 0xdd, 0x56, 0x64, 0x09, 0xc9, 0x54, 0x68, 0x4e, 0x33, 0xc5, 0x6f, 0x7b, 0x2d, 0x52, 0x2e, 0x42, 0xbe, 0xbe, 0xf5, 0x64, 0xbf, 0x77, 0x54, 0xdf, 0xb0, 0x10, 0xd2, 0x16, 0x5d, 0xce, 0xaf, 0x9f, 0xfb, 0xa3, 0x63, 0x50, 0xcb, 0xc0, 0xd0, 0x88, 0x44, 0xa3}, + dt2: fp.Elt{0xc3, 0x8b, 0xa5, 0xf1, 0x44, 0xe4, 0x41, 0xcd, 0x75, 0xe3, 0x17, 0x69, 0x5b, 0xb9, 0xbb, 0xee, 0x82, 0xbb, 0xce, 0x57, 0xdf, 0x2a, 0x9c, 0x12, 0xab, 0x66, 0x08, 0x68, 0x05, 0x1b, 0x87, 0xee, 0x5d, 0x1e, 0x18, 0x14, 0x22, 0x4b, 0x99, 0x61, 0x75, 0x28, 0xe7, 0x65, 0x1c, 0x36, 0xb6, 0x18, 0x09, 0xa8, 0xdf, 0xef, 0x30, 0x35, 0xbc, 0x58}, + }, + { + addYX: fp.Elt{0xc5, 0xd3, 0x0e, 0x6f, 0xaf, 0x06, 0x69, 0xc4, 0x07, 0x9e, 0x58, 0x6e, 0x3f, 0x49, 0xd9, 0x0a, 0x3c, 0x2c, 0x37, 0xcd, 0x27, 0x4d, 0x87, 0x91, 0x7a, 0xb0, 0x28, 0xad, 0x2f, 0x68, 0x92, 0x05, 0x97, 0xf1, 0x30, 0x5f, 0x4c, 0x10, 0x20, 0x30, 0xd3, 0x08, 0x3f, 0xc1, 0xc6, 0xb7, 0xb5, 0xd1, 0x71, 0x7b, 0xa8, 0x0a, 0xd8, 0xf5, 0x17, 0xcf}, + subYX: fp.Elt{0x64, 0xd4, 0x8f, 0x91, 0x40, 0xab, 0x6e, 0x1a, 0x62, 0x83, 0xdc, 0xd7, 0x30, 0x1a, 0x4a, 0x2a, 0x4c, 0x54, 0x86, 0x19, 0x81, 0x5d, 0x04, 0x52, 0xa3, 0xca, 0x82, 0x38, 0xdc, 0x1e, 0xf0, 0x7a, 0x78, 0x76, 0x49, 0x4f, 0x71, 0xc4, 0x74, 0x2f, 0xf0, 0x5b, 0x2e, 0x5e, 0xac, 0xef, 0x17, 0xe4, 0x8e, 0x6e, 0xed, 0x43, 0x23, 0x61, 0x99, 0x49}, + dt2: fp.Elt{0x64, 0x90, 0x72, 0x76, 0xf8, 0x2c, 0x7d, 0x57, 0xf9, 0x30, 0x5e, 0x7a, 0x10, 0x74, 0x19, 0x39, 0xd9, 0xaf, 0x0a, 0xf1, 0x43, 0xed, 0x88, 0x9c, 0x8b, 0xdc, 0x9b, 0x1c, 0x90, 0xe7, 0xf7, 0xa3, 0xa5, 0x0d, 0xc6, 0xbc, 0x30, 0xfb, 0x91, 0x1a, 0x51, 0xba, 0x2d, 0xbe, 0x89, 0xdf, 0x1d, 
0xdc, 0x53, 0xa8, 0x82, 0x8a, 0xd3, 0x8d, 0x16, 0x68}, + }, + { + addYX: fp.Elt{0xef, 0x5c, 0xe3, 0x74, 0xbf, 0x13, 0x4a, 0xbf, 0x66, 0x73, 0x64, 0xb7, 0xd4, 0xce, 0x98, 0x82, 0x05, 0xfa, 0x98, 0x0c, 0x0a, 0xae, 0xe5, 0x6b, 0x9f, 0xac, 0xbb, 0x6e, 0x1f, 0xcf, 0xff, 0xa6, 0x71, 0x9a, 0xa8, 0x7a, 0x9e, 0x64, 0x1f, 0x20, 0x4a, 0x61, 0xa2, 0xd6, 0x50, 0xe3, 0xba, 0x81, 0x0c, 0x50, 0x59, 0x69, 0x59, 0x15, 0x55, 0xdb}, + subYX: fp.Elt{0xe8, 0x77, 0x4d, 0xe8, 0x66, 0x3d, 0xc1, 0x00, 0x3c, 0xf2, 0x25, 0x00, 0xdc, 0xb2, 0xe5, 0x9b, 0x12, 0x89, 0xf3, 0xd6, 0xea, 0x85, 0x60, 0xfe, 0x67, 0x91, 0xfd, 0x04, 0x7c, 0xe0, 0xf1, 0x86, 0x06, 0x11, 0x66, 0xee, 0xd4, 0xd5, 0xbe, 0x3b, 0x0f, 0xe3, 0x59, 0xb3, 0x4f, 0x00, 0xb6, 0xce, 0x80, 0xc1, 0x61, 0xf7, 0xaf, 0x04, 0x6a, 0x3c}, + dt2: fp.Elt{0x00, 0xd7, 0x32, 0x93, 0x67, 0x70, 0x6f, 0xd7, 0x69, 0xab, 0xb1, 0xd3, 0xdc, 0xd6, 0xa8, 0xdd, 0x35, 0x25, 0xca, 0xd3, 0x8a, 0x6d, 0xce, 0xfb, 0xfd, 0x2b, 0x83, 0xf0, 0xd4, 0xac, 0x66, 0xfb, 0x72, 0x87, 0x7e, 0x55, 0xb7, 0x91, 0x58, 0x10, 0xc3, 0x11, 0x7e, 0x15, 0xfe, 0x7c, 0x55, 0x90, 0xa3, 0x9e, 0xed, 0x9a, 0x7f, 0xa7, 0xb7, 0xeb}, + }, + { + addYX: fp.Elt{0x25, 0x0f, 0xc2, 0x09, 0x9c, 0x10, 0xc8, 0x7c, 0x93, 0xa7, 0xbe, 0xe9, 0x26, 0x25, 0x7c, 0x21, 0xfe, 0xe7, 0x5f, 0x3c, 0x02, 0x83, 0xa7, 0x9e, 0xdf, 0xc0, 0x94, 0x2b, 0x7d, 0x1a, 0xd0, 0x1d, 0xcc, 0x2e, 0x7d, 0xd4, 0x85, 0xe7, 0xc1, 0x15, 0x66, 0xd6, 0xd6, 0x32, 0xb8, 0xf7, 0x63, 0xaa, 0x3b, 0xa5, 0xea, 0x49, 0xad, 0x88, 0x9b, 0x66}, + subYX: fp.Elt{0x09, 0x97, 0x79, 0x36, 0x41, 0x56, 0x9b, 0xdf, 0x15, 0xd8, 0x43, 0x28, 0x17, 0x5b, 0x96, 0xc9, 0xcf, 0x39, 0x1f, 0x13, 0xf7, 0x4d, 0x1d, 0x1f, 0xda, 0x51, 0x56, 0xe7, 0x0a, 0x5a, 0x65, 0xb6, 0x2a, 0x87, 0x49, 0x86, 0xc2, 0x2b, 0xcd, 0xfe, 0x07, 0xf6, 0x4c, 0xe2, 0x1d, 0x9b, 0xd8, 0x82, 0x09, 0x5b, 0x11, 0x10, 0x62, 0x56, 0x89, 0xbd}, + dt2: fp.Elt{0xd9, 0x15, 0x73, 0xf2, 0x96, 0x35, 0x53, 0xb0, 0xe7, 0xa8, 0x0b, 0x93, 0x35, 0x0b, 0x3a, 0x00, 0xf5, 0x18, 0xb1, 0xc3, 0x12, 0x3f, 0x91, 0x17, 0xc1, 0x4c, 0x15, 0x5a, 0x86, 0x92, 0x11, 0xbd, 0x44, 0x40, 0x5a, 0x7b, 0x15, 0x89, 0xba, 0xc1, 0xc1, 0xbc, 0x43, 0x45, 0xe6, 0x52, 0x02, 0x73, 0x0a, 0xd0, 0x2a, 0x19, 0xda, 0x47, 0xa8, 0xff}, + }, + }, +} + +// tabVerif contains the odd multiples of P. The entry T[i] = (2i+1)P, where +// P = phi(G) and G is the generator of the Goldilocks curve, and phi is a +// 4-degree isogeny. 
+var tabVerif = [1 << (omegaFix - 2)]preTwistPointAffine{ + { /* 1P*/ + addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05}, + dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55}, + }, + { /* 3P*/ + addYX: fp.Elt{0xd1, 0xe9, 0xa8, 0x33, 0x20, 0x76, 0x18, 0x08, 0x45, 0x2a, 0xc9, 0x67, 0x2a, 0xc3, 0x15, 0x24, 0xf9, 0x74, 0x21, 0x30, 0x99, 0x59, 0x8b, 0xb2, 0xf0, 0xa4, 0x07, 0xe2, 0x6a, 0x36, 0x8d, 0xd9, 0xd2, 0x4a, 0x7f, 0x73, 0x50, 0x39, 0x3d, 0xaa, 0xa7, 0x51, 0x73, 0x0d, 0x2b, 0x8b, 0x96, 0x47, 0xac, 0x3c, 0x5d, 0xaa, 0x39, 0x9c, 0xcf, 0xd5}, + subYX: fp.Elt{0x6b, 0x11, 0x5d, 0x1a, 0xf9, 0x41, 0x9d, 0xc5, 0x30, 0x3e, 0xad, 0x25, 0x2c, 0x04, 0x45, 0xea, 0xcc, 0x67, 0x07, 0x85, 0xe9, 0xda, 0x0e, 0xb5, 0x40, 0xb7, 0x32, 0xb4, 0x49, 0xdd, 0xff, 0xaa, 0xfc, 0xbb, 0x19, 0xca, 0x8b, 0x79, 0x2b, 0x8f, 0x8d, 0x00, 0x33, 0xc2, 0xad, 0xe9, 0xd3, 0x12, 0xa8, 0xaa, 0x87, 0x62, 0xad, 0x2d, 0xff, 0xa4}, + dt2: fp.Elt{0xb0, 0xaf, 0x3b, 0xea, 0xf0, 0x42, 0x0b, 0x5e, 0x88, 0xd3, 0x98, 0x08, 0x87, 0x59, 0x72, 0x0a, 0xc2, 0xdf, 0xcb, 0x7f, 0x59, 0xb5, 0x4c, 0x63, 0x68, 0xe8, 0x41, 0x38, 0x67, 0x4f, 0xe9, 0xc6, 0xb2, 0x6b, 0x08, 0xa7, 0xf7, 0x0e, 0xcd, 0xea, 0xca, 0x3d, 0xaf, 0x8e, 0xda, 0x4b, 0x2e, 0xd2, 0x88, 0x64, 0x8d, 0xc5, 0x5f, 0x76, 0x0f, 0x3d}, + }, + { /* 5P*/ + addYX: fp.Elt{0xe5, 0x65, 0xc9, 0xe2, 0x75, 0xf0, 0x7d, 0x1a, 0xba, 0xa4, 0x40, 0x4b, 0x93, 0x12, 0xa2, 0x80, 0x95, 0x0d, 0x03, 0x93, 0xe8, 0xa5, 0x4d, 0xe2, 0x3d, 0x81, 0xf5, 0xce, 0xd4, 0x2d, 0x25, 0x59, 0x16, 0x5c, 0xe7, 0xda, 0xc7, 0x45, 0xd2, 0x7e, 0x2c, 0x38, 0xd4, 0x37, 0x64, 0xb2, 0xc2, 0x28, 0xc5, 0x72, 0x16, 0x32, 0x45, 0x36, 0x6f, 0x9f}, + subYX: fp.Elt{0x09, 0xf4, 0x7e, 0xbd, 0x89, 0xdb, 0x19, 0x58, 0xe1, 0x08, 0x00, 0x8a, 0xf4, 0x5f, 0x2a, 0x32, 0x40, 0xf0, 0x2c, 0x3f, 0x5d, 0xe4, 0xfc, 0x89, 0x11, 0x24, 0xb4, 0x2f, 0x97, 0xad, 0xac, 0x8f, 0x19, 0xab, 0xfa, 0x12, 0xe5, 0xf9, 0x50, 0x4e, 0x50, 0x6f, 0x32, 0x30, 0x88, 0xa6, 0xe5, 0x48, 0x28, 0xa2, 0x1b, 0x9f, 0xcd, 0xe2, 0x43, 0x38}, + dt2: fp.Elt{0xa9, 0xcc, 0x53, 0x39, 0x86, 0x02, 0x60, 0x75, 0x34, 0x99, 0x57, 0xbd, 0xfc, 0x5a, 0x8e, 0xce, 0x5e, 0x98, 0x22, 0xd0, 0xa5, 0x24, 0xff, 0x90, 0x28, 0x9f, 0x58, 0xf3, 0x39, 0xe9, 0xba, 0x36, 0x23, 0xfb, 0x7f, 0x41, 0xcc, 0x2b, 0x5a, 0x25, 0x3f, 0x4c, 0x2a, 0xf1, 0x52, 0x6f, 0x2f, 0x07, 0xe3, 0x88, 0x81, 0x77, 0xdd, 0x7c, 0x88, 0x82}, + }, + { /* 7P*/ + addYX: fp.Elt{0xf7, 0xee, 0x88, 0xfd, 0x3a, 0xbf, 0x7e, 0x28, 0x39, 0x23, 0x79, 0xe6, 0x5c, 0x56, 0xcb, 0xb5, 0x48, 0x6a, 0x80, 0x6d, 0x37, 0x60, 0x6c, 0x10, 0x35, 0x49, 0x4b, 0x46, 0x60, 0xd4, 0x79, 0xd4, 0x53, 0xd3, 0x67, 0x88, 0xd0, 0x41, 0xd5, 0x43, 0x85, 
0xc8, 0x71, 0xe3, 0x1c, 0xb6, 0xda, 0x22, 0x64, 0x8f, 0x80, 0xac, 0xad, 0x7d, 0xd5, 0x82}, + subYX: fp.Elt{0x92, 0x40, 0xc1, 0x83, 0x21, 0x9b, 0xd5, 0x7d, 0x3f, 0x29, 0xb6, 0x26, 0xef, 0x12, 0xb9, 0x27, 0x39, 0x42, 0x37, 0x97, 0x09, 0x9a, 0x08, 0xe1, 0x68, 0xb6, 0x7a, 0x3f, 0x9f, 0x45, 0xf8, 0x37, 0x19, 0x83, 0x97, 0xe6, 0x73, 0x30, 0x32, 0x35, 0xcf, 0xae, 0x5c, 0x12, 0x68, 0xdf, 0x6e, 0x2b, 0xde, 0x83, 0xa0, 0x44, 0x74, 0x2e, 0x4a, 0xe9}, + dt2: fp.Elt{0xcb, 0x22, 0x0a, 0xda, 0x6b, 0xc1, 0x8a, 0x29, 0xa1, 0xac, 0x8b, 0x5b, 0x8b, 0x32, 0x20, 0xf2, 0x21, 0xae, 0x0c, 0x43, 0xc4, 0xd7, 0x19, 0x37, 0x3d, 0x79, 0x25, 0x98, 0x6c, 0x9c, 0x22, 0x31, 0x2a, 0x55, 0x9f, 0xda, 0x5e, 0xa8, 0x13, 0xdb, 0x8e, 0x2e, 0x16, 0x39, 0xf4, 0x91, 0x6f, 0xec, 0x71, 0x71, 0xc9, 0x10, 0xf2, 0xa4, 0x8f, 0x11}, + }, + { /* 9P*/ + addYX: fp.Elt{0x85, 0xdd, 0x37, 0x62, 0x74, 0x8e, 0x33, 0x5b, 0x25, 0x12, 0x1b, 0xe7, 0xdf, 0x47, 0xe5, 0x12, 0xfd, 0x3a, 0x3a, 0xf5, 0x5d, 0x4c, 0xa2, 0x29, 0x3c, 0x5c, 0x2f, 0xee, 0x18, 0x19, 0x0a, 0x2b, 0xef, 0x67, 0x50, 0x7a, 0x0d, 0x29, 0xae, 0x55, 0x82, 0xcd, 0xd6, 0x41, 0x90, 0xb4, 0x13, 0x31, 0x5d, 0x11, 0xb8, 0xaa, 0x12, 0x86, 0x08, 0xac}, + subYX: fp.Elt{0xcc, 0x37, 0x8d, 0x83, 0x5f, 0xfd, 0xde, 0xd5, 0xf7, 0xf1, 0xae, 0x0a, 0xa7, 0x0b, 0xeb, 0x6d, 0x19, 0x8a, 0xb6, 0x1a, 0x59, 0xd8, 0xff, 0x3c, 0xbc, 0xbc, 0xef, 0x9c, 0xda, 0x7b, 0x75, 0x12, 0xaf, 0x80, 0x8f, 0x2c, 0x3c, 0xaa, 0x0b, 0x17, 0x86, 0x36, 0x78, 0x18, 0xc8, 0x8a, 0xf6, 0xb8, 0x2c, 0x2f, 0x57, 0x2c, 0x62, 0x57, 0xf6, 0x90}, + dt2: fp.Elt{0x83, 0xbc, 0xa2, 0x07, 0xa5, 0x38, 0x96, 0xea, 0xfe, 0x11, 0x46, 0x1d, 0x3b, 0xcd, 0x42, 0xc5, 0xee, 0x67, 0x04, 0x72, 0x08, 0xd8, 0xd9, 0x96, 0x07, 0xf7, 0xac, 0xc3, 0x64, 0xf1, 0x98, 0x2c, 0x55, 0xd7, 0x7d, 0xc8, 0x6c, 0xbd, 0x2c, 0xff, 0x15, 0xd6, 0x6e, 0xb8, 0x17, 0x8e, 0xa8, 0x27, 0x66, 0xb1, 0x73, 0x79, 0x96, 0xff, 0x29, 0x10}, + }, + { /* 11P*/ + addYX: fp.Elt{0x76, 0xcb, 0x9b, 0x0c, 0x5b, 0xfe, 0xe1, 0x2a, 0xdd, 0x6f, 0x6c, 0xdd, 0x6f, 0xb4, 0xc0, 0xc2, 0x1b, 0x4b, 0x38, 0xe8, 0x66, 0x8c, 0x1e, 0x31, 0x63, 0xb9, 0x94, 0xcd, 0xc3, 0x8c, 0x44, 0x25, 0x7b, 0xd5, 0x39, 0x80, 0xfc, 0x01, 0xaa, 0xf7, 0x2a, 0x61, 0x8a, 0x25, 0xd2, 0x5f, 0xc5, 0x66, 0x38, 0xa4, 0x17, 0xcf, 0x3e, 0x11, 0x0f, 0xa3}, + subYX: fp.Elt{0xe0, 0xb6, 0xd1, 0x9c, 0x71, 0x49, 0x2e, 0x7b, 0xde, 0x00, 0xda, 0x6b, 0xf1, 0xec, 0xe6, 0x7a, 0x15, 0x38, 0x71, 0xe9, 0x7b, 0xdb, 0xf8, 0x98, 0xc0, 0x91, 0x2e, 0x53, 0xee, 0x92, 0x87, 0x25, 0xc9, 0xb0, 0xbb, 0x33, 0x15, 0x46, 0x7f, 0xfd, 0x4f, 0x8b, 0x77, 0x05, 0x96, 0xb6, 0xe2, 0x08, 0xdb, 0x0d, 0x09, 0xee, 0x5b, 0xd1, 0x2a, 0x63}, + dt2: fp.Elt{0x8f, 0x7b, 0x57, 0x8c, 0xbf, 0x06, 0x0d, 0x43, 0x21, 0x92, 0x94, 0x2d, 0x6a, 0x38, 0x07, 0x0f, 0xa0, 0xf1, 0xe3, 0xd8, 0x2a, 0xbf, 0x46, 0xc6, 0x9e, 0x1f, 0x8f, 0x2b, 0x46, 0x84, 0x0b, 0x74, 0xed, 0xff, 0xf8, 0xa5, 0x94, 0xae, 0xf1, 0x67, 0xb1, 0x9b, 0xdd, 0x4a, 0xd0, 0xdb, 0xc2, 0xb5, 0x58, 0x49, 0x0c, 0xa9, 0x1d, 0x7d, 0xa9, 0xd3}, + }, + { /* 13P*/ + addYX: fp.Elt{0x73, 0x84, 0x2e, 0x31, 0x1f, 0xdc, 0xed, 0x9f, 0x74, 0xfa, 0xe0, 0x35, 0xb1, 0x85, 0x6a, 0x8d, 0x86, 0xd0, 0xff, 0xd6, 0x08, 0x43, 0x73, 0x1a, 0xd5, 0xf8, 0x43, 0xd4, 0xb3, 0xe5, 0x3f, 0xa8, 0x84, 0x17, 0x59, 0x65, 0x4e, 0xe6, 0xee, 0x54, 0x9c, 0xda, 0x5e, 0x7e, 0x98, 0x29, 0x6d, 0x73, 0x34, 0x1f, 0x99, 0x80, 0x54, 0x54, 0x81, 0x0b}, + subYX: fp.Elt{0xb1, 0xe5, 0xbb, 0x80, 0x22, 0x9c, 0x81, 0x6d, 0xaf, 0x27, 0x65, 0x6f, 0x7e, 0x9c, 0xb6, 0x8d, 0x35, 0x5c, 0x2e, 0x20, 0x48, 0x7a, 0x28, 0xf0, 0x97, 0xfe, 0xb7, 0x71, 0xce, 0xd6, 0xad, 0x3a, 0x81, 0xf6, 0x74, 0x5e, 0xf3, 
0xfd, 0x1b, 0xd4, 0x1e, 0x7c, 0xc2, 0xb7, 0xc8, 0xa6, 0xc9, 0x89, 0x03, 0x47, 0xec, 0x24, 0xd6, 0x0e, 0xec, 0x9c}, + dt2: fp.Elt{0x91, 0x0a, 0x43, 0x34, 0x20, 0xc2, 0x64, 0xf7, 0x4e, 0x48, 0xc8, 0xd2, 0x95, 0x83, 0xd1, 0xa4, 0xfb, 0x4e, 0x41, 0x3b, 0x0d, 0xd5, 0x07, 0xd9, 0xf1, 0x13, 0x16, 0x78, 0x54, 0x57, 0xd0, 0xf1, 0x4f, 0x20, 0xac, 0xcf, 0x9c, 0x3b, 0x33, 0x0b, 0x99, 0x54, 0xc3, 0x7f, 0x3e, 0x57, 0x26, 0x86, 0xd5, 0xa5, 0x2b, 0x8d, 0xe3, 0x19, 0x36, 0xf7}, + }, + { /* 15P*/ + addYX: fp.Elt{0x23, 0x69, 0x47, 0x14, 0xf9, 0x9a, 0x50, 0xff, 0x64, 0xd1, 0x50, 0x35, 0xc3, 0x11, 0xd3, 0x19, 0xcf, 0x87, 0xda, 0x30, 0x0b, 0x50, 0xda, 0xc0, 0xe0, 0x25, 0x00, 0xe5, 0x68, 0x93, 0x04, 0xc2, 0xaf, 0xbd, 0x2f, 0x36, 0x5f, 0x47, 0x96, 0x10, 0xa8, 0xbd, 0xe4, 0x88, 0xac, 0x80, 0x52, 0x61, 0x73, 0xe9, 0x63, 0xdd, 0x99, 0xad, 0x20, 0x5b}, + subYX: fp.Elt{0x1b, 0x5e, 0xa2, 0x2a, 0x25, 0x0f, 0x86, 0xc0, 0xb1, 0x2e, 0x0c, 0x13, 0x40, 0x8d, 0xf0, 0xe6, 0x00, 0x55, 0x08, 0xc5, 0x7d, 0xf4, 0xc9, 0x31, 0x25, 0x3a, 0x99, 0x69, 0xdd, 0x67, 0x63, 0x9a, 0xd6, 0x89, 0x2e, 0xa1, 0x19, 0xca, 0x2c, 0xd9, 0x59, 0x5f, 0x5d, 0xc3, 0x6e, 0x62, 0x36, 0x12, 0x59, 0x15, 0xe1, 0xdc, 0xa4, 0xad, 0xc9, 0xd0}, + dt2: fp.Elt{0xbc, 0xea, 0xfc, 0xaf, 0x66, 0x23, 0xb7, 0x39, 0x6b, 0x2a, 0x96, 0xa8, 0x54, 0x43, 0xe9, 0xaa, 0x32, 0x40, 0x63, 0x92, 0x5e, 0xdf, 0x35, 0xc2, 0x9f, 0x24, 0x0c, 0xed, 0xfc, 0xde, 0x73, 0x8f, 0xa7, 0xd5, 0xa3, 0x2b, 0x18, 0x1f, 0xb0, 0xf8, 0xeb, 0x55, 0xd9, 0xc3, 0xfd, 0x28, 0x7c, 0x4f, 0xce, 0x0d, 0xf7, 0xae, 0xc2, 0x83, 0xc3, 0x78}, + }, + { /* 17P*/ + addYX: fp.Elt{0x71, 0xe6, 0x60, 0x93, 0x37, 0xdb, 0x01, 0xa5, 0x4c, 0xba, 0xe8, 0x8e, 0xd5, 0xf9, 0xd3, 0x98, 0xe5, 0xeb, 0xab, 0x3a, 0x15, 0x8b, 0x35, 0x60, 0xbe, 0xe5, 0x9c, 0x2d, 0x10, 0x9b, 0x2e, 0xcf, 0x65, 0x64, 0xea, 0x8f, 0x72, 0xce, 0xf5, 0x18, 0xe5, 0xe2, 0xf0, 0x0e, 0xae, 0x04, 0xec, 0xa0, 0x20, 0x65, 0x63, 0x07, 0xb1, 0x9f, 0x03, 0x97}, + subYX: fp.Elt{0x9e, 0x41, 0x64, 0x30, 0x95, 0x7f, 0x3a, 0x89, 0x7b, 0x0a, 0x79, 0x59, 0x23, 0x9a, 0x3b, 0xfe, 0xa4, 0x13, 0x08, 0xb2, 0x2e, 0x04, 0x50, 0x10, 0x30, 0xcd, 0x2e, 0xa4, 0x91, 0x71, 0x50, 0x36, 0x4a, 0x02, 0xf4, 0x8d, 0xa3, 0x36, 0x1b, 0xf4, 0x52, 0xba, 0x15, 0x04, 0x8b, 0x80, 0x25, 0xd9, 0xae, 0x67, 0x20, 0xd9, 0x88, 0x8f, 0x97, 0xa6}, + dt2: fp.Elt{0xb5, 0xe7, 0x46, 0xbd, 0x55, 0x23, 0xa0, 0x68, 0xc0, 0x12, 0xd9, 0xf1, 0x0a, 0x75, 0xe2, 0xda, 0xf4, 0x6b, 0xca, 0x14, 0xe4, 0x9f, 0x0f, 0xb5, 0x3c, 0xa6, 0xa5, 0xa2, 0x63, 0x94, 0xd1, 0x1c, 0x39, 0x58, 0x57, 0x02, 0x27, 0x98, 0xb6, 0x47, 0xc6, 0x61, 0x4b, 0x5c, 0xab, 0x6f, 0x2d, 0xab, 0xe3, 0xc1, 0x69, 0xf9, 0x12, 0xb0, 0xc8, 0xd5}, + }, + { /* 19P*/ + addYX: fp.Elt{0x19, 0x7d, 0xd5, 0xac, 0x79, 0xa2, 0x82, 0x9b, 0x28, 0x31, 0x22, 0xc0, 0x73, 0x02, 0x76, 0x17, 0x10, 0x70, 0x79, 0x57, 0xc9, 0x84, 0x62, 0x8e, 0x04, 0x04, 0x61, 0x67, 0x08, 0x48, 0xb4, 0x4b, 0xde, 0x53, 0x8c, 0xff, 0x36, 0x1b, 0x62, 0x86, 0x5d, 0xe1, 0x9b, 0xb1, 0xe5, 0xe8, 0x44, 0x64, 0xa1, 0x68, 0x3f, 0xa8, 0x45, 0x52, 0x91, 0xed}, + subYX: fp.Elt{0x42, 0x1a, 0x36, 0x1f, 0x90, 0x15, 0x24, 0x8d, 0x24, 0x80, 0xe6, 0xfe, 0x1e, 0xf0, 0xad, 0xaf, 0x6a, 0x93, 0xf0, 0xa6, 0x0d, 0x5d, 0xea, 0xf6, 0x62, 0x96, 0x7a, 0x05, 0x76, 0x85, 0x74, 0x32, 0xc7, 0xc8, 0x64, 0x53, 0x62, 0xe7, 0x54, 0x84, 0xe0, 0x40, 0x66, 0x19, 0x70, 0x40, 0x95, 0x35, 0x68, 0x64, 0x43, 0xcd, 0xba, 0x29, 0x32, 0xa8}, + dt2: fp.Elt{0x3e, 0xf6, 0xd6, 0xe4, 0x99, 0xeb, 0x20, 0x66, 0x08, 0x2e, 0x26, 0x64, 0xd7, 0x76, 0xf3, 0xb4, 0xc5, 0xa4, 0x35, 0x92, 0xd2, 0x99, 0x70, 0x5a, 0x1a, 0xe9, 0xe9, 0x3d, 0x3b, 0xe1, 0xcd, 0x0e, 0xee, 
0x24, 0x13, 0x03, 0x22, 0xd6, 0xd6, 0x72, 0x08, 0x2b, 0xde, 0xfd, 0x93, 0xed, 0x0c, 0x7f, 0x5e, 0x31, 0x22, 0x4d, 0x80, 0x78, 0xc0, 0x48}, + }, + { /* 21P*/ + addYX: fp.Elt{0x8f, 0x72, 0xd2, 0x9e, 0xc4, 0xcd, 0x2c, 0xbf, 0xa8, 0xd3, 0x24, 0x62, 0x28, 0xee, 0x39, 0x0a, 0x19, 0x3a, 0x58, 0xff, 0x21, 0x2e, 0x69, 0x6c, 0x6e, 0x18, 0xd0, 0xcd, 0x61, 0xc1, 0x18, 0x02, 0x5a, 0xe9, 0xe3, 0xef, 0x1f, 0x8e, 0x10, 0xe8, 0x90, 0x2b, 0x48, 0xcd, 0xee, 0x38, 0xbd, 0x3a, 0xca, 0xbc, 0x2d, 0xe2, 0x3a, 0x03, 0x71, 0x02}, + subYX: fp.Elt{0xf8, 0xa4, 0x32, 0x26, 0x66, 0xaf, 0x3b, 0x53, 0xe7, 0xb0, 0x91, 0x92, 0xf5, 0x3c, 0x74, 0xce, 0xf2, 0xdd, 0x68, 0xa9, 0xf4, 0xcd, 0x5f, 0x60, 0xab, 0x71, 0xdf, 0xcd, 0x5c, 0x5d, 0x51, 0x72, 0x3a, 0x96, 0xea, 0xd6, 0xde, 0x54, 0x8e, 0x55, 0x4c, 0x08, 0x4c, 0x60, 0xdd, 0x34, 0xa9, 0x6f, 0xf3, 0x04, 0x02, 0xa8, 0xa6, 0x4e, 0x4d, 0x62}, + dt2: fp.Elt{0x76, 0x4a, 0xae, 0x38, 0x62, 0x69, 0x72, 0xdc, 0xe8, 0x43, 0xbe, 0x1d, 0x61, 0xde, 0x31, 0xc3, 0x42, 0x8f, 0x33, 0x9d, 0xca, 0xc7, 0x9c, 0xec, 0x6a, 0xe2, 0xaa, 0x01, 0x49, 0x78, 0x8d, 0x72, 0x4f, 0x38, 0xea, 0x52, 0xc2, 0xd3, 0xc9, 0x39, 0x71, 0xba, 0xb9, 0x09, 0x9b, 0xa3, 0x7f, 0x45, 0x43, 0x65, 0x36, 0x29, 0xca, 0xe7, 0x5c, 0x5f}, + }, + { /* 23P*/ + addYX: fp.Elt{0x89, 0x42, 0x35, 0x48, 0x6d, 0x74, 0xe5, 0x1f, 0xc3, 0xdd, 0x28, 0x5b, 0x84, 0x41, 0x33, 0x9f, 0x42, 0xf3, 0x1d, 0x5d, 0x15, 0x6d, 0x76, 0x33, 0x36, 0xaf, 0xe9, 0xdd, 0xfa, 0x63, 0x4f, 0x7a, 0x9c, 0xeb, 0x1c, 0x4f, 0x34, 0x65, 0x07, 0x54, 0xbb, 0x4c, 0x8b, 0x62, 0x9d, 0xd0, 0x06, 0x99, 0xb3, 0xe9, 0xda, 0x85, 0x19, 0xb0, 0x3d, 0x3c}, + subYX: fp.Elt{0xbb, 0x99, 0xf6, 0xbf, 0xaf, 0x2c, 0x22, 0x0d, 0x7a, 0xaa, 0x98, 0x6f, 0x01, 0x82, 0x99, 0xcf, 0x88, 0xbd, 0x0e, 0x3a, 0x89, 0xe0, 0x9c, 0x8c, 0x17, 0x20, 0xc4, 0xe0, 0xcf, 0x43, 0x7a, 0xef, 0x0d, 0x9f, 0x87, 0xd4, 0xfb, 0xf2, 0x96, 0xb8, 0x03, 0xe8, 0xcb, 0x5c, 0xec, 0x65, 0x5f, 0x49, 0xa4, 0x7c, 0x85, 0xb4, 0xf6, 0xc7, 0xdb, 0xa3}, + dt2: fp.Elt{0x11, 0xf3, 0x32, 0xa3, 0xa7, 0xb2, 0x7d, 0x51, 0x82, 0x44, 0xeb, 0xa2, 0x7d, 0x72, 0xcb, 0xc6, 0xf6, 0xc7, 0xb2, 0x38, 0x0e, 0x0f, 0x4f, 0x29, 0x00, 0xe4, 0x5b, 0x94, 0x46, 0x86, 0x66, 0xa1, 0x83, 0xb3, 0xeb, 0x15, 0xb6, 0x31, 0x50, 0x28, 0xeb, 0xed, 0x0d, 0x32, 0x39, 0xe9, 0x23, 0x81, 0x99, 0x3e, 0xff, 0x17, 0x4c, 0x11, 0x43, 0xd1}, + }, + { /* 25P*/ + addYX: fp.Elt{0xce, 0xe7, 0xf8, 0x94, 0x8f, 0x96, 0xf8, 0x96, 0xe6, 0x72, 0x20, 0x44, 0x2c, 0xa7, 0xfc, 0xba, 0xc8, 0xe1, 0xbb, 0xc9, 0x16, 0x85, 0xcd, 0x0b, 0xe5, 0xb5, 0x5a, 0x7f, 0x51, 0x43, 0x63, 0x8b, 0x23, 0x8e, 0x1d, 0x31, 0xff, 0x46, 0x02, 0x66, 0xcc, 0x9e, 0x4d, 0xa2, 0xca, 0xe2, 0xc7, 0xfd, 0x22, 0xb1, 0xdb, 0xdf, 0x6f, 0xe6, 0xa5, 0x82}, + subYX: fp.Elt{0xd0, 0xf5, 0x65, 0x40, 0xec, 0x8e, 0x65, 0x42, 0x78, 0xc1, 0x65, 0xe4, 0x10, 0xc8, 0x0b, 0x1b, 0xdd, 0x96, 0x68, 0xce, 0xee, 0x45, 0x55, 0xd8, 0x6e, 0xd3, 0xe6, 0x77, 0x19, 0xae, 0xc2, 0x8d, 0x8d, 0x3e, 0x14, 0x3f, 0x6d, 0x00, 0x2f, 0x9b, 0xd1, 0x26, 0x60, 0x28, 0x0f, 0x3a, 0x47, 0xb3, 0xe6, 0x68, 0x28, 0x24, 0x25, 0xca, 0xc8, 0x06}, + dt2: fp.Elt{0x54, 0xbb, 0x60, 0x92, 0xdb, 0x8f, 0x0f, 0x38, 0xe0, 0xe6, 0xe4, 0xc9, 0xcc, 0x14, 0x62, 0x01, 0xc4, 0x2b, 0x0f, 0xcf, 0xed, 0x7d, 0x8e, 0xa4, 0xd9, 0x73, 0x0b, 0xba, 0x0c, 0xaf, 0x0c, 0xf9, 0xe2, 0xeb, 0x29, 0x2a, 0x53, 0xdf, 0x2c, 0x5a, 0xfa, 0x8f, 0xc1, 0x01, 0xd7, 0xb1, 0x45, 0x73, 0x92, 0x32, 0x83, 0x85, 0x12, 0x74, 0x89, 0x44}, + }, + { /* 27P*/ + addYX: fp.Elt{0x0b, 0x73, 0x3c, 0xc2, 0xb1, 0x2e, 0xe1, 0xa7, 0xf5, 0xc9, 0x7a, 0xfb, 0x3d, 0x2d, 0xac, 0x59, 0xdb, 0xfa, 0x36, 0x11, 0xd1, 0x13, 0x04, 0x51, 0x1d, 0xab, 
0x9b, 0x6b, 0x93, 0xfe, 0xda, 0xb0, 0x8e, 0xb4, 0x79, 0x11, 0x21, 0x0f, 0x65, 0xb9, 0xbb, 0x79, 0x96, 0x2a, 0xfd, 0x30, 0xe0, 0xb4, 0x2d, 0x9a, 0x55, 0x25, 0x5d, 0xd4, 0xad, 0x2a}, + subYX: fp.Elt{0x9e, 0xc5, 0x04, 0xfe, 0xec, 0x3c, 0x64, 0x1c, 0xed, 0x95, 0xed, 0xae, 0xaf, 0x5c, 0x6e, 0x08, 0x9e, 0x02, 0x29, 0x59, 0x7e, 0x5f, 0xc4, 0x9a, 0xd5, 0x32, 0x72, 0x86, 0xe1, 0x4e, 0x3c, 0xce, 0x99, 0x69, 0x3b, 0xc4, 0xdd, 0x4d, 0xb7, 0xbb, 0xda, 0x3b, 0x1a, 0x99, 0xaa, 0x62, 0x15, 0xc1, 0xf0, 0xb6, 0x6c, 0xec, 0x56, 0xc1, 0xff, 0x0c}, + dt2: fp.Elt{0x2f, 0xf1, 0x3f, 0x7a, 0x2d, 0x56, 0x19, 0x7f, 0xea, 0xbe, 0x59, 0x2e, 0x13, 0x67, 0x81, 0xfb, 0xdb, 0xc8, 0xa3, 0x1d, 0xd5, 0xe9, 0x13, 0x8b, 0x29, 0xdf, 0xcf, 0x9f, 0xe7, 0xd9, 0x0b, 0x70, 0xd3, 0x15, 0x57, 0x4a, 0xe9, 0x50, 0x12, 0x1b, 0x81, 0x4b, 0x98, 0x98, 0xa8, 0x31, 0x1d, 0x27, 0x47, 0x38, 0xed, 0x57, 0x99, 0x26, 0xb2, 0xee}, + }, + { /* 29P*/ + addYX: fp.Elt{0x1c, 0xb2, 0xb2, 0x67, 0x3b, 0x8b, 0x3d, 0x5a, 0x30, 0x7e, 0x38, 0x7e, 0x3c, 0x3d, 0x28, 0x56, 0x59, 0xd8, 0x87, 0x53, 0x8b, 0xe6, 0x6c, 0x5d, 0xe5, 0x0a, 0x33, 0x10, 0xce, 0xa2, 0x17, 0x0d, 0xe8, 0x76, 0xee, 0x68, 0xa8, 0x72, 0x54, 0xbd, 0xa6, 0x24, 0x94, 0x6e, 0x77, 0xc7, 0x53, 0xb7, 0x89, 0x1c, 0x7a, 0xe9, 0x78, 0x9a, 0x74, 0x5f}, + subYX: fp.Elt{0x76, 0x96, 0x1c, 0xcf, 0x08, 0x55, 0xd8, 0x1e, 0x0d, 0xa3, 0x59, 0x95, 0x32, 0xf4, 0xc2, 0x8e, 0x84, 0x5e, 0x4b, 0x04, 0xda, 0x71, 0xc9, 0x78, 0x52, 0xde, 0x14, 0xb4, 0x31, 0xf4, 0xd4, 0xb8, 0x58, 0xc5, 0x20, 0xe8, 0xdd, 0x15, 0xb5, 0xee, 0xea, 0x61, 0xe0, 0xf5, 0xd6, 0xae, 0x55, 0x59, 0x05, 0x3e, 0xaf, 0x74, 0xac, 0x1f, 0x17, 0x82}, + dt2: fp.Elt{0x59, 0x24, 0xcd, 0xfc, 0x11, 0x7e, 0x85, 0x18, 0x3d, 0x69, 0xf7, 0x71, 0x31, 0x66, 0x98, 0x42, 0x95, 0x00, 0x8c, 0xb2, 0xae, 0x39, 0x7e, 0x85, 0xd6, 0xb0, 0x02, 0xec, 0xce, 0xfc, 0x25, 0xb2, 0xe3, 0x99, 0x8e, 0x5b, 0x61, 0x96, 0x2e, 0x6d, 0x96, 0x57, 0x71, 0xa5, 0x93, 0x41, 0x0e, 0x6f, 0xfd, 0x0a, 0xbf, 0xa9, 0xf7, 0x56, 0xa9, 0x3e}, + }, + { /* 31P*/ + addYX: fp.Elt{0xa2, 0x2e, 0x0c, 0x17, 0x4d, 0xcc, 0x85, 0x2c, 0x18, 0xa0, 0xd2, 0x08, 0xba, 0x11, 0xfa, 0x47, 0x71, 0x86, 0xaf, 0x36, 0x6a, 0xd7, 0xfe, 0xb9, 0xb0, 0x2f, 0x89, 0x98, 0x49, 0x69, 0xf8, 0x6a, 0xad, 0x27, 0x5e, 0x0a, 0x22, 0x60, 0x5e, 0x5d, 0xca, 0x06, 0x51, 0x27, 0x99, 0x29, 0x85, 0x68, 0x98, 0xe1, 0xc4, 0x21, 0x50, 0xa0, 0xe9, 0xc1}, + subYX: fp.Elt{0x4d, 0x70, 0xee, 0x91, 0x92, 0x3f, 0xb7, 0xd3, 0x1d, 0xdb, 0x8d, 0x6e, 0x16, 0xf5, 0x65, 0x7d, 0x5f, 0xb5, 0x6c, 0x59, 0x26, 0x70, 0x4b, 0xf2, 0xfc, 0xe7, 0xdf, 0x86, 0xfe, 0xa5, 0xa7, 0xa6, 0x5d, 0xfb, 0x06, 0xe9, 0xf9, 0xcc, 0xc0, 0x37, 0xcc, 0xd8, 0x09, 0x04, 0xd2, 0xa5, 0x1d, 0xd7, 0xb7, 0xce, 0x92, 0xac, 0x3c, 0xad, 0xfb, 0xae}, + dt2: fp.Elt{0x17, 0xa3, 0x9a, 0xc7, 0x86, 0x2a, 0x51, 0xf7, 0x96, 0x79, 0x49, 0x22, 0x2e, 0x5a, 0x01, 0x5c, 0xb5, 0x95, 0xd4, 0xe8, 0xcb, 0x00, 0xca, 0x2d, 0x55, 0xb6, 0x34, 0x36, 0x0b, 0x65, 0x46, 0xf0, 0x49, 0xfc, 0x87, 0x86, 0xe5, 0xc3, 0x15, 0xdb, 0x32, 0xcd, 0xf2, 0xd3, 0x82, 0x4c, 0xe6, 0x61, 0x8a, 0xaf, 0xd4, 0x9e, 0x0f, 0x5a, 0xf2, 0x81}, + }, + { /* 33P*/ + addYX: fp.Elt{0x88, 0x10, 0xc0, 0xcb, 0xf5, 0x77, 0xae, 0xa5, 0xbe, 0xf6, 0xcd, 0x2e, 0x8b, 0x7e, 0xbd, 0x79, 0x62, 0x4a, 0xeb, 0x69, 0xc3, 0x28, 0xaa, 0x72, 0x87, 0xa9, 0x25, 0x87, 0x46, 0xea, 0x0e, 0x62, 0xa3, 0x6a, 0x1a, 0xe2, 0xba, 0xdc, 0x81, 0x10, 0x33, 0x01, 0xf6, 0x16, 0x89, 0x80, 0xc6, 0xcd, 0xdb, 0xdc, 0xba, 0x0e, 0x09, 0x4a, 0x35, 0x4a}, + subYX: fp.Elt{0x86, 0xb2, 0x2b, 0xd0, 0xb8, 0x4a, 0x6d, 0x66, 0x7b, 0x32, 0xdf, 0x3b, 0x1a, 0x19, 0x1f, 0x63, 0xee, 0x1f, 0x3d, 0x1c, 0x5c, 0x14, 
0x60, 0x5b, 0x72, 0x49, 0x07, 0xb1, 0x0d, 0x72, 0xc6, 0x35, 0xf0, 0xbc, 0x5e, 0xda, 0x80, 0x6b, 0x64, 0x5b, 0xe5, 0x34, 0x54, 0x39, 0xdd, 0xe6, 0x3c, 0xcb, 0xe5, 0x29, 0x32, 0x06, 0xc6, 0xb1, 0x96, 0x34}, + dt2: fp.Elt{0x85, 0x86, 0xf5, 0x84, 0x86, 0xe6, 0x77, 0x8a, 0x71, 0x85, 0x0c, 0x4f, 0x81, 0x5b, 0x29, 0x06, 0xb5, 0x2e, 0x26, 0x71, 0x07, 0x78, 0x07, 0xae, 0xbc, 0x95, 0x46, 0xc3, 0x65, 0xac, 0xe3, 0x76, 0x51, 0x7d, 0xd4, 0x85, 0x31, 0xe3, 0x43, 0xf3, 0x1b, 0x7c, 0xf7, 0x6b, 0x2c, 0xf8, 0x1c, 0xbb, 0x8d, 0xca, 0xab, 0x4b, 0xba, 0x7f, 0xa4, 0xe2}, + }, + { /* 35P*/ + addYX: fp.Elt{0x1a, 0xee, 0xe7, 0xa4, 0x8a, 0x9d, 0x53, 0x80, 0xc6, 0xb8, 0x4e, 0xdc, 0x89, 0xe0, 0xc4, 0x2b, 0x60, 0x52, 0x6f, 0xec, 0x81, 0xd2, 0x55, 0x6b, 0x1b, 0x6f, 0x17, 0x67, 0x8e, 0x42, 0x26, 0x4c, 0x65, 0x23, 0x29, 0xc6, 0x7b, 0xcd, 0x9f, 0xad, 0x4b, 0x42, 0xd3, 0x0c, 0x75, 0xc3, 0x8a, 0xf5, 0xbe, 0x9e, 0x55, 0xf7, 0x47, 0x5d, 0xbd, 0x3a}, + subYX: fp.Elt{0x0d, 0xa8, 0x3b, 0xf9, 0xc7, 0x7e, 0xc6, 0x86, 0x94, 0xc0, 0x01, 0xff, 0x27, 0xce, 0x43, 0xac, 0xe5, 0xe1, 0xd2, 0x8d, 0xc1, 0x22, 0x31, 0xbe, 0xe1, 0xaf, 0xf9, 0x4a, 0x78, 0xa1, 0x0c, 0xaa, 0xd4, 0x80, 0xe4, 0x09, 0x8d, 0xfb, 0x1d, 0x52, 0xc8, 0x60, 0x2d, 0xf2, 0xa2, 0x89, 0x02, 0x56, 0x3d, 0x56, 0x27, 0x85, 0xc7, 0xf0, 0x2b, 0x9a}, + dt2: fp.Elt{0x62, 0x7c, 0xc7, 0x6b, 0x2c, 0x9d, 0x0a, 0x7c, 0xe5, 0x50, 0x3c, 0xe6, 0x87, 0x1c, 0x82, 0x30, 0x67, 0x3c, 0x39, 0xb6, 0xa0, 0x31, 0xfb, 0x03, 0x7b, 0xa1, 0x58, 0xdf, 0x12, 0x76, 0x5d, 0x5d, 0x0a, 0x8f, 0x9b, 0x37, 0x32, 0xc3, 0x60, 0x33, 0xea, 0x9f, 0x0a, 0x99, 0xfa, 0x20, 0xd0, 0x33, 0x21, 0xc3, 0x94, 0xd4, 0x86, 0x49, 0x7c, 0x4e}, + }, + { /* 37P*/ + addYX: fp.Elt{0xc7, 0x0c, 0x71, 0xfe, 0x55, 0xd1, 0x95, 0x8f, 0x43, 0xbb, 0x6b, 0x74, 0x30, 0xbd, 0xe8, 0x6f, 0x1c, 0x1b, 0x06, 0x62, 0xf5, 0xfc, 0x65, 0xa0, 0xeb, 0x81, 0x12, 0xc9, 0x64, 0x66, 0x61, 0xde, 0xf3, 0x6d, 0xd4, 0xae, 0x8e, 0xb1, 0x72, 0xe0, 0xcd, 0x37, 0x01, 0x28, 0x52, 0xd7, 0x39, 0x46, 0x0c, 0x55, 0xcf, 0x47, 0x70, 0xef, 0xa1, 0x17}, + subYX: fp.Elt{0x8d, 0x58, 0xde, 0x83, 0x88, 0x16, 0x0e, 0x12, 0x42, 0x03, 0x50, 0x60, 0x4b, 0xdf, 0xbf, 0x95, 0xcc, 0x7d, 0x18, 0x17, 0x7e, 0x31, 0x5d, 0x8a, 0x66, 0xc1, 0xcf, 0x14, 0xea, 0xf4, 0xf4, 0xe5, 0x63, 0x2d, 0x32, 0x86, 0x9b, 0xed, 0x1f, 0x4f, 0x03, 0xaf, 0x33, 0x92, 0xcb, 0xaf, 0x9c, 0x05, 0x0d, 0x47, 0x1b, 0x42, 0xba, 0x13, 0x22, 0x98}, + dt2: fp.Elt{0xb5, 0x48, 0xeb, 0x7d, 0x3d, 0x10, 0x9f, 0x59, 0xde, 0xf8, 0x1c, 0x4f, 0x7d, 0x9d, 0x40, 0x4d, 0x9e, 0x13, 0x24, 0xb5, 0x21, 0x09, 0xb7, 0xee, 0x98, 0x5c, 0x56, 0xbc, 0x5e, 0x2b, 0x78, 0x38, 0x06, 0xac, 0xe3, 0xe0, 0xfa, 0x2e, 0xde, 0x4f, 0xd2, 0xb3, 0xfb, 0x2d, 0x71, 0x84, 0xd1, 0x9d, 0x12, 0x5b, 0x35, 0xc8, 0x03, 0x68, 0x67, 0xc7}, + }, + { /* 39P*/ + addYX: fp.Elt{0xb6, 0x65, 0xfb, 0xa7, 0x06, 0x35, 0xbb, 0xe0, 0x31, 0x8d, 0x91, 0x40, 0x98, 0xab, 0x30, 0xe4, 0xca, 0x12, 0x59, 0x89, 0xed, 0x65, 0x5d, 0x7f, 0xae, 0x69, 0xa0, 0xa4, 0xfa, 0x78, 0xb4, 0xf7, 0xed, 0xae, 0x86, 0x78, 0x79, 0x64, 0x24, 0xa6, 0xd4, 0xe1, 0xf6, 0xd3, 0xa0, 0x89, 0xba, 0x20, 0xf4, 0x54, 0x0d, 0x8f, 0xdb, 0x1a, 0x79, 0xdb}, + subYX: fp.Elt{0xe1, 0x82, 0x0c, 0x4d, 0xde, 0x9f, 0x40, 0xf0, 0xc1, 0xbd, 0x8b, 0xd3, 0x24, 0x03, 0xcd, 0xf2, 0x92, 0x7d, 0xe2, 0x68, 0x7f, 0xf1, 0xbe, 0x69, 0xde, 0x34, 0x67, 0x4c, 0x85, 0x3b, 0xec, 0x98, 0xcc, 0x4d, 0x3e, 0xc0, 0x96, 0x27, 0xe6, 0x75, 0xfc, 0xdf, 0x37, 0xc0, 0x1e, 0x27, 0xe0, 0xf6, 0xc2, 0xbd, 0xbc, 0x3d, 0x9b, 0x39, 0xdc, 0xe2}, + dt2: fp.Elt{0xd8, 0x29, 0xa7, 0x39, 0xe3, 0x9f, 0x2f, 0x0e, 0x4b, 0x24, 0x21, 0x70, 0xef, 0xfd, 0x91, 0xea, 0xbf, 0xe1, 
0x72, 0x90, 0xcc, 0xc9, 0x84, 0x0e, 0xad, 0xd5, 0xe6, 0xbb, 0xc5, 0x99, 0x7f, 0xa4, 0xf0, 0x2e, 0xcc, 0x95, 0x64, 0x27, 0x19, 0xd8, 0x4c, 0x27, 0x0d, 0xff, 0xb6, 0x29, 0xe2, 0x6c, 0xfa, 0xbb, 0x4d, 0x9c, 0xbb, 0xaf, 0xa5, 0xec}, + }, + { /* 41P*/ + addYX: fp.Elt{0xd6, 0x33, 0x3f, 0x9f, 0xcf, 0xfd, 0x4c, 0xd1, 0xfe, 0xe5, 0xeb, 0x64, 0x27, 0xae, 0x7a, 0xa2, 0x82, 0x50, 0x6d, 0xaa, 0xe3, 0x5d, 0xe2, 0x48, 0x60, 0xb3, 0x76, 0x04, 0xd9, 0x19, 0xa7, 0xa1, 0x73, 0x8d, 0x38, 0xa9, 0xaf, 0x45, 0xb5, 0xb2, 0x62, 0x9b, 0xf1, 0x35, 0x7b, 0x84, 0x66, 0xeb, 0x06, 0xef, 0xf1, 0xb2, 0x2d, 0x6a, 0x61, 0x15}, + subYX: fp.Elt{0x86, 0x50, 0x42, 0xf7, 0xda, 0x59, 0xb2, 0xcf, 0x0d, 0x3d, 0xee, 0x8e, 0x53, 0x5d, 0xf7, 0x9e, 0x6a, 0x26, 0x2d, 0xc7, 0x8c, 0x8e, 0x18, 0x50, 0x6d, 0xb7, 0x51, 0x4c, 0xa7, 0x52, 0x6e, 0x0e, 0x0a, 0x16, 0x74, 0xb2, 0x81, 0x8b, 0x56, 0x27, 0x22, 0x84, 0xf4, 0x56, 0xc5, 0x06, 0xe1, 0x8b, 0xca, 0x2d, 0xdb, 0x9a, 0xf6, 0x10, 0x9c, 0x51}, + dt2: fp.Elt{0x1f, 0x16, 0xa2, 0x78, 0x96, 0x1b, 0x85, 0x9c, 0x76, 0x49, 0xd4, 0x0f, 0xac, 0xb0, 0xf4, 0xd0, 0x06, 0x2c, 0x7e, 0x6d, 0x6e, 0x8e, 0xc7, 0x9f, 0x18, 0xad, 0xfc, 0x88, 0x0c, 0x0c, 0x09, 0x05, 0x05, 0xa0, 0x79, 0x72, 0x32, 0x72, 0x87, 0x0f, 0x49, 0x87, 0x0c, 0xb4, 0x12, 0xc2, 0x09, 0xf8, 0x9f, 0x30, 0x72, 0xa9, 0x47, 0x13, 0x93, 0x49}, + }, + { /* 43P*/ + addYX: fp.Elt{0xcc, 0xb1, 0x4c, 0xd3, 0xc0, 0x9e, 0x9e, 0x4d, 0x6d, 0x28, 0x0b, 0xa5, 0x94, 0xa7, 0x2e, 0xc2, 0xc7, 0xaf, 0x29, 0x73, 0xc9, 0x68, 0xea, 0x0f, 0x34, 0x37, 0x8d, 0x96, 0x8f, 0x3a, 0x3d, 0x73, 0x1e, 0x6d, 0x9f, 0xcf, 0x8d, 0x83, 0xb5, 0x71, 0xb9, 0xe1, 0x4b, 0x67, 0x71, 0xea, 0xcf, 0x56, 0xe5, 0xeb, 0x72, 0x15, 0x2f, 0x9e, 0xa8, 0xaa}, + subYX: fp.Elt{0xf4, 0x3e, 0x85, 0x1c, 0x1a, 0xef, 0x50, 0xd1, 0xb4, 0x20, 0xb2, 0x60, 0x05, 0x98, 0xfe, 0x47, 0x3b, 0xc1, 0x76, 0xca, 0x2c, 0x4e, 0x5a, 0x42, 0xa3, 0xf7, 0x20, 0xaa, 0x57, 0x39, 0xee, 0x34, 0x1f, 0xe1, 0x68, 0xd3, 0x7e, 0x06, 0xc4, 0x6c, 0xc7, 0x76, 0x2b, 0xe4, 0x1c, 0x48, 0x44, 0xe6, 0xe5, 0x44, 0x24, 0x8d, 0xb3, 0xb6, 0x88, 0x32}, + dt2: fp.Elt{0x18, 0xa7, 0xba, 0xd0, 0x44, 0x6f, 0x33, 0x31, 0x00, 0xf8, 0xf6, 0x12, 0xe3, 0xc5, 0xc7, 0xb5, 0x91, 0x9c, 0x91, 0xb5, 0x75, 0x18, 0x18, 0x8a, 0xab, 0xed, 0x24, 0x11, 0x2e, 0xce, 0x5a, 0x0f, 0x94, 0x5f, 0x2e, 0xca, 0xd3, 0x80, 0xea, 0xe5, 0x34, 0x96, 0x67, 0x8b, 0x6a, 0x26, 0x5e, 0xc8, 0x9d, 0x2c, 0x5e, 0x6c, 0xa2, 0x0c, 0xbf, 0xf0}, + }, + { /* 45P*/ + addYX: fp.Elt{0xb3, 0xbf, 0xa3, 0x85, 0xee, 0xf6, 0x58, 0x02, 0x78, 0xc4, 0x30, 0xd6, 0x57, 0x59, 0x8c, 0x88, 0x08, 0x7c, 0xbc, 0xbe, 0x0a, 0x74, 0xa9, 0xde, 0x69, 0xe7, 0x41, 0xd8, 0xbf, 0x66, 0x8d, 0x3d, 0x28, 0x00, 0x8c, 0x47, 0x65, 0x34, 0xfe, 0x86, 0x9e, 0x6a, 0xf2, 0x41, 0x6a, 0x94, 0xc4, 0x88, 0x75, 0x23, 0x0d, 0x52, 0x69, 0xee, 0x07, 0x89}, + subYX: fp.Elt{0x22, 0x3c, 0xa1, 0x70, 0x58, 0x97, 0x93, 0xbe, 0x59, 0xa8, 0x0b, 0x8a, 0x46, 0x2a, 0x38, 0x1e, 0x08, 0x6b, 0x61, 0x9f, 0xf2, 0x4a, 0x8b, 0x80, 0x68, 0x6e, 0xc8, 0x92, 0x60, 0xf3, 0xc9, 0x89, 0xb2, 0x6d, 0x63, 0xb0, 0xeb, 0x83, 0x15, 0x63, 0x0e, 0x64, 0xbb, 0xb8, 0xfe, 0xb4, 0x81, 0x90, 0x01, 0x28, 0x10, 0xb9, 0x74, 0x6e, 0xde, 0xa4}, + dt2: fp.Elt{0x1a, 0x23, 0x45, 0xa8, 0x6f, 0x4e, 0xa7, 0x4a, 0x0c, 0xeb, 0xb0, 0x43, 0xf9, 0xef, 0x99, 0x60, 0x5b, 0xdb, 0x66, 0xc0, 0x86, 0x71, 0x43, 0xb1, 0x22, 0x7b, 0x1c, 0xe7, 0x8d, 0x09, 0x1d, 0x83, 0x76, 0x9c, 0xd3, 0x5a, 0xdd, 0x42, 0xd9, 0x2f, 0x2d, 0xba, 0x7a, 0xc2, 0xd9, 0x6b, 0xd4, 0x7a, 0xf1, 0xd5, 0x5f, 0x6b, 0x85, 0xbf, 0x0b, 0xf1}, + }, + { /* 47P*/ + addYX: fp.Elt{0xb2, 0x83, 0xfa, 0x1f, 0xd2, 0xce, 0xb6, 0xf2, 0x2d, 0xea, 0x1b, 
0xe5, 0x29, 0xa5, 0x72, 0xf9, 0x25, 0x48, 0x4e, 0xf2, 0x50, 0x1b, 0x39, 0xda, 0x34, 0xc5, 0x16, 0x13, 0xb4, 0x0c, 0xa1, 0x00, 0x79, 0x7a, 0xf5, 0x8b, 0xf3, 0x70, 0x14, 0xb6, 0xfc, 0x9a, 0x47, 0x68, 0x1e, 0x42, 0x70, 0x64, 0x2a, 0x84, 0x3e, 0x3d, 0x20, 0x58, 0xf9, 0x6a}, + subYX: fp.Elt{0xd9, 0xee, 0xc0, 0xc4, 0xf5, 0xc2, 0x86, 0xaf, 0x45, 0xd2, 0xd2, 0x87, 0x1b, 0x64, 0xd5, 0xe0, 0x8c, 0x44, 0x00, 0x4f, 0x43, 0x89, 0x04, 0x48, 0x4a, 0x0b, 0xca, 0x94, 0x06, 0x2f, 0x23, 0x5b, 0x6c, 0x8d, 0x44, 0x66, 0x53, 0xf5, 0x5a, 0x20, 0x72, 0x28, 0x58, 0x84, 0xcc, 0x73, 0x22, 0x5e, 0xd1, 0x0b, 0x56, 0x5e, 0x6a, 0xa3, 0x11, 0x91}, + dt2: fp.Elt{0x6e, 0x9f, 0x88, 0xa8, 0x68, 0x2f, 0x12, 0x37, 0x88, 0xfc, 0x92, 0x8f, 0x24, 0xeb, 0x5b, 0x2a, 0x2a, 0xd0, 0x14, 0x40, 0x4c, 0xa9, 0xa4, 0x03, 0x0c, 0x45, 0x48, 0x13, 0xe8, 0xa6, 0x37, 0xab, 0xc0, 0x06, 0x38, 0x6c, 0x96, 0x73, 0x40, 0x6c, 0xc6, 0xea, 0x56, 0xc6, 0xe9, 0x1a, 0x69, 0xeb, 0x7a, 0xd1, 0x33, 0x69, 0x58, 0x2b, 0xea, 0x2f}, + }, + { /* 49P*/ + addYX: fp.Elt{0x58, 0xa8, 0x05, 0x41, 0x00, 0x9d, 0xaa, 0xd9, 0x98, 0xcf, 0xb9, 0x41, 0xb5, 0x4a, 0x8d, 0xe2, 0xe7, 0xc0, 0x72, 0xef, 0xc8, 0x28, 0x6b, 0x68, 0x9d, 0xc9, 0xdf, 0x05, 0x8b, 0xd0, 0x04, 0x74, 0x79, 0x45, 0x52, 0x05, 0xa3, 0x6e, 0x35, 0x3a, 0xe3, 0xef, 0xb2, 0xdc, 0x08, 0x6f, 0x4e, 0x76, 0x85, 0x67, 0xba, 0x23, 0x8f, 0xdd, 0xaf, 0x09}, + subYX: fp.Elt{0xb4, 0x38, 0xc8, 0xff, 0x4f, 0x65, 0x2a, 0x7e, 0xad, 0xb1, 0xc6, 0xb9, 0x3d, 0xd6, 0xf7, 0x14, 0xcf, 0xf6, 0x98, 0x75, 0xbb, 0x47, 0x83, 0x90, 0xe7, 0xe1, 0xf6, 0x14, 0x99, 0x7e, 0xfa, 0xe4, 0x77, 0x24, 0xe3, 0xe7, 0xf0, 0x1e, 0xdb, 0x27, 0x4e, 0x16, 0x04, 0xf2, 0x08, 0x52, 0xfc, 0xec, 0x55, 0xdb, 0x2e, 0x67, 0xe1, 0x94, 0x32, 0x89}, + dt2: fp.Elt{0x00, 0xad, 0x03, 0x35, 0x1a, 0xb1, 0x88, 0xf0, 0xc9, 0x11, 0xe4, 0x12, 0x52, 0x61, 0xfd, 0x8a, 0x1b, 0x6a, 0x0a, 0x4c, 0x42, 0x46, 0x22, 0x0e, 0xa5, 0xf9, 0xe2, 0x50, 0xf2, 0xb2, 0x1f, 0x20, 0x78, 0x10, 0xf6, 0xbf, 0x7f, 0x0c, 0x9c, 0xad, 0x40, 0x8b, 0x82, 0xd4, 0xba, 0x69, 0x09, 0xac, 0x4b, 0x6d, 0xc4, 0x49, 0x17, 0x81, 0x57, 0x3b}, + }, + { /* 51P*/ + addYX: fp.Elt{0x0d, 0xfe, 0xb4, 0x35, 0x11, 0xbd, 0x1d, 0x6b, 0xc2, 0xc5, 0x3b, 0xd2, 0x23, 0x2c, 0x72, 0xe3, 0x48, 0xb1, 0x48, 0x73, 0xfb, 0xa3, 0x21, 0x6e, 0xc0, 0x09, 0x69, 0xac, 0xe1, 0x60, 0xbc, 0x24, 0x03, 0x99, 0x63, 0x0a, 0x00, 0xf0, 0x75, 0xf6, 0x92, 0xc5, 0xd6, 0xdb, 0x51, 0xd4, 0x7d, 0xe6, 0xf4, 0x11, 0x79, 0xd7, 0xc3, 0xaf, 0x48, 0xd0}, + subYX: fp.Elt{0xf4, 0x4f, 0xaf, 0x31, 0xe3, 0x10, 0x89, 0x95, 0xf0, 0x8a, 0xf6, 0x31, 0x9f, 0x48, 0x02, 0xba, 0x42, 0x2b, 0x3c, 0x22, 0x8b, 0xcc, 0x12, 0x98, 0x6e, 0x7a, 0x64, 0x3a, 0xc4, 0xca, 0x32, 0x2a, 0x72, 0xf8, 0x2c, 0xcf, 0x78, 0x5e, 0x7a, 0x75, 0x6e, 0x72, 0x46, 0x48, 0x62, 0x28, 0xac, 0x58, 0x1a, 0xc6, 0x59, 0x88, 0x2a, 0x44, 0x9e, 0x83}, + dt2: fp.Elt{0xb3, 0xde, 0x36, 0xfd, 0xeb, 0x1b, 0xd4, 0x24, 0x1b, 0x08, 0x8c, 0xfe, 0xa9, 0x41, 0xa1, 0x64, 0xf2, 0x6d, 0xdb, 0xf9, 0x94, 0xae, 0x86, 0x71, 0xab, 0x10, 0xbf, 0xa3, 0xb2, 0xa0, 0xdf, 0x10, 0x8c, 0x74, 0xce, 0xb3, 0xfc, 0xdb, 0xba, 0x15, 0xf6, 0x91, 0x7a, 0x9c, 0x36, 0x1e, 0x45, 0x07, 0x3c, 0xec, 0x1a, 0x61, 0x26, 0x93, 0xe3, 0x50}, + }, + { /* 53P*/ + addYX: fp.Elt{0xc5, 0x50, 0xc5, 0x83, 0xb0, 0xbd, 0xd9, 0xf6, 0x6d, 0x15, 0x5e, 0xc1, 0x1a, 0x33, 0xa0, 0xce, 0x13, 0x70, 0x3b, 0xe1, 0x31, 0xc6, 0xc4, 0x02, 0xec, 0x8c, 0xd5, 0x9c, 0x97, 0xd3, 0x12, 0xc4, 0xa2, 0xf9, 0xd5, 0xfb, 0x22, 0x69, 0x94, 0x09, 0x2f, 0x59, 0xce, 0xdb, 0xf2, 0xf2, 0x00, 0xe0, 0xa9, 0x08, 0x44, 0x2e, 0x8b, 0x6b, 0xf5, 0xb3}, + subYX: fp.Elt{0x90, 0xdd, 0xec, 0xa2, 0x65, 0xb7, 0x61, 
0xbc, 0xaa, 0x70, 0xa2, 0x15, 0xd8, 0xb0, 0xf8, 0x8e, 0x23, 0x3d, 0x9f, 0x46, 0xa3, 0x29, 0x20, 0xd1, 0xa1, 0x15, 0x81, 0xc6, 0xb6, 0xde, 0xbe, 0x60, 0x63, 0x24, 0xac, 0x15, 0xfb, 0xeb, 0xd3, 0xea, 0x57, 0x13, 0x86, 0x38, 0x1e, 0x22, 0xf4, 0x8c, 0x5d, 0xaf, 0x1b, 0x27, 0x21, 0x4f, 0xa3, 0x63}, + dt2: fp.Elt{0x07, 0x15, 0x87, 0xc4, 0xfd, 0xa1, 0x97, 0x7a, 0x07, 0x1f, 0x56, 0xcc, 0xe3, 0x6a, 0x01, 0x90, 0xce, 0xf9, 0xfa, 0x50, 0xb2, 0xe0, 0x87, 0x8b, 0x6c, 0x63, 0x6c, 0xf6, 0x2a, 0x09, 0xef, 0xef, 0xd2, 0x31, 0x40, 0x25, 0xf6, 0x84, 0xcb, 0xe0, 0xc4, 0x23, 0xc1, 0xcb, 0xe2, 0x02, 0x83, 0x2d, 0xed, 0x74, 0x74, 0x8b, 0xf8, 0x7c, 0x81, 0x18}, + }, + { /* 55P*/ + addYX: fp.Elt{0x9e, 0xe5, 0x59, 0x95, 0x63, 0x2e, 0xac, 0x8b, 0x03, 0x3c, 0xc1, 0x8e, 0xe1, 0x5b, 0x56, 0x3c, 0x16, 0x41, 0xe4, 0xc2, 0x60, 0x0c, 0x6d, 0x65, 0x9f, 0xfc, 0x27, 0x68, 0x43, 0x44, 0x05, 0x12, 0x6c, 0xda, 0x04, 0xef, 0xcf, 0xcf, 0xdc, 0x0a, 0x1a, 0x7f, 0x12, 0xd3, 0xeb, 0x02, 0xb6, 0x04, 0xca, 0xd6, 0xcb, 0xf0, 0x22, 0xba, 0x35, 0x6d}, + subYX: fp.Elt{0x09, 0x6d, 0xf9, 0x64, 0x4c, 0xe6, 0x41, 0xff, 0x01, 0x4d, 0xce, 0x1e, 0xfa, 0x38, 0xa2, 0x25, 0x62, 0xff, 0x03, 0x39, 0x18, 0x91, 0xbb, 0x9d, 0xce, 0x02, 0xf0, 0xf1, 0x3c, 0x55, 0x18, 0xa9, 0xab, 0x4d, 0xd2, 0x35, 0xfd, 0x8d, 0xa9, 0xb2, 0xad, 0xb7, 0x06, 0x6e, 0xc6, 0x69, 0x49, 0xd6, 0x98, 0x98, 0x0b, 0x22, 0x81, 0x6b, 0xbd, 0xa0}, + dt2: fp.Elt{0x22, 0xf4, 0x85, 0x5d, 0x2b, 0xf1, 0x55, 0xa5, 0xd6, 0x27, 0x86, 0x57, 0x12, 0x1f, 0x16, 0x0a, 0x5a, 0x9b, 0xf2, 0x38, 0xb6, 0x28, 0xd8, 0x99, 0x0c, 0x89, 0x1d, 0x7f, 0xca, 0x21, 0x17, 0x1a, 0x0b, 0x02, 0x5f, 0x77, 0x2f, 0x73, 0x30, 0x7c, 0xc8, 0xd7, 0x2b, 0xcc, 0xe7, 0xf3, 0x21, 0xac, 0x53, 0xa7, 0x11, 0x5d, 0xd8, 0x1d, 0x9b, 0xf5}, + }, + { /* 57P*/ + addYX: fp.Elt{0x94, 0x63, 0x5d, 0xef, 0xfd, 0x6d, 0x25, 0x4e, 0x6d, 0x29, 0x03, 0xed, 0x24, 0x28, 0x27, 0x57, 0x47, 0x3e, 0x6a, 0x1a, 0xfe, 0x37, 0xee, 0x5f, 0x83, 0x29, 0x14, 0xfd, 0x78, 0x25, 0x8a, 0xe1, 0x02, 0x38, 0xd8, 0xca, 0x65, 0x55, 0x40, 0x7d, 0x48, 0x2c, 0x7c, 0x7e, 0x60, 0xb6, 0x0c, 0x6d, 0xf7, 0xe8, 0xb3, 0x62, 0x53, 0xd6, 0x9c, 0x2b}, + subYX: fp.Elt{0x47, 0x25, 0x70, 0x62, 0xf5, 0x65, 0x93, 0x62, 0x08, 0xac, 0x59, 0x66, 0xdb, 0x08, 0xd9, 0x1a, 0x19, 0xaf, 0xf4, 0xef, 0x02, 0xa2, 0x78, 0xa9, 0x55, 0x1c, 0xfa, 0x08, 0x11, 0xcb, 0xa3, 0x71, 0x74, 0xb1, 0x62, 0xe7, 0xc7, 0xf3, 0x5a, 0xb5, 0x8b, 0xd4, 0xf6, 0x10, 0x57, 0x79, 0x72, 0x2f, 0x13, 0x86, 0x7b, 0x44, 0x5f, 0x48, 0xfd, 0x88}, + dt2: fp.Elt{0x10, 0x02, 0xcd, 0x05, 0x9a, 0xc3, 0x32, 0x6d, 0x10, 0x3a, 0x74, 0xba, 0x06, 0xc4, 0x3b, 0x34, 0xbc, 0x36, 0xed, 0xa3, 0xba, 0x9a, 0xdb, 0x6d, 0xd4, 0x69, 0x99, 0x97, 0xd0, 0xe4, 0xdd, 0xf5, 0xd4, 0x7c, 0xd3, 0x4e, 0xab, 0xd1, 0x3b, 0xbb, 0xe9, 0xc7, 0x6a, 0x94, 0x25, 0x61, 0xf0, 0x06, 0xc5, 0x12, 0xa8, 0x86, 0xe5, 0x35, 0x46, 0xeb}, + }, + { /* 59P*/ + addYX: fp.Elt{0x9e, 0x95, 0x11, 0xc6, 0xc7, 0xe8, 0xee, 0x5a, 0x26, 0xa0, 0x72, 0x72, 0x59, 0x91, 0x59, 0x16, 0x49, 0x99, 0x7e, 0xbb, 0xd7, 0x15, 0xb4, 0xf2, 0x40, 0xf9, 0x5a, 0x4d, 0xc8, 0xa0, 0xe2, 0x34, 0x7b, 0x34, 0xf3, 0x99, 0xbf, 0xa9, 0xf3, 0x79, 0xc1, 0x1a, 0x0c, 0xf4, 0x86, 0x74, 0x4e, 0xcb, 0xbc, 0x90, 0xad, 0xb6, 0x51, 0x6d, 0xaa, 0x33}, + subYX: fp.Elt{0x9f, 0xd1, 0xc5, 0xa2, 0x6c, 0x24, 0x88, 0x15, 0x71, 0x68, 0xf6, 0x07, 0x45, 0x02, 0xc4, 0x73, 0x7e, 0x75, 0x87, 0xca, 0x7c, 0xf0, 0x92, 0x00, 0x75, 0xd6, 0x5a, 0xdd, 0xe0, 0x64, 0x16, 0x9d, 0x62, 0x80, 0x33, 0x9f, 0xf4, 0x8e, 0x1a, 0x15, 0x1c, 0xd3, 0x0f, 0x4d, 0x4f, 0x62, 0x2d, 0xd7, 0xa5, 0x77, 0xe3, 0xea, 0xf0, 0xfb, 0x1a, 0xdb}, + dt2: fp.Elt{0x6a, 0xa2, 0xb1, 
0xaa, 0xfb, 0x5a, 0x32, 0x4e, 0xff, 0x47, 0x06, 0xd5, 0x9a, 0x4f, 0xce, 0x83, 0x5b, 0x82, 0x34, 0x3e, 0x47, 0xb8, 0xf8, 0xe9, 0x7c, 0x67, 0x69, 0x8d, 0x9c, 0xb7, 0xde, 0x57, 0xf4, 0x88, 0x41, 0x56, 0x0c, 0x87, 0x1e, 0xc9, 0x2f, 0x54, 0xbf, 0x5c, 0x68, 0x2c, 0xd9, 0xc4, 0xef, 0x53, 0x73, 0x1e, 0xa6, 0x38, 0x02, 0x10}, + }, + { /* 61P*/ + addYX: fp.Elt{0x08, 0x80, 0x4a, 0xc9, 0xb7, 0xa8, 0x88, 0xd9, 0xfc, 0x6a, 0xc0, 0x3e, 0xc2, 0x33, 0x4d, 0x2b, 0x2a, 0xa3, 0x6d, 0x72, 0x3e, 0xdc, 0x34, 0x68, 0x08, 0xbf, 0x27, 0xef, 0xf4, 0xff, 0xe2, 0x0c, 0x31, 0x0c, 0xa2, 0x0a, 0x1f, 0x65, 0xc1, 0x4c, 0x61, 0xd3, 0x1b, 0xbc, 0x25, 0xb1, 0xd0, 0xd4, 0x89, 0xb2, 0x53, 0xfb, 0x43, 0xa5, 0xaf, 0x04}, + subYX: fp.Elt{0xe3, 0xe1, 0x37, 0xad, 0x58, 0xa9, 0x55, 0x81, 0xee, 0x64, 0x21, 0xb9, 0xf5, 0x4c, 0x35, 0xea, 0x4a, 0xd3, 0x26, 0xaa, 0x90, 0xd4, 0x60, 0x46, 0x09, 0x4b, 0x4a, 0x62, 0xf9, 0xcd, 0xe1, 0xee, 0xbb, 0xc2, 0x09, 0x0b, 0xb0, 0x96, 0x8e, 0x43, 0x77, 0xaf, 0x25, 0x20, 0x5e, 0x47, 0xe4, 0x1d, 0x50, 0x69, 0x74, 0x08, 0xd7, 0xb9, 0x90, 0x13}, + dt2: fp.Elt{0x51, 0x91, 0x95, 0x64, 0x03, 0x16, 0xfd, 0x6e, 0x26, 0x94, 0x6b, 0x61, 0xe7, 0xd9, 0xe0, 0x4a, 0x6d, 0x7c, 0xfa, 0xc0, 0xe2, 0x43, 0x23, 0x53, 0x70, 0xf5, 0x6f, 0x73, 0x8b, 0x81, 0xb0, 0x0c, 0xee, 0x2e, 0x46, 0xf2, 0x8d, 0xa6, 0xfb, 0xb5, 0x1c, 0x33, 0xbf, 0x90, 0x59, 0xc9, 0x7c, 0xb8, 0x6f, 0xad, 0x75, 0x02, 0x90, 0x8e, 0x59, 0x75}, + }, + { /* 63P*/ + addYX: fp.Elt{0x36, 0x4d, 0x77, 0x04, 0xb8, 0x7d, 0x4a, 0xd1, 0xc5, 0xbb, 0x7b, 0x50, 0x5f, 0x8d, 0x9d, 0x62, 0x0f, 0x66, 0x71, 0xec, 0x87, 0xc5, 0x80, 0x82, 0xc8, 0xf4, 0x6a, 0x94, 0x92, 0x5b, 0xb0, 0x16, 0x9b, 0xb2, 0xc9, 0x6f, 0x2b, 0x2d, 0xee, 0x95, 0x73, 0x2e, 0xc2, 0x1b, 0xc5, 0x55, 0x36, 0x86, 0x24, 0xf8, 0x20, 0x05, 0x0d, 0x93, 0xd7, 0x76}, + subYX: fp.Elt{0x7f, 0x01, 0xeb, 0x2e, 0x48, 0x4d, 0x1d, 0xf1, 0x06, 0x7e, 0x7c, 0x2a, 0x43, 0xbf, 0x28, 0xac, 0xe9, 0x58, 0x13, 0xc8, 0xbf, 0x8e, 0xc0, 0xef, 0xe8, 0x4f, 0x46, 0x8a, 0xe7, 0xc0, 0xf6, 0x0f, 0x0a, 0x03, 0x48, 0x91, 0x55, 0x39, 0x2a, 0xe3, 0xdc, 0xf6, 0x22, 0x9d, 0x4d, 0x71, 0x55, 0x68, 0x25, 0x6e, 0x95, 0x52, 0xee, 0x4c, 0xd9, 0x01}, + dt2: fp.Elt{0xac, 0x33, 0x3f, 0x7c, 0x27, 0x35, 0x15, 0x91, 0x33, 0x8d, 0xf9, 0xc4, 0xf4, 0xf3, 0x90, 0x09, 0x75, 0x69, 0x62, 0x9f, 0x61, 0x35, 0x83, 0x92, 0x04, 0xef, 0x96, 0x38, 0x80, 0x9e, 0x88, 0xb3, 0x67, 0x95, 0xbe, 0x79, 0x3c, 0x35, 0xd8, 0xdc, 0xb2, 0x3e, 0x2d, 0xe6, 0x46, 0xbe, 0x81, 0xf3, 0x32, 0x0e, 0x37, 0x23, 0x75, 0x2a, 0x3d, 0xa0}, + }, +} diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go new file mode 100644 index 000000000..f6ac5edbb --- /dev/null +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go @@ -0,0 +1,62 @@ +package goldilocks + +import ( + "crypto/subtle" + + mlsb "github.com/cloudflare/circl/math/mlsbset" +) + +const ( + // MLSBRecoding parameters + fxT = 448 + fxV = 2 + fxW = 3 + fx2w1 = 1 << (uint(fxW) - 1) +) + +// ScalarBaseMult returns kG where G is the generator point. 
+func (e twistCurve) ScalarBaseMult(k *Scalar) *twistPoint {
+	m, err := mlsb.New(fxT, fxV, fxW)
+	if err != nil {
+		panic(err)
+	}
+	if m.IsExtended() {
+		panic("not extended")
+	}
+
+	// If k is zero, replace it with the group order, an equivalent scalar
+	// for which the encoding below is well defined.
+	var isZero int
+	if k.IsZero() {
+		isZero = 1
+	}
+	subtle.ConstantTimeCopy(isZero, k[:], order[:])
+
+	// Force k to be odd; if k is even, use -k and negate the result at the end.
+	minusK := *k
+	isEven := 1 - int(k[0]&0x1)
+	minusK.Neg()
+	subtle.ConstantTimeCopy(isEven, k[:], minusK[:])
+	c, err := m.Encode(k[:])
+	if err != nil {
+		panic(err)
+	}
+
+	gP := c.Exp(groupMLSB{})
+	P := gP.(*twistPoint)
+	P.cneg(uint(isEven))
+	return P
+}
+
+type groupMLSB struct{}
+
+func (e groupMLSB) ExtendedEltP() mlsb.EltP      { return nil }
+func (e groupMLSB) Sqr(x mlsb.EltG)              { x.(*twistPoint).Double() }
+func (e groupMLSB) Mul(x mlsb.EltG, y mlsb.EltP) { x.(*twistPoint).mixAddZ1(y.(*preTwistPointAffine)) }
+func (e groupMLSB) Identity() mlsb.EltG          { return twistCurve{}.Identity() }
+func (e groupMLSB) NewEltP() mlsb.EltP           { return &preTwistPointAffine{} }
+func (e groupMLSB) Lookup(a mlsb.EltP, v uint, s, u int32) {
+	Tabj := &tabFixMult[v]
+	P := a.(*preTwistPointAffine)
+	for k := range Tabj {
+		P.cmov(&Tabj[k], uint(subtle.ConstantTimeEq(int32(k), u)))
+	}
+	P.cneg(int(s >> 31))
+}
diff --git a/vendor/github.com/cloudflare/circl/internal/conv/conv.go b/vendor/github.com/cloudflare/circl/internal/conv/conv.go
new file mode 100644
index 000000000..649a8e931
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/internal/conv/conv.go
@@ -0,0 +1,140 @@
+package conv
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math/big"
+	"strings"
+)
+
+// BytesLe2Hex returns a hexadecimal string of a number stored in a
+// little-endian order slice x.
+func BytesLe2Hex(x []byte) string {
+	b := &strings.Builder{}
+	b.Grow(2*len(x) + 2)
+	fmt.Fprint(b, "0x")
+	if len(x) == 0 {
+		fmt.Fprint(b, "00")
+	}
+	for i := len(x) - 1; i >= 0; i-- {
+		fmt.Fprintf(b, "%02x", x[i])
+	}
+	return b.String()
+}
+
+// BytesLe2BigInt converts a little-endian slice x into a math/big.Int.
+func BytesLe2BigInt(x []byte) *big.Int {
+	n := len(x)
+	b := new(big.Int)
+	if len(x) > 0 {
+		y := make([]byte, n)
+		for i := 0; i < n; i++ {
+			y[n-1-i] = x[i]
+		}
+		b.SetBytes(y)
+	}
+	return b
+}
+
+// BytesBe2Uint64Le converts a big-endian slice x to a little-endian slice of uint64.
+func BytesBe2Uint64Le(x []byte) []uint64 {
+	l := len(x)
+	z := make([]uint64, (l+7)/8)
+	blocks := l / 8
+	for i := 0; i < blocks; i++ {
+		z[i] = binary.BigEndian.Uint64(x[l-8*(i+1):])
+	}
+	remBytes := l % 8
+	for i := 0; i < remBytes; i++ {
+		z[blocks] |= uint64(x[l-1-8*blocks-i]) << uint(8*i)
+	}
+	return z
+}
+
+// BigInt2BytesLe stores a positive big.Int number x into a little-endian slice z.
+// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros).
+// If x does not fit in the slice or is negative, z is not modified.
+func BigInt2BytesLe(z []byte, x *big.Int) {
+	xLen := (x.BitLen() + 7) >> 3
+	zLen := len(z)
+	if zLen >= xLen && x.Sign() >= 0 {
+		y := x.Bytes()
+		for i := 0; i < xLen; i++ {
+			z[i] = y[xLen-1-i]
+		}
+		for i := xLen; i < zLen; i++ {
+			z[i] = 0
+		}
+	}
+}
+
+// Uint64Le2BigInt converts a little-endian slice x into a big number.
+func Uint64Le2BigInt(x []uint64) *big.Int {
+	n := len(x)
+	b := new(big.Int)
+	var bi big.Int
+	for i := n - 1; i >= 0; i-- {
+		bi.SetUint64(x[i])
+		b.Lsh(b, 64)
+		b.Add(b, &bi)
+	}
+	return b
+}
+
+// Uint64Le2BytesLe converts a little-endian slice x to a little-endian slice of bytes.
+func Uint64Le2BytesLe(x []uint64) []byte {
+	b := make([]byte, 8*len(x))
+	n := len(x)
+	for i := 0; i < n; i++ {
+		binary.LittleEndian.PutUint64(b[i*8:], x[i])
+	}
+	return b
+}
+
+// Uint64Le2BytesBe converts a little-endian slice x to a big-endian slice of bytes.
+func Uint64Le2BytesBe(x []uint64) []byte {
+	b := make([]byte, 8*len(x))
+	n := len(x)
+	for i := 0; i < n; i++ {
+		binary.BigEndian.PutUint64(b[i*8:], x[n-1-i])
+	}
+	return b
+}
+
+// Uint64Le2Hex returns a hexadecimal string of a number stored in a
+// little-endian order slice x.
+func Uint64Le2Hex(x []uint64) string {
+	b := new(strings.Builder)
+	b.Grow(16*len(x) + 2)
+	fmt.Fprint(b, "0x")
+	if len(x) == 0 {
+		fmt.Fprint(b, "00")
+	}
+	for i := len(x) - 1; i >= 0; i-- {
+		fmt.Fprintf(b, "%016x", x[i])
+	}
+	return b.String()
+}
+
+// BigInt2Uint64Le stores a positive big.Int number x into a little-endian slice z.
+// The slice is modified if the bitlength of x <= 64*len(z) (padding with zeros).
+// If x does not fit in the slice or is negative, z is not modified.
+func BigInt2Uint64Le(z []uint64, x *big.Int) {
+	xLen := (x.BitLen() + 63) >> 6 // number of 64-bit words
+	zLen := len(z)
+	if zLen >= xLen && x.Sign() > 0 {
+		var y, yi big.Int
+		y.Set(x)
+		two64 := big.NewInt(1)
+		two64.Lsh(two64, 64).Sub(two64, big.NewInt(1)) // 2^64 - 1, used as a word mask
+		for i := 0; i < xLen; i++ {
+			yi.And(&y, two64)
+			z[i] = yi.Uint64()
+			y.Rsh(&y, 64)
+		}
+	}
+	for i := xLen; i < zLen; i++ {
+		z[i] = 0
+	}
+}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/doc.go b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go
new file mode 100644
index 000000000..7e0230907
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go
@@ -0,0 +1,62 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha3 implements the SHA-3 fixed-output-length hash functions and
+// the SHAKE variable-output-length hash functions defined by FIPS-202.
+//
+// Both types of hash function use the "sponge" construction and the Keccak
+// permutation. For a detailed specification see http://keccak.noekeon.org/
+//
+// # Guidance
+//
+// If you aren't sure what function you need, use SHAKE256 with at least 64
+// bytes of output. The SHAKE instances are faster than the SHA3 instances;
+// the latter have to allocate memory to conform to the hash.Hash interface.
+//
+// If you need a secret-key MAC (message authentication code), prepend the
+// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
+// output.
+//
+// # Security strengths
+//
+// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
+// strength against preimage attacks of x bits. Since they only produce "x"
+// bits of output, their collision-resistance is only "x/2" bits.
+//
+// The SHAKE-256 and -128 functions have a generic security strength of 256 and
+// 128 bits against all attacks, provided that at least 2x bits of their output
+// is used. Requesting more than 64 or 32 bytes of output, respectively, does
+// not increase the collision-resistance of the SHAKE functions.
+//
+// # The sponge construction
+//
+// A sponge builds a pseudo-random function from a public pseudo-random
+// permutation, by applying the permutation to a state of "rate + capacity"
+// bytes, but hiding "capacity" of the bytes.
+//
+// A sponge starts out with a zero state.
To hash an input using a sponge, up +// to "rate" bytes of the input are XORed into the sponge's state. The sponge +// is then "full" and the permutation is applied to "empty" it. This process is +// repeated until all the input has been "absorbed". The input is then padded. +// The digest is "squeezed" from the sponge in the same way, except that output +// is copied out instead of input being XORed in. +// +// A sponge is parameterized by its generic security strength, which is equal +// to half its capacity; capacity + rate is equal to the permutation's width. +// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means +// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2. +// +// # Recommendations +// +// The SHAKE functions are recommended for most new uses. They can produce +// output of arbitrary length. SHAKE256, with an output length of at least +// 64 bytes, provides 256-bit security against all attacks. The Keccak team +// recommends it for most applications upgrading from SHA2-512. (NIST chose a +// much stronger, but much slower, sponge instance for SHA3-512.) +// +// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions. +// They produce output of the same length, with the same security strengths +// against all attacks. This means, in particular, that SHA3-256 only has +// 128-bit collision resistance, because its output length is 32 bytes. +package sha3 diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go new file mode 100644 index 000000000..7d2365a76 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file provides functions for creating instances of the SHA-3 +// and SHAKE hash functions, as well as utility functions for hashing +// bytes. + +// New224 creates a new SHA3-224 hash. +// Its generic security strength is 224 bits against preimage attacks, +// and 112 bits against collision attacks. +func New224() State { + return State{rate: 144, outputLen: 28, dsbyte: 0x06} +} + +// New256 creates a new SHA3-256 hash. +// Its generic security strength is 256 bits against preimage attacks, +// and 128 bits against collision attacks. +func New256() State { + return State{rate: 136, outputLen: 32, dsbyte: 0x06} +} + +// New384 creates a new SHA3-384 hash. +// Its generic security strength is 384 bits against preimage attacks, +// and 192 bits against collision attacks. +func New384() State { + return State{rate: 104, outputLen: 48, dsbyte: 0x06} +} + +// New512 creates a new SHA3-512 hash. +// Its generic security strength is 512 bits against preimage attacks, +// and 256 bits against collision attacks. +func New512() State { + return State{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// Sum224 returns the SHA3-224 digest of the data. +func Sum224(data []byte) (digest [28]byte) { + h := New224() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum256 returns the SHA3-256 digest of the data. +func Sum256(data []byte) (digest [32]byte) { + h := New256() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum384 returns the SHA3-384 digest of the data. 
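For orientation, this is roughly how the constructors and one-shot helpers in hashes.go are meant to be consumed. Note the package is internal to circl, so it is importable only from within that module; treat this as a sketch of intended use, not code from this diff:

package main

import (
	"fmt"

	"github.com/cloudflare/circl/internal/sha3" // internal: importable only from within circl
)

func main() {
	// One-shot helper defined in hashes.go above.
	d := sha3.Sum256([]byte("abc"))
	fmt.Printf("%x\n", d)

	// Streaming form: New256 returns a State whose Write absorbs
	// input and whose Sum squeezes the 32-byte digest.
	h := sha3.New256()
	_, _ = h.Write([]byte("abc"))
	fmt.Printf("%x\n", h.Sum(nil)) // same digest as Sum256
}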
+func Sum384(data []byte) (digest [48]byte) { + h := New384() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} + +// Sum512 returns the SHA3-512 digest of the data. +func Sum512(data []byte) (digest [64]byte) { + h := New512() + _, _ = h.Write(data) + h.Sum(digest[:0]) + return +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go new file mode 100644 index 000000000..ab19d0ad1 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go @@ -0,0 +1,383 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// KeccakF1600 applies the Keccak permutation to a 1600b-wide +// state represented as a slice of 25 uint64s. +// nolint:funlen +func KeccakF1600(a *[25]uint64) { + // Implementation translated from Keccak-inplace.c + // in the keccak reference code. + var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 + + for i := 0; i < 24; i += 4 { + // Combines the 5 steps in each round into 2 steps. + // Unrolls 4 rounds per loop and spreads some steps across rounds. + + // Round 1 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[6] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[12] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[18] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[24] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i] + a[6] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[16] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[22] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[3] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[10] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[1] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[7] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[19] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[20] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[11] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[23] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[4] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[5] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[2] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[8] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[14] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[15] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + // Round 2 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = 
a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[16] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[7] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[23] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[14] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+1] + a[16] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[11] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[2] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[18] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[20] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[6] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[22] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[4] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[15] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[1] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[8] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[24] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[10] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[12] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[3] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[19] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[5] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + // Round 3 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[11] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[22] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[8] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[19] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+2] + a[11] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[1] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[12] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[23] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[15] = bc0 ^ (bc2 &^ bc1) + a[1] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[16] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[2] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[24] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[5] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[6] ^ d1 + 
bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[3] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[14] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[20] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[7] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[18] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[4] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[10] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + // Round 4 + bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] + bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] + bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] + bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] + bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] + d0 = bc4 ^ (bc1<<1 | bc1>>63) + d1 = bc0 ^ (bc2<<1 | bc2>>63) + d2 = bc1 ^ (bc3<<1 | bc3>>63) + d3 = bc2 ^ (bc4<<1 | bc4>>63) + d4 = bc3 ^ (bc0<<1 | bc0>>63) + + bc0 = a[0] ^ d0 + t = a[1] ^ d1 + bc1 = t<<44 | t>>(64-44) + t = a[2] ^ d2 + bc2 = t<<43 | t>>(64-43) + t = a[3] ^ d3 + bc3 = t<<21 | t>>(64-21) + t = a[4] ^ d4 + bc4 = t<<14 | t>>(64-14) + a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+3] + a[1] = bc1 ^ (bc3 &^ bc2) + a[2] = bc2 ^ (bc4 &^ bc3) + a[3] = bc3 ^ (bc0 &^ bc4) + a[4] = bc4 ^ (bc1 &^ bc0) + + t = a[5] ^ d0 + bc2 = t<<3 | t>>(64-3) + t = a[6] ^ d1 + bc3 = t<<45 | t>>(64-45) + t = a[7] ^ d2 + bc4 = t<<61 | t>>(64-61) + t = a[8] ^ d3 + bc0 = t<<28 | t>>(64-28) + t = a[9] ^ d4 + bc1 = t<<20 | t>>(64-20) + a[5] = bc0 ^ (bc2 &^ bc1) + a[6] = bc1 ^ (bc3 &^ bc2) + a[7] = bc2 ^ (bc4 &^ bc3) + a[8] = bc3 ^ (bc0 &^ bc4) + a[9] = bc4 ^ (bc1 &^ bc0) + + t = a[10] ^ d0 + bc4 = t<<18 | t>>(64-18) + t = a[11] ^ d1 + bc0 = t<<1 | t>>(64-1) + t = a[12] ^ d2 + bc1 = t<<6 | t>>(64-6) + t = a[13] ^ d3 + bc2 = t<<25 | t>>(64-25) + t = a[14] ^ d4 + bc3 = t<<8 | t>>(64-8) + a[10] = bc0 ^ (bc2 &^ bc1) + a[11] = bc1 ^ (bc3 &^ bc2) + a[12] = bc2 ^ (bc4 &^ bc3) + a[13] = bc3 ^ (bc0 &^ bc4) + a[14] = bc4 ^ (bc1 &^ bc0) + + t = a[15] ^ d0 + bc1 = t<<36 | t>>(64-36) + t = a[16] ^ d1 + bc2 = t<<10 | t>>(64-10) + t = a[17] ^ d2 + bc3 = t<<15 | t>>(64-15) + t = a[18] ^ d3 + bc4 = t<<56 | t>>(64-56) + t = a[19] ^ d4 + bc0 = t<<27 | t>>(64-27) + a[15] = bc0 ^ (bc2 &^ bc1) + a[16] = bc1 ^ (bc3 &^ bc2) + a[17] = bc2 ^ (bc4 &^ bc3) + a[18] = bc3 ^ (bc0 &^ bc4) + a[19] = bc4 ^ (bc1 &^ bc0) + + t = a[20] ^ d0 + bc3 = t<<41 | t>>(64-41) + t = a[21] ^ d1 + bc4 = t<<2 | t>>(64-2) + t = a[22] ^ d2 + bc0 = t<<62 | t>>(64-62) + t = a[23] ^ d3 + bc1 = t<<55 | t>>(64-55) + t = a[24] ^ d4 + bc2 = t<<39 | t>>(64-39) + a[20] = bc0 ^ (bc2 &^ bc1) + a[21] = bc1 ^ (bc3 &^ bc2) + a[22] = bc2 ^ (bc4 &^ bc3) + a[23] = bc3 ^ (bc0 &^ bc4) + a[24] = bc4 ^ (bc1 &^ bc0) + } +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/rc.go b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go new file mode 100644 index 000000000..6a3df42f3 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go @@ -0,0 +1,29 @@ +package sha3 + +// RC stores the round constants for use in the ι step. 
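The unrolled loop above fuses Keccak's five step mappings (θ, ρ, π, χ, ι) per round. For orientation, a naive stdlib-only sketch of just the θ and ι steps; the c and d arrays correspond to the bc0..bc4 and d0..d4 temporaries in the vendored code, and thetaIota is an illustrative name:

package main

import "math/bits"

// thetaIota applies only the θ and ι steps of one Keccak round to the
// 5x5 lane state a (lane (x,y) lives at index x+5*y, matching the
// column sums bc0..bc4 in the unrolled code).
func thetaIota(a *[25]uint64, rc uint64) {
	var c, d [5]uint64
	for x := 0; x < 5; x++ { // column parities (the bc values)
		c[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
	}
	for x := 0; x < 5; x++ {
		// d0 = bc4 ^ rot(bc1,1) in the vendored code is exactly this.
		d[x] = c[(x+4)%5] ^ bits.RotateLeft64(c[(x+1)%5], 1)
		for y := 0; y < 5; y++ {
			a[x+5*y] ^= d[x]
		}
	}
	a[0] ^= rc // ι: XOR the round constant into lane (0,0)
}

func main() {
	var state [25]uint64
	thetaIota(&state, 0x0000000000000001) // RC[0] from the table below
	_ = state
}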
+var RC = [24]uint64{ + 0x0000000000000001, + 0x0000000000008082, + 0x800000000000808A, + 0x8000000080008000, + 0x000000000000808B, + 0x0000000080000001, + 0x8000000080008081, + 0x8000000000008009, + 0x000000000000008A, + 0x0000000000000088, + 0x0000000080008009, + 0x000000008000000A, + 0x000000008000808B, + 0x800000000000008B, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800A, + 0x800000008000000A, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go new file mode 100644 index 000000000..b35cd006b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go @@ -0,0 +1,195 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// spongeDirection indicates the direction bytes are flowing through the sponge. +type spongeDirection int + +const ( + // spongeAbsorbing indicates that the sponge is absorbing input. + spongeAbsorbing spongeDirection = iota + // spongeSqueezing indicates that the sponge is being squeezed. + spongeSqueezing +) + +const ( + // maxRate is the maximum size of the internal buffer. SHAKE-256 + // currently needs the largest buffer. + maxRate = 168 +) + +func (d *State) buf() []byte { + return d.storage.asBytes()[d.bufo:d.bufe] +} + +type State struct { + // Generic sponge components. + a [25]uint64 // main state of the hash + rate int // the number of bytes of state to use + + bufo int // offset of buffer in storage + bufe int // end of buffer in storage + + // dsbyte contains the "domain separation" bits and the first bit of + // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the + // SHA-3 and SHAKE functions by appending bitstrings to the message. + // Using a little-endian bit-ordering convention, these are "01" for SHA-3 + // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the + // padding rule from section 5.1 is applied to pad the message to a multiple + // of the rate, which involves adding a "1" bit, zero or more "0" bits, and + // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, + // giving 00000110b (0x06) and 00011111b (0x1f). + // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf + // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and + // Extendable-Output Functions (May 2014)" + dsbyte byte + + storage storageBuf + + // Specific to SHA-3 and SHAKE. + outputLen int // the default output size in bytes + state spongeDirection // whether the sponge is absorbing or squeezing +} + +// BlockSize returns the rate of sponge underlying this hash function. +func (d *State) BlockSize() int { return d.rate } + +// Size returns the output size of the hash function in bytes. +func (d *State) Size() int { return d.outputLen } + +// Reset clears the internal state by zeroing the sponge state and +// the byte buffer, and setting Sponge.state to absorbing. +func (d *State) Reset() { + // Zero the permutation's state. + for i := range d.a { + d.a[i] = 0 + } + d.state = spongeAbsorbing + d.bufo = 0 + d.bufe = 0 +} + +func (d *State) clone() *State { + ret := *d + return &ret +} + +// permute applies the KeccakF-1600 permutation. It handles +// any input-output buffering. 
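To make the dsbyte mechanics described above concrete, here is a stdlib-only sketch of the multi-rate pad10*1 rule for whole-byte messages, with the domain-separation byte folded in as the State comment explains (pad is an illustrative helper, not part of this file):

package main

import "fmt"

// pad extends msg with SHA-3's pad10*1 rule, where dsbyte already
// carries the domain-separation bits plus the first padding 1 bit
// (0x06 for SHA-3, 0x1f for SHAKE), as the comment above describes.
func pad(msg []byte, rate int, dsbyte byte) []byte {
	p := append(append([]byte{}, msg...), dsbyte)
	for len(p)%rate != 0 {
		p = append(p, 0)
	}
	// Final 1 bit is the MSB of the last byte under LSB-first bit
	// numbering; when only one padding byte fits it merges with dsbyte.
	p[len(p)-1] ^= 0x80
	return p
}

func main() {
	// SHA3-256: rate 136, dsbyte 0x06 (matching New256 in hashes.go).
	fmt.Println(len(pad([]byte("abc"), 136, 0x06))) // 136
}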
+func (d *State) permute() { + switch d.state { + case spongeAbsorbing: + // If we're absorbing, we need to xor the input into the state + // before applying the permutation. + xorIn(d, d.buf()) + d.bufe = 0 + d.bufo = 0 + KeccakF1600(&d.a) + case spongeSqueezing: + // If we're squeezing, we need to apply the permutation before + // copying more output. + KeccakF1600(&d.a) + d.bufe = d.rate + d.bufo = 0 + copyOut(d, d.buf()) + } +} + +// pads appends the domain separation bits in dsbyte, applies +// the multi-bitrate 10..1 padding rule, and permutes the state. +func (d *State) padAndPermute(dsbyte byte) { + // Pad with this instance's domain-separator bits. We know that there's + // at least one byte of space in d.buf() because, if it were full, + // permute would have been called to empty it. dsbyte also contains the + // first one bit for the padding. See the comment in the state struct. + zerosStart := d.bufe + 1 + d.bufe = d.rate + buf := d.buf() + buf[zerosStart-1] = dsbyte + for i := zerosStart; i < d.rate; i++ { + buf[i] = 0 + } + // This adds the final one bit for the padding. Because of the way that + // bits are numbered from the LSB upwards, the final bit is the MSB of + // the last byte. + buf[d.rate-1] ^= 0x80 + // Apply the permutation + d.permute() + d.state = spongeSqueezing + d.bufe = d.rate + copyOut(d, buf) +} + +// Write absorbs more data into the hash's state. It produces an error +// if more data is written to the ShakeHash after writing +func (d *State) Write(p []byte) (written int, err error) { + if d.state != spongeAbsorbing { + panic("sha3: write to sponge after read") + } + written = len(p) + + for len(p) > 0 { + bufl := d.bufe - d.bufo + if bufl == 0 && len(p) >= d.rate { + // The fast path; absorb a full "rate" bytes of input and apply the permutation. + xorIn(d, p[:d.rate]) + p = p[d.rate:] + KeccakF1600(&d.a) + } else { + // The slow path; buffer the input until we can fill the sponge, and then xor it in. + todo := d.rate - bufl + if todo > len(p) { + todo = len(p) + } + d.bufe += todo + buf := d.buf() + copy(buf[bufl:], p[:todo]) + p = p[todo:] + + // If the sponge is full, apply the permutation. + if d.bufe == d.rate { + d.permute() + } + } + } + + return written, nil +} + +// Read squeezes an arbitrary number of bytes from the sponge. +func (d *State) Read(out []byte) (n int, err error) { + // If we're still absorbing, pad and apply the permutation. + if d.state == spongeAbsorbing { + d.padAndPermute(d.dsbyte) + } + + n = len(out) + + // Now, do the squeezing. + for len(out) > 0 { + buf := d.buf() + n := copy(out, buf) + d.bufo += n + out = out[n:] + + // Apply the permutation if we've squeezed the sponge dry. + if d.bufo == d.bufe { + d.permute() + } + } + + return +} + +// Sum applies padding to the hash state and then squeezes out the desired +// number of output bytes. +func (d *State) Sum(in []byte) []byte { + // Make a copy of the original hash so that caller can keep writing + // and summing. + dup := d.clone() + hash := make([]byte, dup.outputLen) + _, _ = dup.Read(hash) + return append(in, hash...) +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s new file mode 100644 index 000000000..8a4458f63 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
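Because Sum above clones the sponge before squeezing, a caller can interleave Sum with further writes. A sketch under the same caveat that the package is internal to circl:

package main

import (
	"fmt"

	"github.com/cloudflare/circl/internal/sha3" // internal: importable only from within circl
)

func main() {
	h := sha3.New256()
	_, _ = h.Write([]byte("hello "))

	// Sum copies the state before squeezing, so writing can continue.
	partial := h.Sum(nil)

	_, _ = h.Write([]byte("world"))
	full := h.Sum(nil)

	// partial is the digest of "hello ", full of "hello world".
	fmt.Printf("%x\n%x\n", partial, full)
}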
+ +// +build !gccgo,!appengine + +#include "textflag.h" + +// func kimd(function code, chain *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, chain *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD chain+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go new file mode 100644 index 000000000..b92c5b7d7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go @@ -0,0 +1,79 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha3 + +// This file defines the ShakeHash interface, and provides +// functions for creating SHAKE and cSHAKE instances, as well as utility +// functions for hashing bytes to arbitrary-length output. +// +// +// SHAKE implementation is based on FIPS PUB 202 [1] +// cSHAKE implementations is based on NIST SP 800-185 [2] +// +// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf +// [2] https://doi.org/10.6028/NIST.SP.800-185 + +import ( + "io" +) + +// ShakeHash defines the interface to hash functions that +// support arbitrary-length output. +type ShakeHash interface { + // Write absorbs more data into the hash's state. It panics if input is + // written to it after output has been read from it. + io.Writer + + // Read reads more output from the hash; reading affects the hash's + // state. (ShakeHash.Read is thus very different from Hash.Sum) + // It never returns an error. + io.Reader + + // Clone returns a copy of the ShakeHash in its current state. + Clone() ShakeHash + + // Reset resets the ShakeHash to its initial state. + Reset() +} + +// Consts for configuring initial SHA-3 state +const ( + dsbyteShake = 0x1f + rate128 = 168 + rate256 = 136 +) + +// Clone returns copy of SHAKE context within its current state. +func (d *State) Clone() ShakeHash { + return d.clone() +} + +// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +func NewShake128() State { + return State{rate: rate128, dsbyte: dsbyteShake} +} + +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +func NewShake256() State { + return State{rate: rate256, dsbyte: dsbyteShake} +} + +// ShakeSum128 writes an arbitrary-length digest of data into hash. +func ShakeSum128(hash, data []byte) { + h := NewShake128() + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +// ShakeSum256 writes an arbitrary-length digest of data into hash. 
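The XOF side is used the same way, except the output length is the caller's choice. Again a sketch assuming code living inside the circl module, since the package is internal:

package main

import (
	"fmt"

	"github.com/cloudflare/circl/internal/sha3" // internal: importable only from within circl
)

func main() {
	// One-shot: Read squeezes as many bytes as requested
	// (64 here, for the full 256-bit security level).
	out := make([]byte, 64)
	sha3.ShakeSum256(out, []byte("input"))
	fmt.Printf("%x\n", out)

	// Incremental form via the State-based ShakeHash.
	h := sha3.NewShake256()
	_, _ = h.Write([]byte("in"))
	_, _ = h.Write([]byte("put"))
	_, _ = h.Read(out) // same 64 bytes as above
	fmt.Printf("%x\n", out)
}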
+func ShakeSum256(hash, data []byte) { + h := NewShake256() + _, _ = h.Write(data) + _, _ = h.Read(hash) +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go new file mode 100644 index 000000000..1e2133745 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go @@ -0,0 +1,15 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !386 && !ppc64le) || appengine +// +build !amd64,!386,!ppc64le appengine + +package sha3 + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate]byte + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(b) +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go new file mode 100644 index 000000000..2b0c66179 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go @@ -0,0 +1,33 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 || appengine) && (!386 || appengine) && (!ppc64le || appengine) +// +build !amd64 appengine +// +build !386 appengine +// +build !ppc64le appengine + +package sha3 + +import "encoding/binary" + +// xorIn xors the bytes in buf into the state; it +// makes no non-portable assumptions about memory layout +// or alignment. +func xorIn(d *State, buf []byte) { + n := len(buf) / 8 + + for i := 0; i < n; i++ { + a := binary.LittleEndian.Uint64(buf) + d.a[i] ^= a + buf = buf[8:] + } +} + +// copyOut copies uint64s to a byte buffer. +func copyOut(d *State, b []byte) { + for i := 0; len(b) >= 8; i++ { + binary.LittleEndian.PutUint64(b, d.a[i]) + b = b[8:] + } +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go new file mode 100644 index 000000000..052fc8d32 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go @@ -0,0 +1,61 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || 386 || ppc64le) && !appengine +// +build amd64 386 ppc64le +// +build !appengine + +package sha3 + +import "unsafe" + +// A storageBuf is an aligned array of maxRate bytes. +type storageBuf [maxRate / 8]uint64 + +func (b *storageBuf) asBytes() *[maxRate]byte { + return (*[maxRate]byte)(unsafe.Pointer(b)) +} + +// xorIn uses unaligned reads and writes to update d.a to contain d.a +// XOR buf.
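The generic and unaligned absorb paths implement the same operation on different views of the state; a quick stdlib-only check that XOR through the 64-bit little-endian lane view agrees with byte-wise XOR:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	state := []byte{0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x00, 0x11}
	buf := []byte{1, 2, 3, 4, 5, 6, 7, 8}

	// Lane-wise absorb, as the generic xorIn does it.
	word := binary.LittleEndian.Uint64(state) ^ binary.LittleEndian.Uint64(buf)
	viaWords := make([]byte, 8)
	binary.LittleEndian.PutUint64(viaWords, word)

	// Byte-wise absorb for comparison.
	viaBytes := make([]byte, 8)
	for i := range viaBytes {
		viaBytes[i] = state[i] ^ buf[i]
	}

	fmt.Println(bytes.Equal(viaWords, viaBytes)) // true: XOR commutes with the lane view
}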
+func xorIn(d *State, buf []byte) { + n := len(buf) + bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] + if n >= 72 { + d.a[0] ^= bw[0] + d.a[1] ^= bw[1] + d.a[2] ^= bw[2] + d.a[3] ^= bw[3] + d.a[4] ^= bw[4] + d.a[5] ^= bw[5] + d.a[6] ^= bw[6] + d.a[7] ^= bw[7] + d.a[8] ^= bw[8] + } + if n >= 104 { + d.a[9] ^= bw[9] + d.a[10] ^= bw[10] + d.a[11] ^= bw[11] + d.a[12] ^= bw[12] + } + if n >= 136 { + d.a[13] ^= bw[13] + d.a[14] ^= bw[14] + d.a[15] ^= bw[15] + d.a[16] ^= bw[16] + } + if n >= 144 { + d.a[17] ^= bw[17] + } + if n >= 168 { + d.a[18] ^= bw[18] + d.a[19] ^= bw[19] + d.a[20] ^= bw[20] + } +} + +func copyOut(d *State, buf []byte) { + ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) + copy(buf, ab[:]) +} diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go new file mode 100644 index 000000000..57a50ff5e --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go @@ -0,0 +1,205 @@ +// Package fp25519 provides prime field arithmetic over GF(2^255-19). +package fp25519 + +import ( + "errors" + + "github.com/cloudflare/circl/internal/conv" +) + +// Size in bytes of an element. +const Size = 32 + +// Elt is a prime field element. +type Elt [Size]byte + +func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) } + +// p is the prime modulus 2^255-19. +var p = Elt{ + 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, +} + +// P returns the prime modulus 2^255-19. +func P() Elt { return p } + +// ToBytes stores in b the little-endian byte representation of x. +func ToBytes(b []byte, x *Elt) error { + if len(b) != Size { + return errors.New("wrong size") + } + Modp(x) + copy(b, x[:]) + return nil +} + +// IsZero returns true if x is equal to 0. +func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} } + +// SetOne assigns x=1. +func SetOne(x *Elt) { *x = Elt{}; x[0] = 1 } + +// Neg calculates z = -x. +func Neg(z, x *Elt) { Sub(z, &p, x) } + +// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue, which is +// indicated by returning isQR = true. Otherwise, when x/y is a quadratic +// non-residue, z will have an undetermined value and isQR = false. 
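The exported wrappers of this package compose as ordinary field operations. A small sketch using the public import path shown in the diff header above:

package main

import (
	"fmt"

	"github.com/cloudflare/circl/math/fp25519"
)

func main() {
	// x = 2 as a little-endian field element.
	var x, minusX, sum fp25519.Elt
	x[0] = 2

	fp25519.Neg(&minusX, &x)       // -x = p - x
	fp25519.Add(&sum, &x, &minusX) // x + (-x) mod p

	fmt.Println(fp25519.IsZero(&sum)) // true
}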
+func InvSqrt(z, x, y *Elt) (isQR bool) { + sqrtMinusOne := &Elt{ + 0xb0, 0xa0, 0x0e, 0x4a, 0x27, 0x1b, 0xee, 0xc4, + 0x78, 0xe4, 0x2f, 0xad, 0x06, 0x18, 0x43, 0x2f, + 0xa7, 0xd7, 0xfb, 0x3d, 0x99, 0x00, 0x4d, 0x2b, + 0x0b, 0xdf, 0xc1, 0x4f, 0x80, 0x24, 0x83, 0x2b, + } + t0, t1, t2, t3 := &Elt{}, &Elt{}, &Elt{}, &Elt{} + + Mul(t0, x, y) // t0 = u*v + Sqr(t1, y) // t1 = v^2 + Mul(t2, t0, t1) // t2 = u*v^3 + Sqr(t0, t1) // t0 = v^4 + Mul(t1, t0, t2) // t1 = u*v^7 + + var Tab [4]*Elt + Tab[0] = &Elt{} + Tab[1] = &Elt{} + Tab[2] = t3 + Tab[3] = t1 + + *Tab[0] = *t1 + Sqr(Tab[0], Tab[0]) + Sqr(Tab[1], Tab[0]) + Sqr(Tab[1], Tab[1]) + Mul(Tab[1], Tab[1], Tab[3]) + Mul(Tab[0], Tab[0], Tab[1]) + Sqr(Tab[0], Tab[0]) + Mul(Tab[0], Tab[0], Tab[1]) + Sqr(Tab[1], Tab[0]) + for i := 0; i < 4; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[0]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 4; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[0]) + Sqr(Tab[1], Tab[2]) + for i := 0; i < 14; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[2]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 29; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[1]) + Sqr(Tab[1], Tab[2]) + for i := 0; i < 59; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[2]) + for i := 0; i < 5; i++ { + Sqr(Tab[1], Tab[1]) + } + Mul(Tab[1], Tab[1], Tab[0]) + Sqr(Tab[2], Tab[1]) + for i := 0; i < 124; i++ { + Sqr(Tab[2], Tab[2]) + } + Mul(Tab[2], Tab[2], Tab[1]) + Sqr(Tab[2], Tab[2]) + Sqr(Tab[2], Tab[2]) + Mul(Tab[2], Tab[2], Tab[3]) + + Mul(z, t3, t2) // z = xy^(p+3)/8 = xy^3*(xy^7)^(p-5)/8 + // Checking whether y z^2 == x + Sqr(t0, z) // t0 = z^2 + Mul(t0, t0, y) // t0 = yz^2 + Sub(t1, t0, x) // t1 = t0-u + Add(t2, t0, x) // t2 = t0+u + if IsZero(t1) { + return true + } else if IsZero(t2) { + Mul(z, z, sqrtMinusOne) // z = z*sqrt(-1) + return true + } else { + return false + } +} + +// Inv calculates z = 1/x mod p. +func Inv(z, x *Elt) { + x0, x1, x2 := &Elt{}, &Elt{}, &Elt{} + Sqr(x1, x) + Sqr(x0, x1) + Sqr(x0, x0) + Mul(x0, x0, x) + Mul(z, x0, x1) + Sqr(x1, z) + Mul(x0, x0, x1) + Sqr(x1, x0) + for i := 0; i < 4; i++ { + Sqr(x1, x1) + } + Mul(x0, x0, x1) + Sqr(x1, x0) + for i := 0; i < 9; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x0) + Sqr(x2, x1) + for i := 0; i < 19; i++ { + Sqr(x2, x2) + } + Mul(x2, x2, x1) + for i := 0; i < 10; i++ { + Sqr(x2, x2) + } + Mul(x2, x2, x0) + Sqr(x0, x2) + for i := 0; i < 49; i++ { + Sqr(x0, x0) + } + Mul(x0, x0, x2) + Sqr(x1, x0) + for i := 0; i < 99; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x0) + for i := 0; i < 50; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, x2) + for i := 0; i < 5; i++ { + Sqr(x1, x1) + } + Mul(z, z, x1) +} + +// Cmov assigns y to x if n is 1. +func Cmov(x, y *Elt, n uint) { cmov(x, y, n) } + +// Cswap interchanges x and y if n is 1. +func Cswap(x, y *Elt, n uint) { cswap(x, y, n) } + +// Add calculates z = x+y mod p. +func Add(z, x, y *Elt) { add(z, x, y) } + +// Sub calculates z = x-y mod p. +func Sub(z, x, y *Elt) { sub(z, x, y) } + +// AddSub calculates (x,y) = (x+y mod p, x-y mod p). +func AddSub(x, y *Elt) { addsub(x, y) } + +// Mul calculates z = x*y mod p. +func Mul(z, x, y *Elt) { mul(z, x, y) } + +// Sqr calculates z = x^2 mod p. +func Sqr(z, x *Elt) { sqr(z, x) } + +// Modp ensures that z is between [0,p-1]. 
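The addition chain in Inv above evaluates x^(p-2) mod p, which is x^-1 by Fermat's little theorem. A quick stdlib-only cross-check of that identity for p = 2^255-19:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the modulus of fp25519.
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))

	x := big.NewInt(12345)
	// Fermat: x^(p-2) ≡ x^-1 (mod p), which is what Inv's
	// square-and-multiply chain evaluates.
	inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)

	check := new(big.Int).Mul(x, inv)
	check.Mod(check, p)
	fmt.Println(check) // 1
}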
+func Modp(z *Elt) { modp(z) } diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go new file mode 100644 index 000000000..057f0d280 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go @@ -0,0 +1,45 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package fp25519 + +import ( + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) } +func add(z, x, y *Elt) { addAmd64(z, x, y) } +func sub(z, x, y *Elt) { subAmd64(z, x, y) } +func addsub(x, y *Elt) { addsubAmd64(x, y) } +func mul(z, x, y *Elt) { mulAmd64(z, x, y) } +func sqr(z, x *Elt) { sqrAmd64(z, x) } +func modp(z *Elt) { modpAmd64(z) } + +//go:noescape +func cmovAmd64(x, y *Elt, n uint) + +//go:noescape +func cswapAmd64(x, y *Elt, n uint) + +//go:noescape +func addAmd64(z, x, y *Elt) + +//go:noescape +func subAmd64(z, x, y *Elt) + +//go:noescape +func addsubAmd64(x, y *Elt) + +//go:noescape +func mulAmd64(z, x, y *Elt) + +//go:noescape +func sqrAmd64(z, x *Elt) + +//go:noescape +func modpAmd64(z *Elt) diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h new file mode 100644 index 000000000..b884b584a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h @@ -0,0 +1,351 @@ +// This code was imported from https://github.com/armfazh/rfc7748_precomputed + +// CHECK_BMI2ADX triggers bmi2adx if supported, +// otherwise it fallbacks to legacy code. +#define CHECK_BMI2ADX(label, legacy, bmi2adx) \ + CMPB ·hasBmi2Adx(SB), $0 \ + JE label \ + bmi2adx \ + RET \ + label: \ + legacy \ + RET + +// cselect is a conditional move +// if b=1: it copies y into x; +// if b=0: x remains with the same value; +// if b<> 0,1: undefined. +// Uses: AX, DX, FLAGS +// Instr: x86_64, cmov +#define cselect(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \ + MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \ + MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \ + MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; + +// cswap is a conditional swap +// if b=1: x,y <- y,x; +// if b=0: x,y remain with the same values; +// if b<> 0,1: undefined. 
+// Uses: AX, DX, R8, FLAGS +// Instr: x86_64, cmov +#define cswap(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \ + MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \ + MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \ + MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; + +// additionLeg adds x and y and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov +#define additionLeg(z,x,y) \ + MOVL $38, AX; \ + MOVL $0, DX; \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ $0, R11; MOVQ R11, 24+z; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + ADDQ DX, R8; MOVQ R8, 0+z; + +// additionAdx adds x and y and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov, adx +#define additionAdx(z,x,y) \ + MOVL $38, AX; \ + XORL DX, DX; \ + MOVQ 0+x, R8; ADCXQ 0+y, R8; \ + MOVQ 8+x, R9; ADCXQ 8+y, R9; \ + MOVQ 16+x, R10; ADCXQ 16+y, R10; \ + MOVQ 24+x, R11; ADCXQ 24+y, R11; \ + CMOVQCS AX, DX ; \ + XORL AX, AX; \ + ADCXQ DX, R8; \ + ADCXQ AX, R9; MOVQ R9, 8+z; \ + ADCXQ AX, R10; MOVQ R10, 16+z; \ + ADCXQ AX, R11; MOVQ R11, 24+z; \ + MOVL $38, DX; \ + CMOVQCS DX, AX; \ + ADDQ AX, R8; MOVQ R8, 0+z; + +// subtraction subtracts y from x and stores in z +// Uses: AX, DX, R8-R11, FLAGS +// Instr: x86_64, cmov +#define subtraction(z,x,y) \ + MOVL $38, AX; \ + MOVQ 0+x, R8; SUBQ 0+y, R8; \ + MOVQ 8+x, R9; SBBQ 8+y, R9; \ + MOVQ 16+x, R10; SBBQ 16+y, R10; \ + MOVQ 24+x, R11; SBBQ 24+y, R11; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + SUBQ DX, R8; \ + SBBQ $0, R9; MOVQ R9, 8+z; \ + SBBQ $0, R10; MOVQ R10, 16+z; \ + SBBQ $0, R11; MOVQ R11, 24+z; \ + MOVL $0, DX; \ + CMOVQCS AX, DX; \ + SUBQ DX, R8; MOVQ R8, 0+z; + +// integerMulAdx multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerMulAdx(z,x,y) \ + MOVL $0,R15; \ + MOVQ 0+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; MOVQ AX, 0+z; \ + MULXQ 8+x, AX, R9; ADCXQ AX, R8; \ + MULXQ 16+x, AX, R10; ADCXQ AX, R9; \ + MULXQ 24+x, AX, R11; ADCXQ AX, R10; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; \ + MOVQ 8+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 8+z; \ + MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; \ + MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; \ + MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; \ + MOVQ 16+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; ADCXQ R12, AX; MOVQ AX, 16+z; \ + MULXQ 8+x, AX, R9; ADCXQ R13, R8; ADOXQ AX, R8; \ + MULXQ 16+x, AX, R10; ADCXQ R14, R9; ADOXQ AX, R9; \ + MULXQ 24+x, AX, R11; ADCXQ R15, R10; ADOXQ AX, R10; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; ADOXQ AX, R11; \ + MOVQ 24+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 24+z; \ + MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; MOVQ R12, 32+z; \ + MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; MOVQ R13, 40+z; \ + MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; MOVQ R14, 48+z; \ + MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; MOVQ R15, 56+z; + +// integerMulLeg multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define 
integerMulLeg(z,x,y) \ + MOVQ 0+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, 0+z; MOVQ DX, R15; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R13, R15; \ + ADCQ R14, R10; MOVQ R10, 16+z; \ + ADCQ AX, R11; MOVQ R11, 24+z; \ + ADCQ $0, DX; MOVQ DX, 32+z; \ + MOVQ 8+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 8+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 16+z, R9; MOVQ R9, R15; \ + ADCQ 24+z, R10; MOVQ R10, 24+z; \ + ADCQ 32+z, R11; MOVQ R11, 32+z; \ + ADCQ $0, DX; MOVQ DX, 40+z; \ + MOVQ 16+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 16+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 24+z, R9; MOVQ R9, R15; \ + ADCQ 32+z, R10; MOVQ R10, 32+z; \ + ADCQ 40+z, R11; MOVQ R11, 40+z; \ + ADCQ $0, DX; MOVQ DX, 48+z; \ + MOVQ 24+y, R8; \ + MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R8; \ + ADDQ R12, R15; MOVQ R15, 24+z; \ + ADCQ R13, R9; \ + ADCQ R14, R10; \ + ADCQ AX, R11; \ + ADCQ $0, DX; \ + ADCQ 32+z, R9; MOVQ R9, 32+z; \ + ADCQ 40+z, R10; MOVQ R10, 40+z; \ + ADCQ 48+z, R11; MOVQ R11, 48+z; \ + ADCQ $0, DX; MOVQ DX, 56+z; + +// integerSqrLeg squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerSqrLeg(z,x) \ + MOVQ 0+x, R8; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, R9; MOVQ DX, R10; /* A[0]*A[1] */ \ + MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; /* A[0]*A[2] */ \ + MOVQ 24+x, AX; MULQ R8; MOVQ AX, R15; MOVQ DX, R12; /* A[0]*A[3] */ \ + MOVQ 24+x, R8; \ + MOVQ 8+x, AX; MULQ R8; MOVQ AX, CX; MOVQ DX, R13; /* A[3]*A[1] */ \ + MOVQ 16+x, AX; MULQ R8; /* A[3]*A[2] */ \ + \ + ADDQ R14, R10;\ + ADCQ R15, R11; MOVL $0, R15;\ + ADCQ CX, R12;\ + ADCQ AX, R13;\ + ADCQ $0, DX; MOVQ DX, R14;\ + MOVQ 8+x, AX; MULQ 16+x;\ + \ + ADDQ AX, R11;\ + ADCQ DX, R12;\ + ADCQ $0, R13;\ + ADCQ $0, R14;\ + ADCQ $0, R15;\ + \ + SHLQ $1, R14, R15; MOVQ R15, 56+z;\ + SHLQ $1, R13, R14; MOVQ R14, 48+z;\ + SHLQ $1, R12, R13; MOVQ R13, 40+z;\ + SHLQ $1, R11, R12; MOVQ R12, 32+z;\ + SHLQ $1, R10, R11; MOVQ R11, 24+z;\ + SHLQ $1, R9, R10; MOVQ R10, 16+z;\ + SHLQ $1, R9; MOVQ R9, 8+z;\ + \ + MOVQ 0+x,AX; MULQ AX; MOVQ AX, 0+z; MOVQ DX, R9;\ + MOVQ 8+x,AX; MULQ AX; MOVQ AX, R10; MOVQ DX, R11;\ + MOVQ 16+x,AX; MULQ AX; MOVQ AX, R12; MOVQ DX, R13;\ + MOVQ 24+x,AX; MULQ AX; MOVQ AX, R14; MOVQ DX, R15;\ + \ + ADDQ 8+z, R9; MOVQ R9, 8+z;\ + ADCQ 16+z, R10; MOVQ R10, 16+z;\ + ADCQ 24+z, R11; MOVQ R11, 24+z;\ + ADCQ 32+z, R12; MOVQ R12, 32+z;\ + ADCQ 40+z, R13; MOVQ R13, 40+z;\ + ADCQ 48+z, R14; MOVQ R14, 48+z;\ + ADCQ 56+z, R15; MOVQ R15, 56+z; + +// integerSqrAdx squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerSqrAdx(z,x) \ + MOVQ 0+x, DX; /* A[0] */ \ + MULXQ 8+x, R8, R14; /* A[1]*A[0] */ XORL R15, R15; \ + MULXQ 16+x, R9, R10; /* A[2]*A[0] */ ADCXQ R14, R9; \ + MULXQ 24+x, AX, CX; /* A[3]*A[0] */ ADCXQ AX, R10; \ + MOVQ 24+x, DX; /* A[3] */ \ + MULXQ 8+x, 
R11, R12; /* A[1]*A[3] */ ADCXQ CX, R11; \ + MULXQ 16+x, AX, R13; /* A[2]*A[3] */ ADCXQ AX, R12; \ + MOVQ 8+x, DX; /* A[1] */ ADCXQ R15, R13; \ + MULXQ 16+x, AX, CX; /* A[2]*A[1] */ MOVL $0, R14; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADCXQ R15, R14; \ + XORL R15, R15; \ + ADOXQ AX, R10; ADCXQ R8, R8; \ + ADOXQ CX, R11; ADCXQ R9, R9; \ + ADOXQ R15, R12; ADCXQ R10, R10; \ + ADOXQ R15, R13; ADCXQ R11, R11; \ + ADOXQ R15, R14; ADCXQ R12, R12; \ + ;;;;;;;;;;;;;;; ADCXQ R13, R13; \ + ;;;;;;;;;;;;;;; ADCXQ R14, R14; \ + MOVQ 0+x, DX; MULXQ DX, AX, CX; /* A[0]^2 */ \ + ;;;;;;;;;;;;;;; MOVQ AX, 0+z; \ + ADDQ CX, R8; MOVQ R8, 8+z; \ + MOVQ 8+x, DX; MULXQ DX, AX, CX; /* A[1]^2 */ \ + ADCQ AX, R9; MOVQ R9, 16+z; \ + ADCQ CX, R10; MOVQ R10, 24+z; \ + MOVQ 16+x, DX; MULXQ DX, AX, CX; /* A[2]^2 */ \ + ADCQ AX, R11; MOVQ R11, 32+z; \ + ADCQ CX, R12; MOVQ R12, 40+z; \ + MOVQ 24+x, DX; MULXQ DX, AX, CX; /* A[3]^2 */ \ + ADCQ AX, R13; MOVQ R13, 48+z; \ + ADCQ CX, R14; MOVQ R14, 56+z; + +// reduceFromDouble finds z congruent to x modulo p such that 0> 63) + // PUT BIT 255 IN CARRY FLAG AND CLEAR + x3 &^= 1 << 63 + + x0, c0 := bits.Add64(x0, cx, 0) + x1, c1 := bits.Add64(x1, 0, c0) + x2, c2 := bits.Add64(x2, 0, c1) + x3, _ = bits.Add64(x3, 0, c2) + + // TEST FOR BIT 255 AGAIN; ONLY TRIGGERED ON OVERFLOW MODULO 2^255-19 + // cx = C[255] ? 0 : 19 + cx = uint64(19) &^ (-(x3 >> 63)) + // CLEAR BIT 255 + x3 &^= 1 << 63 + + x0, c0 = bits.Sub64(x0, cx, 0) + x1, c1 = bits.Sub64(x1, 0, c0) + x2, c2 = bits.Sub64(x2, 0, c1) + x3, _ = bits.Sub64(x3, 0, c2) + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) +} + +func red64(z *Elt, x0, x1, x2, x3, x4, x5, x6, x7 uint64) { + h0, l0 := bits.Mul64(x4, 38) + h1, l1 := bits.Mul64(x5, 38) + h2, l2 := bits.Mul64(x6, 38) + h3, l3 := bits.Mul64(x7, 38) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + l0, c0 = bits.Add64(l0, x0, 0) + l1, c1 = bits.Add64(l1, x1, c0) + l2, c2 = bits.Add64(l2, x2, c1) + l3, c3 := bits.Add64(l3, x3, c2) + l4, _ = bits.Add64(l4, 0, c3) + + _, l4 = bits.Mul64(l4, 38) + l0, c0 = bits.Add64(l0, l4, 0) + z1, c1 := bits.Add64(l1, 0, c0) + z2, c2 := bits.Add64(l2, 0, c1) + z3, c3 := bits.Add64(l3, 0, c2) + z0, _ := bits.Add64(l0, (-c3)&38, 0) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) +} diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go new file mode 100644 index 000000000..26ca4d01b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go @@ -0,0 +1,13 @@ +//go:build !amd64 || purego +// +build !amd64 purego + +package fp25519 + +func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) } +func add(z, x, y *Elt) { addGeneric(z, x, y) } +func sub(z, x, y *Elt) { subGeneric(z, x, y) } +func addsub(x, y *Elt) { addsubGeneric(x, y) } +func mul(z, x, y *Elt) { mulGeneric(z, x, y) } +func sqr(z, x *Elt) { sqrGeneric(z, x) } +func modp(z *Elt) { modpGeneric(z) } diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp.go b/vendor/github.com/cloudflare/circl/math/fp448/fp.go new file mode 100644 index 000000000..a5e36600b 
--- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp.go @@ -0,0 +1,164 @@ +// Package fp448 provides prime field arithmetic over GF(2^448-2^224-1). +package fp448 + +import ( + "errors" + + "github.com/cloudflare/circl/internal/conv" +) + +// Size in bytes of an element. +const Size = 56 + +// Elt is a prime field element. +type Elt [Size]byte + +func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) } + +// p is the prime modulus 2^448-2^224-1. +var p = Elt{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +} + +// P returns the prime modulus 2^448-2^224-1. +func P() Elt { return p } + +// ToBytes stores in b the little-endian byte representation of x. +func ToBytes(b []byte, x *Elt) error { + if len(b) != Size { + return errors.New("wrong size") + } + Modp(x) + copy(b, x[:]) + return nil +} + +// IsZero returns true if x is equal to 0. +func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} } + +// IsOne returns true if x is equal to 1. +func IsOne(x *Elt) bool { Modp(x); return *x == Elt{1} } + +// SetOne assigns x=1. +func SetOne(x *Elt) { *x = Elt{1} } + +// One returns the 1 element. +func One() (x Elt) { x = Elt{1}; return } + +// Neg calculates z = -x. +func Neg(z, x *Elt) { Sub(z, &p, x) } + +// Modp ensures that z is between [0,p-1]. +func Modp(z *Elt) { Sub(z, z, &p) } + +// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue. If so, +// isQR = true; otherwise, isQR = false, since x/y is a quadratic non-residue, +// and z = sqrt(-x/y). +func InvSqrt(z, x, y *Elt) (isQR bool) { + // First note that x^(2(k+1)) = x^(p-1)/2 * x = legendre(x) * x + // so that's x if x is a quadratic residue and -x otherwise. + // Next, y^(6k+3) = y^(4k+2) * y^(2k+1) = y^(p-1) * y^((p-1)/2) = legendre(y). + // So the z we compute satisfies z^2 y = x^(2(k+1)) y^(6k+3) = legendre(x)*legendre(y). + // Thus if x and y are quadratic residues, then z is indeed sqrt(x/y). + t0, t1 := &Elt{}, &Elt{} + Mul(t0, x, y) // x*y + Sqr(t1, y) // y^2 + Mul(t1, t0, t1) // x*y^3 + powPminus3div4(z, t1) // (x*y^3)^k + Mul(z, z, t0) // z = x*y*(x*y^3)^k = x^(k+1) * y^(3k+1) + + // Check if x/y is a quadratic residue + Sqr(t0, z) // z^2 + Mul(t0, t0, y) // y*z^2 + Sub(t0, t0, x) // y*z^2-x + return IsZero(t0) +} + +// Inv calculates z = 1/x mod p. +func Inv(z, x *Elt) { + // Calculates z = x^(4k+1) = x^(p-3+1) = x^(p-2) = x^-1, where k = (p-3)/4. + t := &Elt{} + powPminus3div4(t, x) // t = x^k + Sqr(t, t) // t = x^2k + Sqr(t, t) // t = x^4k + Mul(z, t, x) // z = x^(4k+1) +} + +// powPminus3div4 calculates z = x^k mod p, where k = (p-3)/4. 
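The chain below raises x to k = (p-3)/4; Inv then computes x^(4k+1), which equals x^(p-2) because p ≡ 3 (mod 4). A stdlib-only check of that exponent identity:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^448 - 2^224 - 1, the modulus of fp448.
	one := big.NewInt(1)
	p := new(big.Int).Lsh(one, 448)
	p.Sub(p, new(big.Int).Lsh(one, 224))
	p.Sub(p, one)

	// k = (p-3)/4 as used by powPminus3div4. Check 4k+1 == p-2,
	// so Inv's result is x^(p-2) = x^-1 by Fermat.
	k := new(big.Int).Sub(p, big.NewInt(3))
	k.Div(k, big.NewInt(4))
	lhs := new(big.Int).Mul(k, big.NewInt(4))
	lhs.Add(lhs, one)
	rhs := new(big.Int).Sub(p, big.NewInt(2))
	fmt.Println(lhs.Cmp(rhs) == 0) // true
}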
+func powPminus3div4(z, x *Elt) { + x0, x1 := &Elt{}, &Elt{} + Sqr(z, x) + Mul(z, z, x) + Sqr(x0, z) + Mul(x0, x0, x) + Sqr(z, x0) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 5; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + for i := 0; i < 11; i++ { + Sqr(z, z) + } + Mul(z, z, x1) + Sqr(z, z) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 26; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + for i := 0; i < 53; i++ { + Sqr(z, z) + } + Mul(z, z, x1) + Sqr(z, z) + Sqr(z, z) + Sqr(z, z) + Mul(z, z, x0) + Sqr(x1, z) + for i := 0; i < 110; i++ { + Sqr(x1, x1) + } + Mul(x1, x1, z) + Sqr(z, x1) + Mul(z, z, x) + for i := 0; i < 223; i++ { + Sqr(z, z) + } + Mul(z, z, x1) +} + +// Cmov assigns y to x if n is 1. +func Cmov(x, y *Elt, n uint) { cmov(x, y, n) } + +// Cswap interchanges x and y if n is 1. +func Cswap(x, y *Elt, n uint) { cswap(x, y, n) } + +// Add calculates z = x+y mod p. +func Add(z, x, y *Elt) { add(z, x, y) } + +// Sub calculates z = x-y mod p. +func Sub(z, x, y *Elt) { sub(z, x, y) } + +// AddSub calculates (x,y) = (x+y mod p, x-y mod p). +func AddSub(x, y *Elt) { addsub(x, y) } + +// Mul calculates z = x*y mod p. +func Mul(z, x, y *Elt) { mul(z, x, y) } + +// Sqr calculates z = x^2 mod p. +func Sqr(z, x *Elt) { sqr(z, x) } diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go new file mode 100644 index 000000000..6a12209a7 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go @@ -0,0 +1,43 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package fp448 + +import ( + "golang.org/x/sys/cpu" +) + +var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX + +var _ = hasBmi2Adx + +func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) } +func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) } +func add(z, x, y *Elt) { addAmd64(z, x, y) } +func sub(z, x, y *Elt) { subAmd64(z, x, y) } +func addsub(x, y *Elt) { addsubAmd64(x, y) } +func mul(z, x, y *Elt) { mulAmd64(z, x, y) } +func sqr(z, x *Elt) { sqrAmd64(z, x) } + +/* Functions defined in fp_amd64.s */ + +//go:noescape +func cmovAmd64(x, y *Elt, n uint) + +//go:noescape +func cswapAmd64(x, y *Elt, n uint) + +//go:noescape +func addAmd64(z, x, y *Elt) + +//go:noescape +func subAmd64(z, x, y *Elt) + +//go:noescape +func addsubAmd64(x, y *Elt) + +//go:noescape +func mulAmd64(z, x, y *Elt) + +//go:noescape +func sqrAmd64(z, x *Elt) diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h new file mode 100644 index 000000000..536fe5bdf --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h @@ -0,0 +1,591 @@ +// This code was imported from https://github.com/armfazh/rfc7748_precomputed + +// CHECK_BMI2ADX triggers bmi2adx if supported, +// otherwise it fallbacks to legacy code. +#define CHECK_BMI2ADX(label, legacy, bmi2adx) \ + CMPB ·hasBmi2Adx(SB), $0 \ + JE label \ + bmi2adx \ + RET \ + label: \ + legacy \ + RET + +// cselect is a conditional move +// if b=1: it copies y into x; +// if b=0: x remains with the same value; +// if b<> 0,1: undefined. 
+// Uses: AX, DX, FLAGS +// Instr: x86_64, cmov +#define cselect(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \ + MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \ + MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \ + MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; \ + MOVQ 32+x, AX; MOVQ 32+y, DX; CMOVQNE DX, AX; MOVQ AX, 32+x; \ + MOVQ 40+x, AX; MOVQ 40+y, DX; CMOVQNE DX, AX; MOVQ AX, 40+x; \ + MOVQ 48+x, AX; MOVQ 48+y, DX; CMOVQNE DX, AX; MOVQ AX, 48+x; + +// cswap is a conditional swap +// if b=1: x,y <- y,x; +// if b=0: x,y remain with the same values; +// if b<> 0,1: undefined. +// Uses: AX, DX, R8, FLAGS +// Instr: x86_64, cmov +#define cswap(x,y,b) \ + TESTQ b, b \ + MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \ + MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \ + MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \ + MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; \ + MOVQ 32+x, AX; MOVQ AX, R8; MOVQ 32+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 32+x; MOVQ DX, 32+y; \ + MOVQ 40+x, AX; MOVQ AX, R8; MOVQ 40+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 40+x; MOVQ DX, 40+y; \ + MOVQ 48+x, AX; MOVQ AX, R8; MOVQ 48+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 48+x; MOVQ DX, 48+y; + +// additionLeg adds x and y and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64 +#define additionLeg(z,x,y) \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + MOVQ 32+x, R12; ADCQ 32+y, R12; \ + MOVQ 40+x, R13; ADCQ 40+y, R13; \ + MOVQ 48+x, R14; ADCQ 48+y, R14; \ + MOVQ $0, AX; ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ $0, AX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ DX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ R8, 0+z; \ + ADCQ $0, R9; MOVQ R9, 8+z; \ + ADCQ $0, R10; MOVQ R10, 16+z; \ + ADCQ DX, R11; MOVQ R11, 24+z; \ + ADCQ $0, R12; MOVQ R12, 32+z; \ + ADCQ $0, R13; MOVQ R13, 40+z; \ + ADCQ $0, R14; MOVQ R14, 48+z; + + +// additionAdx adds x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, adx +#define additionAdx(z,x,y) \ + MOVL $32, R15; \ + XORL DX, DX; \ + MOVQ 0+x, R8; ADCXQ 0+y, R8; \ + MOVQ 8+x, R9; ADCXQ 8+y, R9; \ + MOVQ 16+x, R10; ADCXQ 16+y, R10; \ + MOVQ 24+x, R11; ADCXQ 24+y, R11; \ + MOVQ 32+x, R12; ADCXQ 32+y, R12; \ + MOVQ 40+x, R13; ADCXQ 40+y, R13; \ + MOVQ 48+x, R14; ADCXQ 48+y, R14; \ + ;;;;;;;;;;;;;;; ADCXQ DX, DX; \ + XORL AX, AX; \ + ADCXQ DX, R8; SHLXQ R15, DX, DX; \ + ADCXQ AX, R9; \ + ADCXQ AX, R10; \ + ADCXQ DX, R11; \ + ADCXQ AX, R12; \ + ADCXQ AX, R13; \ + ADCXQ AX, R14; \ + ADCXQ AX, AX; \ + XORL DX, DX; \ + ADCXQ AX, R8; MOVQ R8, 0+z; SHLXQ R15, AX, AX; \ + ADCXQ DX, R9; MOVQ R9, 8+z; \ + ADCXQ DX, R10; MOVQ R10, 16+z; \ + ADCXQ AX, R11; MOVQ R11, 24+z; \ + ADCXQ DX, R12; MOVQ R12, 32+z; \ + ADCXQ DX, R13; MOVQ R13, 40+z; \ + ADCXQ DX, R14; MOVQ R14, 48+z; + +// subtraction subtracts y from x and stores in z +// Uses: AX, DX, R8-R14, FLAGS +// Instr: x86_64 +#define subtraction(z,x,y) \ + MOVQ 0+x, R8; SUBQ 0+y, R8; \ + MOVQ 8+x, R9; SBBQ 8+y, R9; \ + MOVQ 16+x, R10; SBBQ 16+y, R10; \ + MOVQ 24+x, R11; SBBQ 24+y, R11; \ + MOVQ 
32+x, R12; SBBQ 32+y, R12; \ + MOVQ 40+x, R13; SBBQ 40+y, R13; \ + MOVQ 48+x, R14; SBBQ 48+y, R14; \ + MOVQ $0, AX; SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ $0, AX; \ + SBBQ $0, R9; \ + SBBQ $0, R10; \ + SBBQ DX, R11; \ + SBBQ $0, R12; \ + SBBQ $0, R13; \ + SBBQ $0, R14; \ + SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ R8, 0+z; \ + SBBQ $0, R9; MOVQ R9, 8+z; \ + SBBQ $0, R10; MOVQ R10, 16+z; \ + SBBQ DX, R11; MOVQ R11, 24+z; \ + SBBQ $0, R12; MOVQ R12, 32+z; \ + SBBQ $0, R13; MOVQ R13, 40+z; \ + SBBQ $0, R14; MOVQ R14, 48+z; + +// maddBmi2Adx multiplies x and y and accumulates in z +// Uses: AX, DX, R15, FLAGS +// Instr: x86_64, bmi2, adx +#define maddBmi2Adx(z,x,y,i,r0,r1,r2,r3,r4,r5,r6) \ + MOVQ i+y, DX; XORL AX, AX; \ + MULXQ 0+x, AX, R8; ADOXQ AX, r0; ADCXQ R8, r1; MOVQ r0,i+z; \ + MULXQ 8+x, AX, r0; ADOXQ AX, r1; ADCXQ r0, r2; MOVQ $0, R8; \ + MULXQ 16+x, AX, r0; ADOXQ AX, r2; ADCXQ r0, r3; \ + MULXQ 24+x, AX, r0; ADOXQ AX, r3; ADCXQ r0, r4; \ + MULXQ 32+x, AX, r0; ADOXQ AX, r4; ADCXQ r0, r5; \ + MULXQ 40+x, AX, r0; ADOXQ AX, r5; ADCXQ r0, r6; \ + MULXQ 48+x, AX, r0; ADOXQ AX, r6; ADCXQ R8, r0; \ + ;;;;;;;;;;;;;;;;;;; ADOXQ R8, r0; + +// integerMulAdx multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerMulAdx(z,x,y) \ + MOVL $0,R15; \ + MOVQ 0+y, DX; XORL AX, AX; MOVQ $0, R8; \ + MULXQ 0+x, AX, R9; MOVQ AX, 0+z; \ + MULXQ 8+x, AX, R10; ADCXQ AX, R9; \ + MULXQ 16+x, AX, R11; ADCXQ AX, R10; \ + MULXQ 24+x, AX, R12; ADCXQ AX, R11; \ + MULXQ 32+x, AX, R13; ADCXQ AX, R12; \ + MULXQ 40+x, AX, R14; ADCXQ AX, R13; \ + MULXQ 48+x, AX, R15; ADCXQ AX, R14; \ + ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R15; \ + maddBmi2Adx(z,x,y, 8, R9,R10,R11,R12,R13,R14,R15) \ + maddBmi2Adx(z,x,y,16,R10,R11,R12,R13,R14,R15, R9) \ + maddBmi2Adx(z,x,y,24,R11,R12,R13,R14,R15, R9,R10) \ + maddBmi2Adx(z,x,y,32,R12,R13,R14,R15, R9,R10,R11) \ + maddBmi2Adx(z,x,y,40,R13,R14,R15, R9,R10,R11,R12) \ + maddBmi2Adx(z,x,y,48,R14,R15, R9,R10,R11,R12,R13) \ + MOVQ R15, 56+z; \ + MOVQ R9, 64+z; \ + MOVQ R10, 72+z; \ + MOVQ R11, 80+z; \ + MOVQ R12, 88+z; \ + MOVQ R13, 96+z; \ + MOVQ R14, 104+z; + +// maddLegacy multiplies x and y and accumulates in z +// Uses: AX, DX, R15, FLAGS +// Instr: x86_64 +#define maddLegacy(z,x,y,i) \ + MOVQ i+y, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \ + ADDQ 0+i+z, R8; MOVQ R8, 0+i+z; \ + ADCQ 8+i+z, R9; MOVQ R9, 8+i+z; \ + ADCQ 16+i+z, R10; MOVQ R10, 16+i+z; \ + ADCQ 24+i+z, R11; MOVQ R11, 24+i+z; \ + ADCQ 32+i+z, R12; MOVQ R12, 32+i+z; \ + ADCQ 40+i+z, R13; MOVQ R13, 40+i+z; \ + ADCQ 48+i+z, R14; MOVQ R14, 48+i+z; \ + ADCQ $0, DX; MOVQ DX, 56+i+z; + +// integerMulLeg multiplies x and y and stores in z +// Uses: AX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerMulLeg(z,x,y) \ + MOVQ 0+y, R15; \ + MOVQ 0+x, AX; MULQ R15; MOVQ AX, 0+z; ;;;;;;;;;;;; MOVQ DX, R8; \ + MOVQ 8+x, AX; MULQ R15; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \ + MOVQ 16+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; MOVQ R9, 16+z; \ + MOVQ 24+x, AX; MULQ R15; ADDQ AX, R10; ADCQ 
$0, DX; MOVQ DX, R11; MOVQ R10, 24+z; \ + MOVQ 32+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; MOVQ R11, 32+z; \ + MOVQ 40+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; MOVQ R12, 40+z; \ + MOVQ 48+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX,56+z; MOVQ R13, 48+z; \ + maddLegacy(z,x,y, 8) \ + maddLegacy(z,x,y,16) \ + maddLegacy(z,x,y,24) \ + maddLegacy(z,x,y,32) \ + maddLegacy(z,x,y,40) \ + maddLegacy(z,x,y,48) + +// integerSqrLeg squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64 +#define integerSqrLeg(z,x) \ + XORL R15, R15; \ + MOVQ 0+x, CX; \ + MOVQ CX, AX; MULQ CX; MOVQ AX, 0+z; MOVQ DX, R8; \ + ADDQ CX, CX; ADCQ $0, R15; \ + MOVQ 8+x, AX; MULQ CX; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \ + MOVQ 16+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \ + \ + MOVQ 8+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9,16+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 8+x, AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \ + ADDQ 8+x, CX; ADCQ $0, R15; \ + MOVQ 16+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 24+z; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R9; \ + \ + MOVQ 16+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 32+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 16+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \ + ADDQ 16+x, CX; ADCQ $0, R15; \ + MOVQ 24+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 40+z; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX,R10; \ + \ + MOVQ 24+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 48+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 24+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \ + ADDQ 24+x, CX; ADCQ $0, R15; \ + MOVQ 32+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; MOVQ R14, 56+z; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX, R8; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX,R11; \ + \ + MOVQ 32+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9, 64+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 32+x,AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \ + ADDQ 32+x, CX; ADCQ $0, R15; \ + MOVQ 40+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 72+z; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, 
DX; MOVQ DX,R12; \ + \ + XORL R13, R13; \ + XORL R14, R14; \ + MOVQ 40+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 80+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 40+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \ + ADDQ 40+x, CX; ADCQ $0, R15; \ + MOVQ 48+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 88+z; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8, R13; ADCQ $0,R14; \ + \ + XORL R9, R9; \ + MOVQ 48+x, CX; \ + MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \ + ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 96+z; \ + MOVQ R15, AX; NEGQ AX; ANDQ 48+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \ + ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8,R14; ADCQ $0, R9; MOVQ R14, 104+z; + + +// integerSqrAdx squares x and stores in z +// Uses: AX, CX, DX, R8-R15, FLAGS +// Instr: x86_64, bmi2, adx +#define integerSqrAdx(z,x) \ + XORL R15, R15; \ + MOVQ 0+x, DX; \ + ;;;;;;;;;;;;;; MULXQ DX, AX, R8; MOVQ AX, 0+z; \ + ADDQ DX, DX; ADCQ $0, R15; CLC; \ + MULXQ 8+x, AX, R9; ADCXQ AX, R8; MOVQ R8, 8+z; \ + MULXQ 16+x, AX, R10; ADCXQ AX, R9; MOVQ $0, R8;\ + MULXQ 24+x, AX, R11; ADCXQ AX, R10; \ + MULXQ 32+x, AX, R12; ADCXQ AX, R11; \ + MULXQ 40+x, AX, R13; ADCXQ AX, R12; \ + MULXQ 48+x, AX, R14; ADCXQ AX, R13; \ + ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R14; \ + \ + MOVQ 8+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 8+x, R8; \ + ADDQ AX, R9; MOVQ R9, 16+z; \ + ADCQ CX, R8; \ + ADCQ $0, R11; \ + ADDQ 8+x, DX; \ + ADCQ $0, R15; \ + XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \ + MULXQ 16+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 24+z; \ + MULXQ 24+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; MOVQ $0, R10; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R10, R9; \ + \ + MOVQ 16+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 16+x, R8; \ + ADDQ AX, R11; MOVQ R11, 32+z; \ + ADCQ CX, R8; \ + ADCQ $0, R13; \ + ADDQ 16+x, DX; \ + ADCQ $0, R15; \ + XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \ + MULXQ 24+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 40+z; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; MOVQ $0, R12; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R11,R10; \ + \ + MOVQ 24+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 24+x, R8; \ + ADDQ AX, R13; MOVQ R13, 48+z; \ + ADCQ CX, R8; \ + ADCQ $0, R9; \ + ADDQ 24+x, DX; \ + ADCQ $0, R15; \ + XORL R13, R13; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R14; \ + MULXQ 32+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; MOVQ R14, 56+z; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; MOVQ $0, R14; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R12,R11; \ + \ + MOVQ 32+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 32+x, R8; \ + ADDQ AX, R9; MOVQ R9, 64+z; \ + ADCQ CX, R8; \ + ADCQ $0, R11; \ + ADDQ 32+x, DX; \ + ADCQ $0, R15; \ + XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \ + MULXQ 40+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ 
R10, 72+z; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R13,R12; \ + \ + MOVQ 40+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 40+x, R8; \ + ADDQ AX, R11; MOVQ R11, 80+z; \ + ADCQ CX, R8; \ + ADCQ $0, R13; \ + ADDQ 40+x, DX; \ + ADCQ $0, R15; \ + XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \ + MULXQ 48+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 88+z; \ + ;;;;;;;;;;;;;;;;;;; ADCXQ R14,R13; \ + \ + MOVQ 48+x, DX; \ + MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \ + MULXQ AX, AX, CX; \ + MOVQ R15, R8; NEGQ R8; ANDQ 48+x, R8; \ + XORL R10, R10; ;;;;;;;;;;;;;; ADOXQ CX, R14; \ + ;;;;;;;;;;;;;; ADCXQ AX, R13; ;;;;;;;;;;;;;; MOVQ R13, 96+z; \ + ;;;;;;;;;;;;;; ADCXQ R8, R14; MOVQ R14, 104+z; + +// reduceFromDoubleLeg finds a z=x modulo p such that z<2^448 and stores in z +// Uses: AX, R8-R15, FLAGS +// Instr: x86_64 +#define reduceFromDoubleLeg(z,x) \ + /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \ + /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \ + MOVQ 80+x,AX; MOVQ AX,R10; \ + MOVQ $0xFFFFFFFF00000000, R8; \ + ANDQ R8,R10; \ + \ + MOVQ $0,R14; \ + MOVQ 104+x,R13; SHLQ $1,R13,R14; \ + MOVQ 96+x,R12; SHLQ $1,R12,R13; \ + MOVQ 88+x,R11; SHLQ $1,R11,R12; \ + MOVQ 72+x, R9; SHLQ $1,R10,R11; \ + MOVQ 64+x, R8; SHLQ $1,R10; \ + MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \ + MOVQ 56+x,R15; \ + \ + ADDQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \ + ADCQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \ + ADCQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \ + ADCQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \ + ADCQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \ + ADCQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \ + ADCQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \ + ADCQ $0,R14; \ + /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \ + /* ( r9, r8, r15, r13, r12, r11, r10) */ \ + MOVQ R10, AX; \ + SHRQ $32,R11,R10; \ + SHRQ $32,R12,R11; \ + SHRQ $32,R13,R12; \ + SHRQ $32,R15,R13; \ + SHRQ $32, R8,R15; \ + SHRQ $32, R9, R8; \ + SHRQ $32, AX, R9; \ + \ + ADDQ 0+z,R10; \ + ADCQ 8+z,R11; \ + ADCQ 16+z,R12; \ + ADCQ 24+z,R13; \ + ADCQ 32+z,R15; \ + ADCQ 40+z, R8; \ + ADCQ 48+z, R9; \ + ADCQ $0,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + ADDQ R14,R10; MOVQ $0,R14; \ + ADCQ $0,R11; \ + ADCQ $0,R12; \ + ADCQ AX,R13; \ + ADCQ $0,R15; \ + ADCQ $0, R8; \ + ADCQ $0, R9; \ + ADCQ $0,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32,AX; \ + ADDQ R14,R10; MOVQ R10, 0+z; \ + ADCQ $0,R11; MOVQ R11, 8+z; \ + ADCQ $0,R12; MOVQ R12,16+z; \ + ADCQ AX,R13; MOVQ R13,24+z; \ + ADCQ $0,R15; MOVQ R15,32+z; \ + ADCQ $0, R8; MOVQ R8,40+z; \ + ADCQ $0, R9; MOVQ R9,48+z; + +// reduceFromDoubleAdx finds a z=x modulo p such that z<2^448 and stores in z +// Uses: AX, R8-R15, FLAGS +// Instr: x86_64, adx +#define reduceFromDoubleAdx(z,x) \ + /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \ + /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \ + MOVQ 80+x,AX; MOVQ AX,R10; \ + MOVQ $0xFFFFFFFF00000000, R8; \ + ANDQ R8,R10; \ + \ + MOVQ $0,R14; \ + MOVQ 104+x,R13; SHLQ $1,R13,R14; \ + MOVQ 96+x,R12; SHLQ $1,R12,R13; \ + MOVQ 88+x,R11; SHLQ $1,R11,R12; \ + MOVQ 72+x, R9; SHLQ $1,R10,R11; \ + MOVQ 64+x, R8; SHLQ $1,R10; \ + MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \ + MOVQ 56+x,R15; \ + \ + XORL AX,AX; \ + ADCXQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \ + ADCXQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \ + ADCXQ 16+x, R9; MOVQ R9,16+z; 
MOVQ 72+x, R9; \ + ADCXQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \ + ADCXQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \ + ADCXQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \ + ADCXQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \ + ADCXQ AX,R14; \ + /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \ + /* ( r9, r8, r15, r13, r12, r11, r10) */ \ + MOVQ R10, AX; \ + SHRQ $32,R11,R10; \ + SHRQ $32,R12,R11; \ + SHRQ $32,R13,R12; \ + SHRQ $32,R15,R13; \ + SHRQ $32, R8,R15; \ + SHRQ $32, R9, R8; \ + SHRQ $32, AX, R9; \ + \ + XORL AX,AX; \ + ADCXQ 0+z,R10; \ + ADCXQ 8+z,R11; \ + ADCXQ 16+z,R12; \ + ADCXQ 24+z,R13; \ + ADCXQ 32+z,R15; \ + ADCXQ 40+z, R8; \ + ADCXQ 48+z, R9; \ + ADCXQ AX,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + CLC; \ + ADCXQ R14,R10; MOVQ $0,R14; \ + ADCXQ R14,R11; \ + ADCXQ R14,R12; \ + ADCXQ AX,R13; \ + ADCXQ R14,R15; \ + ADCXQ R14, R8; \ + ADCXQ R14, R9; \ + ADCXQ R14,R14; \ + /* ( c7) + (c6,...,c0) */ \ + /* (r14) */ \ + MOVQ R14, AX; SHLQ $32, AX; \ + CLC; \ + ADCXQ R14,R10; MOVQ R10, 0+z; MOVQ $0,R14; \ + ADCXQ R14,R11; MOVQ R11, 8+z; \ + ADCXQ R14,R12; MOVQ R12,16+z; \ + ADCXQ AX,R13; MOVQ R13,24+z; \ + ADCXQ R14,R15; MOVQ R15,32+z; \ + ADCXQ R14, R8; MOVQ R8,40+z; \ + ADCXQ R14, R9; MOVQ R9,48+z; + +// addSub calculates two operations: x,y = x+y,x-y +// Uses: AX, DX, R8-R15, FLAGS +#define addSub(x,y) \ + MOVQ 0+x, R8; ADDQ 0+y, R8; \ + MOVQ 8+x, R9; ADCQ 8+y, R9; \ + MOVQ 16+x, R10; ADCQ 16+y, R10; \ + MOVQ 24+x, R11; ADCQ 24+y, R11; \ + MOVQ 32+x, R12; ADCQ 32+y, R12; \ + MOVQ 40+x, R13; ADCQ 40+y, R13; \ + MOVQ 48+x, R14; ADCQ 48+y, R14; \ + MOVQ $0, AX; ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ $0, AX; \ + ADCQ $0, R9; \ + ADCQ $0, R10; \ + ADCQ DX, R11; \ + ADCQ $0, R12; \ + ADCQ $0, R13; \ + ADCQ $0, R14; \ + ADCQ $0, AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + ADDQ AX, R8; MOVQ 0+x,AX; MOVQ R8, 0+x; MOVQ AX, R8; \ + ADCQ $0, R9; MOVQ 8+x,AX; MOVQ R9, 8+x; MOVQ AX, R9; \ + ADCQ $0, R10; MOVQ 16+x,AX; MOVQ R10, 16+x; MOVQ AX, R10; \ + ADCQ DX, R11; MOVQ 24+x,AX; MOVQ R11, 24+x; MOVQ AX, R11; \ + ADCQ $0, R12; MOVQ 32+x,AX; MOVQ R12, 32+x; MOVQ AX, R12; \ + ADCQ $0, R13; MOVQ 40+x,AX; MOVQ R13, 40+x; MOVQ AX, R13; \ + ADCQ $0, R14; MOVQ 48+x,AX; MOVQ R14, 48+x; MOVQ AX, R14; \ + SUBQ 0+y, R8; \ + SBBQ 8+y, R9; \ + SBBQ 16+y, R10; \ + SBBQ 24+y, R11; \ + SBBQ 32+y, R12; \ + SBBQ 40+y, R13; \ + SBBQ 48+y, R14; \ + MOVQ $0, AX; SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ $0, AX; \ + SBBQ $0, R9; \ + SBBQ $0, R10; \ + SBBQ DX, R11; \ + SBBQ $0, R12; \ + SBBQ $0, R13; \ + SBBQ $0, R14; \ + SETCS AX; \ + MOVQ AX, DX; \ + SHLQ $32, DX; \ + SUBQ AX, R8; MOVQ R8, 0+y; \ + SBBQ $0, R9; MOVQ R9, 8+y; \ + SBBQ $0, R10; MOVQ R10, 16+y; \ + SBBQ DX, R11; MOVQ R11, 24+y; \ + SBBQ $0, R12; MOVQ R12, 32+y; \ + SBBQ $0, R13; MOVQ R13, 40+y; \ + SBBQ $0, R14; MOVQ R14, 48+y; diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s new file mode 100644 index 000000000..435addf5e --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s @@ -0,0 +1,74 @@ +// +build amd64 + +#include "textflag.h" +#include "fp_amd64.h" + +// func cmovAmd64(x, y *Elt, n uint) +TEXT ·cmovAmd64(SB),NOSPLIT,$0-24 + MOVQ x+0(FP), DI + MOVQ y+8(FP), SI + MOVQ n+16(FP), BX + cselect(0(DI),0(SI),BX) + RET + +// func cswapAmd64(x, y *Elt, n uint) +TEXT ·cswapAmd64(SB),NOSPLIT,$0-24 + MOVQ x+0(FP), DI + MOVQ y+8(FP), SI + MOVQ 
n+16(FP), BX + cswap(0(DI),0(SI),BX) + RET + +// func subAmd64(z, x, y *Elt) +TEXT ·subAmd64(SB),NOSPLIT,$0-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + subtraction(0(DI),0(SI),0(BX)) + RET + +// func addsubAmd64(x, y *Elt) +TEXT ·addsubAmd64(SB),NOSPLIT,$0-16 + MOVQ x+0(FP), DI + MOVQ y+8(FP), SI + addSub(0(DI),0(SI)) + RET + +#define addLegacy \ + additionLeg(0(DI),0(SI),0(BX)) +#define addBmi2Adx \ + additionAdx(0(DI),0(SI),0(BX)) + +#define mulLegacy \ + integerMulLeg(0(SP),0(SI),0(BX)) \ + reduceFromDoubleLeg(0(DI),0(SP)) +#define mulBmi2Adx \ + integerMulAdx(0(SP),0(SI),0(BX)) \ + reduceFromDoubleAdx(0(DI),0(SP)) + +#define sqrLegacy \ + integerSqrLeg(0(SP),0(SI)) \ + reduceFromDoubleLeg(0(DI),0(SP)) +#define sqrBmi2Adx \ + integerSqrAdx(0(SP),0(SI)) \ + reduceFromDoubleAdx(0(DI),0(SP)) + +// func addAmd64(z, x, y *Elt) +TEXT ·addAmd64(SB),NOSPLIT,$0-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + CHECK_BMI2ADX(LADD, addLegacy, addBmi2Adx) + +// func mulAmd64(z, x, y *Elt) +TEXT ·mulAmd64(SB),NOSPLIT,$112-24 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + MOVQ y+16(FP), BX + CHECK_BMI2ADX(LMUL, mulLegacy, mulBmi2Adx) + +// func sqrAmd64(z, x *Elt) +TEXT ·sqrAmd64(SB),NOSPLIT,$112-16 + MOVQ z+0(FP), DI + MOVQ x+8(FP), SI + CHECK_BMI2ADX(LSQR, sqrLegacy, sqrBmi2Adx) diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go new file mode 100644 index 000000000..47a0b6320 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go @@ -0,0 +1,339 @@ +package fp448 + +import ( + "encoding/binary" + "math/bits" +) + +func cmovGeneric(x, y *Elt, n uint) { + m := -uint64(n & 0x1) + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + x0 = (x0 &^ m) | (y0 & m) + x1 = (x1 &^ m) | (y1 & m) + x2 = (x2 &^ m) | (y2 & m) + x3 = (x3 &^ m) | (y3 & m) + x4 = (x4 &^ m) | (y4 & m) + x5 = (x5 &^ m) | (y5 & m) + x6 = (x6 &^ m) | (y6 & m) + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) + binary.LittleEndian.PutUint64(x[4*8:5*8], x4) + binary.LittleEndian.PutUint64(x[5*8:6*8], x5) + binary.LittleEndian.PutUint64(x[6*8:7*8], x6) +} + +func cswapGeneric(x, y *Elt, n uint) { + m := -uint64(n & 0x1) + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 
4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + t0 := m & (x0 ^ y0) + t1 := m & (x1 ^ y1) + t2 := m & (x2 ^ y2) + t3 := m & (x3 ^ y3) + t4 := m & (x4 ^ y4) + t5 := m & (x5 ^ y5) + t6 := m & (x6 ^ y6) + x0 ^= t0 + x1 ^= t1 + x2 ^= t2 + x3 ^= t3 + x4 ^= t4 + x5 ^= t5 + x6 ^= t6 + y0 ^= t0 + y1 ^= t1 + y2 ^= t2 + y3 ^= t3 + y4 ^= t4 + y5 ^= t5 + y6 ^= t6 + + binary.LittleEndian.PutUint64(x[0*8:1*8], x0) + binary.LittleEndian.PutUint64(x[1*8:2*8], x1) + binary.LittleEndian.PutUint64(x[2*8:3*8], x2) + binary.LittleEndian.PutUint64(x[3*8:4*8], x3) + binary.LittleEndian.PutUint64(x[4*8:5*8], x4) + binary.LittleEndian.PutUint64(x[5*8:6*8], x5) + binary.LittleEndian.PutUint64(x[6*8:7*8], x6) + + binary.LittleEndian.PutUint64(y[0*8:1*8], y0) + binary.LittleEndian.PutUint64(y[1*8:2*8], y1) + binary.LittleEndian.PutUint64(y[2*8:3*8], y2) + binary.LittleEndian.PutUint64(y[3*8:4*8], y3) + binary.LittleEndian.PutUint64(y[4*8:5*8], y4) + binary.LittleEndian.PutUint64(y[5*8:6*8], y5) + binary.LittleEndian.PutUint64(y[6*8:7*8], y6) +} + +func addGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + z0, c0 := bits.Add64(x0, y0, 0) + z1, c1 := bits.Add64(x1, y1, c0) + z2, c2 := bits.Add64(x2, y2, c1) + z3, c3 := bits.Add64(x3, y3, c2) + z4, c4 := bits.Add64(x4, y4, c3) + z5, c5 := bits.Add64(x5, y5, c4) + z6, z7 := bits.Add64(x6, y6, c5) + + z0, c0 = bits.Add64(z0, z7, 0) + z1, c1 = bits.Add64(z1, 0, c0) + z2, c2 = bits.Add64(z2, 0, c1) + z3, c3 = bits.Add64(z3, z7<<32, c2) + z4, c4 = bits.Add64(z4, 0, c3) + z5, c5 = bits.Add64(z5, 0, c4) + z6, z7 = bits.Add64(z6, 0, c5) + + z0, c0 = bits.Add64(z0, z7, 0) + z1, c1 = bits.Add64(z1, 0, c0) + z2, c2 = bits.Add64(z2, 0, c1) + z3, c3 = bits.Add64(z3, z7<<32, c2) + z4, c4 = bits.Add64(z4, 0, c3) + z5, c5 = bits.Add64(z5, 0, c4) + z6, _ = bits.Add64(z6, 0, c5) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) + binary.LittleEndian.PutUint64(z[4*8:5*8], z4) + binary.LittleEndian.PutUint64(z[5*8:6*8], z5) + binary.LittleEndian.PutUint64(z[6*8:7*8], z6) +} + +func subGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + 
y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + z0, c0 := bits.Sub64(x0, y0, 0) + z1, c1 := bits.Sub64(x1, y1, c0) + z2, c2 := bits.Sub64(x2, y2, c1) + z3, c3 := bits.Sub64(x3, y3, c2) + z4, c4 := bits.Sub64(x4, y4, c3) + z5, c5 := bits.Sub64(x5, y5, c4) + z6, z7 := bits.Sub64(x6, y6, c5) + + z0, c0 = bits.Sub64(z0, z7, 0) + z1, c1 = bits.Sub64(z1, 0, c0) + z2, c2 = bits.Sub64(z2, 0, c1) + z3, c3 = bits.Sub64(z3, z7<<32, c2) + z4, c4 = bits.Sub64(z4, 0, c3) + z5, c5 = bits.Sub64(z5, 0, c4) + z6, z7 = bits.Sub64(z6, 0, c5) + + z0, c0 = bits.Sub64(z0, z7, 0) + z1, c1 = bits.Sub64(z1, 0, c0) + z2, c2 = bits.Sub64(z2, 0, c1) + z3, c3 = bits.Sub64(z3, z7<<32, c2) + z4, c4 = bits.Sub64(z4, 0, c3) + z5, c5 = bits.Sub64(z5, 0, c4) + z6, _ = bits.Sub64(z6, 0, c5) + + binary.LittleEndian.PutUint64(z[0*8:1*8], z0) + binary.LittleEndian.PutUint64(z[1*8:2*8], z1) + binary.LittleEndian.PutUint64(z[2*8:3*8], z2) + binary.LittleEndian.PutUint64(z[3*8:4*8], z3) + binary.LittleEndian.PutUint64(z[4*8:5*8], z4) + binary.LittleEndian.PutUint64(z[5*8:6*8], z5) + binary.LittleEndian.PutUint64(z[6*8:7*8], z6) +} + +func addsubGeneric(x, y *Elt) { + z := &Elt{} + addGeneric(z, x, y) + subGeneric(y, x, y) + *x = *z +} + +func mulGeneric(z, x, y *Elt) { + x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8]) + x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8]) + x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8]) + x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8]) + x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8]) + x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8]) + x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8]) + + y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8]) + y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8]) + y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8]) + y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8]) + y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8]) + y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8]) + y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8]) + + yy := [7]uint64{y0, y1, y2, y3, y4, y5, y6} + zz := [7]uint64{} + + yi := yy[0] + h0, l0 := bits.Mul64(x0, yi) + h1, l1 := bits.Mul64(x1, yi) + h2, l2 := bits.Mul64(x2, yi) + h3, l3 := bits.Mul64(x3, yi) + h4, l4 := bits.Mul64(x4, yi) + h5, l5 := bits.Mul64(x5, yi) + h6, l6 := bits.Mul64(x6, yi) + + zz[0] = l0 + a0, c0 := bits.Add64(h0, l1, 0) + a1, c1 := bits.Add64(h1, l2, c0) + a2, c2 := bits.Add64(h2, l3, c1) + a3, c3 := bits.Add64(h3, l4, c2) + a4, c4 := bits.Add64(h4, l5, c3) + a5, c5 := bits.Add64(h5, l6, c4) + a6, _ := bits.Add64(h6, 0, c5) + + for i := 1; i < 7; i++ { + yi = yy[i] + h0, l0 = bits.Mul64(x0, yi) + h1, l1 = bits.Mul64(x1, yi) + h2, l2 = bits.Mul64(x2, yi) + h3, l3 = bits.Mul64(x3, yi) + h4, l4 = bits.Mul64(x4, yi) + h5, l5 = bits.Mul64(x5, yi) + h6, l6 = bits.Mul64(x6, yi) + + zz[i], c0 = bits.Add64(a0, l0, 0) + a0, c1 = bits.Add64(a1, l1, c0) + a1, c2 = bits.Add64(a2, l2, c1) + a2, c3 = bits.Add64(a3, l3, c2) + a3, c4 = bits.Add64(a4, l4, c3) + a4, c5 = bits.Add64(a5, l5, c4) + a5, a6 = bits.Add64(a6, l6, c5) + + a0, c0 = bits.Add64(a0, h0, 0) + a1, c1 = bits.Add64(a1, h1, c0) + a2, c2 = bits.Add64(a2, h2, c1) + a3, c3 = bits.Add64(a3, h3, c2) + a4, c4 = bits.Add64(a4, h4, c3) + a5, c5 = bits.Add64(a5, h5, c4) + a6, _ = bits.Add64(a6, h6, c5) + } + red64(z, &zz, &[7]uint64{a0, a1, a2, a3, a4, a5, a6}) +} + +func sqrGeneric(z, x *Elt) { mulGeneric(z, x, x) } + +func red64(z *Elt, l, h *[7]uint64) { + /* (2C13, 2C12, 2C11, 2C10|C10, C9, C8, C7) + (C6,...,C0) */ + h0 := h[0] + h1 := h[1] + h2 := h[2] + h3 := ((h[3] & 
(0xFFFFFFFF << 32)) << 1) | (h[3] & 0xFFFFFFFF)
+ h4 := (h[3] >> 63) | (h[4] << 1)
+ h5 := (h[4] >> 63) | (h[5] << 1)
+ h6 := (h[5] >> 63) | (h[6] << 1)
+ h7 := (h[6] >> 63)
+
+ l0, c0 := bits.Add64(h0, l[0], 0)
+ l1, c1 := bits.Add64(h1, l[1], c0)
+ l2, c2 := bits.Add64(h2, l[2], c1)
+ l3, c3 := bits.Add64(h3, l[3], c2)
+ l4, c4 := bits.Add64(h4, l[4], c3)
+ l5, c5 := bits.Add64(h5, l[5], c4)
+ l6, c6 := bits.Add64(h6, l[6], c5)
+ l7, _ := bits.Add64(h7, 0, c6)
+
+ /* (C10C9, C9C8,C8C7,C7C13,C13C12,C12C11,C11C10) + (C6,...,C0) */
+ h0 = (h[3] >> 32) | (h[4] << 32)
+ h1 = (h[4] >> 32) | (h[5] << 32)
+ h2 = (h[5] >> 32) | (h[6] << 32)
+ h3 = (h[6] >> 32) | (h[0] << 32)
+ h4 = (h[0] >> 32) | (h[1] << 32)
+ h5 = (h[1] >> 32) | (h[2] << 32)
+ h6 = (h[2] >> 32) | (h[3] << 32)
+
+ l0, c0 = bits.Add64(l0, h0, 0)
+ l1, c1 = bits.Add64(l1, h1, c0)
+ l2, c2 = bits.Add64(l2, h2, c1)
+ l3, c3 = bits.Add64(l3, h3, c2)
+ l4, c4 = bits.Add64(l4, h4, c3)
+ l5, c5 = bits.Add64(l5, h5, c4)
+ l6, c6 = bits.Add64(l6, h6, c5)
+ l7, _ = bits.Add64(l7, 0, c6)
+
+ /* (C7) + (C6,...,C0) */
+ l0, c0 = bits.Add64(l0, l7, 0)
+ l1, c1 = bits.Add64(l1, 0, c0)
+ l2, c2 = bits.Add64(l2, 0, c1)
+ l3, c3 = bits.Add64(l3, l7<<32, c2)
+ l4, c4 = bits.Add64(l4, 0, c3)
+ l5, c5 = bits.Add64(l5, 0, c4)
+ l6, l7 = bits.Add64(l6, 0, c5)
+
+ /* (C7) + (C6,...,C0) */
+ l0, c0 = bits.Add64(l0, l7, 0)
+ l1, c1 = bits.Add64(l1, 0, c0)
+ l2, c2 = bits.Add64(l2, 0, c1)
+ l3, c3 = bits.Add64(l3, l7<<32, c2)
+ l4, c4 = bits.Add64(l4, 0, c3)
+ l5, c5 = bits.Add64(l5, 0, c4)
+ l6, _ = bits.Add64(l6, 0, c5)
+
+ binary.LittleEndian.PutUint64(z[0*8:1*8], l0)
+ binary.LittleEndian.PutUint64(z[1*8:2*8], l1)
+ binary.LittleEndian.PutUint64(z[2*8:3*8], l2)
+ binary.LittleEndian.PutUint64(z[3*8:4*8], l3)
+ binary.LittleEndian.PutUint64(z[4*8:5*8], l4)
+ binary.LittleEndian.PutUint64(z[5*8:6*8], l5)
+ binary.LittleEndian.PutUint64(z[6*8:7*8], l6)
+}
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go
new file mode 100644
index 000000000..a62225d29
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go
@@ -0,0 +1,12 @@
+//go:build !amd64 || purego
+// +build !amd64 purego
+
+package fp448
+
+func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) }
+func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) }
+func add(z, x, y *Elt) { addGeneric(z, x, y) }
+func sub(z, x, y *Elt) { subGeneric(z, x, y) }
+func addsub(x, y *Elt) { addsubGeneric(x, y) }
+func mul(z, x, y *Elt) { mulGeneric(z, x, y) }
+func sqr(z, x *Elt) { sqrGeneric(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go
new file mode 100644
index 000000000..2d7afc805
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go
@@ -0,0 +1,75 @@
+//go:build gofuzz
+// +build gofuzz
+
+// How to run the fuzzer:
+//
+// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz
+// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
+// $ go-fuzz-build -libfuzzer -func FuzzReduction -o lib.a
+// $ clang -fsanitize=fuzzer lib.a -o fu.exe
+// $ ./fu.exe
+package fp448

+import (
+ "encoding/binary"
+ "fmt"
+ "math/big"
+
+ "github.com/cloudflare/circl/internal/conv"
+)
+
+// FuzzReduction is a fuzzer target for the red64 function, which reduces t
+// (112 bytes) to a number t' (56 bytes) congruent to t modulo p448.
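+// The reference computation below relies on the identity
+// 2^448 ≡ 2^224 + 1 (mod p), which holds for the Goldilocks prime
+// p = 2^448 - 2^224 - 1: the high half of t is repeatedly folded back in
+// as hi*(2^224+1) + lo until the value fits in 448 bits.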
+func FuzzReduction(data []byte) int { + if len(data) != 2*Size { + return -1 + } + var got, want Elt + var lo, hi [7]uint64 + a := data[:Size] + b := data[Size:] + lo[0] = binary.LittleEndian.Uint64(a[0*8 : 1*8]) + lo[1] = binary.LittleEndian.Uint64(a[1*8 : 2*8]) + lo[2] = binary.LittleEndian.Uint64(a[2*8 : 3*8]) + lo[3] = binary.LittleEndian.Uint64(a[3*8 : 4*8]) + lo[4] = binary.LittleEndian.Uint64(a[4*8 : 5*8]) + lo[5] = binary.LittleEndian.Uint64(a[5*8 : 6*8]) + lo[6] = binary.LittleEndian.Uint64(a[6*8 : 7*8]) + + hi[0] = binary.LittleEndian.Uint64(b[0*8 : 1*8]) + hi[1] = binary.LittleEndian.Uint64(b[1*8 : 2*8]) + hi[2] = binary.LittleEndian.Uint64(b[2*8 : 3*8]) + hi[3] = binary.LittleEndian.Uint64(b[3*8 : 4*8]) + hi[4] = binary.LittleEndian.Uint64(b[4*8 : 5*8]) + hi[5] = binary.LittleEndian.Uint64(b[5*8 : 6*8]) + hi[6] = binary.LittleEndian.Uint64(b[6*8 : 7*8]) + + red64(&got, &lo, &hi) + + t := conv.BytesLe2BigInt(data[:2*Size]) + + two448 := big.NewInt(1) + two448.Lsh(two448, 448) // 2^448 + mask448 := big.NewInt(1) + mask448.Sub(two448, mask448) // 2^448-1 + two224plus1 := big.NewInt(1) + two224plus1.Lsh(two224plus1, 224) + two224plus1.Add(two224plus1, big.NewInt(1)) // 2^224+1 + + var loBig, hiBig big.Int + for t.Cmp(two448) >= 0 { + loBig.And(t, mask448) + hiBig.Rsh(t, 448) + t.Mul(&hiBig, two224plus1) + t.Add(t, &loBig) + } + conv.BigInt2BytesLe(want[:], t) + + if got != want { + fmt.Printf("in: %v\n", conv.BytesLe2BigInt(data[:2*Size])) + fmt.Printf("got: %v\n", got) + fmt.Printf("want: %v\n", want) + panic("error found") + } + return 1 +} diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go new file mode 100644 index 000000000..a43851b8b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go @@ -0,0 +1,122 @@ +// Package mlsbset provides a constant-time exponentiation method with precomputation. +// +// References: "Efficient and secure algorithms for GLV-based scalar +// multiplication and their implementation on GLV–GLS curves" by (Faz-Hernandez et al.) +// - https://doi.org/10.1007/s13389-014-0085-7 +// - https://eprint.iacr.org/2013/158 +package mlsbset + +import ( + "errors" + "fmt" + "math/big" + + "github.com/cloudflare/circl/internal/conv" +) + +// EltG is a group element. +type EltG interface{} + +// EltP is a precomputed group element. +type EltP interface{} + +// Group defines the operations required by MLSBSet exponentiation method. +type Group interface { + Identity() EltG // Returns the identity of the group. + Sqr(x EltG) // Calculates x = x^2. + Mul(x EltG, y EltP) // Calculates x = x*y. + NewEltP() EltP // Returns an arbitrary precomputed element. + ExtendedEltP() EltP // Returns the precomputed element x^(2^(w*d)). + Lookup(a EltP, v uint, s, u int32) // Sets a = s*T[v][u]. +} + +// Params contains the parameters of the encoding. +type Params struct { + T uint // T is the maximum size (in bits) of exponents. + V uint // V is the number of tables. + W uint // W is the window size. + E uint // E is the number of digits per table. + D uint // D is the number of digits in total. + L uint // L is the length of the code. +} + +// Encoder allows to convert integers into valid powers. +type Encoder struct{ p Params } + +// New produces an encoder of the MLSBSet algorithm. 
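+// Given t (the exponent bit-length), v (the number of tables), and w (the
+// window size), it derives e = ceil(t/(w*v)) digits per table, d = e*v digits
+// in total, and an overall code length of l = d*w bits, as captured by the
+// Params struct above.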
+func New(t, v, w uint) (Encoder, error) { + if !(t > 1 && v >= 1 && w >= 2) { + return Encoder{}, errors.New("t>1, v>=1, w>=2") + } + e := (t + w*v - 1) / (w * v) + d := e * v + l := d * w + return Encoder{Params{t, v, w, e, d, l}}, nil +} + +// Encode converts an odd integer k into a valid power for exponentiation. +func (m Encoder) Encode(k []byte) (*Power, error) { + if len(k) == 0 { + return nil, errors.New("empty slice") + } + if !(len(k) <= int(m.p.L+7)>>3) { + return nil, errors.New("k too big") + } + if k[0]%2 == 0 { + return nil, errors.New("k must be odd") + } + ap := int((m.p.L+7)/8) - len(k) + k = append(k, make([]byte, ap)...) + s := m.signs(k) + b := make([]int32, m.p.L-m.p.D) + c := conv.BytesLe2BigInt(k) + c.Rsh(c, m.p.D) + var bi big.Int + for i := m.p.D; i < m.p.L; i++ { + c0 := int32(c.Bit(0)) + b[i-m.p.D] = s[i%m.p.D] * c0 + bi.SetInt64(int64(b[i-m.p.D] >> 1)) + c.Rsh(c, 1) + c.Sub(c, &bi) + } + carry := int(c.Int64()) + return &Power{m, s, b, carry}, nil +} + +// signs calculates the set of signs. +func (m Encoder) signs(k []byte) []int32 { + s := make([]int32, m.p.D) + s[m.p.D-1] = 1 + for i := uint(1); i < m.p.D; i++ { + ki := int32((k[i>>3] >> (i & 0x7)) & 0x1) + s[i-1] = 2*ki - 1 + } + return s +} + +// GetParams returns the complementary parameters of the encoding. +func (m Encoder) GetParams() Params { return m.p } + +// tableSize returns the size of each table. +func (m Encoder) tableSize() uint { return 1 << (m.p.W - 1) } + +// Elts returns the total number of elements that must be precomputed. +func (m Encoder) Elts() uint { return m.p.V * m.tableSize() } + +// IsExtended returns true if the element x^(2^(wd)) must be calculated. +func (m Encoder) IsExtended() bool { q := m.p.T / (m.p.V * m.p.W); return m.p.T == q*m.p.V*m.p.W } + +// Ops returns the number of squares and multiplications executed during an exponentiation. +func (m Encoder) Ops() (S uint, M uint) { + S = m.p.E + M = m.p.E * m.p.V + if m.IsExtended() { + M++ + } + return +} + +func (m Encoder) String() string { + return fmt.Sprintf("T: %v W: %v V: %v e: %v d: %v l: %v wv|t: %v", + m.p.T, m.p.W, m.p.V, m.p.E, m.p.D, m.p.L, m.IsExtended()) +} diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/power.go b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go new file mode 100644 index 000000000..3f214c304 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go @@ -0,0 +1,64 @@ +package mlsbset + +import "fmt" + +// Power is a valid exponent produced by the MLSBSet encoding algorithm. +type Power struct { + set Encoder // parameters of code. + s []int32 // set of signs. + b []int32 // set of digits. + c int // carry is {0,1}. +} + +// Exp is calculates x^k, where x is a predetermined element of a group G. +func (p *Power) Exp(G Group) EltG { + a, b := G.Identity(), G.NewEltP() + for e := int(p.set.p.E - 1); e >= 0; e-- { + G.Sqr(a) + for v := uint(0); v < p.set.p.V; v++ { + sgnElt, idElt := p.Digit(v, uint(e)) + G.Lookup(b, v, sgnElt, idElt) + G.Mul(a, b) + } + } + if p.set.IsExtended() && p.c == 1 { + G.Mul(a, G.ExtendedEltP()) + } + return a +} + +// Digit returns the (v,e)-th digit and its sign. +func (p *Power) Digit(v, e uint) (sgn, dig int32) { + sgn = p.bit(0, v, e) + dig = 0 + for i := p.set.p.W - 1; i > 0; i-- { + dig = 2*dig + p.bit(i, v, e) + } + mask := dig >> 31 + dig = (dig + mask) ^ mask + return sgn, dig +} + +// bit returns the (w,v,e)-th bit of the code. 
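+// For w = 0 the value is taken from the sign array s at index E*v+e;
+// otherwise it is read from the digit array b at offset D*(w-1) + E*v + e.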
+func (p *Power) bit(w, v, e uint) int32 { + if !(w < p.set.p.W && + v < p.set.p.V && + e < p.set.p.E) { + panic(fmt.Errorf("indexes outside (%v,%v,%v)", w, v, e)) + } + if w == 0 { + return p.s[p.set.p.E*v+e] + } + return p.b[p.set.p.D*(w-1)+p.set.p.E*v+e] +} + +func (p *Power) String() string { + dig := "" + for j := uint(0); j < p.set.p.V; j++ { + for i := uint(0); i < p.set.p.E; i++ { + s, d := p.Digit(j, i) + dig += fmt.Sprintf("(%2v,%2v) = %+2v %+2v\n", j, i, s, d) + } + } + return fmt.Sprintf("len: %v\ncarry: %v\ndigits:\n%v", len(p.b)+len(p.s), p.c, dig) +} diff --git a/vendor/github.com/cloudflare/circl/math/wnaf.go b/vendor/github.com/cloudflare/circl/math/wnaf.go new file mode 100644 index 000000000..94a1ec504 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/wnaf.go @@ -0,0 +1,84 @@ +// Package math provides some utility functions for big integers. +package math + +import "math/big" + +// SignedDigit obtains the signed-digit recoding of n and returns a list L of +// digits such that n = sum( L[i]*2^(i*(w-1)) ), and each L[i] is an odd number +// in the set {±1, ±3, ..., ±2^(w-1)-1}. The third parameter ensures that the +// output has ceil(l/(w-1)) digits. +// +// Restrictions: +// - n is odd and n > 0. +// - 1 < w < 32. +// - l >= bit length of n. +// +// References: +// - Alg.6 in "Exponent Recoding and Regular Exponentiation Algorithms" +// by Joye-Tunstall. http://doi.org/10.1007/978-3-642-02384-2_21 +// - Alg.6 in "Selecting Elliptic Curves for Cryptography: An Efficiency and +// Security Analysis" by Bos et al. http://doi.org/10.1007/s13389-015-0097-y +func SignedDigit(n *big.Int, w, l uint) []int32 { + if n.Sign() <= 0 || n.Bit(0) == 0 { + panic("n must be non-zero, odd, and positive") + } + if w <= 1 || w >= 32 { + panic("Verify that 1 < w < 32") + } + if uint(n.BitLen()) > l { + panic("n is too big to fit in l digits") + } + lenN := (l + (w - 1) - 1) / (w - 1) // ceil(l/(w-1)) + L := make([]int32, lenN+1) + var k, v big.Int + k.Set(n) + + var i uint + for i = 0; i < lenN; i++ { + words := k.Bits() + value := int32(words[0] & ((1 << w) - 1)) + value -= int32(1) << (w - 1) + L[i] = value + v.SetInt64(int64(value)) + k.Sub(&k, &v) + k.Rsh(&k, w-1) + } + L[i] = int32(k.Int64()) + return L +} + +// OmegaNAF obtains the window-w Non-Adjacent Form of a positive number n and +// 1 < w < 32. The returned slice L holds n = sum( L[i]*2^i ). +// +// Reference: +// - Alg.9 "Efficient arithmetic on Koblitz curves" by Solinas. +// http://doi.org/10.1023/A:1008306223194 +func OmegaNAF(n *big.Int, w uint) (L []int32) { + if n.Sign() < 0 { + panic("n must be positive") + } + if w <= 1 || w >= 32 { + panic("Verify that 1 < w < 32") + } + + L = make([]int32, n.BitLen()+1) + var k, v big.Int + k.Set(n) + + i := 0 + for ; k.Sign() > 0; i++ { + value := int32(0) + if k.Bit(0) == 1 { + words := k.Bits() + value = int32(words[0] & ((1 << w) - 1)) + if value >= (int32(1) << (w - 1)) { + value -= int32(1) << w + } + v.SetInt64(int64(value)) + k.Sub(&k, &v) + } + L[i] = value + k.Rsh(&k, 1) + } + return L[:i] +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go new file mode 100644 index 000000000..08ca65d79 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go @@ -0,0 +1,453 @@ +// Package ed25519 implements Ed25519 signature scheme as described in RFC-8032. 
+//
+// This package provides optimized implementations of the three signature
+// variants while maintaining close compatibility with crypto/ed25519.
+//
+// | Scheme Name | Sign Function | Verification | Context |
+// |-------------|-------------------|---------------|-------------------|
+// | Ed25519 | Sign | Verify | None |
+// | Ed25519Ph | SignPh | VerifyPh | Yes, can be empty |
+// | Ed25519Ctx | SignWithCtx | VerifyWithCtx | Yes, non-empty |
+// | All above | (PrivateKey).Sign | VerifyAny | As above |
+//
+// Specific functions for sign and verify are defined. A generic signing
+// function for all schemes is available through the crypto.Signer interface,
+// which is implemented by the PrivateKey type. A corresponding all-in-one
+// verification method is provided by the VerifyAny function.
+//
+// Signing with Ed25519Ph or Ed25519Ctx requires a context string for domain
+// separation. This parameter is passed using a SignerOptions struct defined
+// in this package. While Ed25519Ph accepts an empty context, Ed25519Ctx
+// enforces non-empty context strings.
+//
+// # Compatibility with crypto/ed25519
+//
+// These functions are compatible with the “Ed25519” function defined in
+// RFC-8032. However, unlike RFC 8032's formulation, this package's private
+// key representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the
+// RFC-8032 private key as the “seed”.
+//
+// References
+//
+// - RFC-8032: https://rfc-editor.org/rfc/rfc8032.txt
+// - Ed25519: https://ed25519.cr.yp.to/
+// - EdDSA: High-speed high-security signatures. https://doi.org/10.1007/s13389-012-0027-1
+package ed25519
+
+import (
+ "bytes"
+ "crypto"
+ cryptoRand "crypto/rand"
+ "crypto/sha512"
+ "crypto/subtle"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+
+ "github.com/cloudflare/circl/sign"
+)
+
+const (
+ // ContextMaxSize is the maximum length (in bytes) allowed for context.
+ ContextMaxSize = 255
+ // PublicKeySize is the size, in bytes, of public keys as used in this package.
+ PublicKeySize = 32
+ // PrivateKeySize is the size, in bytes, of private keys as used in this package.
+ PrivateKeySize = 64
+ // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
+ SignatureSize = 64
+ // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+ SeedSize = 32
+)
+
+const (
+ paramB = 256 / 8 // Size of keys in bytes.
+)
+
+// SignerOptions implements crypto.SignerOpts and augments it with parameters
+// that are specific to the Ed25519 signature schemes.
+type SignerOptions struct {
+ // Hash must be crypto.Hash(0) for Ed25519/Ed25519ctx, or crypto.SHA512
+ // for Ed25519ph.
+ crypto.Hash
+
+ // Context is an optional domain separation string for Ed25519ph and is
+ // required for Ed25519ctx. Its length must be at most 255 bytes.
+ Context string
+
+ // Scheme is an identifier for choosing a signature scheme. The zero value
+ // is ED25519.
+ Scheme SchemeID
+}
+
+// SchemeID is an identifier for each signature scheme.
+type SchemeID uint
+
+const (
+ ED25519 SchemeID = iota
+ ED25519Ph
+ ED25519Ctx
+)
+
+// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
+type PrivateKey []byte
+
+// Equal reports whether priv and x have the same value.
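+// The comparison runs in constant time via subtle.ConstantTimeCompare,
+// making it suitable for secret key material.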
+func (priv PrivateKey) Equal(x crypto.PrivateKey) bool { + xx, ok := x.(PrivateKey) + return ok && subtle.ConstantTimeCompare(priv, xx) == 1 +} + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, priv[SeedSize:]) + return publicKey +} + +// Seed returns the private key seed corresponding to priv. It is provided for +// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds +// in this package. +func (priv PrivateKey) Seed() []byte { + seed := make([]byte, SeedSize) + copy(seed, priv[:SeedSize]) + return seed +} + +func (priv PrivateKey) Scheme() sign.Scheme { return sch } + +func (pub PublicKey) Scheme() sign.Scheme { return sch } + +func (priv PrivateKey) MarshalBinary() (data []byte, err error) { + privateKey := make(PrivateKey, PrivateKeySize) + copy(privateKey, priv) + return privateKey, nil +} + +func (pub PublicKey) MarshalBinary() (data []byte, err error) { + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, pub) + return publicKey, nil +} + +// Equal reports whether pub and x have the same value. +func (pub PublicKey) Equal(x crypto.PublicKey) bool { + xx, ok := x.(PublicKey) + return ok && bytes.Equal(pub, xx) +} + +// Sign creates a signature of a message with priv key. +// This function is compatible with crypto.ed25519 and also supports the +// three signature variants defined in RFC-8032, namely Ed25519 (or pure +// EdDSA), Ed25519Ph, and Ed25519Ctx. +// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx +// variant. This can be achieved by passing crypto.Hash(0) as the value for +// opts. +// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant. +// This can be achieved by passing crypto.SHA512 as the value for opts. +// Use a SignerOptions struct (defined in this package) to pass a context +// string for signing. +func (priv PrivateKey) Sign( + rand io.Reader, + message []byte, + opts crypto.SignerOpts, +) (signature []byte, err error) { + var ctx string + var scheme SchemeID + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0): + return Sign(priv, message), nil + case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512: + return SignPh(priv, message, ctx), nil + case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0: + return SignWithCtx(priv, message, ctx), nil + default: + return nil, errors.New("ed25519: bad hash algorithm") + } +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptoRand.Reader + } + + seed := make([]byte, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + + return publicKey, privateKey, nil +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. 
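+//
+// A minimal signing round trip, as a sketch (use a cryptographically random
+// seed in practice):
+//
+//	seed := make([]byte, SeedSize)
+//	priv := NewKeyFromSeed(seed)
+//	sig := Sign(priv, []byte("hello"))
+//	ok := Verify(priv.Public().(PublicKey), []byte("hello"), sig)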
+func NewKeyFromSeed(seed []byte) PrivateKey {
+ privateKey := make(PrivateKey, PrivateKeySize)
+ newKeyFromSeed(privateKey, seed)
+ return privateKey
+}
+
+func newKeyFromSeed(privateKey, seed []byte) {
+ if l := len(seed); l != SeedSize {
+  panic("ed25519: bad seed length: " + strconv.Itoa(l))
+ }
+ var P pointR1
+ k := sha512.Sum512(seed)
+ clamp(k[:])
+ reduceModOrder(k[:paramB], false)
+ P.fixedMult(k[:paramB])
+ copy(privateKey[:SeedSize], seed)
+ _ = P.ToBytes(privateKey[SeedSize:])
+}
+
+func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) {
+ if l := len(privateKey); l != PrivateKeySize {
+  panic("ed25519: bad private key length: " + strconv.Itoa(l))
+ }
+
+ H := sha512.New()
+ var PHM []byte
+
+ if preHash {
+  _, _ = H.Write(message)
+  PHM = H.Sum(nil)
+  H.Reset()
+ } else {
+  PHM = message
+ }
+
+ // 1. Hash the 32-byte private key using SHA-512.
+ _, _ = H.Write(privateKey[:SeedSize])
+ h := H.Sum(nil)
+ clamp(h[:])
+ prefix, s := h[paramB:], h[:paramB]
+
+ // 2. Compute SHA-512(dom2(F, C) || prefix || PH(M))
+ H.Reset()
+
+ writeDom(H, ctx, preHash)
+
+ _, _ = H.Write(prefix)
+ _, _ = H.Write(PHM)
+ r := H.Sum(nil)
+ reduceModOrder(r[:], true)
+
+ // 3. Compute the point [r]B.
+ var P pointR1
+ P.fixedMult(r[:paramB])
+ R := (&[paramB]byte{})[:]
+ if err := P.ToBytes(R); err != nil {
+  panic(err)
+ }
+
+ // 4. Compute SHA512(dom2(F, C) || R || A || PH(M)).
+ H.Reset()
+
+ writeDom(H, ctx, preHash)
+
+ _, _ = H.Write(R)
+ _, _ = H.Write(privateKey[SeedSize:])
+ _, _ = H.Write(PHM)
+ hRAM := H.Sum(nil)
+
+ reduceModOrder(hRAM[:], true)
+
+ // 5. Compute S = (r + k * s) mod order.
+ S := (&[paramB]byte{})[:]
+ calculateS(S, r[:paramB], hRAM[:paramB], s)
+
+ // 6. The signature is the concatenation of R and S.
+ copy(signature[:paramB], R[:])
+ copy(signature[paramB:], S[:])
+}
+
+// Sign signs the message with privateKey and returns a signature.
+// This function supports the signature variant defined in RFC-8032: Ed25519,
+// also known as the pure version of EdDSA.
+// It will panic if len(privateKey) is not PrivateKeySize.
+func Sign(privateKey PrivateKey, message []byte) []byte {
+ signature := make([]byte, SignatureSize)
+ signAll(signature, privateKey, message, []byte(""), false)
+ return signature
+}
+
+// SignPh creates a signature of a message with private key and context.
+// This function supports the signature variant defined in RFC-8032: Ed25519ph,
+// meaning it internally hashes the message using SHA-512, and optionally
+// accepts a context string.
+// It will panic if len(privateKey) is not PrivateKeySize.
+// A context string may be passed to this function; its length must be no
+// more than ContextMaxSize=255 bytes. It can be empty.
+func SignPh(privateKey PrivateKey, message []byte, ctx string) []byte {
+ if len(ctx) > ContextMaxSize {
+  panic(fmt.Errorf("ed25519: bad context length: %v", len(ctx)))
+ }
+
+ signature := make([]byte, SignatureSize)
+ signAll(signature, privateKey, message, []byte(ctx), true)
+ return signature
+}
+
+// SignWithCtx creates a signature of a message with private key and context.
+// This function supports the signature variant defined in RFC-8032: Ed25519ctx,
+// meaning it accepts a non-empty context string.
+// It will panic if len(privateKey) is not PrivateKeySize.
+// A context string must be passed to this function; its length must be no
+// more than ContextMaxSize=255 bytes, and it cannot be empty.
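+// Internally, both context variants bind ctx to the signature through the
+// dom2 prefix written by writeDom: the string "SigEd25519 no Ed25519
+// collisions", a flag byte (0x00 for Ed25519ctx, 0x01 for Ed25519ph), the
+// context length, and the context itself.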
+func SignWithCtx(privateKey PrivateKey, message []byte, ctx string) []byte { + if len(ctx) == 0 || len(ctx) > ContextMaxSize { + panic(fmt.Errorf("ed25519: bad context length: %v > %v", len(ctx), ContextMaxSize)) + } + + signature := make([]byte, SignatureSize) + signAll(signature, privateKey, message, []byte(ctx), false) + return signature +} + +func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool { + if len(public) != PublicKeySize || + len(signature) != SignatureSize || + !isLessThanOrder(signature[paramB:]) { + return false + } + + var P pointR1 + if ok := P.FromBytes(public); !ok { + return false + } + + H := sha512.New() + var PHM []byte + + if preHash { + _, _ = H.Write(message) + PHM = H.Sum(nil) + H.Reset() + } else { + PHM = message + } + + R := signature[:paramB] + + writeDom(H, ctx, preHash) + + _, _ = H.Write(R) + _, _ = H.Write(public) + _, _ = H.Write(PHM) + hRAM := H.Sum(nil) + reduceModOrder(hRAM[:], true) + + var Q pointR1 + encR := (&[paramB]byte{})[:] + P.neg() + Q.doubleMult(&P, signature[paramB:], hRAM[:paramB]) + _ = Q.ToBytes(encR) + return bytes.Equal(R, encR) +} + +// VerifyAny returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports all the three signature variants defined in RFC-8032, +// namely Ed25519 (or pure EdDSA), Ed25519Ph, and Ed25519Ctx. +// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx +// variant. This can be achieved by passing crypto.Hash(0) as the value for opts. +// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant. +// This can be achieved by passing crypto.SHA512 as the value for opts. +// Use a SignerOptions struct to pass a context string for signing. +func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool { + var ctx string + var scheme SchemeID + if o, ok := opts.(SignerOptions); ok { + ctx = o.Context + scheme = o.Scheme + } + + switch true { + case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0): + return Verify(public, message, signature) + case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512: + return VerifyPh(public, message, signature, ctx) + case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0: + return VerifyWithCtx(public, message, signature, ctx) + default: + return false + } +} + +// Verify returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports the signature variant defined in RFC-8032: Ed25519, +// also known as the pure version of EdDSA. +func Verify(public PublicKey, message, signature []byte) bool { + return verify(public, message, signature, []byte(""), false) +} + +// VerifyPh returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded. +// This function supports the signature variant defined in RFC-8032: Ed25519ph, +// meaning it internally hashes the message using SHA-512. +// Context could be passed to this function, which length should be no more than +// 255. It can be empty. +func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool { + return verify(public, message, signature, []byte(ctx), true) +} + +// VerifyWithCtx returns true if the signature is valid. Failure cases are invalid +// signature, or when the public key cannot be decoded, or when context is +// not provided. 
+// This function supports the signature variant defined in RFC-8032: Ed25519ctx, +// meaning it does not handle prehashed messages. Non-empty context string must be +// provided, and must not be more than 255 of length. +func VerifyWithCtx(public PublicKey, message, signature []byte, ctx string) bool { + if len(ctx) == 0 || len(ctx) > ContextMaxSize { + return false + } + + return verify(public, message, signature, []byte(ctx), false) +} + +func clamp(k []byte) { + k[0] &= 248 + k[paramB-1] = (k[paramB-1] & 127) | 64 +} + +// isLessThanOrder returns true if 0 <= x < order. +func isLessThanOrder(x []byte) bool { + i := len(order) - 1 + for i > 0 && x[i] == order[i] { + i-- + } + return x[i] < order[i] +} + +func writeDom(h io.Writer, ctx []byte, preHash bool) { + dom2 := "SigEd25519 no Ed25519 collisions" + + if len(ctx) > 0 { + _, _ = h.Write([]byte(dom2)) + if preHash { + _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))}) + } else { + _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))}) + } + _, _ = h.Write(ctx) + } else if preHash { + _, _ = h.Write([]byte(dom2)) + _, _ = h.Write([]byte{0x01, 0x00}) + } +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go new file mode 100644 index 000000000..10efafdca --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go @@ -0,0 +1,175 @@ +package ed25519 + +import ( + "encoding/binary" + "math/bits" +) + +var order = [paramB]byte{ + 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, + 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, +} + +// isLessThan returns true if 0 <= x < y, and assumes that slices have the same length. +func isLessThan(x, y []byte) bool { + i := len(x) - 1 + for i > 0 && x[i] == y[i] { + i-- + } + return x[i] < y[i] +} + +// reduceModOrder calculates k = k mod order of the curve. +func reduceModOrder(k []byte, is512Bit bool) { + var X [((2 * paramB) * 8) / 64]uint64 + numWords := len(k) >> 3 + for i := 0; i < numWords; i++ { + X[i] = binary.LittleEndian.Uint64(k[i*8 : (i+1)*8]) + } + red512(&X, is512Bit) + for i := 0; i < numWords; i++ { + binary.LittleEndian.PutUint64(k[i*8:(i+1)*8], X[i]) + } +} + +// red512 calculates x = x mod Order of the curve. +func red512(x *[8]uint64, full bool) { + // Implementation of Algs.(14.47)+(14.52) of Handbook of Applied + // Cryptography, by A. Menezes, P. van Oorschot, and S. Vanstone. 
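+ // ell0 and ell1 hold the low 128 bits of the order of the curve, while
+ // ell160..ell162 appear to be that same 128-bit value shifted left by
+ // 4 bits; they drive the folding of the high 256 bits of x below.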
+ const ( + ell0 = uint64(0x5812631a5cf5d3ed) + ell1 = uint64(0x14def9dea2f79cd6) + ell160 = uint64(0x812631a5cf5d3ed0) + ell161 = uint64(0x4def9dea2f79cd65) + ell162 = uint64(0x0000000000000001) + ) + + var c0, c1, c2, c3 uint64 + r0, r1, r2, r3, r4 := x[0], x[1], x[2], x[3], uint64(0) + + if full { + q0, q1, q2, q3 := x[4], x[5], x[6], x[7] + + for i := 0; i < 3; i++ { + h0, s0 := bits.Mul64(q0, ell160) + h1, s1 := bits.Mul64(q1, ell160) + h2, s2 := bits.Mul64(q2, ell160) + h3, s3 := bits.Mul64(q3, ell160) + + s1, c0 = bits.Add64(h0, s1, 0) + s2, c1 = bits.Add64(h1, s2, c0) + s3, c2 = bits.Add64(h2, s3, c1) + s4, _ := bits.Add64(h3, 0, c2) + + h0, l0 := bits.Mul64(q0, ell161) + h1, l1 := bits.Mul64(q1, ell161) + h2, l2 := bits.Mul64(q2, ell161) + h3, l3 := bits.Mul64(q3, ell161) + + l1, c0 = bits.Add64(h0, l1, 0) + l2, c1 = bits.Add64(h1, l2, c0) + l3, c2 = bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + s1, c0 = bits.Add64(s1, l0, 0) + s2, c1 = bits.Add64(s2, l1, c0) + s3, c2 = bits.Add64(s3, l2, c1) + s4, c3 = bits.Add64(s4, l3, c2) + s5, s6 := bits.Add64(l4, 0, c3) + + s2, c0 = bits.Add64(s2, q0, 0) + s3, c1 = bits.Add64(s3, q1, c0) + s4, c2 = bits.Add64(s4, q2, c1) + s5, c3 = bits.Add64(s5, q3, c2) + s6, s7 := bits.Add64(s6, 0, c3) + + q := q0 | q1 | q2 | q3 + m := -((q | -q) >> 63) // if q=0 then m=0...0 else m=1..1 + s0 &= m + s1 &= m + s2 &= m + s3 &= m + q0, q1, q2, q3 = s4, s5, s6, s7 + + if (i+1)%2 == 0 { + r0, c0 = bits.Add64(r0, s0, 0) + r1, c1 = bits.Add64(r1, s1, c0) + r2, c2 = bits.Add64(r2, s2, c1) + r3, c3 = bits.Add64(r3, s3, c2) + r4, _ = bits.Add64(r4, 0, c3) + } else { + r0, c0 = bits.Sub64(r0, s0, 0) + r1, c1 = bits.Sub64(r1, s1, c0) + r2, c2 = bits.Sub64(r2, s2, c1) + r3, c3 = bits.Sub64(r3, s3, c2) + r4, _ = bits.Sub64(r4, 0, c3) + } + } + + m := -(r4 >> 63) + r0, c0 = bits.Add64(r0, m&ell160, 0) + r1, c1 = bits.Add64(r1, m&ell161, c0) + r2, c2 = bits.Add64(r2, m&ell162, c1) + r3, c3 = bits.Add64(r3, 0, c2) + r4, _ = bits.Add64(r4, m&1, c3) + x[4], x[5], x[6], x[7] = 0, 0, 0, 0 + } + + q0 := (r4 << 4) | (r3 >> 60) + r3 &= (uint64(1) << 60) - 1 + + h0, s0 := bits.Mul64(ell0, q0) + h1, s1 := bits.Mul64(ell1, q0) + s1, c0 = bits.Add64(h0, s1, 0) + s2, _ := bits.Add64(h1, 0, c0) + + r0, c0 = bits.Sub64(r0, s0, 0) + r1, c1 = bits.Sub64(r1, s1, c0) + r2, c2 = bits.Sub64(r2, s2, c1) + r3, _ = bits.Sub64(r3, 0, c2) + + x[0], x[1], x[2], x[3] = r0, r1, r2, r3 +} + +// calculateS performs s = r+k*a mod Order of the curve. 
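+// The product k*a is accumulated limb by limb with 64x64->128-bit
+// multiplications (bits.Mul64) into a 512-bit vector initialized with r,
+// and the result is then reduced with red512.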
+func calculateS(s, r, k, a []byte) { + K := [4]uint64{ + binary.LittleEndian.Uint64(k[0*8 : 1*8]), + binary.LittleEndian.Uint64(k[1*8 : 2*8]), + binary.LittleEndian.Uint64(k[2*8 : 3*8]), + binary.LittleEndian.Uint64(k[3*8 : 4*8]), + } + S := [8]uint64{ + binary.LittleEndian.Uint64(r[0*8 : 1*8]), + binary.LittleEndian.Uint64(r[1*8 : 2*8]), + binary.LittleEndian.Uint64(r[2*8 : 3*8]), + binary.LittleEndian.Uint64(r[3*8 : 4*8]), + } + var c3 uint64 + for i := range K { + ai := binary.LittleEndian.Uint64(a[i*8 : (i+1)*8]) + + h0, l0 := bits.Mul64(K[0], ai) + h1, l1 := bits.Mul64(K[1], ai) + h2, l2 := bits.Mul64(K[2], ai) + h3, l3 := bits.Mul64(K[3], ai) + + l1, c0 := bits.Add64(h0, l1, 0) + l2, c1 := bits.Add64(h1, l2, c0) + l3, c2 := bits.Add64(h2, l3, c1) + l4, _ := bits.Add64(h3, 0, c2) + + S[i+0], c0 = bits.Add64(S[i+0], l0, 0) + S[i+1], c1 = bits.Add64(S[i+1], l1, c0) + S[i+2], c2 = bits.Add64(S[i+2], l2, c1) + S[i+3], c3 = bits.Add64(S[i+3], l3, c2) + S[i+4], _ = bits.Add64(S[i+4], l4, c3) + } + red512(&S, true) + binary.LittleEndian.PutUint64(s[0*8:1*8], S[0]) + binary.LittleEndian.PutUint64(s[1*8:2*8], S[1]) + binary.LittleEndian.PutUint64(s[2*8:3*8], S[2]) + binary.LittleEndian.PutUint64(s[3*8:4*8], S[3]) +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go new file mode 100644 index 000000000..3216aae30 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go @@ -0,0 +1,180 @@ +package ed25519 + +import ( + "crypto/subtle" + "encoding/binary" + "math/bits" + + "github.com/cloudflare/circl/internal/conv" + "github.com/cloudflare/circl/math" + fp "github.com/cloudflare/circl/math/fp25519" +) + +var paramD = fp.Elt{ + 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75, + 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00, + 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c, + 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52, +} + +// mLSBRecoding parameters. +const ( + fxT = 257 + fxV = 2 + fxW = 3 + fx2w1 = 1 << (uint(fxW) - 1) + numWords64 = (paramB * 8 / 64) +) + +// mLSBRecoding is the odd-only modified LSB-set. +// +// Reference: +// +// "Efficient and secure algorithms for GLV-based scalar multiplication and +// their implementation on GLV–GLS curves" by (Faz-Hernandez et al.) +// http://doi.org/10.1007/s13389-014-0085-7. +func mLSBRecoding(L []int8, k []byte) { + const ee = (fxT + fxW*fxV - 1) / (fxW * fxV) + const dd = ee * fxV + const ll = dd * fxW + if len(L) == (ll + 1) { + var m [numWords64 + 1]uint64 + for i := 0; i < numWords64; i++ { + m[i] = binary.LittleEndian.Uint64(k[8*i : 8*i+8]) + } + condAddOrderN(&m) + L[dd-1] = 1 + for i := 0; i < dd-1; i++ { + kip1 := (m[(i+1)/64] >> (uint(i+1) % 64)) & 0x1 + L[i] = int8(kip1<<1) - 1 + } + { // right-shift by d + right := uint(dd % 64) + left := uint(64) - right + lim := ((numWords64+1)*64 - dd) / 64 + j := dd / 64 + for i := 0; i < lim; i++ { + m[i] = (m[i+j] >> right) | (m[i+j+1] << left) + } + m[lim] = m[lim+j] >> right + } + for i := dd; i < ll; i++ { + L[i] = L[i%dd] * int8(m[0]&0x1) + div2subY(m[:], int64(L[i]>>1), numWords64) + } + L[ll] = int8(m[0]) + } +} + +// absolute returns always a positive value. +func absolute(x int32) int32 { + mask := x >> 31 + return (x + mask) ^ mask +} + +// condAddOrderN updates x = x+order if x is even, otherwise x remains unchanged. 
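+// Since the group order is odd, adding it flips an even scalar to odd without
+// changing its value modulo the order, as the odd-only mLSB-set recoding
+// above requires.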
+func condAddOrderN(x *[numWords64 + 1]uint64) { + isOdd := (x[0] & 0x1) - 1 + c := uint64(0) + for i := 0; i < numWords64; i++ { + orderWord := binary.LittleEndian.Uint64(order[8*i : 8*i+8]) + o := isOdd & orderWord + x0, c0 := bits.Add64(x[i], o, c) + x[i] = x0 + c = c0 + } + x[numWords64], _ = bits.Add64(x[numWords64], 0, c) +} + +// div2subY update x = (x/2) - y. +func div2subY(x []uint64, y int64, l int) { + s := uint64(y >> 63) + for i := 0; i < l-1; i++ { + x[i] = (x[i] >> 1) | (x[i+1] << 63) + } + x[l-1] = (x[l-1] >> 1) + + b := uint64(0) + x0, b0 := bits.Sub64(x[0], uint64(y), b) + x[0] = x0 + b = b0 + for i := 1; i < l-1; i++ { + x0, b0 := bits.Sub64(x[i], s, b) + x[i] = x0 + b = b0 + } + x[l-1], _ = bits.Sub64(x[l-1], s, b) +} + +func (P *pointR1) fixedMult(scalar []byte) { + if len(scalar) != paramB { + panic("wrong scalar size") + } + const ee = (fxT + fxW*fxV - 1) / (fxW * fxV) + const dd = ee * fxV + const ll = dd * fxW + + L := make([]int8, ll+1) + mLSBRecoding(L[:], scalar) + S := &pointR3{} + P.SetIdentity() + for ii := ee - 1; ii >= 0; ii-- { + P.double() + for j := 0; j < fxV; j++ { + dig := L[fxW*dd-j*ee+ii-ee] + for i := (fxW-1)*dd - j*ee + ii - ee; i >= (2*dd - j*ee + ii - ee); i = i - dd { + dig = 2*dig + L[i] + } + idx := absolute(int32(dig)) + sig := L[dd-j*ee+ii-ee] + Tabj := &tabSign[fxV-j-1] + for k := 0; k < fx2w1; k++ { + S.cmov(&Tabj[k], subtle.ConstantTimeEq(int32(k), idx)) + } + S.cneg(subtle.ConstantTimeEq(int32(sig), -1)) + P.mixAdd(S) + } + } +} + +const ( + omegaFix = 7 + omegaVar = 5 +) + +// doubleMult returns P=mG+nQ. +func (P *pointR1) doubleMult(Q *pointR1, m, n []byte) { + nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m), omegaFix) + nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n), omegaVar) + + if len(nafFix) > len(nafVar) { + nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...) + } else if len(nafFix) < len(nafVar) { + nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...) 
+ } + + var TabQ [1 << (omegaVar - 2)]pointR2 + Q.oddMultiples(TabQ[:]) + P.SetIdentity() + for i := len(nafFix) - 1; i >= 0; i-- { + P.double() + // Generator point + if nafFix[i] != 0 { + idxM := absolute(nafFix[i]) >> 1 + R := tabVerif[idxM] + if nafFix[i] < 0 { + R.neg() + } + P.mixAdd(&R) + } + // Variable input point + if nafVar[i] != 0 { + idxN := absolute(nafVar[i]) >> 1 + S := TabQ[idxN] + if nafVar[i] < 0 { + S.neg() + } + P.add(&S) + } + } +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go new file mode 100644 index 000000000..374a69503 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go @@ -0,0 +1,195 @@ +package ed25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +type ( + pointR1 struct{ x, y, z, ta, tb fp.Elt } + pointR2 struct { + pointR3 + z2 fp.Elt + } +) +type pointR3 struct{ addYX, subYX, dt2 fp.Elt } + +func (P *pointR1) neg() { + fp.Neg(&P.x, &P.x) + fp.Neg(&P.ta, &P.ta) +} + +func (P *pointR1) SetIdentity() { + P.x = fp.Elt{} + fp.SetOne(&P.y) + fp.SetOne(&P.z) + P.ta = fp.Elt{} + P.tb = fp.Elt{} +} + +func (P *pointR1) toAffine() { + fp.Inv(&P.z, &P.z) + fp.Mul(&P.x, &P.x, &P.z) + fp.Mul(&P.y, &P.y, &P.z) + fp.Modp(&P.x) + fp.Modp(&P.y) + fp.SetOne(&P.z) + P.ta = P.x + P.tb = P.y +} + +func (P *pointR1) ToBytes(k []byte) error { + P.toAffine() + var x [fp.Size]byte + err := fp.ToBytes(k[:fp.Size], &P.y) + if err != nil { + return err + } + err = fp.ToBytes(x[:], &P.x) + if err != nil { + return err + } + b := x[0] & 1 + k[paramB-1] = k[paramB-1] | (b << 7) + return nil +} + +func (P *pointR1) FromBytes(k []byte) bool { + if len(k) != paramB { + panic("wrong size") + } + signX := k[paramB-1] >> 7 + copy(P.y[:], k[:fp.Size]) + P.y[fp.Size-1] &= 0x7F + p := fp.P() + if !isLessThan(P.y[:], p[:]) { + return false + } + + one, u, v := &fp.Elt{}, &fp.Elt{}, &fp.Elt{} + fp.SetOne(one) + fp.Sqr(u, &P.y) // u = y^2 + fp.Mul(v, u, ¶mD) // v = dy^2 + fp.Sub(u, u, one) // u = y^2-1 + fp.Add(v, v, one) // v = dy^2+1 + isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v) + if !isQR { + return false + } + fp.Modp(&P.x) // x = x mod p + if fp.IsZero(&P.x) && signX == 1 { + return false + } + if signX != (P.x[0] & 1) { + fp.Neg(&P.x, &P.x) + } + P.ta = P.x + P.tb = P.y + fp.SetOne(&P.z) + return true +} + +// double calculates 2P for curves with A=-1. +func (P *pointR1) double() { + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb + fp.Add(e, Px, Py) // x+y + fp.Sqr(a, Px) // A = x^2 + fp.Sqr(b, Py) // B = y^2 + fp.Sqr(c, Pz) // z^2 + fp.Add(c, c, c) // C = 2*z^2 + fp.Add(h, a, b) // H = A+B + fp.Sqr(e, e) // (x+y)^2 + fp.Sub(e, e, h) // E = (x+y)^2-A-B + fp.Sub(g, b, a) // G = B-A + fp.Sub(f, c, g) // F = C-G + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *pointR1) mixAdd(Q *pointR3) { + fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 + P.coreAddition(Q) +} + +func (P *pointR1) add(Q *pointR2) { + fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2 + P.coreAddition(&Q.pointR3) +} + +// coreAddition calculates P=P+Q for curves with A=-1. 
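+// It uses the unified addition formulas for extended twisted Edwards
+// coordinates with a = -1: Q is kept in the precomputed form
+// (y+x, y-x, 2*d*t) produced by fromR1, which saves several field
+// multiplications per addition.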
+func (P *pointR1) coreAddition(Q *pointR3) { + Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb + addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2 + a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb + fp.Mul(c, Pta, Ptb) // t1 = ta*tb + fp.Sub(h, Py, Px) // y1-x1 + fp.Add(b, Py, Px) // y1+x1 + fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2) + fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2) + fp.Mul(c, c, dt2) // C = 2*D*t1*t2 + fp.Sub(e, b, a) // E = B-A + fp.Add(h, b, a) // H = B+A + fp.Sub(f, d, c) // F = D-C + fp.Add(g, d, c) // G = D+C + fp.Mul(Pz, f, g) // Z = F * G + fp.Mul(Px, e, f) // X = E * F + fp.Mul(Py, g, h) // Y = G * H, T = E * H +} + +func (P *pointR1) oddMultiples(T []pointR2) { + var R pointR2 + n := len(T) + T[0].fromR1(P) + _2P := *P + _2P.double() + R.fromR1(&_2P) + for i := 1; i < n; i++ { + P.add(&R) + T[i].fromR1(P) + } +} + +func (P *pointR1) isEqual(Q *pointR1) bool { + l, r := &fp.Elt{}, &fp.Elt{} + fp.Mul(l, &P.x, &Q.z) + fp.Mul(r, &Q.x, &P.z) + fp.Sub(l, l, r) + b := fp.IsZero(l) + fp.Mul(l, &P.y, &Q.z) + fp.Mul(r, &Q.y, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + fp.Mul(l, &P.ta, &P.tb) + fp.Mul(l, l, &Q.z) + fp.Mul(r, &Q.ta, &Q.tb) + fp.Mul(r, r, &P.z) + fp.Sub(l, l, r) + b = b && fp.IsZero(l) + return b +} + +func (P *pointR3) neg() { + P.addYX, P.subYX = P.subYX, P.addYX + fp.Neg(&P.dt2, &P.dt2) +} + +func (P *pointR2) fromR1(Q *pointR1) { + fp.Add(&P.addYX, &Q.y, &Q.x) + fp.Sub(&P.subYX, &Q.y, &Q.x) + fp.Mul(&P.dt2, &Q.ta, &Q.tb) + fp.Mul(&P.dt2, &P.dt2, ¶mD) + fp.Add(&P.dt2, &P.dt2, &P.dt2) + fp.Add(&P.z2, &Q.z, &Q.z) +} + +func (P *pointR3) cneg(b int) { + t := &fp.Elt{} + fp.Cswap(&P.addYX, &P.subYX, uint(b)) + fp.Neg(t, &P.dt2) + fp.Cmov(&P.dt2, t, uint(b)) +} + +func (P *pointR3) cmov(Q *pointR3, b int) { + fp.Cmov(&P.addYX, &Q.addYX, uint(b)) + fp.Cmov(&P.subYX, &Q.subYX, uint(b)) + fp.Cmov(&P.dt2, &Q.dt2, uint(b)) +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go new file mode 100644 index 000000000..c3505b67a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go @@ -0,0 +1,9 @@ +//go:build go1.13 +// +build go1.13 + +package ed25519 + +import cryptoEd25519 "crypto/ed25519" + +// PublicKey is the type of Ed25519 public keys. +type PublicKey cryptoEd25519.PublicKey diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go new file mode 100644 index 000000000..d57d86eff --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go @@ -0,0 +1,7 @@ +//go:build !go1.13 +// +build !go1.13 + +package ed25519 + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go new file mode 100644 index 000000000..e4520f520 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go @@ -0,0 +1,87 @@ +package ed25519 + +import ( + "crypto/rand" + "encoding/asn1" + + "github.com/cloudflare/circl/sign" +) + +var sch sign.Scheme = &scheme{} + +// Scheme returns a signature interface. 
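+// A minimal usage sketch of the generic interface provided by this package:
+//
+//	sch := ed25519.Scheme()
+//	pk, sk, _ := sch.GenerateKey()
+//	sig := sch.Sign(sk, []byte("message"), nil)
+//	ok := sch.Verify(pk, []byte("message"), sig, nil)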
+func Scheme() sign.Scheme { return sch } + +type scheme struct{} + +func (*scheme) Name() string { return "Ed25519" } +func (*scheme) PublicKeySize() int { return PublicKeySize } +func (*scheme) PrivateKeySize() int { return PrivateKeySize } +func (*scheme) SignatureSize() int { return SignatureSize } +func (*scheme) SeedSize() int { return SeedSize } +func (*scheme) TLSIdentifier() uint { return 0x0807 } +func (*scheme) SupportsContext() bool { return false } +func (*scheme) Oid() asn1.ObjectIdentifier { + return asn1.ObjectIdentifier{1, 3, 101, 112} +} + +func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) { + return GenerateKey(rand.Reader) +} + +func (*scheme) Sign( + sk sign.PrivateKey, + message []byte, + opts *sign.SignatureOpts, +) []byte { + priv, ok := sk.(PrivateKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + if opts != nil && opts.Context != "" { + panic(sign.ErrContextNotSupported) + } + return Sign(priv, message) +} + +func (*scheme) Verify( + pk sign.PublicKey, + message, signature []byte, + opts *sign.SignatureOpts, +) bool { + pub, ok := pk.(PublicKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + if opts != nil { + if opts.Context != "" { + panic(sign.ErrContextNotSupported) + } + } + return Verify(pub, message, signature) +} + +func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) { + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + return publicKey, privateKey +} + +func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) { + if len(buf) < PublicKeySize { + return nil, sign.ErrPubKeySize + } + pub := make(PublicKey, PublicKeySize) + copy(pub, buf[:PublicKeySize]) + return pub, nil +} + +func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) { + if len(buf) < PrivateKeySize { + return nil, sign.ErrPrivKeySize + } + priv := make(PrivateKey, PrivateKeySize) + copy(priv, buf[:PrivateKeySize]) + return priv, nil +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go new file mode 100644 index 000000000..8763b426f --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go @@ -0,0 +1,213 @@ +package ed25519 + +import fp "github.com/cloudflare/circl/math/fp25519" + +var tabSign = [fxV][fx2w1]pointR3{ + { + pointR3{ + addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07}, + subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, + dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, + }, + { + addYX: fp.Elt{0x7c, 0xb0, 0x9e, 0xe6, 0xc5, 0xbf, 0xfa, 0x13, 0x8e, 0x0d, 0x22, 0xde, 0xc8, 0xd1, 0xce, 0x52, 0x02, 0xd5, 0x62, 0x31, 0x71, 0x0e, 0x8e, 0x9d, 0xb0, 0xd6, 0x00, 0xa5, 0x5a, 0x0e, 0xce, 0x72}, + subYX: fp.Elt{0x1a, 0x8e, 0x5c, 0xdc, 0xa4, 0xb3, 0x6c, 0x51, 0x18, 0xa0, 0x09, 0x80, 0x9a, 0x46, 0x33, 0xd5, 0xe0, 0x3c, 0x4d, 0x3b, 0xfc, 0x49, 0xa2, 0x43, 0x29, 0xe1, 0x29, 0xa9, 0x93, 0xea, 0x7c, 0x35}, + dt2: fp.Elt{0x08, 0x46, 0x6f, 0x68, 0x7f, 0x0b, 0x7c, 0x9e, 0xad, 0xba, 0x07, 
0x61, 0x74, 0x83, 0x2f, 0xfc, 0x26, 0xd6, 0x09, 0xb9, 0x00, 0x34, 0x36, 0x4f, 0x01, 0xf3, 0x48, 0xdb, 0x43, 0xba, 0x04, 0x44}, + }, + { + addYX: fp.Elt{0x4c, 0xda, 0x0d, 0x13, 0x66, 0xfd, 0x82, 0x84, 0x9f, 0x75, 0x5b, 0xa2, 0x17, 0xfe, 0x34, 0xbf, 0x1f, 0xcb, 0xba, 0x90, 0x55, 0x80, 0x83, 0xfd, 0x63, 0xb9, 0x18, 0xf8, 0x5b, 0x5d, 0x94, 0x1e}, + subYX: fp.Elt{0xb9, 0xdb, 0x6c, 0x04, 0x88, 0x22, 0xd8, 0x79, 0x83, 0x2f, 0x8d, 0x65, 0x6b, 0xd2, 0xab, 0x1b, 0xdd, 0x65, 0xe5, 0x93, 0x63, 0xf8, 0xa2, 0xd8, 0x3c, 0xf1, 0x4b, 0xc5, 0x99, 0xd1, 0xf2, 0x12}, + dt2: fp.Elt{0x05, 0x4c, 0xb8, 0x3b, 0xfe, 0xf5, 0x9f, 0x2e, 0xd1, 0xb2, 0xb8, 0xff, 0xfe, 0x6d, 0xd9, 0x37, 0xe0, 0xae, 0xb4, 0x5a, 0x51, 0x80, 0x7e, 0x9b, 0x1d, 0xd1, 0x8d, 0x8c, 0x56, 0xb1, 0x84, 0x35}, + }, + { + addYX: fp.Elt{0x39, 0x71, 0x43, 0x34, 0xe3, 0x42, 0x45, 0xa1, 0xf2, 0x68, 0x71, 0xa7, 0xe8, 0x23, 0xfd, 0x9f, 0x86, 0x48, 0xff, 0xe5, 0x96, 0x74, 0xcf, 0x05, 0x49, 0xe2, 0xb3, 0x6c, 0x17, 0x77, 0x2f, 0x6d}, + subYX: fp.Elt{0x73, 0x3f, 0xc1, 0xc7, 0x6a, 0x66, 0xa1, 0x20, 0xdd, 0x11, 0xfb, 0x7a, 0x6e, 0xa8, 0x51, 0xb8, 0x3f, 0x9d, 0xa2, 0x97, 0x84, 0xb5, 0xc7, 0x90, 0x7c, 0xab, 0x48, 0xd6, 0x84, 0xa3, 0xd5, 0x1a}, + dt2: fp.Elt{0x63, 0x27, 0x3c, 0x49, 0x4b, 0xfc, 0x22, 0xf2, 0x0b, 0x50, 0xc2, 0x0f, 0xb4, 0x1f, 0x31, 0x0c, 0x2f, 0x53, 0xab, 0xaa, 0x75, 0x6f, 0xe0, 0x69, 0x39, 0x56, 0xe0, 0x3b, 0xb7, 0xa8, 0xbf, 0x45}, + }, + }, + { + { + addYX: fp.Elt{0x00, 0x45, 0xd9, 0x0d, 0x58, 0x03, 0xfc, 0x29, 0x93, 0xec, 0xbb, 0x6f, 0xa4, 0x7a, 0xd2, 0xec, 0xf8, 0xa7, 0xe2, 0xc2, 0x5f, 0x15, 0x0a, 0x13, 0xd5, 0xa1, 0x06, 0xb7, 0x1a, 0x15, 0x6b, 0x41}, + subYX: fp.Elt{0x85, 0x8c, 0xb2, 0x17, 0xd6, 0x3b, 0x0a, 0xd3, 0xea, 0x3b, 0x77, 0x39, 0xb7, 0x77, 0xd3, 0xc5, 0xbf, 0x5c, 0x6a, 0x1e, 0x8c, 0xe7, 0xc6, 0xc6, 0xc4, 0xb7, 0x2a, 0x8b, 0xf7, 0xb8, 0x61, 0x0d}, + dt2: fp.Elt{0xb0, 0x36, 0xc1, 0xe9, 0xef, 0xd7, 0xa8, 0x56, 0x20, 0x4b, 0xe4, 0x58, 0xcd, 0xe5, 0x07, 0xbd, 0xab, 0xe0, 0x57, 0x1b, 0xda, 0x2f, 0xe6, 0xaf, 0xd2, 0xe8, 0x77, 0x42, 0xf7, 0x2a, 0x1a, 0x19}, + }, + { + addYX: fp.Elt{0x6a, 0x6d, 0x6d, 0xd1, 0xfa, 0xf5, 0x03, 0x30, 0xbd, 0x6d, 0xc2, 0xc8, 0xf5, 0x38, 0x80, 0x4f, 0xb2, 0xbe, 0xa1, 0x76, 0x50, 0x1a, 0x73, 0xf2, 0x78, 0x2b, 0x8e, 0x3a, 0x1e, 0x34, 0x47, 0x7b}, + subYX: fp.Elt{0xc3, 0x2c, 0x36, 0xdc, 0xc5, 0x45, 0xbc, 0xef, 0x1b, 0x64, 0xd6, 0x65, 0x28, 0xe9, 0xda, 0x84, 0x13, 0xbe, 0x27, 0x8e, 0x3f, 0x98, 0x2a, 0x37, 0xee, 0x78, 0x97, 0xd6, 0xc0, 0x6f, 0xb4, 0x53}, + dt2: fp.Elt{0x58, 0x5d, 0xa7, 0xa3, 0x68, 0xbb, 0x20, 0x30, 0x2e, 0x03, 0xe9, 0xb1, 0xd4, 0x90, 0x72, 0xe3, 0x71, 0xb2, 0x36, 0x3e, 0x73, 0xa0, 0x2e, 0x3d, 0xd1, 0x85, 0x33, 0x62, 0x4e, 0xa7, 0x7b, 0x31}, + }, + { + addYX: fp.Elt{0xbf, 0xc4, 0x38, 0x53, 0xfb, 0x68, 0xa9, 0x77, 0xce, 0x55, 0xf9, 0x05, 0xcb, 0xeb, 0xfb, 0x8c, 0x46, 0xc2, 0x32, 0x7c, 0xf0, 0xdb, 0xd7, 0x2c, 0x62, 0x8e, 0xdd, 0x54, 0x75, 0xcf, 0x3f, 0x33}, + subYX: fp.Elt{0x49, 0x50, 0x1f, 0x4e, 0x6e, 0x55, 0x55, 0xde, 0x8c, 0x4e, 0x77, 0x96, 0x38, 0x3b, 0xfe, 0xb6, 0x43, 0x3c, 0x86, 0x69, 0xc2, 0x72, 0x66, 0x1f, 0x6b, 0xf9, 0x87, 0xbc, 0x4f, 0x37, 0x3e, 0x3c}, + dt2: fp.Elt{0xd2, 0x2f, 0x06, 0x6b, 0x08, 0x07, 0x69, 0x77, 0xc0, 0x94, 0xcc, 0xae, 0x43, 0x00, 0x59, 0x6e, 0xa3, 0x63, 0xa8, 0xdd, 0xfa, 0x24, 0x18, 0xd0, 0x35, 0xc7, 0x78, 0xf7, 0x0d, 0xd4, 0x5a, 0x1e}, + }, + { + addYX: fp.Elt{0x45, 0xc1, 0x17, 0x51, 0xf8, 0xed, 0x7e, 0xc7, 0xa9, 0x1a, 0x11, 0x6e, 0x2d, 0xef, 0x0b, 0xd5, 0x3f, 0x98, 0xb0, 0xa3, 0x9d, 0x65, 0xf1, 0xcd, 0x53, 0x4a, 0x8a, 0x18, 0x70, 0x0a, 0x7f, 0x23}, + subYX: fp.Elt{0xdd, 0xef, 
0xbe, 0x3a, 0x31, 0xe0, 0xbc, 0xbe, 0x6d, 0x5d, 0x79, 0x87, 0xd6, 0xbe, 0x68, 0xe3, 0x59, 0x76, 0x8c, 0x86, 0x0e, 0x7a, 0x92, 0x13, 0x14, 0x8f, 0x67, 0xb3, 0xcb, 0x1a, 0x76, 0x76}, + dt2: fp.Elt{0x56, 0x7a, 0x1c, 0x9d, 0xca, 0x96, 0xf9, 0xf9, 0x03, 0x21, 0xd4, 0xe8, 0xb3, 0xd5, 0xe9, 0x52, 0xc8, 0x54, 0x1e, 0x1b, 0x13, 0xb6, 0xfd, 0x47, 0x7d, 0x02, 0x32, 0x33, 0x27, 0xe2, 0x1f, 0x19}, + }, + }, +} + +var tabVerif = [1 << (omegaFix - 2)]pointR3{ + { /* 1P */ + addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07}, + subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44}, + dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f}, + }, + { /* 3P */ + addYX: fp.Elt{0x30, 0x97, 0xee, 0x4c, 0xa8, 0xb0, 0x25, 0xaf, 0x8a, 0x4b, 0x86, 0xe8, 0x30, 0x84, 0x5a, 0x02, 0x32, 0x67, 0x01, 0x9f, 0x02, 0x50, 0x1b, 0xc1, 0xf4, 0xf8, 0x80, 0x9a, 0x1b, 0x4e, 0x16, 0x7a}, + subYX: fp.Elt{0x65, 0xd2, 0xfc, 0xa4, 0xe8, 0x1f, 0x61, 0x56, 0x7d, 0xba, 0xc1, 0xe5, 0xfd, 0x53, 0xd3, 0x3b, 0xbd, 0xd6, 0x4b, 0x21, 0x1a, 0xf3, 0x31, 0x81, 0x62, 0xda, 0x5b, 0x55, 0x87, 0x15, 0xb9, 0x2a}, + dt2: fp.Elt{0x89, 0xd8, 0xd0, 0x0d, 0x3f, 0x93, 0xae, 0x14, 0x62, 0xda, 0x35, 0x1c, 0x22, 0x23, 0x94, 0x58, 0x4c, 0xdb, 0xf2, 0x8c, 0x45, 0xe5, 0x70, 0xd1, 0xc6, 0xb4, 0xb9, 0x12, 0xaf, 0x26, 0x28, 0x5a}, + }, + { /* 5P */ + addYX: fp.Elt{0x33, 0xbb, 0xa5, 0x08, 0x44, 0xbc, 0x12, 0xa2, 0x02, 0xed, 0x5e, 0xc7, 0xc3, 0x48, 0x50, 0x8d, 0x44, 0xec, 0xbf, 0x5a, 0x0c, 0xeb, 0x1b, 0xdd, 0xeb, 0x06, 0xe2, 0x46, 0xf1, 0xcc, 0x45, 0x29}, + subYX: fp.Elt{0xba, 0xd6, 0x47, 0xa4, 0xc3, 0x82, 0x91, 0x7f, 0xb7, 0x29, 0x27, 0x4b, 0xd1, 0x14, 0x00, 0xd5, 0x87, 0xa0, 0x64, 0xb8, 0x1c, 0xf1, 0x3c, 0xe3, 0xf3, 0x55, 0x1b, 0xeb, 0x73, 0x7e, 0x4a, 0x15}, + dt2: fp.Elt{0x85, 0x82, 0x2a, 0x81, 0xf1, 0xdb, 0xbb, 0xbc, 0xfc, 0xd1, 0xbd, 0xd0, 0x07, 0x08, 0x0e, 0x27, 0x2d, 0xa7, 0xbd, 0x1b, 0x0b, 0x67, 0x1b, 0xb4, 0x9a, 0xb6, 0x3b, 0x6b, 0x69, 0xbe, 0xaa, 0x43}, + }, + { /* 7P */ + addYX: fp.Elt{0xbf, 0xa3, 0x4e, 0x94, 0xd0, 0x5c, 0x1a, 0x6b, 0xd2, 0xc0, 0x9d, 0xb3, 0x3a, 0x35, 0x70, 0x74, 0x49, 0x2e, 0x54, 0x28, 0x82, 0x52, 0xb2, 0x71, 0x7e, 0x92, 0x3c, 0x28, 0x69, 0xea, 0x1b, 0x46}, + subYX: fp.Elt{0xb1, 0x21, 0x32, 0xaa, 0x9a, 0x2c, 0x6f, 0xba, 0xa7, 0x23, 0xba, 0x3b, 0x53, 0x21, 0xa0, 0x6c, 0x3a, 0x2c, 0x19, 0x92, 0x4f, 0x76, 0xea, 0x9d, 0xe0, 0x17, 0x53, 0x2e, 0x5d, 0xdd, 0x6e, 0x1d}, + dt2: fp.Elt{0xa2, 0xb3, 0xb8, 0x01, 0xc8, 0x6d, 0x83, 0xf1, 0x9a, 0xa4, 0x3e, 0x05, 0x47, 0x5f, 0x03, 0xb3, 0xf3, 0xad, 0x77, 0x58, 0xba, 0x41, 0x9c, 0x52, 0xa7, 0x90, 0x0f, 0x6a, 0x1c, 0xbb, 0x9f, 0x7a}, + }, + { /* 9P */ + addYX: fp.Elt{0x2f, 0x63, 0xa8, 0xa6, 0x8a, 0x67, 0x2e, 0x9b, 0xc5, 0x46, 0xbc, 0x51, 0x6f, 0x9e, 0x50, 0xa6, 0xb5, 0xf5, 0x86, 0xc6, 0xc9, 0x33, 0xb2, 0xce, 0x59, 0x7f, 0xdd, 0x8a, 0x33, 0xed, 0xb9, 0x34}, + subYX: fp.Elt{0x64, 0x80, 0x9d, 0x03, 0x7e, 0x21, 0x6e, 0xf3, 0x9b, 0x41, 0x20, 0xf5, 0xb6, 0x81, 0xa0, 0x98, 0x44, 0xb0, 0x5e, 0xe7, 0x08, 0xc6, 0xcb, 0x96, 0x8f, 0x9c, 0xdc, 0xfa, 0x51, 0x5a, 0xc0, 0x49}, + dt2: fp.Elt{0x1b, 0xaf, 0x45, 0x90, 0xbf, 0xe8, 0xb4, 0x06, 0x2f, 0xd2, 0x19, 0xa7, 0xe8, 0x83, 
0xff, 0xe2, 0x16, 0xcf, 0xd4, 0x93, 0x29, 0xfc, 0xf6, 0xaa, 0x06, 0x8b, 0x00, 0x1b, 0x02, 0x72, 0xc1, 0x73}, + }, + { /* 11P */ + addYX: fp.Elt{0xde, 0x2a, 0x80, 0x8a, 0x84, 0x00, 0xbf, 0x2f, 0x27, 0x2e, 0x30, 0x02, 0xcf, 0xfe, 0xd9, 0xe5, 0x06, 0x34, 0x70, 0x17, 0x71, 0x84, 0x3e, 0x11, 0xaf, 0x8f, 0x6d, 0x54, 0xe2, 0xaa, 0x75, 0x42}, + subYX: fp.Elt{0x48, 0x43, 0x86, 0x49, 0x02, 0x5b, 0x5f, 0x31, 0x81, 0x83, 0x08, 0x77, 0x69, 0xb3, 0xd6, 0x3e, 0x95, 0xeb, 0x8d, 0x6a, 0x55, 0x75, 0xa0, 0xa3, 0x7f, 0xc7, 0xd5, 0x29, 0x80, 0x59, 0xab, 0x18}, + dt2: fp.Elt{0xe9, 0x89, 0x60, 0xfd, 0xc5, 0x2c, 0x2b, 0xd8, 0xa4, 0xe4, 0x82, 0x32, 0xa1, 0xb4, 0x1e, 0x03, 0x22, 0x86, 0x1a, 0xb5, 0x99, 0x11, 0x31, 0x44, 0x48, 0xf9, 0x3d, 0xb5, 0x22, 0x55, 0xc6, 0x3d}, + }, + { /* 13P */ + addYX: fp.Elt{0x6d, 0x7f, 0x00, 0xa2, 0x22, 0xc2, 0x70, 0xbf, 0xdb, 0xde, 0xbc, 0xb5, 0x9a, 0xb3, 0x84, 0xbf, 0x07, 0xba, 0x07, 0xfb, 0x12, 0x0e, 0x7a, 0x53, 0x41, 0xf2, 0x46, 0xc3, 0xee, 0xd7, 0x4f, 0x23}, + subYX: fp.Elt{0x93, 0xbf, 0x7f, 0x32, 0x3b, 0x01, 0x6f, 0x50, 0x6b, 0x6f, 0x77, 0x9b, 0xc9, 0xeb, 0xfc, 0xae, 0x68, 0x59, 0xad, 0xaa, 0x32, 0xb2, 0x12, 0x9d, 0xa7, 0x24, 0x60, 0x17, 0x2d, 0x88, 0x67, 0x02}, + dt2: fp.Elt{0x78, 0xa3, 0x2e, 0x73, 0x19, 0xa1, 0x60, 0x53, 0x71, 0xd4, 0x8d, 0xdf, 0xb1, 0xe6, 0x37, 0x24, 0x33, 0xe5, 0xa7, 0x91, 0xf8, 0x37, 0xef, 0xa2, 0x63, 0x78, 0x09, 0xaa, 0xfd, 0xa6, 0x7b, 0x49}, + }, + { /* 15P */ + addYX: fp.Elt{0xa0, 0xea, 0xcf, 0x13, 0x03, 0xcc, 0xce, 0x24, 0x6d, 0x24, 0x9c, 0x18, 0x8d, 0xc2, 0x48, 0x86, 0xd0, 0xd4, 0xf2, 0xc1, 0xfa, 0xbd, 0xbd, 0x2d, 0x2b, 0xe7, 0x2d, 0xf1, 0x17, 0x29, 0xe2, 0x61}, + subYX: fp.Elt{0x0b, 0xcf, 0x8c, 0x46, 0x86, 0xcd, 0x0b, 0x04, 0xd6, 0x10, 0x99, 0x2a, 0xa4, 0x9b, 0x82, 0xd3, 0x92, 0x51, 0xb2, 0x07, 0x08, 0x30, 0x08, 0x75, 0xbf, 0x5e, 0xd0, 0x18, 0x42, 0xcd, 0xb5, 0x43}, + dt2: fp.Elt{0x16, 0xb5, 0xd0, 0x9b, 0x2f, 0x76, 0x9a, 0x5d, 0xee, 0xde, 0x3f, 0x37, 0x4e, 0xaf, 0x38, 0xeb, 0x70, 0x42, 0xd6, 0x93, 0x7d, 0x5a, 0x2e, 0x03, 0x42, 0xd8, 0xe4, 0x0a, 0x21, 0x61, 0x1d, 0x51}, + }, + { /* 17P */ + addYX: fp.Elt{0x81, 0x9d, 0x0e, 0x95, 0xef, 0x76, 0xc6, 0x92, 0x4f, 0x04, 0xd7, 0xc0, 0xcd, 0x20, 0x46, 0xa5, 0x48, 0x12, 0x8f, 0x6f, 0x64, 0x36, 0x9b, 0xaa, 0xe3, 0x55, 0xb8, 0xdd, 0x24, 0x59, 0x32, 0x6d}, + subYX: fp.Elt{0x87, 0xde, 0x20, 0x44, 0x48, 0x86, 0x13, 0x08, 0xb4, 0xed, 0x92, 0xb5, 0x16, 0xf0, 0x1c, 0x8a, 0x25, 0x2d, 0x94, 0x29, 0x27, 0x4e, 0xfa, 0x39, 0x10, 0x28, 0x48, 0xe2, 0x6f, 0xfe, 0xa7, 0x71}, + dt2: fp.Elt{0x54, 0xc8, 0xc8, 0xa5, 0xb8, 0x82, 0x71, 0x6c, 0x03, 0x2a, 0x5f, 0xfe, 0x79, 0x14, 0xfd, 0x33, 0x0c, 0x8d, 0x77, 0x83, 0x18, 0x59, 0xcf, 0x72, 0xa9, 0xea, 0x9e, 0x55, 0xb6, 0xc4, 0x46, 0x47}, + }, + { /* 19P */ + addYX: fp.Elt{0x2b, 0x9a, 0xc6, 0x6d, 0x3c, 0x7b, 0x77, 0xd3, 0x17, 0xf6, 0x89, 0x6f, 0x27, 0xb2, 0xfa, 0xde, 0xb5, 0x16, 0x3a, 0xb5, 0xf7, 0x1c, 0x65, 0x45, 0xb7, 0x9f, 0xfe, 0x34, 0xde, 0x51, 0x9a, 0x5c}, + subYX: fp.Elt{0x47, 0x11, 0x74, 0x64, 0xc8, 0x46, 0x85, 0x34, 0x49, 0xc8, 0xfc, 0x0e, 0xdd, 0xae, 0x35, 0x7d, 0x32, 0xa3, 0x72, 0x06, 0x76, 0x9a, 0x93, 0xff, 0xd6, 0xe6, 0xb5, 0x7d, 0x49, 0x63, 0x96, 0x21}, + dt2: fp.Elt{0x67, 0x0e, 0xf1, 0x79, 0xcf, 0xf1, 0x10, 0xf5, 0x5b, 0x51, 0x58, 0xe6, 0xa1, 0xda, 0xdd, 0xff, 0x77, 0x22, 0x14, 0x10, 0x17, 0xa7, 0xc3, 0x09, 0xbb, 0x23, 0x82, 0x60, 0x3c, 0x50, 0x04, 0x48}, + }, + { /* 21P */ + addYX: fp.Elt{0xc7, 0x7f, 0xa3, 0x2c, 0xd0, 0x9e, 0x24, 0xc4, 0xab, 0xac, 0x15, 0xa6, 0xe3, 0xa0, 0x59, 0xa0, 0x23, 0x0e, 0x6e, 0xc9, 0xd7, 0x6e, 0xa9, 0x88, 0x6d, 0x69, 0x50, 0x16, 0xa5, 0x98, 0x33, 
0x55}, + subYX: fp.Elt{0x75, 0xd1, 0x36, 0x3a, 0xd2, 0x21, 0x68, 0x3b, 0x32, 0x9e, 0x9b, 0xe9, 0xa7, 0x0a, 0xb4, 0xbb, 0x47, 0x8a, 0x83, 0x20, 0xe4, 0x5c, 0x9e, 0x5d, 0x5e, 0x4c, 0xde, 0x58, 0x88, 0x09, 0x1e, 0x77}, + dt2: fp.Elt{0xdf, 0x1e, 0x45, 0x78, 0xd2, 0xf5, 0x12, 0x9a, 0xcb, 0x9c, 0x89, 0x85, 0x79, 0x5d, 0xda, 0x3a, 0x08, 0x95, 0xa5, 0x9f, 0x2d, 0x4a, 0x7f, 0x47, 0x11, 0xa6, 0xf5, 0x8f, 0xd6, 0xd1, 0x5e, 0x5a}, + }, + { /* 23P */ + addYX: fp.Elt{0x83, 0x0e, 0x15, 0xfe, 0x2a, 0x12, 0x95, 0x11, 0xd8, 0x35, 0x4b, 0x7e, 0x25, 0x9a, 0x20, 0xcf, 0x20, 0x1e, 0x71, 0x1e, 0x29, 0xf8, 0x87, 0x73, 0xf0, 0x92, 0xbf, 0xd8, 0x97, 0xb8, 0xac, 0x44}, + subYX: fp.Elt{0x59, 0x73, 0x52, 0x58, 0xc5, 0xe0, 0xe5, 0xba, 0x7e, 0x9d, 0xdb, 0xca, 0x19, 0x5c, 0x2e, 0x39, 0xe9, 0xab, 0x1c, 0xda, 0x1e, 0x3c, 0x65, 0x28, 0x44, 0xdc, 0xef, 0x5f, 0x13, 0x60, 0x9b, 0x01}, + dt2: fp.Elt{0x83, 0x4b, 0x13, 0x5e, 0x14, 0x68, 0x60, 0x1e, 0x16, 0x4c, 0x30, 0x24, 0x4f, 0xe6, 0xf5, 0xc4, 0xd7, 0x3e, 0x1a, 0xfc, 0xa8, 0x88, 0x6e, 0x50, 0x92, 0x2f, 0xad, 0xe6, 0xfd, 0x49, 0x0c, 0x15}, + }, + { /* 25P */ + addYX: fp.Elt{0x38, 0x11, 0x47, 0x09, 0x95, 0xf2, 0x7b, 0x8e, 0x51, 0xa6, 0x75, 0x4f, 0x39, 0xef, 0x6f, 0x5d, 0xad, 0x08, 0xa7, 0x25, 0xc4, 0x79, 0xaf, 0x10, 0x22, 0x99, 0xb9, 0x5b, 0x07, 0x5a, 0x2b, 0x6b}, + subYX: fp.Elt{0x68, 0xa8, 0xdc, 0x9c, 0x3c, 0x86, 0x49, 0xb8, 0xd0, 0x4a, 0x71, 0xb8, 0xdb, 0x44, 0x3f, 0xc8, 0x8d, 0x16, 0x36, 0x0c, 0x56, 0xe3, 0x3e, 0xfe, 0xc1, 0xfb, 0x05, 0x1e, 0x79, 0xd7, 0xa6, 0x78}, + dt2: fp.Elt{0x76, 0xb9, 0xa0, 0x47, 0x4b, 0x70, 0xbf, 0x58, 0xd5, 0x48, 0x17, 0x74, 0x55, 0xb3, 0x01, 0xa6, 0x90, 0xf5, 0x42, 0xd5, 0xb1, 0x1f, 0x2b, 0xaa, 0x00, 0x5d, 0xd5, 0x4a, 0xfc, 0x7f, 0x5c, 0x72}, + }, + { /* 27P */ + addYX: fp.Elt{0xb2, 0x99, 0xcf, 0xd1, 0x15, 0x67, 0x42, 0xe4, 0x34, 0x0d, 0xa2, 0x02, 0x11, 0xd5, 0x52, 0x73, 0x9f, 0x10, 0x12, 0x8b, 0x7b, 0x15, 0xd1, 0x23, 0xa3, 0xf3, 0xb1, 0x7c, 0x27, 0xc9, 0x4c, 0x79}, + subYX: fp.Elt{0xc0, 0x98, 0xd0, 0x1c, 0xf7, 0x2b, 0x80, 0x91, 0x66, 0x63, 0x5e, 0xed, 0xa4, 0x6c, 0x41, 0xfe, 0x4c, 0x99, 0x02, 0x49, 0x71, 0x5d, 0x58, 0xdf, 0xe7, 0xfa, 0x55, 0xf8, 0x25, 0x46, 0xd5, 0x4c}, + dt2: fp.Elt{0x53, 0x50, 0xac, 0xc2, 0x26, 0xc4, 0xf6, 0x4a, 0x58, 0x72, 0xf6, 0x32, 0xad, 0xed, 0x9a, 0xbc, 0x21, 0x10, 0x31, 0x0a, 0xf1, 0x32, 0xd0, 0x2a, 0x85, 0x8e, 0xcc, 0x6f, 0x7b, 0x35, 0x08, 0x70}, + }, + { /* 29P */ + addYX: fp.Elt{0x01, 0x3f, 0x77, 0x38, 0x27, 0x67, 0x88, 0x0b, 0xfb, 0xcc, 0xfb, 0x95, 0xfa, 0xc8, 0xcc, 0xb8, 0xb6, 0x29, 0xad, 0xb9, 0xa3, 0xd5, 0x2d, 0x8d, 0x6a, 0x0f, 0xad, 0x51, 0x98, 0x7e, 0xef, 0x06}, + subYX: fp.Elt{0x34, 0x4a, 0x58, 0x82, 0xbb, 0x9f, 0x1b, 0xd0, 0x2b, 0x79, 0xb4, 0xd2, 0x63, 0x64, 0xab, 0x47, 0x02, 0x62, 0x53, 0x48, 0x9c, 0x63, 0x31, 0xb6, 0x28, 0xd4, 0xd6, 0x69, 0x36, 0x2a, 0xa9, 0x13}, + dt2: fp.Elt{0xe5, 0x7d, 0x57, 0xc0, 0x1c, 0x77, 0x93, 0xca, 0x5c, 0xdc, 0x35, 0x50, 0x1e, 0xe4, 0x40, 0x75, 0x71, 0xe0, 0x02, 0xd8, 0x01, 0x0f, 0x68, 0x24, 0x6a, 0xf8, 0x2a, 0x8a, 0xdf, 0x6d, 0x29, 0x3c}, + }, + { /* 31P */ + addYX: fp.Elt{0x13, 0xa7, 0x14, 0xd9, 0xf9, 0x15, 0xad, 0xae, 0x12, 0xf9, 0x8f, 0x8c, 0xf9, 0x7b, 0x2f, 0xa9, 0x30, 0xd7, 0x53, 0x9f, 0x17, 0x23, 0xf8, 0xaf, 0xba, 0x77, 0x0c, 0x49, 0x93, 0xd3, 0x99, 0x7a}, + subYX: fp.Elt{0x41, 0x25, 0x1f, 0xbb, 0x2e, 0x4d, 0xeb, 0xfc, 0x1f, 0xb9, 0xad, 0x40, 0xc7, 0x10, 0x95, 0xb8, 0x05, 0xad, 0xa1, 0xd0, 0x7d, 0xa3, 0x71, 0xfc, 0x7b, 0x71, 0x47, 0x07, 0x70, 0x2c, 0x89, 0x0a}, + dt2: fp.Elt{0xe8, 0xa3, 0xbd, 0x36, 0x24, 0xed, 0x52, 0x8f, 0x94, 0x07, 0xe8, 0x57, 0x41, 0xc8, 0xa8, 0x77, 0xe0, 
0x9c, 0x2f, 0x26, 0x63, 0x65, 0xa9, 0xa5, 0xd2, 0xf7, 0x02, 0x83, 0xd2, 0x62, 0x67, 0x28}, + }, + { /* 33P */ + addYX: fp.Elt{0x25, 0x5b, 0xe3, 0x3c, 0x09, 0x36, 0x78, 0x4e, 0x97, 0xaa, 0x6b, 0xb2, 0x1d, 0x18, 0xe1, 0x82, 0x3f, 0xb8, 0xc7, 0xcb, 0xd3, 0x92, 0xc1, 0x0c, 0x3a, 0x9d, 0x9d, 0x6a, 0x04, 0xda, 0xf1, 0x32}, + subYX: fp.Elt{0xbd, 0xf5, 0x2e, 0xce, 0x2b, 0x8e, 0x55, 0x7c, 0x63, 0xbc, 0x47, 0x67, 0xb4, 0x6c, 0x98, 0xe4, 0xb8, 0x89, 0xbb, 0x3b, 0x9f, 0x17, 0x4a, 0x15, 0x7a, 0x76, 0xf1, 0xd6, 0xa3, 0xf2, 0x86, 0x76}, + dt2: fp.Elt{0x6a, 0x7c, 0x59, 0x6d, 0xa6, 0x12, 0x8d, 0xaa, 0x2b, 0x85, 0xd3, 0x04, 0x03, 0x93, 0x11, 0x8f, 0x22, 0xb0, 0x09, 0xc2, 0x73, 0xdc, 0x91, 0x3f, 0xa6, 0x28, 0xad, 0xa9, 0xf8, 0x05, 0x13, 0x56}, + }, + { /* 35P */ + addYX: fp.Elt{0xd1, 0xae, 0x92, 0xec, 0x8d, 0x97, 0x0c, 0x10, 0xe5, 0x73, 0x6d, 0x4d, 0x43, 0xd5, 0x43, 0xca, 0x48, 0xba, 0x47, 0xd8, 0x22, 0x1b, 0x13, 0x83, 0x2c, 0x4d, 0x5d, 0xe3, 0x53, 0xec, 0xaa}, + subYX: fp.Elt{0xd5, 0xc0, 0xb0, 0xe7, 0x28, 0xcc, 0x22, 0x67, 0x53, 0x5c, 0x07, 0xdb, 0xbb, 0xe9, 0x9d, 0x70, 0x61, 0x0a, 0x01, 0xd7, 0xa7, 0x8d, 0xf6, 0xca, 0x6c, 0xcc, 0x57, 0x2c, 0xef, 0x1a, 0x0a, 0x03}, + dt2: fp.Elt{0xaa, 0xd2, 0x3a, 0x00, 0x73, 0xf7, 0xb1, 0x7b, 0x08, 0x66, 0x21, 0x2b, 0x80, 0x29, 0x3f, 0x0b, 0x3e, 0xd2, 0x0e, 0x52, 0x86, 0xdc, 0x21, 0x78, 0x80, 0x54, 0x06, 0x24, 0x1c, 0x9c, 0xbe, 0x20}, + }, + { /* 37P */ + addYX: fp.Elt{0xa6, 0x73, 0x96, 0x24, 0xd8, 0x87, 0x53, 0xe1, 0x93, 0xe4, 0x46, 0xf5, 0x2d, 0xbc, 0x43, 0x59, 0xb5, 0x63, 0x6f, 0xc3, 0x81, 0x9a, 0x7f, 0x1c, 0xde, 0xc1, 0x0a, 0x1f, 0x36, 0xb3, 0x0a, 0x75}, + subYX: fp.Elt{0x60, 0x5e, 0x02, 0xe2, 0x4a, 0xe4, 0xe0, 0x20, 0x38, 0xb9, 0xdc, 0xcb, 0x2f, 0x3b, 0x3b, 0xb0, 0x1c, 0x0d, 0x5a, 0xf9, 0x9c, 0x63, 0x5d, 0x10, 0x11, 0xe3, 0x67, 0x50, 0x54, 0x4c, 0x76, 0x69}, + dt2: fp.Elt{0x37, 0x10, 0xf8, 0xa2, 0x83, 0x32, 0x8a, 0x1e, 0xf1, 0xcb, 0x7f, 0xbd, 0x23, 0xda, 0x2e, 0x6f, 0x63, 0x25, 0x2e, 0xac, 0x5b, 0xd1, 0x2f, 0xb7, 0x40, 0x50, 0x07, 0xb7, 0x3f, 0x6b, 0xf9, 0x54}, + }, + { /* 39P */ + addYX: fp.Elt{0x79, 0x92, 0x66, 0x29, 0x04, 0xf2, 0xad, 0x0f, 0x4a, 0x72, 0x7d, 0x7d, 0x04, 0xa2, 0xdd, 0x3a, 0xf1, 0x60, 0x57, 0x8c, 0x82, 0x94, 0x3d, 0x6f, 0x9e, 0x53, 0xb7, 0x2b, 0xc5, 0xe9, 0x7f, 0x3d}, + subYX: fp.Elt{0xcd, 0x1e, 0xb1, 0x16, 0xc6, 0xaf, 0x7d, 0x17, 0x79, 0x64, 0x57, 0xfa, 0x9c, 0x4b, 0x76, 0x89, 0x85, 0xe7, 0xec, 0xe6, 0x10, 0xa1, 0xa8, 0xb7, 0xf0, 0xdb, 0x85, 0xbe, 0x9f, 0x83, 0xe6, 0x78}, + dt2: fp.Elt{0x6b, 0x85, 0xb8, 0x37, 0xf7, 0x2d, 0x33, 0x70, 0x8a, 0x17, 0x1a, 0x04, 0x43, 0x5d, 0xd0, 0x75, 0x22, 0x9e, 0xe5, 0xa0, 0x4a, 0xf7, 0x0f, 0x32, 0x42, 0x82, 0x08, 0x50, 0xf3, 0x68, 0xf2, 0x70}, + }, + { /* 41P */ + addYX: fp.Elt{0x47, 0x5f, 0x80, 0xb1, 0x83, 0x45, 0x86, 0x66, 0x19, 0x7c, 0xdd, 0x60, 0xd1, 0xc5, 0x35, 0xf5, 0x06, 0xb0, 0x4c, 0x1e, 0xb7, 0x4e, 0x87, 0xe9, 0xd9, 0x89, 0xd8, 0xfa, 0x5c, 0x34, 0x0d, 0x7c}, + subYX: fp.Elt{0x55, 0xf3, 0xdc, 0x70, 0x20, 0x11, 0x24, 0x23, 0x17, 0xe1, 0xfc, 0xe7, 0x7e, 0xc9, 0x0c, 0x38, 0x98, 0xb6, 0x52, 0x35, 0xed, 0xde, 0x1d, 0xb3, 0xb9, 0xc4, 0xb8, 0x39, 0xc0, 0x56, 0x4e, 0x40}, + dt2: fp.Elt{0x8a, 0x33, 0x78, 0x8c, 0x4b, 0x1f, 0x1f, 0x59, 0xe1, 0xb5, 0xe0, 0x67, 0xb1, 0x6a, 0x36, 0xa0, 0x44, 0x3d, 0x5f, 0xb4, 0x52, 0x41, 0xbc, 0x5c, 0x77, 0xc7, 0xae, 0x2a, 0x76, 0x54, 0xd7, 0x20}, + }, + { /* 43P */ + addYX: fp.Elt{0x58, 0xb7, 0x3b, 0xc7, 0x6f, 0xc3, 0x8f, 0x5e, 0x9a, 0xbb, 0x3c, 0x36, 0xa5, 0x43, 0xe5, 0xac, 0x22, 0xc9, 0x3b, 0x90, 0x7d, 0x4a, 0x93, 0xa9, 0x62, 0xec, 0xce, 0xf3, 0x46, 0x1e, 0x8f, 0x2b}, + subYX: fp.Elt{0x43, 
0xf5, 0xb9, 0x35, 0xb1, 0xfe, 0x74, 0x9d, 0x6c, 0x95, 0x8c, 0xde, 0xf1, 0x7d, 0xb3, 0x84, 0xa9, 0x8b, 0x13, 0x57, 0x07, 0x2b, 0x32, 0xe9, 0xe1, 0x4c, 0x0b, 0x79, 0xa8, 0xad, 0xb8, 0x38}, + dt2: fp.Elt{0x5d, 0xf9, 0x51, 0xdf, 0x9c, 0x4a, 0xc0, 0xb5, 0xac, 0xde, 0x1f, 0xcb, 0xae, 0x52, 0x39, 0x2b, 0xda, 0x66, 0x8b, 0x32, 0x8b, 0x6d, 0x10, 0x1d, 0x53, 0x19, 0xba, 0xce, 0x32, 0xeb, 0x9a, 0x04}, + }, + { /* 45P */ + addYX: fp.Elt{0x31, 0x79, 0xfc, 0x75, 0x0b, 0x7d, 0x50, 0xaa, 0xd3, 0x25, 0x67, 0x7a, 0x4b, 0x92, 0xef, 0x0f, 0x30, 0x39, 0x6b, 0x39, 0x2b, 0x54, 0x82, 0x1d, 0xfc, 0x74, 0xf6, 0x30, 0x75, 0xe1, 0x5e, 0x79}, + subYX: fp.Elt{0x7e, 0xfe, 0xdc, 0x63, 0x3c, 0x7d, 0x76, 0xd7, 0x40, 0x6e, 0x85, 0x97, 0x48, 0x59, 0x9c, 0x20, 0x13, 0x7c, 0x4f, 0xe1, 0x61, 0x68, 0x67, 0xb6, 0xfc, 0x25, 0xd6, 0xc8, 0xe0, 0x65, 0xc6, 0x51}, + dt2: fp.Elt{0x81, 0xbd, 0xec, 0x52, 0x0a, 0x5b, 0x4a, 0x25, 0xe7, 0xaf, 0x34, 0xe0, 0x6e, 0x1f, 0x41, 0x5d, 0x31, 0x4a, 0xee, 0xca, 0x0d, 0x4d, 0xa2, 0xe6, 0x77, 0x44, 0xc5, 0x9d, 0xf4, 0x9b, 0xd1, 0x6c}, + }, + { /* 47P */ + addYX: fp.Elt{0x86, 0xc3, 0xaf, 0x65, 0x21, 0x61, 0xfe, 0x1f, 0x10, 0x1b, 0xd5, 0xb8, 0x88, 0x2a, 0x2a, 0x08, 0xaa, 0x0b, 0x99, 0x20, 0x7e, 0x62, 0xf6, 0x76, 0xe7, 0x43, 0x9e, 0x42, 0xa7, 0xb3, 0x01, 0x5e}, + subYX: fp.Elt{0xa3, 0x9c, 0x17, 0x52, 0x90, 0x61, 0x87, 0x7e, 0x85, 0x9f, 0x2c, 0x0b, 0x06, 0x0a, 0x1d, 0x57, 0x1e, 0x71, 0x99, 0x84, 0xa8, 0xba, 0xa2, 0x80, 0x38, 0xe6, 0xb2, 0x40, 0xdb, 0xf3, 0x20, 0x75}, + dt2: fp.Elt{0xa1, 0x57, 0x93, 0xd3, 0xe3, 0x0b, 0xb5, 0x3d, 0xa5, 0x94, 0x9e, 0x59, 0xdd, 0x6c, 0x7b, 0x96, 0x6e, 0x1e, 0x31, 0xdf, 0x64, 0x9a, 0x30, 0x1a, 0x86, 0xc9, 0xf3, 0xce, 0x9c, 0x2c, 0x09, 0x71}, + }, + { /* 49P */ + addYX: fp.Elt{0xcf, 0x1d, 0x05, 0x74, 0xac, 0xd8, 0x6b, 0x85, 0x1e, 0xaa, 0xb7, 0x55, 0x08, 0xa4, 0xf6, 0x03, 0xeb, 0x3c, 0x74, 0xc9, 0xcb, 0xe7, 0x4a, 0x3a, 0xde, 0xab, 0x37, 0x71, 0xbb, 0xa5, 0x73, 0x41}, + subYX: fp.Elt{0x8c, 0x91, 0x64, 0x03, 0x3f, 0x52, 0xd8, 0x53, 0x1c, 0x6b, 0xab, 0x3f, 0xf4, 0x04, 0xb4, 0xa2, 0xa4, 0xe5, 0x81, 0x66, 0x9e, 0x4a, 0x0b, 0x08, 0xa7, 0x7b, 0x25, 0xd0, 0x03, 0x5b, 0xa1, 0x0e}, + dt2: fp.Elt{0x8a, 0x21, 0xf9, 0xf0, 0x31, 0x6e, 0xc5, 0x17, 0x08, 0x47, 0xfc, 0x1a, 0x2b, 0x6e, 0x69, 0x5a, 0x76, 0xf1, 0xb2, 0xf4, 0x68, 0x16, 0x93, 0xf7, 0x67, 0x3a, 0x4e, 0x4a, 0x61, 0x65, 0xc5, 0x5f}, + }, + { /* 51P */ + addYX: fp.Elt{0x8e, 0x98, 0x90, 0x77, 0xe6, 0xe1, 0x92, 0x48, 0x22, 0xd7, 0x5c, 0x1c, 0x0f, 0x95, 0xd5, 0x01, 0xed, 0x3e, 0x92, 0xe5, 0x9a, 0x81, 0xb0, 0xe3, 0x1b, 0x65, 0x46, 0x9d, 0x40, 0xc7, 0x14, 0x32}, + subYX: fp.Elt{0xe5, 0x7a, 0x6d, 0xc4, 0x0d, 0x57, 0x6e, 0x13, 0x8f, 0xdc, 0xf8, 0x54, 0xcc, 0xaa, 0xd0, 0x0f, 0x86, 0xad, 0x0d, 0x31, 0x03, 0x9f, 0x54, 0x59, 0xa1, 0x4a, 0x45, 0x4c, 0x41, 0x1c, 0x71, 0x62}, + dt2: fp.Elt{0x70, 0x17, 0x65, 0x06, 0x74, 0x82, 0x29, 0x13, 0x36, 0x94, 0x27, 0x8a, 0x66, 0xa0, 0xa4, 0x3b, 0x3c, 0x22, 0x5d, 0x18, 0xec, 0xb8, 0xb6, 0xd9, 0x3c, 0x83, 0xcb, 0x3e, 0x07, 0x94, 0xea, 0x5b}, + }, + { /* 53P */ + addYX: fp.Elt{0xf8, 0xd2, 0x43, 0xf3, 0x63, 0xce, 0x70, 0xb4, 0xf1, 0xe8, 0x43, 0x05, 0x8f, 0xba, 0x67, 0x00, 0x6f, 0x7b, 0x11, 0xa2, 0xa1, 0x51, 0xda, 0x35, 0x2f, 0xbd, 0xf1, 0x44, 0x59, 0x78, 0xd0, 0x4a}, + subYX: fp.Elt{0xe4, 0x9b, 0xc8, 0x12, 0x09, 0xbf, 0x1d, 0x64, 0x9c, 0x57, 0x6e, 0x7d, 0x31, 0x8b, 0xf3, 0xac, 0x65, 0xb0, 0x97, 0xf6, 0x02, 0x9e, 0xfe, 0xab, 0xec, 0x1e, 0xf6, 0x48, 0xc1, 0xd5, 0xac, 0x3a}, + dt2: fp.Elt{0x01, 0x83, 0x31, 0xc3, 0x34, 0x3b, 0x8e, 0x85, 0x26, 0x68, 0x31, 0x07, 0x47, 0xc0, 0x99, 0xdc, 0x8c, 0xa8, 0x9d, 0xd3, 0x2e, 0x5b, 
0x08, 0x34, 0x3d, 0x85, 0x02, 0xd9, 0xb1, 0x0c, 0xff, 0x3a}, + }, + { /* 55P */ + addYX: fp.Elt{0x05, 0x35, 0xc5, 0xf4, 0x0b, 0x43, 0x26, 0x92, 0x83, 0x22, 0x1f, 0x26, 0x13, 0x9c, 0xe4, 0x68, 0xc6, 0x27, 0xd3, 0x8f, 0x78, 0x33, 0xef, 0x09, 0x7f, 0x9e, 0xd9, 0x2b, 0x73, 0x9f, 0xcf, 0x2c}, + subYX: fp.Elt{0x5e, 0x40, 0x20, 0x3a, 0xeb, 0xc7, 0xc5, 0x87, 0xc9, 0x56, 0xad, 0xed, 0xef, 0x11, 0xe3, 0x8e, 0xf9, 0xd5, 0x29, 0xad, 0x48, 0x2e, 0x25, 0x29, 0x1d, 0x25, 0xcd, 0xf4, 0x86, 0x7e, 0x0e, 0x11}, + dt2: fp.Elt{0xe4, 0xf5, 0x03, 0xd6, 0x9e, 0xd8, 0xc0, 0x57, 0x0c, 0x20, 0xb0, 0xf0, 0x28, 0x86, 0x88, 0x12, 0xb7, 0x3b, 0x2e, 0xa0, 0x09, 0x27, 0x17, 0x53, 0x37, 0x3a, 0x69, 0xb9, 0xe0, 0x57, 0xc5, 0x05}, + }, + { /* 57P */ + addYX: fp.Elt{0xb0, 0x0e, 0xc2, 0x89, 0xb0, 0xbb, 0x76, 0xf7, 0x5c, 0xd8, 0x0f, 0xfa, 0xf6, 0x5b, 0xf8, 0x61, 0xfb, 0x21, 0x44, 0x63, 0x4e, 0x3f, 0xb9, 0xb6, 0x05, 0x12, 0x86, 0x41, 0x08, 0xef, 0x9f, 0x28}, + subYX: fp.Elt{0x6f, 0x7e, 0xc9, 0x1f, 0x31, 0xce, 0xf9, 0xd8, 0xae, 0xfd, 0xf9, 0x11, 0x30, 0x26, 0x3f, 0x7a, 0xdd, 0x25, 0xed, 0x8b, 0xa0, 0x7e, 0x5b, 0xe1, 0x5a, 0x87, 0xe9, 0x8f, 0x17, 0x4c, 0x15, 0x6e}, + dt2: fp.Elt{0xbf, 0x9a, 0xd6, 0xfe, 0x36, 0x63, 0x61, 0xcf, 0x4f, 0xc9, 0x35, 0x83, 0xe7, 0xe4, 0x16, 0x9b, 0xe7, 0x7f, 0x3a, 0x75, 0x65, 0x97, 0x78, 0x13, 0x19, 0xa3, 0x5c, 0xa9, 0x42, 0xf6, 0xfb, 0x6a}, + }, + { /* 59P */ + addYX: fp.Elt{0xcc, 0xa8, 0x13, 0xf9, 0x70, 0x50, 0xe5, 0x5d, 0x61, 0xf5, 0x0c, 0x2b, 0x7b, 0x16, 0x1d, 0x7d, 0x89, 0xd4, 0xea, 0x90, 0xb6, 0x56, 0x29, 0xda, 0xd9, 0x1e, 0x80, 0xdb, 0xce, 0x93, 0xc0, 0x12}, + subYX: fp.Elt{0xc1, 0xd2, 0xf5, 0x62, 0x0c, 0xde, 0xa8, 0x7d, 0x9a, 0x7b, 0x0e, 0xb0, 0xa4, 0x3d, 0xfc, 0x98, 0xe0, 0x70, 0xad, 0x0d, 0xda, 0x6a, 0xeb, 0x7d, 0xc4, 0x38, 0x50, 0xb9, 0x51, 0xb8, 0xb4, 0x0d}, + dt2: fp.Elt{0x0f, 0x19, 0xb8, 0x08, 0x93, 0x7f, 0x14, 0xfc, 0x10, 0xe3, 0x1a, 0xa1, 0xa0, 0x9d, 0x96, 0x06, 0xfd, 0xd7, 0xc7, 0xda, 0x72, 0x55, 0xe7, 0xce, 0xe6, 0x5c, 0x63, 0xc6, 0x99, 0x87, 0xaa, 0x33}, + }, + { /* 61P */ + addYX: fp.Elt{0xb1, 0x6c, 0x15, 0xfc, 0x88, 0xf5, 0x48, 0x83, 0x27, 0x6d, 0x0a, 0x1a, 0x9b, 0xba, 0xa2, 0x6d, 0xb6, 0x5a, 0xca, 0x87, 0x5c, 0x2d, 0x26, 0xe2, 0xa6, 0x89, 0xd5, 0xc8, 0xc1, 0xd0, 0x2c, 0x21}, + subYX: fp.Elt{0xf2, 0x5c, 0x08, 0xbd, 0x1e, 0xf5, 0x0f, 0xaf, 0x1f, 0x3f, 0xd3, 0x67, 0x89, 0x1a, 0xf5, 0x78, 0x3c, 0x03, 0x60, 0x50, 0xe1, 0xbf, 0xc2, 0x6e, 0x86, 0x1a, 0xe2, 0xe8, 0x29, 0x6f, 0x3c, 0x23}, + dt2: fp.Elt{0x81, 0xc7, 0x18, 0x7f, 0x10, 0xd5, 0xf4, 0xd2, 0x28, 0x9d, 0x7e, 0x52, 0xf2, 0xcd, 0x2e, 0x12, 0x41, 0x33, 0x3d, 0x3d, 0x2a, 0x86, 0x0a, 0xa7, 0xe3, 0x4c, 0x91, 0x11, 0x89, 0x77, 0xb7, 0x1d}, + }, + { /* 63P */ + addYX: fp.Elt{0xb6, 0x1a, 0x70, 0xdd, 0x69, 0x47, 0x39, 0xb3, 0xa5, 0x8d, 0xcf, 0x19, 0xd4, 0xde, 0xb8, 0xe2, 0x52, 0xc8, 0x2a, 0xfd, 0x61, 0x41, 0xdf, 0x15, 0xbe, 0x24, 0x7d, 0x01, 0x8a, 0xca, 0xe2, 0x7a}, + subYX: fp.Elt{0x6f, 0xc2, 0x6b, 0x7c, 0x39, 0x52, 0xf3, 0xdd, 0x13, 0x01, 0xd5, 0x53, 0xcc, 0xe2, 0x97, 0x7a, 0x30, 0xa3, 0x79, 0xbf, 0x3a, 0xf4, 0x74, 0x7c, 0xfc, 0xad, 0xe2, 0x26, 0xad, 0x97, 0xad, 0x31}, + dt2: fp.Elt{0x62, 0xb9, 0x20, 0x09, 0xed, 0x17, 0xe8, 0xb7, 0x9d, 0xda, 0x19, 0x3f, 0xcc, 0x18, 0x85, 0x1e, 0x64, 0x0a, 0x56, 0x25, 0x4f, 0xc1, 0x91, 0xe4, 0x83, 0x2c, 0x62, 0xa6, 0x53, 0xfc, 0xd1, 0x1e}, + }, +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go new file mode 100644 index 000000000..324bd8f33 --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go @@ -0,0 +1,411 @@ 
+// Package ed448 implements the Ed448 signature scheme as described in RFC-8032.
+//
+// This package implements two signature variants.
+//
+// | Scheme Name | Sign Function     | Verification  | Context           |
+// |-------------|-------------------|---------------|-------------------|
+// | Ed448       | Sign              | Verify        | Yes, can be empty |
+// | Ed448Ph     | SignPh            | VerifyPh      | Yes, can be empty |
+// | All above   | (PrivateKey).Sign | VerifyAny     | As above          |
+//
+// Specific functions for signing and verifying are defined. A generic signing
+// function for all schemes is available through the crypto.Signer interface,
+// which is implemented by the PrivateKey type. A corresponding all-in-one
+// verification method is provided by the VerifyAny function.
+//
+// Both schemes accept a context string for domain separation; it may be empty.
+// This parameter is passed using a SignerOptions struct defined in this package.
+//
+// References:
+//
+// - RFC8032: https://rfc-editor.org/rfc/rfc8032.txt
+// - EdDSA for more curves: https://eprint.iacr.org/2015/677
+// - High-speed high-security signatures: https://doi.org/10.1007/s13389-012-0027-1
+package ed448
+
+import (
+	"bytes"
+	"crypto"
+	cryptoRand "crypto/rand"
+	"crypto/subtle"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/cloudflare/circl/ecc/goldilocks"
+	"github.com/cloudflare/circl/internal/sha3"
+	"github.com/cloudflare/circl/sign"
+)
+
+const (
+	// ContextMaxSize is the maximum length (in bytes) allowed for context.
+	ContextMaxSize = 255
+	// PublicKeySize is the length in bytes of Ed448 public keys.
+	PublicKeySize = 57
+	// PrivateKeySize is the length in bytes of Ed448 private keys.
+	PrivateKeySize = 114
+	// SignatureSize is the length in bytes of signatures.
+	SignatureSize = 114
+	// SeedSize is the size, in bytes, of private key seeds.
+	// These are the private key representations used by RFC 8032.
+	SeedSize = 57
+)
+
+const (
+	paramB   = 456 / 8    // Size of keys in bytes.
+	hashSize = 2 * paramB // Size of the hash function's output.
+)
+
+// SignerOptions implements crypto.SignerOpts and augments with parameters
+// that are specific to the Ed448 signature schemes.
+type SignerOptions struct {
+	// Hash must be crypto.Hash(0) for both Ed448 and Ed448Ph.
+	crypto.Hash
+
+	// Context is an optional domain separation string for signing.
+	// Its length must be less than or equal to 255 bytes.
+	Context string
+
+	// Scheme is an identifier for choosing a signature scheme.
+	Scheme SchemeID
+}
+
+// SchemeID is an identifier for each signature scheme.
+type SchemeID uint
+
+const (
+	ED448 SchemeID = iota
+	ED448Ph
+)
+
+// PublicKey is the type of Ed448 public keys.
+type PublicKey []byte
+
+// Equal reports whether pub and x have the same value.
+func (pub PublicKey) Equal(x crypto.PublicKey) bool {
+	xx, ok := x.(PublicKey)
+	return ok && bytes.Equal(pub, xx)
+}
+
+// PrivateKey is the type of Ed448 private keys. It implements crypto.Signer.
+type PrivateKey []byte
+
+// Equal reports whether priv and x have the same value.
+func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
+	xx, ok := x.(PrivateKey)
+	return ok && subtle.ConstantTimeCompare(priv, xx) == 1
+}
+
+// Public returns the PublicKey corresponding to priv.
+func (priv PrivateKey) Public() crypto.PublicKey {
+	publicKey := make([]byte, PublicKeySize)
+	copy(publicKey, priv[SeedSize:])
+	return PublicKey(publicKey)
+}
+
+// Seed returns the private key seed corresponding to priv. It is provided for
+// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
+// in this package.
+func (priv PrivateKey) Seed() []byte {
+	seed := make([]byte, SeedSize)
+	copy(seed, priv[:SeedSize])
+	return seed
+}
+
+func (priv PrivateKey) Scheme() sign.Scheme { return sch }
+
+func (pub PublicKey) Scheme() sign.Scheme { return sch }
+
+func (priv PrivateKey) MarshalBinary() (data []byte, err error) {
+	privateKey := make(PrivateKey, PrivateKeySize)
+	copy(privateKey, priv)
+	return privateKey, nil
+}
+
+func (pub PublicKey) MarshalBinary() (data []byte, err error) {
+	publicKey := make(PublicKey, PublicKeySize)
+	copy(publicKey, pub)
+	return publicKey, nil
+}
+
+// Sign creates a signature of a message given a key pair.
+// This function supports both signature variants defined in RFC-8032,
+// namely Ed448 (or pure EdDSA) and Ed448Ph.
+// The opts.HashFunc() must return zero to specify the Ed448 variant. This can
+// be achieved by passing crypto.Hash(0) as the value for opts.
+// Use a SignerOptions struct with Scheme set to ED448Ph to indicate that the
+// Ed448Ph variant should be used.
+// The struct can also be optionally used to pass a context string for signing.
+func (priv PrivateKey) Sign(
+	rand io.Reader,
+	message []byte,
+	opts crypto.SignerOpts,
+) (signature []byte, err error) {
+	var ctx string
+	var scheme SchemeID
+
+	if o, ok := opts.(SignerOptions); ok {
+		ctx = o.Context
+		scheme = o.Scheme
+	}
+
+	switch true {
+	case scheme == ED448 && opts.HashFunc() == crypto.Hash(0):
+		return Sign(priv, message, ctx), nil
+	case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0):
+		return SignPh(priv, message, ctx), nil
+	default:
+		return nil, errors.New("ed448: bad hash algorithm")
+	}
+}
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+// If rand is nil, crypto/rand.Reader will be used.
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
+	if rand == nil {
+		rand = cryptoRand.Reader
+	}
+
+	seed := make(PrivateKey, SeedSize)
+	if _, err := io.ReadFull(rand, seed); err != nil {
+		return nil, nil, err
+	}
+
+	privateKey := NewKeyFromSeed(seed)
+	publicKey := make([]byte, PublicKeySize)
+	copy(publicKey, privateKey[SeedSize:])
+
+	return publicKey, privateKey, nil
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will panic if
+// len(seed) is not SeedSize. This function is provided for interoperability
+// with RFC 8032. RFC 8032's private keys correspond to seeds in this
+// package.
+func NewKeyFromSeed(seed []byte) PrivateKey {
+	privateKey := make([]byte, PrivateKeySize)
+	newKeyFromSeed(privateKey, seed)
+	return privateKey
+}
+
+func newKeyFromSeed(privateKey, seed []byte) {
+	if l := len(seed); l != SeedSize {
+		panic("ed448: bad seed length: " + strconv.Itoa(l))
+	}
+
+	var h [hashSize]byte
+	H := sha3.NewShake256()
+	_, _ = H.Write(seed)
+	_, _ = H.Read(h[:])
+	s := &goldilocks.Scalar{}
+	deriveSecretScalar(s, h[:paramB])
+
+	copy(privateKey[:SeedSize], seed)
+	_ = goldilocks.Curve{}.ScalarBaseMult(s).ToBytes(privateKey[SeedSize:])
+}
+
+func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) {
+	if len(ctx) > ContextMaxSize {
+		panic(fmt.Errorf("ed448: bad context length: " + strconv.Itoa(len(ctx))))
+	}
+
+	H := sha3.NewShake256()
+	var PHM []byte
+
+	if preHash {
+		var h [64]byte
+		_, _ = H.Write(message)
+		_, _ = H.Read(h[:])
+		PHM = h[:]
+		H.Reset()
+	} else {
+		PHM = message
+	}
+
+	// 1. Hash the 57-byte private key using SHAKE256(x, 114).
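+	// The first paramB bytes of the digest become the secret scalar s (after
+	// bit-clamping in deriveSecretScalar); the remaining bytes become the
+	// prefix used in step 2 to derive the deterministic nonce r.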
+	var h [hashSize]byte
+	_, _ = H.Write(privateKey[:SeedSize])
+	_, _ = H.Read(h[:])
+	s := &goldilocks.Scalar{}
+	deriveSecretScalar(s, h[:paramB])
+	prefix := h[paramB:]
+
+	// 2. Compute SHAKE256(dom4(F, C) || prefix || PH(M), 114).
+	var rPM [hashSize]byte
+	H.Reset()
+
+	writeDom(&H, ctx, preHash)
+
+	_, _ = H.Write(prefix)
+	_, _ = H.Write(PHM)
+	_, _ = H.Read(rPM[:])
+
+	// 3. Compute the point [r]B.
+	r := &goldilocks.Scalar{}
+	r.FromBytes(rPM[:])
+	R := (&[paramB]byte{})[:]
+	if err := (goldilocks.Curve{}.ScalarBaseMult(r).ToBytes(R)); err != nil {
+		panic(err)
+	}
+	// 4. Compute SHAKE256(dom4(F, C) || R || A || PH(M), 114)
+	var hRAM [hashSize]byte
+	H.Reset()
+
+	writeDom(&H, ctx, preHash)
+
+	_, _ = H.Write(R)
+	_, _ = H.Write(privateKey[SeedSize:])
+	_, _ = H.Write(PHM)
+	_, _ = H.Read(hRAM[:])
+
+	// 5. Compute S = (r + k * s) mod order.
+	k := &goldilocks.Scalar{}
+	k.FromBytes(hRAM[:])
+	S := &goldilocks.Scalar{}
+	S.Mul(k, s)
+	S.Add(S, r)
+
+	// 6. The signature is the concatenation of R and S.
+	copy(signature[:paramB], R[:])
+	copy(signature[paramB:], S[:])
+}
+
+// Sign signs the message with privateKey and returns a signature.
+// This function supports the signature variant defined in RFC-8032: Ed448,
+// also known as the pure version of EdDSA.
+// It will panic if len(privateKey) is not PrivateKeySize.
+func Sign(priv PrivateKey, message []byte, ctx string) []byte {
+	signature := make([]byte, SignatureSize)
+	signAll(signature, priv, message, []byte(ctx), false)
+	return signature
+}
+
+// SignPh creates a signature of a message given a key pair.
+// This function supports the signature variant defined in RFC-8032: Ed448ph,
+// meaning it internally hashes the message using SHAKE-256.
+// A context can be passed to this function; its length must be no more than
+// 255 bytes. It can be empty.
+func SignPh(priv PrivateKey, message []byte, ctx string) []byte {
+	signature := make([]byte, SignatureSize)
+	signAll(signature, priv, message, []byte(ctx), true)
+	return signature
+}
+
+func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool {
+	if len(public) != PublicKeySize ||
+		len(signature) != SignatureSize ||
+		len(ctx) > ContextMaxSize ||
+		!isLessThanOrder(signature[paramB:]) {
+		return false
+	}
+
+	P, err := goldilocks.FromBytes(public)
+	if err != nil {
+		return false
+	}
+
+	H := sha3.NewShake256()
+	var PHM []byte
+
+	if preHash {
+		var h [64]byte
+		_, _ = H.Write(message)
+		_, _ = H.Read(h[:])
+		PHM = h[:]
+		H.Reset()
+	} else {
+		PHM = message
+	}
+
+	var hRAM [hashSize]byte
+	R := signature[:paramB]
+
+	writeDom(&H, ctx, preHash)
+
+	_, _ = H.Write(R)
+	_, _ = H.Write(public)
+	_, _ = H.Write(PHM)
+	_, _ = H.Read(hRAM[:])
+
+	k := &goldilocks.Scalar{}
+	k.FromBytes(hRAM[:])
+	S := &goldilocks.Scalar{}
+	S.FromBytes(signature[paramB:])
+
+	encR := (&[paramB]byte{})[:]
+	P.Neg()
+	_ = goldilocks.Curve{}.CombinedMult(S, k, P).ToBytes(encR)
+	return bytes.Equal(R, encR)
+}
+
+// VerifyAny returns true if the signature is valid. Failure cases are invalid
+// signature, or when the public key cannot be decoded.
+// This function supports both signature variants defined in RFC-8032,
+// namely Ed448 (or pure EdDSA) and Ed448Ph.
+// The opts.HashFunc() must return zero; this can be achieved by passing
+// crypto.Hash(0) as the value for opts.
+// Use a SignerOptions struct to pass a context string.
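+// A minimal sketch (priv/pub are assumed to be a matching key pair; the rand
+// argument is ignored because EdDSA signing is deterministic):
+//
+//	opts := ed448.SignerOptions{Scheme: ed448.ED448Ph, Context: "example"}
+//	sig, _ := priv.Sign(nil, message, opts)
+//	ok := ed448.VerifyAny(pub, message, sig, opts)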
+func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool {
+	var ctx string
+	var scheme SchemeID
+	if o, ok := opts.(SignerOptions); ok {
+		ctx = o.Context
+		scheme = o.Scheme
+	}
+
+	switch true {
+	case scheme == ED448 && opts.HashFunc() == crypto.Hash(0):
+		return Verify(public, message, signature, ctx)
+	case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0):
+		return VerifyPh(public, message, signature, ctx)
+	default:
+		return false
+	}
+}
+
+// Verify returns true if the signature is valid. Failure cases are invalid
+// signature, or when the public key cannot be decoded.
+// This function supports the signature variant defined in RFC-8032: Ed448,
+// also known as the pure version of EdDSA.
+func Verify(public PublicKey, message, signature []byte, ctx string) bool {
+	return verify(public, message, signature, []byte(ctx), false)
+}
+
+// VerifyPh returns true if the signature is valid. Failure cases are invalid
+// signature, or when the public key cannot be decoded.
+// This function supports the signature variant defined in RFC-8032: Ed448ph,
+// meaning it internally hashes the message using SHAKE-256.
+// A context can be passed to this function; its length must be no more than
+// 255 bytes. It can be empty.
+func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool {
+	return verify(public, message, signature, []byte(ctx), true)
+}
+
+func deriveSecretScalar(s *goldilocks.Scalar, h []byte) {
+	h[0] &= 0xFC        // The two least significant bits of the first octet are cleared,
+	h[paramB-1] = 0x00  // all eight bits of the last octet are cleared, and
+	h[paramB-2] |= 0x80 // the highest bit of the second to last octet is set.
+	s.FromBytes(h[:paramB])
+}
+
+// isLessThanOrder returns true if 0 <= x < order and if the last byte of x is zero.
+func isLessThanOrder(x []byte) bool {
+	order := goldilocks.Curve{}.Order()
+	i := len(order) - 1
+	for i > 0 && x[i] == order[i] {
+		i--
+	}
+	return x[paramB-1] == 0 && x[i] < order[i]
+}
+
+func writeDom(h io.Writer, ctx []byte, preHash bool) {
+	dom4 := "SigEd448"
+	_, _ = h.Write([]byte(dom4))
+
+	if preHash {
+		_, _ = h.Write([]byte{byte(0x01), byte(len(ctx))})
+	} else {
+		_, _ = h.Write([]byte{byte(0x00), byte(len(ctx))})
+	}
+	_, _ = h.Write(ctx)
+}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go
new file mode 100644
index 000000000..22da8bc0a
--- /dev/null
+++ b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go
@@ -0,0 +1,87 @@
+package ed448
+
+import (
+	"crypto/rand"
+	"encoding/asn1"
+
+	"github.com/cloudflare/circl/sign"
+)
+
+var sch sign.Scheme = &scheme{}
+
+// Scheme returns a signature interface.
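+// Unlike the Ed25519 scheme in this module, this scheme reports
+// SupportsContext() as true: a non-empty SignatureOpts.Context is passed
+// through to Sign and Verify rather than causing a panic.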
+func Scheme() sign.Scheme { return sch } + +type scheme struct{} + +func (*scheme) Name() string { return "Ed448" } +func (*scheme) PublicKeySize() int { return PublicKeySize } +func (*scheme) PrivateKeySize() int { return PrivateKeySize } +func (*scheme) SignatureSize() int { return SignatureSize } +func (*scheme) SeedSize() int { return SeedSize } +func (*scheme) TLSIdentifier() uint { return 0x0808 } +func (*scheme) SupportsContext() bool { return true } +func (*scheme) Oid() asn1.ObjectIdentifier { + return asn1.ObjectIdentifier{1, 3, 101, 113} +} + +func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) { + return GenerateKey(rand.Reader) +} + +func (*scheme) Sign( + sk sign.PrivateKey, + message []byte, + opts *sign.SignatureOpts, +) []byte { + priv, ok := sk.(PrivateKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + ctx := "" + if opts != nil { + ctx = opts.Context + } + return Sign(priv, message, ctx) +} + +func (*scheme) Verify( + pk sign.PublicKey, + message, signature []byte, + opts *sign.SignatureOpts, +) bool { + pub, ok := pk.(PublicKey) + if !ok { + panic(sign.ErrTypeMismatch) + } + ctx := "" + if opts != nil { + ctx = opts.Context + } + return Verify(pub, message, signature, ctx) +} + +func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) { + privateKey := NewKeyFromSeed(seed) + publicKey := make(PublicKey, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + return publicKey, privateKey +} + +func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) { + if len(buf) < PublicKeySize { + return nil, sign.ErrPubKeySize + } + pub := make(PublicKey, PublicKeySize) + copy(pub, buf[:PublicKeySize]) + return pub, nil +} + +func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) { + if len(buf) < PrivateKeySize { + return nil, sign.ErrPrivKeySize + } + priv := make(PrivateKey, PrivateKeySize) + copy(priv, buf[:PrivateKeySize]) + return priv, nil +} diff --git a/vendor/github.com/cloudflare/circl/sign/sign.go b/vendor/github.com/cloudflare/circl/sign/sign.go new file mode 100644 index 000000000..13b20fa4b --- /dev/null +++ b/vendor/github.com/cloudflare/circl/sign/sign.go @@ -0,0 +1,110 @@ +// Package sign provides unified interfaces for signature schemes. +// +// A register of schemes is available in the package +// +// github.com/cloudflare/circl/sign/schemes +package sign + +import ( + "crypto" + "encoding" + "errors" +) + +type SignatureOpts struct { + // If non-empty, includes the given context in the signature if supported + // and will cause an error during signing otherwise. + Context string +} + +// A public key is used to verify a signature set by the corresponding private +// key. +type PublicKey interface { + // Returns the signature scheme for this public key. + Scheme() Scheme + Equal(crypto.PublicKey) bool + encoding.BinaryMarshaler + crypto.PublicKey +} + +// A private key allows one to create signatures. +type PrivateKey interface { + // Returns the signature scheme for this private key. + Scheme() Scheme + Equal(crypto.PrivateKey) bool + // For compatibility with Go standard library + crypto.Signer + crypto.PrivateKey + encoding.BinaryMarshaler +} + +// A Scheme represents a specific instance of a signature scheme. +type Scheme interface { + // Name of the scheme. + Name() string + + // GenerateKey creates a new key-pair. + GenerateKey() (PublicKey, PrivateKey, error) + + // Creates a signature using the PrivateKey on the given message and + // returns the signature. 
opts are additional options which can be nil.
+	//
+	// Panics if key is nil or wrong type or opts context is not supported.
+	Sign(sk PrivateKey, message []byte, opts *SignatureOpts) []byte
+
+	// Checks whether the given signature is a valid signature set by
+	// the private key corresponding to the given public key on the
+	// given message. opts are additional options which can be nil.
+	//
+	// Panics if key is nil or wrong type or opts context is not supported.
+	Verify(pk PublicKey, message []byte, signature []byte, opts *SignatureOpts) bool
+
+	// Deterministically derives a keypair from a seed. If you're unsure,
+	// you're better off using GenerateKey().
+	//
+	// Panics if seed is not of length SeedSize().
+	DeriveKey(seed []byte) (PublicKey, PrivateKey)
+
+	// Unmarshals a PublicKey from the provided buffer.
+	UnmarshalBinaryPublicKey([]byte) (PublicKey, error)
+
+	// Unmarshals a PrivateKey from the provided buffer.
+	UnmarshalBinaryPrivateKey([]byte) (PrivateKey, error)
+
+	// Size of binary marshalled public keys.
+	PublicKeySize() int
+
+	// Size of binary marshalled private keys.
+	PrivateKeySize() int
+
+	// Size of signatures.
+	SignatureSize() int
+
+	// Size of seeds.
+	SeedSize() int
+
+	// Returns whether contexts are supported.
+	SupportsContext() bool
+}
+
+var (
+	// ErrTypeMismatch is the error used if types of, for instance, private
+	// and public keys don't match.
+	ErrTypeMismatch = errors.New("types mismatch")
+
+	// ErrSeedSize is the error used if the provided seed is of the wrong
+	// size.
+	ErrSeedSize = errors.New("wrong seed size")
+
+	// ErrPubKeySize is the error used if the provided public key is of
+	// the wrong size.
+	ErrPubKeySize = errors.New("wrong size for public key")
+
+	// ErrPrivKeySize is the error used if the provided private key is of
+	// the wrong size.
+	ErrPrivKeySize = errors.New("wrong size for private key")
+
+	// ErrContextNotSupported is the error used if a context is not
+	// supported.
+	ErrContextNotSupported = errors.New("context not supported")
+)
diff --git a/vendor/github.com/common-nighthawk/go-figure/.travis.yml b/vendor/github.com/common-nighthawk/go-figure/.travis.yml
new file mode 100644
index 000000000..4f2ee4d97
--- /dev/null
+++ b/vendor/github.com/common-nighthawk/go-figure/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/common-nighthawk/go-figure/LICENSE b/vendor/github.com/common-nighthawk/go-figure/LICENSE
new file mode 100644
index 000000000..7d44d62ab
--- /dev/null
+++ b/vendor/github.com/common-nighthawk/go-figure/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Daniel Deutsch
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/common-nighthawk/go-figure/README.md b/vendor/github.com/common-nighthawk/go-figure/README.md
new file mode 100644
index 000000000..ddf8d7a1c
--- /dev/null
+++ b/vendor/github.com/common-nighthawk/go-figure/README.md
@@ -0,0 +1,437 @@
+[![Build Status](https://travis-ci.com/common-nighthawk/go-figure.svg?branch=master)](https://travis-ci.com/common-nighthawk/go-figure)
+
+# Go Figure
+
+## Description
+Go Figure prints beautiful ASCII art from text.
+It supports [FIGlet](http://www.figlet.org/) files,
+and most of its features.
+
+This package was inspired by the Ruby gem [artii](https://github.com/miketierney/artii),
+but built from scratch and with a different feature set.
+
+## Installation
+`go get github.com/common-nighthawk/go-figure`
+
+## Basic Example
+```go
+package main
+
+import "github.com/common-nighthawk/go-figure"
+
+func main() {
+  myFigure := figure.NewFigure("Hello World", "", true)
+  myFigure.Print()
+}
+```
+
+```txt
+  _   _         _   _               __        __                 _       _
+ | | | |   ___  | | | |   ___       \ \      / /   ___    _ __  | |   __| |
+ | |_| |  / _ \ | | | |  / _ \       \ \ /\ / /   / _ \  | '__| | |  / _` |
+ |  _  | |  __/ | | | | | (_) |       \ V  V /   | (_) | | |    | | | (_| |
+ |_| |_|  \___| |_| |_|  \___/         \_/\_/     \___/  |_|    |_|  \__,_|
+```
+
+You can also make colorful figures:
+
+```go
+func main() {
+  myFigure := figure.NewColorFigure("Hello World", "", "green", true)
+  myFigure.Print()
+}
+```
+
+## Documentation
+### Create a Figure
+There are three ways to create a Figure. These are--
+the method `func NewFigure`,
+the method `func NewColorFigure`, and
+the method `func NewFigureWithFont`.
+
+Each constructor takes the arguments: the text, the font, and strict mode.
+The "color" constructor takes a color as an additional arg.
+The "with font" constructor specifies the font differently.
+The method signatures are:
+```
+func NewFigure(phrase, fontName string, strict bool) figure
+func NewColorFigure(phrase, fontName string, color string, strict bool) figure
+func NewFigureWithFont(phrase string, reader io.Reader, strict bool) figure
+```
+
+`NewFigure` requires only the name of the font, and uses the font file shipped
+with this package stored in bindata.
+
+If passed an empty string for the font name, a default is provided.
+That is, these are both valid--
+
+`myFigure := figure.NewFigure("Foo Bar", "", true)`
+
+`myFigure := figure.NewFigure("Foo Bar", "alphabet", true)`
+
+Please note that font names are case sensitive.
+
+`NewFigureWithFont`, on the other hand, accepts the font file directly.
+This allows you to BYOF (bring your own font).
+Open the .flf file by its absolute path and pass the reader.
+You can point to a file that comes with this project,
+or you can store the file anywhere you'd like and use that location.
+
+The font files are available in the [fonts folder](https://github.com/common-nighthawk/go-figure/tree/master/fonts)
+and on [figlet.org](http://www.figlet.org/fontdb.cgi).
+
+For example--
+
+`fontFile, _ := os.Open("/home/ubuntu/go/src/github.com/common-nighthawk/go-figure/fonts/alphabet.flf")`
+
+`myFigure := figure.NewFigureWithFont("Foo Bar", fontFile, true)`
+
+You can also make colorful figures! The current supported colors are:
+blue, cyan, gray, green, purple, red, white, yellow.
+
+An example--
+
+`myFigure := figure.NewColorFigure("Foo Bar", "", "green", true)`
+
+Strict mode dictates how to handle characters outside of standard ASCII.
+When set to true, a non-ASCII character (outside character codes 32-127)
+will cause the program to panic.
+When set to false, these characters are replaced with a question mark ('?').
+Examples of each--
+
+`figure.NewFigure("Foo 👍 Bar", "alphabet", true).Print()`
+
+```txt
+2016/12/01 19:35:38 invalid input.
+```
+
+`figure.NewFigure("Foo 👍 Bar", "alphabet", false).Print()`
+
+```txt
+  _____                        ___        ____
+ |  ___|   ___     ___        |__ \      | __ )    __ _    _ __
+ | |_     / _ \   / _ \         / /      |  _ \   / _` |  | '__|
+ |  _|   | (_) | | (_) |       |_|       | |_) | | (_| |  | |
+ |_|      \___/   \___/        (_)       |____/   \__,_|  |_|
+```
+
+### Methods: stdout
+#### Print()
+The most basic, and most common, method is func Print.
+A figure responds to Print(), and will write the output to the terminal.
+There is no return value.
+
+`myFigure.Print()`
+
+But if you're feeling adventurous,
+explore the methods below.
+
+#### Blink(duration, timeOn, timeOff int)
+A figure responds to the func Blink, taking three arguments.
+`duration` is the total time the banner will display, in milliseconds.
+`timeOn` is the length of time the text will blink on (also in ms).
+`timeOff` is the length of time the text will blink off (ms).
+For an even blink, set `timeOff` to -1
+(the same as setting `timeOff` to the value of `timeOn`).
+There is no return value.
+
+`myFigure.Blink(5000, 1000, 500)`
+
+`myFigure.Blink(5000, 1000, -1)`
+
+#### Scroll(duration, stillness int, direction string)
+A figure responds to the func Scroll, taking three arguments.
+`duration` is the total time the banner will display, in milliseconds.
+`stillness` is the length of time the text will not move (also in ms).
+Therefore, the lower the stillness, the faster the scroll speed.
+`direction` can be either "right" or "left" (case insensitive).
+The direction will be left if an invalid option (e.g. "foo") is passed.
+There is no return value.
+
+`myFigure.Scroll(5000, 200, "right")`
+
+`myFigure.Scroll(5000, 100, "left")`
+
+#### Dance(duration, freeze int)
+A figure responds to the func Dance, taking two arguments.
+`duration` is the total time the banner will display, in milliseconds.
+`freeze` is the length of time between dance moves (also in ms).
+Therefore, the lower the freeze, the faster the dancing.
+There is no return value.
+
+`myFigure.Dance(5000, 800)`
+
+### Methods: Writers
+#### Write(w io.Writer, fig figure)
+Unlike the methods above, which operate on a figure value,
+func Write is a package-level function that takes two arguments.
+`w` is any value that implements the io.Writer interface.
+`fig` is the figure that will be written.
+
+`figure.Write(w, myFigure)`
+
+This function would be useful, for example, to add a nifty banner to a web page--
+```go
+func landingPage(w http.ResponseWriter, r *http.Request) {
+  figure.Write(w, myFigure)
+}
+```
+
+### Methods: Misc
+#### Slicify() ([]string)
+If you want to do something outside of the provided methods,
+you can grab the internal slice.
+This gives you a good starting point to build anything you like
+with the ASCII art, albeit manually.
+
+A figure responds to the func Slicify,
+and will return the slice of strings.
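+
+If, for example, you wanted to frame the art with a simple border, you could
+range over that slice yourself (a minimal sketch--this loop is ours, not part
+of the package, and it assumes `fmt` is imported)--
+
+```go
+for _, line := range myFigure.Slicify() {
+  fmt.Println("|" + line + "|")
+}
+```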
+ +`myFigure.Slicify()` + +returns + +```txt +["FFFF BBBB ", + "F B B ", + "FFF ooo ooo BBBB aa rrr", + "F o o o o B B a a r ", + "F ooo ooo BBBB aaa r "] +``` + +## More Examples +`figure.NewFigure("Go-Figure", "isometric1", true).Print()` + +``` + ___ ___ ___ ___ ___ ___ ___ + /\ \ /\ \ /\ \ ___ /\ \ /\__\ /\ \ /\ \ + /::\ \ /::\ \ /::\ \ /\ \ /::\ \ /:/ / /::\ \ /::\ \ + /:/\:\ \ /:/\:\ \ /:/\:\ \ \:\ \ /:/\:\ \ /:/ / /:/\:\ \ /:/\:\ \ + /:/ \:\ \ /:/ \:\ \ /::\~\:\ \ /::\__\ /:/ \:\ \ /:/ / ___ /::\~\:\ \ /::\~\:\ \ + /:/__/_\:\__\ /:/__/ \:\__\ /:/\:\ \:\__\ __/:/\/__/ /:/__/_\:\__\ /:/__/ /\__\ /:/\:\ \:\__\ /:/\:\ \:\__\ + \:\ /\ \/__/ \:\ \ /:/ / \/__\:\ \/__/ /\/:/ / \:\ /\ \/__/ \:\ \ /:/ / \/_|::\/:/ / \:\~\:\ \/__/ + \:\ \:\__\ \:\ /:/ / \:\__\ \::/__/ \:\ \:\__\ \:\ /:/ / |:|::/ / \:\ \:\__\ + \:\/:/ / \:\/:/ / \/__/ \:\__\ \:\/:/ / \:\/:/ / |:|\/__/ \:\ \/__/ + \::/ / \::/ / \/__/ \::/ / \::/ / |:| | \:\__\ + \/__/ \/__/ \/__/ \/__/ \|__| \/__/ +``` + +`figure.NewFigure("Foo Bar Pop", "smkeyboard", true).Print()` + +``` + ____ ____ ____ ____ ____ ____ ____ ____ ____ +||F ||||o ||||o ||||B ||||a ||||r ||||P ||||o ||||p || +||__||||__||||__||||__||||__||||__||||__||||__||||__|| +|/__\||/__\||/__\||/__\||/__\||/__\||/__\||/__\||/__\| +``` + +`figure.NewFigure("Keep Your Eyes On Me", "rectangles", true).Print()` + +``` + + _____ __ __ _____ _____ _____ +| | | ___ ___ ___ | | | ___ _ _ ___ | __| _ _ ___ ___ | | ___ | | ___ +| -|| -_|| -_|| . ||_ _|| . || | || _|| __|| | || -_||_ -|| | || || | | || -_| +|__|__||___||___|| _| |_| |___||___||_| |_____||_ ||___||___||_____||_|_||_|_|_||___| + |_| |___| +``` + +`figure.NewFigure("ABCDEFGHIJ", "eftichess", true).Print()` + +``` +######### ######### ___ ######### ######### +##[`'`']# \`~'/ ##'\v/`## /\*/\ ##|`+'|## '\v/` ##\`~'/## [`'`'] '\v/` \`~'/ +###| |## (o o) ##(o 0)## /(o o)\ ##(o o)## (o 0) ##(o o)## | | (o 0) (o o) +###|__|## \ / \ ###(_)### (_) ###(_)### (_) ###\ / \# |__| (_) \ / \ +######### " ######### ######### ####"#### " +``` + +`figure.NewFigure("Give your reasons", "doom", true).Blink(10000, 500, -1)` + +![blink](docs/blink.gif "blink") + +`figure.NewFigure("I mean, I could...", "basic", true).Scroll(10000, 200, "right")` + +`figure.NewFigure("But why would I want to?", "basic", true).Scroll(10000, 200, "left")` + +![scroll](docs/scroll.gif "scroll") + +`figure.NewFigure("It's been waiting for you", "larry3d", true).Dance(10000, 500)` + +![dance](docs/dance.gif "dance") + +`figure.Write(w, figure.NewFigure("Hello, It's Me", "puffy", true))` + +![web](docs/web.png "web") + + +## Supported Fonts +* 3-d +* 3x5 +* 5lineoblique +* acrobatic +* alligator +* alligator2 +* alphabet +* avatar +* banner +* banner3-D +* banner3 +* banner4 +* barbwire +* basic +* bell +* big +* bigchief +* binary +* block +* bubble +* bulbhead +* calgphy2 +* caligraphy +* catwalk +* chunky +* coinstak +* colossal +* computer +* contessa +* contrast +* cosmic +* cosmike +* cricket +* cursive +* cyberlarge +* cybermedium +* cybersmall +* diamond +* digital +* doh +* doom +* dotmatrix +* drpepper +* eftichess +* eftifont +* eftipiti +* eftirobot +* eftitalic +* eftiwall +* eftiwater +* epic +* fender +* fourtops +* fuzzy +* goofy +* gothic +* graffiti +* hollywood +* invita +* isometric1 +* isometric2 +* isometric3 +* isometric4 +* italic +* ivrit +* jazmine +* jerusalem +* katakana +* kban +* larry3d +* lcd +* lean +* letters +* linux +* lockergnome +* madrid +* marquee +* maxfour +* mike +* mini +* mirror +* mnemonic +* morse +* moscow +* 
nancyj-fancy +* nancyj-underlined +* nancyj +* nipples +* ntgreek +* o8 +* ogre +* pawp +* peaks +* pebbles +* pepper +* poison +* puffy +* pyramid +* rectangles +* relief +* relief2 +* rev +* roman +* rot13 +* rounded +* rowancap +* rozzo +* runic +* runyc +* sblood +* script +* serifcap +* shadow +* short +* slant +* slide +* slscript +* small +* smisome1 +* smkeyboard +* smscript +* smshadow +* smslant +* smtengwar +* speed +* stampatello +* standard +* starwars +* stellar +* stop +* straight +* tanja +* tengwar +* term +* thick +* thin +* threepoint +* ticks +* ticksslant +* tinker-toy +* tombstone +* trek +* tsalagi +* twopoint +* univers +* usaflag +* wavy +* weird + +## Contributing +Because this project is small, we can dispense with formality. +Submit a pull request, open an issue, request a change. +All good! + +## Wanna Say Thanks? +GitHub stars are helpful. +Most importantly, they help with discoverability. +Projects with more stars are displayed higher +in search results when people are looking for packages. +Also--they make contributors feel good :) + +If you are feeling especially generous, +give a shout to [@cmmn_nighthawk](https://twitter.com/cmmn_nighthawk). + +## TODO +* Add proper support for spaces +* More animations +* Implement graceful line-wrapping and smushing +* Deep-copy font for Dance (current implementation is destructive) diff --git a/vendor/github.com/common-nighthawk/go-figure/bindata.go b/vendor/github.com/common-nighthawk/go-figure/bindata.go new file mode 100644 index 000000000..f85d3bb57 --- /dev/null +++ b/vendor/github.com/common-nighthawk/go-figure/bindata.go @@ -0,0 +1,3641 @@ +// Code generated by go-bindata. +// sources: +// fonts/3-d.flf +// fonts/3x5.flf +// fonts/5lineoblique.flf +// fonts/acrobatic.flf +// fonts/alligator.flf +// fonts/alligator2.flf +// fonts/alphabet.flf +// fonts/avatar.flf +// fonts/banner.flf +// fonts/banner3-D.flf +// fonts/banner3.flf +// fonts/banner4.flf +// fonts/barbwire.flf +// fonts/basic.flf +// fonts/bell.flf +// fonts/big.flf +// fonts/bigchief.flf +// fonts/binary.flf +// fonts/block.flf +// fonts/bubble.flf +// fonts/bulbhead.flf +// fonts/calgphy2.flf +// fonts/caligraphy.flf +// fonts/catwalk.flf +// fonts/chunky.flf +// fonts/coinstak.flf +// fonts/colossal.flf +// fonts/computer.flf +// fonts/contessa.flf +// fonts/contrast.flf +// fonts/cosmic.flf +// fonts/cosmike.flf +// fonts/cricket.flf +// fonts/cursive.flf +// fonts/cyberlarge.flf +// fonts/cybermedium.flf +// fonts/cybersmall.flf +// fonts/diamond.flf +// fonts/digital.flf +// fonts/doh.flf +// fonts/doom.flf +// fonts/dotmatrix.flf +// fonts/drpepper.flf +// fonts/eftichess.flf +// fonts/eftifont.flf +// fonts/eftipiti.flf +// fonts/eftirobot.flf +// fonts/eftitalic.flf +// fonts/eftiwall.flf +// fonts/eftiwater.flf +// fonts/elite.flf +// fonts/epic.flf +// fonts/fender.flf +// fonts/fourtops.flf +// fonts/fuzzy.flf +// fonts/goofy.flf +// fonts/gothic.flf +// fonts/graffiti.flf +// fonts/hollywood.flf +// fonts/invita.flf +// fonts/isometric1.flf +// fonts/isometric2.flf +// fonts/isometric3.flf +// fonts/isometric4.flf +// fonts/italic.flf +// fonts/ivrit.flf +// fonts/jazmine.flf +// fonts/jerusalem.flf +// fonts/katakana.flf +// fonts/kban.flf +// fonts/larry3d.flf +// fonts/lcd.flf +// fonts/lean.flf +// fonts/letters.flf +// fonts/linux.flf +// fonts/lockergnome.flf +// fonts/madrid.flf +// fonts/marquee.flf +// fonts/maxfour.flf +// fonts/mike.flf +// fonts/mini.flf +// fonts/mirror.flf +// fonts/mnemonic.flf +// fonts/morse.flf +// 
fonts/moscow.flf
+// fonts/nancyj-fancy.flf
+// fonts/nancyj-underlined.flf
+// fonts/nancyj.flf
+// fonts/nipples.flf
+// fonts/ntgreek.flf
+// fonts/o8.flf
+// fonts/ogre.flf
+// fonts/pawp.flf
+// fonts/peaks.flf
+// fonts/pebbles.flf
+// fonts/pepper.flf
+// fonts/poison.flf
+// fonts/puffy.flf
+// fonts/pyramid.flf
+// fonts/rectangles.flf
+// fonts/relief.flf
+// fonts/relief2.flf
+// fonts/rev.flf
+// fonts/roman.flf
+// fonts/rot13.flf
+// fonts/rounded.flf
+// fonts/rowancap.flf
+// fonts/rozzo.flf
+// fonts/runic.flf
+// fonts/runyc.flf
+// fonts/sblood.flf
+// fonts/script.flf
+// fonts/serifcap.flf
+// fonts/shadow.flf
+// fonts/short.flf
+// fonts/slant.flf
+// fonts/slide.flf
+// fonts/slscript.flf
+// fonts/small.flf
+// fonts/smisome1.flf
+// fonts/smkeyboard.flf
+// fonts/smscript.flf
+// fonts/smshadow.flf
+// fonts/smslant.flf
+// fonts/smtengwar.flf
+// fonts/speed.flf
+// fonts/stampatello.flf
+// fonts/standard.flf
+// fonts/starwars.flf
+// fonts/stellar.flf
+// fonts/stop.flf
+// fonts/straight.flf
+// fonts/tanja.flf
+// fonts/tengwar.flf
+// fonts/term.flf
+// fonts/thick.flf
+// fonts/thin.flf
+// fonts/threepoint.flf
+// fonts/ticks.flf
+// fonts/ticksslant.flf
+// fonts/tinker-toy.flf
+// fonts/tombstone.flf
+// fonts/trek.flf
+// fonts/tsalagi.flf
+// fonts/twopoint.flf
+// fonts/univers.flf
+// fonts/usaflag.flf
+// fonts/wavy.flf
+// fonts/weird.flf
+// DO NOT EDIT!
+
+package figure
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+func bindataRead(data []byte, name string) ([]byte, error) {
+	gz, err := gzip.NewReader(bytes.NewBuffer(data))
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+
+	var buf bytes.Buffer
+	_, err = io.Copy(&buf, gz)
+	clErr := gz.Close()
+
+	if err != nil {
+		return nil, fmt.Errorf("Read %q: %v", name, err)
+	}
+	if clErr != nil {
+		return nil, clErr
+	}
+
+	return buf.Bytes(), nil
+}
+
+type asset struct {
+	bytes []byte
+	info  os.FileInfo
+}
+
+type bindataFileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+}
+
+func (fi bindataFileInfo) Name() string {
+	return fi.name
+}
+func (fi bindataFileInfo) Size() int64 {
+	return fi.size
+}
+func (fi bindataFileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+func (fi bindataFileInfo) ModTime() time.Time {
+	return fi.modTime
+}
+func (fi bindataFileInfo) IsDir() bool {
+	return false
+}
+func (fi bindataFileInfo) Sys() interface{} {
+	return nil
+}
+
+var _fonts3DFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\xb9\x8e\x1c\x37\x10\xcd\xf7\x2b\x2a\xd8\xa8\x00\xa9\x2c\x39\x51\x60\x18\x13\x28\xf0\x6f\xac\xb5\xb3\xb6\x00\x61\x04\xe8\x08\xfc\xf7\x06\x59\xd7\x2b\x92\x7d\xcc\x1a\xf0\x0a\xd3\x4d\x76\xb3\xc9\x3a\x5e\x3d\xbe\x6e\xbd\x7c\x79\x79\xff\xf4\x48\x1f\xe8\x03\xbd\xff\x85\xde\xbc\xa3\x77\x0f\xbf\xbe\xf9\x48\x2f\x5f\x6f\x3f\xe8\xd3\xb7\xeb\xd3\x8f\xeb\x33\xfd\xf9\x0f\x7d\x7c\xba\x7d\xbe\x7e\xa1\x3f\xae\xb7\xdb\xe7\xdb\x5f\xd7\x6f\xf4\xdb\xf3\xd3\xdf\xbd\x73\xb9\x7e\xfd\xfe\xf6\xf6\xe9\xfb\xcf\xb7\xd7\xe7\x9f\xbf\x3f\x3c\x3e\x5e\xf6\x7e\x97\x07\x62\xbe\x3c\xc8\xea\x20\xe4\x77\x85\xfa\x40\xa2\x7e\x8f\xa4\x9d\x88\xfa\xfd\xf6\xb7\x7b\xb2\xb3\xff\xf5\x2e\x33\xd9\xb1\xaf\x90\x7f\x6d\x2d\x61\x16\x3d\xf6\xc1\xd2\x07\xcb\xc9\xc1\xd2\x26\xee\x47\x5d\x18\x9e\x6a\xa6\x4b\x1f\x27\xf0\x78\xf3\x85\xb0\xaf\xf3\x0c\x96\xf7\x31\x6a\x72\x0f\x47\x18\xef\xce\x80\x5b\xad\xa9\xad\xc5\x63\xed\x70\xc9\xc1\x7d\xac\x98\x95\x32\x5c\x63\x8d\x79\x6b\xf6\xb0\xb7\x28\xb4\x56\x37\xb7\xdf\x6d\xae\x93\x25\xa7\xdb\xdf\x2f\xaa\x31\xcb\x83\xad\x6c\x26\x9a\x79\x3d\x20\x78\x12\xbd\x27\xa2\x23\x3d\xfd\xeb\x9b\xe5\x54\x26\xad\x41\xb4\x90\x44\x78\x24\x23\xd3\x27\xd3\x0b\x8b\x34\x43\xa2\xed\x57\xe6\x48\x83\x1c\x26\x3d\xc6\xb6\xac\x02\x62\xc2\xa1\x77\x18\x3a\x02\xd9\x8b\x85\x7b\x88\xa5\x61\x6c\x1e\x46\x8a\x93\x9c\x0d\x16\xda\x38\x94\x2c\xf9\x63\x8b\x73\xba\x9d\x0b\xcf\xe7\xc3\x95\xa0\x78\x3d\x09\xe1\x39\xfb\x3c\x77\x21\x99\x22\xb3\x61\x41\x47\x63\xbb\x2f\x62\xe4\xd1\xf0\xab\x0d\xe3\x0a\xcd\x55\x80\x38\x30\xec\x10\xf6\x89\xcc\x75\x84\xd9\xea\x94\xc1\x99\x2d\x20\xe5\x26\x5d\xa6\x7b\xc2\x56\x51\x8a\x0a\x77\x47\x18\x63\x7c\xc7\x44\x76\x8b\x76\x5d\xc9\xf8\xb2\xd8\xfa\x5a\xbc\x54\x96\xf5\x1c\x2c\x1a\x64\x35\x17\xcc\xa5\x48\x50\xb3\xc9\xa1\xc1\x01\xca\x23\x8b\x26\xd7\xfa\xe8\x32\x63\xbd\xb5\x31\x51\x0d\x5b\x59\x5f\x0b\xca\xdc\x77\x20\x39\x8c\x0c\x44\xb2\xc6\xcd\xc6\xb2\xa7\x2c\x3a\x39\xfa\x84\x45\x9b\x05\x3b\x6c\x84\x07\x63\x86\xf2\xce\xa2\x73\xea\xea\x0d\x7b\x54\xb2\xa6\xb2\xe8\xac\x63\x50\x10\x28\xdb\x52\xcd\xd8\x18\xe1\xbc\xba\x32\x3e\x35\x16\xf5\x09\x03\x0e\x5c\x58\xd1\x02\x3b\x2d\x30\x39\x21\x65\x75\x30\xe5\xbe\xbd\xb8\xb2\x91\x5e\x36\x7e\x71\x2d\xc2\x62\x5b\x2c\x1b\xf9\x0b\x15\xfe\x8e\x98\x31\x90\x77\x9f\x37\x56\x15\x57\x18\xba\xb1\x00\xd3\x64\x0d\xb2\x81\xde\x29\x8d\x28\x3d\x82\xe4\xe8\x03\xe4\x25\x25\x0c\xfb\x37\x27\xf5\xe4\x00\x72\xda\xf4\xf9\x6c\x4d\x2f\x73\x14\x0d\x1c\x84\xc6\x6e\x8b\x79\xe5\xfc\x10\x69\x2e\x6d\x71\xcd\x65\xdb\x2d\x03\x9f\x0d\xc5\xed\x5a\x00\x6c\x2f\xb6\xed\xb5\xd1\x41\x53\x01\x36\x3f\x2e\x00\xd1\x44\x3b\xab\xe7\x3a\x80\xd6\x03\x02\xd7\x25\xe6\xff\x6d\xe2\xf5\xf6\x16\x53\x43\xd4\x4b\xdc\xa7\xc8\xc3\x74\x61\xaa\x82\xca\x9e\x13\x29\x73\x8a\xd4\x1c\x24\x6b\x54\x94\x0d\x9d\x53\xc8\xdc\x85\xe9\x86\xee\x5f\x4a\x07\x4e\xaa\x3f\x6e\x99\x36\x53\xc7\x81\xb0\xd1\xc5\x30\x2f\x20\x16\xc0\xb3\xba\x6c\x6e\x01\x18\x63\x9c\xa4\x37\x20\x4d\xe7\x34\xde\xd7\xdc\x84\x15\x79\x22\xd3\xa2\x4e\x40\x9c\x76\x05\xb5\x99\x75\x8c\xe1\x17\x2f\x7d\x5f\x51\xa6\xf4\x8c\x19\xaa\x5c\x82\x70\x88\xb7\xa1\x5c\xbc\x2e\x5d\x17\x66\x5c\x56\xc2\x09\xf2\x2e\x82\x42\x2a\xde\x09\xf1\x9e\xba\xde\x9e\x04\xdb\x57\x3d\x09\xab\x09\xf1\x4e\x88\xf7\x4a\x09\x48\x39\x61\xf1\x48\x86\xaf\xab\x5c\x1a\x5d\x19\x7c\xa1\x6a\x3e\x56\xaf\xa4\x18\x85\x3d\x31\xfd\xf1\x17\x24\x75\xa8\x72\xd1\x9a\x45\x65\x66\x48\xe6\x30\x4b\x60\xf3\xc9\xac\x85\x63\xa8\x53\x6d\x7f\xf3\xd2\x9f\xb9\x7f\x42\x74\xaa\x88\x28\xd3\x71\x9f\xa9\xfb\x00\x08\x55\x78\x03\x12\xdc\x44\x5f\xd5\x49\xc9\x55\xc1\x3d\xa2\xe9\x7c\x1b\x40\x42\xaf\x61\x53\xdc\xbd\x34\xfe\x2
0\x44\x00\x43\x24\x07\x9e\xd4\x95\x70\xa9\xc2\x09\x8c\x85\xda\x61\x96\x58\xc7\x42\x55\x8e\xab\xdc\x50\x39\x0f\xcc\x07\xeb\x8b\xf1\x20\x72\x40\xe3\x94\xa2\x5e\xa1\x2c\x05\x83\x24\x4f\x47\x38\x7c\xf6\x08\xcc\x61\x53\x68\x16\x01\x40\x2e\x74\xd7\xab\xe8\xb6\x08\xb0\x82\x8a\x7a\xd8\x3d\xe3\x5b\xf6\xa0\x86\x73\x35\xcc\x79\x71\x59\x18\x34\x72\x7a\x81\x95\x36\x58\x96\x55\xb8\x7f\x9e\x2c\xa3\x2a\x5e\x99\x85\x90\xc8\x5c\x43\xc2\x0e\xbf\xfa\xdc\x71\xd4\xd9\xf9\x22\xb1\x75\x5e\xc4\xb0\x6b\xd9\xfe\xf2\xbc\xfd\x81\x02\xd7\x1d\xdb\xb0\x57\x14\x80\x40\x8d\x23\xad\xc2\xde\x4d\x03\x1c\x3c\x99\xeb\x56\xd9\x55\x72\xd7\x84\x4d\x27\xde\xb3\x8b\xc5\xb5\x55\x6d\x82\xcd\x37\x96\xdb\x16\x42\x51\xc1\x49\x16\xd8\xaa\x24\x3f\x58\x28\x83\xd3\x77\x5a\x88\xb5\xa2\xb6\x6e\x4c\x68\xd7\xc8\x3f\xbc\x41\xe5\x95\x4f\xb4\x1b\x8d\x45\xec\xb6\x22\xb6\x72\x0b\x02\xb1\x12\x91\xaf\xce\x2e\x0a\x2e\x04\x6e\xbe\xc6\xef\xea\x61\x2f\x56\xa1\xda\x1f\x8a\x38\x5d\x32\xe3\xd1\xf4\xb0\x37\xf6\x84\x90\xc1\xa1\x82\x43\xf3\x85\xdc\x53\x9b\x53\x08\xe8\x57\xb1\x9d\xa3\x27\x6b\xfc\x12\x3f\xf5\x72\xd3\x37\x34\x48\x2c\x49\x26\x2c\x69\xd5\x63\xec\x89\x59\x47\xc3\x82\xb5\x39\xae\x93\xd3\xd2\xd4\x04\x35\x8d\xd9\x5a\x4f\x4c\x33\x45\x8c\x62\x72\xd4\x0c\x49\x33\x23\x50\xcf\x32\x84\x8a\xa3\x8a\x3f\x23\xe2\x05\xf2\x29\x3f\x8a\xf8\xfb\x0c\xf2\x98\x9b\xc3\xf9\x49\xd0\x69\x41\x4e\x7c\xfc\xe9\x13\x7b\x8a\xc8\x23\x69\x80\x32\xd3\x64\x61\xd9\x18\xc1\x08\xe0\x6c\x19\x6a\xc7\x50\x8e\xf8\xbf\x1a\x48\x00\xe7\xd8\x22\xbc\x9d\x93\x80\x4e\xa2\xba\xda\x29\xe9\x1d\x62\x9c\x70\x48\x45\xb1\xa5\x18\x94\x03\xb5\x33\xc6\x70\xb1\xaf\xf2\xa4\xd0\xb0\x7e\xd8\xd8\xa1\xe7\x97\x25\x62\xce\x92\x82\x4f\xe6\xac\x6f\x85\x24\x88\xc3\x25\x9f\xbd\xf1\x84\xc6\x9b\x5f\x9a\x6b\xd2\xf1\xdb\x10\xc5\x87\x89\x98\x0a\x95\xdc\x4a\x91\xcd\x80\x5f\xe3\x33\x53\x0d\x52\x74\x1a\x83\x44\xcb\x21\x78\x06\x5c\x39\x9a\x97\x70\x72\x7e\x36\x6a\x6f\x09\xb6\x1f\x4d\xfd\xd3\xd3\xed\x18\x41\xf0\x1a\x90\xcc\x16\xf4\xe9\x8c\xb8\xc9\x5b\xfb\xcd\xcb\xc3\xf0\xef\xff\xba\xf0\x6f\x00\x00\x00\xff\xff\x99\xec\xf4\xdf\x8e\x1e\x00\x00") + +func fonts3DFlfBytes() ([]byte, error) { + return bindataRead( + _fonts3DFlf, + "fonts/3-d.flf", + ) +} + +func fonts3DFlf() (*asset, error) { + bytes, err := fonts3DFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/3-d.flf", size: 7822, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fonts3x5Flf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x56\xcd\x8e\xdb\x3c\x0c\xbc\xe7\x29\x06\xe0\x77\x48\x80\x6f\x0d\xb4\xdd\xee\xa1\xa7\x00\x2d\xd0\x43\x2f\x45\xdf\x40\x89\xed\x44\x88\x37\x5e\xd8\x0e\xda\x14\xe8\xbb\xf7\x90\x1f\x4b\xe4\xd0\x4e\x2f\x82\xe2\x50\x24\x35\x1c\x0e\x55\x37\xf5\xfb\xf0\x1f\x5e\xf0\x8c\x17\x3c\xbd\xc3\xf3\xe2\xc3\xaf\x8f\xa8\xdb\xe3\x80\xcd\x19\x3f\xe2\x76\x1f\xba\x12\xdf\x62\x77\xc0\xb2\x0b\x87\xf5\xb6\x6b\xfb\x3a\x56\x4d\x59\x6c\xdb\xe2\x74\x58\x15\x8b\xef\x6d\x37\x54\x25\x86\x16\x75\xdc\x35\xd5\xf0\x3f\xc2\xb1\x44\xdf\xc4\xdd\x7e\x68\xce\xd8\xee\xc3\x71\x57\x95\x58\xfe\x8c\xc3\xbe\x3d\x0d\x78\xab\xba\xd7\xd8\xf7\xb1\x3d\xe2\xd3\xd3\x9f\xd5\x62\x73\xc6\x97\x70\x8c\x55\x83\xcf\x61\x53\xfd\x0e\xf8\xda\x85\x1e\xcb\x4d\xe8\xca\x76\x5d\xc6\x50\xd4\xb1\x38\xbd\xbd\x16\x55\xbf\x5a\x2c\x00\x60\x3d\xbd\xde\x7f\x88\x59\xc7\xef\xb7\x1f\x02\xb9\xaf\xae\xa7\xab\x91\x08\xdb\x8f\x6e\x2f\x7f\xe0\xfe\x77\xf2\x25\xf7\x84\xcb\x7a\x31\x1d\xbf\xe7\x89\x4b\xea\x75\x0c\x3a\x26\xa8\x7d\x30\x08\x12\xa3\x1c\x08\x49\x73\xe2\x68\x89\x05\xf3\x76\x2f\xb5\x57\xd0\x19\x53\x95\x93\x59\x49\xb8\x64\xbd\x7a\x62\xb7\x9b\xf0\xa7\x20\x20\x88\xa7\x65\x49\x51\x86\x87\xf8\xf5\x5e\x06\xa7\xd4\xe8\x96\x6c\x4a\x92\x49\x23\xe8\x03\x8c\x99\xd9\x01\x18\x4f\xc2\x43\xfb\x46\xe4\x76\x26\xc4\x54\x38\xda\x0e\xbe\x11\xc9\x29\x6b\x51\xbd\x9f\x33\x12\x8f\xe3\x19\x95\x61\xc2\xa5\x4c\x4a\xf6\xb4\x11\xb4\xd7\xa9\xda\x59\x55\xc9\xf8\xe4\x84\xbb\x39\x37\x60\xe6\x88\xa7\x46\x6a\xaf\xa4\x07\x8a\xdd\x97\xef\xd4\x53\xc6\x2d\x92\x78\x7a\xc0\xa1\x2f\x31\x82\x9b\x53\x22\xb2\xd3\x1c\xa7\x10\x70\x11\x53\xcd\xa9\x89\x2b\x30\xfa\x64\xae\x4d\xc3\x79\x2b\x49\x5c\x1e\x48\x9c\x20\xce\xc4\x5e\x1b\x29\x3e\x59\x16\x78\x12\xad\xb1\xd6\x1d\xec\xf0\x89\x8c\x32\xd3\x0e\x84\x2a\x64\xb6\xb8\x88\x7b\xc2\xea\x19\x4d\xd4\xce\x99\xc1\xe6\x34\x2f\xb0\x36\x62\x89\x8b\x33\x36\x08\xe9\x08\x04\x0e\x9f\x32\x6d\xb1\x65\x61\x1c\x77\x0a\xec\xbf\x7a\x9c\x55\x6c\x4e\x79\x36\xae\xfa\x5a\x35\x37\x9a\xc9\x24\xdf\x78\x62\xb3\xca\xe8\xf8\xbf\x78\x12\x57\x58\xc5\x79\xb1\xb8\x39\x8d\xdd\x42\x70\xb2\x7d\xcc\xde\x99\x50\xf5\x9f\x37\xb2\xb3\x05\xbc\x39\xe7\x9e\xb5\x73\x38\x79\x89\x3b\x73\x61\xa2\x76\x0f\x15\x58\xf2\x62\xcf\x7b\x72\x06\x75\xd6\x39\x1e\x33\xb3\x57\x19\x91\xc3\xac\x45\xbd\xc4\xc9\x53\x6b\xd2\xc8\x7d\xab\x58\x7d\x72\x8d\x4c\x37\xfb\x9e\xdc\xf7\x93\xcc\xb0\xc0\x22\x64\xf5\xc9\x51\x70\x36\x80\xb2\x40\xac\xa5\x54\xe5\x99\xaa\x10\x71\x76\x75\xdc\xce\x4f\x3b\x36\x9c\x0a\xea\x70\x33\x22\xf6\x80\xf4\x90\x70\xc4\x48\xcc\x60\x4d\xdb\xfc\x6f\x00\x00\x00\xff\xff\x6e\x78\x13\xbb\x6d\x0f\x00\x00") + +func fonts3x5FlfBytes() ([]byte, error) { + return bindataRead( + _fonts3x5Flf, + "fonts/3x5.flf", + ) +} + +func fonts3x5Flf() (*asset, error) { + bytes, err := fonts3x5FlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/3x5.flf", size: 3949, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fonts5lineobliqueFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x59\xcd\x8e\x1b\x37\x0c\xbe\xfb\x29\x78\xe0\x41\x03\xb4\x56\xbd\xed\xb6\x0d\x10\x2c\x9c\xfe\x9c\x7a\x2b\xd0\x9b\x00\xc1\xe9\xda\xc0\x22\x8b\x4d\xd0\x6c\x0e\x01\xf4\xf0\x85\x48\x51\xa2\x34\x9a\xf1\x8c\xed\x2c\x12\x8f\x38\x92\x48\xf1\x13\xff\xa4\x39\x3d\x9f\xee\x0e\x08\xbf\xc0\x3d\xdc\xfd\x00\xbb\x7b\xb8\xdb\xdc\x3f\x3f\xbd\x1c\x3f\xbe\x7f\xde\x9e\x9e\x4f\xb0\xdb\xd9\x37\x3f\xc1\xa7\x0f\x3f\xff\xba\xdb\x7d\xde\x1f\xfe\x3d\x3c\x6e\x1f\xff\x3b\x7c\x38\x6e\x8f\x8f\x5f\xbe\x83\x2f\x9f\x1e\x0f\xaf\xc7\x47\x80\x9d\x7d\x73\x0f\x9f\xbf\xbe\xff\x71\xff\xee\xb7\x3f\xff\xde\xbe\xfb\x7d\xfb\xcf\x5f\x9b\x3f\x8e\xa7\xa7\x97\xa7\xd7\xe3\xf3\x57\x38\xc0\xfd\xf7\x91\x2d\x9c\x3e\xbe\xbc\x6e\x37\x88\xfb\xa9\xff\xfb\x0d\xf0\x5f\x6a\x20\x37\xd0\x5a\x8c\x8d\xf8\x8c\x6f\xe8\x09\xfb\x0d\xc6\x11\xb1\x41\x34\x80\xcc\xcf\x0f\x0c\x10\xe2\x4c\x44\x9e\x37\x7e\x64\x89\x65\x9a\xcc\x05\xef\x6d\xfc\x17\x97\x26\x4d\x7a\x6f\x01\x2c\x60\x3b\x5e\xb5\x35\x51\x77\x65\xca\x7b\x6b\xbd\x50\x06\xac\x05\x18\x30\xf5\x39\x27\x23\x0d\x0d\x1b\x50\xe6\xc5\x61\x8d\x08\xdd\x30\x03\x24\xa4\x68\x64\x6c\xd0\x8c\xfd\x26\x3e\x4d\x16\x00\x73\x7a\x03\x18\x03\x00\x43\x1a\xed\x5c\xe6\x85\xd6\x39\x9b\xf6\xc4\x5a\x5e\x66\x94\x6a\xbc\xf7\xce\x65\x30\x09\xda\x08\x14\xaf\x25\x81\x0d\xe5\xb7\x96\x3f\x6a\x79\xc8\x2a\x64\x59\x28\x4a\x30\x14\xc6\x00\x9e\x63\x23\x0a\x40\x32\x1a\x6e\x24\x86\x28\xa8\xe0\x34\x9a\x64\x7f\xfa\x0d\x7a\xe7\xd8\x1c\xc0\x5a\x87\x6a\x8c\x0c\x1a\x3f\x89\x3f\x59\x50\x08\x3c\x35\x04\xa2\xf9\x7d\x46\x4c\xe1\x36\x7e\x43\x48\x2a\x9e\xd1\x72\x3b\x4f\xef\x3d\x61\xd7\xbc\xa7\x89\xf2\x83\xf9\x87\x47\x44\x30\x87\x69\x00\x72\x63\xda\xae\x70\xb1\xc5\x7b\xa8\xec\x78\x48\x16\x4f\x84\x05\x2b\x5b\x9c\x08\xda\x66\xef\xbd\x85\xc5\x22\x34\x15\xbd\xcb\x62\x59\x3f\x58\xd5\x97\x08\xd6\x84\x87\x5d\xab\x05\x75\x89\x16\x60\x23\xe5\x05\x23\xb0\x91\xba\x1a\xa8\xd8\x95\x81\xe2\xbf\x01\x86\xe2\x83\xac\xc9\x72\xa0\x12\xfa\x6a\xdd\xb4\x4e\x48\xe8\x83\xc2\x2d\x83\xd8\xba\xdd\xb4\x08\xb1\x46\x50\x71\x2b\xe9\xc4\x0a\x5e\xaf\x05\xa3\x7a\x4e\x84\x60\x78\x81\x08\xde\x0a\x84\x91\x99\x2a\x6c\x6e\x00\x94\x9a\x68\xd5\x76\x1b\xa3\xb6\x3b\xf5\x94\x90\xbb\xdc\x2f\x7c\x0b\x06\x4d\x55\x60\x2c\xd3\xa2\xfb\x4b\x49\xc5\x0c\x58\xc7\xf5\xe9\xa1\x90\xf3\xf5\x28\x59\x83\x84\x3f\x8e\x35\x6f\xdf\xf2\x4b\xe7\x50\xfa\x3a\x61\x35\x3f\xa3\x29\xc4\x48\xe9\xbd\xd5\x4f\x50\x61\x56\x89\xe2\x00\x19\x53\x18\x31\x7f\x78\xc0\x94\x5e\xa4\x4f\xe1\xd8\x36\xf3\x7e\xc5\x3c\x59\x7c\xb3\x98\x86\x29\x88\x9a\x61\xbc\x47\x1d\xa2\x31\xb3\xc8\x54\x82\xa1\x85\x94\x06\x9d\x8b\xf9\x5d\xe4\x39\x97\x2c\xe0\x2c\x6f\x6b\x21\x55\x41\xec\x17\x01\x42\x09\x51\xc0\x04\x3b\x0f\xf7\x24\x47\xa2\x39\xb5\x35\x41\x97\xac\x43\x54\x15\x4e\x48\x46\xc4\x03\x4a\x6c\xcf\x46\x4c\x23\x27\xac\x78\x99\x24\xdd\x5b\xa8\x22\x49\x48\x32\xf3\x29\x49\xad\xa8\x2a\x9c\x34\xc2\x2a\x67\x54\xfe\xaf\xc9\x56\xb3\x15\xaa\xc5\x19\x05\xc4\x1c\x15\x38\x97\xf4\x55\xbb\x10\xc4\x5a\x12\x56\x92\x00\x27\x24\x69\x56\xb7\xd8\xae\x94\x13\x1b\xfc\xae\xd9\xae\x5a\x2d\xa8\xb3\x31\xc0\xe4\x76\xc1\x74\xc8\x16\x97\xf7\xfc\x8b\x75\xec\x6f\xc2\x66\x53\x6b\x34\xc5\x06\x55\x68\x5c\x0b\x74\x25\x4d\xa8\x96\xe5\xa1\xa2\xeb\x2a\xa0\xce\x44\x6a\x71\xa8\x2b\xa8\x4b\xed\x43\x9b\xbd\x57\x55\x15\x8d\x74\xe0\x34\x8a\x91\x5e\x98\xbd\xad\x3e\x44\xcd\x96\x67\x65\x5e\xaa\xa4\x68\x79\x2d\x5a\x63\xfc\x32\xf3\xc0\x8a\xa9\x30\x18\x72\x60\x14\x73\x0c\xac\x7a\x28\x01\x31\xb0\x86\x2a\x2a\x86\x1c\x27\xc7\xe2\x27\xec\x32\x40\x0d\x67\x68\xc2\x48\x68\xc2\x48\x68\xec\x32\xac\xdf\xbc\xda\xe5\xea\x98\x55\x87\xac\x2c\xea\x42\x33\x99\x89\xfa\xd1\x6c\x70\x6
d\x18\xb9\x55\x2c\x8e\x46\xa9\x35\xf3\x6c\x95\xb7\x52\x0d\x0c\x28\xd5\xa2\x7d\x28\xd5\xa2\x71\x5c\x26\x89\x8e\xdd\x7a\x70\xba\x07\xa8\xea\x64\xcc\x4a\xe5\xdd\x9b\x2d\x00\xc8\x63\x3d\x17\x43\x8d\x9f\x69\x37\xd3\x5e\x96\x1c\x73\x1d\x5e\xca\xc4\xaf\x36\xba\xaa\x19\x42\xe6\x1e\x42\xe6\x1c\x42\xe6\x1a\x42\xe6\x18\x60\xc2\x84\x3b\x86\x95\xf8\x86\x9a\xb9\xf8\x9f\x48\x10\xe6\x22\x86\x07\x89\x2c\x1a\x3e\xbf\x7c\xbe\x40\x11\x70\x1c\xe4\xb8\x5d\x42\xb6\x05\x1b\x77\x5a\x76\x21\xd6\xbb\x7a\xb9\x75\x9b\x6d\x22\x31\x74\x4e\x41\xed\x9c\xca\x02\x2a\x3d\x4d\x20\x8c\xe9\x6c\xc3\xb7\x32\x2a\xbf\xd8\xf1\x12\xb1\xb0\x93\xfc\x35\x57\x20\x53\xaa\xc4\xb1\x96\x8d\xad\x91\xa5\x35\xc7\xe2\xfa\x89\x49\x61\xa9\x7f\x51\x0e\x04\xdc\xdf\x20\xd5\x34\x59\x41\x2c\xd9\x86\x9b\x28\x87\x1f\x41\x26\x65\xe5\xde\x32\xe8\x50\x00\x94\x35\x54\xf6\x28\x4c\x21\x81\x07\xcd\xe5\x0c\x2e\x78\x7a\x2a\x7f\xe4\x72\x26\x9d\x7c\x30\xea\x8b\xa9\x95\x3b\x66\x1d\xbc\x25\x74\xe1\xa8\xc2\x8b\x76\x4b\x72\x3c\x03\x66\x94\xa8\xcf\x10\x16\x16\xf2\xee\x9d\xaf\xaf\x59\x77\x1a\xc6\xb1\x7c\x79\x20\x97\x3f\xd4\x12\x2c\x6a\x19\x3a\x47\x89\x11\xcc\x65\xc3\x55\x5a\x78\xef\x57\x69\xd1\xa3\xd4\x85\x00\xdd\x05\x7b\x40\x85\x12\xca\xc8\x42\xa4\x44\x84\xc2\x65\x3a\x94\x34\xed\xb2\x74\x85\x7f\xb9\x21\xa0\xf7\xe4\x51\xdf\xc0\x6a\x14\xfe\x73\x57\xba\xd2\x32\xe9\x7c\x9c\x3d\x58\x15\x4d\x6b\x6e\x5e\x1a\x2a\xb3\x05\x15\x1c\x84\x80\xb4\x79\x13\x27\x85\xd9\x36\x55\x0d\x62\x77\x5c\x31\xab\x4a\x25\x97\x0e\xb1\x4a\x99\xb9\x89\x95\xfc\x9a\x2f\x49\x12\x97\xee\x27\x90\x09\x5d\xa7\x68\x5f\xdd\x43\x59\xcb\xa7\xf4\xb2\x53\x16\xac\xde\x2c\x21\x57\x7b\xf9\xc5\xb6\x70\xc3\xc8\x77\x53\xde\xc5\x3d\x92\x8f\x2f\xba\xef\x59\xc2\xbb\x72\x3d\x20\xf3\x1c\x45\xec\xf9\x76\xcd\xb7\x0d\x45\xb4\xe2\x6e\x9d\xb7\x62\xc1\xfc\xa9\x48\x2a\xd8\x74\x18\x14\x30\x52\xb9\x3a\x5f\x29\x70\x33\x55\xa9\x98\x8b\x0d\x1c\x55\x0a\xa3\x2f\x3e\x4b\xcc\xad\x10\xba\x4a\x3d\x9f\x0c\xcf\xb5\x82\xae\x1b\x73\xc5\xd8\xbd\x57\xee\xf8\xdc\xc2\x17\x72\xfd\xa7\x97\xad\x1d\x31\x2e\xdd\x80\xe9\x58\xf4\xd9\x96\x2a\x4a\x9d\x3a\xdc\x58\xb7\xb6\xdc\x98\x40\x78\x64\xbc\x0b\x97\x88\xd9\xb6\xb0\x5c\x53\xab\x03\x04\x17\x8c\xe7\x4c\x2a\x5f\xd3\xa4\x1b\x5d\xa0\xcb\xe4\xfc\x15\x50\xae\x1d\x8c\x51\xc5\xe7\x5e\x8a\x2e\xda\xd3\xd1\xcf\x44\x58\x2e\x15\xe6\x30\xd4\xf1\x19\x1e\x1e\xe4\x0e\xf7\x7c\xb4\xf1\x65\xdd\xd6\x46\x37\xf2\x79\xe5\xf4\x0d\x4f\xe6\x54\x44\x66\xd0\xe5\xed\xc1\x77\x37\x6c\xd5\x75\x70\x7b\xcd\x51\x31\x2f\x21\xfd\x5b\x7c\xd8\xab\x44\xe8\xbe\xea\xb8\x79\x89\x88\x6e\x5a\x52\xd2\x6e\x5d\x41\x5f\xc2\x7b\x69\x8e\xea\xa0\x74\x36\xd8\xf5\xa3\x54\x97\x1c\x5f\x85\x68\xa7\x24\x5d\x8c\x14\x33\x51\x35\xb9\xa0\xc8\x49\x26\xa1\xff\x7f\x00\x00\x00\xff\xff\xf8\xbb\xf0\xab\x69\x22\x00\x00") + +func fonts5lineobliqueFlfBytes() ([]byte, error) { + return bindataRead( + _fonts5lineobliqueFlf, + "fonts/5lineoblique.flf", + ) +} + +func fonts5lineobliqueFlf() (*asset, error) { + bytes, err := fonts5lineobliqueFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/5lineoblique.flf", size: 8809, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsAcrobaticFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x5b\xef\x6e\xe3\x36\x12\xff\xee\xa7\x18\x04\x04\xda\xe2\x76\xab\x4d\xae\x05\x6e\x0b\x41\xd8\x02\x77\xf7\x00\xf7\x99\x80\x4e\xb1\xe5\xc4\xa8\x63\x16\xfe\xd7\x2d\xa0\x87\x3f\x90\x9c\x19\xce\x50\x74\x2c\x25\xd9\xee\x02\xe7\x00\xb1\x48\x53\xe2\xfc\xe3\xcc\x6f\x86\xd4\x7a\xbb\xbe\xeb\x0c\xdc\xde\xc1\x47\xb8\xfb\x19\x3e\xc0\xed\xcf\xf0\x61\xf1\xeb\x72\xef\xee\xbb\xe3\x66\x09\xf7\x7f\xc2\x7f\xba\xdd\x2a\xfc\x3f\xb8\x27\xf8\x47\xf5\xf1\x27\xa8\xf7\xa1\xf1\xe9\xb0\x5c\xff\x78\x3a\x2c\x7f\xec\x57\xa7\x66\xf1\xef\xcd\xc3\xb6\x3f\xc2\xbe\xdf\xf6\xdd\xa1\x87\xbb\x1f\x6f\xe1\xfd\x7b\xf8\xf5\xf4\x70\x3a\x1c\xe1\xf6\xee\x1d\xdc\x7e\xfc\xf8\xd3\xe2\x9f\xdd\xb1\xff\x05\xee\xfe\xee\x7f\x88\x3d\x8b\x7f\x7d\xfe\x7d\xdb\xed\xba\xe3\xc6\xed\xc0\xad\x61\xbd\xd9\x1f\x8e\xb0\xdd\xec\xfa\x5f\x16\x9e\x3a\x78\x0f\x37\x4f\xdd\xc3\x66\x09\xbb\xd3\xd3\x7d\xbf\xbf\x81\xb5\xdb\xc3\x7a\xb3\xed\x61\xb3\xea\x77\xc7\xcd\x7a\xb3\x0c\x37\x2f\x3a\x00\x80\xf7\x70\x78\x74\xa7\xed\x0a\xba\xed\x1f\xdd\x9f\x07\xb8\xef\xe1\xbf\xdd\x77\xef\xc2\x4d\x3b\xf7\xc7\xc2\xc4\x41\xc7\xc7\x1e\x6e\x1e\xbb\xfd\xea\x7e\xdb\xed\x7e\xbb\xf1\xa4\xfe\xbe\xdf\xec\x8e\x07\xe8\x0e\xd0\x41\xe8\x7d\x07\xf7\xa7\x23\x2c\xbb\xdd\x77\x47\xff\x98\xc3\xd3\xe9\xf0\xd8\xaf\x16\xb7\x77\xe1\x09\x8f\xfd\xe6\xe1\xf1\xe8\x29\xee\x60\xf9\xd8\xed\xbb\xe5\xb1\xdf\x2f\x3e\xc2\xe5\x1f\xdf\xc1\xce\x1d\x61\xb3\x5b\x6e\x4f\xab\xcd\xee\x01\x56\xfd\x61\xd9\xef\x56\xfd\xfe\xb0\xb8\xfb\x39\xdc\xf6\xd4\x7d\x0e\x9c\xc3\xb6\xdf\x3d\x1c\x1f\xe1\xfb\xfe\x33\x0d\x5e\xba\xa7\xa7\x7e\x17\x05\x73\xf8\x01\xfe\x06\x1d\xac\x4f\xab\x87\x1e\xd6\xdd\xf2\xe8\xf6\x8b\x0f\x71\xe2\x55\xbf\xee\x4e\xdb\x63\x24\xf6\xc9\xad\xfa\xc0\xf8\xf1\x71\x73\x80\xb5\xdb\x1d\xe1\xfb\xed\xe6\xb7\x1e\x6e\xde\x3f\xc1\x87\x1b\x70\xbb\xf0\xd8\x6e\xb7\x0a\x8f\xfd\x61\x71\x1b\xc9\x88\x82\xf6\xd4\xab\x59\x69\x8e\xfd\xd1\x6d\x17\x0b\x63\x8c\xf9\xf4\xfa\xff\x9f\xbc\x46\x1c\x80\x6f\x43\x3d\x34\xf1\xa2\x02\x1b\x2f\xac\xab\xe2\x05\x0c\x34\x06\x70\x8c\xff\x18\x79\x7b\x5b\x0f\x4d\x6b\x3e\x2d\x20\x7e\x0a\x17\x7e\xb4\x03\x7f\x83\x1f\xef\x67\xab\x87\x26\x5c\x42\xe3\x1f\xcc\xcf\x05\x78\xf9\x25\x40\x3e\xaf\xbe\x24\x6e\x20\xf1\x14\x88\x4a\x7c\xb4\x43\xd3\x42\x5b\x0f\x6d\x6b\x66\x0f\xae\x5d\x03\x10\xfe\x97\xee\xf5\x72\x05\x92\x2e\xc8\xcf\x95\xa6\x6c\x5f\xbe\xf6\x52\x18\x5a\x47\x74\x57\x00\xa4\x47\xb0\x4e\x48\xea\x6c\x85\xd4\x6a\x8b\x9c\x58\xd7\xb6\x55\xea\x1f\x32\x71\xea\xb9\x48\x95\x51\x16\x15\xe9\x33\x7c\xaa\x33\x90\x52\x43\xb3\x91\x2a\x72\x95\x52\x59\x75\x56\x2a\xac\x1a\xa9\xd2\x38\x96\xec\x85\xc6\x92\xd1\xd0\x58\xb4\x1c\x78\x4e\x80\xe3\xb6\xef\x30\x7e\x06\x94\x0c\x8e\x31\xb5\xe7\x3b\x72\x62\xb0\x0b\xac\x85\x0a\x49\x31\xb1\xcb\x55\x2d\xb4\xf1\xce\xc0\x3c\x18\xfc\xdd\x2b\xb8\x3a\xfb\x51\x82\x3e\x5b\x35\xe1\x59\xc6\xf2\xfc\x95\x75\xb1\x8b\xa5\xe8\xbc\x5e\xc2\xe3\x4d\x6d\xdb\x16\xda\x36\x3c\xa0\xb6\x23\xde\x0a\xec\x5c\xe8\x42\x29\x92\xf4\x48\x6a\x24\xed\x19\xdf\x7a\x0e\xfe\x8e\x17\x06\xa2\xcd\x85\x4b\x3f\x0f\xca\xad\x6a\x50\x86\xc6\x6b\x32\x8a\x38\x08\xd8\xe0\x43\x8c\xb5\x42\xf4\x60\x59\xe6\x10\xe4\x62\x70\xa2\x20\x90\xcb\xb6\x98\xd9\x65\xdb\xba\x34\x81\xb7\xf5\x34\x41\x6d\xc5\x04\xe0\x55\x40\x0c\xc0\xd0\x10\x03\x50\x55\xc4\x00\x54\xc4\x40\x30\xc2\xc8\x80\x57\x8c\x79\x96\x88\x7c\xa1\x58\xd6\x31\x79\xd3\x73\xb4\x04\x6f\xd1\x28\xe4\xda\x92\xf9\xf3\x02\xb4\xae\x1a\x48\x0d\xad\xf5\x6c\x0d\x5e\xd0\x55\xcb\x23\x2a\xb0\x83\x50\x54\x58\x2f\x61\x32\x74\x00\x71\xc1\x9c\x2d\xb9\x9f\x64\x91\x23\x07\x54\x30\xa0\x71\x47\x5a\xc5\xf1\x8b\x97\x31\xad\xfd\xd4\x01\x59\x47\x74\x7d\x92\x19\xd0\xcc\x80\x85\x2
a\x7b\x86\xcb\x9e\x81\xde\xb4\x38\xcb\x4b\x98\xc9\x7f\x9a\x76\x61\x92\x47\x22\x57\x04\x29\x7a\x91\x8a\x71\x19\x18\x5c\x03\x33\xc3\x59\x90\x50\x2b\x65\xf3\x36\xa1\x4f\x76\x3d\xff\x65\x52\x94\x46\x67\x6e\xca\x23\xd3\x04\xe4\x6b\x52\x33\x7a\xc2\xd4\x6c\x0c\xdf\x17\x7c\xaf\xe1\xe7\xa0\xc0\x80\x57\x67\x94\x1b\xad\x5c\xf2\x1f\xb8\xaa\xc3\xba\x49\x0a\x09\x3e\xad\xc0\xf0\xb4\xb6\xf0\x61\x20\x7d\x07\x12\x05\x71\x05\x25\x27\xa2\x56\x51\x72\x6f\xfc\x89\x6e\x25\x06\x92\xf4\x89\xfe\x25\x38\x3c\xfe\x24\x47\x23\x02\x83\x76\x81\xe1\xe3\xa4\xa0\x38\x38\x8c\x7c\xd0\x45\x16\x9f\xe9\x44\x7f\xc9\x58\x26\x47\x7b\x6e\xd4\x03\x79\x8f\x82\x42\x57\xf0\x9f\xe7\xaa\x6d\x1d\xbb\x67\x2f\x38\x1f\xf5\xa2\x1a\x87\x28\xd5\x18\x18\x63\xc4\x0d\x46\xa2\xc0\x82\x86\x0a\x1a\x28\x28\x98\x40\xe1\x2e\xac\xa4\xaa\xcd\x7c\xc4\xb3\xad\x28\x7f\x34\x08\xd6\x3c\x1b\x03\x6a\x5d\x18\x42\xa2\x11\x80\xfd\x3c\xdb\x64\x5b\x0f\x90\x8d\xc0\x15\x2d\xac\x81\x6e\x91\x4a\x8f\x88\x44\x28\xfc\xa5\x4e\x9b\x30\x6b\xc2\x69\x04\xa2\x82\x07\xad\xc1\x06\x6a\x90\xe3\x00\x18\xec\x10\x1a\x83\xe4\x24\x36\x62\xab\x76\x29\x62\xf2\x2f\x18\x34\x33\xcc\xf2\x4c\x23\x10\x17\x67\xf5\x4a\x42\x02\x83\x98\xe3\x08\x89\x24\xb9\x23\x60\xd5\xb6\x4d\x1d\x51\xa4\xb9\xd7\x94\x92\xd6\x4b\xec\x4b\x49\xfa\x25\x66\x43\x61\x2e\x31\xe3\x25\x0f\x92\x99\xe1\xab\x30\x43\x51\x88\x4d\x22\xf2\x01\x01\x79\x28\x21\x57\xca\xbc\x1d\xfa\x30\x6c\xa3\x63\xe7\x45\xdc\x90\x87\x25\xc8\x42\x7e\x1e\x11\x0b\xfb\xf9\xe8\x6a\xcd\x05\x0a\xa7\xf9\xf5\xf9\x8b\x38\x03\x6b\x60\xc0\x67\x78\x72\x11\x57\x5f\x45\x1b\xaf\x63\x26\x2e\x50\x03\x36\x30\x13\x01\x58\xb6\x4e\x86\xbf\xd2\xb4\x4a\xe8\xc2\xa4\xc5\x2c\xbf\x66\xe0\x10\x1c\x1c\xd2\xb8\x06\x2f\xf0\x06\x43\x10\xa9\x74\x71\xf9\x2e\x17\x43\xb4\xf1\x48\x26\x42\x0d\xa3\x30\x43\x95\xd2\x9e\x94\xf5\x24\x54\x90\x72\x1e\x99\xf2\x88\x8c\xc7\xa5\x68\x8f\xd9\x1f\xc4\x44\x65\x6a\x25\x23\x7c\xe6\x02\xc9\x19\xf0\x72\x02\x11\x56\xe4\x5b\x12\x29\xa9\x6c\x4b\x26\x5b\x29\xd7\x62\x04\xe4\x38\xd3\x4a\xee\x22\x79\x8a\xeb\x44\xa8\xc5\x41\x10\x11\x28\xa5\x56\x0b\x43\x5a\xbd\x53\x7e\x0c\x53\x64\x6a\xa9\xba\x85\x16\x8d\xaa\x62\x84\xea\x4f\x21\x0b\xb9\xd2\xcc\x70\xa7\x29\xa0\x4e\x53\xc0\x9c\x46\x20\x4e\xbc\xb5\x49\x51\xb3\xc6\xa8\x5d\x25\xb4\x59\x83\xd4\xe6\x78\x81\xe7\x48\xb3\x80\x33\x0b\xd8\x71\x62\x97\xc8\x0e\xdc\x18\xa2\x87\x05\x97\x75\x62\xf5\x51\x75\x1a\xcc\x6c\x8d\xe8\x34\xc4\x6a\xa3\x00\x3b\x3e\x4d\x98\xa1\x00\xe7\x36\xb3\x0e\x60\x39\x4b\x1b\x61\x49\xbf\x01\xb8\x06\x69\x95\x94\x13\xd0\x84\xa8\x8a\x1a\xc1\x57\xa4\x13\x4d\x32\x62\x80\x1a\x9d\x32\x01\x80\x30\x92\x01\x43\x25\x7f\x8b\x6e\x32\x3c\x33\x6a\x6d\x2e\xde\x05\x4d\x6c\x6e\x89\x63\x43\x1c\x65\x3e\x26\x4f\x7b\x0c\x8c\x72\x9e\xb1\x11\x16\x6c\x70\x6c\x82\x63\x01\x4f\xea\x11\x1a\x60\xd4\xc1\x4a\x60\x3f\xc3\x7a\x40\xdc\x62\x99\x2b\xcc\xe5\x04\x00\x23\xd8\x4c\x0c\x44\x0f\x96\x6e\x61\xec\x93\xd4\x22\x66\xb9\x9c\xb7\x4d\x0d\x9b\x09\x2b\x67\x50\x39\x62\x49\x46\xca\x9a\x0a\x46\x60\x11\x65\x27\x04\x96\xfd\x9e\xb0\x68\x76\x3f\x3e\x3f\x32\x61\x4b\x09\xd5\x24\x44\xf6\xa5\x18\x60\x08\x19\x19\x10\x10\x72\x00\x5d\x2a\x48\xbe\xe5\xf5\xa5\x82\x62\xa5\xa0\x58\x28\x28\xd5\x09\x40\x97\x09\x52\x4c\xd6\x2b\x66\x28\x3b\xee\x62\x8d\xa0\xe0\xba\x8b\xae\x69\x72\x9f\x2a\xf3\xab\xfc\x11\x52\x0e\x99\x74\x46\x38\x4e\x2c\x17\xc4\x70\x41\x6b\x02\x76\xe6\x23\x92\xe1\xc5\xbc\x72\x3c\x42\x2c\xb9\x3c\xbf\x2c\x92\x5f\xce\x68\x12\xec\x91\x7b\x32\x11\xf8\x3e\xb3\xd7\xf5\xb2\x5a\x07\x44\xdb\x63\xbd\x02\xdb\xe2\x00\x22\x5f\x56\x5b\x42\x68\x9c\x48\x8c\xa8\x6b\xe1\x2d\xdc\x66\xe2\xec\x68\xc4\x17\x2c\x21\xc0\x78\xbb\x87\xf7\x3b\xc8\x0c\x78\xb7\x83\x72\x47\x5e\xbc\x5c\x52\x
00\xed\x7e\xec\xc8\xff\xf0\xf6\x08\x9b\x41\x16\x34\x2f\x44\xe8\xe9\xd9\x47\x1a\x10\x58\xc9\x7c\x84\x70\x21\xd1\xb5\xa7\x24\x55\x78\x1c\x41\x8f\x01\x1d\x3a\x72\xf2\x99\xf8\x0b\xde\xf3\x2a\x58\x94\x1a\xe0\x95\xc8\xb2\xa8\x38\x2e\xd1\xae\x54\x05\x0c\x2b\xce\x61\x53\xca\xdb\x14\xaf\xac\xda\x06\xef\x94\x2f\x2d\x10\x77\x01\x69\xa2\xca\xd6\xe3\xa5\xbb\xe0\x9a\xe8\x2f\x46\xe8\x67\x18\xab\x33\xc6\x14\x89\xb8\x8d\xa2\x19\x1b\x93\xe8\xef\xcc\x18\x3b\xdb\x11\x63\xb5\x1d\x33\x56\xbf\x86\x31\xf8\xbf\x2e\x2b\xbf\x1a\xf9\x6a\xed\xa8\x52\x99\xc9\x57\x22\x3f\xb3\x40\xfd\x1c\xe4\xfb\x55\xb5\x64\x5d\x49\x4b\xc3\x48\x4b\x40\x19\xd2\xdb\x6a\x09\x20\xd7\x14\xe4\xf8\x98\x9c\x9e\xd0\x18\x08\xd7\x8e\x35\xb2\x0c\x5a\x8e\x9d\x7b\xc1\xbb\x17\xdc\x7b\x39\x03\x9b\xbe\xf6\x66\x57\xcb\xbe\xd9\xaa\xb2\xf1\x64\x31\x70\x46\x52\xe2\xe9\x0e\x7d\xca\x03\xd0\x8a\x88\x83\x54\xb7\x8f\x3d\xc0\x88\x02\x31\x9b\xd8\x0d\xc7\x51\x72\x4b\x2e\xee\xf3\xca\x25\x65\x80\xb7\xef\x53\xc2\xee\x5d\xeb\x25\x55\x4c\xaf\x13\x4c\x00\x9a\x39\x04\x54\x89\x5a\x1e\xa5\x06\x28\x63\xd3\xbf\xac\xbe\xa9\x3e\x39\x47\x92\x2d\xb5\xbd\xc1\xc7\x59\x8c\x55\x8f\x70\xe4\x2e\xce\x72\xcf\xe8\xcc\x0a\xac\x53\x30\x6e\xa4\x0e\x09\x0e\x64\x6a\x44\x48\x90\x69\x32\xc2\x82\xcb\xca\x9c\xdb\x3b\x53\x10\x72\xf1\x17\xb1\x08\x94\xf1\x08\x14\xd4\x1d\x9f\xab\x44\xe9\x2a\x3e\xb1\x93\x44\xe9\x9d\xc2\xd9\x66\xa2\x8c\x9e\x01\xcf\xed\xbc\x91\x20\xc4\xbe\x82\xc1\x4a\x29\x40\xda\x22\x37\x78\x00\x44\x56\x4c\xf5\x06\xf9\xa0\x83\xd3\xa8\xc4\x95\x17\xb7\x8a\xa5\xcf\xe7\x88\x2d\x60\xce\x37\x25\x3a\x79\x0a\x6c\x5a\x8e\x1f\xd7\x07\xcf\x20\xfa\xca\xb6\x55\x82\xfe\x95\xf4\x88\xa0\xa2\xaf\xd8\xb6\xc2\x66\xc3\xee\x52\x6e\x38\xa7\x6d\x2b\xec\xa0\x6d\xab\x57\x15\x49\x80\xe2\x4d\xfd\xba\x2c\x15\x8f\x25\xe6\x53\xc9\x2c\x35\xa8\x58\x56\x44\x82\x8e\x65\x51\x36\x28\x59\xfa\x84\xe0\x54\x54\x29\x96\x30\x92\x08\x13\x16\x8c\xf2\x2c\x62\xab\x03\x3b\xd2\x7e\x07\x76\x14\xe2\xfd\x14\x69\xf1\xf9\x85\x70\x6e\xef\xcb\xe6\xf4\x86\xbc\x08\x9f\x34\x38\x5b\x3a\x67\x30\xda\x5d\x78\xe5\x75\x99\x6b\x75\x2d\x1a\x2f\xbe\xbc\xb4\x71\xd4\x26\xd4\x93\x43\x0c\x7d\x49\x36\xe4\xfb\xce\x34\x2c\x49\x62\xc6\xb7\xa6\x8e\xbf\xe7\x39\x00\x09\xfd\x62\x6d\x99\x1c\x22\x1e\x65\x40\x87\x88\xd1\x85\x42\x8e\x45\x0f\x47\xf1\x8a\x8e\x3a\x44\xc4\x5d\x48\x70\x27\x26\xec\x12\x58\x37\xb2\x8e\x50\x31\x02\x53\xc5\x12\xa3\xd0\xf3\x99\x0e\xc3\x08\x08\x9e\x55\x9b\xb3\x5a\x73\x56\x69\x9e\x4b\xf4\xa5\x1f\xc7\x0d\x23\xea\x5a\xd1\xe5\xc5\x12\x96\x91\x87\x71\x6a\x89\x3c\x39\xff\x89\xf7\x04\x0f\x59\x57\x33\x8e\x8a\x00\x30\x2c\x15\xd5\xb3\x7a\x90\x07\x51\x28\x63\x57\x26\xa0\x2c\x20\x33\x00\xa5\xff\x4c\xfd\x17\xb5\x3f\x21\xa7\x9c\x2a\x73\xa2\x15\xb0\x36\x9a\x36\xa7\xf4\x31\xe7\x2a\xea\x9a\xf1\x1c\x02\x7c\xc4\x30\xde\x72\xd0\x5e\xc3\x91\xa7\x6a\x74\x24\x7a\x82\xea\xe7\x6c\xa1\x26\x74\x8a\x3b\x24\x4d\x4b\xea\x11\x16\xa9\x33\x3f\x9d\x1a\xea\xfc\xb2\x0c\x3a\xe7\xd8\xeb\xd5\xb4\xff\x2d\x4c\xc2\x47\x36\xaf\x2c\xfd\xb6\x02\xfe\xe6\x30\x79\x44\x8c\xcb\xa7\xb8\x4d\xe6\x0d\xb4\x33\xd0\xbe\x40\xbb\x02\xed\x09\xae\x56\x4e\xe2\x4f\xaa\x8a\x35\xdb\x76\xc7\x2f\x74\x44\xa2\x55\xac\x9c\xf4\xaa\x88\x7e\xcf\xa1\x18\x4f\xe3\x05\x3f\x17\x9b\xfa\x4d\x12\x9e\xbe\x3c\x98\x48\xa1\x1f\xc5\xac\x46\x16\xd2\xb3\xca\x7b\x6c\x21\x1a\x60\xbc\x83\xa4\x0b\x45\x82\xd4\xa4\x49\x8e\xdd\x08\x3d\x4a\x1f\x9d\xfc\x73\x52\x0a\x17\xc8\xd3\x8b\x00\xae\x92\x35\x10\xa9\xdc\xc9\x47\x3f\xd4\xb9\x98\x94\x2b\xe1\xa4\x57\x3a\xe5\x13\xe9\xab\x90\xbb\x4c\xed\x62\x9b\xa5\x32\x5a\x32\x47\xfc\x2f\x12\x3c\xfe\x9f\x4c\x98\xff\x8f\x6f\x84\xf2\x8d\x2f\x25\x75\x72\x88\x63\x96\x14\x33\x9a\x0d\xc5\x80\x22\x3d\x1f\x36\x6d\xd2\
x97\x45\x8e\xa9\x1e\x9b\xef\x54\x6e\x0d\x83\xe9\x33\x9b\xed\x6f\x05\x1a\xbc\x44\x73\xbf\xa6\xdd\x9a\xf6\x6a\x52\xa2\xe9\x74\x03\x91\x48\xc3\x4c\x16\x52\x54\x40\x19\xd9\xf4\x94\x76\x8a\x13\x4a\xaa\x03\xad\xf3\x14\x2b\x00\x74\xb4\x00\xd0\xf1\x22\xde\x9f\x22\x06\x88\x74\x93\x7f\xe7\xfc\x13\x40\xbc\x8f\x02\x10\x5f\x7a\x79\x3b\x7b\xc5\x90\x45\xd2\x35\xba\x84\xae\x2a\xe8\x2f\xb2\xd7\x6b\x0d\x23\xf6\x41\x4d\xc2\x8b\x80\xe5\x19\x4e\xc5\xcf\x36\xe5\xd9\xe9\x38\x38\xa1\xc5\x39\xc4\xa1\x2a\x52\xc1\x29\x95\x88\x92\x8c\x0c\xef\x4f\x67\x7d\xe2\x57\x20\xb4\x15\x54\x29\x96\x73\xf1\x6a\xe6\x2a\xce\x56\x22\xd7\xc6\xb2\xea\x60\xbe\xb7\x3e\xc0\x78\x15\x23\x3a\x41\xd0\xf1\xc5\xa0\x14\xcf\xaa\x08\xd6\xf4\xe6\xa5\x37\x55\x72\x53\xa5\xb6\x71\x89\x6d\x0e\xba\x2e\xb3\x73\xa5\xf7\xed\xca\xb3\x5f\xb5\xa6\x58\x1e\xa2\xaf\x8d\x7a\x4b\xf1\x6c\x55\xf1\x8e\x53\x44\xc3\x78\x24\xba\x3b\x15\x41\xb2\xba\xcc\xc5\xeb\xaf\x6f\x42\xf4\xc6\x1e\xb9\x8f\x54\xa6\x4a\x6f\xed\x4d\x2b\x9b\xd0\x66\x10\xc1\x5f\x2c\x15\x1a\x50\x07\x55\x55\x44\x88\x41\xa9\x49\xa7\x78\x00\x20\x7b\xd3\x57\x43\x38\x48\x2f\x4e\x86\x4a\x1f\x16\x13\xeb\x81\xe2\x48\xc5\x20\x93\x4f\x50\x58\x7a\x59\xc6\x7b\x00\x3e\x65\x8f\x3b\x17\x79\xc1\xef\x92\x7b\x9a\x0e\x13\x27\x77\xa6\xf7\x2e\xf1\xc8\x3c\x19\xd6\x90\x8a\xc3\xe2\x08\x30\xa7\xec\xbc\x0b\x3a\x00\x4b\x18\x45\x19\xeb\x71\x65\x1e\x14\x37\x88\x3b\xc3\x87\x92\x39\x9b\xea\xaf\x72\xd7\x0e\xbe\xbd\xf6\xa7\xc5\x85\xbf\x6f\xf5\x87\xff\x05\x00\x00\xff\xff\x10\xbf\x87\x51\xb4\x42\x00\x00") + +func fontsAcrobaticFlfBytes() ([]byte, error) { + return bindataRead( + _fontsAcrobaticFlf, + "fonts/acrobatic.flf", + ) +} + +func fontsAcrobaticFlf() (*asset, error) { + bytes, err := fontsAcrobaticFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/acrobatic.flf", size: 17076, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsAlligatorFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\x4d\x6f\xdc\x36\x10\xbd\xeb\x57\x0c\x20\xde\x54\x04\x88\x5d\xd4\x08\x51\x14\x6a\x8f\xbd\xf6\x17\xd8\xad\xd3\x06\x75\x13\xc0\x4d\x0e\xfe\xf7\xc5\x8a\xe4\xcc\x7b\x33\xd4\x7a\x65\xd9\x40\x0f\xda\x0d\x16\xa2\xb2\xcb\xf9\xe0\x9b\x37\x1f\xf2\xc7\x87\x8f\x57\xb7\x49\x6e\xe4\x46\xae\x7e\x90\xeb\x2b\xb9\x1e\x7e\x7e\x78\xf8\xf4\xe7\xed\xd7\x2f\x8f\x72\xf7\x24\xbf\x7d\xfa\xe7\xcb\x67\xf9\xe5\xf1\xf6\x8f\x87\xfb\x27\xf9\xf1\xdf\xa7\xbb\xeb\xf9\xf6\xee\xfe\xf1\xdd\xed\xef\xef\xbe\xfd\xfd\xd3\xf0\xfe\xe6\xeb\x5f\xf2\xeb\xb7\xcf\xf7\xdf\xc9\xfb\x0f\x1f\xbe\x1f\x86\x34\xc7\x7f\xf3\x20\xcb\x2b\xe7\x9c\xea\x75\x9e\x72\x92\x72\x3d\xe5\x29\x49\xb9\x9e\xc6\xd3\xe5\x72\x5d\x2f\x4f\xd7\x49\x52\xf9\xfd\x3c\x8c\xe3\xd8\xae\x61\x53\xb7\x31\x6e\x5e\xbe\xdd\x36\x85\xe5\xb2\x86\x65\x11\x64\x4b\x99\x07\x5a\x9a\xc0\x45\x62\x31\x46\x54\x89\x29\x37\xa3\x8a\xf6\x99\xde\x55\xb9\x69\x9c\xd4\xde\xd3\xf7\xa6\x91\xde\xf5\xb7\xe3\x34\xca\xf2\xa9\x8a\xc8\x38\x2e\xb7\xd4\x78\x50\xa6\xe8\x51\x15\x51\x89\xa9\x2a\x01\xc2\x16\xf1\x53\x9e\x4c\x50\x51\xa8\xad\x55\x0f\xfd\x3e\xc8\x5b\x04\x92\xdd\xde\x62\x33\x4c\xb7\xd6\x8d\x74\xe1\xec\x63\xcb\xc8\xc1\x19\xdd\x0b\xb2\x4c\x14\x8b\x2b\xc6\x89\x4c\x65\x5b\x35\xad\xd8\x73\x12\xd4\xa4\xaa\x5a\xa3\xbe\x92\x07\x94\x81\x49\xad\x4b\x66\x4d\x32\x5b\x92\x9d\x51\xd2\xb3\x19\x12\xb9\xcd\x6d\x4a\x20\x69\x9b\x82\x83\xf4\xb2\x9c\xc9\x33\xd0\x4f\x80\x40\xbd\x36\xc4\x95\x8d\xf1\x44\xea\x09\xa4\xd5\x4d\x93\x86\x41\x22\xf7\x07\x11\x78\x00\x0a\xad\xd3\x67\x0a\xe8\x12\x38\x02\x14\x0f\xb2\x02\xae\x93\xc0\xe9\x36\xb7\x09\x9a\xa8\xfe\x02\xd1\x0c\xbe\xfa\x7d\x73\x63\x8d\x7a\x3d\x1f\x32\xbb\x5e\x9e\xfe\xbb\x91\x47\x52\xde\x48\xfc\xe3\x62\x8e\x7a\x31\x3a\xd1\x5e\xb6\x2f\xdc\x23\x7a\xd2\xdf\x34\xe4\xa2\x2f\x81\xa8\x60\x6f\x65\x2b\x94\xd7\x28\x8b\x74\x78\x25\x0b\xab\x89\x70\x40\xe2\x10\x28\x9e\x0f\xe8\x98\xea\x9a\x0f\x06\xd6\x8c\xcc\x76\x07\x29\x48\x1c\xf4\x33\xc5\x14\xe2\xf3\x04\xce\x2a\xbb\x82\x13\xd3\x4c\x5d\xb6\x54\xc3\x74\xbb\x88\x1c\x3b\x61\xe1\xb3\x17\x90\x11\x66\x30\xca\x61\x8e\x05\xd5\x38\x16\x12\x78\x2f\xbb\xc0\xb3\xb8\x4b\x5e\x5c\xf2\xe4\x9a\x2e\x71\x29\x99\xe8\x5d\xca\x3e\x35\xe1\x09\x6c\x6d\x90\x5c\x40\x9a\x08\x8e\xce\xaf\xd1\xb1\xe7\x3d\x4b\x08\x6a\x24\xa3\x87\x58\x85\x63\xce\xac\xbf\xab\x62\xca\xca\x7b\x36\xe7\x75\xdb\x04\x10\x03\x95\x02\xe6\xca\x8e\x79\xd1\xbe\x68\xe0\xc5\x9e\x45\xe1\x41\xb6\x02\x76\xa3\x67\x33\x1b\x8d\x1c\x6e\xf1\x19\x52\xa8\x01\x09\x71\x04\x54\x60\xa6\x22\xa8\xb6\x19\xaa\x10\x52\x4b\xf7\x18\xba\x47\x6e\x8d\xa3\x17\x43\x97\x72\x64\xda\x46\xa9\x31\xe7\xed\xd9\xed\x5c\x71\xd0\xab\x38\x90\x88\x37\x54\x1c\x97\xe6\x35\xae\x7e\xb1\xf6\xb6\x7d\x2a\xd4\xa9\xfe\x7d\x49\x5e\xbb\xa0\x0a\x12\x63\x8a\x4b\xaa\xa0\xd5\xc8\x71\x81\xe3\xe2\x66\x1e\x7c\xd4\x50\xa5\xc1\x41\x13\x59\x1f\x2a\xde\xda\xc4\xf4\xaa\x79\xa6\xc5\xa5\xc4\xca\x53\x2b\xb6\x94\x1e\x2d\xc9\x61\x7d\x2f\xec\x6b\xab\x82\x29\xc7\x72\x8e\xa7\x36\x47\xbb\x2b\x28\xc3\xc1\xd1\x50\xb9\x18\x67\x43\x11\xca\x05\xe0\xe2\x90\xb2\x2e\x1a\x84\x70\x5e\x4f\x84\x67\x23\x1a\x92\x3e\x64\xe3\xd5\xa0\x7e\x21\x9b\x20\x5d\x13\xa8\x69\xfd\x12\x16\xdb\x66\x36\xc3\xf1\x15\xcc\x76\x4d\xba\xa1\x19\xea\x39\xbb\x85\x14\x1e\x42\x40\x3a\xc9\xa3\x5b\x84\xec\xd5\x22\x8f\x39\x4f\x39\x6f\xd7\x62\x05\xf9\x1b\x21\x90\xc7\xdc\x81\xc0\xa4\x45\xc9\x16\x08\x48\x28\x35\x25\xcc\x4b\x24\x8c\x4d\x6a\xdc\xf5\xd1\xdf\xc1\x81\x45\xdf\xd9\x33\xa0\xea\x5e\x0f\x22\xd6\xf7\x70\x8f\x19\xd0\xdf\x12\xae\xd7\x22\x1e\xbc\x32\x7d\x5d\xa2\x2a\x51\x93\xa8\x08\xeb\x51\x09\x31\xa8\x91\x56\x00\xd1\x3d\x18\x5f\xae\x72\xc7\x63\x4d\x9c\xe0\xc1\x5
0\x17\x14\xc7\x3d\x67\x0f\x06\x87\x22\xc9\x05\x04\xd5\x6d\x14\x04\x04\x7f\x0a\x3f\x2e\xc3\x63\x3a\x6d\x99\x28\x51\x49\x0e\x85\xb9\x56\xe6\x30\x6c\x50\x0a\xb4\x74\xdb\xb8\x1f\x4a\xf5\x46\xff\xb1\x60\x5f\x4e\x3d\xfa\xdb\x8f\x81\x4a\x7a\xf4\xa1\x50\x74\x74\x99\x80\x47\x5b\x36\x0c\xea\x44\xc3\xde\xea\xd2\x73\xf2\xee\xaa\x76\x67\x1a\xa4\xee\xcf\xb3\xe0\xf3\x2d\xf6\x9b\x9b\xfd\x8a\xd9\x3f\x6f\xca\xfe\x14\x64\xbb\x9b\xb5\x3d\xbd\x44\x87\xe4\x1c\xc3\x39\x7a\x73\xdc\xe6\x08\xf6\xa2\x66\x2d\x44\xd8\x5a\x53\x1c\x8b\x8b\xdc\xef\x4c\x37\x34\xc5\x55\xb8\x4a\xf7\xed\x69\xf3\x72\x93\xad\x95\x64\x13\x4d\x43\x1c\xb1\xf1\x35\x78\xa5\x6b\x76\x13\x0c\x86\x37\xd9\x60\xba\x1e\xb2\x83\xb3\xb1\x9c\x58\x9d\x0d\x53\xce\xaa\x06\x32\x8b\x42\xcd\x4f\xd3\xfb\x87\xd0\xc9\xf7\x94\x57\x2c\x93\x29\x22\x80\x84\xe3\xb0\xf5\xf9\x7c\xcf\xac\xef\x38\xb6\x37\xe6\xed\xd6\x58\x0e\x75\x6b\xcd\x4e\xb7\xae\x78\x8b\xa9\xe1\x39\x46\xf5\x21\xe6\x22\xcc\x05\xd8\x05\x86\x92\xa4\x22\xa0\xf0\x45\xc9\x91\xa5\xf9\x2c\x9f\xe5\xb1\x07\x25\xde\x7e\x71\x33\x77\x5b\x4d\x9e\x2e\xf2\x78\x31\x46\x9d\x37\xbf\x41\x4d\x62\x43\xc7\xb5\xa5\x25\x77\xf3\x6b\x98\x44\x27\x71\xed\x7a\x55\xc5\xdd\xe8\x0c\x0f\xc2\xe4\x20\x8c\xc3\x71\xbe\x89\xc2\xf8\x06\x89\xf7\x84\xe3\x89\xdd\x3d\xd6\xa9\x02\xd5\x99\xf6\x7c\xc7\x1e\xef\xd8\xd3\x9d\xc4\xf4\x85\xbe\x0c\xae\x9c\x8f\xde\xf8\xe8\x8d\xe7\xa3\x37\x3e\x7a\xe3\xa3\x37\x3e\x7a\xe3\xa3\x37\x3e\x7a\xe3\xa3\x37\x3e\x7a\xe3\xa3\x37\x3e\x7a\xe3\x2e\xe8\xfe\x4f\xbd\x31\x3d\x30\x56\xa0\x57\x79\x6c\x51\xff\xa1\x71\x15\xd0\xcb\xcd\x18\xbb\x18\xb7\xf3\x20\xf8\x48\xdb\xd5\x65\x29\xb8\xac\xa7\xb0\xc4\x27\xbf\x5c\x74\x91\x6f\x96\x05\x93\x8e\xdf\xbb\x9e\xc6\x4a\x55\x83\x7f\x8a\x66\xce\x05\xc4\xf9\x9b\x9d\xce\xb9\xde\x8d\x37\x3b\x1d\x74\x55\x8d\xde\x6f\xb9\xfc\x2f\x00\x00\xff\xff\xbc\x12\x7c\xa7\x15\x2c\x00\x00") + +func fontsAlligatorFlfBytes() ([]byte, error) { + return bindataRead( + _fontsAlligatorFlf, + "fonts/alligator.flf", + ) +} + +func fontsAlligatorFlf() (*asset, error) { + bytes, err := fontsAlligatorFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/alligator.flf", size: 11285, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsAlligator2Flf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\x5f\x8f\xe3\x34\x10\x7f\xcf\xa7\x18\xc9\x96\x78\x30\xaa\xc4\x72\x62\x75\x16\x42\xad\x00\x1d\xcb\x9f\x17\x58\x09\x78\xec\xd1\x94\x46\x97\x6d\x4f\x6d\xf6\x8e\xe5\xd3\x23\x7b\x3c\xe3\x19\x27\xd9\xa4\x3d\x4e\xba\x87\xb6\x2b\x37\x76\x9c\x99\xf1\xf8\x37\x3f\x8f\x9d\xdd\xb6\xdb\x9b\xb5\x85\x5b\xb8\x85\x9b\xaf\xe0\xcb\x1b\x78\x51\xad\xda\xb6\xf9\x7b\xdd\x1d\x8e\x37\xf0\xfa\x09\xbe\x5b\xef\x9b\xba\x85\xdf\x9b\x7f\x17\xb0\xfa\x69\x05\xbf\xd4\xc7\xb6\xd9\xc3\xab\x63\xfd\xf4\xfe\xd0\x6e\xe1\xeb\x87\xd8\xb0\x7c\x7d\x5c\xef\x1e\x4e\x8b\xc7\x4d\xdd\x2e\xea\xcd\xe3\x37\xd5\xcd\x6d\xb7\x83\x1f\x1f\xdb\xa7\xcf\xe1\x8b\x97\x2f\x5f\x54\xf7\xbb\xe6\x04\xcd\x09\xba\x5d\x0d\xbf\xdd\xff\xba\xba\x7b\xf5\xc3\x3d\xbc\xab\x8f\xa7\xe6\xb0\x87\xc3\x36\xb6\x1f\xeb\x77\xcd\xa9\xde\x00\x9b\x00\xdb\xc3\xbe\x83\x3b\xa8\x37\x4d\x57\x6f\x16\xd5\x5d\xf7\xd9\x09\xbe\xff\x63\xf5\xed\xfd\xcf\x7f\x42\xdb\xbc\xa9\xe1\xe1\x09\x0e\xdd\xae\x3e\xc2\xdb\xc3\xa9\xab\x37\xf8\x40\xfd\xcf\x5f\xf5\xdb\x2e\x8a\xec\x9a\xb6\x83\xf7\xeb\x13\x74\xeb\x37\xf5\x1e\x0e\x8f\xdd\x62\x51\xd9\x65\xff\x6f\x59\x79\xef\xed\xb2\xf2\x2e\x94\xce\xbb\x50\x9a\x5c\x5a\xb0\xb0\xac\x8c\x31\xa9\x2b\x70\x77\xc0\x47\x2c\x84\x4f\xe8\x34\xf3\x6a\x59\x01\x04\x29\x58\x86\x66\x6c\x71\x1e\x4b\x6c\x71\x5e\x7d\x6d\xec\xe3\x8c\x0b\xa5\x77\xa9\x8f\x51\x5f\xec\x63\x9c\xc1\x92\x25\x1b\x63\xb0\x8c\x2d\xb1\x89\x55\xc7\x1e\xac\xc6\xa2\x62\x56\x11\xef\x3a\xe3\x9c\x77\x41\x3e\xa4\x67\x83\x19\xa4\x8f\xd5\xf3\xdd\xa4\x08\x92\xbf\xb2\xaa\x20\x4e\x8d\x92\x0c\xcc\x63\x92\x2d\x66\xba\x25\x8f\x95\x5b\x78\xac\x61\xa0\x1e\x3f\x76\x40\xf7\xb2\x0a\x3a\xfb\x03\x05\x70\x28\x2e\x0d\x32\x8d\x8c\x35\x85\xee\xcb\x2a\x68\x48\x1f\x0b\x02\x43\x71\xa2\x63\x8f\x91\x32\xba\x3b\x76\x86\xec\x6f\x9b\x26\x53\xfd\x02\xfb\x34\x83\x0f\x6f\xa4\x07\xd1\x69\xc9\x70\xf9\x4b\x26\xc7\x79\x08\x1a\x03\x86\x11\x7c\x2c\x00\x31\x40\x82\xa4\x2b\xa2\x05\x38\xe1\xa1\xcc\xc0\x4b\xee\x47\x05\xe4\xfe\xf8\xbc\x96\x9f\xa6\x82\x1a\x69\xb2\x78\xce\x09\x44\x34\xe3\x83\x0a\xc5\x6c\x13\xac\x10\xd1\x5c\xcf\xf2\x29\x48\xc7\x4a\xc4\x47\x9a\xa9\xf4\x50\x11\x96\xe3\xf5\xd2\xb6\x33\x9f\x9f\x6b\x1b\xe2\x95\x23\x53\xfa\x0c\xaf\x09\x26\xd9\x33\x34\xe3\x00\x19\x2e\x78\x4d\x11\x88\x93\xc1\x41\x20\x23\x20\xc5\xb9\x67\xc2\x03\x07\x7c\x6d\x08\x4a\x2a\xbe\x24\xde\x29\xa8\xa3\x48\x97\x70\x29\x6c\x64\x10\x8b\x2b\x34\x30\x5a\x67\x8a\x00\x15\xc6\x29\x56\x75\x29\xa2\xe4\xa0\xad\x42\x06\x8f\x3a\x0b\x9e\x29\x3b\x0d\x37\xc9\x46\x47\xdb\x4c\x34\x34\xf8\x62\xf4\x34\x7c\x4d\xa0\x1a\xdc\x39\x98\x12\x8d\x80\x88\x71\x4d\xd6\x90\xdd\xc2\xfc\x65\x89\x3a\xc9\xfe\xc2\xfc\x38\x71\x3d\x78\x9e\x65\xfe\xa8\x6b\xc6\x65\xe3\xd2\x33\x2d\xdb\x0f\x19\x9e\xa4\x03\xc8\x59\x05\x11\xe6\x3a\xe6\xb1\x46\x53\x9b\x6a\x79\x55\x99\x34\x3f\xf1\x22\xd9\x7f\x8e\xf9\xe7\xca\x16\x94\x30\x29\x1b\x23\x9e\x30\x38\xcc\x04\x16\x66\x77\xcd\x58\x24\xc6\x20\xba\xd0\x4b\x4a\x42\xa1\x8c\x42\x4a\x0b\xec\x0c\x42\x2c\xd3\x90\x01\x82\x54\xa8\x9e\x24\xc4\x22\xef\x81\xcc\x1f\x43\xd8\x90\xc8\x90\xb8\xa0\x85\xb1\xc4\x84\x9c\xb8\x99\xb0\x13\x24\x2a\xd6\x95\x3e\xec\x00\x24\xb8\x81\x97\x52\xe1\x20\x4a\xa0\x10\x2c\x44\x04\x89\x60\xb1\xcd\xa8\x36\xe3\x74\x3e\x15\xe0\x23\xdc\x99\x33\x38\xa6\xf4\x9c\xc3\x15\x29\x5c\x76\x24\x6b\x2f\x16\x30\xcb\x71\xa0\xc1\x0a\xbc\x0a\x31\x07\xc9\x48\x7e\x3e\x14\x66\xb2\x84\xb9\x90\x81\x46\x2a\x73\x18\x68\xd2\xee\x7e\x65\xca\xee\x92\xdc\x68\x4e\x28\x8d\xa3\x0a\x33\x4f\x8e\x42\xbc\x93\xd7\xab\xde\x82\x35\x53\xb6\x37\xde\x3b\xef\x67\xc8\xb6\x02\x2e\xf3\xfc\xed\x8d\x2f\xfd\xed\x92\x1b\x9e\xf5\x37\x30\x09\xcd\xc0\xc9\xa4\xbf\x87\x30\xc
8\x39\x11\x39\x46\x66\x45\xc3\x81\xac\x6b\x79\x35\x31\x63\x6e\xff\x10\x15\x29\x6e\x39\x19\xcb\x01\x3b\xec\xa1\xfe\x66\x8b\x3d\xc4\x49\x78\x66\x29\xb1\xa7\x1b\xf0\x90\xc0\xc6\x73\x88\x1c\xa9\x4c\x20\x32\x19\xee\xf3\x52\x00\xbc\x14\x24\x7e\x53\x2c\x87\xe4\xc3\x3b\xe8\x24\x59\xd3\x4d\x49\x38\x6a\xba\x95\xa7\x44\xbe\x8a\x5a\xa1\x20\x54\xa1\x28\x6d\xd6\xe4\x5e\x4d\x03\x6a\x22\x2d\xbc\x84\x1d\x2e\x65\x1e\xc5\x98\xe7\x46\xb0\x16\x0e\x4a\x3a\x28\xd3\xa9\x66\x68\xfd\x44\xe3\x43\x9b\xcd\xbb\xd8\x0b\x09\xdf\x4f\x12\x3e\xf4\xb7\xe2\x73\x53\xce\x73\xf2\xaa\x8f\x41\x12\x30\x74\x90\x31\x87\xe3\x2e\x42\x0d\xf4\x84\x17\xce\x51\x12\x45\x0e\x2a\x33\x23\x3e\x87\x19\x35\xbf\xa7\xa4\x37\x07\xcf\x04\x72\x94\x2e\xd2\x14\x4a\x54\xf8\x8c\xa2\xd8\xbd\x8c\xf8\x0b\x32\xe5\xe5\xc3\x25\xb9\x55\x92\x40\x1d\x5b\x13\x06\xc8\x54\x9e\xe7\xb0\xe0\x81\x1d\x73\x79\x32\x66\x41\x6f\xb7\x3e\x7c\xeb\x5d\x2e\x2d\x79\x35\x67\x4e\x16\xa9\x39\x5f\xb0\x5d\xea\xf9\x7c\x86\x52\x6e\x31\x33\x50\xd5\xae\x49\xed\xa7\xe4\xb1\x1c\x94\x46\xc9\xfc\x58\x9e\x30\x16\x17\x32\x0d\xb2\x13\x99\xa7\x86\x6b\x4e\x58\xf5\x3e\x60\xa2\x26\xaa\xea\xde\xa5\x95\xde\x9a\x86\x87\x6f\xa1\xb4\x78\x7c\x35\x58\x5e\x73\xec\x6b\x8e\x7d\xcd\xb1\xaf\x39\xf6\x35\xc7\xbe\xe6\xd8\xd7\x1c\xfb\x9a\x63\x5f\x73\xec\x4f\x32\xc7\x26\xea\xe4\xf3\x6f\xf5\x26\x4a\x42\xaf\xff\x26\x4a\x50\x56\xff\x9f\x00\x2c\x05\x8b\x2d\xde\xd2\x79\x71\x7a\x3d\xf4\xee\x4b\xbf\x8f\x85\xde\xbb\x2f\x0b\xf9\x3d\xf0\x90\x9b\x93\x48\x99\x13\xff\x0f\xf5\x65\xa5\xbe\x1f\xb3\xfa\x5f\x00\x00\x00\xff\xff\x2d\x9b\xb0\x75\x5d\x22\x00\x00") + +func fontsAlligator2FlfBytes() ([]byte, error) { + return bindataRead( + _fontsAlligator2Flf, + "fonts/alligator2.flf", + ) +} + +func fontsAlligator2Flf() (*asset, error) { + bytes, err := fontsAlligator2FlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/alligator2.flf", size: 8797, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsAlphabetFlf = 
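Every accessor in this generated bundle funnels through bindataRead, whose definition sits near the top of the file, outside this hunk. For reviewers, a minimal sketch of the stock go-bindata helper it is assumed to be — it simply gunzips the embedded bytes into memory (imports: bytes, compress/gzip, fmt, io):

func bindataRead(data []byte, name string) ([]byte, error) {
	// The embedded asset is stored gzip-compressed; inflate it fully into memory.
	gz, err := gzip.NewReader(bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("read %q: %v", name, err)
	}
	var buf bytes.Buffer
	_, err = io.Copy(&buf, gz)
	clErr := gz.Close()
	if err != nil {
		return nil, fmt.Errorf("read %q: %v", name, err)
	}
	if clErr != nil {
		return nil, clErr
	}
	return buf.Bytes(), nil
}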
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x57\x69\x5b\xda\x5a\x17\xfd\x7e\x7e\xc5\xaa\x55\xd4\x2a\x15\x50\xac\x76\xf2\x60\x9d\xaa\x75\xc2\x3a\x4f\x0d\x90\x40\x24\x24\x48\x42\x95\x7a\xbd\xbf\xfd\x7d\xf6\x3e\x09\x19\xe1\xf6\x4d\x1e\xf7\xda\x6b\xad\xb3\xcf\x94\x09\x0d\xcb\x28\xe1\x03\xca\x28\x16\x90\x2f\xa2\x58\x12\x15\xab\xdb\xd2\x6a\xba\x87\xda\x00\xe7\xba\xdd\xd0\x2d\x0b\x3b\x66\xbd\xad\xdb\x28\x16\x17\x56\x17\x31\xf3\xd4\x62\x2a\xbb\x5a\x4f\x73\x1d\xc3\x7b\x5f\x77\x3a\xb3\x42\x6c\x3e\x77\x2d\xcd\xd6\x3c\xd3\xb1\xe1\x18\x30\xcc\x9e\xeb\xc1\x32\x6d\xfd\xa3\xe0\x51\xf2\x98\xe8\x68\x4d\xb3\x0e\xbb\xdf\xa9\xe9\xbd\x09\x18\x4e\x0f\x86\x69\xe9\x30\x1b\xba\xed\x99\x86\x59\xe7\x62\xa1\x01\x40\x1e\x6e\xcb\xe9\x5b\x0d\x68\xd6\x93\x36\x70\x51\xd3\xf1\x4b\x9b\x9e\xe7\x22\xdb\x79\x12\x93\xaa\x91\xd7\xd2\x31\xd1\xd2\x7a\x8d\x9a\xa5\xd9\xed\x09\xe4\xf3\xe8\xf6\x4c\xdb\x73\xa1\xb9\xd0\xc0\xea\x3c\x6a\x7d\x0f\x75\xcd\x9e\xf6\xa8\x1b\xb7\xd3\x77\x5b\x7a\x43\x2c\xab\x1e\x5a\xba\xd9\x6c\x79\x34\x63\x0d\xf5\x96\xd6\xd3\xea\x9e\xde\x13\xe5\x31\xe6\x3c\x6c\xc7\x83\x69\xd7\xad\x7e\xc3\xb4\x9b\x68\xe8\x6e\x9d\x36\xaa\xe7\x8a\x52\x81\xcb\x3a\xda\x33\xaf\x1c\x96\x6e\x37\xbd\x16\x66\xf4\xe7\xa0\x71\xdd\xe9\x74\x74\x5b\x6d\x8c\x3b\x8b\x39\x68\x30\xfa\x8d\xa6\x0e\x43\xab\x7b\x4e\x4f\x14\xcb\xdc\x43\x43\x37\xb4\xbe\xe5\xa9\xc9\x76\x9c\x86\xce\x0b\xf7\x5a\xa6\x0b\xc3\xb1\x3d\xcc\x58\x66\x5b\xc7\x44\xbe\x83\x62\x79\x02\x8e\xcd\xfd\x6a\x76\x83\xfb\x9d\x15\xc5\x12\xf7\xa2\x76\x9a\xa6\x1f\x1b\x56\x48\xff\x10\x6f\xde\xbc\x41\x3c\xd2\xba\xe3\x39\x47\x29\xa6\xa7\x31\x3d\x8d\x10\x81\xd0\x1e\x83\x52\xe0\x2d\xde\x92\xf0\x96\x0e\x8c\xe6\xe9\x42\x4c\x62\x52\x49\x93\x74\x50\x36\x99\xa1\xa9\x76\x93\x19\x1a\x77\x33\x05\x60\x6a\x4a\x75\x3c\x35\xa5\x4c\x42\x4a\x18\x21\x05\xe3\xd4\x70\xf0\x48\x22\x05\x72\x39\x56\x72\x40\x8e\x9d\x5c\x2e\x87\x1c\x71\x3a\x15\xcf\x65\xcd\x3f\xd8\x2e\x96\x32\x83\x14\x98\x99\x81\x14\x33\x33\x48\x44\xa5\xc7\x2e\xc0\xec\x2c\x93\xd9\xd9\x44\xf4\xf5\x68\xd3\x60\x2a\xef\x40\xa7\x14\x78\xf7\xee\x1d\x73\x3a\xa2\x3c\xf0\x11\x29\x94\x02\x73\x14\xe7\xe6\xe6\x86\x79\xa4\xf7\xc8\x18\xa9\x30\x3f\x4f\xd9\x7c\xb8\xba\x48\x59\x3e\x9f\xcf\xea\x68\x44\x77\xef\xdf\x07\x21\xba\x59\x00\x16\x16\x18\x17\x16\x58\x5d\x58\x60\x97\x00\x52\x30\x64\xde\x49\x85\x42\x81\x84\x02\x50\x28\x30\xd2\x29\x05\xa9\x84\x81\x9f\x2e\x2c\x16\x89\x17\x15\x20\x06\xc5\xa2\x55\x0c\xda\x06\x20\x05\x4a\x25\xa2\x25\xa0\xc4\x2a\x33\x8a\x24\x96\x4a\xa5\x74\xc5\xe2\xe2\xa2\x4f\x17\x09\x62\x2c\xf4\xa2\x15\x4b\xc0\x12\x42\x58\x5a\x5a\x52\x66\x1c\xa2\x15\xe5\x72\xb9\x0c\xa9\xde\x6a\xcc\x7c\xb3\x1c\x63\xb1\x75\x60\x59\xf1\x65\xa5\x2f\x2f\x2f\x2f\x33\x82\x1c\xd2\x15\x4f\x6d\xd9\x07\x3a\x94\xf0\x41\x19\x1f\x90\x8d\xc9\xbd\x5e\x59\x59\x21\x61\x05\xc0\x0a\x46\xf3\x74\xe1\xea\xea\xea\x2a\xa4\x58\x05\x40\x18\x70\xa6\x8c\xab\x23\x46\x24\xf6\xf1\x23\x52\x59\xfc\x9e\x93\xe2\xd3\x27\xc4\xb2\x4f\xc9\x36\x9f\x3f\x13\xf9\xfc\x99\x34\x8a\x21\x0b\xbc\xe4\xee\x12\xfb\xf2\xe5\x0b\xb2\xf2\xd8\x83\xf1\xf5\xab\x62\x84\x24\x7e\xfd\x1a\xb2\xc0\x4b\x74\xbe\xb6\xb6\x46\x7c\x0d\xc0\x9a\xd2\xd7\x94\xbf\x86\x38\x26\xf7\x43\x4a\x49\x82\x24\xaa\x50\xfa\xa8\x1a\xfa\x7e\xba\xb0\x52\x21\x5e\x01\x2a\x04\x95\x4a\x25\xc2\x14\x24\xe6\xb8\xbe\xbe\xbe\x4e\x7c\x1d\xc0\x3a\x46\xf3\xf4\x50\xdf\xbe\x7d\x83\x14\xdf\x94\x1c\x07\xdf\x4b\x0c\xb5\xb1\xb1\x41\x74\x03\xd8\x48\x83\xf2\x12\x15\x9b\x9b\x9b\x9b\x90\x62\x53\xa9\x44\x62\x6c\x33\x5d\xb1\xb5\xb5\xb5\x05\x29\xb6\x94\x4a\x24\xc2\x90\x79\x91\xb6\xb7\xb7\x89\x6f\xfb\xfa\x36\xb0\xbd\xed\x73\xc2\xc0\x4f\x6d\xc0\x0e\xb0\x83\x10\x76\x76\xa2\x4c\x41\x62\xa
8\xef\xdf\xbf\x13\xfb\x8e\x44\xf4\xf5\xe4\x87\x63\x17\x69\xdc\x0d\xf8\xee\xee\x6e\xe6\xac\xf6\x80\x3d\x10\xec\x91\xbc\xb7\xc7\x6e\xc0\x94\x97\x98\xd5\x0f\x45\xb3\xe1\xc7\x8f\x1f\xe9\x8a\x7d\x00\xfb\x90\x62\x7f\x1f\xfb\x8c\xd8\x57\x3c\xd0\x7d\x4c\x4d\xee\x00\xc0\x01\xa4\x38\x38\xf0\x11\x07\x3e\xe2\xc0\x47\xe6\xe9\x9b\xed\xf0\xf0\x90\x84\x43\x00\x87\x19\x18\xf8\xa9\xc2\xa3\xa3\xa3\x23\x12\x8e\x00\x1c\x21\xce\xa3\x98\x1e\xf1\xf8\xf8\x98\x15\x8a\xc7\xc9\xe4\x98\x13\x6a\x73\x3c\xac\x3a\x1e\x96\x53\x7d\xb5\x5a\xad\x92\x55\x05\x50\x45\x94\x57\xe1\xeb\xd5\x6a\xe6\xc0\x27\x27\x27\x24\x9c\x04\x86\xcf\x89\x9d\x90\x1e\xe1\xb1\xc2\x9f\x7c\xb0\xf2\xf3\xa7\x6f\x8d\x49\x62\xe5\x54\x7f\x0a\xe0\x14\xa3\x11\xa7\xa7\xa7\x99\x03\x9f\x71\x72\x86\x68\x86\x33\x4e\xb8\xd9\x19\x13\x2e\x38\x8b\x0d\x1a\xcd\xa4\x38\xe7\xe4\x1c\x89\xec\x5c\x65\x38\xe7\x93\x4b\x38\x19\xd5\xcd\x05\x80\x0b\x92\x2e\x70\xa1\xac\x0b\xd5\xc4\xe7\x43\x3f\xb9\x8c\x4b\x00\x97\x24\x5c\xe2\x52\x19\x97\xc8\xc6\x44\xe1\x15\x1d\x4a\xb8\x52\xc6\x95\x6a\x70\xa5\x1a\x46\xfc\x78\xe1\xf5\xf5\xf5\x35\x08\x58\x4e\x02\x7b\x89\x27\xef\xe6\xc6\xe7\x9c\x90\x4e\x09\xfb\x37\x37\x7e\xbb\x9b\x9b\x8c\xa1\x6e\x6f\x6f\x6f\x99\x67\xc0\xd0\x8b\xbf\x17\xef\xee\x88\xde\x01\x77\x49\x33\x13\x32\xbe\x99\x7e\xbc\xbf\xbf\xbf\x0f\xa2\xdf\xf4\xd7\x2f\x24\xbf\xf0\x59\x1f\xfc\xb0\x23\x4d\x83\x14\x1a\x38\x72\x8e\xe8\xdb\xb2\xa6\x58\x00\xb5\x9a\xcf\x6a\x21\xcb\x98\x6b\x44\xad\xd7\xeb\x90\xa2\x1e\x63\xe9\x8a\x06\x42\x68\x34\x08\x1a\x31\x36\x66\x3f\x74\x5d\xa7\x08\x8e\x3a\x52\xaf\x7b\xc3\x20\x62\x90\x64\x44\x72\x3f\x26\xbf\x0c\x41\x6c\x36\x9b\x14\xd1\x1c\xe6\x08\x73\x29\x5a\xaa\x55\x00\xad\x96\xcf\x5a\x21\xa4\xa7\x2c\x85\x69\x22\x9a\x85\x34\xb9\x75\x0f\xc8\x60\x04\x0f\x3e\x7b\x78\xe0\x8a\xb6\x6a\xd3\x46\x9b\xa1\x1d\x63\x40\x3b\x3d\x0f\x0b\xe9\x3f\xe5\xcb\xd4\xcd\x2d\x3a\x9d\x4e\x87\x11\x74\x86\x98\x7a\x08\xe2\x17\xdd\xb6\x6d\x06\xc0\x0e\x61\xcc\x25\x74\x1c\x87\x22\x9c\x61\x9e\xba\x2e\x61\x07\xdd\x6e\x97\x01\xe8\xc6\x58\x08\xe9\x75\xe0\xf1\xf1\x91\xf0\x11\x78\x8c\x72\x04\x1c\x78\x7c\x4c\xce\xaa\xd7\xeb\x51\x54\x79\xf2\xd1\x4b\x3c\x44\xae\xcb\x91\x72\xd7\x4d\x37\xf5\x98\x70\xf4\x3c\x0f\xa1\xe2\x8d\x5f\x6b\x1f\xe8\x87\x80\x7e\xbf\x3f\x7e\x23\x7f\xe3\xf7\x30\xe2\x77\xe6\x94\xa3\xdb\xf2\x04\xe0\x89\xf1\x89\x91\x61\xd4\xe5\x0d\xe2\x33\x9e\x29\x7f\x8e\xe4\xa3\x17\x30\x00\x06\x21\x60\x30\x18\x28\x8f\xc5\xc1\x20\xd5\xf9\x9f\x3f\x9c\xfb\xf1\x4f\xba\xf3\x97\x17\x62\x2f\x2f\x24\x52\x0c\x59\xe0\xc5\x77\xe7\x9f\x7f\xb2\xff\xd8\x7c\x7d\x55\x6d\x09\x49\x7a\x7d\x0d\x59\xe0\x65\xbf\xdc\xfe\xc5\xbf\x90\x82\xe2\x7f\xbd\xba\x1d\xf0\x6d\x3d\xee\xdf\x87\x74\x85\x2a\x19\xf1\xcb\x6c\xe4\x2f\xb2\xa0\xf0\xff\xfe\x99\xa1\x9e\x3c\xfc\xcd\xf7\x20\xda\xf4\x3f\x1e\xdb\x60\xe5\x7f\x7f\x2b\xff\xed\x7f\x4c\xeb\xc9\x05\xfc\x2f\x00\x00\xff\xff\x82\x19\xcc\xe4\xe3\x15\x00\x00") + +func fontsAlphabetFlfBytes() ([]byte, error) { + return bindataRead( + _fontsAlphabetFlf, + "fonts/alphabet.flf", + ) +} + +func fontsAlphabetFlf() (*asset, error) { + bytes, err := fontsAlphabetFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/alphabet.flf", size: 5603, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsAvatarFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\xcd\x6e\xdb\x46\x10\xbe\xef\x53\x7c\x30\x04\x38\x41\x6d\x8f\x65\xd4\x06\xdc\x13\x83\xa2\xbd\x15\xe8\x03\x0c\xb0\x5d\x89\x4b\x89\x28\x45\x1a\x24\x95\xd8\xc0\x3e\x7c\xb1\x33\xa4\x44\x51\xa4\x63\xb7\x87\xe6\xe0\x83\xb2\x12\x39\x7f\xdf\x37\x3f\x3b\x71\x56\x64\x77\x6e\x81\x07\xdc\x63\x79\x8b\xe5\x03\x96\x0f\xe6\xcb\x57\xd7\xba\x1a\xab\x17\xfc\x5a\xb8\x7d\xea\xf1\x87\xab\xdb\xbc\x6c\x70\x7b\x47\x8f\xf7\xc6\xfc\x9e\x6f\x0a\xdf\xa2\xf6\x85\x77\x8d\xc7\xdd\xcd\x12\xd7\xd7\x58\xde\xe1\xcb\x7e\x83\xe5\xe3\xe3\xcf\xe6\x4f\x5f\xef\xf2\xa6\xc9\xab\x12\x79\x83\xad\xaf\xfd\xea\x05\x9b\xfc\xab\x2f\xd1\x56\xd8\x55\x69\x9e\xbd\xa0\xdd\xe6\x0d\xb2\xaa\x6c\xaf\xe0\x1a\x14\x55\xb9\x89\x67\xbb\xf5\x46\x04\x72\x5f\x5f\x36\x28\xdd\xce\x47\x1b\x4f\x85\x5b\xfb\x14\x55\x09\x87\x75\xb5\xdb\xf9\xb2\x45\x91\x97\xfe\xc6\x98\xdf\x9e\x9f\x0a\x57\xba\x36\x7a\xab\x32\x64\x79\xdd\xe8\xbb\x5f\x4c\x44\x87\x6b\x5c\xec\xdc\x26\x5f\xa3\xdc\xef\x56\xbe\xbe\x40\x56\xd5\xc8\xf2\xc2\x23\x4f\x7d\xd9\xe6\x59\xbe\x16\x65\xe3\x00\xe0\x1a\xcd\xb6\xda\x17\x29\x5c\xf1\xcd\xbd\x34\x58\x79\xfc\xe5\x2e\xaf\x44\xa9\xac\xbe\x99\x85\x0a\xb5\x5b\x8f\x8b\xad\xab\xd3\x55\xe1\xca\xbf\x2f\x22\x01\x4f\x75\x5e\xb6\x4d\xc4\xe0\x20\x4f\xaf\xb0\xda\xb7\x58\xbb\xf2\xb2\x8d\x66\x9a\xdd\xbe\xd9\xfa\xd4\x3c\xa8\x85\xad\xcf\x37\xdb\x36\x46\xec\xb0\xde\xba\xda\xad\x5b\x5f\x9b\xfb\x57\x5e\x5e\xa1\xac\x5a\xe4\xe5\xba\xd8\xa7\x79\xb9\x41\xea\x9b\xb5\x2f\x53\x5f\x37\x66\x79\xab\x7a\x3b\xf7\x2c\xd0\x51\xf8\x72\xd3\x6e\xf1\xc9\x3f\xf7\xd2\x43\xd6\x9a\xcf\xf8\x09\x0e\xd9\x3e\xdd\x78\x64\x6e\xdd\x56\xb5\x59\x3e\x88\x85\xd4\x67\x6e\x5f\xb4\x1a\xed\xae\x4a\xbd\x20\x3f\xe4\xaa\x17\x53\x2e\x63\x80\x27\x76\x8d\x59\x2c\x92\xa9\x4f\x62\x60\x91\x18\x02\x27\x26\x20\x24\x86\x2d\x25\xe6\x93\xfd\x9c\x18\x00\x49\x62\x2c\x60\x13\xc3\x21\x90\x3c\xc1\xc4\xbf\x7a\xc8\x6f\x4b\xc1\x06\x8e\x0a\x80\x05\x28\x5a\x8e\x5f\x38\x31\xe0\x80\x40\x38\x0a\x27\x06\x14\xc4\xb7\xe5\xa8\xc0\x51\x9f\x21\xdf\xa3\xa6\x48\x4b\x00\xd1\x32\x07\x8a\x02\x20\x90\xe8\x50\x0c\x9b\x09\x1c\x3a\x8b\x0a\x25\x8a\xd2\x0d\x8b\x06\x6c\x8c\x84\x02\x45\x6b\x6c\x2d\xf1\x50\x34\x31\x0a\xe9\xe4\x13\x5f\x58\x2b\x31\x51\x14\x88\x8f\x03\x8b\x2b\x6b\xf9\x80\xd7\x5a\x7d\x24\xb8\x34\x04\x4b\x21\x31\x64\x2d\x8d\x48\x11\x4e\x7a\x4a\x7a\x46\x7a\x42\x06\x7c\xa8\x82\x02\x80\x0d\x08\xd1\x43\x88\xbf\xad\x20\x0c\x36\x60\x40\xde\x90\x45\x8c\x3e\x0a\xed\x18\xc0\x21\x10\xdb\xc7\x6d\x0f\x60\x70\x1a\xc0\xf1\x13\x59\x60\xea\x49\xe9\xe4\x84\x41\x52\x10\xa4\xfc\x0f\x8a\xa0\x33\x2f\x19\x97\x72\xd2\xec\x04\xb0\x90\x23\x6e\x69\xe8\xee\xb4\xf2\x8e\xe5\x37\xb2\x67\x3b\xbe\xe8\xc8\x1f\xd9\x09\x7b\x03\x7c\xaa\x10\x35\x24\x37\x2c\x27\x9d\x05\x20\x8c\x27\x86\x18\xa4\xce\x59\x82\x00\xd0\x9f\x6c\xc3\x49\xc4\x3d\x44\x65\x30\x74\xa9\x8c\xa1\x60\x12\x62\xe7\x01\xd6\x0a\x50\x8a\xa1\x45\xac\xac\xa1\x4d\x70\x72\xf0\xa0\x96\x09\xe7\x21\xcd\x28\xd8\xbe\x8f\x48\x13\x64\x99\x5f\xf3\xd0\xb1\x34\x08\x05\xd3\x1e\x06\xd5\x30\xaa\x8a\x51\xa9\x1c\x0b\x4f\x9a\x08\xda\x45\xb1\x54\xa4\x1d\xa5\x8f\x70\x6c\xa4\x93\x26\x19\xd5\xe6\x4c\xad\x6a\xeb\x75\xbd\x27\xee\x65\x60\xc0\x6a\x51\x46\x96\x87\x3d\x7e\xc6\x25\x77\x08\x45\x3c\x22\xed\xe4\x99\xc6\x15\xac\xe9\x25\x1b\xb9\x0c\x60\x9e\xac\xb8\xf9\x92\x0f\xd7\x41\xab\xf9\x38\xa0\xa6\x3d\x04\xa8\x87\xf0\x9d\x6c\xc5\x42\xd2\x8a\xd3\xa1\x14\xb3\xf4\xb6\x90\x42\x1f\xd2\x5c\x17\xda\x6e\xe4\x41\x5e\x44\xcb\x9d\x07\x9a\xcc\xc0\x40\x41\xfa\x31\x0c\xb0\x68\x6a\x68\x34\x56\xce\x3c\x04\xb9\x54\x24\x24\x9a\xf4\xa0\x45\x11\xf3\x45\x5d\xec\x41\x4f\xcc\xd0\xfa\xfa\x24\xe9\xbb\x10\xda\xe7\x7d\x33\x1
1\xdb\x30\xd3\x85\x31\xd4\x68\x52\x2f\x8f\x20\x9a\x7a\xb2\x40\x0c\x3c\x1b\xb1\x92\x17\xfa\x53\xae\x9d\x49\x0f\xbd\x86\xb5\x1d\x48\xa6\x1e\xe5\x01\xe6\x00\xe7\x58\x0d\x3d\x37\x8c\x5e\x8d\x67\xd5\xde\x3d\x9a\xc7\x75\x0a\x56\x6f\x9a\xc2\xe9\x1c\xcf\x78\xe0\x30\x57\x45\xd3\x1e\xa0\xf7\x35\x8d\x29\x7e\xef\xec\xb5\x83\x49\xd7\x39\x27\xad\x6d\xc9\xbf\x7e\x11\x24\xe7\x14\x77\xc5\xc7\xc7\xa2\x7b\xed\x1a\x3b\x28\x84\x93\x9e\x66\x22\x6d\x20\x1a\x71\x35\x91\x44\xf4\x49\xa4\xb9\x24\xca\xe4\xd3\x3d\x82\xa3\x7d\x51\xd6\x95\x48\x8a\x32\x4e\x3f\x3e\x1b\x95\xd3\x0a\x12\x10\xcd\x26\xf1\xad\x37\xee\xc9\xb2\x24\x84\x86\x70\xb6\x2c\x75\x58\xbb\x9d\x4c\xa6\x36\x77\x8b\x13\xcd\x6c\x54\x41\x6e\xed\x30\xde\xa8\x62\x3a\xa6\x77\xd8\x8e\xd0\xb9\x73\x74\x9b\xc4\xa5\x32\x6e\x94\x33\xeb\xdf\xbf\x9e\xe8\xf8\xcf\x13\x1d\x1f\x13\xfd\x63\xa2\x7f\x4c\xf4\x8f\x89\xfe\x7f\x4d\x74\xed\x6b\xd6\xc5\x5d\xf7\x76\xee\x18\x1f\x4c\x74\x8b\x93\x96\x3c\xf6\xaa\x0d\x47\x1b\x56\x77\x6c\xd6\xfd\x9c\x46\x36\x48\x00\x2c\x98\x16\xc9\x2b\x7f\x57\x60\x62\x7a\xd7\x34\x9e\x55\x98\x4b\xf5\x41\xe1\xad\xb5\xf1\x03\x86\xf4\xdd\x2b\x88\x4e\xff\x47\xf7\x4f\x00\x00\x00\xff\xff\xc5\xb1\xe1\x30\x5d\x14\x00\x00") + +func fontsAvatarFlfBytes() ([]byte, error) { + return bindataRead( + _fontsAvatarFlf, + "fonts/avatar.flf", + ) +} + +func fontsAvatarFlf() (*asset, error) { + bytes, err := fontsAvatarFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/avatar.flf", size: 5213, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsBannerFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x5d\x6d\x73\xe2\x38\xb6\xfe\xce\xaf\x78\x6a\x95\x0f\x3d\x55\x28\x03\x04\xd2\xc9\xd6\xd4\x14\x06\x4c\xc2\x86\x40\x86\x97\xce\x66\xea\x7e\xb8\x4e\xe2\x04\x6f\x88\x9d\xc1\xa6\xbb\x33\x75\xeb\xfe\xf6\x2d\x8c\x5f\x24\x59\x92\x65\xd2\xdd\x5b\x3b\x1c\x40\x3e\xcf\xd1\x79\x3f\xb2\x93\x3c\xad\x9f\x5a\xce\x11\xce\xf0\x19\x9d\x36\x1a\x68\xb6\xd0\xc0\x69\xbb\x76\xef\xf8\xbe\xbb\x39\x7e\x5a\x3f\xe1\xab\xbb\x09\xbd\xc0\x47\x0b\xf7\xef\x98\xbd\x3b\x3e\xee\x82\xed\xc3\x0b\x3e\xbd\xef\x5e\xba\x0f\xe1\xf1\x76\xe3\x3e\x7b\xbe\x73\xfc\xe0\xfc\x52\xfb\x34\xdc\x04\xaf\x70\xb0\xf5\xbd\xef\x78\xdb\x04\xcf\x1b\xe7\x15\x0f\xce\x7a\xed\x3e\x62\xcf\xf4\x97\xda\x08\xce\x2b\xfc\x20\xc2\xc6\x0d\xdf\x02\x3f\xf4\xee\xd7\x2e\x9e\x82\x0d\xb6\xa1\x8b\xe0\x09\xd1\xca\x0b\xf1\x14\xf8\x11\x50\x5b\xac\x1c\xff\x25\x44\x14\xe0\x62\xed\xfa\x3e\xfa\x2b\xe7\xed\xcd\x5d\xaf\xe3\xf5\xbb\x75\x2b\x77\xfd\x56\xbb\x72\x22\xe7\xc5\xf1\x1d\x3c\xac\x9c\x8d\xf3\x10\xb9\x9b\x70\x27\xed\x17\xcf\xf7\xdd\x77\x2c\x56\x8e\x87\xdf\xc2\xf0\xc9\xf3\xa2\xae\xbb\xf1\xc2\xe3\x9d\xd0\xaf\xf7\xc7\xee\xe3\xf6\xf7\x5a\xff\x7d\xe3\xad\xd7\xde\x03\x7b\xed\xd3\x6e\x13\xff\x78\x09\xbc\xb3\xef\x67\xff\x40\x6f\x30\x8c\xc5\x39\xae\x0d\x9c\xc8\xfd\x27\xac\xed\xf3\x36\x8c\xd0\x6c\xd6\xd1\x3c\x3f\x6f\xd7\x6a\xd7\xee\xe6\x79\xb7\xbf\x77\xfc\x2b\x58\xf9\xe8\x07\xdf\x1c\x1f\xbf\x3d\xec\x5e\xba\x0f\x0f\xde\xfa\x38\xd8\x3c\xff\x5e\xbb\x0e\x1e\xbd\x27\x6f\xbf\xee\xc6\xd9\xae\xd1\xdb\x6e\xa2\xc0\xc7\x6f\x61\xb0\xde\x46\x5e\xe0\x77\x5d\x67\x13\xad\xd6\x9e\xff\x72\xec\xbb\xd1\xef\x68\xb6\x7e\x3d\x3f\xdd\x6d\xdd\xf3\x1f\xd6\xdb\x47\x17\xbe\xfb\x0d\x6f\xce\xc6\x79\x75\x23\x77\x53\x0b\xb7\x6f\x6f\xc1\x26\xda\x33\x1c\x8e\x2e\xd6\x6e\x04\xc7\x7f\xdc\x91\xb7\x9e\x7f\x0c\x5c\x3b\xef\x70\xd6\x61\x80\x7b\x17\xe1\xda\x7b\x5e\x45\xeb\x77\xbc\xa6\x52\xec\x14\x78\xef\x46\x91\x1b\xeb\xbd\x16\x3c\xc5\xec\x9f\xb6\xeb\x35\xfd\xe6\x3d\x46\xab\x5f\x5f\xdc\x8d\xff\x6
b\xf8\xba\x0d\x57\x70\xd6\x91\xbb\xf1\x9d\xc8\xfb\xea\x86\x75\xdc\x6f\x23\x3c\xba\x4f\xce\x76\x1d\x21\xd8\x46\x6f\xdb\x08\x5e\x88\xc9\x74\xb1\x53\xa1\xff\xec\x3e\x1e\xd7\x70\x84\xa3\xae\xf9\x7f\xbb\x35\x10\x42\x8e\x0a\x2f\x20\xf1\x12\x00\x92\xef\x90\x5d\xc8\x7c\xc3\x5c\x99\x5e\x7d\x04\xa4\x1c\x18\x12\x7a\x32\xa6\xc9\x8e\xcb\x11\x4f\x92\xfd\x3f\xa3\x4f\x39\x92\x61\x1c\xaf\xdd\x5f\x17\x0b\x4a\x72\x72\xbf\x38\x5f\x00\x71\x01\x29\x2e\xe0\x54\x91\x2d\x26\xe9\xfe\x63\xad\x80\xe1\x96\xcb\x96\xeb\x8a\x64\x18\x8c\x7e\x59\x55\xe4\xb2\x81\xd9\x1e\x2b\x71\x6a\x25\x70\xdc\x52\x13\xc6\x0b\x0a\x8c\xa5\xe6\xce\x85\x55\xbc\x24\xd8\xa9\x38\x29\x94\xec\x95\x95\x35\x05\x4f\x90\x73\x3d\xa4\xaa\x91\xbf\xa6\x5a\xcc\x0d\x99\x48\x90\x7b\x0d\x61\xd8\x94\xf9\x04\xbb\x56\xe1\x77\x8c\x89\x78\x82\x63\x28\xae\x01\x47\x68\x15\xa8\x53\xbc\x84\x17\x43\x30\x12\x28\xd7\x1c\x22\x81\x10\xd4\xfb\x7f\x39\x50\xae\x30\xb9\x27\xb3\x86\xc8\x1d\x40\x1e\xd4\xa4\x68\x09\x92\xc3\xc9\x49\xde\xc2\x44\x16\xd4\xb9\x81\x33\x97\x2c\xda\x48\x63\x50\x79\x76\xe0\xb4\x50\x08\x7e\x66\x93\x44\xb0\x91\x24\x88\x2b\x30\x86\x42\x0b\xf2\xb4\x23\x48\x41\x74\xa4\x20\x1b\xc3\x4d\x9a\xcf\xd2\xc5\xba\x9d\x56\x97\x58\xae\x0a\x39\x63\x95\x43\xa8\x32\x30\x27\xb1\xa9\xdf\x2a\x48\x33\x89\xe5\x5e\xf1\x03\x54\xc1\x5c\x27\xfa\x8a\x09\x63\x14\x33\x4c\x1e\xa7\x8a\xef\x94\x19\xca\x2c\x6d\x71\x29\x5b\xac\x0d\x50\xa7\x7c\x55\xde\x2b\xa6\xbb\xf2\x04\x68\x84\xa8\x90\xd0\x3c\x56\xf7\x69\xa8\xe0\x52\xac\x30\x85\x4f\x35\xe6\x8e\x9b\x04\x19\x59\x48\x34\x2a\x73\x0b\xad\x85\x36\xc7\x8a\x71\x5d\x70\xa9\x42\x48\xe9\xd2\x74\x85\x05\xa6\x9e\xaf\xcb\xae\xa5\x9e\x5f\x2e\x50\x29\x69\x96\x5d\xe4\xf9\x50\x2b\xbc\xbc\x34\xfc\x00\xc6\x42\x18\x54\xd0\x71\x89\x2b\xe8\x6a\x4e\xa9\x0a\xcd\xd4\x2d\xa8\x82\xcd\x49\x9a\x97\x92\x96\x45\x47\x56\xdf\x6a\xde\x9d\x67\x6a\xcb\x2c\xc3\xa8\x32\x37\x06\x5b\x79\x73\x8e\xd2\x7a\x7d\x00\xa9\x70\x23\x56\xfd\x59\xcb\x4d\xf6\xff\xcb\x45\x3c\xc0\x28\x02\x63\x66\xc6\x91\x32\xe6\xe6\x8f\xf2\xe4\x52\x3d\x3e\xa5\x7e\x56\x64\x5c\x25\x6b\xfd\x80\x88\x92\x91\xf2\x51\x8c\x7c\x3c\xcf\x12\x79\xc7\x57\x64\x5c\x9a\x03\x0e\xee\x3b\x39\xd5\x13\x69\xe1\xab\x42\x4a\xdd\xed\x30\xf2\xe0\xac\xc5\xb2\x80\x64\xa8\x34\x92\x58\x72\x62\x50\x4a\x22\x3e\x3b\xd1\x31\x36\x10\xa8\xa4\xee\x1f\xce\xb8\x8a\xf1\x84\x80\x3c\x60\x74\xd4\x84\x74\xb6\x23\x63\x82\x14\x26\x3c\xce\xf5\x73\x92\x91\x85\xdf\x19\x3b\x6d\x16\xc2\x44\x2a\x21\xf7\x6d\x19\x21\x91\x90\x19\x66\x19\x2b\xea\xdb\x5e\x69\x23\x0c\xe1\xc3\x0f\x91\x99\x5d\xba\xca\x73\x42\x76\x56\x90\xbc\x30\x75\x30\x5e\xcd\x78\xab\x58\x2f\x85\xc2\x20\x50\x85\x2d\x66\x97\xc8\xd8\x88\x9f\x49\xbe\x95\x32\x84\xea\x62\x48\x28\xc2\x5f\x61\x2e\xa1\x9e\x32\x60\x28\xca\x25\xa4\x7b\xc9\xb7\x25\x3a\xac\xc6\x50\x70\x95\xaa\x3a\x2c\x58\xb9\x4c\x87\x2a\x2d\x19\x68\x93\x65\x18\x7f\xa3\xfd\x4f\xd1\x61\x39\x3e\x4a\xea\xc0\xad\xe4\x07\x34\x15\xfb\x49\x68\xad\x52\x42\x95\xb8\x03\xd8\xbe\x2f\xeb\x27\x89\x89\xeb\x1a\x30\x64\xfa\xc8\xbc\x1e\xe6\x0a\x31\x61\xa8\xf4\x2f\xb5\x5c\x1f\x8a\x51\x6d\x2c\x1c\x26\x21\x11\x7d\x00\x42\x83\xf8\x01\x09\x8d\xdc\x46\x3c\x67\x90\x68\xc9\x58\x87\x47\xdd\x92\x63\x72\x0d\x61\x18\xec\x1f\xb2\xae\x31\x1b\xf6\xbc\x59\x63\xdd\x32\x86\x59\xac\x10\x43\x87\x2e\x93\x41\x57\x34\xa5\xc6\x60\xd8\x91\x8f\x19\x83\xef\x6f\x98\x56\x29\xbb\x94\x69\xaa\x34\xd9\x85\xbd\x47\xc8\x11\xa4\xf0\x09\xdb\x2a\x16\xfa\xb8\x23\x31\x5d\x2b\x52\x38\x7b\xc3\x80\x14\x09\x22\xff\x8a\xbf\xcd\x90\x8e\x52\x8c\x5a\xd9\x1b\x6a\x62\xeb\x5e\x85\x14\xb9\xfd\xd0\xd3\x3b\xf0\x91\x51\xfd\x58\xac\x7c\x94\x12\x9d\xf7\x63\x8c\xe5\x91\xa0\xe8\xec\x0e\x2d\x3e\xa6\x55\xc3\x38\x9b\xc8\x3e\x33\x
66\x58\xfd\xb0\xa2\x74\x41\xcc\xb8\x79\xda\x00\x26\x53\xda\x9b\xd9\xd6\x15\xe6\x37\x56\xdf\xce\x74\xcd\xea\x5d\x4b\x13\x61\xcc\x67\x26\x23\x09\xbd\x03\x3d\x07\xfa\xd3\x9b\xbb\xd9\xe8\xe2\x72\x81\xf9\xe8\x62\x52\x8b\x17\xa9\xff\xdf\xad\x35\x3f\x9f\x02\x03\xfb\x62\x66\xdb\xc9\x15\x1f\x14\xb3\x64\xfd\x0e\xf1\x0c\x98\x2f\x6f\xec\xd9\xbc\x3f\x1b\xdd\x2c\xb0\xb8\x9d\x9a\x6c\x90\xa7\x2b\x2b\xe7\xec\x04\xb8\x1e\x0d\x06\x63\x1b\x83\xe9\xa2\xc6\x72\x12\x8b\xa7\xf6\x13\x9e\xe8\xd6\x9a\xe7\xa7\xc0\xd8\x5a\x8c\x26\xe8\x5b\x37\xa3\x85\x35\xc6\xd8\x5e\x2c\xec\x19\x2c\xdc\x8e\x16\x97\x18\x8c\x2c\x7b\x66\xcf\x47\xf3\x9f\x97\x6c\x5a\xcd\xb6\x42\x86\xa9\x4c\x86\x9f\x92\x97\x5a\xad\x86\x42\x86\xa5\x56\x06\x21\x7e\x3f\x28\xc3\x49\x2a\xc3\xfc\xda\x1a\x67\x12\xcc\x2f\xad\xd9\x0d\xe6\x3f\x2f\xde\x5b\xad\x33\x29\x70\xd1\x05\x84\xed\x7e\x38\xc9\xb6\xda\xa7\x52\xe4\xa2\xe1\x8d\x91\x65\x78\x92\xe4\xd9\x6a\x7f\x06\x06\xa3\x2f\xa3\xf9\x68\x3a\x49\x72\x47\xce\x4e\x4f\x10\xed\x9a\x6e\xad\xd5\x69\x49\x77\x55\x74\x25\xe9\xae\x24\x9f\x99\xee\xaa\xf1\xbd\xd1\x6e\x34\x81\xfe\xdd\x6c\x34\x1e\x8f\xfa\xa2\x33\x8f\xa6\x7c\xee\xf9\xf1\xf4\x5e\x86\x66\x43\x2d\x83\x95\xa4\xb6\xac\xd5\x27\xd9\x33\x41\x79\xaf\xcb\xcc\x71\x84\xcf\x63\x05\x52\xd8\x7e\x53\xb3\xfd\x9e\x5d\xe4\xc7\x24\x44\x36\x37\x43\x0d\x48\xa4\xd1\x1b\x63\xb7\xd4\xd8\x5f\xec\x6a\xac\x2b\x63\x9f\xa8\xb1\x2f\x2e\xcd\x36\x6e\x4e\x8a\xe0\x6d\x35\xf8\xc0\x4e\xba\xf0\x6c\x48\x85\x29\x29\xb7\xbd\x88\xdd\xd1\xf8\xfb\x21\x06\x57\x2c\x90\x62\x9f\xaa\xb1\xff\xdc\x2b\x1d\xb9\x7b\xb3\x93\x14\x3b\x98\xb1\x87\xb6\x44\x24\x59\x0e\x02\xf8\x67\x0d\xb8\xcd\x17\x48\x41\x7b\x5c\xe7\x21\x7c\xca\xae\x55\x7b\xdb\x99\x46\xe9\x05\xe7\x25\x32\x92\xe4\x2e\x2d\x75\x74\xa5\xbd\xcf\xd5\xd0\xf3\xcb\xe9\x6c\x91\x08\xc0\x4e\xd0\x52\x59\x0e\x16\xc0\x52\x0b\x70\x65\x49\x00\x33\x3b\xb2\x4f\x8f\xe5\x5e\xa6\x0d\x78\x11\xbb\xa7\xc6\xb6\xc7\x69\xa0\x89\xe9\xb4\x0a\xa9\xc1\xee\x6b\xb0\xaf\x35\x19\x4b\xaa\x73\xf9\x5a\x25\xf6\x40\x83\x3d\x31\xe0\x57\x5a\x46\x34\xd8\xb6\x1a\x7b\xaa\x0a\xb3\x2a\xa4\x26\xcc\x86\x6a\xe8\x1b\x49\x6e\xfb\x10\x29\x60\xb7\x34\x35\xdc\x9e\x1d\x50\xcc\x2a\xd4\x93\x96\xa6\x88\xdb\x73\xad\xce\x4b\x0b\x58\x99\xce\x5b\x9a\x22\xbe\xb0\x85\x12\xcc\x4c\x6b\x07\x92\x3c\xb6\xa6\x88\x2f\x8d\x3d\x49\xcc\xf3\x66\xdb\xd6\x94\x70\x7b\x98\x9e\xd8\x41\x60\xc2\x87\xb4\x84\xe4\x01\x55\xdb\xd6\x94\xf0\x4b\x4b\xdc\x00\x88\xb6\x8a\xb2\x05\x95\x48\x92\xab\x88\xad\x29\xe1\x8b\xb9\x5d\x48\xd1\x07\x91\x42\xff\x90\xf5\xc9\x2d\x4d\x09\xef\x27\xfd\xc3\x21\x16\xd7\x90\xcc\xce\x35\x35\x7c\x9e\xa8\x5d\x6f\x5c\x03\x52\xd5\x39\xb5\xb4\x55\xbc\xff\x93\xe0\x73\xc5\x6b\x6a\xf8\xa5\x35\x1b\xe4\x13\x61\x9e\x3b\x32\x12\x1c\xc9\x36\xb6\xd2\xca\xaa\x0a\x38\x4d\x29\xbf\xb3\x67\x26\xe1\xbe\xef\x23\xf2\x4e\x43\x42\x12\xc9\x8f\x6f\xc4\xe8\x9a\x62\x3e\x9f\x0e\x17\x99\x02\x4a\x53\xea\xc1\x13\x4b\x4b\x57\xd3\x8d\xda\xd7\xc3\x93\x9d\xa6\xa4\xdf\x2d\x0b\x7d\x9b\xdc\xb3\xca\xbc\x10\x0a\x6c\x4d\x4d\xbf\xb3\x4c\x5a\x38\xee\x46\x99\x7e\x7a\x16\xb0\x4f\xd8\x9a\xce\x9f\xf5\x28\x6f\x37\x10\xee\x86\x06\x21\x92\x12\xc0\x7f\x2a\x05\x6e\xaa\x80\x7b\x76\x01\x99\xe5\x66\xe8\x70\x6a\x53\x9f\xb4\x54\xc8\x5f\x8a\xc8\xd0\x37\xe8\x72\x57\x57\x22\x9f\xa8\x90\xe3\x79\x5c\xa3\x6e\x22\xce\x64\x62\x2c\xa0\x4c\xdd\x6d\x15\xf4\x40\x44\x96\xc2\x49\x5d\xd0\x4c\xdd\x1d\x15\xf2\xc8\x64\xcf\xa5\xed\xa3\x06\xf9\x54\x85\xfc\xa7\x5a\xdd\xe5\xf3\xb9\xc9\x24\x7e\xf2\x59\x09\x5d\x71\xd3\x09\xb4\xb9\xba\xcf\x94\xea\xd6\x6c\x19\x07\xcc\xc3\x22\xf0\xb9\x0a\x38\x9b\xc1\x79\x78\x2e\x94\x3f\x0e\x6f\xa9\xe0\xaf\x94\x99\x4c\x35\x97\x97\x0c\xe3\x22\x72\x4f\x85\xbc\x9f\xbf\xa5\xb6\xae\
x30\x95\x6b\x90\xfb\x4a\xe4\x6b\xa3\x3d\x9b\x0f\xe2\x22\xf2\x40\x89\x3c\x31\xd4\xb6\xe9\x18\x2e\x22\xdb\x2a\xe4\x69\xd5\x6c\x52\xad\x43\x38\x19\xaa\x80\x6f\xf4\x65\xa3\xfa\x49\x80\x80\xdc\x56\xd6\xe8\xdd\xd4\x5d\x0d\xb9\x64\x00\x17\x91\x95\x45\xda\x9e\x9b\x6a\x5b\x3b\xe4\x2b\xb5\xdd\x56\x16\xe9\x85\x89\xb6\xa1\x98\xad\x0d\x26\xee\xb6\xb2\x48\x2f\xab\x7a\x76\xd9\x4c\x96\xca\x9b\x00\x2b\x4b\x74\x32\x6d\x33\xc8\xc6\x83\xb7\xd1\xb4\xdd\x56\x96\xe8\x4b\x93\xdc\xa9\x98\xc0\x4d\x66\xed\xb6\xb2\x44\xc7\x93\xf6\x81\xea\x16\x1a\x50\x48\x07\xbe\xb6\xb2\x44\xf7\xb5\xdd\x81\xb9\x79\xa1\xdc\xb5\xb2\x46\xcf\xb5\x0a\xff\x01\x53\x76\x5b\x53\xa5\xfb\x3f\x09\x3c\x57\xb9\xb2\x46\x33\x13\xb6\x32\xb6\x51\x61\xd8\x56\x65\x15\x65\xa9\xde\xcf\xd7\xd5\xfc\x4d\x3e\x6a\xab\xe6\xeb\xb6\xb2\x58\x33\xd3\xb5\x46\x00\xe3\x41\x5b\xb9\x79\x75\xcd\x36\xca\xa8\xa5\x63\xb6\x12\x58\x59\xb2\xef\xd4\x3a\x67\xda\x5e\x10\xd9\x90\xcd\x7c\x9a\xaf\x15\x91\x95\x35\xfb\x4e\x3d\xde\xca\x5b\xb3\xd2\x01\x88\x47\xee\x28\x2b\xe7\x28\xef\x53\x58\x68\x46\xdf\x1c\x43\x30\xfb\x66\x27\x7c\xa8\xe9\x9d\x04\xad\x56\xf3\x1c\xe8\x2d\xc7\x63\x7b\x81\xe9\x8d\x3d\xb3\x16\xd3\x19\x7f\x3c\x50\x46\xca\x92\x88\x9c\x4c\x00\x2d\x60\xfe\xc7\xd2\x9a\xd9\x98\x4d\xa7\x8b\xb4\xc7\x3f\xea\xb2\x14\x5f\x8e\x24\x9f\x71\xbc\x59\x6a\x0f\xb2\x4b\x9d\xd6\xf8\x7a\x3a\x5f\xc0\xfe\x63\x69\x8d\xb1\x98\xea\x2f\xab\x89\xea\x15\xc0\x8b\xd4\x1e\xe8\xb4\x0d\x8c\xed\xf9\x9c\x2e\x2e\xad\x09\xa6\x33\x06\x2e\xeb\x1c\xb4\x44\xf9\x4f\x97\xc7\x30\x1d\xe0\x62\x66\x5b\x0b\x7b\x26\x41\x12\xda\x94\x52\x5a\xe5\x3f\x72\x3a\x96\xe0\xa4\xd5\x00\x16\xd3\x1b\x5c\x5a\xe3\x21\x46\x93\x85\x7d\x31\xb3\x94\x73\x8b\xc8\xbf\x10\x96\x25\x5e\x95\x40\x36\x81\xde\x74\xb1\x98\x5e\x8b\xa8\x95\x54\xab\x5c\x13\xa3\x74\x1a\x8d\x1d\xca\xbf\x31\x98\x59\xb7\xa3\xc9\xc5\x1c\xe3\xf8\x39\xc2\xcb\xe9\x6c\xf4\xe7\x74\xb2\xb0\xc6\x35\x66\x08\xe5\xe3\x9f\x2d\x34\x79\xd6\xcd\x5b\x8b\xac\xca\x14\x3d\xb4\xd3\x68\x49\x61\xbf\xd8\xb3\xc5\xa8\x9f\x6b\x56\xeb\xa3\xa4\x08\xaa\x0d\x8b\x4e\xa3\x2f\x05\x1d\x4c\x6f\x27\xb0\x26\x03\xc4\x8f\x50\x9a\x43\x83\x4b\x6b\xac\x3f\x15\xa1\x9b\x72\x35\x67\xd0\x63\x7b\x98\x3e\x9e\xc8\x67\x4d\x46\x9f\x79\x93\xc1\x37\xf0\x5c\xc2\x2d\xac\x47\x92\x95\x33\x49\xda\x52\x49\x96\x37\xac\x0a\x8c\x05\xe1\x5d\x97\x69\x74\xb9\xa3\x0a\xb6\xcd\xc9\x05\x39\xd3\x09\x92\x29\x24\x61\x93\x70\x60\xde\x49\x5f\x58\x95\xcb\xad\x9d\xba\x58\xd1\xe2\x30\xd7\x29\x77\x4c\xcd\xd6\x3f\x65\x06\xe9\xb4\xe4\x7a\xe7\xc4\xd9\x6f\x9a\x4f\x25\x06\xa8\x1a\xcb\x2b\xa5\x29\x09\x05\x2e\xfe\xf3\x5f\x90\x91\xc8\x51\xe5\xfd\x1e\xef\x44\xeb\x75\x22\x1a\xcb\x41\x78\xb6\xe7\x83\x9f\x27\xd2\x18\xb8\x06\x2b\x13\xe7\xea\xc2\x81\x10\xe1\xcf\x89\x8a\xa1\x51\xba\x7e\x2f\x53\x47\xcc\x10\x83\xe9\xb2\x37\xb6\x25\x72\xe4\x9c\xf2\x7c\x20\xa6\x42\x42\x54\xf9\x48\xcc\x4c\x9d\xa6\x1c\x57\x9f\x8a\x15\x09\x98\xdb\xa7\x9c\x4a\x50\xc5\xfc\x1f\x7b\xde\x7c\x34\xb9\x18\xdb\x79\x64\x26\xb2\xa8\xdb\x39\xbe\xe5\x13\xc5\xab\x54\xe2\x3b\x9d\x13\x99\x48\x89\x36\x72\x91\xf6\x32\xd6\x40\x74\xcf\xb8\x16\x8b\xb0\xf2\xa9\xd7\x18\x5a\x8c\x8e\x04\x55\x5e\x99\xaa\x75\xc4\x95\xd5\xd0\x29\xb1\xcc\x2e\x49\x71\x86\x61\x4f\x35\x14\x77\xee\x8b\x9f\xca\x2e\x83\xa0\x95\xd3\x12\x83\xc4\x92\xa4\xf6\xa8\xda\xed\x1d\xa4\x9b\xcf\x25\x76\x4a\x13\x78\xd9\xf0\x51\xf4\xe2\xca\xa2\x88\xd5\x73\x79\xa3\x0b\x9f\x0f\xf9\x4d\x49\x47\x9d\x08\x74\x5e\x14\x48\x19\x3c\xe5\xbb\xd4\x09\x64\x34\xda\x75\x3a\x96\xdc\x58\x85\x4e\x47\x99\xaa\x6a\x05\x1b\x19\xa6\xd4\x9e\xd6\x36\x5c\x00\x99\x75\xee\x46\x63\x52\xa7\x23\x96\x35\xde\x04\x62\xb8\x08\xac\xb4\xe9\x4b\x3b\x43\x74\x06\x5a\x4d\xb3\x5d\x4d\x99\x02\x85\xca\x52
\x56\x45\x6c\x01\x38\xab\xe0\x95\x2b\x89\x59\x6c\x1a\x7a\xde\x50\x25\x96\x2e\x20\x4c\xb2\x57\xe9\xe7\xba\x08\x3d\x55\x74\x18\x8a\x8e\xd8\xe0\x10\xa0\xd2\xa9\x44\xe7\x54\xec\x34\x64\xd6\x12\xaa\x8b\xc2\xdf\x4b\xdc\x56\xe6\xa4\xa7\x62\xc7\x21\x33\x4a\x31\x44\x0e\x4c\x52\xe5\x74\x22\x55\xb1\xe9\x28\xda\x24\x9d\x85\x08\xf3\x20\xf4\x47\xa9\x04\xbe\xd8\x78\xf0\xc5\x3e\xef\x3e\x99\x8c\x95\x6f\xa5\xfa\x6b\x82\x2b\x6d\x32\x18\x43\x30\xb8\x6c\xc3\x65\xae\xde\xaa\xe6\x4b\xe4\x2a\xb6\x1c\x7c\x81\xe7\x27\x15\x7e\x14\xe6\x29\x10\xd9\xa9\x49\x69\x8c\x88\x1d\x06\x5f\x3a\x24\xe6\xc8\xd9\x1c\xa9\x46\x64\xc9\x92\x04\x4d\xd2\x44\xe8\x8d\x20\x45\xe3\x41\xd5\x68\x62\x87\xc0\x97\x09\xfe\xc8\x89\x3f\x37\xe0\x0f\x33\x8a\x34\x61\x7e\x4c\x00\x2c\xcd\x78\x7e\x7e\xdb\x2d\x11\x47\xec\x0f\x64\xe9\x48\xe1\xff\x6a\xb3\x4b\x5d\xa1\xdc\xec\x62\xc7\x20\x4b\x4e\xf2\x98\x60\x4e\x2d\xd2\xdd\x17\xfb\x09\x7d\x12\x4d\x64\x10\xbb\x07\x59\x2a\x12\x8d\x54\xcc\xc5\x65\x84\x1c\xfc\xac\x01\x2c\x6f\x6e\xe2\x9b\x5c\xe3\x21\x7a\xe3\x69\xff\x4a\x7f\xd2\xa3\x7e\x49\x38\xb6\x81\xf1\xf4\x56\xe0\x78\x50\x5d\xd3\x1f\x82\x77\xce\xce\x80\xe1\x72\x3c\xd6\x61\xc8\xf8\x55\xc2\xe8\x63\x5f\x9c\xf8\xcd\x10\x6e\x9e\x52\x51\x7c\x07\xa3\xa2\xf6\x40\xe7\x0d\x24\xad\x89\xa0\x36\x21\x65\xaa\xc9\x62\xc3\xa4\x22\x13\xc4\x26\x92\xf3\x97\xf9\xa5\x95\x3f\x67\x77\x54\xec\x0a\x3f\xa0\xbf\xf3\x16\x70\x6d\x0f\x46\xcb\xeb\x1c\x45\xaf\x28\xe0\x00\xe5\x9d\x00\x03\x6b\x76\x95\x62\xfc\x88\x52\x69\xed\xba\xb6\xb1\xd5\xbf\x4a\x6e\x1b\xa9\xb4\x4a\x84\x7b\xf8\x87\xdc\x9e\x3a\x69\x58\x2d\xc0\xca\xeb\x28\x7f\x72\xcb\x04\x30\x77\xd6\xce\x3d\x19\x43\x58\xde\x84\x45\x22\x12\x5c\x16\xba\x8d\xfc\xf1\x37\x59\xea\x27\xcc\xef\x9a\x87\xf8\x7b\xe2\xca\x69\x16\xea\x14\xe9\x73\x2a\xe2\x9d\xe2\xcc\x04\xe9\x35\xac\xee\xf8\xbb\xbc\x42\x9b\x9d\x57\x1f\xed\x2e\xcf\xc0\xdc\x49\x06\x5b\xa6\xd4\x0a\x36\x7b\x27\x33\x1a\x07\x6d\x81\x79\xfc\x8b\xa8\x2f\x12\x2a\x2c\xff\xf7\x0f\xb8\x47\x8b\x85\xa3\x66\xa2\xdc\x75\x0f\xf9\x13\x86\x1a\x81\x09\xa7\x71\xee\x1d\x61\xad\xb1\x17\x84\xb0\x7e\xa5\xc6\x1e\x00\x57\xa3\xe4\xf7\x5e\x31\xae\x98\x24\x91\x0a\xbf\xdb\xb1\xe8\x10\x19\xc6\x10\xb8\x5a\x4a\xfc\x9f\xdd\x1f\x61\x19\x90\x1f\xe6\x51\xbd\x26\x70\x65\x9b\x60\x67\x1d\x41\xc1\x89\x0c\x23\xb8\x88\x7d\x02\x5c\x4d\x45\x77\x56\x38\x62\xc5\x77\x19\x13\x15\x76\x07\x98\x5b\x99\x8c\x0a\x7f\x26\x45\xdf\xe0\xde\x11\x55\x60\x11\xed\xbe\x3f\x03\xf3\x11\x3e\xcd\x2f\x47\xbf\xb0\x5a\x4a\x4d\x9b\xba\x24\x6b\x77\x1e\x96\xc5\x65\x8d\xcd\x5b\x3b\x7b\x23\x0a\x92\x4b\x72\x0e\xcc\x97\xd2\xc9\x87\x8d\x68\x49\xf7\xcc\x44\x36\x61\xe3\xa1\x68\x8f\x0c\xab\x07\xec\x1f\x38\xe3\xbd\x82\x68\x6c\xcf\x69\x42\x73\x5d\xe6\xb0\x2a\x8d\x0f\x80\xf9\xb4\x26\x0d\x22\x3e\x47\xf3\xc6\xe5\x74\xcc\x69\x98\xd3\x2f\x07\x9a\x61\x0e\x81\x85\xc5\x78\x03\x17\x58\x5c\xee\x49\x53\x09\x8b\x49\x0e\xc1\xec\x37\x81\xc5\x08\x9f\xfa\x89\x67\xa5\xc2\x27\x76\x22\x84\xc8\x3d\x56\x12\x73\xea\xa2\xc1\x39\x5c\x41\x82\x36\xb0\x58\xe2\xd3\x62\xbe\xfc\x25\xcb\x9a\xf9\x7d\xb4\x9f\xa8\xef\xfe\x29\x92\x07\x57\xd3\x8d\x8a\x12\x56\xdc\x27\x64\x81\xa5\xd8\xf5\x19\xf6\x8f\xad\x30\x39\x32\x95\x38\xdd\x7a\x1a\xdc\xdc\xf7\x0c\xc7\x8c\x97\x05\x4c\x0c\xaa\x9d\xa9\xe4\x5c\x2e\x2a\xa9\x06\xfd\x1e\x30\x19\x15\x1b\x0c\xfe\x9d\x5a\xbf\xa5\xda\xd6\x61\xf7\x81\xc9\xb2\x34\xff\x27\x29\x39\xdf\xa9\x38\xd1\xb3\x16\x23\xbc\xe7\x28\xab\x7c\x7f\x00\x4c\x6c\xb3\x96\x88\xab\xb9\x44\xa6\x93\xb4\xd3\x51\xda\x8a\xc7\xb6\x81\x09\x53\x05\x79\xb0\x9f\x14\x2b\x43\xb0\xcf\x1d\x17\x56\xa5\x9a\xe6\x7e\x5d\xa0\x58\x91\xd8\x1c\x2a\x65\xc0\x63\x0e\x5a\xc0\xe5\xa8\xc6\xe7\x7b\xe4\x93\x01\x61\x7f\xee\x53\x58\x23\xa3\x25\xd
3\x49\x8e\xd5\x01\x2e\x97\xf8\x34\x8c\x93\x90\xb6\xb8\x19\x14\x3a\x41\xa5\x52\xc0\x33\xe0\x52\xd2\x99\xf3\xef\x38\x0e\x84\xe3\x2d\xa0\x26\xf2\x18\x05\xce\xa0\x07\x5c\x4e\xab\x25\x0c\x52\xec\x51\xd9\x5f\x5e\x4d\x38\xbd\x68\x9c\x77\x60\x03\xd7\x96\x59\xe0\x13\x45\x1b\x45\xd4\x6d\x94\xd8\x62\xf1\xd8\x43\xe0\x7a\x54\xe3\x0f\xe9\xd8\x9f\x67\x62\x4a\x69\xce\xa1\xec\xdb\x3d\x6f\xbb\x01\x5c\x2f\x2b\xa5\x56\xd9\xb8\x2a\xb6\x95\x90\xa6\x34\x11\xbb\x09\x5c\xdb\xfa\x1c\x20\xf4\x0a\x9c\x96\xf8\x1c\xc8\xa7\x40\x31\x03\x66\x98\x2d\xe0\x7a\x2a\x4b\xee\x44\x63\x55\xbe\x11\xd0\xbd\x83\xa6\x39\xb3\xdb\x88\x9f\x15\x16\x62\x80\x08\x4d\x26\xa3\x6b\x52\x68\xb0\x45\x34\x53\x1f\xb2\x4f\xc1\x3e\x21\xad\x2b\x71\x06\xef\xaa\x15\x3c\xfb\x0c\xb8\x2b\x8c\x3f\x32\x6c\x4d\x7c\x94\x14\x2b\x25\xf6\x39\x30\xb3\xaa\x34\x4b\x59\xa6\xaf\x7a\xa4\x51\xc4\xb6\x80\xd9\x88\xf9\x65\xb7\x1a\x82\x77\xf9\x23\xf6\x64\x9a\xe7\xd9\x03\x66\x4b\x26\x18\xf8\xd0\xd0\xbd\x23\x4c\x46\x64\x4b\x5c\x7e\x5e\x24\xee\x4e\xdc\x4f\x1f\x98\xd9\x66\x45\x2b\xf5\x71\x36\xd3\xee\x69\xb3\x3a\x63\x0f\x80\xd9\xb4\xa6\x30\xd4\x91\x38\xd5\x68\xde\x48\xbd\x45\x9e\x1b\x86\xc0\x6d\xe1\x58\x4f\xea\x0f\xca\x1c\xcf\xfb\xb0\xd8\xa6\xa9\x7d\x65\xd8\x00\x6e\x47\x92\xfa\xc5\xe7\x53\xa5\x95\xf5\x55\x50\x5f\xdb\x86\x4d\xe0\xd6\x56\xa8\xaa\xf8\x46\x96\xe3\x25\x6f\xe4\xa5\x20\xc3\x6c\x01\xb7\x53\xb3\x43\x10\x81\x5f\xa9\x76\x89\xbc\x59\xcb\xb1\x4f\x80\x89\xb8\x29\x0e\x98\x14\x36\xf5\x81\x3e\x94\x36\xbe\x37\x1a\x8d\x36\x70\x65\x2d\xac\x6b\xeb\x86\x15\xc9\xf8\x5f\xb7\xe6\x50\x0b\x1e\xed\x61\x4b\xfb\x70\xe9\x00\x01\xb5\xf1\xe2\xd0\x21\x5e\x3c\x7a\x81\x97\x2d\xbd\xc4\x8b\x4b\x47\x78\x09\xe8\xbf\xf2\xab\x42\x87\x5e\x21\x5c\x79\x74\x8c\x70\x4b\xaf\x11\xba\x74\x82\x30\xa0\x53\x44\x0e\xbd\xc1\xc3\xca\xa3\x7f\x20\x0a\xb7\x74\x86\xc8\xa5\x73\x44\x01\x5d\x74\x6b\xbe\x43\x97\xf0\x3d\xfa\x05\xfe\x96\xde\xc2\x77\xe9\xbf\xe1\x07\xf4\x0e\x2b\x87\xfe\x89\x95\x47\x1d\x3c\x6d\xe9\x3d\x56\x2e\x7d\xc0\x2a\xa0\x8f\x31\xd6\xab\x43\x5d\xbc\x7a\xf4\x09\xaf\x5b\xfa\x8c\x57\x97\xae\xf0\x1a\x50\x0f\xef\x0e\xfd\x0f\xde\xb7\xf4\x05\xdf\x5c\xba\xc6\x7b\x40\x5f\x73\x09\x37\x0e\xf5\xb1\xf1\x68\x80\xcd\x96\xbe\x61\xe3\xd2\xbf\xb0\x09\xe8\x06\xdf\x1c\x1a\xe2\x9b\x47\x23\x7c\x0b\xe8\x56\xd0\x86\x4f\xbf\x1e\xa0\xc3\x83\x34\x9f\x5a\xf0\x74\xd7\x39\xcc\xfb\xd3\xdb\x9d\x0d\x1d\xea\xd4\x71\x4f\xef\xeb\xf8\x4a\xbf\xd6\xf1\x4c\x9f\xeb\x78\xa4\x8f\x75\xb8\xd4\xad\xe3\xef\x15\xfd\x4f\x1d\x7f\xd3\xbf\xeb\xf0\xa8\xd7\xad\x85\xab\x60\x13\xc1\xa3\xbf\xd7\xf1\x42\x5f\xea\x58\xd3\x75\x1d\xaf\xf4\xb5\x0e\x9f\xfa\x75\x04\x34\xa8\xe3\x8d\xbe\xd5\xb1\xa1\x9b\x6e\x2d\xa4\x61\x1d\x11\x8d\xea\xd8\xd2\x6d\x1d\x4f\xf4\xa9\x8e\x97\x15\x5d\xd5\x11\x85\xf4\xaf\x3a\x1e\x56\xf4\xa1\x8e\x70\x45\xbf\xc5\x16\x5e\x3d\xac\xe8\xf7\x3a\x56\xce\xe6\x91\xfe\x4f\x1d\xef\xee\x66\x4b\xff\xaf\x8e\x30\x78\x8a\xe8\xaf\x75\x6c\xdc\xaf\xee\x26\x74\xe1\xd2\xff\x47\xb7\xf6\xbe\xa5\xff\x5b\xdf\x19\xe4\xdd\x44\x5f\x7d\xe7\xcd\x8b\x9c\x75\x18\xff\x11\xee\xb1\x13\x79\x3e\x1e\xf6\x1f\x61\x1d\xff\x95\xe8\xb0\x0e\xf7\xfb\x83\xfb\x16\xfd\x13\xdd\xda\x2c\x41\xb2\xe9\x6f\x75\xdc\x6d\x69\x57\xcb\x7b\x12\xec\x78\x85\xf1\x9f\x9c\x4e\xf4\xb3\xdf\xc4\x7e\x0b\xfb\x0d\x1c\x67\x36\xf8\x6f\x00\x00\x00\xff\xff\xf9\xa0\x34\x27\x95\x7c\x00\x00") + +func fontsBannerFlfBytes() ([]byte, error) { + return bindataRead( + _fontsBannerFlf, + "fonts/banner.flf", + ) +} + +func fontsBannerFlf() (*asset, error) { + bytes, err := fontsBannerFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/banner.flf", size: 31893, mode: os.FileMode(436), modTime: 
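Each font repeats the same three-part pattern: a package-level gzipped byte slice, a *FlfBytes accessor that inflates it, and a *Flf constructor that attaches file metadata. A hypothetical same-package caller of the banner accessor above (the function name here is ours, not part of the diff) could look like the sketch below; note that os.FileMode(436) is simply decimal notation for octal 0664, i.e. rw-rw-r--:

func printBannerFontInfo() error {
	a, err := fontsBannerFlf() // inflates the blob above and wraps it with metadata
	if err != nil {
		return err
	}
	// Expected to report fonts/banner.flf with 31893 decompressed bytes and mode -rw-rw-r--.
	fmt.Printf("%s: %d bytes, mode %v\n", a.info.Name(), len(a.bytes), a.info.Mode())
	return nil
}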
+
+var _fontsBanner3DFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff ... \xff\xff\x31\xb1\xff\x2e\xed\x23\x00\x00") // compressed payload elided
+
+func fontsBanner3DFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsBanner3DFlf,
+		"fonts/banner3-D.flf",
+	)
+}
+
+func fontsBanner3DFlf() (*asset, error) {
+	bytes, err := fontsBanner3DFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/banner3-D.flf", size: 9197, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsBanner3Flf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff ... \xff\xff\x81\xfa\x12\x5c\xf4\x1c\x00\x00") // compressed payload elided
+
+func fontsBanner3FlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsBanner3Flf,
+		"fonts/banner3.flf",
+	)
+}
+
+func fontsBanner3Flf() (*asset, error) {
+	bytes, err := fontsBanner3FlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/banner3.flf", size: 7412, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsBanner4Flf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff ... \xff\xff\xb5\x05\x67\x1b\xf4\x1c\x00\x00") // compressed payload elided
+
+func fontsBanner4FlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsBanner4Flf,
+		"fonts/banner4.flf",
+	)
+}
+
+func fontsBanner4Flf() (*asset, error) {
+	bytes, err := fontsBanner4FlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/banner4.flf", size: 7412, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsBarbwireFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff ... \xff\xff\xef\x0e\xd2\x08\xe7\x20\x00\x00") // compressed payload elided
+
+func fontsBarbwireFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsBarbwireFlf,
+		"fonts/barbwire.flf",
+	)
+}
+
+func fontsBarbwireFlf() (*asset, error) {
+	bytes, err := fontsBarbwireFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/barbwire.flf", size: 8423, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
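The asset and bindataFileInfo types populated by these constructors are declared elsewhere in the file; assuming the stock go-bindata template, they look like the sketch below, with bindataFileInfo satisfying os.FileInfo so embedded assets can stand in for real files:

type asset struct {
	bytes []byte
	info  os.FileInfo
}

type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

// bindataFileInfo implements os.FileInfo over the embedded metadata.
func (fi bindataFileInfo) Name() string       { return fi.name }
func (fi bindataFileInfo) Size() int64        { return fi.size }
func (fi bindataFileInfo) Mode() os.FileMode  { return fi.mode }
func (fi bindataFileInfo) ModTime() time.Time { return fi.modTime }
func (fi bindataFileInfo) IsDir() bool        { return false }
func (fi bindataFileInfo) Sys() interface{}   { return nil }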
+
+var _fontsBasicFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff ... \xff\xff\x8d\x92\x02\x2a\x14\x1e\x00\x00") // compressed payload elided
+
+func fontsBasicFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsBasicFlf,
+		"fonts/basic.flf",
+	)
+}
+
+func fontsBasicFlf() (*asset, error) {
+	bytes, err := fontsBasicFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/basic.flf", size: 7700, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsBellFlf =
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x58\x7b\x6f\xe4\xb6\x11\xff\x5f\x9f\xe2\x87\x60\x01\x25\x80\x44\x9e\xbd\x97\x1e\x6e\x71\x48\x37\x0f\x5c\x7b\xed\xf9\x02\xc4\x09\x8a\x02\x44\x29\x5a\x4b\xef\x2a\xb7\x96\x0c\x49\x7b\x86\x03\xa2\x9f\xbd\x18\xbe\x44\x69\xed\xa4\x3d\xd4\xde\x95\x28\x8a\xf3\xfa\xcd\x70\x66\xb8\xb7\xc7\xdb\x4b\xb5\xc2\x9f\xf0\x35\x2e\xbe\x46\x79\x81\xcb\x75\x76\xa3\x8f\x47\x76\x7b\xbc\xcd\xde\x77\x0f\xba\x47\xad\x06\x8d\xa3\x1e\x47\xdd\x0f\x05\xee\x4f\x6d\x3d\x9e\xd4\xd8\x74\x6d\x01\xd5\xee\x70\xd7\x0c\xb5\x3e\x1e\x55\xab\xbb\xd3\x80\xfa\xa0\xda\xbd\x1e\x70\xf3\x88\xbf\xeb\x76\xc4\x07\x35\x0c\xba\x2d\x90\xe1\xb3\xfe\x3e\xea\x76\x6c\xb7\xf5\xe3\x8d\xee\x87\x7b\x55\x6b\xd6\xf5\xfb\xcf\x63\xf5\x5e\x0d\xa3\xd7\x6e\x83\x8b\x17\xfc\xe2\x82\xbf\x7e\x99\xfd\x72\x7f\xbf\xb0\x10\x5d\xdf\xec\x9b\x56\x1d\x8f\x8f\x64\xc4\xdf\xba\xe1\x70\x52\xf8\x4e\x1f\x8f\x9b\x2c\xfb\xa0\x1f\x86\x7d\xdf\x9d\xee\x87\x0d\xd4\x71\x64\x6a\xa8\x9b\xa6\x54\xfd\x98\xbd\xed\xbb\xbb\x0d\x7e\x1d\x08\xbb\xad\xaa\x07\x76\xaa\xd5\x71\xaf\xfa\x47\x56\x2b\x7c\x99\x70\xf9\x2a\xbb\x3e\xdd\xfc\xaa\xeb\x71\x83\x7f\xea\x11\xdf\xb6\xdd\x78\xd0\x3d\x3e\x74\x6d\xf9\xb6\xd9\x1f\xf5\x88\xb7\x5d\x3b\x66\x57\x7a\x18\xd4\x5e\x97\xef\x7e\xd8\xe0\xcd\x95\xea\x2f\xd6\xec\xc5\xab\xf5\xcb\xf5\x25\xbb\x5c\xaf\x5f\xad\x97\x22\xbe\xc9\x7e\x50\xa3\xde\xe0\xfa\xd4\x16\xb8\x58\xe3\x4a\xf5\xb8\x78\xfd\xfa\x25\x5e\xbc\xda\xac\x5f\x6e\xd6\x97\xf8\xcb\xd5\xcf\xd9\x8f\xfd\x5e\xb5\xcd\x6f\xd6\x79\x1b\xfc\x7c\xd0\xf8\xa5\x6d\x3e\xe9\x7e\x68\xc6\x47\x74\xb7\xf8\xde\xf1\xcb\xde\x37\xad\x1e\x36\x58\xbf\xc8\xae\x47\x35\x9e\x86\x0d\x7e\xfa\x31\xcb\xde\xe5\x9f\x34\x0e\x6a\x87\xf1\xd0\x0c\xf8\xd8\xd4\x1f\x9b\x76\x0f\xd5\x77\xa7\x76\x87\xdb\xae\x87\xc2\xc3\xa1\x39\x6a\x86\x77\xb7\x50\xed\x63\xd7\x6a\x3c\xa8\x76\x1c\x30\x76\x50\xbb\x5d\xd6\xb4\x70\xc6\xd6\x07\xd5\xab\xda\xa2\x4d\x11\xe4\xec\x2e\x9b\xdf\x34\x9a\xb1\xc0\xbe\x43\xdf\xec\x0f\x23\xd4\x41\xab\x1d\xcb\x32\x87\x5e\x56\x96\x99\x09\xde\xfc\xe2\xaf\x6a\x40\x33\xe2\x46\xeb\x16\x0a\x37\x6a\x87\x47\xad\xfa\x02\x5d\x8f\x87\x83\x1a\xff\xfc\x05\xad\x2a\x71\xa5\xfb\x46\xed\xba\x1a\xdf\xf5\xaa\xdd\x3d\xde\x9c\xea\x8f\x45\xe0\x61\x26\x76\x00\x4e\xf7\x5d\x8b\x5e\x8f\xa7\xbe\x25\xb3\xc6\x0e\xe3\x41\xe3\xfa\xd0\xf4\x1a\xea\x76\xd4\xbd\x7d\xfe\x87\xea\x09\x28\x1a\xfe\xd4\xb4\x7b\x96\x30\x20\x76\xcf\xf8\x1f\xdf\xd6\x6a\xa7\xef\x9a\x1a\xdf\x77\x77\xf7\xa7\x91\x04\x5c\xeb\xfe\x53\x53\xeb\xa1\x78\xda\x07\x30\xd9\x0a\xc0\x6a\xfb\x5f\xdc\xb6\xd9\x8a\x6f\xb3\x95\x49\xbe\xd5\x36\x5b\xe5\xf4\xa2\x28\xe8\x2d\xbd\x76\xeb\xd3\x8b\xa5\x67\xf4\xb1\x13\xa5\x29\xe9\x63\x67\x0d\x7d\x68\xb6\x74\xb3\x4e\x56\x4e\x1f\x2f\x16\x5e\x74\xf2\x4f\x2f\x24\x80\x82\x96\x70\x08\x70\xbb\x58\x48\xce\x21\x1d\x19\x07\x87\xa0\x11\x07\x84\x24\xb5\xf2\xa7\x39\xa1\xd8\x66\xe0\xdb\x8c\x38\x24\x5f\x22\xb5\x16\x81\x0c\x8d\x17\x46\x17\x54\xdb\x38\xb4\x32\x4c\xbc\x14\x56\x90\x33\x98\x2c\x26\x84\x18\x0c\x2c\x2e\x10\x65\x6e\x89\x0a\x23\xec\x2b\x0e\x03\x81\x60\xa6\xc7\xa9\x08\x80\x1b\x37\x60\x65\x59\x96\x79\x3a\x03\x54\x98\xd0\xb1\x64\xab\xc5\xd7\xaa\xee\xd5\x88\x4b\x03\xb7\xfc\x89\xf9\x67\x39\x71\x7b\x9f\x05\x82\x33\x86\xdb\xa7\x22\xb7\xe2\xe6\x41\x02\x48\x69\x31\x67\x39\xc0\x85\x05\x0f\xdc\x22\x64\x8a\x1c\x76\xc0\xab\x44\x13\x4f\xc6\xac\x40\x63\x59\x25\x57\x69\xa4\x5d\xe7\x16\x49\x29\x9d\xdb\xe1\xc0\x87\x74\x6c\x68\x82\xe6\xa5\x94\x45\x60\xfb\xbb\x04\x70\xd8\x0b\x29\xe5\x57\x33\x82\x80\x09\xed\x5a\xab\x40\xc5\xa4\x34\x01\xa5\xf3\xc1\xa4\x98\xf4\x54\xd6\xef\x65\x59\xb2\xb0\xd4\xc9\x61\x79\x8a\x34\xa1\xb4\xf2\x20\xd9\x00\xf3\x04\xc6\x13\x54\x6c\x4
9\xe0\x2d\xc9\x27\x4b\x24\xf7\x2e\xa9\x7c\xd4\xaf\x96\x04\x48\x4c\xff\xa6\x2c\xcb\x37\x9e\x41\xe5\x55\x3c\x93\x90\x12\xd0\x02\x93\xd8\x70\x46\x30\x8f\x92\x24\x5a\x96\x2f\x42\x40\x2e\x76\x31\xd2\xf8\x73\xa3\xa2\x2c\xd3\xc8\x78\x66\xee\x3c\x1f\x48\x78\x2f\x57\x04\x62\x05\xc0\xaf\x77\x81\x8a\x02\x21\xb7\x10\xb5\x0d\x51\x6b\x2c\x38\xa4\x64\x64\xae\xa1\x18\x32\x64\xa8\x81\x90\xd2\x8f\x08\x2b\x12\x8d\xca\x2a\x01\x4b\xec\x76\x37\xdc\x90\x9b\x30\xa4\x64\xe3\x86\x9c\xf4\xa5\xf8\x2a\x5c\xde\x21\xfe\x31\x83\x93\xc2\x56\x7a\x8c\x4b\xeb\xff\x42\x96\x6f\x10\x22\x28\xf8\x67\xb9\x47\x82\x8b\x58\x88\x02\x57\x5b\xd2\x01\x28\x5e\xe3\x1e\xb0\xd2\x58\x2a\xad\x8a\xab\x4d\x3a\x20\x95\xf9\x3c\x1b\x11\x59\xdc\x3c\x6e\x4f\x10\x56\x61\x73\x38\x8e\xa4\x24\x3f\x93\x26\x03\x55\x20\x4a\xb6\x46\x98\x4f\xb6\x5c\xd0\xcf\x99\x15\x05\x4c\xa2\x5c\x5e\x27\xcb\xa4\x99\x0a\x83\x43\x92\xc8\xc3\xc6\xf1\xf2\xa4\x41\x3a\xc3\xad\x73\x66\xb6\xc9\x45\x25\x9b\x12\x9d\x74\x7f\x7e\xa3\xf0\x49\x57\x73\x3e\xac\x08\xb3\xc9\x98\x80\x5b\x62\xa8\xad\x98\xd2\x65\x7e\xe3\x5d\x3d\xe5\xa2\x80\xd8\x02\x19\xb3\xb8\xf3\x14\xe3\x60\xf4\x64\xb5\x37\x5b\xd8\x58\xb4\x73\x82\x4f\x86\x27\x96\x27\xc4\x90\x9e\x04\x81\x24\x8e\x20\xe2\x08\xc2\x2c\x88\x9d\xaf\x62\x08\x56\x2c\xfa\xca\xcc\x46\xd6\x57\x2c\x9f\x13\xb3\x45\x82\xa1\x98\xcf\xff\x38\x32\x52\x71\x53\xa8\x07\x1d\x0b\xe9\x86\x24\x91\x55\x31\xe9\x3e\x2b\x53\x46\x99\x4f\x3b\xc3\xe7\x06\x1a\x7d\x19\xf7\x14\x79\x9a\x45\xd5\x4c\xa8\x1c\x2c\x5f\xc2\xf3\xbf\x44\xcf\x7c\xc1\x2c\x18\xc0\x02\x1c\x2e\x82\x9e\x41\xf8\x4c\xbe\xf7\xad\x9c\xa3\x44\xce\x8d\x71\x20\xac\x54\x5f\x02\x9f\x95\xef\x99\x70\xff\x60\x26\xdc\xa7\x07\x2e\x02\x57\x5b\xd7\x05\x5d\x12\x66\x5e\x23\x84\x68\xab\x58\xa2\x44\x58\x5b\xe4\x62\xf2\xbd\xc0\xd2\x1c\x44\x6a\xa4\xe4\xa8\x58\xb4\xa1\xf0\x05\x94\x36\x19\xce\xcd\x09\x1e\x41\x6c\x02\x23\x13\x56\x96\x41\x63\x6f\x27\x15\x9b\xdc\xad\xe7\xa9\x25\x0e\xda\x59\x2f\x38\x5d\xca\x32\x14\x33\x8a\x0a\xaf\x81\x08\xb9\xd8\x87\x4f\x08\xcb\xc8\x29\x69\x1b\xe3\xc5\x72\xfa\xe3\x1a\xf9\xd4\xc8\x29\x5d\xcc\x3a\x5d\x5b\x76\xc5\x59\x57\x97\x36\x39\x08\x5d\x85\x45\xa0\x4a\x0b\x02\x6d\x60\x6e\xe6\x49\x33\x90\x89\x50\x87\x0c\x4f\xeb\x90\xad\x5a\xa4\xc6\xb2\x6a\xa5\xd2\xa6\xcd\xbc\xac\x5a\xec\x09\x32\x99\x36\x94\x56\x49\x63\x9d\x14\x94\x9c\x49\xab\x9e\xb6\x6d\x92\x16\x2a\xaa\x2d\x24\xc5\x4c\x5a\x11\x4b\x5d\xe5\xb2\x84\xaf\x3d\xc9\x8d\xa7\x8a\x21\x48\x60\x67\xf6\x18\x5f\x1a\x5c\xf3\x24\xac\xf2\x29\x7a\x3c\xd0\x9b\x22\x34\x7d\x81\x9e\x47\xfa\x09\x06\x7f\xce\x3a\xaf\x54\x69\x14\xb0\x30\x10\xb3\x06\x95\x57\x7e\x60\xb5\xb0\xf8\x88\xa4\xae\x16\xa1\x39\xb7\xe0\xbc\x09\x1b\x51\xc8\x59\x5b\xe8\x4e\x32\x94\xb6\xce\x6e\x5c\x48\x99\x44\x76\xd8\x2f\xee\x54\x20\x41\xdf\x95\xb3\xaf\xa2\x6f\x6c\x70\x43\xd3\x01\x7b\xda\x03\x9f\xd1\xa6\xb6\x15\xa1\xb5\x23\x88\xa6\x6a\x63\x9e\xc1\x2a\xf5\x3d\x3b\x6f\x94\xcc\xef\x44\x9a\x45\xa9\xf0\x71\x61\x57\xcf\xc8\x4c\xe8\x7e\xc5\x9c\x6c\x0a\x35\x36\xc5\x5a\xec\x5b\xdc\x29\xc1\xc6\x42\x2c\x4c\x66\x76\x34\x63\x8b\x03\x8a\xf1\xad\x2b\x5f\x1e\xcd\xe6\xc2\xa6\xfa\xe4\x05\xb0\x55\xac\x49\x73\xdb\x98\x4f\x10\x5c\x46\xc7\xcd\xee\x62\xca\x75\xab\x54\xb1\xc2\x57\x21\x93\x54\x1e\x9f\x17\x62\x5a\x98\x2b\x36\x6b\x4a\x6c\x4c\x55\xf1\x50\x22\xf8\xf2\xd0\x8a\x29\xe5\x82\xce\xf3\xbe\xce\x19\x2f\x05\xf8\x97\xef\xe1\x05\x87\xe0\x69\xb6\x4b\x94\x94\x94\xc5\xed\x22\x77\x14\xa5\x9a\xe4\xd1\x14\xe7\xe8\x45\xbb\xdc\x76\x71\x39\xeb\x59\x5f\xd9\x2d\x33\x97\x16\x92\x65\x3c\xff\x3a\xeb\xb8\x47\xbd\x9a\x4e\x52\xfe\xc7\x03\x39\x95\x09\xb7\xa9\x5c\x83\xb8\x68\x3e\xdd\x4f\x29\xdb\xcc\x46\xba\x55\x1c\x74\xe8\xb5\x1a\x49\x1e\x7f\x46\xc1\xfc\x
f3\xef\x65\xb1\xf8\xff\x3e\xfc\x27\x00\x00\xff\xff\xd8\x43\x60\x70\xb3\x15\x00\x00") + +func fontsBellFlfBytes() ([]byte, error) { + return bindataRead( + _fontsBellFlf, + "fonts/bell.flf", + ) +} + +func fontsBellFlf() (*asset, error) { + bytes, err := fontsBellFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/bell.flf", size: 5555, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsBigFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x7c\x6b\x73\xe2\xb8\xd2\xff\x7b\x3e\x45\xbf\x48\xd5\x42\x15\x13\x83\x73\xff\xd7\x4c\x0a\x07\x9c\xc4\x67\x00\x73\xb8\x64\x77\xfe\xe5\x5a\xad\x93\x38\x09\x67\xb9\xe4\x01\x32\xb3\x39\xe5\x0f\xff\x94\xee\x92\x2d\x19\x13\xf2\xa4\x66\x40\x18\xac\x6e\xa9\xbb\x7f\xdd\x2d\xb5\xfc\x34\x7b\x72\xe3\x03\x38\x87\x53\x38\xb9\x80\xe6\x09\x34\x1b\xd0\x00\xf7\xf8\xf8\xf4\xa8\x72\x35\x7d\x86\xfb\x77\xb8\x99\x25\x8b\x05\xb4\x5f\xe2\xd7\xd7\x64\x36\x83\x63\xe7\xe2\x08\xbe\x7c\x81\xfb\x78\x9d\x3c\xc2\x72\x01\xa3\x4d\xbc\x78\x8c\x57\x8f\x95\x60\xf1\x30\x7b\x7b\x4c\xd6\x10\x8c\x42\xe8\xc6\x9b\xe9\xe2\x4b\xb3\x72\xb3\x4a\x92\xbf\xe1\xe1\x25\x5e\xc5\x0f\x9b\x64\xb5\xc6\x3d\x5e\xad\xde\x1e\x12\xf8\x57\xfc\x77\xf2\x2b\x7e\x87\xaf\xaf\xf7\xff\xa1\xcd\xd6\x22\x79\x9b\xc7\x8b\xc5\xe1\xdb\xaf\x78\x93\xac\x66\xcb\xe5\xe1\x43\x7c\x59\x79\x9a\x3e\xcf\x92\x0d\xac\x92\x59\x12\xaf\x13\x70\x0f\x5d\x4c\xbf\xbf\xfc\x99\xcc\xef\x93\x15\x34\x2f\x2e\x4e\x2b\x83\x64\x35\x9f\xae\xd7\xd3\xe5\x02\xa6\x6b\x78\x49\x56\xc9\xfd\x3b\x3c\x4f\x7f\x26\x0b\xd8\x2c\x61\xbe\x7c\x9c\x3e\xbd\xc3\xe6\x65\xba\x86\xa7\xe5\x62\x53\x87\x78\x0d\xb3\xe5\xe2\x19\xbf\x6f\x5e\x92\x0a\xf9\xc1\x34\x59\xfd\xb6\x86\x45\x3c\x4f\x70\x1f\xaf\xb3\xf8\x81\x0e\x30\x86\x87\xe5\x7c\x9e\x2c\x36\x30\x9b\x2e\x92\xc3\x4a\xa5\x47\x7f\xfd\x88\xc7\x32\x88\xdf\x66\x70\xf5\xb6\xda\x2c\x17\xf0\x75\xbd\x9c\xbd\x6d\xa6\xcb\x45\x2b\x89\x57\x9b\x97\xd9\x74\xf1\xf7\xe1\x22\xd9\x5c\x42\xd3\x75\x2e\x4e\x31\x23\x53\x3a\x45\xb0\x48\x7e\xc1\x6b\xbc\x8a\xe7\xc9\x26\x59\x55\xd6\x6f\xaf\xaf\xcb\xd5\x86\x76\x78\x1d\xdc\xe0\xd1\xc6\x8b\x47\xdc\xfc\x7d\xba\x38\x04\xe8\xc5\xef\x10\xcf\xd6\x4b\xb8\x4f\x60\x3d\x9b\x3e\xbf\x6c\x66\xef\x30\xe7\x5c\x3c\x2d\x57\x70\x9f\x6c\x36\xc9\x0a\xde\xd6\x49\x65\xf9\x44\xba\x7f\x7a\x9b\xcd\xbe\xfc\x9a\x3e\x6e\x5e\x9c\xbf\x93\xd5\xc2\x59\xcf\xdf\xd6\x2f\x10\xcf\x36\xc9\x6a\x11\x6f\xa6\x3f\x93\x75\x1d\xee\xdf\x36\xf0\x98\x3c\xc5\x6f\xb3\x0d\x2c\xdf\x36\xaf\x6f\x1b\x3c\xf2\x7e\x38\xc6\x02\x5b\x3c\x27\x8f\x87\x15\x38\x68\x15\xfd\x6f\x55\x00\x10\xb4\x2a\x90\x42\x9a\x7d\x45\xf8\xb5\x8a\x6a\xf8\x37\x00\xec\x95\xde\x40\x6e\xa9\x42\x0a\xe4\xcb\x3b\xb8\xa3\x5f\x1f\x80\xf9\x1d\x94\x77\xda\x40\xe4\x1f\xb9\x8a\x52\x48\x53\x48\x29\x17\xf8\x3a\xfe\x9f\x16\x7f\x83\x99\x4b\x51\x2a\xbb\xd5\x68\x28\x84\x10\xbd\x9a\x42\x8a\xdf\x1d\x40\x64\x38\x11\x42\x10\x91\x11\x00\x38\xe4\x7b\x94\x9a\x39\xc5\x4c\x22\x44\xe7\x01\x1c\xfa\x63\xc0\x0d\xfa\x33\xdc\x20\x2d\xdc\x20\x7c\x3a\xc8\x91\x73\xa6\x33\xd5\x52\xf8\xa3\xcc\x21\x3e\x07\x55\x40\x50\xe3\x3d\x21\x88\x9c\x88\x88\xa1\x8a\x2e\x01\xbe\xe2\xab\x11\x42\xc8\x89\x9c\x6c\x0f\x1a\xa7\x64\x40\x98\x72\x4a\xb9\x3b\xc8\xbd\x6a\x52\xa4\xe3\x62\x83\x62\x13\x64\x7c\x83\x08\x45\x8c\x1a\xbe\x11\xd1\x39\x8d\x20\xe2\x53\x6b\x7b\xc3\x73\xa1\xdc\xc8\xe4\xce\x86\x19\xa5\x90\x3a\xb4\x8b\x08\xfe\x82\xdf\xe8\x94\xa6\x88\xfe\x2c\xa5\xac\xd5\xe1\x90\x91\x89\x9c\x14\xa5\x91\xa3\xc9\x58\x6b\x67\x67\x5c\x55\x2f\xa1\x41\x8a\xfa\xf0\xdf\x1d\xe8\x32\xca\x4a\xcb\xfc\x9a\x9f\x6c\x83\x78\x65\x13\x91\x3f\xca\x04\xf9\
x4b\xd9\x0f\x14\xe2\x07\xf9\xdb\xca\x73\x62\x30\x52\x10\x12\x06\xd8\xa6\xb9\xc0\x35\xb7\x70\x2e\x10\x93\x3c\x55\x51\xae\x20\x02\x32\x64\x0b\x31\x2d\x20\x4a\x6b\xef\x10\x31\x83\xb1\x2b\x10\xc3\x20\xe5\x5e\x76\x23\x65\x24\x45\x8c\x0f\x80\x83\x1a\xbd\x41\x0c\xd0\xc1\x1d\xf3\x09\x4f\x05\x65\xd9\xe0\x72\xe1\x3d\x89\xae\x10\xaa\x71\xe2\x08\xbe\x72\xf9\xd5\x18\x26\x6e\x19\x92\xd0\x3b\x0a\x5d\x4a\x93\x73\x23\x95\x10\xf8\x0f\x74\x85\x34\x8a\x5f\x2a\x10\xf0\x01\xa5\xb4\x37\x9d\xfb\xd2\x9c\x02\xb7\x63\x5d\x15\x58\x87\xf0\x9b\x90\x70\x95\x4f\xc7\x56\x69\x6a\x3a\x0e\x4c\x1e\x70\x20\x54\x4e\x12\x92\x4a\x97\xd7\xba\xbc\xe6\x9b\xf4\x4e\x70\x75\x09\x4c\x48\xe5\x39\x2d\xee\x30\x42\xa8\xce\x79\x97\xd6\x82\x9c\xad\x28\xa1\x5a\xe2\xc1\x36\xdb\x2c\xba\xc1\x08\x2b\xcc\x94\x99\x21\x33\xc6\xbe\xc2\x57\x60\x08\xca\x94\x57\x42\x75\xd6\xe3\x8a\x6b\x66\x30\xb2\x42\x54\xb1\x74\xb8\x07\xc3\x0c\xe8\x9c\x5c\xc2\xa5\xc2\xa9\x36\x81\x1a\x67\x06\x63\x86\xac\x31\x0b\xeb\xc0\x52\x02\x8b\x31\xeb\x1c\x0a\xd3\x26\x52\xe6\x5d\x13\x37\xfd\x97\x40\xac\xaa\x40\x2a\x22\x76\x36\xe6\x28\x6b\x3a\x19\x02\xfc\x1b\x27\x92\x1f\x1c\xdc\x87\x54\x6e\x31\x1d\x0e\x65\x24\x52\x00\x56\x11\x51\x76\x46\x73\xb0\x04\x0a\xd2\x72\xc3\x06\xa9\xf1\x69\x59\x63\x17\xc2\x65\x0c\xf1\x09\x38\x10\x60\xa5\x34\xc5\x6f\xa3\x42\x45\x90\xb0\xc4\x59\x55\xbc\x82\xe2\x16\x14\xbf\x80\x52\xc9\xae\x3e\xc3\xc6\x8e\x91\xe8\x58\x43\x3c\x71\x35\xcd\x72\xbc\x45\x75\x77\xef\x98\x77\x41\x14\x70\x3b\x4c\x19\xe7\x18\x38\x39\xe2\x04\x32\x53\x51\x66\x8e\x89\x3a\xe7\x27\x56\xcc\x26\x85\xf2\xec\x74\x63\x8e\xd3\x12\xc2\xcb\x46\x46\xc2\x27\x89\x96\x12\x41\x65\x78\xcd\xe1\x20\xe7\x94\xfc\xa5\x90\x66\x9a\x08\x0c\xfc\x1b\xec\xcd\x30\x05\x04\xff\x70\xc8\xc8\xc2\x55\x1e\x2f\x02\x83\xc0\x94\x47\x8a\x38\x4c\x54\x2c\x4c\xc7\x08\x24\x2e\xaa\xc2\xb5\x35\x77\x52\x2b\x29\x67\x88\x1c\x2e\x8e\x08\x2b\x03\x1b\x6f\xe4\xe4\x6d\xa2\x84\x90\xa4\xf0\x23\x31\x77\x10\xf1\xd6\x21\x08\x34\x8b\x40\xf4\x19\x15\x09\x09\xa9\xee\xaf\xac\xc9\x96\x11\x92\x05\x0b\x90\x02\x5c\xac\x8b\x0f\x58\xd6\xc7\x38\xd6\xb1\xf6\x23\x1c\x03\x08\x8e\x23\xa1\x60\x59\x10\x2f\x89\x05\x55\x31\x8e\x48\x8b\xd8\x54\x14\x2f\x03\x8b\x6a\x28\xa9\xc5\x92\xb0\xa5\x8d\xd2\x6c\xd6\x94\xf5\x71\x36\xb0\xd9\x57\x3b\x10\xb3\x3c\x62\xc6\xc4\x3b\x8a\x58\x26\xa2\x56\xc2\xee\x8e\xb0\xf5\x88\x58\x81\x7c\x23\x3a\x8b\xb2\xa1\xa2\xc1\x79\x72\x42\x59\x62\x39\x82\x39\xa2\x39\xc2\x79\xe2\x06\x06\xcc\x17\x38\x23\x82\x03\x41\xfa\x8e\xd3\xbc\xe4\xb1\x9b\xc3\x91\x0b\xc7\x07\x9a\x5e\xe9\x96\xcb\x46\xa6\x0c\x4a\x8e\x07\x29\x43\x01\x39\x8a\x5d\x85\x4f\xfe\x78\x42\xc1\x52\xc6\x03\x63\xca\xc8\x73\xb7\x62\x87\xa0\x78\x5a\xa1\x3c\xe6\x77\x81\xb3\xa2\x33\x65\xc8\x52\x8a\x42\x7c\x42\x6e\x52\x60\x91\x90\x94\x7d\x0e\x41\xf1\x78\xc2\xdf\x99\xde\x95\x48\x45\xe3\x08\xe8\x92\x45\xea\x44\xf4\x32\x5b\x57\x31\xbe\xe9\x49\x6b\x7e\xd2\xcb\x36\x0f\xec\x81\x7b\x4b\x4f\x1a\x80\x72\xb5\x6d\xe1\x27\x3b\x31\x14\xb1\x40\x00\x16\x77\x29\x55\xc5\xbc\xeb\x45\x62\xe6\x17\x19\xae\xeb\xc9\xa9\x92\x4b\xca\x78\xf5\x70\x4b\x72\x0a\xd9\x30\x5f\xc5\x53\x81\xa6\x02\x4b\x6d\x09\x3e\xa8\x01\x09\x17\xb1\x40\xad\x0f\x0c\xb5\x98\x33\x88\xb8\x2f\x71\xb6\x72\x26\xef\x4a\x55\x13\xc8\x9a\x8a\xba\x2e\xa0\x29\xe2\xc7\xa4\x28\xa6\xc0\x11\x6b\x1c\x58\x10\xbb\x49\x51\x89\x5e\xd2\x42\xb5\x90\xf9\x6d\x7e\x8d\x99\xdf\xa8\x29\x26\x93\x95\xc8\x99\xd9\x67\x9b\x91\x3a\xdc\x48\xd5\x21\x88\x11\xb0\x46\x36\x72\x04\x96\x3d\x15\xc7\x89\x59\x76\xb7\xb2\x9e\x9f\x04\xfe\x09\x11\xb1\xa8\x13\xf9\x57\x66\x01\x4d\x9f\xd0\x4c\x44\x98\xef\xd3\x26\x7e\x04\x7b\x48\xcb\xa2\x4f\xc5\x2b\x25\x5b\x8d\xb8\x14\x87\x22\xdc\x3a\xd4\xe3\x43\x19\x1e\xee
\xaf\xf1\x6a\x3a\x92\x32\xe4\xcc\x08\x9e\xf3\xc7\xd8\x93\x76\x28\x19\x29\x80\x19\xf5\x9d\x41\x02\xc7\x2a\xbe\xdd\x40\x8c\xcd\x68\xcc\x08\xcc\x0e\x11\x90\x86\x0e\xb8\xa7\xb4\x34\x1a\x20\x35\x94\x33\x2d\xce\x6e\x05\x3a\xbd\xc3\x6d\x11\x4d\xb4\x65\x91\x2c\x77\x5d\xe9\x56\xeb\x59\xeb\x5c\xeb\x3f\x43\xc2\x14\x00\xe6\xa5\x8a\x90\xec\x9c\x75\x7c\xc9\x40\xc0\xc1\x5d\xda\x40\xe0\x63\x73\x5a\x80\xb0\x06\x35\x41\x2c\x18\x51\x57\xf5\x1c\x64\x97\xb2\xbe\x0c\x28\xb6\xb5\x1c\x19\x24\x29\xd7\xf5\xe5\xc0\xed\xb8\x96\xc1\x38\xd3\xf2\x9e\xec\x19\x22\x13\x27\xda\x32\x1f\x19\x53\xe4\xa4\x34\x60\x62\x52\xb4\xed\x11\x5a\xf7\x0a\x91\xdc\xc6\xab\xa2\x1a\xc2\xf0\x43\x7f\xe0\xa8\x4b\x6e\x48\x5b\x71\x53\x17\xdc\x32\xeb\x6d\x5a\x5b\x13\x29\xeb\xbd\x65\xdd\xe1\x28\xbb\xaf\xa1\x74\xc8\x5c\xd9\x9e\x1b\x25\xf9\x0e\xf7\x0f\xd8\x0c\x1d\x7e\x1c\xeb\x4d\x1d\xee\x07\x3f\xe6\xcd\x26\xee\x28\xd2\xaf\x62\x09\x54\x5e\x4b\x99\xf3\x40\x69\xae\xc3\xe6\x69\x03\xa0\x1f\x7e\xb9\x1a\xfa\xde\x77\x18\x0d\xbc\xb6\xbf\x7d\x67\xbc\x79\xda\x04\x08\xfa\x77\xfe\x70\xec\x77\xc0\xff\xa3\xdd\xf5\x7a\xde\x38\x08\xfb\xd0\xf3\x86\xdf\xf5\x00\xa7\x4c\x94\xd0\x3c\x75\x01\xda\x7e\x7f\x0c\xa3\xe0\xa6\xaf\x63\x0e\x5f\xab\xe6\xf6\xc5\x77\xaa\x95\x30\x17\x80\xc6\xf8\xda\x66\x35\xed\xf7\x08\x60\x10\x4e\xfa\x1d\xa5\x63\xb9\xb7\x4c\xf6\x51\x11\xb3\x0e\xba\x96\xc7\x5d\x1a\xa2\x6b\x9d\x82\xb4\x58\xb5\xa8\xa2\x7a\x6e\xc9\x2b\x8f\xb5\xcd\xd3\x63\x80\xf6\x64\x38\xf4\xfb\xed\x1f\xda\xa0\x88\xf5\xd1\x3d\x6b\x82\x1e\xc0\x20\x4e\x6a\x14\x33\x53\xfc\x2d\x31\x52\xc7\xa2\x0a\xcd\xd3\x13\x80\x1f\x7e\x9f\x77\xcf\x9c\x10\x9b\x92\x48\xe4\xf0\x88\x38\x22\x91\x1d\x01\x5f\x74\x51\xda\x65\x92\xe0\xe6\xe9\x29\xc0\xd5\x30\xfc\xee\xf7\xe1\xca\x1b\x16\x44\xae\xc6\xeb\xb8\x83\x33\x80\x91\xdf\x26\x7a\x22\xa6\x84\x31\x8c\x1c\x20\x52\x75\x78\xba\x1a\x41\xc4\x80\x94\x24\xf1\x44\x9d\x34\x17\xa7\xcb\xf9\x1c\xa0\x13\x78\xfe\xd0\x1f\x05\x23\x8b\xd5\x1d\x80\xcc\xfc\x8a\x5a\xc6\xa9\xbe\x00\x68\x87\x83\x1f\xc3\xe0\xe6\x76\xac\x32\xcf\x32\x4f\x01\xb6\x6c\x45\x4e\x20\x2e\x95\x66\x0a\xdc\x50\xc5\x02\x1f\x5f\x07\x65\xca\xc5\x2f\xe0\x3b\xc9\x92\x51\x0a\xd2\xb9\xb3\xbf\xac\x83\xe7\xcc\x9d\x35\x00\xae\xfd\x5e\xd0\x0f\xfa\x3e\x84\xc3\x4e\xd0\xf7\xba\x10\xf4\x3b\x41\xdb\x1b\x87\xc3\xf2\x88\xa8\x2d\x62\x17\x6c\xfe\x37\xcf\x9a\x00\x5d\xff\x7a\xfc\x65\x10\x06\xfd\x71\xd0\xbf\x81\x4e\x38\xb9\xea\xfa\xe0\xf5\x6f\xba\x3e\xfc\x7b\x12\x8e\x35\x34\x10\xfe\x9c\x3a\x72\xe1\xd1\xc5\x4e\x9d\xb2\x57\x27\x77\xeb\xac\x6b\x15\xcd\x33\x17\x48\x29\x8f\x66\x57\xea\xdc\xb4\x0a\x76\x5d\xd5\x65\xe5\xc2\xca\x82\xe6\xd9\x11\xc0\x28\xbc\x1e\xc3\xed\x8f\xc1\xad\xdf\xb7\xc5\x3d\xfa\xd2\x43\x6e\xfa\x0a\x27\xf2\x18\x60\xe8\xdf\x04\xa3\xb1\x3f\xf4\x3b\x3b\xe9\x15\xdb\x00\x13\x7a\xc5\xb3\x05\xa1\x57\x7c\x3b\x4c\xea\x15\x5d\xff\x8e\xca\xeb\xd5\x09\x40\xcf\x6b\x0f\xc3\xfe\xee\x85\x1a\x65\xcb\x37\x9a\x67\xa7\x00\x1d\xff\x66\xe8\xfb\x62\xf8\xd2\xbf\x09\x67\x2b\x7d\xad\x23\xa7\x35\xd3\xc8\x62\xc2\xd9\x19\xc0\xa0\x3b\x19\x7d\xe9\x05\xfd\xc9\x48\x99\x5c\x50\x01\xbf\x65\x29\x7f\x31\x8b\xd5\x20\xc2\x73\x80\xd1\x64\xe0\x0f\x47\xed\x61\x30\x18\xc3\xf8\xf7\x50\x5f\x4c\xab\xb5\x4c\xf1\x6b\x71\x6c\xd7\x3c\xbb\xc8\xf4\x7a\x3b\xf4\x7d\x35\x2e\x46\xbc\x3a\x2b\x93\x3e\x6d\xe9\xf7\xbc\x01\xe0\xb5\x27\x63\x1f\xbc\x36\xf6\xb7\x15\x16\x37\x3b\xf4\xf6\xfc\xf2\x58\x6e\x91\xac\x79\xde\x04\xe8\x05\xed\x61\x98\x73\x6b\xaa\x51\x14\x45\x38\x38\x9b\xad\x2b\x39\x24\x68\xd9\x6c\xf3\xdc\x05\x18\x04\xdd\xf6\x30\xfc\x5d\x51\x09\x65\xdb\x80\x1a\x31\x55\x0c\x5a\x41\xc2\x74\x43\xb4\x89\x35\xc8\x36\xa9\x88\x2b\x72\x6d\xe7\x47\x78\x4c\x9d\x4e\xd7\x87\x4e\x38\x56\x07\x6c\x2c\x4
6\x30\x4c\x0a\x76\xf7\x7e\x27\xe8\x76\xbd\xcc\xdd\xf9\x57\xd2\x5f\x4d\xd4\x3e\xe0\xbb\x4f\x74\x69\x87\x7d\x9f\xff\xce\xc9\xbb\x57\x2b\x0f\xa7\xd8\x5c\x47\xed\x49\xb7\xc0\x0f\x94\x09\x64\x4b\xfb\x81\xf3\x33\x00\xe2\x11\x4b\x3a\x02\x09\x69\x91\x5e\x16\x21\x0b\x23\x64\x69\x04\x2f\x8e\x28\x48\x9d\x9b\xe7\xe7\x00\x77\x93\xee\x8d\x37\x84\xeb\xa1\x47\x23\x8b\xb0\x8f\x09\x7b\xc3\xb1\x3f\x14\x55\x8a\x2c\xec\x82\x54\x96\xd7\xd0\x65\x2f\xe0\x15\x16\x0e\xf9\x96\x47\x98\x4e\xca\xeb\x29\x69\xa2\xa4\xc5\x95\x59\x05\xa2\x9c\x5c\x98\x39\xb9\xf5\xba\xd7\x0a\x1b\x2a\x17\x82\x09\x8e\x15\xb4\xc8\xa8\x26\x27\x40\xd4\x87\x38\xd9\x80\x33\xab\xc0\x17\x8d\x3c\x79\x82\x19\x7c\x2a\x46\xa2\x78\x42\x6c\xe7\x63\x0c\x51\xca\x8d\x30\x94\xa8\x33\x82\xb4\x39\xd1\x67\x05\x6c\xf3\x92\xf3\x24\x84\x39\x35\x59\xf8\xf7\xc4\x1f\x65\x62\x03\x76\x93\x48\x5f\x15\x39\x00\x94\x5c\xd0\x6e\x5e\xb8\x00\x5d\x6f\x1c\xf4\xa1\xed\x0d\x82\xb1\xd7\x85\xae\x3f\x1e\xfb\x43\xf0\xe0\xf7\x60\x7c\x0b\x37\x43\xef\xce\xe7\x11\xa7\xb8\x19\xbb\x43\x99\x0b\x7c\x42\xd2\xdc\xbc\x38\x2a\xe6\x83\xa0\x2f\x4f\x44\x24\x6d\xe4\x7c\x36\x1f\xc7\xc5\x7c\xb4\x83\x61\x7b\xd2\xbb\xee\xfa\x7f\x50\xa2\x8e\x20\x9a\x3a\x28\x4a\x3f\x99\x99\x93\x62\x66\xc6\x41\xb7\x43\x27\xc5\x89\x1c\x51\x65\xe1\x44\x9f\x3e\x29\xa7\xc5\x7c\xa8\x29\xc4\xff\xf1\xea\x4a\xf3\xe2\xac\x98\x97\x21\x86\x53\xef\x2a\xbc\xe3\xda\x22\x3b\xa9\x2e\x6b\x9f\x3d\x31\xe7\x36\x66\x18\x75\x75\x7f\x9d\x92\x95\x35\x42\xec\x63\xaa\xc6\xab\x8e\x52\x2d\xc4\xb8\x11\x39\x34\xe1\xc8\x50\x39\x92\xe3\x4c\x70\x77\x61\xe1\xae\xcd\x74\x59\x7a\x5e\x5b\xa9\xd1\x0e\xe5\x5c\x35\xad\x92\x8e\x70\xe0\x36\x1a\x16\x0e\xfc\x0c\xba\x88\x39\xc0\xf9\x4b\xae\x2c\x15\xb4\xc2\xac\xed\xd1\xa5\xdb\x68\x16\xd3\x95\x68\xc2\x3d\x3b\x76\x98\x9f\x40\xd7\x86\xa6\xbe\x09\x3d\x08\x78\x10\x1f\xe2\x40\x94\xee\x4f\xdc\x06\xa1\x7e\xde\x4a\x8d\x2b\x8a\xfb\xd0\xb6\xc1\x66\xa0\x0b\x5a\x96\xab\xb0\xca\x17\xe6\x18\xb9\xef\xe2\xe7\x04\x2c\x0b\xd6\x6e\xc3\x86\x88\x41\x46\xb0\x5c\x9b\x79\x68\xb6\x2b\x1d\x1b\xe2\x05\x06\x41\x3a\x7c\x0b\x1f\x45\xe9\x87\x88\xd9\x20\x2d\x28\x16\x1c\x5f\x5b\x26\x04\x65\x0c\x90\x69\x11\xb2\xb6\x50\xd0\x6d\xd8\x10\xcc\x1f\xdf\x56\x04\x80\x89\x3e\x65\xe1\x2b\x4d\x03\x45\x51\x03\x46\x2d\x65\x33\x55\xec\x88\x9b\x6a\x90\xf2\x71\xa0\xdb\xb0\x41\x55\x3f\xe3\xe9\x88\xa3\xa3\x36\x13\x39\x05\x65\x6c\xe5\x8a\xd7\xdc\xa6\x0d\xa0\x42\x1b\x40\xd1\xd8\xa7\xb5\xdf\x92\xbe\xdb\xb4\x01\x54\x68\x03\x28\xb9\x13\xb2\x17\x5d\x1b\x40\x85\x5b\x00\x0a\x45\xe9\xfe\xc4\x6d\x00\x15\x16\x09\xf9\x13\x06\x6d\x03\xa7\xb0\x9c\x7d\xed\x45\xfb\x04\xa0\x37\xe9\x8e\x83\x41\x17\xe7\x93\xda\x1a\x2e\xff\x29\x59\xda\x26\xeb\xda\xec\x70\x56\xc4\x0f\x66\x19\xea\x7e\xdc\xa6\x0d\x9a\xd8\x68\x46\xe3\x61\xf8\xdd\xcf\x3b\x75\x47\x6c\x84\x3b\x22\x3b\x76\x44\x05\xa9\xe3\x88\x1a\x16\xa7\xb8\xf2\xce\x6d\xda\xe0\x6a\x52\xc2\xa9\x7f\x7c\xd7\xca\x6d\xda\xa0\x6a\x52\xc2\xa9\xef\x43\xd7\x86\x4d\x93\xb2\x4e\x7d\x0f\xe2\xae\x0d\xa0\x26\x25\x9d\xfa\x3e\xb4\x6d\x20\xf5\xc3\x32\xe1\x88\x4c\xb8\x7d\x3f\x5f\xf1\x4e\xfa\xe9\xa4\x0c\x5d\x1b\x48\x8d\x6f\xc3\x61\x3f\x5b\x74\x5d\xbe\x46\x78\x5b\x61\xb0\xeb\x0a\x80\x1a\xf5\xbc\xae\x20\x3b\xba\xf5\x86\x03\x18\x55\x3e\x7d\x13\xd1\x75\x8f\x8d\x04\xb3\xb9\xb7\xc1\xf9\xec\xb1\x5b\xeb\xba\x27\x45\x54\x8b\x5c\xcf\x5e\x54\x4f\x8b\xa8\x6e\x33\xa2\xfd\x48\x9f\x15\x91\x2e\x72\x3b\x7b\x51\x3d\x2f\xa2\xba\x6d\xdb\x6d\x2f\xca\x17\x45\x94\x33\x19\xb2\x90\x72\xb5\x5a\xab\xed\x3d\xe8\xa3\x86\x99\xb4\x6f\x0f\xff\x38\x45\xc5\x59\xfd\x05\xa2\xf4\x11\xd3\x55\xca\x1f\xeb\xdb\x37\x91\xdd\xa3\xa6\x91\x87\x7c\xd6\xab\xdd\xdd\xaa\x94\x2c\x0a\xad\x89\x15\x0d\x4e\xcf\x35\xd2\x33\x
e5\xb8\xc2\x88\xb3\xd4\x4a\x17\x7a\xba\x47\x66\x90\x32\x65\xb6\xc2\x78\xf7\xa0\x66\x46\x28\x6b\x3e\x2b\x8c\x76\x0f\x92\x66\x78\x2a\x97\xc5\x9a\x21\x1a\xb8\x9b\x8b\x0a\x93\xa1\x23\x33\x44\xe9\x39\x2c\x1d\x12\x5b\x18\x2a\x51\x85\xea\x1e\x99\xd1\x27\x93\xb1\xaa\xfb\x49\xa5\x7a\x35\xa3\x4b\x61\x7e\x2a\xc4\xc2\xc0\x55\xcf\x4f\x6d\xcf\x2c\x70\x8f\xcc\x70\xb2\x25\x3b\x55\x6b\x6e\xb3\x39\x69\xd9\x48\xe0\xd8\x8c\x26\x34\x37\xc5\x31\x33\xbd\xe1\x12\xc4\x56\xb8\x13\x89\x83\x99\x0a\x74\x95\xa9\x39\x72\x8f\xcd\xb0\x51\x98\x81\xb2\x38\xf3\xe3\x65\xb2\xee\xb1\x19\x3c\x4a\xe4\x9f\x7b\xd4\x57\xb9\xc7\x66\x10\x29\x91\x7d\xee\x45\xd5\x0c\x26\xdb\x72\x4f\x25\x04\xf8\x38\x69\x33\xa8\x94\xc8\x3c\xf7\xa2\x6a\x06\x94\x2d\x79\xe7\x27\x14\xd0\xb9\xc7\x67\x00\x9d\xe0\x2e\x18\xe9\x19\xa7\xba\x1a\x8e\xf8\x6a\x38\xca\x15\x29\x08\x75\x35\xfe\x3e\x4f\x91\xd1\x34\x43\x52\x2e\x2f\xcd\xdd\xcd\x07\x2b\x47\xcb\x21\x0b\xaa\x8e\xc3\xc7\x8b\x13\xd3\xa2\xbc\xf4\xd8\x0c\x53\xd6\xac\x54\x5a\xd2\x1e\x85\x85\xee\x89\x19\xa2\xac\x39\xa9\xb4\xa4\xbd\xa8\x9a\xb1\xaa\x64\x46\xba\x1f\x69\x33\x60\x95\xcb\x47\xf7\xa3\x6c\x06\x2d\x5b\x36\xba\xd3\x54\x5b\x8b\xac\xdd\x13\x33\x68\xe9\xb9\xa8\x5e\x94\x51\x7c\x54\xa9\xf0\x50\x82\x7b\x62\x06\xaa\x1f\x9f\x35\xbd\xd6\x81\x36\xfe\x69\xb8\x57\x6d\x80\x5e\xd8\x09\xae\x03\x7f\x28\x42\xf7\x41\x88\x4d\x77\x70\x4b\x26\xb7\xc5\x5e\x6a\xb5\x96\xfa\x51\x7b\x61\x7d\x75\xf2\x7d\x0d\xfd\x3b\x7f\x38\xf2\x3b\xd0\x0e\x7b\x3d\x4f\xbd\xad\x5a\xdd\xd2\xdf\xd1\x99\x07\x70\x33\xf4\xfd\xef\xf0\x63\x10\xde\xf8\x37\x43\xaf\xd7\xf3\xfb\x81\xf5\x2e\xf5\x25\x4d\x59\x27\xe7\x67\xbc\x13\xaf\x1f\xc2\xd8\xef\xfa\x81\xac\x3c\xb1\x3d\x63\x23\x57\x33\x82\x7b\xba\x68\xf2\x9e\xb2\x5b\x7f\xdd\xc1\xad\x67\x43\xef\x54\x39\x62\x0f\xd9\x75\x99\xc2\x40\x81\xd0\x74\x2d\x34\xaf\xfc\xb1\x57\xf4\x98\x87\x1a\xbf\x56\xcd\x5c\xcb\xae\xcd\xe7\x49\x1e\x59\x48\xde\x78\x54\x82\xd2\x75\x90\x36\x5b\x74\x92\x9b\x85\x07\x39\xeb\xc8\x2d\x84\xe4\x68\x1e\x5b\x68\x76\xfc\xee\xd8\x33\xa4\x7e\xe5\x9e\x9b\xe1\x20\x14\xf1\xbd\x5d\xf6\x57\xfc\xdc\x0c\xc2\xcb\x89\x85\x17\x7f\x30\x0a\xba\xb2\xc4\x2f\x37\x03\xca\xd3\x1f\x6a\xea\x52\x95\x70\xb2\xb6\x27\x7a\x11\xaa\xa7\x16\xaa\xff\x5f\x0a\x9a\x3d\x44\x4c\x9e\xab\x35\x3f\x8a\x49\x3d\x57\x5b\x48\xf2\xcc\x36\x50\x4a\xb1\xb8\x32\xed\x43\xda\x7c\x6e\xa1\x38\xbe\xa5\x34\x6d\xc7\xf4\xcd\xcf\xa9\x28\x7d\x74\x9c\xd0\xbe\xb0\xd0\x0e\x42\x3e\xc1\xc0\x1e\xe7\x46\xb7\xd8\xb2\xa9\x12\x7d\xaf\xaa\x93\xda\xaa\xe8\x24\x3c\x0b\x89\xef\xde\x60\xe0\xa9\x4f\x82\x4b\x69\x69\x53\x4b\x54\x57\xd1\xb1\x89\xe7\x50\xa4\xca\x73\x02\xec\xe5\xbd\x84\xe6\x95\x85\x66\xd7\xeb\x75\xf6\xb0\x1c\x71\x1a\xba\xe4\x13\x67\x08\x2f\x6d\x0b\x2f\xbd\x89\x5e\x75\x4f\xaa\xbc\x1d\x59\x80\xfb\x53\x94\x29\xa6\x11\x52\x1f\x71\xa1\x68\x56\xf6\x21\x17\xd9\x58\x94\x30\xd0\xb1\x30\xd0\x9f\x6c\x7f\x0c\x06\x28\x4c\x94\xd9\x49\x24\x04\x7d\x0b\xc1\x3f\x02\x15\x26\xaa\x8a\x2d\x0a\x05\x27\x9a\x24\xa3\xf1\xec\xef\x2c\x04\xaf\x2d\x04\x43\x52\x69\xda\xb7\x7a\xa1\x0f\xef\x09\x60\xaa\x5e\xc3\x42\x75\x10\xe8\xc9\x44\x95\x25\x10\xc2\x82\x54\xeb\x31\xb6\xd9\x71\xd5\x2d\x82\xf5\x6c\xae\x77\x78\x1b\x96\xf1\x82\xb6\xe8\xab\x60\xc8\x36\x2f\x38\x0a\x6e\xb0\x17\x94\x63\x8e\x84\x0f\x50\x1f\x2b\x70\x09\x97\x3b\x03\xb2\x67\xf3\x82\x63\x6f\xa2\xab\x09\x9e\x66\x76\x90\xc7\xb4\xc0\x52\x6a\xa9\x85\x50\xb4\xf9\xba\x89\xe2\xeb\x84\xd9\x56\x11\x31\x5b\x46\x39\x82\x9f\x96\xa7\x44\xec\xf2\xc4\x08\xc2\x85\xcd\xf7\x0d\x6e\x83\x6c\x52\xca\x0e\x1f\xb5\x44\x7d\x73\x44\xf5\xae\x9a\x42\x5a\x63\x0f\x30\x20\x1c\x3b\x3b\x71\x60\x73\x85\x6d\xcc\x81\xe5\xd0\xea\x4f\xc3\x63\x38\xfe\x2c\xf3\x18\x0e\x42\xd1\xe6\x0a\x07\
xa3\x40\x3c\x04\x90\xbb\x5f\x51\xa3\x9d\x12\xb8\x72\xb8\xd9\x6a\xe3\xdc\x79\xd6\x6d\x0e\x31\xec\xf9\x37\x1e\x4b\xab\xd4\x4a\x35\x84\xd4\x13\x9c\x9a\x39\x8b\x0f\x88\x9e\x8f\x15\x68\x96\xa6\xba\xce\x5b\x3c\xc7\x95\xb0\x6f\x7d\x0f\x81\x07\xd6\xb9\x3b\x5b\x3c\xaf\xe7\x0f\x20\xe5\xc7\x72\xab\x50\xad\xb1\x83\xb9\xe4\x10\x44\xd1\x43\x76\x08\x65\xd7\x48\xf9\x4a\xc6\x23\x86\x88\x5e\x09\xaf\xbf\x96\x07\x1b\x46\xf0\xc8\x48\x90\x07\xd7\x39\x75\xd1\xd2\x3e\xa2\x7e\xb5\x8c\xfa\xd9\x0d\x9f\x51\x3c\x36\x52\x94\xa1\xb5\xfa\x94\x3b\xb6\xd1\xa1\x3e\x7c\x4e\x98\x18\x12\x06\xb6\xc5\x5f\x5c\x9d\x18\x29\xca\x00\x5a\xbd\xab\xa5\x1f\x76\xaf\x91\xa2\x08\xca\x47\x54\x10\x6c\x5d\x9d\x1a\x69\xd0\x70\x59\x96\x30\x45\xf8\xbd\x96\x0f\x92\xb3\x79\x3b\x3d\x60\xcf\x89\xb0\x81\x62\x27\xed\x08\x82\x67\xe6\x41\x8d\xed\x62\xdb\xb6\x46\x2c\x4e\x5a\x65\x9f\x2b\x40\xe8\x9d\x1b\xe9\xa9\xa1\x72\xb9\x64\xb3\xb4\x9b\xbf\xba\x30\x52\x64\x01\xb2\xbc\x83\xe9\x24\x1b\x8f\x7c\x83\x48\x48\x8b\xbf\xb1\x8e\x3d\x63\xc7\x3c\x2c\xd6\xb9\x69\x99\x1f\xd6\xb6\xfd\x91\x1b\x84\xd2\x95\x91\x12\x0b\x86\xb7\x3d\x0b\xe8\x52\x6a\x7d\x69\x20\xbf\x6a\x1b\x29\x92\x90\xb7\xd0\x98\x77\x3f\xf0\x43\xc8\x75\x8c\xe4\xfa\x93\xb2\xf3\xc8\xb3\x0d\xfb\x19\x51\x42\xc6\x37\x92\xc1\x61\x6d\xf4\x2d\xe2\x9a\x77\x09\x2c\x84\xad\xd2\x98\x84\x5d\xa3\xd9\x6f\x95\x6c\xa4\x96\xb4\xad\x6b\x23\x39\x25\xa8\x35\xcc\xa4\xc1\x02\x76\x01\xa9\x76\xc3\x48\x73\x10\x58\x1d\x8e\x16\xe7\x22\x24\xc3\x5c\x25\x9a\xcd\x04\xb3\x79\x87\xd3\x36\xbb\x3a\x1a\xc8\x96\x1c\xe6\x2e\xfe\xa6\x6d\x76\x70\xd7\xe4\xcc\x13\x0b\x66\xad\x74\x95\x02\xf1\x9a\x5d\xa4\x7c\xca\xe9\xc1\x63\x4e\xd7\xec\xe7\xca\x52\x84\x43\x4e\xb1\xc6\xe0\x3b\x42\x05\x07\x9c\x08\x45\xb3\x9f\xa3\xc1\x33\x98\x3d\x4e\x2e\xdd\x57\x30\x2c\xeb\x71\xda\x66\xaf\x36\xd1\xbc\xda\x4e\xe6\x5e\x42\x49\xcd\x5e\xce\x10\x18\xab\x01\xe0\xee\x41\x72\x3e\x78\x64\xf4\xcd\x4e\xaf\x7d\x1b\x98\xc7\xfb\xf1\x50\x99\xd1\x33\x3b\x3d\x1a\x14\x1b\x8c\x4a\xa0\xdc\xde\xc1\x32\xa3\x6f\x76\x81\x32\x24\x36\xf1\xa0\x84\xa3\xca\x91\x5c\x6a\xad\xc2\x5e\xf1\x28\x15\xa9\xff\x99\x2f\x91\xce\x7e\xa2\x1c\x75\x04\x5e\x10\xcf\x0f\xa3\x1f\xbd\xab\xb0\xcb\xa4\xaf\x9e\xd0\x62\x87\x5a\xe8\xa7\x2a\xad\xde\x61\x8f\x0c\x80\x08\x44\xca\x98\xb2\xf2\xf0\x54\xd8\x31\x3d\xd8\x5c\x8a\x17\x61\x01\x83\xdb\x40\xe3\xc4\x26\x17\x19\x02\xa5\xca\x29\x65\x2a\xa3\x5d\xf4\xb0\x23\xec\x60\x60\x20\x6c\x62\x1e\x21\x94\xc1\x6b\xc4\x21\xfb\xa3\xe2\xf8\xd2\xf8\xa7\xd1\x68\x9c\x00\x54\xe2\xd9\xeb\x4b\x0c\xdf\x20\xae\xc3\x7d\xb2\xc1\xad\xfb\x3a\x3c\xc7\xf3\x39\x6e\x3e\xd7\xe1\x31\x99\x91\xab\x8f\x75\x48\x5e\xd7\xd3\xd9\x72\x01\xdf\x20\x21\x7d\xfe\x97\xfe\xfe\xbf\x75\xa0\x8d\x97\x3a\x6c\x5e\x68\xf3\x7f\xea\x30\x5d\x92\xd6\xb4\x0e\xb3\x78\xfe\x88\x9b\xb3\x3a\xcc\xdf\xe0\x1b\xcc\x5b\x95\x05\x7e\x5f\xd4\xe1\x9f\x29\x7c\x83\x7f\xea\xb0\x9c\x4f\x1f\x56\xa4\xeb\x65\x1d\x5e\xf1\xc5\xd7\x3a\xac\x5e\x96\xf0\x0d\x56\x75\x58\x4f\x9f\x09\x3b\x6b\x4c\xf5\xf5\x05\x7f\xfd\x54\x87\x07\xd2\x78\xa8\xc3\xeb\x1a\x37\xde\x71\x2f\xc9\x33\xfe\xdd\xaf\x3a\x3c\x4d\x17\xf1\x4c\xdc\x78\xa7\x4c\xc2\xeb\x14\xd6\xef\xf3\xfb\xe5\x0c\xbe\xc1\x4f\xce\xb1\xb8\xf2\xaf\x3a\x60\x02\xe2\xf3\x7f\xd4\xf9\x9b\x4f\x1f\x1f\x67\x09\x3c\x2e\x37\xf0\x0d\xfe\x5f\x1d\xde\x5f\x97\xcf\xc9\xf3\x2a\x9e\xcf\x93\x05\x66\x01\x41\xfe\x8f\xdd\xba\x5a\xbe\x3d\xbf\xc0\xfd\x2a\x89\x37\x2f\xd3\xc5\x33\x7c\x83\x6a\x1d\xd6\xf3\xe5\x72\xa3\x5f\xad\xe5\x6f\x8d\x1f\xde\x36\x09\xc4\x0f\x0f\xc9\x02\xd3\xfd\xad\x0e\xcf\xab\xf8\xa7\x72\xe5\xaf\x3a\x3c\x4e\xe3\xd9\xfb\x66\xfa\x37\x1e\xec\x9f\x54\xc4\xff\x1b\x00\x00\xff\xff\x3b\x67\x12\x3d\x0c\x67\x00\x00") + +func 
fontsBigFlfBytes() ([]byte, error) { + return bindataRead( + _fontsBigFlf, + "fonts/big.flf", + ) +} + +func fontsBigFlf() (*asset, error) { + bytes, err := fontsBigFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/big.flf", size: 26380, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsBigchiefFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x58\x5b\x8b\x1b\x47\x13\x7d\x9f\x5f\x51\x1f\x0c\xec\x0c\x4c\xab\xbc\xfb\xc5\xc6\x06\xaf\x19\x27\xe4\x29\x04\x87\x84\x40\x1e\x06\x2a\xb2\x25\x25\xc6\x8e\xb5\xac\xb2\x0f\x81\xfe\xf1\xa1\x6f\xd5\xd5\x33\x3d\x37\x49\x8e\x59\xef\x4a\x3d\x3d\xa7\xaa\x4f\x9f\xba\x74\x1f\x3e\x1f\xee\xb6\x25\xbc\x84\x17\x70\xf7\x0c\x6e\x9f\xc3\x5d\xf1\xfe\xe3\x1f\x1f\xfe\x3c\x6c\x0e\x9f\x0f\x70\xfb\x0c\x5f\x7d\x03\x0f\x9f\x5e\xbc\xbc\xbd\x3d\xb5\xdb\x0f\xdb\xdd\x66\xf7\xb8\xfd\xb4\xdf\xec\x77\x4f\x0d\x3c\x3d\xec\xb6\x7f\xef\xe1\x16\x5f\x3d\x87\xd3\x3f\xef\xff\xdf\xbe\xfd\xf6\xfb\x9f\x37\x6f\xbf\xdb\xfc\xfa\x43\xf1\xe3\xe3\x69\x03\x3f\xed\xbf\xec\xf6\x8f\xa7\xe3\x17\xf8\x78\x82\x5f\xde\xc1\xc3\xe3\xf1\x69\x07\xc7\x03\xfc\xb5\xff\x5f\x41\x44\xd4\x16\x00\x00\x6d\x51\x96\x65\xd9\x16\x4a\x29\x15\x3e\xcb\xa7\xf6\xb7\x1b\xe2\x51\x40\xff\x00\xcd\x0c\xa5\x14\xda\x97\xed\x77\x33\x42\x47\x39\x39\xa0\x44\x20\xc6\x46\x84\x60\xd9\xbf\x39\x6a\xd9\x8d\x1b\x4b\xa8\xcc\x43\xff\xc9\xbf\xaf\x22\x08\x44\xa4\xd4\x0b\xe1\x47\x0a\xcc\x73\x88\x90\xc2\xac\xca\x2c\xa6\x0e\xf0\x5d\x30\x50\x99\x39\x35\x9b\xc0\xa1\x91\xc4\x4c\x34\xe4\x07\x0d\x44\xed\x56\x8d\x16\x10\xab\x1a\x06\x53\x7a\x1e\xda\x11\x22\xff\xac\x32\x7e\x39\xc7\xba\x48\x7c\xe7\x5c\xaa\x88\xa0\x8b\xfe\xd4\xc2\x19\x07\x68\xbf\x1b\xbb\x0e\xc1\x7f\x15\x0f\x86\xa4\x9b\x7f\x1b\x05\xbc\xf7\x81\x74\x64\xd2\x91\x49\x47\x7e\x0d\x23\x1b\x6a\x48\xba\x07\x53\x1b\x76\x30\x48\xc1\xf3\xc2\x96\x02\xcb\xac\x2f\x81\xc8\x80\x01\x24\xfe\x55\xd4\x21\x39\x6e\xba\xa8\x87\xc1\xbc\x3c\xf5\x85\x22\xed\x5e\xd6\x30\xb9\x3b\x89\x56\x7b\x4a\x46\xcf\x42\xaa\xe4\x81\x25\x1f\x78\xf7\xf7\xf7\x53\x96\xda\x02\x4a\xf0\xbf\x7c\xa4\xba\x40\x13\x0f\x98\xdd\x92\x37\xbd\x94\x4c\xca\x2d\xe3\x1d\xc3\xbe\x80\xfb\x82\xe9\x09\x81\x38\x42\x2c\x44\x1d\x61\x31\x02\x63\xd0\xa2\xdb\x3b\x19\x10\xc9\xe7\xac\x81\x38\x09\xa3\xd6\x84\xd8\x84\xda\x86\xde\xcf\x1b\xe0\x38\xf2\x0b\xf0\x06\x0c\x98\xa4\x26\x92\xb3\x76\x05\x23\x06\x80\x00\x45\x8e\xaa\x2f\xa0\x08\x12\x03\x3e\x9b\x50\x5c\x01\xc8\xe0\x11\xe1\xb3\x7c\x05\x44\x90\x72\xad\x9c\x05\x75\xa5\x15\xcc\x19\xc0\xcb\x0c\xd8\x05\x0c\x28\x4a\xb3\xcb\x65\x14\xe5\xe3\xc0\xa8\x08\xd4\x95\xe2\x20\x67\xc0\x40\x01\xc6\x3d\x58\x65\x40\x64\x78\x9f\x43\x8e\x65\xc8\x21\x49\xea\x1f\x4c\x14\x35\x02\x93\x1a\x11\xf3\x9f\xa5\xf8\xb5\x9b\xdc\x8d\x57\x72\x1e\xb2\x02\x50\x44\x73\xa5\x9f\x87\x3a\xe7\xcd\x1b\x15\xb2\xea\xc8\x0b\x14\x0b\xa6\x63\xb0\x02\x11\x85\x98\x94\x2c\xf3\xc6\x71\x49\xfe\xf3\xb2\xe2\xb5\x7a\x40\x54\xea\x9d\x03\xec\xcc\xbe\xc8\xbe\x63\x19\xa0\xd8\x63\xcd\x41\xa0\x59\x40\x9a\x93\x90\x5e\x9a\xa1\x85\xf4\xa3\x9f\x16\x16\x70\x10\x5b\x67\xe5\xb7\x89\x12\x30\x48\xa0\xe7\x06\x2f\x2d\xad\x31\xe7\xac\x60\x60\xe0\x26\x50\x94\x59\x81\x35\x50\x7e\x6d\x03\xd7\x49\x3f\xe9\x1e\x28\xd5\x9c\xb1\x07\x42\xea\x00\x24\x4c\x60\x4c\xd2\x89\x92\xb0\x57\x2a\x17\xf4\xc2\xa2\xd0\x84\x4a\x1f\xc3\x93\xa3\xb3\x2d\x4a\x42\x43\x3d\x63\xc4\x0f\xf9\xaa\x28\x3f\x4f\xa6\xfc\xea\x8c\x94\x4f\x29\x21\xcd\x4d\xe0\x63\x73\x13\x39\xef\xe4\xa6\x76\xcb\x0c\xa4\xb9\x65\xbe\x59\xc3\xc5\xa9\xc0\xfe\x67\x50\x0c\x19\x46\x03\xaa\x90\x63\x34\xc6\x2c\x83\xbd\x3c\xb3\x56\x25\x5a\x
a8\x44\x69\x50\x51\x25\x5a\xaa\x44\x2f\x56\xc9\x7f\xd7\x76\xf6\xb3\x66\x30\x60\xa1\xbe\x76\xc4\x62\x54\x8f\x58\xc1\x6f\xd2\x40\x77\xc9\x0a\x40\xac\x40\x8b\x15\xe8\xab\xb4\x3c\x9d\x3c\x7b\x07\xfd\xaf\xd9\x03\x33\x2c\x0d\x5d\xe1\xb0\x32\x9b\xc4\x44\x5a\x90\x49\xac\x5a\x95\xc4\x9c\xe7\x6c\x22\xca\x5f\x33\x38\x0b\x7f\xa0\xfa\x89\xc8\x25\x00\x09\xaa\x23\x2c\x6a\x06\x76\xa1\x1b\xce\xdc\x7a\x99\x1c\x5d\x5f\x14\x28\xd1\x81\x0e\x1d\x78\xd6\x81\x66\x3d\x75\x7d\x33\xce\xb1\x16\x1c\x5b\x54\x4e\xbd\x5a\xb6\xdb\x3a\xe1\x98\x0f\xf5\x15\x85\x28\x1b\x39\x94\xc8\x65\xcd\xb5\xf4\x55\x4f\x51\x66\x3c\x6b\x20\xaa\x3b\xfa\x33\xd6\x2b\x62\x78\x03\xed\x21\x2f\xb7\x89\x61\xc8\x75\xac\x0e\xaa\x0b\x40\x7c\x13\xd1\x8d\xde\x44\xe4\x3c\x02\xc0\xb0\x58\xee\xfd\xb9\xd9\xf4\x51\x46\x84\x59\x8f\x5a\xee\x90\x6d\xfa\x47\x3d\x7f\xd7\x36\x70\xad\x48\xef\xd7\xe2\xbc\xde\x78\x72\x78\xe8\x96\x5c\x30\x65\x34\xe6\xcf\x51\x1c\x9b\xb5\x8f\xcb\x6a\x52\x90\xbd\x67\x18\x8f\x94\x03\x20\x5c\x03\x94\xf3\xe8\xc6\x03\x95\x33\x21\x22\x62\x50\xe8\x96\xe1\x62\x56\xc6\xa4\x29\xc9\xbe\xb6\xc2\x43\x22\xaa\x97\x79\xc8\x42\x8b\xac\xfd\x6e\xb0\xc8\x76\xaa\xeb\x33\xef\x8a\xdd\x44\x12\x1b\xe5\xe3\x72\xf5\x6e\xe2\xcc\x6e\xfa\x01\xf7\xa7\xe1\xbb\x36\x79\x76\x48\xa6\xcc\xef\x5e\x93\xb9\x6e\x8e\xc7\x3e\x4c\x33\x8e\x48\x38\x19\xd2\x33\xed\x5e\x58\x5e\x25\x48\xef\xa6\x49\x4f\xfc\x47\x8f\xb3\x70\x89\x59\x5c\x83\x40\xe6\xc7\x81\x60\x60\x1a\xe7\x1b\xcf\x85\xdb\x3f\xb7\x6b\x67\xe9\x68\x55\x0c\x72\x46\xe3\xeb\x87\x00\x49\xc2\x3b\xe4\xd7\xf0\x1a\x1e\x86\x9b\xe0\x8a\x16\x00\xd5\x19\xce\x26\x2b\x72\x36\x67\x7b\x90\x8a\x4c\x58\x1b\x67\xea\xf1\xb2\x93\x03\x20\x8c\x5e\x58\x80\x32\x33\x6f\x81\x9e\x92\x9b\x01\x04\x4e\xe8\xd3\x7a\x9a\x2a\x43\xae\x75\x71\x5d\xd5\x25\x1e\x69\xd3\xf3\x58\x28\x8d\x09\xd8\x6c\xac\xf9\xce\x09\xdb\x22\xdc\xd3\x10\xda\xc3\x13\xe6\x62\x6d\x4c\x2f\x92\x97\x73\x32\x63\x5f\x78\xe0\x81\x24\xbd\x79\xa0\x88\x34\xda\xf7\xbc\xf6\xf8\xa2\xef\xa9\x72\x7d\x4f\x5b\xe8\xf4\xa7\x34\x3f\xe3\x65\x26\x9e\x1d\x44\x3f\x03\x6f\x7a\xfd\x0c\xe4\xfb\x19\xd1\x23\x3b\xb6\x3b\xc2\x94\x4c\xf6\x7a\x2e\x66\xc2\xc3\x06\x1a\xff\xd4\xb5\x98\x97\xdc\xc2\x85\xd1\xc6\x94\x0a\x48\x31\x8d\x83\x75\x5b\x88\xa3\x08\x1f\x38\xe6\x6a\x7e\x98\x10\x61\x41\xa0\xe2\xea\xd8\xea\xd3\xd2\x84\xca\x26\xea\xfe\x59\x8d\xd7\x14\xd0\xaa\xa4\xdf\x88\x5a\xdb\x0f\x91\x69\x8f\xa8\x77\x0f\x17\x8a\xa3\x3c\x15\x9b\xfe\xbd\xf1\x85\x52\xde\x87\xca\xeb\x44\xcc\x6d\x89\x35\xf4\x6f\x00\x00\x00\xff\xff\x11\x0a\x3c\x45\xb8\x1f\x00\x00") + +func fontsBigchiefFlfBytes() ([]byte, error) { + return bindataRead( + _fontsBigchiefFlf, + "fonts/bigchief.flf", + ) +} + +func fontsBigchiefFlf() (*asset, error) { + bytes, err := fontsBigchiefFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/bigchief.flf", size: 8120, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsBinaryFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x95\x5d\x6b\xdc\x48\x13\x85\xef\xfb\x57\x14\x43\x2e\x12\x88\x27\x7d\x5a\xdf\x77\x86\xbc\x37\x81\x97\xdd\x85\x2c\x7b\x2f\xdb\x9a\x8c\x60\x22\x65\x25\x39\xb3\xfe\xf7\x8b\xba\xaa\x74\xc8\xc6\x10\xeb\xd1\xd3\x55\x73\xda\xaa\x51\x5f\x6e\x97\xd4\xbf\x13\xec\x3f\x90\x87\xfc\x7f\x0c\xe1\x69\x9c\xfa\xe5\xed\x7c\xb9\x5d\xe4\xfd\xe7\x0f\x82\xae\x2b\xe5\xe9\x4d\xfe\x1a\x9f\xb7\x79\x91\x3f\xfa\xa5\x7f\xe9\xe5\xfd\xcf\x1f\xf9\xe2\x71\x9c\x2e\xe7\xd7\xed\xb2\x7e\x3f\x3f\xdf\x3e\x48\x57\x7e\x42\xfa\x14\xab\x10\x2e\xe3\xb7\xdb\xb0\x49\x3a\x43\x2e\xf3\xb4\x7d\x94\x71\x7a\xbe\xbd\xbe\x0c\xab\x7c\xf9\xfa\xbb\xfc\xbf\xdf\xc6\xe9\x01\x1f\x65\xf8\xc7\xee\x9e\x1e\xfe\x77\x92\xf9\xc7\x36\xce\x93\x3c\x5f\xfb\x65\x3d\x87\xf0\xf5\x47\xff\x3c\xac\xd2\x2f\x83\x4c\xf3\x26\xeb\x75\xbe\x4f\xd2\xaf\x62\x09\x45\xbe\x5c\x64\xbb\x8e\xab\x8c\xab\x2c\xc3\xdf\xaf\xe3\x32\xbc\x7c\xdc\xab\xa7\x6f\x83\x9c\xde\x3d\x9e\xe4\x69\xb8\xcd\xf7\xb0\xcd\x72\x8a\x11\x71\xff\x27\x8f\xa7\xb3\xc8\x67\x5d\xb3\x8b\xc7\xd3\xfe\x6b\x19\xbe\xcf\x3f\x07\xd9\xae\x83\x5c\xfb\xe5\x65\xdd\x3f\x59\x9e\x86\xed\x3e\x0c\x93\xdc\xe7\xe5\x65\x3d\x87\xdf\xe6\x6d\x5f\xd1\x6f\x62\x9b\xeb\x6f\xf7\xfe\x6d\xb5\xda\x55\x56\x8d\x7b\xbf\x0e\x93\x8c\x9b\xe8\xcd\x5c\xbb\x7f\x42\x2f\xd3\x70\x97\xdb\x38\x0d\xe7\x10\xfe\x5c\xde\x7c\xb3\xa7\x87\xef\x12\xff\x9b\xe1\xd7\xcf\xbf\x0d\xdb\x36\x2c\xab\xbc\x9f\x9f\xb7\x61\x5b\x3f\x9c\x43\x78\xf7\x18\x6c\x47\x10\xbf\x44\xe4\xe5\x71\x17\x31\xf2\x92\x77\xb9\x16\xc7\x5a\xc4\x63\x2d\xd8\x17\xec\x0b\xf6\x05\xfb\x82\x7d\xc1\xbe\x38\xfa\x22\x1e\x7d\xc1\xbc\x60\x5e\x30\x2f\x98\x17\xcc\x0b\xe6\x05\xf3\x82\x79\xc1\xbc\x60\x5e\x30\x2f\x98\x17\xcc\x0b\xe6\x85\xe7\xd5\x29\x89\xbc\xe4\x5d\xf0\x2e\xd7\x82\x6b\xc1\xb5\xe0\x5a\xf6\x05\xfb\x82\x7d\xc1\xbe\x60\x5f\xb0\x2f\xd8\x17\xec\xcb\xbc\x60\x5e\x30\x2f\x98\x17\xcc\x0b\xe6\x05\xf3\x82\x79\xc1\xbc\x60\x5e\x30\x2f\x98\x17\xcc\x0b\xe6\x05\xf3\x82\x79\xf9\xf7\x05\xff\xbe\x9c\x5f\x70\x7e\xc1\xf9\x05\xe7\x17\x9c\x5f\x70\x7e\xc1\xf9\x05\xe7\x17\x9c\x5f\x70\x7e\xc1\xf9\x05\xe7\x17\x9c\x5f\x70\x7e\xc1\xf9\x05\xe7\x17\x9c\x5f\x70\x7e\xc1\xf9\x05\xe7\x17\x9c\x5f\x70\x7e\xc1\xf9\x05\xe7\x17\x9c\x5f\x70\x7e\xc1\xf9\x05\xe7\x17\x9c\x5f\x1c\xf3\xfb\xeb\x0f\x52\xe3\x72\x5f\x87\xd4\x06\x1b\xea\x98\xb1\x73\xcc\xb6\x88\x86\xb9\x13\x0a\x38\xaa\x4d\xc1\x46\x5f\x6d\xe1\xa8\xb6\x34\xb4\xda\xca\x51\x6d\x1d\xec\x0b\xa2\xb6\x71\x54\xdb\x1a\x5a\x6d\xe7\x98\x6d\xa9\xa9\x6c\xd7\x28\xe1\xa8\x36\x19\x6a\x6d\x59\x38\xaa\xcd\xa9\xfc\xa9\xa1\xac\x1c\xd5\xd6\x86\x56\xdb\x38\xaa\x6d\x83\x7d\x25\xd5\x76\x8e\xd9\x56\xd1\x50\x6b\x2b\x38\xaa\xcd\xa9\xfc\xa9\xa2\x2a\x1c\xd5\x96\x86\x56\x5b\x39\xaa\xd5\x54\xbe\xdf\xaa\x71\x54\xdb\x1a\x5a\x6d\xe7\x98\x6d\xbd\xa7\x3a\xbe\x55\xa8\xe1\xa8\x36\x19\x6a\x6d\x5d\x38\xaa\x2d\x03\x78\x4a\xa0\xae\x1c\xd5\xd6\x86\x56\xdb\x38\xaa\x6d\x03\x78\x6a\xa0\xee\x1c\xb3\x6d\xa2\xa1\xd6\x36\x70\x54\xab\xa9\x7c\xbf\x4d\xe1\xa8\xb6\x34\xb4\xda\xca\x51\x6d\x4e\x75\x3c\xdf\xa6\x71\x54\xdb\x1a\x5a\x6d\xe7\x98\x6d\x1b\x03\x78\xca\xa0\x85\xa3\xda\x64\xa8\xb5\x6d\xe1\xa8\x36\xa7\x3a\x9e\x6f\x5b\x39\xaa\xad\x0d\xad\xb6\x71\x54\xab\xa9\x7c\xbf\x6d\xe7\x98\x6d\x17\x0d\xb5\xb6\x83\xa3\xda\x14\xc0\x53\x09\x5d\xe1\xa8\xb6\x34\xb4\xda\xca\x51\x6d\x1d\xc0\x53\x0a\x5d\xe3\xa8\xb6\x35\xb4\xda\xce\x71\xb7\x29\xc6\x00\x9e\x5a\x29\xc2\x51\x6d\x32\x84\xda\xc2\x51\xad\xa6\xb2\xfd\xa6\x58\x39\xaa\xad\x0d\xad\xb6\x71\x54\x9b\x53\xf9\xf3\x4d\xb1\x73\xcc\x16\xd1\x50\x6b\x01\x47\xb5\x29\x80\xa7\x5c\x42\xe1\xa8\xb6\x34\xb4\xda\xca\x51\x6d\x4e\xe5\xcf\x37\xa1\x71\x54\xdb\x1a\x5a\x6d\xe7\x9
8\x6d\xd2\x54\xbe\xdf\x04\x47\xb5\xc9\x50\x6b\x53\xe1\xa8\x76\x4f\x75\x7c\x7f\x53\xaa\x1c\xd5\xd6\x86\x56\xdb\x38\xaa\x6d\x03\x78\x4a\xa6\xfd\xdd\xce\x93\x32\xed\xef\x76\x9e\x96\x69\x7f\xb7\xf3\xc4\x4c\xfb\xbb\x9d\xa7\x66\xda\xdf\xed\x3c\x39\x53\x51\x1a\x5a\x6d\xe5\xa8\x56\x53\xf9\x7e\x8b\xc6\x51\x6d\x6b\x68\xb5\x9d\x63\xb6\x65\x4e\x75\x3c\xdf\x12\x8e\x6a\x93\xa1\xd6\x96\x85\xa3\xda\x32\x80\xa7\x6c\x2a\x2b\x47\xb5\xb5\xa1\xd5\x36\x8e\x6a\x73\xaa\xe3\xf9\x96\x9d\x63\xb6\x55\x34\xd4\xda\x0a\x8e\x6a\x35\x95\xef\xb7\x2a\x1c\xd5\x96\x86\x56\x5b\x39\xee\xf6\xdf\x00\x00\x00\xff\xff\xbf\x19\x3e\xf0\x66\x0d\x00\x00") + +func fontsBinaryFlfBytes() ([]byte, error) { + return bindataRead( + _fontsBinaryFlf, + "fonts/binary.flf", + ) +} + +func fontsBinaryFlf() (*asset, error) { + bytes, err := fontsBinaryFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/binary.flf", size: 3430, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsBlockFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x5b\x5b\x6f\xa3\xc8\xd6\x7d\xf7\xaf\x58\x0f\x48\xdf\xcb\xa4\xdb\x76\x2e\x8e\xa5\x4f\x47\x21\x36\x49\x50\x3b\xe0\xc1\xb8\x7b\xfa\xc9\x72\x77\x48\xc7\x1a\x07\x47\x36\xee\x51\x4b\xfd\xe3\x8f\xf0\xb5\xaa\xf6\xae\xa2\xc0\xe4\x1c\x9d\x1e\x69\x02\x01\x6a\xaf\x7d\xa9\xb5\x2f\x90\xe7\xf9\x73\x7b\xea\xe0\x1a\x57\x68\x77\xd0\x44\xab\x89\x26\x2e\x3b\x57\x8d\xdb\xf9\xe2\xfb\xdf\xf8\xf6\x0b\xf7\xf3\x24\x4d\xd1\x7b\x99\xbe\xbd\x25\xf3\x39\x2e\x3e\x76\xcf\x71\x76\x86\x55\xb6\x9c\xce\x7e\xbc\x64\xf8\x99\x2c\x57\xb3\x45\x8a\xc5\x33\x06\xc9\x34\x6d\xf8\xe9\xf7\xf9\xfa\x29\x59\xc1\x1f\x85\x18\x4c\xb3\x59\x7a\xd6\x6a\x3c\xcf\x7e\xcc\x93\x0c\xcb\x64\x9e\x4c\x57\x09\xda\x1f\x5a\xf9\x1a\xad\x36\xdc\xf5\x0f\xb4\xba\xdd\x8b\xc6\x30\x59\xbe\xce\x56\x9b\x95\x66\x2b\xbc\x24\xcb\xe4\xdb\x2f\xfc\x98\xfd\x4c\x52\x64\x0b\xbc\x2e\x9e\x66\xcf\xbf\x90\xbd\xcc\x56\x78\x5e\xa4\xd9\x1f\x98\xae\x30\x5f\xa4\x3f\xf2\x9f\xd9\x4b\xd2\xd8\xdc\x30\x4b\x96\xff\xb7\x42\x3a\x7d\x4d\xf2\x35\xde\xe6\xd3\xef\xc9\x13\x16\x29\xa6\xf8\xbe\x78\x7d\x4d\xd2\x0c\xf3\x59\x9a\x7c\x68\x34\x1e\xb7\x77\x3f\xe5\xfa\x0d\xa7\xeb\x39\x6e\xd7\xcb\x6c\x91\xe2\xff\x57\x8b\xf9\x3a\x9b\x2d\xd2\x9b\x64\xba\xcc\x5e\xe6\xb3\xf4\xef\x0f\x69\x92\xfd\x0b\xad\xf6\xc7\xee\x55\x0e\x64\xb6\xd5\x0e\x69\xf2\x0f\xde\xa6\xcb\xe9\x6b\x92\x25\xcb\xc6\x6a\xfd\xf6\xb6\x58\x66\xdb\x05\xef\xfc\xfb\x5c\xd7\x69\xfa\x94\x1f\x7e\x99\xa5\x1f\x80\xc7\xe9\x2f\x4c\xe7\xab\x05\xbe\x25\x58\xcd\x73\xbb\xcd\x7f\xe1\x75\x8f\xe2\x79\xb1\xc4\xb7\x24\xcb\x92\x25\xd6\xab\xa4\xb1\x78\xde\x2c\xff\xbc\x9e\xcf\xcf\xfe\x99\x3d\x65\x2f\x1f\xff\x4e\x96\xe9\xc7\xd5\xeb\x7a\xf5\x82\xe9\x3c\x4b\x96\xe9\x34\x9b\xfd\x4c\x56\x7f\xe0\xdb\x3a\xc3\x53\xf2\x3c\x5d\xcf\x33\x2c\xd6\xd9\xdb\x3a\xcb\x35\x0f\xc2\x18\xdf\x5f\xa6\xe9\x8f\xe4\xe9\x43\xa3\xe1\x00\xce\x8d\xfd\xff\x6f\x1a\x00\x1c\xe7\xa6\x81\xc9\x6f\xd0\x1f\xf2\xb5\xdd\x59\xfe\xef\x66\xfb\x5b\xe1\xf6\xe3\x4d\x38\xde\xe8\x38\x9b\x9b\xf5\x87\xdb\x7f\xe2\xa1\x70\x9c\xdf\xb7\x39\xdd\x2d\x8f\x9d\xac\xfd\x7f\x3b\x79\x25\xae\x92\x95\x41\x04\x4b\x42\x85\x25\x0f\x87\x87\xa5\xa4\xdf\x4a\xf2\x0e\xb2\x36\x8b\x49\x82\x1c\x61\x19\x48\xab\x4a\x0f\x6e\xaf\x4b\x2a\x08\xa6\x16\x4e\x95\x95\x75\x1a\x1d\xac\xbc\x95\xb2\x3b\x3d\x88\x95\x54\x82\x22\x89\xbf\x5a\x28\x58\x5c\xe5\x18\x0d\x37\x8d\xa3\xfb\xe9\x81\x12\x0e\xfc\x42\xf9\xf9\x1e\x3e\x3d\x10\x8c\x76\xf4\xc2\x8d\xe6\x5a\xf1\x81\x0a\x9f\x73\x26\xe4\xf8\xdf\x85\x83\x36\x1e\xa5\xab\xa0\x7b\xc7\xe8\x4c\x65\xf7\x6c\x35\x17\x4f\xa1\x13\xac
\xb8\x9a\x3c\xcb\xec\x4b\x59\x70\xf1\x81\x63\x6f\x3a\x2a\x87\xdd\x25\x8a\x06\x3a\xe3\x98\x4f\xf7\x9c\xa5\xfd\x61\x20\x39\x8d\x54\x09\x13\xdd\xb9\x8e\x6e\xb7\x41\xdc\x8b\xfb\x8d\x59\x8a\x8b\x38\xce\x55\xc2\x4f\x25\x20\xca\xae\xaa\xa3\x6c\x36\x84\x64\x01\x39\x1c\x05\x76\x90\x99\x43\x36\xca\x11\x9e\x1c\xa2\x0a\x35\x4b\xc6\x20\x82\xa4\xfd\xa3\x38\x43\x77\x85\x79\x86\x64\x00\x46\x10\x64\x62\x21\x7b\x9a\x53\x4f\x78\xa6\x50\x90\x12\xe7\x82\xe9\xe4\xcc\x73\xb2\x46\x82\xc3\x2d\x04\x29\xde\x2b\x27\xc8\xbc\x77\xab\x6c\x1b\xa8\x29\xcc\xe0\x33\x53\x14\xd6\xa3\xa1\xa5\x20\x55\xd1\xb2\x3e\xd3\xd3\x52\x21\x57\xb1\x5b\x1c\xe2\xbd\x96\x34\xed\x88\x11\x4d\x69\x08\x84\x9c\x88\x87\xa5\xbd\x6c\xe4\x7f\x0b\xc2\xaf\x9e\x0e\x04\xb2\xb5\xc3\xce\xeb\x49\xcb\x56\x99\x64\x15\xef\x72\x25\xa3\x48\x2e\xa6\x7a\x58\xd1\x40\x0a\x14\xc1\x0e\x0a\xa7\xca\x62\x21\x14\x1b\x72\x9e\x00\xd4\xe4\x71\x5c\x95\x5d\x53\xd2\x4a\xf2\x83\xfd\xbe\x60\x58\x8e\x3b\xd1\x38\xd5\x9c\x06\x18\x41\x85\x57\x88\x5d\xa9\xe9\x0b\x49\x53\x4d\xee\x90\x4a\xe5\x1a\x35\xd2\x9f\x54\xd0\xa8\x30\xdf\x38\x54\xa3\x72\xa9\xba\x92\x20\x79\x63\x28\x25\xd2\x29\x3e\xe2\xa3\xae\x9c\x8f\x84\x5a\xdf\xe4\x89\x3a\xc2\xdb\xd0\x50\x0a\xa5\x05\xd7\x9d\x32\x4b\x33\xa6\x22\x68\xf8\x5c\x45\x4c\x55\x3a\xca\x94\xe5\x24\xc4\xa2\xbf\xa1\xaf\x1b\xb5\xa6\x12\xe2\x42\x8a\x18\xe3\x89\x18\x16\xe5\x9c\x2f\xe9\x44\xfa\x76\x45\x49\x25\x0e\xf9\x53\xe8\xc4\xda\x0b\x26\xd6\xe5\x27\x04\x55\x04\x5b\xb0\xb9\xee\xa4\x52\x39\x55\x8a\xcd\x2b\x31\x85\x90\x63\x49\xfc\x91\x3a\x5f\xa1\xa6\x49\xd5\x11\x48\x55\x0d\x2b\x31\x47\x21\x17\xd6\xd5\x1f\xc1\x50\x91\x4d\x8c\x23\x06\xf3\x69\xe1\x00\xc2\x82\x84\xf5\xd6\xab\xa8\xa1\x62\x27\xd3\x29\x64\x32\x83\xda\xd5\x58\x69\xc8\x08\x67\x24\x72\x6a\x72\x40\x08\x18\x02\x48\x0b\xca\xce\x22\x45\x3a\x33\xf4\x5e\x0f\x05\x16\x1b\xfb\xe4\x60\x33\xf5\x1d\x66\x8d\xb7\x0f\x97\xeb\x59\xc4\x39\x8c\x68\xb1\xc2\x51\xa7\xb4\xba\xe3\xc8\xf9\x91\x1f\x3c\x4d\xd4\xfe\x9a\x84\x85\xb2\x57\x6c\x7d\x36\xb1\x9a\x24\xa9\x07\xac\x06\x52\xd4\x70\x05\xce\x69\xef\x16\x38\xdf\xef\x01\x54\x3f\x15\x00\xaa\x6d\x92\x52\xc6\x1d\xb5\x6d\x88\x5c\x44\x0f\x40\x07\xe2\x0c\x7e\x1d\xf5\x9f\x56\xf9\x3a\xa4\x7c\x2a\xd9\xaa\x94\x25\x5d\xfb\x64\x76\x42\xbf\xc5\x57\xc2\x35\x9b\x8e\x68\xc4\x97\x54\xe6\x5c\x5d\x41\x23\xb5\x71\x60\xeb\x5d\x29\x4b\x2b\xc4\xc8\xd0\xe2\x7b\x45\x1d\xcd\xcd\xf5\x44\x1d\xac\x9b\x88\x1d\xa1\x92\x33\xf9\x07\x33\x66\xd3\x4e\xd7\x6c\xd8\xce\x46\x55\xa9\x26\xad\xa3\x5f\xd2\x29\x57\xa8\x2a\x78\xdf\x83\xd6\xb6\x9c\x7b\x8c\x85\x0a\x5b\xbb\xc8\x02\x0a\xab\x12\x75\x83\xd5\x1e\x25\x3a\x41\x96\xcd\x51\xf9\xca\xb3\xb2\x46\x16\x9d\xd2\x7b\xef\xe4\xc3\x89\x51\xd0\x44\x6e\x9b\x25\xa2\x82\x34\x53\x2b\x35\x0b\x2a\xd4\x88\x74\x27\x15\x12\x95\xd4\x48\x0a\xb8\xf7\x6b\xb0\x04\xcb\x4a\x55\x90\xda\x69\x54\xaa\xdf\x29\x23\x88\xaf\x61\x00\x22\xe1\x3d\xfa\x1d\x06\x80\x0e\x88\x06\x10\x0f\x8c\x07\xc8\x01\x85\x09\x30\x0f\xdc\xe8\x1a\x6b\x76\x10\x16\xa8\x24\xa8\x8e\x8c\xab\x17\xc4\xb5\xf7\x45\xd5\x85\x51\x23\xb9\x06\x51\x0e\x69\x23\x45\xee\xa5\xbd\x80\x5d\x62\x63\x7e\x94\x16\x58\x08\x79\x17\x3c\x62\x08\x3a\x6a\xce\x3e\x86\x19\x57\x70\xe9\x4f\x88\x87\x8e\x1f\x61\xe9\xa2\xc9\x3c\x71\x2a\x19\x8e\xd5\x04\xf1\x41\x67\x24\x59\xbd\xa0\x52\x71\x7f\x8a\xa0\x9a\xbb\x81\xff\x35\xd3\xd9\x72\x93\x55\x21\x64\x7d\x45\xd9\x45\x1b\x41\xad\xab\x26\x10\x84\x67\xb7\x91\xe7\x7e\xc2\x68\xe8\xf6\xbc\x06\xb6\x9f\x2f\x96\xfd\x91\x2f\xd6\x02\xfc\xe0\xb3\x17\xc5\x5e\x1f\xde\x5f\xbd\x81\xfb\xe8\xc6\x7e\x18\xe0\xd1\x8d\x3e\xa9\x55\xb2\x7d\x43\xd0\xba\x6a\x03\x3d\x2f\x88\x31\xf2\xef\x03\xc6\x41\x12\x6f\x4c\xd4\xd7\x05\x1c\x45\xe8\x69\xf7\xb0\x74\x2
e\xf7\x1c\x18\x86\xe3\xa0\x2f\x09\x86\xfa\x85\x18\x25\x39\x48\xbd\x85\x62\xfe\x7d\x8a\x84\xf2\x69\x08\xa4\x32\x8a\x04\xa4\x10\x1e\xad\xab\x0b\xa0\x37\x8e\x22\x2f\xe8\x7d\x55\xb0\x41\x4d\xe9\x24\xf6\x48\x8e\x99\xfc\xd6\xa1\x27\x8f\x30\x8b\x4a\x62\x73\x6c\x97\xc0\x57\x2f\x20\xb0\x2c\x86\x8c\xe6\x79\xb7\xf9\xaa\xe4\x99\x1c\xc6\x15\x70\x1b\x85\x9f\xbc\x00\xb7\x6e\x54\x14\x67\xba\x50\xcc\x17\xea\x00\x23\xaf\xb7\x89\xe3\xbd\x4e\xea\x4c\x81\xc9\x59\x5c\x52\x53\x5c\x7c\xf8\xed\x21\xde\xae\x81\xbe\xef\x7a\x91\x37\xf2\x47\x26\xb6\xd9\x6d\xe0\xdd\xe7\x9e\x76\x27\x42\x04\x35\xd4\x70\xea\x02\xbd\x70\xf8\x35\xf2\xef\x1f\xc4\x4d\x26\x3a\x9f\x04\x2f\xe1\x9d\x09\xff\x25\x04\x18\x52\x52\x7f\x49\x1e\x67\x05\x31\x90\x6e\x1a\xad\x4e\x13\xb8\xf3\x1e\xfd\xc0\x0f\x3c\x84\x51\xdf\x0f\xdc\x01\xfc\xa0\xef\xf7\xdc\x38\x8c\xac\x13\x8f\x9e\x99\xd9\xea\x8b\xb1\x62\xa7\x05\x0c\xbc\xbb\xf8\x6c\x18\xfa\x41\xec\x07\xf7\xe8\x87\xe3\xdb\x81\x07\x37\xb8\x1f\x78\xf8\x73\x1c\xc6\x32\x1b\x2a\xc8\x40\xe3\x86\xa1\x2f\x6e\xee\x4f\x9e\x25\x2b\x4b\x38\xdb\xd8\x7c\xd3\x4e\x79\xc3\xa2\x5f\x61\x8d\x61\x21\x97\x81\x71\x0e\x8c\xc2\xbb\x18\x0f\x5f\x87\x0f\x5e\xa0\x37\xad\x8d\x3b\x1c\xc7\xbe\xa4\x6b\x75\x2e\x80\xc8\xbb\xf7\x47\xb1\x17\x79\xfd\x4a\xf1\xce\x90\xb6\x40\x9e\xf2\x2f\x6d\xee\x2c\x13\xef\x97\xc0\xa3\xdb\x8b\xc2\x40\xeb\x0f\x62\x8f\x53\x86\xed\xbc\xf3\xae\x80\xbe\x77\x1f\x79\x9e\xc0\x89\x85\xec\xe7\x30\x5f\x7a\x15\xbc\x4f\x68\x75\x3a\xc0\x70\x30\x1e\x9d\x3d\xfa\xc1\x78\x44\x2a\x00\xc3\xdb\x28\x54\xcd\x2a\x8a\x09\x25\x34\xd7\xc0\x68\x3c\xf4\xa2\x51\x2f\xf2\x87\x31\xe2\x2f\xa1\xa4\x9a\x42\xec\xd4\x0c\x44\x94\xfc\x35\x89\xa2\x7b\x57\x91\xf6\x10\x79\x9e\xf4\xe0\x44\xf3\x21\x8b\x31\xd3\x68\xe5\x5d\x37\x01\xb7\x37\x8e\x3d\xb8\xbd\xbc\xde\x6a\x28\x2c\xed\x1c\x3c\x56\xf6\xaf\x0c\x5a\xd7\x2d\xe0\xd1\xef\x45\x21\xa1\x9d\x8a\x95\x34\xd3\x63\x41\x7c\xa7\x77\x10\xdc\x06\x86\xfe\xa0\x17\x85\x5f\xf8\x92\x04\xea\x52\x24\x11\x29\xd4\x6a\x3e\x35\x04\xcf\xf5\x79\x6e\x83\x7e\x7f\xe0\xa1\x1f\xc6\x0d\x51\x7b\x52\x84\xec\x13\xbd\x23\xdf\xb2\x5b\x28\x2f\xff\xbc\xbe\x3f\x18\xb8\xd4\xd8\x85\x07\xec\xb7\xe9\xf9\xaa\x97\x72\xb4\x85\x81\x67\x78\xa0\xa1\x84\xb7\x23\xaf\x4e\x03\xe0\x2a\xa7\xad\x51\x6f\x3c\x30\xe6\xe9\x72\xdf\x19\x2b\x81\x63\x97\xa7\xaf\x3b\xc0\xa6\xd2\x29\x95\xa8\x1d\xf2\x37\x3d\x76\xa9\xd8\x9c\xc6\xc1\xa7\xac\x1d\xd0\x6b\xe0\xf3\x78\x70\xef\x46\xb8\x8b\xdc\x6d\x05\x1a\x06\x39\x40\x37\x8a\xbd\x48\x7e\x00\xea\x5f\x3b\x28\x09\x05\x80\xfc\xba\x43\xc0\xc6\x35\x00\x54\x31\xb1\x7a\xd3\x31\xaa\x28\x91\xb6\x2e\x8a\x8b\xf9\x7f\x1b\xcd\xbb\xbc\xe6\x0f\xee\xe0\x4e\xa7\x36\xaf\x34\xaf\x32\xd7\x02\x1d\x9f\xa3\xea\x32\x0e\x16\xed\x60\x50\x55\xab\x68\xae\x66\xb7\x49\xd5\xdc\x70\xfc\xde\xc5\x23\xa3\x8f\x85\xde\x52\xf5\xb1\xe8\x29\xe2\x63\xd0\x1a\x04\x52\xe7\x29\xea\xf6\x6e\x3e\xee\x8a\xc3\x82\x3f\xc7\xde\x88\xd9\x72\x6a\x1a\x85\x32\xd7\x61\x5a\x30\x43\x72\xcd\x85\xb6\x81\x81\x1b\xfb\x01\x7a\xee\xd0\x8f\xdd\x01\x06\x5e\x1c\x7b\x11\x5c\x7c\xf1\xe3\x07\xdc\x47\xee\x67\xef\x98\x4b\x95\xea\xc2\x48\x4c\x55\x86\x7e\xad\xee\xb9\x19\xcf\x26\x1b\x33\x10\x24\x47\xd7\x89\xe7\xc2\x8c\xa7\xe7\x47\xbd\xf1\xe3\xdd\xc0\xfb\xcb\xcc\xd1\xd4\x55\xda\x17\xf2\x16\xa0\x2e\xcd\xa0\x62\x7f\xd0\xf7\x1a\x5c\xe9\x4d\xa7\x3a\xb5\xe0\xb9\x32\xe3\xb1\x6a\xe1\xeb\x76\x5c\xc7\x8c\x29\xca\xd3\x9c\x7b\x1b\xee\xa3\xdb\x22\xb9\xd6\x00\xea\x5a\x07\xca\xa3\xc4\x26\x95\x62\x54\xd4\xee\x36\xa9\x62\x9f\x30\x9f\x91\x82\x99\x4a\xb0\x0c\xad\x7b\xf5\xdd\xea\x76\x35\xa8\x7b\xbb\x3d\x20\x57\x5e\xfa\xc9\x02\xf8\x57\xae\xc7\x2b\x24\x7f\x4c\x7e\x2b\x4c\xb6\x41\xd4\x6e\x36\x35\x88\xbc\x42\xd6\x62\xc3\x9c\x58\x8e\x
bb\x4d\xef\xd7\x76\xb3\x65\xc6\x63\x60\xad\xf7\xc1\xa3\x63\x75\xaf\x0c\x6b\xd5\x0d\x4a\x47\xed\x5e\x09\x96\xa8\x1b\x93\x8e\xde\x7d\x29\x90\x48\x79\x4b\x5b\x57\xb5\x82\x22\x37\x10\x04\x1b\xf9\x3a\x26\xf7\xd9\xc0\xa9\x5f\xbe\x8e\xb9\x7d\x3e\x50\xe8\x44\xa3\x0e\x10\x3a\xaa\xf6\xd9\xc0\xa0\x8b\xd5\x81\x41\xc7\xcc\x5e\xfc\x20\x46\x90\xf4\x16\x80\xd6\xcc\x0e\xdf\x30\x43\xcd\x28\x4c\xc3\xc6\x75\x3c\xed\xa6\x8e\x7a\x03\xcb\x4c\x4f\x90\x29\xb7\x95\x4b\x60\xed\x96\x8e\x78\xc3\x1a\xca\x45\x62\x25\xbe\x5a\x91\xf0\xe8\x88\x37\xac\xa1\x5c\xac\x82\x47\x47\xbc\x61\xb9\x72\xb1\x5e\x50\x3a\xe2\x0d\xad\xcb\xc5\x7a\xf1\xe8\x48\x37\xac\xa9\x5c\xac\x82\xe9\x12\x78\x1c\x0f\x62\x7f\x38\xf0\x7b\xae\xf4\x6e\x6b\x77\x93\xc2\x38\x3c\x21\xf3\xad\x99\xb2\xc2\x4e\xa2\x8e\x7a\x77\x56\x18\xc5\x51\xf8\x49\x2a\x0b\xf9\xe9\x1c\xd4\xde\x4f\x21\x67\x30\x09\xdd\x8a\x7b\x5a\x3a\x5a\x1e\x5b\xef\x75\xc5\x62\x27\xba\x48\x47\xd1\x63\xeb\xbd\x5e\x2f\x1e\x1d\x37\x8f\x2b\xb7\x86\xa7\x83\x6a\xeb\x08\x7a\x5c\xaa\xc8\xd2\xc1\xa8\x86\x49\x47\xd2\x5f\x89\xe3\x94\xe8\x91\x2a\xe4\xe3\xf5\x9b\x86\x3a\xa8\x81\x2e\x0c\x2d\xc6\x8a\xed\xb6\x8e\xb4\xe3\x87\x30\xda\x50\x40\x95\x0f\xc4\xc9\x15\xfa\xb9\x09\x31\xd4\x81\xa8\x47\x8f\xee\xe0\x00\x63\xf4\xe0\x46\x43\x8c\x88\x8d\x4d\x04\x68\x7d\x85\xfb\x08\xa6\xdd\xbe\x60\x81\xd8\xcf\x84\x84\x60\x62\xfa\x40\xb5\x14\x2a\x28\x3a\xda\x97\x26\x34\x24\x7a\x68\xeb\x58\x2f\x9a\x2b\x13\x1a\x79\xd3\x93\x6a\x54\x9f\xc9\x4e\x82\xd4\x31\x41\xb2\x9c\x06\xd5\x87\xe6\xda\x84\xa6\x44\x72\xb7\x24\x20\x0b\x44\x5d\x13\x22\x79\x12\x64\x72\x99\x52\x10\x9d\x02\xe9\xbc\xc9\x43\x22\x73\x20\x50\xce\x62\xfa\x10\x86\x0a\x74\x65\xc2\x6e\x05\xf2\x38\x3b\x13\xd2\x4e\xec\x37\x3a\xb4\x58\x1d\x74\x53\x21\x2b\x6a\x50\xd8\x9e\xb7\xa9\x5a\x98\xee\xa6\x42\xe7\x6d\x16\x0f\x3b\x13\x92\xdf\x0b\x5b\xcd\x41\x75\x68\x74\x1e\xe6\xc9\x9c\x4e\x84\x4c\x21\x56\x1f\x1a\x9e\xd1\x4b\xcd\x83\xea\x86\xc4\xd3\x7a\x99\x69\x50\xdd\x88\x78\x6a\xe7\x66\x41\x7b\x7a\xd8\x2f\x52\xfc\xc7\x58\xf2\x9b\xe0\x5c\x1a\xcf\xda\xea\xe4\x47\x54\xc8\x39\x64\xea\xed\x6f\x74\x07\x4a\x21\xb6\x91\xc6\xb3\xb2\xf5\x9c\x07\xaa\x9a\xc7\xfa\x9a\x39\xa4\x1f\x56\xe4\x10\x78\x1a\xb6\x9d\xf2\x58\x88\x2d\x42\x70\xc1\xb3\xee\x66\xc6\x33\x61\xde\xfb\x32\x7f\x01\x57\x44\xfd\xc5\xe5\xf0\x05\x4f\x9b\xb6\x13\x1d\x69\x65\x53\x01\xaa\xdf\x34\x12\x1a\x9e\x34\xed\xe7\x39\xb0\x29\x49\xad\x6d\xc3\x93\xa6\xfd\x34\xa7\x5e\x34\x3c\x69\x96\x9c\xe5\x28\x21\x7c\x22\x24\x9e\x34\xe9\x24\x87\x8c\x04\xb8\x95\x4f\x46\xc3\x13\xe6\x7f\x71\x8e\x73\xd1\x01\xfa\xfe\x67\x7f\xa4\x4e\x70\x4c\xdf\xc6\x09\x70\x26\x85\x5f\xbf\xd9\xf7\x95\x17\x3c\xe5\x6a\xe7\x3b\x85\xe5\xd1\xe4\x37\x3f\xc8\xb5\x34\x0d\x4f\xbf\x63\xf6\x4d\x87\x44\xa8\xe5\xa7\x27\xc5\xbc\x73\xc9\x53\x31\x9d\xe5\x58\x34\x75\x35\xa0\xe1\x39\xf9\xfd\x26\x39\x16\x90\x78\x62\x7e\xb7\x39\x8e\x05\x22\x9e\x9c\xe9\x14\xa7\x86\xf1\x9b\x84\x86\x72\x40\x8e\x86\x27\xe7\x93\x66\x36\xd2\x89\x6e\x80\x33\x11\x3f\xa7\x6c\x5f\xf2\x84\xfc\xf5\x3f\xe2\x26\xce\x30\xff\x0e\x00\x00\xff\xff\x3f\x39\x52\x57\x73\x5f\x00\x00") + +func fontsBlockFlfBytes() ([]byte, error) { + return bindataRead( + _fontsBlockFlf, + "fonts/block.flf", + ) +} + +func fontsBlockFlf() (*asset, error) { + bytes, err := fontsBlockFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/block.flf", size: 24435, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsBubbleFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x9b\xe9\x9b\x23\x55\xf5\xc7\xdf\xf7\x5f\x71\x7e\x3f\x17\x40\xa7\x97\x24\x9d\xa4\x5b\x11\xa7\xba\x52\x9d\x2e\x26\x5d\x15\x2a\x95\x19\x7a\x44\x31\xdd\x5d\xdd\x1d\x49\x57\xda\x2c\x33\xb4\x1b\xee\xfb\xbe\xef\x0b\x08\xca\x22\x08\x08\x28\xbb\x82\x80\x88\xb2\x09\x82\xac\xca\x2a\xa2\x6c\x82\x20\x8b\x3e\x09\x6b\xce\x3d\xa7\xbe\xf5\xf8\xc2\xa7\xf5\x79\x3e\xe7\x33\x37\xa7\xce\x3d\x75\xee\x4d\x5c\x6b\xad\x65\x1b\xc7\xd0\x34\xe5\x68\x86\x32\x79\xca\x64\x68\x8a\x32\x53\x99\x6c\x71\x6c\xae\xbf\xbc\xdc\x8a\x68\x79\x9b\xca\xad\x28\x8e\xc9\xde\x68\x6c\x6d\x45\xad\x16\x4d\x4f\xce\xe6\xc6\xdc\x78\xa5\xd5\x5f\x8d\xba\xb4\xb2\xd1\xe8\x34\x56\x7a\x51\xa7\x4b\x99\xec\xcc\x78\x36\x9f\x1f\x73\xe2\x8d\x46\xbc\x12\xad\xd2\x5a\xbb\x43\x95\x46\xaf\x19\x8f\x67\x77\xe4\x76\x4c\x0f\x5c\x87\xb6\x37\x62\xb2\xdb\xfb\x1b\x31\x1d\xbc\x32\xf8\xb3\x73\x65\xa5\xd9\x9a\x68\x77\xd6\x0f\x19\x1b\xa2\x2f\x0b\xa9\x1b\xf5\xba\xd4\xed\x6f\x6d\xb5\x3b\xbd\x68\x95\xda\x71\x6b\x9b\x9a\x6b\xb4\xdd\xee\x77\xa8\xbb\xd2\x89\xa2\x98\xd6\xda\x71\x8f\x56\xdb\x51\x77\x6c\xad\xb9\xde\x8a\x7a\xd4\x89\x5a\x51\xa3\x1b\x51\x76\x22\x4b\xe3\xe3\xe4\xb5\xf7\x45\x9b\xcb\x51\x87\x32\xb3\xb3\x85\xb1\x6a\xd4\xd9\x6c\x76\xbb\xcd\x76\x4c\xcd\x2e\x6d\x44\x9d\x68\x79\x9b\xd6\x9b\xfb\xa2\x98\x7a\x6d\xda\x6c\xaf\x36\xd7\xb6\xa9\xb7\xd1\xec\x0e\xbd\x3b\xa8\xd1\xa5\x56\x3b\x5e\x1f\xfc\xed\x6d\x44\x63\x43\xa0\x19\x75\x0e\xe8\x52\xdc\xd8\x8c\x06\x8e\xad\x56\x63\x65\xb8\x34\x6a\xd0\x4a\x7b\x73\x33\x8a\x7b\xd4\x6a\xc6\xd1\xc4\xd8\xd8\xe2\xf3\xf4\xea\xe0\x53\x57\x1b\xfd\x16\xcd\xf5\x3b\xbd\x76\x4c\x07\x77\xdb\xad\x7e\xaf\xd9\x8e\x77\x46\x8d\x4e\x6f\xa3\xd5\x8c\x8f\x9a\x88\xa3\xde\x21\x94\xc9\x4e\xce\x16\x06\x0b\x69\x3e\x9f\x5b\x8a\xa3\xfd\xb4\xd5\xe8\x34\x36\xa3\x5e\xd4\x19\x7b\x39\x11\xcb\xdb\x34\xef\x96\x07\x9f\xb6\x11\xaf\x0e\xfe\xeb\x9e\x66\x3c\x41\xb4\xd8\xd8\xa6\x46\xab\xdb\xa6\xe5\x88\xba\xad\xe6\xfa\x46\xaf\xb5\x4d\x9b\x2f\xae\x62\xf0\x2c\x96\xa3\xde\x20\xaf\xfd\x6e\x34\xd6\x5e\x1b\xea\xd7\xfa\xad\xd6\xf8\xfe\xe6\x6a\x6f\x63\xf2\xa8\xa8\x13\x4f\x76\x37\xfb\xdd\x0d\x6a\xb4\x7a\x51\x27\x6e\xf4\x9a\xfb\xa2\xee\x0e\x5a\xee\xf7\x68\x35\x5a\x6b\xf4\x5b\x3d\x6a\xf7\x7b\x5b\xfd\xde\xe0\x93\x7b\x7e\x38\x78\x54\xf1\x7a\xb4\x3a\x31\x46\xc7\xec\x7c\xe5\x7f\x76\x8e\x11\xd1\x91\x44\x83\xbf\x93\x74\xc4\xe0\xef\x81\xf4\x7f\x74\xd0\xe0\x7f\x1f\x71\xe4\x24\xc9\xc0\xff\x23\xe0\x55\x08\x78\x35\x02\x5e\x83\x80\xd7\x22\xe0\x00\x04\x1c\x88\x80\x83\x10\xf0\x3a\x04\xbc\x1e\x01\x3b\x10\x30\x8e\x80\x09\x04\x4c\x22\x60\x0a\x01\x19\x04\x64\x11\x90\x43\xc0\x34\x02\xf2\x08\x28\x20\xa0\x88\x80\x19\x04\xcc\x22\xe0\x0d\x08\x78\x23\x02\x0e\x46\xc0\x9b\x10\x70\x08\x02\xde\x8c\x80\x9d\x08\xb0\x10\x30\x87\x00\x1b\x01\x25\x04\x38\x08\x98\x47\x40\x19\x01\x0b\x08\x70\x11\x70\x28\x02\x76\x21\xa0\x82\x80\x45\x04\x78\x08\xf0\x11\x50\x45\xc0\x61\x08\x08\x10\x50\x43\x40\x88\x80\x3a\x02\x76\x23\x60\x0f\x02\x0e\x47\xc0\x12\x02\xf6\x22\xe0\x2d\x08\x38\x02\x01\x6f\x45\xc0\xdb\x10\x70\x24\x02\xde\x8e\x80\x06\x02\x96\x11\xb0\x82\x80\x55\x04\x44\x08\x58\x43\xc0\x3a\x02\x36\x10\xd0\x44\xc0\x3b\x10\x70\x14\x02\x5a\x08\xd8\x44\x40\x8c\x80\x36\x02\xb6\x10\xf0\x4e\x04\x74\x10\xd0\x45\x40\x0f\x01\x7d\x04\xec\x43\xc0\x7e\x04\x1c\x8d\x80\x6d\x04\xbc\x0b\x01\xef\x46\xc0\x7b\x10\xf0\x5e\x04\xbc\x0f\x01\x97\x22\xe0\x46\x04\xdc\x8a\x80\xbb\x11\xf0\x04\x02\x9e\x41\xc0\x1d\x23\x40\x26\x3b\x23\x41\xef\x67\xd0\xac\x04\x7d\x60\x14\xca\x4d\x49\xd0\x07\x19\x94\x91\xa0\x0f\x31\x28\x2b\x41\x1f\x66\x50\x4e\x82\x3e\xc2\xa0\x69\x09\xfa\x28\x83\xf2\x12\xf4\x31\x06\x15\x24\xe8\xe3\x0c\x2a\x4
[… remainder of the gzip-compressed escape dump for fonts/bubble.flf elided …]")
+
+func fontsBubbleFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsBubbleFlf,
+		"fonts/bubble.flf",
+	)
+}
+
+func fontsBubbleFlf() (*asset, error) {
+	bytes, err := fontsBubbleFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/bubble.flf", size: 19922, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
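Every embedded font in this generated file repeats the go-bindata pattern visible above: a package-level _fontsXxxFlf slice holding the gzip-compressed .flf file, a fontsXxxFlfBytes() accessor that decompresses it through bindataRead, and a fontsXxxFlf() accessor that wraps the bytes in an asset carrying synthetic file metadata. bindataRead itself is declared near the top of the generated file and is outside this hunk; for orientation, a minimal sketch of the standard go-bindata implementation it presumably matches (the package name and exact error wording here are assumptions) is:

package bindata // hypothetical package name; the vendored file declares its own

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// bindataRead gunzips an embedded byte slice; name is used only in error messages.
func bindataRead(data []byte, name string) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("read %q: %w", name, err)
	}
	defer gz.Close()

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, gz); err != nil {
		return nil, fmt.Errorf("read %q: %w", name, err)
	}
	return buf.Bytes(), nil
}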
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x56\x4d\x8f\xdb\x36\x13\xbe\xf3\x57\x3c\x30\x16\x88\x88\xd7\x5a\x6e\x92\x7d\x51\xb8\x08\x02\xb6\x40\x7b\xe8\xa9\x3f\x80\xe8\x84\xb6\x28\x5b\x8d\x2c\x05\x96\x8c\x6c\x0a\xfd\xf8\x82\x33\xa4\x2d\xab\xd9\x3d\xf5\xd0\xc3\x02\x4b\xad\xc9\xf9\x7e\xe6\x83\xac\xdb\xfa\x9d\xbf\xc3\x23\x1e\xb1\xd9\xe0\x01\x6f\x7f\x50\x3f\x9f\xdb\xed\x21\xf8\x0a\xdb\x6f\xf8\x2d\xd4\xf8\xbd\x1f\x3e\xfb\xee\xaf\x70\x5a\xe3\xdd\xfb\x3f\xcf\xdd\xe6\x51\xfd\xda\xec\xdb\x30\xe2\x14\xda\xe0\x87\x80\x77\xf7\x0f\x28\x4b\xfc\x74\xde\x9f\x87\x11\xff\x5f\xe3\xed\x66\xf3\x5e\x81\x88\x08\x98\x2f\xe4\x33\xf0\x92\xcd\x9c\xa4\x8a\x48\x70\x05\xa0\xf3\x02\xf2\x99\xa6\x22\x1e\x11\x91\x06\x0c\x91\xcb\x24\x05\x0d\xc2\x07\xe8\x82\x48\xe4\xa2\xc6\x74\x06\x8a\x5c\x9a\xa5\x4c\x64\x70\xcc\xa0\xa1\x55\x11\x4d\x1a\xfe\x46\xc1\xeb\xd7\x44\x7a\x31\x3b\xca\x8b\xc8\x28\xf5\xcb\xd3\x97\xd6\x77\x7e\x6c\xfa\x0e\x7d\x8d\xba\x39\x0d\x23\xda\xa6\x0b\x3f\xaa\x08\x27\x4a\xac\x8e\x7e\xdf\xec\xd0\x9d\x8f\xdb\x70\x5a\xa1\xee\x4f\xa8\x9b\x36\xa0\xa9\x42\x37\x36\x75\xb3\x63\x61\xe5\x63\xd8\x25\x86\x43\x7f\x6e\x2b\xf8\xf6\xab\xff\x36\x60\x1b\xf0\xc9\xbf\x59\xb3\x50\xd7\x7f\x55\x77\xc2\x34\x1e\x02\x56\x07\x7f\xaa\xb6\xad\xef\x3e\xaf\x22\xde\x5f\x4e\x4d\x37\x0e\xf0\x03\x3c\xf8\x74\x8d\xed\x79\xc4\xce\x77\x6f\xc6\xa8\x66\x38\x9e\x87\x43\xa8\xd4\xa3\x68\x38\x84\x66\x7f\x18\xa3\xc7\x1e\xbb\x83\x3f\xf9\xdd\x18\x4e\x2f\x12\xd7\xe8\xfa\x11\x4d\xb7\x6b\xcf\x55\xd3\xed\x51\x85\x61\x17\xba\x2a\x9c\x06\xb5\xd9\xb0\xd8\xd1\x3f\x71\xe4\x68\x43\xb7\x1f\x0f\x28\xc2\x53\x66\xde\xf5\xc7\x63\xe8\x04\x98\x41\xe3\x7f\xf0\xa8\xcf\xd5\x3e\xa0\xf6\xbb\xb1\x3f\xa9\x07\x31\x5c\x85\xda\x9f\xdb\x51\x9c\x3d\xf6\x55\xe0\xc0\xc7\x43\x33\xa0\xee\xbb\x11\x45\xdb\x7c\x0e\x58\x95\x47\x3c\xac\xd0\x77\xac\xd6\x77\x15\xab\xd5\xea\xed\x7b\x56\x22\x40\x47\xef\x6f\xac\x2a\x75\x77\x67\xe7\xcb\x2a\xe3\xac\xd2\x85\x55\xce\x58\x55\x68\x6b\xd5\x34\x59\x05\x5c\x96\x55\x98\x30\xc1\xaa\x72\x2a\xa7\xf2\xf2\x4f\x0e\xad\x02\x4d\x04\xab\x0c\x26\xd2\x56\x39\x9a\xe0\xac\x2a\x68\x22\xc3\x44\xae\xea\x78\xa0\x0d\x34\x2b\x34\x30\x51\x00\x05\xd7\x54\x64\x12\x16\x14\xc2\x60\x88\x3d\x21\x32\x2e\x7a\x67\x15\xd2\x9f\x70\x46\x0e\x68\xab\x0a\x14\xf1\xb7\x13\x15\x62\x04\x2e\x1e\x69\x26\x93\x11\x09\x58\xe5\x26\x63\x95\x99\x9c\x6c\xf9\x4b\x4c\x00\x4d\x60\xef\x0b\x6e\x3f\x1d\x49\x13\x4d\x89\x49\x56\xa1\x63\x70\x22\x25\x32\x24\x12\x24\xfc\xb8\xe1\x4e\x12\xc9\x86\x84\xab\xd9\x65\x03\x71\x2a\xc5\x41\x12\xb3\x89\xcd\xca\xae\x73\xff\x71\x40\x94\x5d\xa7\x14\x2d\x9f\xc7\x3e\x67\xd4\x52\xc0\x94\x70\x15\xf9\xa8\xc8\x24\xb7\xae\x0c\x42\xd7\x2c\x26\x69\x21\x49\x0b\xb2\xf9\x7b\xce\x6c\x0c\x5f\xa2\x89\x5e\x5c\xa5\x27\x70\x90\x13\x2d\xa5\x93\xef\x9a\x13\xcf\x44\x97\x89\xb7\x76\x17\x71\x27\x22\x98\xf8\x7d\xc9\xcb\x29\x60\xa4\x4a\x32\xbe\x85\x9e\xe1\x3b\xdb\xa8\x0b\x5c\x62\xd0\xaa\x0f\xf8\x20\xb5\xe1\xec\x3f\x12\x36\xdf\x59\x25\x38\x38\xc1\xf0\x23\x3e\x5a\x65\x92\xc1\x25\x7e\x46\xd0\xd7\x8b\xd4\xb8\x88\x52\xa1\x8d\x64\x2e\x57\x57\x2a\xaf\x34\x92\x23\xa7\x8c\x59\x56\xa8\x67\x39\x14\x38\x24\x64\x19\xcf\x29\x87\xe6\x52\x26\x92\x5c\x92\x92\x97\xbd\xa3\xb9\x86\x8b\x0a\x2e\xfe\x54\x47\xa2\x04\x37\x66\x12\x02\x3a\xce\xfd\x45\xa9\x3c\xc3\xa0\x67\xe5\x7a\xeb\x47\x79\x9f\x6b\x35\xb5\x1f\x44\x05\xdf\x48\x29\x96\x42\x1a\x1f\xc5\xad\x99\x5c\x6c\x11\x88\x1b\x3f\x32\x07\x32\xcb\x7d\xc9\x97\x97\xd4\x48\xae\xcc\x8b\x25\x93\x0c\x65\x3b\x79\x14\x08\xf8\x72\x51\x0a\x22\xb4\x08\x37\xe5\xaf\x00\x5c\xea\x2e\x1d\xfb\x58\xf4\x18\x67\xae\x43\x45\xd8\x5c\xf1\x9c\xa9\x59\x06\x85\x25\x5e\x91\xd9\xda\x33\x59\x66\xd0\x66\xe8\xb
e\xac\xc5\x38\xf7\xfd\x5a\xe1\xfe\xb8\xf1\x45\xba\x47\xc6\xf0\x6d\xc7\x2e\xa1\x87\xc0\xca\xd3\x04\x4b\x4c\xf8\xbd\x90\x80\x9b\x05\xb3\xc4\x44\xe0\x77\xe0\x0e\x17\x24\x73\x29\x5c\x58\x9c\x59\xa2\x4b\xc6\x3d\xa3\x49\x5f\x13\xf0\xb2\xa9\x99\xd3\x97\xa8\xf2\xb0\x59\x66\x3a\x76\x27\xf2\x87\x28\x3b\x28\x77\x05\xb7\x92\xb4\x4d\xba\x47\xa4\xfc\x30\xa5\x0f\xd1\x64\x59\xaf\x74\xee\x1f\xd7\x89\x3f\x1b\xfc\xb3\xed\x72\xba\xb8\xdb\xcb\xeb\x75\x2e\xbc\xce\x85\xd7\xb9\xf0\x1f\x98\x0b\xeb\xd2\x2a\x8a\x0f\x1f\x7e\xfd\xe0\x53\x99\x9e\xc1\xf1\xe5\x1b\x97\x8b\xe0\x94\xf7\xcc\x40\x89\xab\x7c\x73\x79\x4f\x9a\xf8\x40\xcd\x6d\x6f\xaf\x3d\xae\xfe\xfd\xcd\xdf\x01\x00\x00\xff\xff\xd8\x64\xdc\x2f\x8d\x0e\x00\x00") + +func fontsBulbheadFlfBytes() ([]byte, error) { + return bindataRead( + _fontsBulbheadFlf, + "fonts/bulbhead.flf", + ) +} + +func fontsBulbheadFlf() (*asset, error) { + bytes, err := fontsBulbheadFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/bulbhead.flf", size: 3725, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsCalgphy2Flf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x5c\xeb\x8f\xdc\xb6\x11\xff\xae\xbf\x62\x6c\x0a\x48\x53\x38\x47\x3b\x4e\x9c\xc6\x48\x82\x6d\x81\xba\x4d\xd1\x22\x05\x12\x34\x5f\xfa\x85\x27\x71\x77\x85\xd3\x4a\x6b\x3d\x7c\x39\xa0\xe8\xdf\x5e\x50\x7c\xcd\x90\xd4\xde\x3e\xb4\x57\xa7\xbd\x60\x1d\x72\x28\x51\xe4\x3c\x7f\x33\xd4\xee\xba\x5e\x7f\x2e\x72\xf8\xfc\x25\xbc\x7a\x03\xaf\xbf\x80\x97\xf0\xfa\x25\xbc\xcc\xfe\xd6\x96\xd5\xba\x92\x25\xdc\x3e\xc0\xdf\xc5\x58\xc3\x1f\xc6\x6e\x68\x1b\xf8\x51\xee\x07\xb9\xbb\x95\x1d\x7c\xfd\x06\xd6\x5d\xbb\x83\x42\xd4\xd5\xa6\x13\xfb\xed\xc3\xcd\xba\x5e\x83\xe8\x41\x40\x29\x77\x6d\xd3\x0f\x9d\x18\xaa\xb6\x81\x76\x0d\xd9\xbb\x6a\xf3\x73\xd5\xc0\x5a\x8a\x61\xec\x64\x7f\x03\xa0\x9f\x50\xe8\x4b\x86\xb6\xbd\x03\x71\xdb\x8e\x03\x7c\x09\xbb\xaa\x19\x07\xd9\xc3\xd8\x57\xcd\x06\xf4\x9d\x9f\xf4\xf0\xbc\x93\xc5\x20\x9a\x4d\x2d\x21\xeb\xa5\xe8\x8a\x2d\x88\xa6\x84\x4e\xee\x6b\x51\xc8\xe7\x76\xee\x1b\x80\x9f\xb6\x12\x8a\xad\xe8\x44\x31\xc8\xae\x87\x7d\x27\x3f\x54\xed\xd8\xd7\x0f\x50\xb4\x4d\x5f\xf5\x83\x2c\xd5\xa2\xda\xa6\x7e\x80\xe7\xbf\x7d\x0e\x99\xbf\x58\xdf\xdd\x4f\xf7\x37\x1b\xd9\x43\x27\xdf\x8f\x55\x27\x4b\x18\xee\x5b\xe8\x07\xb9\xef\xdf\x66\xd9\xbb\xaa\xeb\x87\x17\x20\xea\x1a\xaa\xa6\x1f\x44\x53\xc8\x5e\xcd\xa8\x26\xbb\x97\x9d\xb4\x8b\x2a\xe1\xbe\x1a\xb6\xf0\x9c\x3d\x87\xa1\x85\x4d\xf5\x41\xc2\xb0\x95\xb0\x6e\x9b\x41\x71\x49\x74\x77\xb2\x83\x4c\xec\xf7\x52\x74\x6a\x92\x1b\x80\x2c\x83\xb9\x3f\x96\x26\xf3\xec\xa7\xad\x6c\xe2\xd5\xbc\x55\x37\xc4\xab\x79\x0b\x1c\x00\x7e\xf3\x47\x51\x6c\xd5\x92\x87\x69\xb7\x95\x92\x5a\xe6\x19\xdc\xae\x61\xdd\x8e\x1d\xf4\xef\x47\xd1\xc9\xfe\x05\x54\x4d\x51\x8f\xa5\x92\x87\xe2\xc3\x6d\x2d\x9a\xbb\xfe\xe6\xd3\x2c\xfb\x7d\xdd\xb7\x20\xca\x52\x96\x20\x60\x2f\xbb\xaa\x2d\x5f\x80\xfc\xa5\xa8\xc5\x4e\x4b\x76\xdf\x56\x8d\x62\x55\x53\xc2\xfb\x51\xf6\x13\x6d\x27\xba\xbb\x89\xa2\x99\x5c\x4e\x4c\xc9\x4a\xb9\x16\x63\x3d\x40\xbf\x1b\xfb\xed\xae\x2d\xe5\x4d\x96\xfd\xd0\x55\x9b\xaa\x11\x75\xa8\x65\x45\xbb\xdb\xb7\xbd\x56\xcf\x7f\x54\x4d\x23\x1f\xe0\xa7\xad\xa8\xe0\x87\x62\x68\x27\xe5\xfc\x22\x83\xcf\x0e\xfd\x7d\xfb\xcd\x37\x46\xb3\xe0\xcf\x0f\x7b\x09\xdf\x7d\xf7\xed\xc1\xeb\x3f\xcb\xac\x5e\xb4\xbb\x9d\x6c\x86\x5e\x73\x76\x23\x1b\xd9\x09\xa5\x4f\x62\x1c\x5a\xb5\xe5\x42\xd4\xf5\x83\x5a\x96\x9e\xfd\x05\x34\xed\x30\xed\x4f\x8c\xc3\xb6\xed\x0c\xcb\xb3\x49\x07\x6e\xc0\xae\xa1\xea\xa7\x6b\x7e\xae\x9a\xb2\xbd\xef\xa1\xa8\xdb\
+
+func fontsCalgphy2FlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsCalgphy2Flf,
+		"fonts/calgphy2.flf",
+	)
+}
+
+func fontsCalgphy2Flf() (*asset, error) {
+	bytes, err := fontsCalgphy2FlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/calgphy2.flf", size: 24255, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsCaligraphyFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff…")
[… gzip-compressed escape dump for fonts/caligraphy.flf elided …]
+
+func fontsCaligraphyFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsCaligraphyFlf,
+		"fonts/caligraphy.flf",
+	)
+}
+
+func fontsCaligraphyFlf() (*asset, error) {
+	bytes, err := fontsCaligraphyFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/caligraphy.flf", size: 23989, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
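The asset and bindataFileInfo types that every accessor above constructs are likewise declared near the top of the generated file, outside this hunk. In standard go-bindata output they look roughly like the following sketch (the field names match the composite literals above; the method set is an assumption based on bindataFileInfo standing in for os.FileInfo, so embedded files can be stat-ed like on-disk ones):

import (
	"os"
	"time"
)

// asset pairs decompressed file contents with synthetic metadata.
type asset struct {
	bytes []byte
	info  os.FileInfo
}

// bindataFileInfo implements os.FileInfo for an embedded file.
type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

func (fi bindataFileInfo) Name() string       { return fi.name }
func (fi bindataFileInfo) Size() int64        { return fi.size }
func (fi bindataFileInfo) Mode() os.FileMode  { return fi.mode }
func (fi bindataFileInfo) ModTime() time.Time { return fi.modTime }
func (fi bindataFileInfo) IsDir() bool        { return false }
func (fi bindataFileInfo) Sys() interface{}   { return nil }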
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\x4d\x6f\x24\x35\x10\xbd\xcf\xaf\xa8\xc3\x5e\x91\x97\x11\x12\xcb\xad\x57\x48\x7b\x81\x53\x84\xc4\xd1\x09\xd2\x04\x45\x8a\x40\x82\x5c\xe0\xd7\xa3\x99\xb6\xcb\xef\x55\x3d\x77\xcf\x0c\xd1\x66\x93\x6e\xbb\xbb\x5d\xdf\xf5\xaa\xec\x7d\x7e\x7d\x3e\x3e\x7d\xb0\x4f\xf6\xbd\x1d\x3f\xda\x37\xdf\xda\xf1\xf0\xe3\xe7\x5f\x7e\xfd\xfc\xf3\x4f\xf6\xdb\x3f\xf6\xf0\xe7\x1f\xf6\xe5\xaf\x97\xb7\x7f\xed\x53\xf9\xe1\xbb\xc3\x97\x97\xdf\x5f\x4f\x6f\xf6\x70\x7a\x3d\x3d\xfd\x7d\xb2\x63\xf9\x78\xf8\xb0\x88\x5f\x5b\x96\x43\x2d\x85\x2f\xd6\x2e\x66\xd6\x9e\x9d\x47\x97\x87\xb5\xf8\xcd\xec\xf2\x7e\xff\xd6\xee\xfd\xe7\x3c\xad\xa5\x58\xbb\x5e\x38\x9c\x7f\xec\x72\x4f\x6f\x6f\xfc\x18\x19\x5d\xe6\xfd\x4d\x5d\x17\x5d\x28\x04\x39\xda\x3a\x7f\x7b\xfe\x4b\x6f\xa3\x26\x8d\xa4\x7f\xcc\x42\xc0\xd2\xc6\x6f\x10\x94\x6b\x49\x62\x9b\x4a\x7c\x56\xbb\xbb\x05\x49\x75\x2f\xd5\x24\xf1\xfa\xa6\x39\x6c\x7e\x69\xe2\x35\x72\x8d\x7e\xbc\xa1\x65\xc0\xbb\xe4\xa9\xae\xb3\xba\xb7\xf7\xe4\x2c\x37\xaa\x32\x0e\xea\x55\x0b\xc5\x80\xb5\x48\x10\xef\xc5\x7a\xa6\x2f\xc2\x04\xa3\xd3\x72\x60\x74\x9b\x77\xb6\x5b\x9f\x65\x6a\xc8\x68\xe3\xe2\xae\x72\xdb\xa4\x3b\x71\x17\xef\x6f\xe2\x84\xba\x07\x43\xdd\x1b\xd1\x51\xe9\x90\xfc\x48\x0a\x08\x60\x94\xb9\x00\x4e\x6f\x84\xa5\x71\x88\x29\x7f\xa2\xed\x52\x5c\xea\x7b\xb0\x69\x72\xe0\x9a\x5f\x9d\xa4\xb2\x16\xeb\x85\x76\xe9\x39\xbb\x82\x96\x08\x07\x70\x2c\x90\x72\x63\x73\xd0\x52\xe2\x74\x68\x28\x31\xec\xa4\xcd\x39\x3c\x2b\xbb\x33\xf0\xc3\x78\xef\x79\xa5\xa8\x6c\x6a\x42\x2e\x6c\xb9\xb3\xa7\x49\x04\x44\x95\x9a\x60\xe4\x1c\x48\x9d\x97\x99\x0a\xaa\xe1\xcc\x58\x2c\x02\xb2\x57\xaf\x31\x57\xe7\xc4\x74\xbc\xa5\x49\x9f\xe8\x24\x88\xc1\x97\x35\x01\x53\x6c\x39\x3f\x39\x78\xd8\x9c\xb5\xab\x45\x42\x1a\x1b\x5b\x3a\x7f\x8e\x30\x01\x6b\x76\x3f\x04\xf8\x73\xfb\x87\x56\x80\x23\x8b\xa5\x14\x91\x25\x6c\x03\x83\x5a\x28\xfd\xe5\x93\x34\xb8\x5d\x80\x1d\x15\x88\x70\x20\x36\x7c\x41\xcf\x6a\xf4\xbe\x7c\x16\xaa\x9d\x53\xc0\xc4\x31\x6a\x93\xb0\x72\xb6\x6c\xa9\x0c\xc2\x8f\x8f\x8f\x09\xcb\x63\x59\x05\x27\xb2\xf5\x6a\xb1\x80\x04\x2a\xcc\x46\x14\x42\xc3\xc7\x65\x26\x96\x89\xe4\x6e\xf2\xd5\xc0\x34\x5c\x3a\x56\x35\xf8\x81\x6f\x62\x8a\x40\xdf\xa1\x52\x3b\x64\x18\x35\x6e\xe4\xee\x30\xc6\xef\xcd\xb3\x3a\x30\x70\x98\xf7\xc5\xa4\x3c\x32\x9b\x8c\xb1\xd4\x45\x77\x39\x83\x42\x42\x27\xfc\x2e\x8c\x50\xf2\x83\x98\x70\xef\x42\x38\x45\xd8\x82\xb6\x1a\x75\x39\x45\x84\x9c\xf4\xad\x83\x1b\x09\xe2\x4c\xc0\x69\x40\xcc\x8d\xc9\x28\x1a\x57\xae\x49\x8c\xf6\x2e\x90\x58\x15\x9b\xd0\xbd\x51\x2d\xd8\x47\x71\x80\x2d\x18\x22\xdd\x48\xd0\x02\x20\x62\x79\x36\x8b\xcf\x66\x15\x57\xd9\xf2\x1a\x67\xdf\x1a\x66\x6d\xd2\xf5\x1d\xbe\x1e\x5a\x54\xf0\x01\x83\x9c\xe3\x90\x20\x45\x53\xb3\xa4\x51\xd1\x7c\xc1\x78\xcc\x95\x99\x72\xbe\x1a\xb1\x4c\x1c\x07\xd6\xe4\xce\x27\x58\x3f\xcb\xaf\xe7\x86\x2d\x81\x29\xfa\x42\xe3\x90\xb1\x0a\x73\x72\x56\x6f\x81\xe0\x54\xc3\xa0\xe0\x9e\x7b\x92\xb7\xaa\x77\xb0\x50\xb0\x62\x9e\x9f\x7d\x4f\x9a\xb1\x6e\x6a\x66\x31\x3c\x63\x5d\x8f\xee\xd0\xbe\xec\xd2\xb0\xf9\x42\x55\x0d\x55\x3d\x43\x3f\xd2\x49\xbe\x32\xee\x67\x98\xd8\xfb\xcc\x88\x61\x76\xc6\xdd\x93\x2b\x50\x59\xd8\xcc\x9d\x8d\x81\xad\x9b\x0b\x4f\x48\x56\x2d\x04\xbf\xaa\x40\x4a\x90\x8d\x7c\xab\xdc\x13\x24\x90\x40\x60\x18\xc5\x4c\xe7\xeb\x4c\x00\x34\x83\xab\x5c\xa1\xf7\x41\x5d\xd2\x3b\x09\xe1\x32\x6a\x25\x43\x32\xfa\x8c\xe1\x2d\x33\x66\x98\x36\x64\xe0\x54\xe1\x51\x33\xb1\x69\xa9\xa1\x15\x28\xa2\x94\x00\xc3\xa6\xe8\xee\x0d\x8f\xc2\x14\xbb\xb8\x41\x48\xb2\x42\x4a\x4d\xf7\x2d\x78\xdc\xb9\x7d\x0b\x12\x19\x27\x6b\x2d\x65\xb7\x4c\xdf\x33\xe9\xb3\xdb\xef\xed\xf4\xc5\xbb\x9c\xb
c\x43\xe4\x0b\x21\x5c\x1c\x0f\x4d\x3d\xcd\x54\xc3\x1c\x62\xb5\x64\xd4\x04\xe9\x26\x43\xb2\x22\x37\xda\xf4\x41\xb6\x51\xd8\x50\x56\xec\x46\x9d\x0d\x3c\x19\xdf\x30\xa1\x79\xdf\x87\x60\x22\x3b\x40\x76\xfa\xf5\x36\x8d\x1b\x09\xda\x22\xd9\xe8\xa3\xa7\x45\x7d\xe9\x1b\xce\xf3\x32\xff\x8f\x04\x42\xab\xc9\x20\x49\x1a\x87\x8c\x64\xd1\x23\xf0\xb4\x1b\x8a\x32\x13\xac\x71\x18\x99\x8b\x23\x3a\x86\x49\xd6\x2d\xa6\x6c\x8a\x27\xef\x3b\xfd\xbc\x2b\x21\xe7\xf2\xbe\xf6\x9e\xcb\x4c\xe0\xb0\x89\x6f\x26\x2f\x14\xc1\x33\xd1\xdd\x66\xf3\xeb\x88\x49\x7d\xc8\x60\x64\x50\xaf\x70\xe0\xa1\xe0\x1a\x3d\x5d\x95\xac\x51\xc8\xed\x70\xa8\xf1\xc0\x20\xc5\x00\x99\x27\x11\xbf\x1b\x65\x02\x75\xd9\xbf\xa5\x78\x43\xb9\x07\xae\x0c\xc2\x11\x55\xa4\xc3\x35\x41\x03\x5c\x41\x82\xc3\x11\x95\x3a\x63\x1f\x95\xc9\x61\x59\x84\xab\x3c\xa8\x21\xff\x35\xee\xd5\x42\x47\xc4\x1d\x39\x0c\x90\x22\xe2\xd4\xa2\x80\xe1\x3a\x14\xa1\xe2\x3a\x37\x7e\xdc\xaa\xe9\x91\x35\xb3\x4d\x00\x34\x96\x49\xae\xb3\xb2\x00\xb9\x90\xa2\x8f\xd1\x75\x56\x70\x62\x56\xce\xab\x30\xec\x8f\xd9\xfa\x0b\x29\x60\xa9\x16\xee\x64\x18\x22\x3a\x04\xbb\x8d\x6a\x11\x8a\xee\x24\xc3\xa6\x84\x6b\x21\xc2\xa5\xe4\xbe\x29\x84\xdf\x36\x61\x0a\xa8\x90\x9e\x4b\x38\x9b\x55\xa7\x0c\xe0\xc7\xb4\x6a\x58\x51\xbf\xc5\xb5\x4e\x70\x3d\x55\x5c\xff\x56\x12\x34\xc7\x44\x9f\xd1\xac\x65\x43\x9e\x60\x85\xc5\xc3\x78\x40\x59\xc2\xd5\xff\x35\x5e\x0e\xe1\xdf\xd7\x7a\xf0\x5f\x00\x00\x00\xff\xff\x77\x4b\x9c\x34\x1e\x21\x00\x00") + +func fontsCatwalkFlfBytes() ([]byte, error) { + return bindataRead( + _fontsCatwalkFlf, + "fonts/catwalk.flf", + ) +} + +func fontsCatwalkFlf() (*asset, error) { + bytes, err := fontsCatwalkFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/catwalk.flf", size: 8478, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsChunkyFlf = 
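Callers normally do not invoke these per-font constructors directly; a go-bindata file of this shape also generates an Asset(name) lookup over a _bindata map of the functions above. Assuming that standard helper exists elsewhere in this file (it is not visible in this hunk), fetching one of the embedded fonts would look like this hypothetical snippet:

// loadChunkyFont is a hypothetical caller; Asset is the standard go-bindata
// lookup helper, assumed to be generated elsewhere in this file.
func loadChunkyFont() ([]byte, error) {
	// Returns the decompressed contents of the embedded FIGlet font.
	return Asset("fonts/chunky.flf")
}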
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x57\x4d\x4b\x1c\x4d\x10\xbe\xcf\xaf\x78\x0e\xc2\x5c\xa6\x7b\xd4\x57\x0f\x2f\x48\x18\xc8\x21\xc1\x43\x2e\x21\xb7\x85\xde\x95\xa8\x11\x44\x89\x92\x43\xa0\x7f\x7c\xe8\xea\xaf\xea\xee\xea\x9d\x59\x02\x91\x45\xc7\xd9\xfa\x7c\xea\xa9\xea\xea\x87\xe7\x87\xcb\xc3\x19\xae\x71\x85\xcb\x73\x5c\x5c\xe3\x62\xf8\xfa\xf3\xd7\xe1\xed\x1e\x77\xbf\xf1\xf1\xc7\xdb\xd3\x3b\x3e\x3d\x3d\x3f\x4f\xf8\xef\x5c\xdd\x7e\xfb\xa2\xfe\xbf\x82\x52\xb8\x3b\xbc\xdf\x7f\xc7\xeb\x0b\xf4\xfb\xd3\x23\x5e\x1f\x70\x7b\x7f\x87\xcf\x87\xc7\xc3\x8b\x1e\xce\x16\xfe\x59\x06\x18\x83\x65\xb0\x80\x5d\x06\x6b\x4c\xfa\x0d\x00\xfe\xeb\x20\xe0\x45\x60\x8d\x8d\x5f\x03\xe9\x2f\x3d\x18\x92\xa5\xb7\xc6\x49\x03\x96\x54\x8d\x17\x82\x37\xce\xfe\x71\x02\xc6\x7d\xa2\xab\x49\x4d\x31\x9c\x11\x63\x8c\x86\xe4\xfd\x23\xfd\x78\xf7\xd6\xa9\x05\x3d\x84\x20\xc9\x14\x89\x92\x8b\xa0\x6f\x72\x46\x39\x2b\xc9\x15\x0a\xad\x8e\xab\x08\x16\x12\x0e\x19\xac\x10\xc7\x34\x86\x64\x09\x04\xff\xd7\xbd\xdf\x6b\x32\xe9\x51\x45\x80\x66\xaf\x09\x08\x6f\xd3\xa4\x42\x98\x69\x8c\xb9\x65\xaf\x51\xec\x03\x45\x7a\x13\x52\xae\xf2\x4b\xd5\x60\xa5\xc8\x75\xc8\xb8\xe7\xf2\x45\x15\xa4\xdf\x75\x9e\x4c\xd2\x53\x22\xe2\x9d\x41\x2a\x08\xd1\xb3\x58\x50\xcb\x53\x86\x1e\x66\x60\x5e\x06\x4c\xa3\xfb\x2c\xc3\x6c\xcc\xcc\xad\x2d\x85\x57\x24\x42\xc0\xb1\x9d\x51\xa3\xa9\x72\x00\x81\x52\xb7\x35\x1c\x3d\x95\x18\x2a\x24\x2e\x6d\x53\xe1\x4f\x3d\x95\xc8\x0c\xf7\xe1\x2a\x08\xe5\xe9\x79\xe1\xe1\xac\x7b\xa9\x54\x3c\x2f\x36\xaa\x04\x41\xa4\xc0\xec\x8a\x4a\xb4\x7d\x82\x97\x2c\xb8\x92\x4b\xa6\x8f\x40\xa5\xf6\xeb\x8a\xbb\xf4\xd2\x91\xcb\x89\xcc\x44\xb8\x65\xd8\x01\x3b\x7a\xef\x1a\x73\x9d\xe4\xdd\xc8\x02\xc5\xf6\x9a\x6a\xe6\xac\xba\xf7\x9e\xd3\xd6\x75\x72\x31\x20\x03\x90\x11\xd3\x71\x02\x26\x35\xfa\xba\x2b\x15\xdb\x72\x54\x6a\xe4\x68\xe5\xb2\x98\xa8\xeb\xc9\xc3\xfe\xa9\x23\x65\xd3\x86\xeb\x33\x5a\x67\xc8\xad\x69\x87\x48\xe5\xd6\xa5\x15\x9e\x6e\x92\x8f\x79\x9d\x40\x70\x6d\xaa\x36\x35\x03\x1c\x7c\x6d\x63\xcf\x62\x33\x30\x6a\x1b\x5b\x3e\x9a\xc6\xd3\x66\x3d\x8b\x62\x8e\x49\x7a\xc8\x7a\x56\x38\x99\x7a\x7a\xf6\x44\xe0\xd9\xe1\x69\xe2\xe1\x50\xcc\x2e\x71\xea\x07\x8a\xf9\x3c\x04\x7f\x72\x9c\xc8\x6d\x33\x33\xad\x78\xc4\xec\x7a\x55\x4b\x90\x58\x56\xf4\xbf\x84\x44\x3c\xd0\xaa\x12\x58\x51\x6f\xbd\xe4\x8c\x92\x5b\xe2\x6c\xb8\x0f\x62\x7c\xe4\xc9\x51\x56\x16\xae\xd6\x59\xd9\xba\x42\x6c\x33\x69\x7f\x91\x08\xd9\x5b\x95\xd6\x88\x05\x9b\x8e\x47\x84\xd4\xb6\x56\xcd\xae\xfa\xc3\x31\x3d\xec\x9a\xfe\x2e\x66\x1e\x3b\x22\xf3\xc4\x2b\x7b\xae\xf0\x79\xc4\xa9\xa2\xef\xd5\xf1\xae\x6b\xf5\xdc\x38\xa7\x05\x65\x13\x38\xac\x02\x45\x5d\xba\xc5\x88\x55\x17\xb6\xc5\x40\xe7\x65\xf0\x8d\x16\xce\x2b\xcf\xba\xbd\x8e\xa7\x8d\x43\x70\xd7\x2c\x4a\x69\xe7\x91\x77\xcb\x7c\x2c\xa4\x7e\x72\x5f\xcc\xbb\xde\x6a\x5f\xbc\xaa\x9e\x78\xf6\xcc\x36\xbb\x55\x58\x34\x8b\x72\x50\xd6\x4a\x29\xad\xb4\xdf\x04\x52\x78\x9a\x41\x95\x8e\xd8\x60\x4f\x35\xd2\xa6\x92\x66\xa6\xa3\xac\xc9\x79\x97\x69\x51\x98\x9a\x4e\x5d\xbb\xc5\x6c\xb4\x1b\x0c\x2b\x56\xdf\x4a\xda\x43\xa2\xf3\x4d\x20\x8e\x00\x2b\x61\x5b\x9a\xcd\x41\x14\xd1\x74\x90\xe0\xf3\xaf\xc1\x2d\x5d\xea\xd8\x05\x8f\xc5\x08\xda\x91\xe8\x65\xdc\x98\xf2\xc1\xd4\x77\x98\x6e\x1e\xa2\xc3\xd4\xaa\xc2\xc2\x5f\xa6\xab\x58\x0e\x45\x1a\x42\x57\x4b\x40\xf5\x53\x3f\x0e\xeb\x09\xd2\xac\x89\xed\x16\xdb\xc5\x0e\x2f\xd3\x71\x33\x13\x8c\x01\xdb\x9b\x84\x9e\x60\xc3\x02\xec\x8c\xa9\x59\x1e\xcd\xea\x18\x81\xdd\x82\x44\x25\x4d\xa3\x7a\x86\x20\xcd\x15\xb8\xce\xca\x98\x6e\x5d\x99\x9c\x84\xde\x1e\x98\xd4\x27\x2d\x96\x4a\x61\xad\x59\x69\x9b\x96\xae\x07\x56\x9a\x6a\x35\xcb\xcb\x5b\xbd\x95\xaf\x02\x6e\x1
4\x16\x0e\x47\xaf\x3f\xb1\xfb\x1e\x2b\x5e\x4e\x96\xf2\x08\x57\xde\x1e\xe9\x45\xe9\x2e\xe9\xb9\xb4\x8d\xd2\x5d\x62\x14\xb6\x75\x63\x5b\xff\xbb\x48\xda\x25\x9e\xed\x66\xb4\x31\x85\x9b\xd0\x9c\x9a\x96\xf4\xfe\x04\x00\x00\xff\xff\x9e\x93\x6b\xf8\x4b\x13\x00\x00") + +func fontsChunkyFlfBytes() ([]byte, error) { + return bindataRead( + _fontsChunkyFlf, + "fonts/chunky.flf", + ) +} + +func fontsChunkyFlf() (*asset, error) { + bytes, err := fontsChunkyFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/chunky.flf", size: 4939, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsCoinstakFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\x5d\xab\x1c\x45\x10\x7d\x9f\x5f\x51\x0f\x79\xe9\x07\x49\x5c\x04\xe3\xdb\x88\x10\x10\xc1\x86\xe8\x0f\xb8\x11\x36\x12\xb8\x28\x68\x5e\xf4\xd7\xcb\xee\x74\x57\x9f\x73\xaa\x7a\x66\x77\xbd\x24\x5e\x67\xba\x67\xa6\xeb\xbb\x4e\x55\xf7\x7e\x7c\xfe\x78\xfa\xf0\xca\xde\xda\xb7\x76\x7a\x63\x5f\x7d\x6d\xa7\xe5\x87\xfa\xe3\xcf\xbf\xfc\xfa\xfd\x4f\xf6\xdb\x3f\xf6\xfe\xcf\x3f\xec\xdd\x5f\x9f\x3e\xff\x6b\x6f\x5f\x7f\xf7\xcd\xf2\xee\xd3\xef\xcf\xe7\xcf\xf6\xfe\xfc\x7c\xfe\xf0\xf7\xd9\x4e\xaf\xdf\x2c\xaf\xd6\xe4\xcf\xd6\x75\xa9\xa5\xf0\xc5\xda\xc5\xcc\xda\xb3\xcb\xe8\xfa\xb0\x16\xbf\x99\x5d\xdf\x1f\xdf\xda\xbd\xff\xbb\x4c\x6b\x29\xd6\xae\x57\x0e\x97\x7f\x76\xbd\x87\xb7\x77\x7e\x8c\x8c\xfc\xf5\xba\xd8\x55\xe8\x4d\xc5\x26\xc4\x36\xba\xae\xb9\x0c\xb7\x57\x15\x88\x3a\x39\xd4\xa0\x51\xd8\x56\x18\x71\x1e\x04\x95\x49\xb2\x0c\x08\xd7\x32\xbe\x45\x19\xdb\xb3\x32\x96\xcb\x77\xb0\x76\x10\xdc\xde\x34\x07\xcd\x2f\x4d\xcc\x46\xae\xd1\xd7\x9b\xcb\xdb\xd9\x75\x0e\xd9\x4b\xba\x11\x51\x36\xa2\x68\xe2\x97\xae\xcf\xe6\x29\xf7\xb5\x35\x8f\x27\xef\x93\xf5\x4c\x3f\x86\x03\x45\x21\xf9\x0c\x26\x75\xb0\xdd\xfb\x2c\x52\x43\x46\x3b\x17\x77\x51\x5f\x16\xef\xc4\x3d\x79\x7f\x17\x27\x36\x3e\x68\xf1\x40\x04\x8b\xb6\x92\xdc\x40\x10\x09\x61\x54\x51\xba\x81\x4c\x6d\xea\x62\xb5\xc8\x57\x3f\xae\x0e\x2e\x12\x86\xd9\xad\x82\x11\x39\x89\x29\xca\x26\x86\x71\x6b\x90\x09\x0a\x79\x26\x45\x87\xc4\xa0\x1c\x94\x04\x2f\x3d\xe5\x8b\x86\x55\x6a\x5b\x0e\xbf\xca\x28\x26\xfc\x30\x9e\x8b\x23\x60\xa4\xb2\xab\x49\x61\xcf\x8b\x5b\x72\x4d\x72\x9c\x4b\x83\x04\xcc\x44\x91\x47\x9e\x91\xe0\xe9\x9f\x45\xd0\x27\x46\x6e\x55\x51\x1b\xd3\x18\x65\xd9\x1f\xa7\x0c\xac\x43\x30\xd9\x1c\x85\x1d\xb2\x5a\xf6\x7c\x9f\x01\xfb\x95\x92\xa4\x5b\x67\x6a\x10\x29\x6f\x76\x23\x4a\x39\x62\x08\x76\x1c\x7e\x08\x70\xe6\xf6\x96\x12\xae\x18\x82\x21\x90\x44\x52\x62\x13\x18\x54\x49\xc4\xec\x49\x18\xdc\x2f\xc0\x81\x0a\x44\x58\x88\x0d\x47\xd0\x33\x62\x6a\x04\x0f\xeb\x92\x87\x01\x50\xc5\x44\x41\xf9\x2a\x55\xc2\x16\x0c\x95\xc1\xf5\xe9\xe9\x29\x60\x74\xda\x34\x99\x87\x35\x56\x3d\x93\xcc\xcf\x72\x98\x2a\x48\x4f\x3e\x42\xc0\x00\xff\xc1\xdd\xe4\xab\x81\x61\xb8\x74\xac\x6a\x70\x03\xdf\x68\x76\x40\x1f\x91\x65\x9a\x40\x34\x35\x60\xe4\x6e\x19\xe3\xf7\xe6\xb9\x2c\x0c\x1c\xd6\x7d\x31\x29\x8f\xcc\x26\x63\x28\x64\x45\xdd\xb5\x52\x41\x62\x90\x86\x61\x97\x6c\xf7\x03\x4d\xb8\x17\x21\x1c\x22\x6c\x45\x5b\xf5\xb0\x4d\x22\x22\x9d\xf4\x6a\xee\x46\x82\x38\x4b\xd0\x8d\x0a\xc6\xee\x64\x14\x89\x1b\xd7\x04\x46\x47\x17\x48\x2c\x6a\x34\x8e\x46\xb5\x60\x7f\xc4\x01\xb6\x62\x88\xe0\x06\x83\xda\x90\x96\x17\x95\xdd\x82\x9f\xcd\x2a\x6c\x66\xcb\x5b\x9c\x7d\x6f\x98\xb5\x49\xd7\x77\xf8\x7a\x68\x51\xc1\x07\x0c\x72\x8e\x43\x09\x29\x9a\x9a\x05\x8d\x4a\xce\x17\x8c\xc7\x5c\x99\x29\xe7\xab\x11\xcb\xc0\x71\x60\x4d\xec\x74\xc4\xfa\x51\xfe\x7c\x3e\x20\xbc\x62\x27\x9f\x95\x7a\xce\x68\xdb\xc7\x9f\
+
+func fontsCoinstakFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsCoinstakFlf,
+		"fonts/coinstak.flf",
+	)
+}
+
+func fontsCoinstakFlf() (*asset, error) {
+	bytes, err := fontsCoinstakFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/coinstak.flf", size: 8391, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsColossalFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff…")
[… gzip-compressed escape dump for fonts/colossal.flf elided …]
+
+func fontsColossalFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsColossalFlf,
+		"fonts/colossal.flf",
+	)
+}
+
+func fontsColossalFlf() (*asset, error) {
+	bytes, err := fontsColossalFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/colossal.flf", size: 13597, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsComputerFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x58\x5f\x6b\xe4\x36\x10\x7f\xf7\xa7\xf8\x61\x0b\xf2\x72\xac\xae\x81\xf6\xd4\xe3\x38\x0a\x7d\x2e\x84\x3e\x1b\xf6\xd2\x64\xd2\xa4\xc9\x66\x43\xb2\x61\x39\xd8\x0f\x5f\xf4\x67\xa4\x91\x2c\xd9\x5e\xee\xe9\x42\xd6\x33\x5e\x7b\x34\xa3\x99\xdf\xfc\xd1\xde\x3d\xdd\x5d\x5e\x2b\x7c\xc2\x6f\xb8\xfc\x88\x8f\xb8\xec\x6e\xf6\xbb\x97\xf7\x03\xbd\x6e\xee\x9e\xee\xf0\xcf\x77\xfc\xf5\xf0\x48\xf8\x7b\xff\xf6\xfe\x44\x8f\xf8\xb2\xfb\xef\xf5\x8f\x67\x3a\x3c\x3c\xbf\x6d\x9e\xe9\xf0\xf5\x03\x3e\xe9\x5f\x2e\xf5\xef\xbf\x6e\xf0\xe7\x3d\xdd\x3c\x62\xff\x7e\xc0\xee\x7b\x77\xbf\xdf\xd1\xcb\xf5\xbf\x84\xeb\xc3\x67\xdc\x1f\x0e\x2f\x9f\xb5\x3e\x1e\x8f\x9b\x24\xaa\xdf\xee\xf7\xc7\x9b\xeb\x37\xd2\xbb\x87\x47\x3a\xbe\x3e\xdd\xea\x4e\xa9\xa1\xf5\x19\x3a\xa3\x30\x74\xc6\x28\x79\x51\xe0\xef\x00\xf7\x0e\x2c\xdf\xa3\xf7\xcf\xd0\xbc\x5a\x01\x00\xb7\x57\xb8\xbd\xb2\xeb\x02\x86\xff\xdc\x62\xfc\xc4\xaf\xcf\x4f\xec\x5d\x7a\xc2\x8b\xf1\x5f\x71\x37\x74\x44\x86\xc8\x9a\x0a\x38\xbb\x0c\x91\xbb\xb7\xb7\xf6\x9e\x02\x35\x44\x41\xad\x53\x31\x74\xa6\x37\xd6\x02\xf7\xc8\x44\x0b\xbd\x5a\xcb\xb1\x65\xd6\x16\xd3\x5b\xd1\xdb\x2b\xc0\x50\x70\x09\xeb\x47\xda\x90\x35\xe1\x83\x77\x20\x59\xb3\x9c\x03\x9d\x59\xc8\x39\xfb\x54\x21\x5b\x26\xac\xc0\xee\xae\x5c\xac\xc5\x8a\x23\x54\x5c\x48\xf1\x3b\x7d\x5c\xa7\xb8\xb0\xdd\xc3\xd0\x8d\x27\xad\x86\x4e\x9f\xc6\xe5\x00\xe6\x21\x88\xde\x8b\x5e\xe7\xfb\xe2\xbd\xf6\x2e\xd2\x46\xc3\xcb\x25\x09\xe1\xab\x3d\x5b\x5a\x55\x25\xcc\x69\x8f\x00\xe8\x10\x55\x1d\x82\xaa\x43\x4c\x75\x58\x58\x17\x18\x1b\xbc\x01\x11\x51\x1e\x52\x33\x0c\xb9\x58\x4a\x79\x86\x70\x8d\x04\xa4\x04\xa2\x84\xc6\x80\xd8\x44\xc2\xba\x26\xa4\x87\xf4\x4a\x90\x40\x78\x57\x50\x06\x56\x04\x7f\x00\xbd\xca\x8c\x0b\x0e\x63\x86\x13\x24\x65\x48\xf8\x26\x32\x85\x73\xd8\x2c\x64\xd9\x06\x23\x68\xee\x16\x61\xb1\xe1\x24\x28\x16\x90\xe9\x3b\x11\x94\x2f\x00\xa0\x0b\x47\xe9\x42\x09\x44\x16\x48\x1c\x3a\x50\xf4\x52\x4c\x3d\xc4\xd4\xf3\xe9\x68\x54\xce\xe5\xfe\xaa\xea\x67\x03\x85\x93\x32\xda\xca\x80\xa2\x94\xaa\x15\xef\xf8\x2c\xb1\xcb\x91\x4b\x7d\x4f\x5d\x92\x38\x6a\xb9\xde\x90\xe2\xd4\xf4\xbc\x97\xe8\x0d\xe5\xfb\x28\x33\x99\x29\xef\x8f\x64\xe2\x49\x3f\xce\x6b\x98\xb7\x4e\x16\xb8\xbe\xef\x7b\x86\x48\xc4\x08\x85\x2e\x13\x36\xd3\x97\xe8\x4c\xc8\xb3\x64\xbb\x8d\x96\x6c\x2e\x80\x6f\x1b\x6f\x8c\x76\xa1\xc6\x68\xe3\x7a\xb2\x25\x9a\x0c\x70\xb2\xaa\x46\x18\x03\xc3\x55\xe0\xdb\x66\xbb\xdd\x5c\xa8\xa2\x89\x48\xdb\xa6\x79\x6d\x62\xdc\x27\x8c\x2a\xe5\x81\xb8\x42\x84\x76\x68\x4e\x26\xee\x3a\x63\xa9\x28\x1f\x15\x6b\x5c\xcd\xa7\x5a\xd6\x82\x22\x66\x6b\xd6\x64\xbb\x69\x6e\xa2\x25\x5f\xc9\x71\xc3\xb5\x28\xd2\x0c\x31\xe7\x08\xaa\x1c\x62\xb3\x3b\x06\x88\x96\x2c\x16\x65\x59\xe6\xa8\xab\x6d\x15\x9a\x14\xe7\x2d\xb4\x32\xf8\x70\x49\x43\xc9\x90\x2c\x90\x84\x34\x65\x90\xa9\x19\x07\xc9\x14\xd3\xc1\x02\xb8\xd8\x13\xd1\xab\x75\x6f\xd6\xc2\x61\xa2\x5b\x4d\x44\x42\xf2\xc5\x0c\x57\x24\x6e\xee\xdc\xba\x53\xa7\xce\xb5\x82\x4b\xfd\x73\x92\x70\xf3\x38\x2e\x2b\xc6\x94\x99\x95\xaf\x30\xdb\xed\xd6\x2f\x2d\x9b\xb2\xd8\xf8\x0f\x04\x2e\xd3\x0f\xb6\x7f\x35\x70\x5c\xf8\x54\xd6\xe1\x88\xc7\xe8\x1a\x55\xed\x74\x68\x45\xac\xe8\xb5\xf5\x6d\xb9\x77\x7a\x13\x5b\xa8\xe8\xa5\x72\x8a\x65\xc5\x42\xb7\x50\x2f\x34\x57\x58\x57\x27\x27\x75\x70\x12\xad\x34\x9a\xad\xf2\x7f\x1b\x64\xde\x65\xa8\x31\x95\xf8\xcd\x74\x2c\xc1\x54\xf0\xeb\x74\x57\x2e\x69\x5c\x1f\xe3\x60\x3d\xc6\xd6\x36\x86\x19\xd5\x73\x5e\xfd\xc8\x75\x07\x63\x8e\x11\xb3\x3c\xee\xeb\x71\xe6\x54\x91\x06\x91\xea\x74\xb0\x30\x35\xb8\x0d\x38\xe3\xd7\x2c\xde\x1a\xa5\x9a\x15\x84\xf7\xd9\x91\x18\x1f\xdb\xa9\x38\x89\x40\x7e\xa4\x88\x7
0\x54\x31\x72\x4e\xca\xa8\xec\x80\xd1\x30\x75\x21\x75\x0a\x55\xd9\xd4\x9e\x54\x51\xa9\x6a\x49\x42\x15\xa7\x9f\x86\x71\xdc\x01\xd0\xb7\x8d\x83\xaf\x36\xe7\x06\x60\xe8\xa8\xd5\x24\xf3\xe8\xba\xb5\x8b\x33\x09\x55\x0e\xe0\x95\xe8\xb2\xe8\x9a\x42\x5b\x44\xd7\xdb\xb7\xd0\x20\x8b\xc6\x18\x9b\x5b\x28\x28\x74\x46\x8b\xac\x4c\xd4\xcb\x58\xa9\xc2\x3a\x13\x34\x59\xbd\x6e\x9d\x85\x96\x13\xa9\x31\x60\x35\x05\x45\x1b\x8c\x5d\xf0\x07\x32\xb0\x1d\x23\xa1\xb8\x57\xd3\x5f\x69\x1a\xe0\x48\x82\x4b\x4d\x70\x05\xd6\xd7\x64\xb0\x57\x19\x67\xeb\xb2\x0b\xce\x35\xbf\xd4\xba\xbc\x6e\xa1\x7e\xbe\x0f\x56\x9b\x5f\x96\x1c\xa0\x95\x5d\x70\x62\x4d\x4d\x7e\xa6\x0b\xb6\x03\xd7\x17\x07\x7b\x99\x5c\x99\x1b\x01\xed\x86\x96\x93\x5f\xd9\x93\x2f\xa1\xa5\x65\x5f\x02\xe3\xd6\xff\xe6\x58\xff\x28\xd7\xe0\xb8\x07\x9e\x94\x24\xf8\xaa\xca\x2f\xb7\x9a\xdb\x9d\x5e\xfa\x5d\xcb\x5f\xed\x84\x05\x37\x61\x9d\xdd\x98\x2a\x82\xeb\x32\x38\x0a\x9e\x8b\xce\x9f\xc8\x54\x55\xf9\x1f\xba\xff\x03\x00\x00\xff\xff\x33\x14\x99\x35\x04\x17\x00\x00") + +func fontsComputerFlfBytes() ([]byte, error) { + return bindataRead( + _fontsComputerFlf, + "fonts/computer.flf", + ) +} + +func fontsComputerFlf() (*asset, error) { + bytes, err := fontsComputerFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/computer.flf", size: 5892, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsContessaFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x55\x4d\x4f\x1b\x41\x0c\xbd\xcf\xaf\xf0\x01\x69\x37\x69\xe2\xa1\x69\x25\xa4\x06\xa2\x91\xb8\xf5\xd4\x3b\x13\x39\x40\x83\x88\x84\x08\x4a\xe0\x50\xc9\xe2\xb7\x57\xf6\x7c\xac\x67\x49\x0e\x99\xec\xfa\x8d\x9f\x3d\xcf\x1e\xef\xd3\xcb\xd3\xe2\xfe\x02\x7e\xc2\x0f\x58\x5c\xc2\xfc\x3b\x2c\xdc\xed\xfe\xf5\x7d\x7b\x3c\xde\xc3\xc3\x3f\xb8\x7d\x3e\xec\x8e\xef\xfb\xb7\xe7\xed\x01\x7e\xef\x8f\xdb\xb7\x67\xf8\xb3\x3b\xec\x5e\x5e\xf6\xd0\xbf\xe9\xc3\xe3\xe2\xea\xea\x32\x3c\xee\x1f\x0e\xf7\xf8\xf1\xba\xc3\xed\xdf\x8f\x89\x73\x17\x17\xc1\xfe\x82\x03\x86\xb2\x4c\x65\x01\x08\xc1\x4d\xf5\x79\x03\x9b\x64\x29\x6b\x70\xc4\xc4\x04\xc3\x3f\x30\x70\x81\x75\x03\xd0\x92\x10\x82\xeb\x05\x87\xe0\x90\x98\x26\xba\x61\x53\xa8\xbd\xbc\x7a\x01\x7d\x09\x59\x3c\x93\x5f\x59\xc5\x47\x80\x69\x70\xd0\xc9\xab\xfe\xd4\x39\xb8\x5e\x5f\x63\x4d\x39\x3f\x4e\x32\x9c\x8c\xc8\x33\x08\x6e\x3e\x9d\x43\x70\x1d\x6f\x4c\xb0\x99\xb8\xcf\xbf\x09\x00\x9d\x39\x60\x39\x6c\x91\xa3\xcb\x46\xb5\x10\x11\x0c\x6f\x67\x3c\xb2\xd1\x9e\xd2\xf2\x93\x64\xc4\x28\x82\x33\xf1\x38\x23\xaf\x6a\x22\xe3\xd8\xa3\x53\x15\x3d\x9d\x01\x30\x8b\x9c\x4e\x0d\x33\xc3\x6e\x62\x60\x0a\x9e\x0b\x33\x19\x03\xbd\x02\xbd\x05\x48\x81\xb3\xe7\x48\x7b\x1b\x0f\x0b\x34\xc1\x8d\x44\x56\xa7\xd6\xd8\xd5\xfa\x5e\x8f\xeb\x9b\x55\xbf\xb9\xb9\x31\x7f\xd0\x14\x7f\xd5\x14\x1f\xa4\x58\x9d\x76\xc4\xd2\x18\xf5\x90\xbe\x5f\x4b\x6a\x91\xc8\x1b\x1a\x24\xc2\xe0\xee\x88\xd6\xc1\x31\x00\x1b\x7d\x84\xeb\x8e\x68\x52\x56\x18\xe8\x54\x99\x8d\x92\xe1\xc8\x83\x01\xa2\x08\x4e\xde\x02\xa4\x24\x89\x90\x4e\x02\x6c\xa5\xce\xc1\x21\xc1\x68\xa8\x00\x50\xd9\xf9\x4b\xba\x98\xef\x74\xbe\x47\x59\xc0\x94\x1e\x6b\xaa\xfc\x85\xc6\xd7\x84\x07\xa0\xe6\xc2\x4d\xaa\xc9\x23\xfa\x2f\x81\x33\x00\x0a\xc4\x46\x40\x2c\x7b\xb9\x09\x6e\x94\x1d\x9f\xda\x7a\xc4\x73\x1e\xd1\xd6\x02\xa5\xf3\x48\xbb\xdb\x14\x49\x94\xc5\xd4\x8d\x60\xfe\x9a\x94\x4f\x64\xa6\x40\x04\x10\xc1\xa3\x87\xf1\x21\x93\x87\x37\x87\xcc\x7b\x57\xd7\xa9\x25\x5a\x21\x67\xc2\x82\xfe\x44\x02\x25\x3b\x2d\x00\xfa\xda\x45\x02\xde\x7d\x6a\xe9\xf5\xc0\xb5\x87\x23\xe4\xbb\xa1\x96\x68\xae\xd8\xe7\x5a\x9e\
x65\xa1\x75\xd9\xed\x23\x34\x43\xaa\xde\xa6\xb2\x92\x2d\x6d\x9a\xfb\x76\xd8\xa6\xc9\xa7\xca\x56\x4e\x4c\x2d\xa1\x69\x4d\xda\xbb\x9c\x76\xe2\x60\x94\x47\x91\xd5\xb8\xe7\x9d\x32\x27\xfc\xac\x72\x52\x70\x3c\x13\x61\xdb\xc8\x90\x5d\x91\xb8\x8d\x0c\x43\xe4\xe0\xa6\xc1\x49\x21\x86\x79\x3c\x55\xa1\x1b\xb7\xa5\x5c\x43\xce\x75\x11\xa3\xfa\x0c\x6e\x69\x2c\x42\x22\xe7\x4c\x3f\xa4\x82\xe3\xa8\x26\xbf\xd6\x88\x55\x19\x3e\xad\x21\x0f\x3b\x31\xd7\xb7\xe1\xa4\xe0\xca\x4c\x4d\x9f\x06\xfd\x56\x95\xef\xb4\x09\x04\xca\xc9\x6d\x59\x31\x77\x9b\x6f\x86\x6f\x81\x0a\x58\xe1\x81\x2e\xca\x70\xf1\x5d\x6c\xb3\xd1\x6b\x40\x55\xca\xdc\x33\x52\x56\x5f\x87\x8b\x70\xad\x84\xa1\x14\x4f\xbf\x6f\x60\xaa\x19\x75\xa6\xa7\x9d\xa5\x2f\x73\x03\xfb\x13\x1f\xd5\xfd\x1e\x4e\x8e\xb6\x04\xf8\xcd\x26\xa6\xe9\x3d\x02\x7e\x01\x2c\x47\xf7\x38\x13\x2b\xdc\x13\xcf\x46\x1e\x69\x82\xf7\xcd\x50\x57\x20\xa9\xd8\x52\x95\xe1\x9c\x2a\xbb\xaa\xc0\xff\x00\x00\x00\xff\xff\x79\xe8\xd6\x6a\xad\x09\x00\x00") + +func fontsContessaFlfBytes() ([]byte, error) { + return bindataRead( + _fontsContessaFlf, + "fonts/contessa.flf", + ) +} + +func fontsContessaFlf() (*asset, error) { + bytes, err := fontsContessaFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/contessa.flf", size: 2477, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsContrastFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\xcd\x8e\xd4\x30\x0c\xbe\xcf\x53\xf8\x40\xc4\x05\x0c\x8c\xf8\x11\xb7\x95\x40\x48\x1c\x56\x1c\x78\x82\xee\xd2\x59\xaa\x5d\xa5\xa8\xad\x84\xe6\xed\x11\x9d\x99\xc6\x8e\xed\x38\xd3\xdb\x4a\x1d\xed\x21\x72\xe2\xf8\xef\xb3\xbf\x74\x0f\x4f\x87\x7d\xf3\x02\x3e\xc2\x07\xd8\xbf\x85\xd7\xef\x60\xbf\xfb\xd2\xc7\x69\x68\xc6\x09\xbe\xf5\x71\x82\xbb\x23\x7c\x6d\x63\xec\x46\xb8\xed\xe3\x23\xc0\xa7\x37\x9f\xdf\xef\x7e\x0c\xdd\x43\x17\x9b\x27\x38\xf4\x71\x7a\x05\xdf\x61\xfa\xdd\xc5\x47\x04\xb8\x6d\x8e\x77\x2d\x74\x71\xfc\xd3\x0d\xed\xaf\xff\xca\x7d\x6c\xa1\x3f\xc0\xcf\xfb\x66\x68\xef\x87\xfe\xef\xcb\x11\x70\xec\x1e\x46\xdc\xe1\xf9\x77\x73\xfd\x6a\x5e\x86\x70\x11\x6a\xab\xa4\x92\xcb\x6e\x76\xb3\x28\x04\x64\x2b\x0c\xf3\x5f\x85\xe5\xe5\x60\x98\x7f\xe8\xca\x98\xf2\x65\x3b\x04\xbc\x38\x36\x8b\xce\xce\x2e\x7e\x25\x99\xe6\xf6\x29\x2c\x1e\xdf\xbc\x9d\x05\x25\x2c\x5f\xf6\x97\xa8\x03\x26\xe9\xec\x13\x3f\xeb\xe7\x1c\x95\x9c\xab\x05\xc3\x90\x6c\x9e\xb6\x43\x3a\xb8\xc8\xc8\x39\x1a\x39\xdb\x4e\x77\x07\x2e\x63\xe7\x16\xe5\x73\x29\x4e\x35\x09\xac\x50\x42\x4a\xce\x3a\x91\x13\xfd\x02\x02\xcd\xc4\xd4\xa4\xd2\x50\xa6\x96\xbd\x9c\x9b\xdb\x55\x6e\x87\x90\xe7\x9c\xa1\x2d\xa1\x49\xc5\x39\x62\xd6\x6b\x04\xfb\x09\x7f\x12\xe7\xcc\x4a\xa1\xcb\x65\x1e\xc4\x8d\xb9\x95\xe4\xac\xaa\xcc\x84\x24\xe6\xfc\x42\xb5\x3d\xe9\xb6\x8c\x99\xa5\x53\xb6\x27\x4f\x8e\x1c\x0c\x8e\x65\xad\x2c\xb2\x02\x6a\xb6\xad\x98\x57\xd7\xb9\x2c\xab\x56\xd6\x7c\x30\xb0\x5d\x33\xf6\xd7\x2a\x97\x1b\x23\x41\x51\xbf\x50\xc2\x13\x6d\x00\x96\x3b\xbb\xc2\x8a\xe6\x4d\x39\xdb\x5a\xc5\xd5\x61\x90\x94\x17\x96\xa0\x2b\xa4\xcc\xb1\x76\x18\xa8\xbc\xa5\xe1\xb8\x2c\x73\x2d\x23\x41\xb4\xd7\x18\x8a\xb2\xda\xe3\x76\x57\xf1\x91\x93\x17\xc8\x19\x43\x9e\xb2\x5b\x67\x4c\xdc\x5e\x33\x7a\xcd\xf8\xaa\x4b\x55\x60\xc4\xe2\xdc\x2e\x4f\xca\x6b\xdc\x4e\x6f\x81\x40\xa2\x97\xc8\x97\x5d\xe5\xad\x54\xb7\xe9\x68\x46\xfa\xb8\x23\x15\x14\x4b\xcb\xf9\x40\xc3\xa8\xc9\xb9\x85\xf0\xca\xb4\x79\xbd\x75\x35\xc5\x1b\x38\x5b\xdf\xd5\x6e\xcc\x36\x57\xba\x74\x57\x02\xa9\x3a\x06\xbd\x41\xb0\xa2\xb7\x8a\xc4\x67\xe0\x8c\x2e\xd9\x17\x83\x78\xd4\xeb\xf6\x0d\x92\xf5\x9a\xa4\xe
4\xa8\x93\xb6\xda\x27\x46\xf5\x30\xbc\xa6\x3d\xc5\x23\x82\xd2\xbb\x00\x8d\xe1\xb6\x18\x49\x88\xfa\x23\x4f\x7d\xc4\x5a\x99\x5d\xf9\x70\x2f\xc6\x6c\x94\xa5\xe2\xc2\x8d\xab\x37\xae\xde\xb8\x7a\xe3\xea\x8d\xab\x37\xae\x5e\xc5\xd5\x67\x83\xf9\x04\x41\xcd\x72\x2d\x6f\x55\xac\xd8\x04\xc9\xe8\x5d\x7a\xa3\x7f\xdd\xa6\x6f\x5a\xff\xbf\xc0\x56\xb6\x91\x21\xbc\xae\xb7\x0c\xe5\x3a\xd2\x93\xca\x2b\x10\xfe\xac\xdc\xce\x67\x1e\x9b\x65\x8e\xf4\x74\xc5\xbf\x00\x00\x00\xff\xff\x92\x0a\xba\xc5\xf3\x18\x00\x00") + +func fontsContrastFlfBytes() ([]byte, error) { + return bindataRead( + _fontsContrastFlf, + "fonts/contrast.flf", + ) +} + +func fontsContrastFlf() (*asset, error) { + bytes, err := fontsContrastFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/contrast.flf", size: 6387, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsCosmicFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x6d\x8b\xdc\xca\xd1\xfd\xde\xbf\xa2\xe8\x6e\xd3\x0f\xa6\xdc\xf3\xd8\x21\x37\x1d\x75\x1c\x42\x0c\x17\x02\x11\x11\x09\x26\x88\x5e\x5b\xf2\xce\xac\xaf\x1d\x7b\x46\x97\xeb\x35\xce\x85\xfb\xe3\x43\x9d\xea\x96\xb4\xf6\x26\x21\xdf\x57\xac\x76\xf4\x32\xaa\xae\x3a\xe7\xd4\x8b\xe6\xed\xc7\xb7\xcf\xde\x3c\xa2\xef\xe8\x3b\x7a\xf6\x94\xfe\x9f\x9e\x99\x17\x7f\xf9\x5b\xff\xa7\x17\xf1\xfb\x3f\x7f\x4f\xd7\x3f\x53\xff\xfe\xc3\x0d\xfd\x75\xf9\xf4\xf9\xe3\xcd\x07\xfa\xdd\xf9\x1f\x3f\xfd\xe1\x72\x73\xfb\xfe\xf2\x29\x5e\x6e\x6e\x7f\xcf\xf4\x9b\xc3\xd3\xa7\x87\xdf\xfe\x3a\xd2\x1f\x3f\xff\xf0\x89\xde\xbe\xff\xe7\xcd\x49\xae\xfd\x4a\xae\x99\x17\xef\x6e\x8e\x1f\x68\xf9\x7c\x4b\xe7\x9f\xe9\xdd\x72\xbe\xf9\xf1\xcd\x0f\x37\xf4\xe6\xb6\xa3\x77\xb7\xb7\x3f\x76\x87\xc3\x97\x2f\x5f\xe2\x66\xee\xf0\xe9\xdd\xf2\xe5\xf8\xe6\xd3\xcd\xe1\xfc\xfe\xc3\xcd\x97\x9f\x3e\x9e\x0e\xe6\xd1\x23\x77\xdf\xee\x0c\xc5\xce\x99\x9c\xb3\x33\xa1\x14\x67\xc8\x7b\x67\xc8\x5a\x67\xa8\xef\x9d\x33\x5d\x47\x5d\xe7\x8c\xcd\x94\xe5\x9a\x6c\xf7\x7f\xe0\xb3\xeb\xb0\x3b\x33\x4d\x9c\x03\x76\x67\x42\x28\x45\x77\x67\x88\xbd\xc5\x4e\xce\x2c\x29\x2d\xd8\xc9\x19\xee\x2d\xc9\x5e\x0d\xc5\x4e\x2d\x73\xce\x39\x92\x33\xa5\x30\x4f\xb3\x5c\x99\xad\xb5\xe3\xe2\x0c\x8d\xcb\xc4\x27\xb8\x14\xfa\xd0\x1e\x8b\x24\xcf\xce\xce\x50\x26\xca\xc4\x19\xcf\xd0\xeb\xd7\xc4\x65\x6e\xbe\x1e\x07\x62\xc6\x31\x27\x4b\x89\x28\x39\xc3\xa7\x41\x6e\x59\x0b\x43\xdd\x95\x2e\x1f\x73\x0e\xe4\xcc\xff\x95\x69\x92\xf3\xa3\xb7\x70\xdc\x8e\x0b\x2f\x03\x8c\xd9\x9e\x01\x92\x33\x2c\xd1\xd1\x9d\x5d\x23\x71\x86\x61\xe5\x52\xe4\xf2\xe8\x71\x33\x2d\x2c\x77\x2d\x20\x8e\xb8\x14\x32\x2e\x95\x8b\xfc\xf7\xa3\x33\xbc\x24\x72\x46\x40\x51\x70\xc9\x99\xab\x5f\x0e\xe4\xcc\xe1\x97\x2b\x6a\x57\x76\xff\xbf\xe5\x05\x2b\x7a\xd9\xe4\x2c\x7d\x45\xd6\x37\xff\x4e\xe9\x1a\x4c\xb8\x7b\x8c\x9b\xe3\xf1\x78\xfc\x76\xb9\xfb\x6d\x8c\xfd\xd0\xbc\x39\x80\x0d\x39\xa8\x5c\xd0\xa5\x04\xfd\xf6\xd1\x5b\x7d\x6e\x49\x56\xed\x9e\xa1\x80\x2d\x94\x6a\x96\x73\x66\x39\x62\x79\xb2\x5c\x10\x93\xdc\x93\xb0\xc6\xc4\xe0\x51\x44\x3b\x9e\xcf\x03\x1e\xee\xa2\x48\x5a\x64\x23\x5f\x75\x26\x25\x67\xa0\x66\xe1\xa3\xab\x78\x73\x0e\xf3\x0c\x75\x51\x08\x44\x5c\x8a\x30\x18\x8f\xde\x0f\x41\x23\x49\x34\xf1\x22\x44\xf5\x7d\xff\x72\x78\x6c\x5f\xbf\xd6\x7c\x11\x03\xf2\x8d\x1c\xe6\x50\x5d\x13\xdd\x5d\x4a\xd1\xc3\x79\xb6\xde\xfb\x08\xd7\x79\x49\x09\xde\xd1\xd8\xf7\xc3\xca\x65\x44\x96\x88\x0f\x39\xd7\xc8\x4a\x91\xc0\x86\x69\xf2\xfe\xe8\x8c\xe8\xbc\x3e\x48\xd4\xf7\x88\xaa\x6e\xce\xcc\x39\xcf\x73\x08\xd9\x19\x2a\x85\x27\x85\x69\xb6\xf6\xf1\x08\x8e\xa6\x89\x97\x53\xd2\x14\x1e\xec\x8
6\x67\xac\x91\xcb\xb2\x1a\x01\x97\x12\x97\x53\xba\x56\x99\x58\x22\x0b\xa9\x8c\xe9\x9a\xf9\x94\x20\x71\x3b\x36\xb7\x63\x8c\x75\xfd\x10\x04\xba\x1c\xaa\xd9\x12\xd4\xaa\xf7\x95\xd9\x94\x52\xa5\xae\xef\xfb\xba\x3e\x9e\x15\xb4\x05\x73\xf1\x40\xc5\x7e\x29\x48\x45\x48\x41\xe2\xa6\xf1\x9a\x27\xde\x2f\xad\xb4\x31\xc3\x77\xce\x00\x9d\x51\x12\x68\x62\x94\xac\xd1\xfb\x41\x1d\x67\x9e\x58\x23\x27\xbb\x43\x5b\x9e\x63\x29\x70\xe1\x5e\x95\xde\x7f\x7b\x4b\x04\xe5\x97\x25\x3d\x55\x25\x47\x8f\x8b\x36\x5d\xb7\x34\x5e\xf5\x4e\xbb\x4f\x96\x0d\xf7\x2d\x0a\x2a\x2d\xcb\xb2\xac\xe7\xab\x6d\x66\x7c\x84\xa2\x50\x28\x0e\x95\xc1\xb1\xa5\x7f\x7c\x22\xf8\x3d\x81\xb0\xc3\x2c\xe8\x67\xf5\xea\x52\x2a\xfc\x27\x3f\xd4\x7c\x92\x32\x46\x2b\x01\x5b\x46\x09\x05\x41\xb7\x35\x0b\xa4\xc2\x49\x95\xcd\x39\x73\xcc\x34\xe7\xe8\x4c\x27\x42\x2e\x51\xa9\xc5\xb1\x36\x06\x26\x0a\xa3\x97\xfc\x18\x91\x24\x28\xbe\x63\x5a\xa0\x40\x82\xec\x96\x1a\xff\x6c\xc7\x97\x3d\xb6\x97\x63\xa3\xa1\xab\x89\x83\xaf\xe4\x3c\xe7\xdc\x70\x2a\x85\x10\x3d\x6a\xac\xf7\x47\xfc\x43\x99\x84\x90\x52\x4a\x02\xe3\x88\x58\xac\x9d\x77\xc9\xd0\x82\xc8\x21\x68\xad\xa6\x52\xca\x34\x95\x72\x15\xe1\xb2\x20\x3d\x8a\x34\xa6\x94\x16\x16\x50\x07\x67\xac\x3a\xb7\x69\x0b\xd8\x76\x55\x5f\x80\x77\x86\x92\x8b\x04\xbf\x16\x22\xaf\x55\xa7\x9e\xcd\x49\xd8\x97\x98\x95\xb5\x16\xf0\x60\x37\xef\x9e\x34\xf7\x84\x61\x95\x2d\xcd\xd5\x26\x30\x55\x50\xb5\x92\x21\xce\x89\x97\x34\x84\x9a\x39\xfd\x60\x67\xcd\x3c\xee\x56\x0f\x33\x82\x0d\x9a\x69\xa5\x48\x59\x56\x14\x35\x58\x3d\x4e\xd2\x56\x79\x9a\x08\xbd\xbc\xc6\x2b\x86\x34\xd2\x00\x33\xb0\x82\x20\x99\x9f\x3f\x97\x90\xa4\x06\xcc\xf3\x06\x3c\x1e\xef\x7b\x5e\x05\x54\x91\x3a\x54\xa8\x9e\x34\xa8\x56\xac\x0a\xb6\x83\x33\xd6\xfb\x23\xd8\xb6\xde\xa3\x3a\x8d\x02\x17\xca\x21\x52\x80\xe6\x30\xf6\x2f\x07\x3b\xd6\xba\x2c\xc1\xa1\xed\x6b\xd7\x27\x02\x6a\xa8\x50\x85\x59\xfe\x13\x8c\x4a\x90\x5e\x4d\xaa\x8f\x56\x0d\x6a\xa1\xa1\xb1\x0e\x2d\x6d\xa6\x29\xda\x02\xd0\x03\xd0\x04\xfa\x56\x0b\x5b\x35\xeb\x54\x3f\xba\x49\x38\xe8\x07\x33\x09\x51\xb1\x25\x28\x48\x9a\x95\x7f\x98\xba\x5e\x96\xe5\xd4\x30\xb2\x7d\xdd\x6c\x4b\x35\x70\x15\xa9\x89\x53\x06\x09\x8e\x2a\xce\x52\x0e\xe0\x6e\x92\xb6\xcc\xd5\x80\xf4\x08\x11\x28\xd5\x48\x6c\xd5\x91\x1a\xda\xec\xd4\xc3\xa6\x4b\xb0\x1e\xea\x21\x46\x29\x8e\xd3\x24\x02\xdf\x51\xde\x24\xab\x23\x95\x38\x42\x40\x37\x57\xd6\x0a\x4b\x02\x6a\xf1\xf1\x75\xab\x10\x0b\xc2\x63\x92\x21\x09\x20\x03\x63\x19\xb6\x6c\xdf\x60\x86\x2d\xf9\x44\x53\x82\xd0\x5b\x69\x42\xac\x91\x42\x13\xba\x27\x3b\xfa\xe3\xaa\x74\xb0\x25\x9c\xb4\x26\x21\xe4\x6d\xe4\xb4\xf8\x62\x65\x26\xd6\x76\x85\xeb\x57\xa5\x30\x9c\x05\x7e\x04\x7e\x05\x43\x9e\x68\xe2\xb4\x36\x8f\x7e\x4d\xf2\x6e\xdd\xd0\xef\x67\xa5\x3a\x56\x4f\xe7\x57\xaf\x2e\x97\xcb\xab\x57\xa1\x96\x5f\xef\xad\x5d\x4b\xb9\x84\xbe\x15\x76\x31\x7a\x4d\xfb\x96\xa6\x18\xd4\xce\x34\xeb\x40\xb0\x55\x8e\x52\xca\x95\x26\x03\x11\xca\x1a\x06\x95\xc7\x92\x0b\x63\x4a\xd7\xac\x43\xe4\xe3\x71\xb0\x64\xfb\x5d\xe5\x88\x91\x5a\xb2\xa3\xd6\xe7\xc6\x3b\x1f\x4a\xa9\x03\x94\x52\x75\xa4\x35\xe5\xaf\x25\x19\xb4\x25\xf5\x8a\xa9\xfd\xbb\xbd\xeb\x29\x4c\xce\x50\x33\x5a\xce\xf3\xe7\x07\x70\x8f\x52\x1e\x20\x25\x0f\x63\x08\xf2\xd4\x90\x3c\xf7\xe3\x57\x38\xd6\x0c\xcb\xad\x1c\xd5\x6e\xb6\x09\x53\x89\xd9\x9f\xa5\xc4\xbb\xb3\x4a\x3a\xbc\x8b\x4d\x46\x90\x39\x55\x7d\x0a\x8a\xa1\xa1\xd8\x66\xbe\xca\xb6\xea\x27\x9e\x90\xd5\x98\xfc\x90\x7f\x15\x41\xfd\x62\x84\x2c\x75\x42\x43\x8f\x03\xbf\x57\xe2\x62\x2c\xe5\x50\x5d\x19\xfd\x31\x62\xf4\x69\x0c\xa7\x34\xec\x3b\x79\x3f\xb4\x92\xef\x4c\x54\xd3\xfa\x96\x81\x41\x08\xc6\xc5\xe5\x66\x1e\x7d\x4c\xf7\xd0\x2c\x7a\xf4\x33\x3f\xac\x36\x
2d\xd2\x3d\xed\xd0\xb0\x92\xf3\x76\xeb\xd4\xbc\x05\x01\x85\x6a\x6a\x45\xc6\xb4\x28\xd7\x65\x1d\xde\xd6\x90\x55\x3c\x56\xd0\xf3\x65\xb0\xf3\x2c\xef\x29\x90\xe6\xd9\xd6\x18\x6c\x7f\x66\x6d\x03\x9a\x60\x3a\x51\x50\x40\x7e\xa1\x0c\x2a\x91\x62\xbd\xec\xad\x1f\xf7\x10\x71\x42\x63\xaa\xa7\x6d\x68\x57\xdf\x77\xf2\x40\x73\x58\x07\x15\xcc\xc7\xa1\xce\x89\x18\xcd\x0c\x27\xed\xa1\x6d\x7a\x7d\xb9\xcd\xb9\x18\x73\x90\x47\x54\xc7\x7d\xe1\x9c\xf1\xf2\x85\xda\x33\xe3\x1d\x4d\x07\xdf\x7c\xd5\x7c\x09\xe5\xd2\x9c\xb4\x92\x19\x0d\xee\x65\xc3\xf9\xbc\xae\x20\x61\x6b\xa1\xd2\x02\x25\x9d\x47\x9b\x84\xd6\xde\xa8\xf9\x12\x68\xce\xdf\xbe\x5a\x7d\xfd\xe6\xbb\xbb\xf0\x5f\x0e\xce\xba\x21\x86\x5a\x8d\xf8\xbe\x17\xae\xd5\xf6\xc3\xdc\xf4\x30\x37\x3d\xcc\x4d\x0f\x73\xd3\xc3\xdc\xf4\x30\x37\x3d\xcc\x4d\x0f\x73\xd3\xbf\x9b\x9b\xf0\x43\xbe\x7c\x53\x48\xb7\xce\x24\x94\x58\xcc\x19\x92\x66\xf7\xfc\x1c\xaa\x3c\x3b\xa3\x3d\x42\xf4\xeb\x0c\xb2\x29\x74\xb1\x0b\x72\x97\xe6\xfd\xfc\xf3\x9f\x27\x1b\xfc\x92\xa5\x79\x8d\xdf\xd7\x43\xa8\xd3\x4a\xa8\xbf\x0f\xee\xc6\x15\xba\x3b\xaf\xdc\x1d\x58\xee\x4e\x2c\xda\x44\xaa\xd6\x99\x61\x77\xab\x6e\x9a\x41\xfb\x02\x47\xb5\x6c\xae\x35\x4e\xc7\x8c\x5d\x99\x6b\x6a\x5a\x2b\x5d\x5d\x66\xcb\xa7\xb6\xce\x96\x54\x75\xa1\x5d\x66\xd5\xd8\x77\xe9\x55\xaf\xec\x72\x6c\x1d\x46\xd7\x44\x7b\x00\xea\x7f\x02\xea\xd1\xd7\x7f\xce\xfc\x2b\x00\x00\xff\xff\xdf\x73\xc9\xcc\x69\x1b\x00\x00") + +func fontsCosmicFlfBytes() ([]byte, error) { + return bindataRead( + _fontsCosmicFlf, + "fonts/cosmic.flf", + ) +} + +func fontsCosmicFlf() (*asset, error) { + bytes, err := fontsCosmicFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/cosmic.flf", size: 7017, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsCosmikeFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\xdf\x6b\xe4\xc8\x11\x7e\xef\xbf\xa2\xe8\xee\xa1\x2f\x4b\xb9\x27\x7b\x21\x97\x8e\x3a\x1b\x0e\x42\x1e\x05\x22\x60\x82\x68\xef\xaa\x3d\x9a\x71\xec\xd8\x33\x7d\xac\x7d\x31\x07\xf7\xc7\x87\xaa\xea\xd6\xe8\x62\xef\x42\xc8\xbd\xc5\xc2\x9a\x69\x69\xa4\x52\xd5\xf7\x7d\xf5\x43\xbe\x79\xb8\xf9\xf6\x7a\x03\xdf\xc1\x77\xf0\xed\x7b\xf8\x2d\xfc\x4e\xcd\xe5\xf1\x78\x37\xfb\x9b\x87\x1b\xd8\xfd\x04\xfd\xdd\xfd\x01\xfe\x56\x1e\x7f\x7c\x38\xdc\xc3\x9f\x8e\xff\xfc\xfc\xfd\xe9\xf0\x74\x77\x7a\xf4\xa7\xc3\xd3\x9f\x11\xfe\xb0\x7d\xff\x7e\xfb\xc7\xdf\x7b\xf8\xcb\xed\x61\xbe\x87\xf2\xe3\x13\x1c\x7f\x52\xb7\xe5\x78\xf8\xe1\xfa\x1f\x07\xb8\x7e\xea\xe0\xf6\xe9\xe9\x87\x6e\xbb\x7d\x7e\x7e\xf6\xe7\x5b\xb7\x8f\xb7\xe5\x79\xbe\x7e\x3c\x6c\x8f\x77\xf7\x87\xe7\xcf\x0f\xfb\xad\xba\xb8\xb8\xb8\x80\xbe\xec\xef\x6e\xee\x0e\x7b\xb8\x3e\xed\x61\x2e\x9f\x3f\x1f\xe6\xa7\xc3\x5e\x3c\x99\x6f\x0f\x0f\xf0\xd7\x9b\xa7\xbb\xe3\xf5\xfd\xdd\x23\x7c\x23\x67\xfc\x72\xe6\xfb\x7f\x3d\x3c\xde\xf9\xb9\x1c\x7f\x03\x64\x4b\x6d\x36\x9b\x8d\xf9\xfa\xa7\x51\xe0\x3b\xa3\x62\x8c\x46\xb9\x94\x8c\x02\x6b\x8d\x02\xad\x8d\x82\xbe\x37\x46\x6d\xba\x0e\xba\x8e\x2e\xd7\x11\xa2\xde\x18\x05\xb2\x7d\x6d\xc1\xab\xae\xe3\xdd\xa8\x69\xc2\xe8\x78\x37\xca\xb9\x94\x64\x37\x0a\xd0\x6a\xde\xc1\xa8\x12\x42\xe1\x1d\x8c\xc2\x5e\x03\xed\xd5\x90\xef\xc4\x36\xc6\x18\x3d\x18\x95\x12\xe2\x94\xe9\x4c\xd6\x5a\x8f\xc5\x28\x18\xcb\x84\x7b\x72\x19\x5c\xef\xda\x6d\x1e\xe8\xde\x6c\x14\x44\x80\x08\x18\xf9\x1e\xf8\xf4\x09\x30\xe5\xe6\xed\x3c\x00\x22\xaf\x31\x68\x08\x00\xc1\x28\xdc\x0f\xf4\x93\xd6\x6c\xa8\xbb\x92\xc7\xfb\x18\x1d\x18\xf5\x4d\x9a\x26\x3a\x9e\xad\x66\xc7\xf5\x58\xb0\x0c\x6c\x4c\xf7\x68\x8c\xea\x3a\xa3\x90\xa2\x83\x5f\xec\x12\x89\x51\xc8\x56\x4e\x89\x4e\x8f\x96\x7f\x0c\x05\xe9\x57\x4d\x70\x77\x9e\x4f\xb9\xc8\xa7\xd2\x89\x3e\xed\x68\x14\x96\x00\x46\x11\x28\x02\x2e\x18\x75\xf5\xf3\x16\x8c\xda\xfe\x7c\x05\xed\xcc\xea\xf3\x0b\xec\x24\x5e\x6c\x2c\x6d\x42\x65\x78\x49\xdc\x8b\x8f\x7d\xd8\x31\x2b\x8b\xd5\x97\xdf\x9b\x79\x9e\xe7\xcd\x8b\xf3\x5f\xb1\x37\xf6\x43\xb3\xb7\x65\x96\x68\x51\x39\x82\x53\x72\x72\xf5\x6c\xb5\xdc\x57\x82\x16\xcb\x47\x56\xc6\x3a\x44\xa1\x2f\x46\xa4\x15\xd2\x9d\xe9\x04\x46\x59\x4b\xbf\x91\x9c\xc7\x80\xcc\x2f\x09\x7b\x3c\x1e\x07\xbe\xb9\xf3\x24\x7b\x92\x13\x5d\x6a\x54\x08\x46\xb1\xe2\x89\xa7\xae\xf2\x80\xd1\xe5\xcc\xaa\x03\xe7\x00\x30\x25\x62\xd6\xcf\xd6\x0e\x4e\x22\x09\x30\x61\x21\x02\xfb\xbe\xbf\x1c\xde\xe9\x4f\x9f\x24\xa7\xc8\x00\x5d\x11\x5d\x76\xd5\x35\xd2\xe3\x29\x25\x59\xe6\xac\xad\xb5\x9e\x5d\xc7\x12\x02\x7b\x07\x63\xdf\x0f\x0b\xc7\x9e\xb3\x87\x7c\x88\xb1\x46\x96\x12\x05\x36\x4c\x93\xb5\xb3\x51\xa4\xff\x7a\x23\x40\xdf\x73\x54\x75\x33\x2a\xc7\x98\xb3\x73\xd1\x28\x48\x09\x27\x81\x29\x6b\xfd\x6e\x9c\x67\xa3\x60\x9a\xb0\xec\x83\xa4\xf9\xa0\xcf\x78\xfa\x1a\x39\x3d\x56\x22\xc0\x94\x7c\xd9\x87\x1d\x43\x4a\x74\x68\x2e\x11\x63\xd8\x21\xee\x03\x4b\x5f\x8f\xcd\x6d\xef\x7d\x7d\xbe\x73\x04\x5d\x74\xd5\x6c\x72\x62\xd5\xda\xca\x6c\x08\x4d\x7a\x7d\xdf\xd7\xe7\xf3\xbd\x84\x36\x61\x4e\x1e\x48\x12\x9c\x12\xa7\x28\x4b\x81\xe2\x86\x71\x87\x13\xae\x1f\x2d\xb4\x21\xb2\xef\x18\x19\x74\xe4\x52\x01\x13\x72\x59\x1b\xad\x1d\xc4\x71\xc4\x09\x25\x72\xd0\x2b\xb4\xe9\x3e\xa4\x22\xe8\x5e\x55\xe9\xeb\x3f\xaf\x92\x82\xbf\x90\xd2\x56\x54\x32\x5b\x3e\xa9\xc3\xae\xa5\xf7\xf0\x6a\x56\x6e\x90\x36\x2a\xb0\xa0\xb5\xe6\x02\xbb\x29\xa5\x14\x5e\xc8\x99\xe5\x09\x88\xfc\xe5\x92\x00\x22\x68\x54\x1e\xc7\x56\x1c\xfc\x05\xa1\x78\xc1\xf2\x76\x99\x38\x88\xe2\xdb\x29\x55\x12\xf6\x76\xa8\x59\x45\x45\x0e\x16\x1a\xce\x79\x45\x44\x38\xd9\x96\x5c\xa0\xfa\x47\x3
5\x38\xc6\x88\x3e\x42\x8e\xde\xa8\x8e\xe4\x9c\xbc\x10\xcc\x6b\x69\x21\x08\xe0\x46\x4b\x59\x32\x72\xaa\x70\x69\x1e\x43\x61\x1d\x02\x8b\xaf\x54\x10\xb2\x1e\x2f\x7b\xde\x2e\xc7\x46\x46\x57\xd3\x87\x2f\x89\x31\xc7\x58\x0f\x30\x25\xe0\xe8\xb9\x02\x5b\x3b\xf3\x07\x17\x51\x96\x53\x08\x81\xc0\x1e\x39\x16\xad\xf3\x2a\x25\x5a\x10\xd1\x39\xa9\xe4\x90\x52\x9a\xa6\x94\xae\x3c\xbb\x4c\x50\x8f\x24\x90\x29\x84\x82\x04\xea\x60\x94\x16\xe7\xce\x0a\x63\x6c\xbb\xaa\x32\x86\x37\xb3\x9e\x13\x05\xbf\x70\x6a\xa5\xf6\xd4\xa3\x1c\x48\x03\x14\xb3\xb0\xd6\x02\x1e\xf4\xd9\xbb\x8b\xe6\x1e\x31\x2c\xe2\x85\x5c\x6d\x32\xa6\x02\xaa\xd4\x33\x8e\x73\xc2\x12\x06\x57\xf3\xa7\x1f\x74\x96\xfc\xc3\x6e\xf1\x30\x72\xb0\x4e\xf2\x2d\x25\xaa\xd0\x82\xa2\x04\x2b\xeb\x40\x4d\x17\xa7\x09\xb8\xeb\xd7\x78\xc9\x90\x44\xea\xd8\x0c\x5b\xe1\x20\x11\x3f\x7c\xa0\x90\xa8\x12\xe4\x7c\x06\x9e\x6f\xef\x7b\x5c\x04\x54\x91\xda\x56\xa8\x2e\x1a\x54\x0b\x56\x89\xb7\xad\x51\xda\xda\x99\xd9\xd6\xd6\x72\x8d\x1a\x09\x2e\x2e\x8a\x85\x25\x92\xdd\xd8\x5f\x0e\x7a\xac\xd5\x99\x82\xe3\xa1\x40\x66\x02\x00\x46\x8d\xeb\x54\x42\xa4\x4f\x60\xa3\x14\xa4\x15\x93\xe2\xa3\x16\x83\x52\x6e\x40\xcc\x71\xa9\x8a\xd2\x07\xb8\x11\x70\x27\xe0\x56\xd0\xb7\x8a\xd8\x6a\x5a\x27\xfa\x91\x8d\xc2\xe1\xae\x90\x81\x88\xf2\x2d\x41\x99\xa4\x2c\xfc\xb3\xa9\x5d\x29\x65\xdf\x30\xd2\x7d\xdd\x74\x4b\x35\xe6\xca\x43\x13\x27\x8d\x19\xe8\x45\x9c\x29\x6d\x99\xbb\x89\xba\x35\x56\x03\xd4\x29\x48\xa0\x50\x23\xd1\x55\x47\x62\xe8\x6c\xa7\x2e\x9b\x2e\x99\x75\x57\x97\x3c\x68\xa1\x9f\x26\x12\xf8\x8a\xf2\x26\x59\x19\xb8\xc8\x11\x60\x74\x63\x65\x2d\x21\x25\xa0\x14\x1f\x5b\xb7\x0a\x31\x21\x3c\x06\x1a\xa1\x18\x64\xc6\x98\x46\x31\xdd\x37\x98\xd9\x16\x7d\x73\x6b\x62\xa1\xb7\xd2\xc4\xb1\x7a\x70\x4d\xe8\x16\xf4\x68\xe7\x45\xe9\xcc\x16\x71\xd2\x5a\x05\x91\x77\x26\xa7\xc5\xe7\x2b\x33\xbe\x36\x2d\x3e\x7f\x95\x12\xb2\xb3\x8c\x1f\x30\xbf\x84\x21\x4e\x30\x61\x58\x5a\x48\xbf\x24\x79\xb7\x6c\xdc\xf5\xb3\x50\xed\xab\xa7\xf9\xe3\xc7\xd3\xe9\xf4\xf1\xa3\xab\xe5\xd7\x5a\xad\x97\x7a\x4e\xa1\x9f\xab\x3b\x19\xdd\xc1\xba\xb1\x09\x06\xb5\x3f\x65\x19\x0b\xce\x95\x23\xa5\x74\x25\xc9\x00\xc0\x65\x8d\xc7\x95\x77\x94\x0b\x63\x08\x3b\x94\x11\xf3\xdd\x38\x68\xd0\xfd\xaa\x72\x78\x0f\x2d\xd9\xb9\xd6\xc7\xc6\x3b\x6e\x53\xaa\x63\x94\x50\x35\xc3\x92\xf2\x3b\x4a\x06\x69\x4c\xbd\x60\xaa\xff\xae\x7f\xe9\x29\x9b\xcc\xac\x66\x6e\x39\x1f\x3e\x6c\x99\x7b\x2e\xe5\x8e\xa5\x64\xd9\x18\x07\xb9\x6f\x48\x1e\xfb\xf1\x3f\x70\xac\x19\x16\x5b\x39\xaa\xdd\xec\x2c\x4c\x21\x66\x7d\x14\x02\xae\x8e\x2a\xe9\xec\x9d\x6f\x32\x62\x99\x43\xd5\x27\xa1\xe8\x1a\x8a\x6d\xf2\xab\x6c\x8b\x7e\xfc\x9e\xb3\x9a\xe7\x3f\xce\xbf\x8a\xa0\x5c\xe8\x59\x96\x32\xa7\x71\x8f\x63\x7e\xaf\xc8\x45\x9f\xd2\xb6\xba\x32\xda\xd9\xf3\x00\xd4\x18\x0e\x61\x58\xb7\xf3\x7e\x68\x25\xdf\x28\x2f\xa6\xe5\x1d\x84\xc7\x21\x36\x4e\x2e\x37\xf3\xdc\xc7\x64\x77\xcd\xa2\xe5\x7e\x66\x87\xc5\xa6\xe6\x74\x0f\x2b\x34\x34\xe5\xbc\x3e\x77\x6a\x3c\x07\xc1\x0a\x95\xd4\xf2\xc8\x33\x23\x9d\xa7\xe7\xe0\xf9\x19\xf4\x14\xcb\x4f\x90\xe3\x32\xe8\x9c\xe9\x2d\x86\xa5\x79\xd4\x35\x06\xdd\x1f\x51\xda\x80\x24\x98\x4c\x14\xe0\x38\xbf\xb8\x0c\x0a\x91\x64\x3d\xad\xad\xcf\x6b\x88\x30\x70\x63\xaa\x87\x6d\x74\x17\xdf\x57\xf2\xe0\xe6\xb0\x0c\x2a\x3c\x25\xbb\x3a\x2d\xf2\x80\xa6\x30\x48\x0f\x6d\x33\xec\xe5\x79\xda\xe5\x31\x87\xf3\x08\xea\xd0\x4f\x9c\x23\xbf\x9a\x71\xed\xc9\xfc\x06\x27\xe3\x6f\xbc\x6a\xbe\xb8\x74\x6a\x4e\x6a\xca\x8c\x06\x77\x39\xe3\x7c\x5c\x9e\x40\x61\x4b\xa1\x92\x02\x45\x9d\x47\x9a\x84\xd4\x5e\x2f\xf9\xe2\x20\xc7\xf5\x34\xf8\xe2\xeb\x0b\x6f\x65\x5f\x5a\x1c\x65\xe3\x18\x6a\x35\xc2\x57\xde\xf5\xce\x6f\x7c\x6f\x73\xd3\xdb\xdc\x
f4\x36\x37\xbd\xcd\x4d\x6f\x73\xd3\xdb\xdc\xf4\x36\x37\xbd\xcd\x4d\x5f\x9a\x9b\xf8\x5f\xfe\x74\x25\x91\xae\x8d\x0a\x5c\x62\x79\xce\xa0\x34\x7b\xe5\x9f\xa2\xc2\xb3\x51\xd2\x23\x48\xbf\x46\x71\x36\xb9\xce\x77\x8e\x7e\x85\xbc\x9e\x7f\xbe\x3e\xd9\x50\x10\x40\xc9\x22\x0d\xf7\x7f\x9f\x55\xc8\x10\x69\xbc\xda\xfb\x75\x4a\x1a\x50\xff\x02\xfe\xfb\x75\xb2\xe6\xff\x30\xe4\xff\x6e\x84\x84\xd5\x08\x09\x7d\x9f\xc7\xe6\xd7\xbf\x03\x00\x00\xff\xff\x16\x77\xec\x28\xa6\x1b\x00\x00") + +func fontsCosmikeFlfBytes() ([]byte, error) { + return bindataRead( + _fontsCosmikeFlf, + "fonts/cosmike.flf", + ) +} + +func fontsCosmikeFlf() (*asset, error) { + bytes, err := fontsCosmikeFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/cosmike.flf", size: 7078, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsCricketFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x59\x5d\x8b\xe3\x38\x16\x7d\xf7\xaf\x38\x03\x0d\xe9\x86\xd8\x9e\x34\xb3\xcd\x76\x18\x96\xcc\x0c\x53\x45\x0d\xfb\xb0\xec\x3c\x34\x0b\x01\x95\x2b\x96\xcb\xa2\x1c\xab\xc7\x92\xdb\x5b\x60\xf6\xb7\x2f\xfa\xb2\xe5\xc4\x29\x3b\x1f\xb3\x6c\x68\x2a\x72\x2c\x9d\x7b\xef\xb9\x57\x47\x1f\x9d\x15\xd9\xc7\xe4\x1d\xfe\x8a\x1f\xb0\xfa\x01\xdf\xe3\xe3\x0a\xdf\x63\xf5\xe9\xe3\x5f\x3e\x05\xbf\x54\x6c\xf7\x42\x25\x9e\x5e\xf1\x77\x2a\x0a\x46\xf1\x73\x22\xa9\x80\xfd\xfc\x96\x94\x11\x56\x4b\xac\x3e\x7f\xfe\x14\xec\x4c\xdf\xcf\x9b\xa4\xe2\x22\x2a\xa9\xb4\x9d\x72\x29\xbf\xae\xe3\xb8\x69\x9a\xc8\xbd\x89\xff\xe3\x3a\x07\x0f\x68\x98\xc8\x21\x39\x64\x9e\x94\x2f\xf8\x92\xa4\x14\x77\xac\xdc\xe5\xb4\x42\xc6\x2b\xc8\x9c\xe2\xf7\x87\x7b\xf5\x5a\xa2\x49\x04\x6a\x41\x53\x24\x02\x09\x9e\x12\x41\x75\x1f\xe7\x66\x22\x82\x86\x16\x85\x7a\xfb\x4b\x5e\x31\x81\x7b\x56\x14\x68\x72\x8e\x94\x0a\xf6\x5c\xd2\xd4\xc0\xfd\x51\x27\x95\x1a\x59\xca\x25\x04\xdf\x53\xf0\x0c\x4d\xce\x76\x79\x6f\x80\x95\x70\xd1\x47\xf8\xa9\x4c\x91\xb1\x32\x29\x8a\x57\x1c\x3a\xfc\x8f\xa4\x2e\xf0\x73\x5d\x49\x5e\x2e\xb5\x29\x96\x81\x19\x4f\x4b\x2e\xb5\x7b\x39\x13\x08\x94\x07\x15\x55\xe3\x76\x15\x4d\x24\xc5\xdd\xc3\xfd\x17\x56\x2e\xf1\x80\x7d\xf2\x8a\x92\x7e\xa3\x15\xf2\xe4\x1b\x45\x9e\x18\x37\xf7\x5c\xb2\x6f\x89\x64\xbc\xf4\x46\x25\x08\xee\x1e\xee\x95\xeb\x51\x60\x21\xc0\x84\xee\xff\x85\x95\x29\x6f\x04\x76\x05\x2f\x75\x48\x77\x0f\xf7\x05\x95\x68\x98\xcc\x91\x20\xab\x8b\x02\x19\x4d\x64\x5d\xd1\x14\x16\x04\x34\x65\x92\x57\xdf\x05\xb8\xe3\xd5\xb3\xa2\xf0\x89\xd7\x12\x29\x67\xe5\xb3\x71\x22\x91\x3a\xd8\x8c\x3d\xd7\x15\x85\x7a\xa9\x7e\x16\xfb\x5a\xe4\x7b\x9e\x52\x94\xf5\xfe\x89\x56\x11\xf0\x5b\x2d\x24\x76\x39\xdd\xbd\x20\x09\x90\xd1\x06\x4f\xfc\xdf\x54\x68\x06\x68\xb2\xcb\xcd\x18\x85\x5b\xd5\x05\x8d\x30\xb4\x48\xcb\x74\x9f\x54\x2f\x02\x61\x08\xe5\xb4\x32\xb2\xe3\xfb\xaf\xb5\xa4\x55\xe0\x5b\xcf\x79\x03\x99\xa8\x24\x97\xa9\x7e\x68\x58\x4a\xf1\xca\xeb\x4a\x27\x14\x4c\x38\x5f\xd2\x2a\x69\x5c\x9c\xc2\xb0\x20\x73\x1a\x60\xcf\x6b\x41\xf5\xf0\x86\x7d\xa5\x2e\x9c\xf4\x99\x1a\x07\x78\x96\x19\x38\xb1\xab\x28\x2d\x91\x55\x7c\x0f\x65\x50\x57\xe0\x8e\xd7\xa5\x64\xe5\xf3\x77\x81\x4f\xbf\xe6\x96\x67\x8e\x5e\xb1\xc4\x53\x2d\xc1\xe4\x42\xf4\x95\xa7\x78\x50\x86\x9f\x5e\x91\xe8\xd0\x0a\x2a\x29\x58\xca\xb8\x8c\x80\x87\x2c\x50\x56\xf1\x47\x9d\x14\x2c\x7b\x5d\xe2\x1b\x13\xcc\xd0\x60\xcd\x34\xf4\x49\x30\xa9\x88\xfb\xdd\x38\x26\x72\x2e\x05\x54\x25\x8b\x9c\x37\xe5\x52\x87\xc4\x33\xe5\x62\x25\xa8\x81\xdb\x25\x25\x52\xde\x94\x05\x4f\x52\x30\x65\xe8\xfd\xdd\x3f\x7f\xfd\xf5\xbb\x0f\x41\x80\xd1\x8f\x9d\xad\x39\xdf\xd3\x88\x26\x95\xcc\x0b\x56\xb
e\x98\x39\x2b\x78\x51\xab\x6a\x0c\x02\xfc\x54\x08\x0e\x41\xa9\xf3\x4f\x97\x99\xf1\x6f\xdd\x61\x08\x19\xaa\x49\xbf\x13\x51\xcd\xea\x5d\x44\xd3\x3a\xae\x05\xad\x44\xbc\xcb\x13\x16\x67\xec\xb9\xa0\x32\xca\xe5\xbe\x08\x02\xbc\x7b\xb7\x19\xff\x03\x78\x7f\xd4\x5f\x42\xd4\x43\x0b\xb4\xea\x8b\x10\xef\x0b\xae\xe3\xd1\x97\x19\xe8\x86\xda\xc1\x68\x49\xdb\x8d\xeb\xfa\xbe\xdd\x30\x2d\xa2\xb1\xcc\xef\x44\xa1\x29\x2c\x8d\x4d\x6c\x47\xeb\x96\xff\x04\xed\x26\x51\xfd\x37\x03\xf2\xdf\x7c\x32\x9e\x2f\xc3\x65\x17\xf7\x02\x8b\x2e\x6a\xdd\xc7\xb6\xf5\xc7\x86\xd3\x2a\x2b\x03\xa8\x93\x6d\xcb\xa9\x23\x47\xfb\x67\x20\x4d\xa4\x0e\xdd\xe3\xf8\x00\xe3\x44\xf3\x2d\xcf\x31\xc4\xbe\xca\xf3\xbe\x1a\xd0\x4e\x94\x81\x57\x0d\x2e\xe0\xe5\xc2\x25\x4b\xa7\xd1\x36\xf4\xab\xc7\x88\x90\xb7\xeb\xc3\x96\x15\x5c\xea\x1f\x23\x93\x67\xeb\x0e\xe9\xab\x94\x2c\x17\xd3\x40\x20\x5e\x3c\x1d\xca\xdf\x74\x97\x1f\x5d\x76\x0e\x13\x31\x9f\x2b\x6d\xc2\x2b\x5a\xaf\x62\xbd\x02\x9d\x93\xda\xe0\x10\xd8\x67\x78\x66\x4a\x0e\xf0\x4c\x11\xa0\x2f\x88\xf3\x8a\x6d\x8e\x47\x73\x44\xc2\x14\x86\x69\xc5\x40\xac\x5a\xcb\x85\xfa\xb7\x09\x10\x13\x12\x0f\x8c\x4d\xb4\xfa\xb8\x1c\x25\x20\x6e\x72\x45\xba\x4c\x8e\xdb\x6b\x60\xd5\xb5\xd7\x51\x84\x48\xb7\x1f\x43\xf3\x59\x8c\xcc\x80\x0e\xbe\x07\x77\x70\x8f\xa1\x06\x37\xc9\x5d\xf7\xad\x75\x64\x5a\x8f\x43\xc8\x31\x8f\xe1\x29\x8c\x83\x55\xd4\x10\x45\x46\xef\xf1\x16\xe7\x79\x3c\x42\x89\x6f\x80\xbc\x27\xe4\x1a\x4a\xe0\x19\xf8\x97\x27\x68\xad\xaf\x97\xce\xd8\x80\x1c\x9f\x9e\x23\x82\x26\x22\x00\xb0\x22\x9e\xac\x5d\x97\xd4\x09\x03\x91\xee\xbe\xc5\x4d\x0d\xf8\x39\xd0\x69\x46\x0c\xa7\x69\x70\xda\xe0\xb7\x35\x41\xa3\xc2\x3c\xab\xee\xff\x14\x8a\xba\x08\xb6\xd7\xe5\xc0\x57\x8d\x33\x34\x64\x74\xe0\x3c\x39\x34\x9d\x95\xdc\xe8\x7e\xb1\x4e\xc1\x26\xc0\x56\xc5\xa2\x5f\xa9\x45\x69\x72\x4d\xf2\x7e\x1c\xd5\xd5\x8b\x24\xd6\x06\xe3\x16\xbb\xc7\xc8\xbc\x55\xae\xe9\x57\x56\x2e\x5b\xb5\xd6\x4d\xee\xa1\x7c\xaf\x6c\x56\x16\x4b\x60\x19\xea\x7e\x6d\x18\xda\xfa\x5a\x84\xe1\x59\x3a\xeb\x15\x04\xf1\x4a\xa2\x1d\x14\xc8\x31\x13\x07\x64\xcc\xdb\x94\x9d\x2c\xf0\xd5\x89\x02\x6f\xbd\xfa\x6b\xd7\x7d\xfd\xe1\xcd\x02\xf7\x2d\x98\xd9\x6e\x4d\xc4\xf0\x6c\x0c\x74\x40\x67\xc4\x55\xb9\xce\x4a\x57\xe6\xc3\x50\x26\x23\xf1\xc4\xa6\x75\x7d\x2e\x9c\xaa\xf0\x23\xd9\x02\x9e\x1a\x6c\xfd\x07\xeb\xbc\x8d\x24\xf6\xac\xe8\x80\x1f\x5d\x20\x97\x47\x42\xc8\x87\xab\x22\x99\x63\x00\xf0\x92\xee\xd6\x46\xd7\xb6\xa2\x79\xa6\x6c\xfa\xb9\xb8\xf9\xda\x78\xcb\xb2\x75\xe0\x9d\xb7\xfe\xf7\xda\x03\x73\x54\x38\x90\xb9\x4b\xd3\x6d\x28\xf0\x39\xf8\xe0\x4d\xac\x18\xde\xc4\xda\x0e\x12\xb9\xed\x33\x19\x01\x1f\x7c\x22\xc6\xcb\xd1\xa9\x87\x5b\x96\x06\x85\x7e\x8b\x89\x75\x2a\x99\xe8\xdb\x5b\x25\xcd\xd7\x68\xd0\x70\xe2\x4e\xec\x5e\x07\x06\xba\x6d\xe8\x94\xc8\xfd\xb9\xdb\xe3\x19\x32\xed\x26\xd7\x4d\x67\xec\xac\x08\x7a\x8a\xc2\xb6\xdf\x78\xaa\x1f\x16\xa7\x0c\x6c\x2d\x68\xe1\x04\xd2\xce\xd8\xd5\x55\x0b\xcd\xff\x6a\x37\x8b\xe1\x31\xc5\x3f\xa8\xd8\xc1\x36\x86\xfe\xb0\x32\xbd\xd9\x3c\x35\x0f\x6e\x2b\x19\x17\x18\xc0\x56\x1b\x88\x6f\x11\x41\xac\xe4\xc8\x19\xc0\x59\x49\x1e\xa8\x1e\xde\xfb\xb2\xa7\x45\xae\xd3\x3d\x73\xb8\x73\xc2\x87\xd8\x57\xbe\xf7\x6b\x65\x67\x6d\xa5\xef\x94\xf6\x4d\x1e\xc2\x7a\x6a\x54\x4d\x11\x77\xc2\x38\x3f\xe9\x93\x47\x98\xab\xcf\xa9\x1d\x7a\xbf\x91\xdc\x04\xfe\x7d\xdf\x8c\x2b\x22\xbb\x0c\xd8\x6d\xbc\x7d\xfb\x18\x75\x3b\x68\x75\x4c\xd9\x4e\x6c\x71\x8f\x37\xbb\x76\x1b\xde\x9d\xe9\x47\xee\x9c\x66\x5d\x5e\xf5\xd3\xd2\x0c\x8b\xb7\x97\xdd\x89\x1e\xf8\x79\xd8\x1c\x3d\x87\x4c\x0c\xdb\x1c\x5f\xf7\xb6\x27\xce\x5d\xa7\xee\x96\x36\x01\xa2\x30\x0c\xa3\x30\x32\x28\xa4\x67\x27\xf2\x5d\x98\x45\x3a\xba\xb5\xbc\x
0d\x47\x00\xc9\x99\x80\xbe\x83\x1d\x1c\x99\x97\xbc\x43\xf2\x0d\x4d\x91\x3e\x37\xb5\xb7\xf1\xac\x73\xcd\xf9\x16\xfa\x47\xa6\x33\x01\x6d\xee\x23\xef\xb6\x15\xdd\xd5\xfd\xd9\x75\x76\xe8\x99\x17\xea\x39\x41\x9f\xce\x6a\xaf\x25\xed\xb9\xa1\x0e\x4e\xff\xc3\xff\xa6\xc0\xdb\x05\x6b\x0f\xff\xae\x73\x77\x65\xe0\xe9\x9a\xb3\x75\xf4\xfd\x56\x28\xfd\xd5\xf1\x65\xa1\xf4\xe7\xe6\xd9\xa1\x1c\x25\x2a\xf4\xa9\x1d\xb2\x7b\x78\xf4\x9e\xfb\x70\xba\x1c\xae\xc8\xde\x38\xe0\xcd\xa7\x12\xe9\x17\x43\xe2\x4d\x83\x5b\x78\xd8\xdf\xde\xcd\x08\xfe\x94\x0c\xdd\x6c\x6e\x12\x82\x30\xbc\x94\x3b\xef\xce\xa0\x3d\x58\x89\xcf\x16\xc8\xce\xb3\xa8\x0b\xb2\xbd\x4d\x56\x8f\x00\xf5\xb5\x63\x3c\x05\x33\x06\x38\xc0\x1c\xc0\xfa\xae\x1e\x5d\x56\x5d\x3a\x63\x9c\x09\xe2\xd1\x1a\xdd\x94\x8a\x4b\x14\xd9\x07\x74\x65\x14\x86\xb8\x7e\xf1\xd1\xf3\xa4\x1d\xbf\x56\x6d\xe7\xed\x96\x46\xf4\x70\x4a\x16\x37\x07\xff\x47\x68\x77\x30\x23\xd7\xa6\xfd\xe6\xe6\x74\x21\xf7\xa1\x2c\xac\xdd\xe5\xc4\xb0\x51\x20\x2f\x57\x9a\x64\x1b\x49\x78\xb1\x70\x9e\x00\xbc\x5c\x38\x07\x80\x6d\x07\x78\xf9\x9c\x1d\x7a\x18\x8d\x78\x78\xe6\x86\xf0\xff\x3f\xe4\xa3\x83\x37\x21\xe6\xea\xc0\xb6\x7f\xdc\xf4\xd7\xe1\x71\xbf\x08\x0d\x65\xe3\x64\x7b\x13\xfc\x37\x00\x00\xff\xff\x78\x67\xe6\x4a\x39\x26\x00\x00") + +func fontsCricketFlfBytes() ([]byte, error) { + return bindataRead( + _fontsCricketFlf, + "fonts/cricket.flf", + ) +} + +func fontsCricketFlf() (*asset, error) { + bytes, err := fontsCricketFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/cricket.flf", size: 9785, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsCursiveFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\x5d\x6f\xa3\x56\x13\xbe\xe7\x57\xcc\x45\x24\x40\x32\x4c\x12\x65\x57\x1b\xc9\xb2\xc8\x7b\xf1\xaa\x5d\xa9\x52\x2f\x5a\xed\x5e\x20\x9d\x10\x38\x89\x69\x6d\xb0\x00\xaf\xeb\xca\x3f\xbe\x9a\x99\x73\xf8\x26\x71\xeb\xc4\x98\x8f\xf9\x3a\xcf\x3c\x33\x73\x78\xdd\xbd\xde\x27\x37\xf0\x19\x1e\xe0\xfe\x16\x6e\xe1\xee\xce\x71\xfe\x97\xd4\x3a\x83\xb2\x80\xf4\x58\xd5\xf9\x0f\xed\x7d\xf6\xe1\xe5\x0c\x5f\x93\x02\xbe\x95\xbb\x46\x57\x40\x9f\xf5\x1f\x49\x91\x46\x69\x95\xef\x43\xad\xd3\x3a\x3c\xee\xf3\x74\x1b\xea\xec\xb8\x21\xd5\xfb\x07\xf8\x7a\xdc\xc1\xdd\xe3\x97\x4f\xce\xff\xf3\xb7\x9d\x6e\x82\xfc\x6f\x9d\x41\x52\x64\xac\xfd\x72\x86\x6f\xba\xc8\xf4\x6e\x07\x3f\xe5\xe9\x9f\xba\x80\xf5\x69\xcb\x27\xd1\x21\xa9\x92\xba\x7c\x6d\xc2\xb4\xdc\x6f\x58\xb8\x2c\x00\x3e\xc1\x2f\x49\x05\x77\x8f\x8f\x0f\x0e\xdd\x7a\xca\x32\x9d\xc1\x3e\xaf\xeb\xbc\x78\x83\xc3\xb1\x48\x9b\x63\xd2\xe4\x65\xc1\x1e\x8a\xe3\xfe\x45\x57\xb5\xf3\xb4\xdf\x93\x13\x71\x69\x3e\xb4\x12\x5d\x55\x49\x06\xbf\xe6\xba\x4a\x35\xad\xe4\xc0\x67\x51\x7a\x48\x8a\xb0\xac\xde\x36\xad\x30\x79\xfe\x02\x4f\xc7\x37\xb8\xbf\xbd\xbd\x67\xcf\xbf\x17\x99\xae\xea\xb4\xac\x34\xbb\xca\x92\x7a\x0b\xdb\xe4\x87\x86\x17\xad\x0b\x38\x1e\xb2\xa4\xd1\x19\x34\x25\xec\x49\x24\xdd\x95\xb5\xde\x9d\x61\x9f\x34\xe9\xb6\x07\xa8\xe3\x18\x0f\xbf\x6d\x35\x80\xab\x5c\x48\xb7\x49\x95\xa4\x84\x6e\x5e\x43\x53\x69\x36\x93\x17\x90\x40\x7d\xd0\x69\x9e\xec\xe0\x94\x9c\x43\x80\x9f\x1b\xd8\x27\x67\x78\xd1\xd6\x42\x5e\xd4\xba\x32\xc2\xcd\x56\x43\xa3\xff\x6a\x20\x29\xce\xa7\xad\xae\x34\x9c\xcb\x23\x9c\xf2\x7a\x4b\x11\xed\x74\xf1\xd6\x6c\x35\xd9\x84\xb4\x2c\x02\x6b\xa1\xd0\x69\x43\x30\xee\xf2\x82\x96\xd1\x9c\x68\x25\xcd\x89\x14\x9a\x46\x57\x75\xe8\x38\x37\x37\xd1\xdc\x37\x62\x13\x7c\x44\x3a\x22\x9d\xbb\x74\xa7\xb4\xf7\x41\x84\xe8\x4e\x19\x39\x2e\xb8\xe6\xb2\x3d\x88\x36\xeb\x07\x18\x60\x10\x39\xf
c\x03\x91\x83\x80\xad\x95\xde\x2f\x9d\xac\x58\x10\x3c\x85\xf2\x00\xc1\x07\x56\x74\x67\x15\x3c\xbf\x17\x1f\xab\x20\x78\x7e\x2f\x7a\xb6\xea\xf9\x7c\x85\x31\xfd\x78\x00\xdf\xe9\x2a\x46\x88\x8d\x25\xfb\x13\x39\xb4\x9c\xc8\x71\xf9\x84\xff\xe9\x91\xc2\xce\x78\x77\x84\x11\x10\xd8\x1e\x15\x2e\x0a\xf1\x55\xcc\xe1\x06\xc1\xf7\x20\x60\xc3\x31\x4c\xe3\xe8\xe0\x67\x51\x34\xa2\x30\x23\x6a\xff\x6d\xe0\x94\xc0\x1b\x73\x08\x48\xaf\xbb\x94\xcc\xf6\x14\xda\x35\x4e\x61\x9c\x0b\x5f\x29\x93\x14\xf0\x45\x90\x14\x3c\x65\x93\x35\xc9\xa6\x62\x69\x16\x65\x93\x63\x76\x18\x7b\x20\xf6\xc2\x20\x70\xc5\xde\x12\x3b\x46\x0a\x00\x01\x19\x56\xcb\x01\xc8\x35\x72\x10\x6e\xc0\xd2\x00\x38\x87\x22\x28\xa5\x5a\x88\xdd\x20\x08\xc5\xb0\x3f\x9f\x9b\x0e\x2c\x25\x11\x8f\x39\x27\xd6\xc8\x2d\x13\x7a\xba\x76\x59\x89\x67\x56\x8e\x66\xe1\x73\xee\x54\xdf\x45\x97\xa1\x71\x6e\xda\x43\xd9\xd6\x65\x1f\xea\xf9\xc7\xae\x3c\x46\x8a\x30\x72\x98\x88\x5c\x14\xfc\xed\xeb\x31\x91\xf8\xd0\x5b\x45\x2b\x6f\xf4\x7b\x7a\x26\x51\x12\x35\x5a\x87\xb3\xc0\x9b\x4b\x25\xa9\x2a\x63\x59\xad\xe0\x37\x16\x1d\x11\x50\x32\x8a\x00\x9e\x7a\x9f\x30\x9d\xc2\x9a\x14\x88\x31\xea\x2a\x05\x11\x78\x87\xe2\xf3\x35\x81\xe4\xe0\x43\x85\x67\x09\xc9\x78\x50\xab\x45\x85\x16\x0d\x04\xee\xb3\xd4\x25\x57\xa2\x86\x03\x85\xc1\x19\x9d\x52\xeb\x5b\xb1\xbb\xe7\xc0\xbd\x98\x15\x99\x08\x83\x75\x17\x62\xef\x84\x28\x47\x27\x2b\xa2\x87\x6f\x29\x47\x50\xb7\x58\x2f\xa8\xd1\xc7\x9a\xbc\x10\x1c\xab\x20\x08\x2e\xac\x1f\x2b\x84\x58\x2d\xa8\xa9\xb6\x18\x60\x23\xd7\x28\xb5\xc8\x2e\xd7\x0a\x47\x90\xa8\x2e\x3a\x13\xdc\xda\xf6\x2c\x7f\x09\x43\xee\xcb\x5d\x23\x6f\x11\xed\x4a\x48\xf1\x9f\x98\xf5\x4d\x32\xd1\x74\x2e\xb7\xcf\xb0\x31\x56\xca\x6a\x0d\x18\x70\x0d\x29\xdd\xab\xda\xe8\xc4\x83\xf4\xc9\x61\xe6\xaf\x20\xa5\xa7\x62\xeb\xe1\xf9\x5a\x0f\xf1\xc2\x1a\xcc\x48\x35\x33\x95\x1d\x59\xda\xcf\x17\x96\x1a\xb2\x98\x1e\x32\xa5\xae\x61\x31\xd3\x91\x94\x0d\x1f\x6d\x4f\xef\xd5\xe6\x47\xca\x7d\x2e\x7b\xed\x08\x88\x11\x96\xb8\xac\xa0\xaf\x67\xfb\xae\xa1\xa4\xa7\xbc\x01\x29\xe7\x7c\x32\x41\x65\xd4\x03\x6d\x39\x4c\x42\xe2\x59\x78\x66\x0a\x0e\xac\x2b\xea\x24\x7c\xc7\xa2\x85\x56\xad\x1b\x32\xb6\xa4\xa3\xf1\x80\x1d\xce\x0c\x54\x93\xc9\x11\xdb\x3d\x89\x00\x22\x0f\xe2\xe9\x78\x11\xd3\xed\x2e\x41\x4d\x67\x10\x76\x93\xa3\xff\xed\x6d\x38\x54\xb7\xe1\xe0\xdd\xc7\xf3\x70\x9f\x35\x18\x0f\x4a\x85\xc2\x8c\xcb\xec\x20\xe8\x4f\x60\x45\x0b\xf3\xdf\x19\x89\x2a\x64\x1c\xc7\xe0\x58\x61\xde\xb3\x29\xa9\x90\xae\x6b\x0f\xdc\xb5\x96\xa8\x1f\xe1\x8c\x25\x6b\x4a\xf6\x01\x68\x36\x1a\x62\x0d\x37\xfc\x7c\x8d\xa3\xf0\xc5\xe2\x8a\xfd\xfa\xdc\xf0\x2e\x10\x39\x17\x5c\xd8\x65\x20\xe0\xa4\x67\x59\x98\x4b\x6a\x93\xd1\x60\x70\x8f\x66\xad\x8c\x7d\x34\x84\xa0\x89\xee\x2e\x6d\x67\x10\xd6\x53\x47\x86\x4a\x28\x44\x64\x14\x66\x10\x9f\x96\xb1\xd4\xbd\xd9\xf4\x77\x96\xa7\x35\x33\xac\x09\x51\x34\x6a\xeb\x85\x59\xdd\x66\x45\x31\x84\x8b\x59\x31\xbf\x86\xfc\x68\xc0\x96\xfb\xee\x98\x54\xd3\xac\x6c\xa6\x59\x31\x6e\xe5\x7d\x63\x06\xac\x9e\x90\xd4\x9d\xbf\x20\x64\x27\x1d\xcf\xb9\x59\x44\xdb\x63\x08\xa1\x30\x74\x91\xe8\x2b\x4a\x52\x8c\xf0\x3e\x10\x2b\xfa\x33\x3d\x6c\x01\xd8\x41\x21\x86\x2b\x49\x43\x3c\x5f\x88\xa3\xa4\x71\x1b\x23\x50\x8c\x6d\xd3\x27\xdc\x79\xdb\x5c\xe4\x08\x20\x45\xee\x5d\x06\xb6\xe9\xbd\x4b\x5d\xf7\xde\xf5\xd1\x8b\x8b\xc1\x04\x65\x1b\x60\xe5\x66\x4a\x5d\x19\x4f\xb1\xc2\xb9\x2a\x1a\xc4\x57\x2a\x55\x46\xff\x66\x37\xda\x29\x5c\x39\xf8\x4b\xd6\xf9\x6f\x73\xcf\xdc\x29\xa1\xbc\xa2\x9d\x9a\xb7\xf8\xc5\x4a\x12\x43\xef\x93\xb0\x7d\x39\x33\x78\xd8\xdd\xb6\xb2\xdb\x6d\x1c\x85\x19\x45\xce\x3f\x01\x00\x00\xff\xff\xbf\xcb\xeb\x13\xa7\x12\x00\x00") + +func 
fontsCursiveFlfBytes() ([]byte, error) { + return bindataRead( + _fontsCursiveFlf, + "fonts/cursive.flf", + ) +} + +func fontsCursiveFlf() (*asset, error) { + bytes, err := fontsCursiveFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/cursive.flf", size: 4775, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsCyberlargeFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x56\x51\x6f\xab\x36\x14\x7e\xf7\xaf\xf8\x1e\x40\x24\x12\x98\xa4\xcb\x95\x52\xae\x98\x90\x36\xdd\xed\x6e\x77\x57\x57\xea\x1e\xd9\x0c\x49\x4d\x16\x35\x81\x0a\x48\x9b\x48\x28\xbf\x7d\xb2\x0d\x0e\x34\x34\x2d\xd3\xee\x5b\x5c\x81\x4f\xec\xef\x7c\xfe\xce\xf1\xe1\xa8\xc9\x26\xb9\x89\x0d\xcc\xf0\x03\xa6\x13\x38\x53\xdc\x4c\xc8\x4f\x87\x05\xcf\x93\x2c\x2d\xe1\x60\x13\xe7\x2b\x4e\x3e\xad\x57\x1b\x5e\x62\x99\xa5\x4f\x3c\x2f\xd6\x59\x8a\xc5\x01\xbf\xf3\xb4\xc4\xd7\xb8\x28\x78\x6a\xe3\x81\xa7\x65\x1a\x2c\x85\x67\xf1\x18\x2f\x39\xcd\xf2\x95\x8d\xb9\x33\x9d\x38\xb7\x33\xf2\x29\xcf\xb6\x1e\x8a\x32\x5b\x3e\x04\xc9\xf3\x9a\xee\x9e\x62\x9a\x6e\x30\xfa\xc2\xd3\x94\xe7\x25\xee\xc4\xce\x98\xfc\x1c\x97\xdc\xc3\xf4\x03\x7e\xdb\x6d\x30\xbd\xbd\x9d\x61\x32\xf1\x26\x33\xef\xe6\x03\x7e\xf9\xe3\x4f\x42\x7e\xe5\x39\x47\x9c\x73\x14\xd9\x96\x43\x28\x2c\x28\xbe\x66\xa9\x93\x28\x7d\x9f\xad\x2d\xe2\x24\x8f\xd7\xf7\x36\xd6\x09\x0e\xd9\x0e\xcf\x71\x9a\xc6\xb5\xf0\x12\xe5\x3f\x7c\x6b\x63\xc1\xc9\xf6\x80\xd5\x8e\x17\x25\xc5\x67\x3c\x66\x45\xc9\xef\xc5\x1e\xd6\x82\xb8\xcc\xd7\x4b\x45\x8e\x05\x4f\xb2\x9c\x53\x42\x9c\xff\x75\x10\x02\x31\xe8\xde\x94\x63\x4f\x31\x64\x9c\xdc\x24\x8d\x6d\xea\x31\x84\x87\xb6\xdc\x48\x4d\x63\x01\x63\x0b\x08\xc7\x03\x78\xbc\x11\x10\x89\xa7\x26\xb2\xcd\xbd\x39\x66\xf5\x18\x43\xc7\x8c\x2f\xe0\x48\x91\x82\x23\x47\x09\xe0\x0e\x25\x32\x2c\xf1\x50\x13\x8d\xc0\x00\x30\x86\x91\xb9\x97\x4c\x23\xd3\x34\x8f\x7f\xcf\xe7\xdf\x8e\xf3\xf9\xb7\x6a\x80\xa4\xea\xe8\xff\x08\xea\x3b\x47\x98\xa6\x39\x56\x4c\x9e\x47\x41\x3d\x3b\x04\xb5\x06\x30\x45\x14\xae\x2d\x3d\x3d\xc5\x14\x7d\x34\xbd\x28\xa4\x88\x1c\x0b\x43\x24\x01\x95\xf4\xa1\xae\xe5\x99\x9e\x25\x12\x1e\x45\xfb\x88\xc2\xf1\x7d\xbf\x57\xd2\xd9\xc7\xa2\x13\x19\x51\xdf\xf7\x1d\x50\x6b\x6f\x49\x22\xb8\x88\xbc\x88\x32\x46\x3f\x0e\x52\x04\x78\xc2\xc9\xf2\x2c\x84\x04\xf4\x7e\xbe\x50\xf5\x43\x69\x34\xb0\x20\x2d\x5a\x7b\x0a\x0e\x02\x03\x30\x82\xde\x29\x10\x7a\x5d\xf1\x76\x11\x10\x50\x40\xae\xc8\x0d\x4a\x03\x02\xcb\x92\x0b\xea\x15\x90\xfa\xef\xa2\x01\xe9\x27\x3c\xe4\xf3\x4e\x27\xa3\x7e\x94\xb3\xf8\x61\x88\x9f\x8c\x31\x6d\x8b\xf7\x0b\x64\x1d\x81\x0a\x41\xc5\x00\x57\x1e\xec\xbe\x5f\xf0\x7f\x33\xb4\x06\xad\xa3\xbd\x60\xbd\xe1\x2c\x3e\x46\xa9\x96\x31\x56\x89\x39\x6c\xb2\x1c\x75\x75\xa3\xfe\x72\x03\x82\x4a\x1a\x02\xad\x2a\xbd\x52\xb1\x43\x45\x5a\x03\xa1\x81\x7f\x75\xac\x33\x20\xd3\x3c\x86\x06\x1a\x7d\x40\x43\x03\x43\x0d\x74\x5f\x65\xec\xb1\xde\x02\x6a\x11\x2d\x20\xda\xdb\x67\xf1\xb7\x19\xa5\xf5\xde\xf4\xc8\xa5\x0a\xaa\xb2\xaa\x93\xba\x57\x00\x27\x55\x3d\x47\xb9\x3a\x31\xe1\x8b\x28\x0d\x9d\xd5\x6e\x7a\x35\xea\xec\x16\x2a\x25\xf7\x64\x75\x80\xcd\xa9\xa2\x48\x6a\x58\xd8\xca\xc3\x29\x5f\xe8\x04\xdf\x9b\x2f\xf4\x55\xc9\xeb\x37\xd0\x30\xd6\x89\x15\x4b\x61\x2f\x23\xeb\xd4\x46\x6f\x5a\xfa\x2e\x9f\xf5\xde\x69\xbb\x74\x2a\xad\xac\x3a\xd3\xd8\xba\x91\xcb\x51\xb3\x06\x27\x52\xe8\x2a\x96\xd0\x6d\xd1\xd5\x20\xa6\xd9\x2a\xcd\x56\x5d\xa8\x39\x84\x4d\x21\x30\xb7\x37\x62\x99\x3a\x75\x1c\x73\x2f\x04\xa2\x03\x6e\x32\xe8\x9e\x55\xcc\xa9\x25\x34\xbd\x22\xac\x5b\x46\xa8\x26\x84\xfd\xbd\x46\x11\x9c\xe6\x0e\x73\xd3\xb0\
xa3\x56\xc3\xbe\xf6\x9c\x6b\xcf\xb9\xf6\x9c\x6b\xcf\xe9\x34\x91\xea\xe5\xeb\x3b\xfc\x4f\xf5\x6f\x00\x00\x00\xff\xff\x7b\x5a\x0e\x33\xfd\x0e\x00\x00") + +func fontsCyberlargeFlfBytes() ([]byte, error) { + return bindataRead( + _fontsCyberlargeFlf, + "fonts/cyberlarge.flf", + ) +} + +func fontsCyberlargeFlf() (*asset, error) { + bytes, err := fontsCyberlargeFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/cyberlarge.flf", size: 3837, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsCybermediumFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x55\x51\x6f\xab\x36\x18\x7d\xf7\xaf\x38\x0f\x41\x24\x12\x7c\x24\x5d\x2a\xa5\x54\x4c\x48\x9b\xba\x75\xeb\xaa\x4a\xdd\xdb\xe8\x0c\x49\x4c\x87\x9a\x40\x05\xa4\x4d\x24\x94\xdf\x3e\x19\xbb\x24\xb4\x49\x74\xb9\xba\xf7\xe9\xd6\x89\x01\xe7\xf8\x1c\x1f\x1f\xdb\x24\x5e\xc4\x67\x51\x0f\x63\xfc\x84\x09\xec\x11\xce\x86\xec\x97\xcd\x54\xe4\x71\x96\x96\xb0\xb1\x14\xf3\x64\xb5\x64\x57\xc9\xe3\x42\x94\x98\x65\xe9\x8b\xc8\x8b\x24\x4b\x31\xdd\xe0\x4f\x91\x96\xb8\x8d\x8a\x42\xa4\x16\x9e\x44\x5a\xa6\xfe\x4c\x52\x8b\xe7\x68\x26\x28\xcb\x1f\x2d\x4c\xec\xd1\xc8\xbe\x18\xb3\xab\x3c\x5b\xba\x28\xca\x6c\xf6\xe4\xc7\xaf\x09\xad\x5e\x22\x4a\x17\xe8\xdf\x88\x34\x15\x79\x89\x7b\x89\x0c\xd8\xaf\x51\x29\x5c\x8c\xce\xf1\xc7\x6a\x81\xd1\xc5\xc5\x18\xc3\xa1\x3b\x1c\xbb\x67\xe7\xf8\xed\xaf\xbf\x19\xfb\x5d\xe4\x02\x51\x2e\x50\x64\x4b\x01\x69\xb1\x20\xdc\x66\xa9\x1d\x2b\x7f\xd7\xe6\x12\x51\x9c\x47\xc9\xdc\x42\x12\x63\x93\xad\xf0\x1a\xa5\x69\xa4\x8d\x97\x28\xff\x13\x4b\x0b\x53\xc1\x96\x1b\x3c\xae\x44\x51\x12\xae\xf1\x9c\x15\xa5\x98\x4b\x0c\x89\x14\x2e\xf3\x64\xa6\xc4\x31\x15\x71\x96\x0b\x62\xcc\xfe\xa6\x85\x31\xc8\x42\x6b\xa3\x2e\x6b\x42\x97\xb2\xa3\xd5\x32\x96\xd1\x94\x2e\x3a\xb4\x47\x63\x5a\xc6\x04\x06\x26\x10\x0c\x3a\xe8\xb8\x7d\x20\x94\x55\x0b\x59\xc6\xda\x18\x70\x5d\x06\x68\xe6\x8c\x1b\x08\xa4\x48\x21\x90\xa3\x04\x70\x8f\x12\x19\x66\x78\xd2\x42\x7d\x70\x00\x9c\xa3\x6f\xac\x6b\xa5\xbe\x61\x18\xdb\x7f\x27\x93\xbb\xed\x64\x72\x57\x75\xb0\x54\x6d\xbd\x9f\x41\x9e\xbd\x85\x61\x18\x03\xa5\xe4\xba\x04\x72\xad\x00\x64\x76\x50\x0a\x09\x8e\x55\x33\x5d\xa5\x14\x5e\x1a\x6e\x18\x10\x42\xdb\x44\x17\x4b\x40\x55\x73\xc8\x31\x5d\xc3\x35\x65\xe0\x61\xb8\x0e\x09\xb6\xe7\x79\x07\x2d\x7d\x38\x2c\x4d\x90\x21\x79\x9e\x67\x83\xcc\xb5\x59\x0b\xc1\x41\xe8\x86\xc4\x39\x5d\x76\x72\x04\xb8\x92\x64\xba\x26\x02\x06\x9a\x4f\xa6\x6a\xff\x10\x85\x1d\x37\xa4\x49\x9a\x29\x35\x58\x0f\x3d\xff\xdd\xc5\x67\x80\xe3\x33\x38\xf0\x19\x01\xb2\x09\xdf\x67\x44\x3e\x33\x4d\xd9\xaa\xab\xcf\xf4\xe7\xe4\x03\xc1\x67\x26\x3a\x92\x74\xe7\x37\xb2\x32\xc0\x38\xd7\x56\xb4\x1f\xd4\x5f\x92\xd7\xda\x31\x6a\xcb\xf0\x99\xf3\xd6\xe9\x4b\xc7\xfb\xba\x87\x5e\x3d\xb8\x1e\x5f\x35\x2c\xd5\x38\x41\xe2\x5c\x12\xf8\x83\xcf\x40\xcd\x4c\xf6\xd0\x7a\x92\x15\xe7\x95\xbc\xc9\x7d\xa8\x26\x02\x05\x42\x81\x0f\xbb\xdb\x1e\xc8\x15\x45\xf7\xe1\x07\x98\x40\xa0\x40\xe7\x00\xb3\x75\x3b\x02\xa2\x49\xb6\x35\xe6\x5b\x9f\x96\x21\xe0\xc4\x54\xea\x9f\x54\x55\x6b\x59\x0f\x2a\x9b\xbc\x6a\x62\x69\x24\x1c\xe5\x3e\xe0\x6d\x7d\x1c\x9f\xb0\x66\x06\xce\xc1\xc1\x35\x08\x05\x06\xd5\xc1\x1c\xab\x9d\xfb\x23\x2b\x70\x24\x0d\xcd\x3c\x28\xab\xa2\xd7\x0b\xf1\x0e\xfc\x47\xa9\xf3\x0f\x0b\xab\xa2\xd9\x5d\xdb\xd3\x38\xec\xb4\x05\x22\x70\x5a\x4e\xc1\x35\x5a\x69\x6e\xb5\x23\xef\xb1\x35\x8d\x3b\x1f\x82\x57\x28\xd7\xaa\xd5\x2e\x87\xbd\x94\xa0\x0e\x23\x9c\xd6\xca\x34\x5b\x3d\x50\xa4\x40\x75\x0d\x4e\x1d\xd8\x5d\xce\x4d\x1c\xfa\x8d\x04\x9f\x85\xfb\x2f\x97\xcf\xf3\xf3\x79\x7e
\x7e\x90\xf3\x53\xa1\x55\xbf\xc3\xff\xdb\xff\x01\x00\x00\xff\xff\x21\xe3\xee\x74\x6d\x0c\x00\x00") + +func fontsCybermediumFlfBytes() ([]byte, error) { + return bindataRead( + _fontsCybermediumFlf, + "fonts/cybermedium.flf", + ) +} + +func fontsCybermediumFlf() (*asset, error) { + bytes, err := fontsCybermediumFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/cybermedium.flf", size: 3181, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsCybersmallFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x53\x61\x6b\xdb\x48\x10\xfd\xbe\xbf\xe2\x7d\x88\x90\x0d\xda\x91\x6d\x12\x70\x36\xa7\x20\xb8\x23\x77\xb9\xa6\x21\x90\x7e\x68\x89\xd2\x95\xec\xac\x52\x13\x7b\x15\x24\x39\xb1\x41\xf8\xb7\x97\x95\xb6\x8e\xd4\x34\x05\x41\xa1\x5f\x3a\x20\x69\x47\x4f\xef\xcd\x9b\xd9\x55\xba\x4c\x27\xc9\x01\x26\x98\x60\x0a\x3e\xc6\x64\xc4\xfe\xde\xce\x54\x9e\x66\xba\x04\x47\xb1\x4a\x96\x4b\x76\xb6\xb8\x5f\xaa\x12\xf3\x4c\x3f\xa9\xbc\x58\x64\x1a\xb3\x2d\xde\x29\x5d\xe2\x32\x29\x0a\xa5\x3d\x3c\x28\x5d\xea\x70\x6e\x98\xc5\x63\x32\x57\x94\xe5\xf7\x1e\xa6\x7c\x3c\xe6\xc7\x87\xec\x2c\xcf\x56\x02\x45\x99\xcd\x1f\xc2\xf4\x79\x41\xeb\xa7\x84\xf4\x12\x83\x0b\xa5\xb5\xca\x4b\x5c\x1b\x64\xc8\xfe\x49\x4a\x25\x30\x3e\xc2\xff\xeb\x25\xc6\xc7\xc7\x87\x18\x8d\xc4\xe8\x50\x4c\x8e\xf0\xef\xfb\x0f\x8c\xfd\xa7\x72\x85\x24\x57\x28\xb2\x95\x82\x71\x58\x10\x2e\x33\xcd\xd3\xc6\xdf\xb9\xbb\x42\x92\xe6\xc9\xe2\xce\xc3\x22\xc5\x36\x5b\xe3\x39\xd1\x3a\xb1\xc6\x4b\x94\x5f\xd4\xca\xc3\x4c\xb1\xd5\x16\xf7\x6b\x55\x94\x84\x73\x3c\x66\x45\xa9\xee\x0c\x86\x85\x11\x2e\xf3\xc5\xbc\x11\xc7\x4c\xa5\x59\xae\x88\x31\xfe\x4b\x83\x31\x98\xa0\x8d\x53\xc7\x86\xd0\x27\x5e\x68\xb5\x8c\xe7\xec\xa3\x8f\x0e\xb5\x68\xcc\xca\xb8\xc0\xd0\x05\xa2\x61\x0f\x1d\x31\x00\x62\x73\x59\x21\xcf\xd9\x38\x43\x69\x63\x88\x7d\xcf\xb8\x80\x82\x86\x86\x42\x8e\x12\xc0\x35\x4a\x64\x98\xe3\xc1\x0a\x0d\x20\x01\x48\x89\x81\xb3\xa9\x95\x06\x8e\xe3\xec\x3e\x4f\xa7\x57\xbb\xe9\xf4\xaa\xea\x61\xa9\xda\x05\xa7\xa0\x80\xef\xe0\x38\xce\xb0\x51\x12\x82\x40\xc2\x8b\x40\x6e\x0f\xa5\x98\xe0\x7b\x35\x53\x34\x4a\xf1\x89\x23\xe2\x88\x10\x73\x17\x7d\x2c\x01\x55\xcd\x21\xdf\x15\x8e\x70\xcd\xc0\xe3\x78\x13\x13\x78\x10\x04\x3f\xb4\xf4\xea\x67\xd9\x0f\x32\xa6\x20\x08\x38\xc8\xdd\xb8\xb5\x10\x7c\xc4\x22\x26\x29\xe9\xa4\x97\x23\x40\x18\x92\x2b\x5c\x44\x0c\x74\x37\x9d\x35\xe7\x87\x28\xee\x79\x20\x5d\xb2\x4c\xa3\xc1\x70\x10\x9a\x2b\x64\xf0\x43\x46\x30\x0b\xd7\x0d\x8d\xd3\x30\x64\x21\x7b\x7d\x43\x8d\xbe\x01\x1e\x20\x64\xf0\xc2\x86\xcf\xc0\x79\x18\x36\x15\xa8\x7e\xe7\x9b\x32\x6f\x71\x7f\x7e\x03\x7d\x53\x21\x5b\xe2\xfb\x0f\xb8\x67\xc0\xbd\x3a\xcc\x91\x0e\x19\x2a\xce\x2b\x9b\x1a\x4b\x55\x10\xdc\xb6\x51\xf3\x68\xa1\x52\x9e\xb6\xd1\x20\x08\xba\x52\xbc\xcb\xad\x5b\x95\x40\xa7\x90\x59\x37\x23\xa8\xd7\x52\xca\xaa\xf3\x99\x90\x36\x45\xdb\x80\x45\x23\xdf\xba\x6d\x52\x44\x55\xab\xe2\x8d\x94\xb7\x6d\xb7\x9c\xbb\x75\xa1\x26\xbd\x91\xde\x6d\xd7\xed\x5f\xad\x34\x78\xe9\xc5\x50\x2a\xb4\xab\x76\x2d\x22\xf2\x3b\xa8\x6f\x4d\x34\xed\x7c\x94\x2f\x09\x3e\xa1\xe5\x07\x7e\xdd\x4b\x33\xfe\xa8\x7e\x13\x75\xf7\xa8\xe9\xd9\xb6\x1c\xdb\x83\xf4\x67\xa7\x7e\xef\x4e\x55\x76\x0c\xbd\xfe\xc7\xaf\x01\x00\x00\xff\xff\x04\xac\x2e\xa0\x8a\x08\x00\x00") + +func fontsCybersmallFlfBytes() ([]byte, error) { + return bindataRead( + _fontsCybersmallFlf, + "fonts/cybersmall.flf", + ) +} + +func fontsCybersmallFlf() (*asset, error) { + bytes, err := fontsCybersmallFlfBytes() + if err != nil { + return nil, err + } + + info := 
bindataFileInfo{name: "fonts/cybersmall.flf", size: 2186, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsDiamondFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\x4d\x8f\x1c\x35\x10\xbd\xcf\xaf\xa8\x43\xae\xe0\xb0\x42\x22\xdc\x1a\x29\x8a\xc4\x01\x90\x72\xf6\x61\x83\xb4\x41\x91\x56\x20\x41\x2e\xf0\xeb\xd1\xcc\xf8\xe3\xbd\x57\xcf\xdd\x33\x4b\xc4\x66\xd3\x6d\x77\xdb\xf5\x5d\xaf\xca\xbd\x1f\x9f\x3f\x3e\x7c\x78\x15\x6f\xe2\xbb\x78\x78\x1d\x5f\x7d\x13\x0f\xa7\xb7\x3f\xfe\xf0\xd3\x2f\x3f\xbf\x8d\x5f\xff\x8e\xf7\x7f\xfc\x1e\xef\xfe\xfc\xf4\xf9\x9f\x78\x53\xbe\xff\xf6\xf4\xee\xd3\x6f\xcf\x4f\x9f\xe3\xfd\xd3\xf3\xd3\x87\xbf\x9e\xe2\xe1\xeb\xd7\xa7\x57\x9b\xf9\x8d\x6d\x3b\x95\x5a\xf9\x12\xed\x12\x11\xed\xd9\x79\x74\x79\x58\xea\xb8\x45\x5c\xde\x1f\xdf\xda\xbd\xff\x9c\xa7\xa5\xd6\x68\xd7\x0b\x87\xf3\x4f\x5c\xee\xe9\xed\x9d\x8b\x91\xd1\x65\xde\xdf\x5c\xa4\xbe\xaa\x56\x45\x8e\xb6\xaf\x9b\xa0\xad\xcb\x54\x51\x93\x46\x67\x2c\x85\xb5\x93\x68\x63\x75\xe1\x5e\xfd\x36\x92\x74\xae\x1d\xb2\xcc\xfd\x75\x6e\x97\x75\xb0\x77\x12\xbc\xbe\x69\x8e\x5a\x5f\x9a\x98\x8d\x5c\xa3\xaf\xb7\x21\x6f\x67\xd7\x39\xb8\x97\x74\x23\xa2\x6c\x44\xd1\x64\x5c\xba\x3e\xcd\x61\xdd\xe7\xd1\x3c\x6f\xde\x9b\xfd\x4c\xdf\x84\x05\x46\x63\xe4\x40\xe8\xb6\xee\x6c\xf7\x96\x65\x6a\xc8\x68\xe7\x32\x5c\xd4\xb7\xe5\x3b\x71\x37\xef\xef\xe2\xc4\xc6\x07\x2d\x5e\x10\xc1\xa2\xad\x24\x39\x10\x44\x42\x18\x55\x83\xf5\x20\x37\x23\x06\xc5\x6a\x91\xaf\x7e\xdc\x06\xc8\x48\x18\xba\x5b\x01\x23\x72\x12\x53\x94\x1d\x59\x83\x4c\x50\xc9\x33\x16\x1d\x76\x49\x84\xe2\x4b\x4f\xf9\xaa\x61\x65\x6d\xcb\xe1\x57\x70\xd6\x55\xe2\xcc\x80\x3c\xca\x59\xe2\x6d\x4b\x9a\x54\xf6\xbc\xb8\xc5\x6b\xe2\x71\xce\x06\x09\x98\x89\x22\x6f\x78\xc6\x04\x4f\x5f\x96\xc1\x9f\x18\x0d\xab\x8a\xda\x98\xc6\x28\xcb\xfe\x78\x4f\x93\x3e\xf1\xc1\x1e\x53\xde\xf0\x9a\x80\x29\xf6\x9c\x9f\x1c\x3c\x6d\xce\xda\x95\x6a\x21\x8b\x8d\x6d\x9d\xbf\x46\x10\xc1\x92\xc3\x85\x00\x6f\xc3\xfe\x52\xda\x15\x53\x50\x4a\x13\x59\xc6\x36\x30\x28\x92\x98\xee\x49\x1a\xdc\x2f\xc0\x81\x0a\x44\x58\x88\x6d\xa7\x4c\x6a\x7a\x72\xee\xb6\xcf\xa4\x9a\x0d\x0a\x98\x38\xc1\x6d\x0f\x54\xc6\x96\x2d\x85\xc1\xf6\xf1\xf1\x31\x61\xb6\x6f\xa6\x66\x94\x43\x48\x85\x20\x81\x0b\x33\xaa\x28\x3d\x19\x29\x80\x53\x39\x48\xee\x26\x5f\x4d\x4c\xc3\xad\x73\x57\x83\x1f\x58\xa3\x29\x02\x7d\x85\x4b\x6d\xc9\x30\x6a\xc8\xc8\xdd\x32\xc6\xf5\x31\xb2\x5a\x18\x0c\x98\x1f\x9b\x49\x79\x64\xb6\x18\x43\x61\x4b\xa0\xb1\x51\x81\x62\xd0\x86\x61\x97\x6c\x77\x81\x26\xdc\x17\x21\x9c\x22\x6c\x43\x5b\xf5\xb0\x35\x11\x61\x27\xbd\xba\x0f\x23\x41\x9c\x19\x38\x15\xc4\xdc\x99\xcc\xa2\x71\xe3\x9e\xc4\xe8\xe8\x02\x89\x45\x1d\xd9\xd1\xa8\x54\xec\x97\xaa\x9e\x52\x20\xe7\xe0\xc0\x41\x6d\x49\xcb\x8b\xc2\x6e\xc1\x65\xab\x8a\xeb\x6c\x79\x8b\xb3\xef\x0d\xb3\x36\xe9\xfa\x4e\x5f\x4f\x2d\x0a\xf8\x80\x41\x6e\xe0\x90\x21\x45\xd3\x88\xa4\x51\xf5\x7c\xc1\x78\xcc\x95\x99\x72\xbe\x06\xb1\x4c\x1c\x27\xd6\xe4\xce\x47\xac\x9f\xe5\xf7\xf3\xc0\x96\x20\x1c\x7d\xa3\xb1\x64\xac\xc3\x9c\x9c\xd5\x7b\x20\xb8\xd4\x50\x14\x3c\x72\x4f\xf2\x56\x19\x1d\x2c\x14\x2c\xcd\xf3\xb3\xef\x49\x33\xd6\xcd\xcd\x42\xc3\x53\xeb\xba\xba\xc3\xfb\xb2\x4b\xc3\xe6\x93\xaa\x2a\x55\x3d\x43\x3f\xd2\x49\xbe\x0a\xee\x67\x98\xd8\x97\x99\x11\xc3\xec\x8c\x17\x4f\x6e\x40\x65\x63\xb3\xe1\x6c\x0c\x6c\xdf\x5c\x8c\x84\x64\xd5\x24\xf8\x5d\x05\x72\x82\xec\xe4\x5b\xe1\x9e\x20\x81\x04\x02\xc3\x2c\x66\x3e\x5f\x57\x02\xa0\x19\x86\xca\x05\x7a\x1f\xd4\x25\xbd\xb3\x10\x6e\xa3\xd6\x32\x24\xa3\xaf\x18\xde\x33\x63\x86\xe9\x40\x06\x4e\x35\x1e\x8d\x30\x87\x96\x22\xad\x40\
x35\xa5\x04\x18\x36\x45\x0f\x6f\xe9\x13\x97\xb0\xd3\x03\x42\x92\x15\x52\x6a\x79\x6e\xc1\xcf\x97\xfb\x37\xf3\xed\xa2\x30\x94\x96\x83\x32\xfd\x92\x49\x9f\xdd\x7f\x6f\x5f\xac\x46\x97\x93\x4f\x88\x7c\x21\x84\xd3\xf1\xd4\x74\xa4\x99\x6b\x98\x25\x56\x6b\x46\x4d\x90\x6e\x31\x24\x2b\x72\xa3\x4d\x0b\xb2\x8d\xe4\x40\x59\xb0\x1b\x1d\x6c\xe0\xc9\x5c\xc3\x84\xd6\x7d\x1f\x82\x89\xed\x00\xd9\xe9\xb7\xdb\x54\x0f\x12\x74\x44\x8a\xd9\x47\x2f\x8b\xfa\xd6\x0f\x9c\xe7\x6d\xe3\x0f\x03\x84\x56\x8b\x41\x92\x54\x87\x8c\x64\xea\x11\x78\xda\x0d\x45\x99\x09\xd6\x38\xcd\xcc\xc5\x11\x7d\x86\x49\xd6\xad\xe1\x6c\x8a\x5f\xd4\x0f\xfa\xf9\xa1\x84\x9d\xdb\xfb\xb5\xf7\xdc\x56\x02\xcb\x21\xbe\x99\xbc\x52\x04\xaf\x44\x1f\x36\x5b\x5f\x67\x4c\xfa\x8f\x0c\x41\x06\x1d\x15\x0e\x3c\x24\xae\xf1\xd3\xab\x92\x45\x85\xdc\x0f\x87\xa2\x1f\x0c\x52\x0c\x90\x79\x12\xf1\x17\xa3\x8c\x50\xb7\xfd\x5b\x8a\x37\x94\x7b\xe2\xca\x24\xac\xa8\x62\x1d\xee\x09\x06\xe0\x0a\x12\x9c\x8e\x28\xd4\x19\x8f\x51\x5d\x7c\x2c\x53\xb8\xca\x83\x22\xf9\xef\x71\xef\x0a\x13\x08\x77\xd1\x3e\x1a\x87\xa8\xcb\x84\x14\x18\x6e\x43\x11\x2a\xae\x6b\xe3\xeb\x51\xcd\x8f\xa2\x99\x6d\x01\xa0\x5a\x26\xb9\xce\xda\x02\x34\x84\x34\x7d\x8c\xaf\xb3\x86\x13\xb3\x1a\xbc\x2a\xc3\xfe\x9c\x5d\x7f\x21\x05\x22\xd5\xc2\x83\x0c\x43\x44\x87\x60\x8f\x59\x2d\xa4\xe8\x2e\x32\x6c\x49\xb8\xc8\x1f\x57\x6b\xee\x9b\x24\xfc\xf6\x09\x53\x40\x49\x7a\x6e\xf2\x6d\x76\xf5\xf7\x1a\x3c\x34\x53\x1c\x32\x15\xa9\x5d\x12\xc7\xd7\xaf\x89\xd7\xff\xd7\xdd\x34\xd7\xb4\x58\x91\x33\x42\x84\xa4\xde\x00\xd3\xe9\x1d\xc1\xd1\xff\x34\xde\x4e\xf2\xef\xff\x7a\xf0\x6f\x00\x00\x00\xff\xff\x6c\xcd\x44\x15\xde\x20\x00\x00") + +func fontsDiamondFlfBytes() ([]byte, error) { + return bindataRead( + _fontsDiamondFlf, + "fonts/diamond.flf", + ) +} + +func fontsDiamondFlf() (*asset, error) { + bytes, err := fontsDiamondFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/diamond.flf", size: 8414, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsDigitalFlf = 
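Each `_fonts*Flf` literal in this file starts with the gzip magic bytes `\x1f\x8b`, and every `fonts*FlfBytes` accessor funnels through `bindataRead` together with the `asset` and `bindataFileInfo` types, which go-bindata emits near the top of the generated file, outside this hunk. A minimal sketch of those pieces, assuming stock go-bindata output with compression enabled (`os.FileMode(436)` is 0664 in octal):

package bindata // sketch only; the generated file declares its own package

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"time"
)

// asset pairs the decompressed file contents with its pseudo file info.
type asset struct {
	bytes []byte
	info  os.FileInfo
}

// bindataFileInfo implements os.FileInfo for an embedded file.
type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

func (fi bindataFileInfo) Name() string       { return fi.name }
func (fi bindataFileInfo) Size() int64        { return fi.size }
func (fi bindataFileInfo) Mode() os.FileMode  { return fi.mode }
func (fi bindataFileInfo) ModTime() time.Time { return fi.modTime }
func (fi bindataFileInfo) IsDir() bool        { return false }
func (fi bindataFileInfo) Sys() interface{}   { return nil }

// bindataRead gunzips an embedded blob back into the original file bytes.
func bindataRead(data []byte, name string) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("read %q: %w", name, err)
	}
	defer gz.Close()

	out, err := io.ReadAll(gz)
	if err != nil {
		return nil, fmt.Errorf("read %q: %w", name, err)
	}
	return out, nil
}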
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x9a\xf9\x77\x24\x55\x15\xc7\x7f\xef\xbf\xe2\xba\xa2\x32\x99\x49\x77\xa7\xbb\x13\x45\x9c\x4a\x75\xa5\x53\x4c\xa5\xaa\xa9\xae\x9e\x21\xe3\xda\x49\x2a\x49\x4b\xa7\x3a\x76\x57\xcf\x10\x45\x71\xdf\xf7\x7d\x5f\x40\x50\x16\x41\x40\x40\x76\x54\x04\x44\x94\x4d\x10\x64\x55\x56\x11\x65\x13\x04\xd9\x3c\x9d\x99\xa9\xba\x75\x97\xce\xf1\x07\x0f\x73\xbe\xb9\x9f\x57\x75\xdf\xfb\xde\x77\xdf\xab\x76\xb9\xb3\x5c\x68\x9d\x00\x45\x28\x40\x19\xf2\x90\xcf\xc3\x38\xe4\xcb\xa5\x7c\x31\x57\x6d\xaf\xb4\xe3\x56\x07\x16\x36\xa0\xd6\x09\xa3\x08\xcc\xd5\xd6\xfa\x7a\xd8\xe9\x40\x7e\xc7\xd4\x04\x8c\x8d\xc1\x42\xab\x1f\x2e\x41\x37\x82\xe9\xc1\xc2\x42\x27\xcc\xd9\xd1\x62\x67\xb0\x14\xf6\x61\x71\xb5\xd5\x6b\x2d\xc6\x61\xaf\x0f\xf9\xc2\xe4\x58\xa1\x54\xca\x59\xd1\x6a\x2b\x5a\x0c\x97\x60\xb9\xdb\x03\xa7\x15\xb7\xa3\xb1\xc2\xb6\xe2\xb6\x89\xe1\xf0\x47\x75\x57\x23\x30\xbb\xfb\x5b\x11\x1c\xb1\x38\xfc\xcf\xce\xc5\xc5\x76\x67\x7b\xb7\xb7\x72\x64\x6e\x33\x34\x1d\x10\xfa\x61\xdc\x87\xfe\x60\x7d\xbd\xdb\x8b\x37\x1f\xde\xd9\x80\xf6\x32\x6c\x74\x07\x3d\xe8\x2f\xf6\xc2\x30\x82\xe5\x6e\x14\xc3\x52\x37\xec\xe7\x96\xdb\x2b\x9d\x30\x86\x5e\xd8\x09\x5b\xfd\x10\x0a\xdb\x0b\xc3\xd7\x76\xbb\xfb\xc2\xb5\x85\xb0\x07\xf9\xa9\xa9\x72\xae\x1e\xf6\xd6\xda\xfd\x7e\xbb\x1b\x41\xbb\x0f\xab\x61\x2f\x5c\xd8\x80\x95\xf6\xbe\x30\x82\xb8\x0b\x6b\xdd\xa5\xf6\xf2\x06\xc4\xab\xed\xfe\xe6\xb8\xdb\xa0\xd5\x87\x4e\x37\x5a\x19\xfe\x37\x5e\x0d\x73\x9b\x01\xed\xb0\x77\x58\x1f\xa2\xd6\x5a\x38\x1c\x63\xbd\xd3\x5a\x3c\x30\x2f\x2d\x58\xec\xae\xad\x85\x51\x0c\x9d\x76\x14\x6e\xcf\xe5\xe6\x0e\x44\x2f\x0d\xb3\xae\xb7\x06\x1d\x98\x1e\xf4\xe2\x6e\x04\x47\xf4\xbb\x9d\x41\xdc\xee\x46\x3b\xc3\x56\x2f\x5e\xed\xb4\xa3\x63\xb7\x47\x61\x7c\x24\xe4\x0b\x3b\xa6\xca\xc3\x17\x69\x1f\x98\x5b\x88\xc2\xfd\xb0\xde\xea\xb5\xd6\xc2\x38\xec\xe5\xd2\x89\x58\xd8\x80\x19\xbb\x36\xcc\xb6\x15\x2d\x0d\xff\xb9\xa7\x1d\x6d\x07\x98\x6b\x6d\x40\xab\xd3\xef\xc2\x42\x08\xfd\x4e\x7b\x65\x35\xee\x6c\xc0\xda\xa1\xb7\x18\xae\xc5\x42\x18\x0f\xe7\x75\xd0\x0f\x73\xdd\xe5\xcd\xe1\x97\x07\x9d\xce\xd8\xfe\xf6\x52\xbc\xba\xe3\xd8\xb0\x17\xed\xe8\xaf\x0d\xfa\xab\xd0\xea\xc4\x61\x2f\x6a\xc5\xed\x7d\x61\x7f\x1b\x2c\x0c\x62\x58\x0a\x97\x5b\x83\x4e\x0c\xdd\x41\xbc\x3e\x88\x87\x99\xbb\x5e\x30\x5c\xaa\x68\x25\x5c\xda\x9e\x83\x13\x76\x1e\xfa\xdf\xce\x1c\x1c\x3e\x76\xf8\xce\x1c\x1c\xff\x8a\xe3\x0f\xfe\x3b\x95\x5e\xc9\xa5\x57\x71\xe9\xd5\x5c\x7a\x0d\x97\x5e\xcb\xa5\xc3\xb8\xf4\x3a\x2e\xbd\x9e\x4b\x6f\xe0\xd2\xe1\x5c\xda\xc6\xa5\x31\x2e\x6d\xe7\xd2\x0e\x2e\x8d\x73\x29\xcf\xa5\x02\x97\x8a\x5c\x9a\xe0\x52\x89\x4b\x65\x2e\x55\xb8\x34\xc9\xa5\x29\x2e\xbd\x91\x4b\x6f\xe2\xd2\x11\x5c\x7a\x33\x97\x8e\xe4\xd2\x5b\xb8\xb4\x93\x4b\x06\x97\xa6\xb9\x64\x72\xa9\xca\x25\x8b\x4b\x33\x5c\xaa\x71\x69\x96\x4b\x36\x97\x8e\xe2\xd2\x2e\x2e\x39\x5c\x9a\xe3\x92\xcb\x25\x8f\x4b\x75\x2e\x1d\xcd\x25\x9f\x4b\x0d\x2e\x05\x5c\x6a\x72\x69\x37\x97\xf6\x70\xe9\x18\x2e\xcd\x73\x69\x2f\x97\xde\xca\xa5\xb7\x71\xe9\xed\x5c\x7a\x07\x97\xde\xc9\xa5\x77\x71\xa9\xc5\xa5\x05\x2e\x2d\x72\x69\x89\x4b\x21\x97\x96\xb9\xb4\xc2\xa5\x55\x2e\xb5\xb9\xf4\x6e\x2e\x1d\xcb\xa5\x0e\x97\xd6\xb8\x14\x71\xa9\xcb\xa5\x75\x2e\xbd\x87\x4b\x3d\x2e\xf5\xb9\x14\x73\x69\xc0\xa5\x7d\x5c\xda\xcf\xa5\xe3\xb8\xb4\xc1\xa5\xf7\x72\xe9\x7d\x5c\x3a\x9e\x4b\xef\xe7\xd2\x07\xb8\x74\x25\x97\x6e\xe5\xd2\x9d\x5c\xba\x9f\x4b\xcf\x70\xe9\x05\x2e\xdd\x93\x4a\xf9\xc2\x64\x22\x7f\x10\xcb\x53\x89\xfc\x21\x24\x17\xc7\x13\xf9\xc3\x58\xce\x27\xf2\x47\xb0\x5c\x48\xe4\x8f\x62\xb9\x98\xc8\x1f\xc3\xf2\x44\x22\x7f\x1c\xcb\xa5\x44\xfe\x04\x96\xcb\x89\xfc\x49\x2c\x57\x1
2\xf9\x53\x58\x4e\xb3\xfc\x34\x96\xd3\x2c\x3f\x83\xe4\x89\x34\xcb\xcf\x62\x39\xcd\xf2\x73\x58\x4e\xb3\xfc\x3c\x96\xd3\x2c\xbf\x80\xe5\x34\xcb\x2f\x62\x39\xcd\xf2\x4b\x58\x4e\xb3\xfc\x32\x96\xd3\x2c\xbf\x82\xe5\x34\xcb\xaf\x62\x39\xcd\xf2\x6b\x48\x2e\xa5\x59\x7e\x1d\xcb\x69\x96\xdf\xc0\x72\x9a\xe5\x37\xb1\x9c\x66\xf9\x2d\x2c\xa7\x59\x7e\x1b\xcb\x69\x96\xdf\xc1\x72\x9a\xe5\x77\xb1\x9c\x66\xf9\x3d\x2c\xa7\x59\x7e\x1f\xcb\x69\x96\x3f\x40\x72\x79\x1c\xc0\xf5\xc6\xa6\x7d\xcb\xd8\x05\x8d\xba\x61\x5a\x49\xd8\x0f\x71\x58\x1e\xc0\x76\x77\x5b\x7e\x60\x55\xc1\x3a\xc6\x74\x8c\x39\x23\xb0\x3d\x17\xe6\x0c\x7f\x57\x42\xfc\x08\x13\x05\x00\xd3\x72\x03\x68\xd8\x35\x37\x89\x38\x11\x47\x14\x01\xea\x5e\xd3\xad\x66\x43\x4e\xc2\x21\x13\x00\x66\xd3\xf7\x2d\xd7\x9c\xcf\x46\xfd\x18\x47\x95\x00\xe6\x2d\x37\x1b\x70\x32\x0e\x28\x03\x4c\xfb\xde\x2e\xcb\x85\x69\xc3\x4f\x42\x4e\xc1\x21\x15\x80\x86\x65\x6e\xe6\x94\x19\xe7\x27\x38\x68\x12\xa0\x6a\x1b\x96\x6f\x35\xec\x46\x12\xf1\x53\x1c\x31\x05\x60\x7a\xf5\x79\xdf\xae\xcd\x92\xd4\x4f\x45\x61\x95\x71\x80\x19\x6b\xce\x76\x6d\xd7\x02\xcf\xaf\xda\xae\xe1\x80\xed\x56\x6d\xd3\x08\xbc\xf4\x05\x4f\xc3\x48\x1e\xc0\xb1\x66\x82\xb1\xba\x67\xbb\x81\xed\xd6\xa0\xea\x35\xa7\x1d\x0b\x0c\xb7\xe6\x58\x70\x74\xd3\x0b\x84\x15\x39\x1d\x8f\x50\x80\xcd\x8b\x46\xe6\xad\xce\xc0\x01\x45\x80\x86\x37\x13\xc0\xec\x7c\x7d\xd6\x4a\x63\x7e\x86\x63\x26\x00\x7c\xab\x66\x37\x02\xcb\xb7\xc8\xca\x9d\x89\xe3\x4a\x00\x73\x86\xe9\x7b\xe9\x9f\xcf\xc2\x7f\x2e\x03\x54\xad\x9a\x6f\x59\xd9\x21\x7e\x8e\x63\x2a\x00\x75\xa7\xd9\x18\x9b\xb3\xdd\x66\x23\x1b\x77\x36\x8e\x9b\x04\x68\x34\xeb\x96\xdf\x30\x7d\xbb\x1e\x40\xb0\xc7\x4b\xe2\xce\xc1\x71\x53\x24\x6e\xd6\xb7\x52\xb7\x9f\x8b\x22\x27\xc7\x01\x0c\xb3\x19\x58\x60\x98\x43\x0f\x27\x41\xbf\xc0\x41\x79\x80\x39\xdb\xf4\xbd\xec\x9b\x9d\x87\x43\x0a\x00\x75\xdb\x31\x7d\x6f\x4f\x36\xe8\x7c\x1c\x54\x1c\x8e\x53\xad\x3a\x16\x54\xbd\xf4\x51\x17\xe0\x90\x61\x19\x58\x55\xdb\x71\x8c\xe4\xef\xbf\xc4\x7f\x2f\x65\x33\xf3\xdc\x34\xaf\x0b\x71\x5c\x79\xb8\x28\x0d\xb3\xe9\x8c\xf6\xdd\x45\x98\xa9\x00\x6c\xba\xf9\xff\x33\xde\xc5\x78\x88\x49\x80\xdd\x4d\xa7\x66\xf8\x30\xe3\x1b\x07\x6a\xcc\x73\x87\xa8\xe1\x07\x56\xfa\xd8\x4b\x30\x33\x25\x33\xb3\x86\x33\x93\x00\x97\x22\x60\x6a\x9c\x03\x9b\x2b\x7c\xe8\x31\x69\xc1\x5e\x86\x31\xbc\xb1\x1d\xdd\xb4\x1a\x3c\x95\xcb\x71\x78\x01\xc0\x31\x02\xdb\x05\xd3\xa8\xdb\x81\xe1\x80\x63\x05\x81\xe5\x83\x01\x7b\xec\x60\x16\x6a\xbe\xb1\x3b\x9d\xfb\x2b\x30\x59\x1c\x4d\x6e\xfa\x2d\x21\x7f\x85\xc9\x89\xd1\xa4\x69\xfb\x66\x73\x6e\xc6\xb1\x8e\x49\xf0\x5f\x63\xbc\x34\x1a\x0f\x6c\xa7\x9a\x3e\xf8\x37\x98\x2c\x8f\x26\xf9\x56\x78\x25\xa6\x2b\xa3\x69\x7f\x68\x25\x63\xda\x43\xf3\xf5\x5b\x8c\x4f\x6a\x78\x1a\x7f\x15\x8e\x9f\x52\xe2\xcd\x83\xb3\x44\xea\xe7\xea\x94\x2d\x8c\x8f\x2b\xac\x25\xad\xea\x35\x98\xcc\x8f\x26\xb3\xab\xfa\x3b\x4c\x6a\x4e\xb2\xd4\x55\xbd\x16\xe3\x9a\x9d\x2c\x6d\x6d\x7e\x8f\x69\xcd\x52\xb6\x94\xf0\x75\x98\xd4\xdc\x64\x4b\x09\xff\x01\x93\x9a\x9b\x6c\x35\xe1\x3f\x62\x5c\xb3\x93\xad\x25\x7c\x3d\xa6\x35\x37\x59\xc1\x6c\x02\xdc\x80\x01\xcd\x4e\xae\x54\x35\x37\x22\x32\xaf\x99\xc9\x93\xe6\xf6\x26\x4c\x6a\x66\xf2\xa4\xb9\xbd\x19\x93\x9a\x99\x3c\x75\x6e\xff\x84\x71\xcd\x4c\x9e\x94\xec\x2d\x98\xd4\x8c\xe4\x69\xab\x72\x2b\xa6\x87\x87\x84\xa6\x13\xd8\x75\x67\xd8\x81\xd8\xd9\xeb\xcf\x38\x56\xb3\xcf\xc1\x27\x35\x82\xe1\x01\x2f\x41\x6f\xc3\xa8\x66\x9d\xa6\xb4\x22\xb7\x63\x52\xb3\x4d\x53\x5a\x91\xbf\x60\x52\xf3\x4f\x53\x5d\x91\x3b\x10\x5e\xd0\x4c\xd4\xd4\xe6\xf5\x4e\x4c\x6b\x46\x9a\x97\x5e\xfb\x2e\x4c\x6a\x46\x0a\x66\x3d\x3f\x5d\x98\xbb\x31\x92\x98\xa7\x31\x67\x38\x09\xd0\x98\x35\x
fc\x3a\xa4\xef\x77\x0f\x46\x26\x44\x44\xec\xa1\xf7\x62\xae\x34\x8a\xcb\x66\xf5\x57\xcc\x95\x47\x71\xc2\x52\xfc\x0d\xc3\x95\x51\x70\xb6\x34\xee\xc3\xdc\xe4\x28\x8e\x2f\xe0\xfd\x98\x9d\x1a\xc5\x0a\x9d\xf3\x01\x04\x17\xc7\x65\x38\x8d\x7e\x10\x47\xe7\xc5\x68\xa5\x6b\x3e\x84\xc9\x82\x48\x8a\x3d\xf3\x61\xcc\xc9\x86\x11\x3b\xe6\xdf\x31\x27\xbb\x46\xef\x97\x8f\x60\x58\xb6\x8e\xda\x2d\xff\x81\x59\xd9\x3e\x62\xaf\x7c\x14\x73\xb2\x73\xc4\x4e\xf9\x4f\xcc\xc9\xce\xd1\xfb\xe4\xbf\x30\x2c\x5b\x47\xed\x92\x8f\x21\x76\x42\x76\x0e\xee\x91\x8f\xe3\x70\xd9\x3a\x62\x87\x7c\x02\x73\xb2\x71\xc4\xfe\xf8\x24\xe6\x64\xe3\x88\xdd\xf1\x29\xcc\xc9\xc6\xd1\x7b\xe3\xbf\x31\x2c\x1b\x47\xec\x8c\x4f\x63\x4e\x36\x8d\xda\x17\x9f\xc1\x6c\x05\xa0\x6a\xef\xb6\x1b\xac\x23\xfe\x07\x47\xc9\x36\x91\xfb\xe1\xb3\x18\x94\x2d\x22\x76\xc3\xe7\x10\x57\x92\xed\x21\xf6\xc2\xff\x62\x4e\xf6\x89\xde\x09\x9f\xc7\xb0\x6c\x16\xb5\x0f\xbe\x80\x59\xd9\x30\x62\x17\x7c\x11\x73\xb2\x61\xb2\x3d\xf0\x25\x0c\xc8\x26\x99\xd7\x5e\xf2\xe5\x94\x1d\x3f\x6e\x3c\xaf\xde\x3f\x0e\x6e\xf8\xe4\x43\xca\x15\x84\x96\xa7\x57\x66\xef\x25\xec\x16\xf7\xd9\x69\xdf\xda\x2d\x5e\x0e\x37\x61\x79\x7a\x45\xf4\x3e\x82\x6e\x71\xa7\xf5\x6a\x9e\x6b\x89\x1f\x16\x37\xe9\x91\xe7\x00\xc2\x9e\x4d\x58\xed\x1c\x69\x4a\xae\xb8\x8a\xc0\xf2\x8e\x2e\xa2\x0f\x12\x54\x3b\x4a\x9a\x6a\x19\xd0\x87\xcb\x65\xab\xf3\xf4\x0d\x8c\xd1\x6f\x50\xf5\x02\xfd\x3e\xbe\x39\xc0\xf4\xa8\x17\xe0\xf8\x03\x04\x37\xb7\x98\x01\x03\x1b\xf5\x1a\x02\x57\x47\x26\x9f\x41\x1f\x26\xa8\xa5\x3c\xb7\x2a\xc1\xd7\x13\x78\x46\x7c\xae\x88\x3e\x96\x45\xd5\x8b\x60\x55\xdc\xa1\x6f\x20\xb4\x5c\xd5\x32\xfb\x38\x61\xb7\xf8\xb6\x40\xf6\x84\xd3\x08\x3d\xf2\x7c\x46\xd8\x8b\x08\xab\x55\x97\xa5\x99\xe4\x3a\x32\x80\x5c\x61\x2a\xfe\x28\xc1\xd5\x7b\xbe\xb8\x37\x5c\x4b\x68\xb9\xc2\x64\xf6\x11\xc2\x6a\xd5\x65\x49\x56\xa1\x59\xcb\x95\x25\xa2\x34\x63\xad\xaa\x6a\xea\xbe\x70\x1b\x19\x41\x2e\x2d\x9d\x7f\x96\xf0\x5a\x7d\xd5\xa4\x46\x70\x3a\x81\xe5\xfa\x12\xd1\x8b\xb3\xa8\x7a\x47\xae\x69\x66\xb9\x85\x0c\x20\x97\x98\x8a\x3f\x4d\x70\xad\xca\x6a\xf2\xdd\x89\x64\xae\xdc\x9b\x15\x98\xe6\xae\xf5\xcf\x59\x75\xd5\x4e\x21\x23\xc8\x3d\x54\xe7\xcf\x27\xbc\x56\xe9\xb3\xe2\x0e\x45\x3a\xb8\x72\xa9\x96\x59\xd2\xc1\x0b\x5a\x8d\xdb\xd2\x99\xfc\x64\x02\x8f\xbc\x1e\x65\xd1\xf3\x08\xaa\x55\xb8\x2d\xee\x8c\xa4\x91\x14\xe4\x12\x97\x59\xd2\x49\x0a\x5a\x85\xd9\xe2\xce\x74\x35\xa1\xe5\x12\x93\xd9\x87\xb2\x6c\x51\xab\x31\x5b\x2b\x92\x53\xc9\x00\x4a\x1b\xf3\x02\xc7\x6a\x34\xc0\x4e\xb8\x0b\x09\xa7\x19\xfc\x28\xd5\xa0\x67\x90\x11\x64\x83\xeb\xfc\x25\x84\xd7\x0c\xbe\x4b\xae\xd0\x9b\x09\x2e\x3b\x5c\x81\x9f\x22\xb0\x7c\xb5\xdb\xe5\xa7\xc4\x89\x84\xd0\xbe\x37\x3a\xd2\xd9\x94\x1c\xec\x8a\x86\xf8\x38\x11\x25\x87\xba\xe2\xf4\xe8\xe7\xd2\x44\xc9\x2e\x54\x34\x47\x3d\x99\xc2\x64\x0b\x2a\x56\xb7\x78\x76\xa6\x6f\x92\x8d\xa0\x68\x8d\x7c\x72\x06\x25\x1b\xc1\x84\xf6\x79\xd5\x11\xb7\xaf\x93\x08\x2d\x5f\x68\x65\xf6\x5c\xc2\x6a\x1f\xea\x5d\x69\xa9\x6e\x24\xb0\x7c\xab\x15\xd1\x27\x08\xaa\xfd\xea\xe3\xca\x0b\x45\x9f\x2c\x7f\x0b\x51\x60\xfa\x6c\xed\xeb\xbd\x2b\x2d\xd5\x4d\x04\x96\x0b\x49\x44\x9f\x24\xa8\x7a\xa0\x73\x6b\x09\x73\x29\x61\x94\x73\x1c\x22\x2e\x27\x84\x76\x7c\xf3\xc4\xc6\x40\xd3\x93\x8f\x6e\x32\x4b\xf2\x2b\x6d\xf1\x3b\xd5\xa1\x5f\xf9\x33\xbe\x20\x27\x28\xe5\xcb\xce\xa8\x11\xc8\x21\xaa\xa4\xed\xf3\xbe\x64\x4b\xf2\xe5\x43\xf9\xec\x22\xa2\xe4\xc3\x47\x49\xdb\xdd\x7d\xd9\x94\xa4\x88\x4b\xf2\xee\xae\xc0\xa4\x8a\x4b\xda\x01\xc6\x97\x6c\x49\x4e\xeb\x25\xf9\x00\x23\xa2\xe4\xa0\x5e\xd2\x1c\xdd\x90\x26\x8c\xec\xd4\x25\xd9\xda\x22\x4a\xf6\xe9\x92\xe6\xf1\x86\xda\x8b\xef\x26\x23\xc8\x3e\xd7\xf9\x97\x08\
xaf\x1d\xa0\x1a\xf2\x82\x91\x1b\x71\x49\x3e\x41\x29\x30\xb9\x12\x97\xb5\x3a\x6b\x48\x4b\x46\x8e\x4f\x65\xb9\xc0\x44\x94\x9c\xa0\xca\xea\x4f\x79\xf2\x6b\x93\x29\x2f\xcb\xd7\x13\x05\x26\xf3\x5d\xd6\xaa\x3a\x90\x5e\x9c\x5c\x8c\xca\x72\x55\x8b\x28\xb9\x16\x95\xb5\xaa\x0e\xc4\xee\x4a\x4e\x8c\x65\xb9\xa8\x65\x96\x9c\x16\xcb\x5b\xfc\x52\x9c\xbd\x59\xdc\x45\xe0\x91\x1f\xe4\xb3\xe8\x8b\x04\xd5\x6a\xba\x29\xf6\x00\xba\xc8\x72\x51\xcb\x2c\x5d\x63\xad\xaa\x9b\xd2\xdd\x9d\x66\x2c\x17\xb4\x88\xd2\x8c\xb5\x5a\x6e\xaa\xbf\x8e\xde\x4e\x46\x90\xcb\x59\xe7\x9f\xcb\xf2\x95\xad\x7e\x9c\x97\xfa\xde\x1d\x64\x8c\x91\xbf\x89\x88\x23\x3c\x4f\x46\xd0\xea\xbb\x29\x5e\xec\xc8\x1c\x54\xe4\xf2\x96\x59\x9a\xbf\x76\xd7\xd8\x2b\x35\x03\x52\x64\x15\xf9\xae\x21\xa2\xa4\xc6\x2a\xda\x5d\x63\xaf\x76\x19\x3d\x8b\x0c\x20\xdf\x36\x54\x9c\x9c\xd5\x2a\xda\x7d\x63\xaf\xb4\x35\x9d\x49\x60\xf9\xbe\x21\xa2\x97\x65\xd0\x82\x59\x01\x12\x70\x41\x36\xa0\x3a\x09\xa4\x66\x4e\x24\x01\x53\x20\xa4\xf7\x32\x09\x9a\x06\xba\xf0\xe7\x90\x88\x2a\x64\xac\x49\xff\xef\xb1\xe8\x34\xfc\xbf\x00\x00\x00\xff\xff\x76\x19\x2f\xf2\x1f\x3b\x00\x00") + +func fontsDigitalFlfBytes() ([]byte, error) { + return bindataRead( + _fontsDigitalFlf, + "fonts/digital.flf", + ) +} + +func fontsDigitalFlf() (*asset, error) { + bytes, err := fontsDigitalFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/digital.flf", size: 15135, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsDohFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x9c\x79\x77\x55\x37\x92\xc0\xff\xd7\xa7\x10\x4b\x3c\x90\x99\x10\x56\x07\x6e\xa7\x83\x0c\x13\x48\x48\xb3\x07\x08\x9d\xe0\x34\x8b\x31\x76\x83\x77\x70\x48\x4f\xf0\x57\x9f\x23\xd5\xa2\xaa\xba\xd2\x7d\xef\x3e\xdb\xef\x5d\xe7\xa4\x66\xce\xb8\xea\x57\xa5\xda\x24\xaf\x99\xe4\xf5\xdb\xd7\xe7\x9f\xef\xf9\xf3\x97\xe2\xff\x5e\xbc\xe4\xcf\xfa\x0b\xee\xd5\xfa\x9b\x33\xaf\xdf\xbe\xf6\x2f\x3e\xfa\xeb\xef\xb7\x76\x56\xb6\xfd\x93\xe7\x6b\x6b\x4b\x5b\xfe\xd4\xcb\xdd\xa4\x84\xe7\x2f\xb7\xcf\xbc\x78\x7f\x66\xe9\xd5\xfb\xd3\xee\xed\xf3\x9d\xa5\xed\x1d\xbf\xb5\xf4\x61\x65\x7b\x65\x7d\xcd\x7f\xe1\x2f\x7e\x79\xe5\x92\x73\x7b\x51\xc2\x51\xf9\x10\x9c\x8f\x92\x3f\x1c\x3b\x76\xcc\x07\x77\xec\x58\x73\xec\x58\x70\xc7\x9a\xa6\x99\xe8\x03\x1c\xc7\x64\xc5\xd4\xc6\x37\xee\x87\xe0\x8e\x27\xf1\xde\x83\x12\xdc\xf1\xa6\x69\x9a\x04\x92\x52\x00\x7b\xc7\x05\x38\x1e\x67\x8f\x1a\x82\xe3\x3e\x01\xcf\x49\x3d\x02\x21\x7f\x81\xfe\xc0\x92\xae\xe8\x9e\xbe\x13\x49\xb4\xc6\xbe\x78\xe7\x27\xb4\x36\xc2\x07\x49\x12\x31\x5a\x70\xc9\x28\xc9\x89\xee\x73\x7f\x86\x5e\x46\xee\xfa\x30\xee\x76\x18\x3e\xfc\x72\xe3\xfd\xc9\x28\xa0\x66\xd6\x34\x8d\x64\x29\x26\x32\x08\x0e\xce\x27\x53\xc8\xc9\xe0\x80\x40\xc8\x49\xc9\x4e\xca\x4a\x86\x61\x8d\x2a\xa3\x7c\xa9\xc5\x56\x5d\xe8\x05\x33\x53\xd9\x58\x57\x66\x3a\xd9\xc1\xc4\xf0\x5e\xf7\xac\xf3\xd9\x69\x71\x07\x72\x2d\x62\x7f\x62\x7d\x99\x89\x4a\xba\x97\x29\xb2\x12\xac\x05\x23\xff\x2c\x0a\x83\xcf\x40\x82\xfb\x2c\xed\x81\x1c\x64\x19\xce\x86\xce\x93\x63\x54\x5d\x71\x54\x72\x99\x51\x70\x55\x28\x73\xdd\x17\x73\x8d\x99\x1b\x4c\xdc\x62\xe4\x2d\x0c\x5c\x8f\x9c\xc6\x8c\xbc\xb4\x89\xb8\x87\xd2\xe2\x82\xc3\xc5\xea\x45\x77\xdd\xcb\xb0\x78\xd9\xd1\x4d\xe7\x58\x04\x9d\xcb\x9f\x64\x73\x4c\x13\x9c\x9b\x9b\x23\x9c\xe8\x1c\xc5\xcc\x8d\xa6\x2a\x83\xc9\x9b\x8b\xd9\x1e\xa0\x31\x91\x97\x73\xcf\xcd\xcd\x45\x8a\x0d\xcd\x25\x1a\x8d\x48\xb9\xcb\x39\x70\x35\x73\x8a\x7a\x39\x45\xa6\xb2\xdf\x39\x2a\x3f\xc7\x5a\xaa\x96\x7b\xc0\x41\x70\x8a\xbc\xc5\x39\xea\xac\xef\x5d\x4c\x89\xd2\
x77\xa4\xf8\xad\xe9\xbf\x92\x44\x25\xce\xd2\x52\xa2\x2b\x7d\x8c\xc1\x31\x52\x7c\x43\x1b\xb0\x62\x07\xaf\xda\xa7\x92\x90\x7d\xea\x14\xdc\xec\xa9\x64\x93\xd5\x80\x9f\xad\x53\x5e\xda\xa7\x28\xdf\xa9\x6c\x1e\xa2\x6d\xeb\xe5\x7e\x6c\xbf\xad\x79\xda\xf3\x8e\xde\xcf\x41\xd9\x1a\xb4\xac\xd3\x49\xd0\x3a\x0d\x4d\x27\x3b\xfa\x1a\xb6\xe3\xb9\xd3\x6c\x43\x96\xd3\xca\x42\xf3\x40\x2d\x59\x41\x55\x57\x9d\x51\x54\x69\xa2\xce\xd9\x0f\xc2\xaa\x7c\x47\x38\x5c\xfc\x79\x12\x06\x60\x06\xf7\x79\xda\x03\x71\xb4\x52\x74\xd2\x40\x50\x4d\xb9\xc1\x42\x49\x09\x29\xb7\xa0\x32\xb7\x96\xcf\xab\xd1\xf5\xdc\xa5\x4e\xea\x7d\x97\xa6\x9c\xc1\xbe\x0f\x11\x97\x7f\x47\xa9\xfc\xea\x72\xc0\xf8\xbf\x41\x5a\x38\x5d\xc0\x68\x8c\xc7\x01\x83\x24\xdc\x7a\x27\x9d\xb8\x9c\xa4\x5f\x27\x9d\xe3\xcc\x60\xb1\x53\xc4\x33\xff\xbe\xff\x3f\x49\xa2\x12\x2f\xa5\xa5\x44\x57\xfa\x18\x83\x63\xe4\x04\x3f\x51\xd8\xc9\x0b\xcb\x18\x0c\xfa\x42\x4b\x42\xea\xd5\x7f\x51\x89\x1a\x44\xf7\xd3\x46\xcc\xf6\x06\xa6\x9d\x49\x92\xb4\x78\x69\x67\x34\x3b\x88\x1a\xfb\xfe\xd5\xf3\x4b\x10\xcb\xbf\x4c\xaf\xec\x4b\x1b\x4f\xd8\x70\xc6\x9a\x67\xac\xb8\xc0\x92\x4b\x2c\xb8\xc2\x99\x6b\xcc\xdc\x60\xe2\x16\x23\x6f\x61\xe0\x6d\x9c\x78\x01\x47\x5e\xc2\x3e\x38\x5c\xac\xb7\xdc\x17\x65\x70\xbc\xe8\xe8\x82\x67\x49\x32\x3c\x7b\x96\xbf\x5e\x25\x1c\x9c\x44\x88\x83\x3b\xcb\x06\x6b\x0c\xe3\xb1\xb3\x06\x42\x85\xb3\x63\xc1\xd8\xcf\x78\x70\xdc\x9c\xc5\x96\x4c\xf3\xc5\x31\xcb\x0b\xa9\xac\xae\xcf\xe6\xa7\x00\x47\xfd\x2a\x73\x0e\x04\xac\x73\x30\x21\x58\x68\x90\x75\xee\x9c\xb0\xbc\x87\xd8\xf1\xad\xb7\xfb\xb2\xa8\x7a\x73\xee\xdc\xb9\xdc\x59\xb4\x5b\x96\x90\x99\xfd\xfa\x57\xa5\xe7\xb5\x00\x3d\xdf\x68\x89\x9c\x29\x04\x82\x1a\x69\x3e\xe8\x33\x95\x55\xc6\xa6\x94\xf7\x3c\xf5\x9b\x2b\xa5\x16\x12\x3d\x7f\x3e\x77\x75\x9e\xa7\xc8\x9d\xe5\xd9\xa8\x82\x9e\x78\x1c\x0a\x23\x99\x89\x31\xa4\xbd\x1d\xa4\x05\x19\xc4\x5f\xf8\x6a\x7f\xf7\x1b\x37\x36\x38\x7f\x41\x4b\x82\x17\xcc\x0a\x2e\x5c\xc8\x30\x45\xa1\x1a\x5c\x3e\xe5\xfd\x05\x82\xb2\xc2\x68\x48\x95\x81\x61\x9f\xb2\x83\x0b\xd4\x7c\x31\x72\xfc\x42\x2d\x58\x6c\xbe\x3c\x66\x69\x21\xc5\xd5\xf5\xd8\xfc\x94\x60\x89\x76\xb2\x8b\x24\x99\x5d\xa4\xc1\x33\x63\x94\x59\x42\x17\x2f\x4a\x86\x86\x64\xa4\x0b\x46\xaa\x60\x98\x2b\x0a\x6a\xc8\xa4\x44\xc6\xd2\x70\x5c\x9e\xe9\xa2\xee\xb9\x07\xc3\x7c\x8d\xcc\x77\x51\xd4\xf5\x76\x59\x17\x7b\xef\xf9\x30\x59\x9f\x47\x73\xa9\x25\x11\xda\x5d\x8f\x80\xc5\xe3\x97\x4c\xa1\x0e\x98\x0f\xfb\x42\xa1\x4b\xb6\x4f\xae\x2e\x93\x21\xad\xc0\x9c\x5f\xc2\x26\x97\x6f\xf8\xf8\x25\x5d\xff\x12\x16\x12\x18\xfb\x84\x64\x97\x64\xea\xa3\xf2\x25\x60\xd4\xf9\x79\x14\x09\xe7\x61\xf8\x79\x19\x29\x19\x41\xc5\x10\x6a\x06\xd0\xb0\x04\x2d\x8b\x70\x9e\xd6\x4e\x1d\xf9\xe0\xe6\xf5\x0b\x89\x98\x61\x0a\xc3\x34\x04\xe7\x45\x67\xf3\x9d\x50\x1c\x8f\xd0\xcf\xcf\xdb\x4a\xd0\xfc\xbc\x2a\xce\xb3\x8b\xc5\xcd\xfc\xe2\xc7\x7f\x0d\xd5\x9f\x30\xbe\x2a\x48\x70\x5f\x35\x2d\xa9\x53\x21\x99\x8a\x22\x48\xbf\x52\x3d\x30\x94\x34\x43\x41\x05\xcc\x54\x42\xa6\x0a\x12\xd5\x10\xa9\x81\x40\x2d\x4c\xb4\x05\x23\x6d\x43\xb1\xc9\x71\xb6\x3e\x00\xda\xfb\x99\x5d\x26\xc9\xf0\xf2\x65\x7e\x08\x09\x07\x27\x11\xe2\xe0\x2e\x93\x7e\x99\xbc\x97\x09\x42\xb2\xcb\x9d\x90\x34\x3e\x7e\x39\x55\x37\x85\xa0\x7a\x21\x72\xfc\x42\x1d\xd0\x34\x5f\x1c\xb3\xbc\x90\xca\xea\xfa\x6c\x7e\x0a\xb0\xf7\xf9\x2b\x24\x19\x5e\xb9\xc2\xc3\x27\x1c\x9c\x44\x88\x83\xbb\x42\xfa\x15\xf2\x5e\x21\x08\xc9\xae\x74\x42\xd2\xd4\x71\x6f\x2b\x01\xc4\x36\x99\xc9\x89\xe8\xb0\x1c\x33\xb3\x0c\x05\x63\x28\x19\x2f\x44\x32\x84\x9a\xd1\x96\xc4\xde\x66\x7f\xf1\x95\xd7\x70\x40\x7f\xf9\x86\xf9\x6b\xda\xe1\x9d\x9d\x5c\xcb\xff\x60\x61\xef\x60\xd4\xbf\x25\x01\x35\x36\xff
\x37\x4b\x27\xcd\x10\xb5\xa4\x26\x0d\x62\x63\xe0\xc1\xb5\x9e\x77\xe2\xa5\x8c\x67\x7e\x0d\x82\xe6\xd7\xe9\xe2\xbe\x46\x2f\x59\x60\xb2\x95\xcc\x6c\x45\x53\x58\x3e\x38\x69\xb5\xbd\xf6\xac\xcd\x6c\xeb\xda\xae\x6c\xcf\xfd\xe6\x3d\x04\x73\x26\xff\x58\xed\xef\x5a\x12\x52\x5f\x59\xff\x5e\x89\x3a\xe4\x5c\x7f\x32\xd4\xf3\x61\x7c\x03\xc2\xde\x6f\xd2\xfa\xbe\xe1\x60\x61\xa7\xb3\xd9\x86\x54\x6c\x63\x66\xb2\xa9\x10\xda\x45\xaf\x3e\xab\x33\xeb\xba\xba\x2b\xdb\x73\x9f\x79\x0f\xcd\x24\xfb\x2a\x88\x88\xb9\x7a\x15\x1f\x65\xa2\x91\x31\x49\x34\xb1\xab\x18\x81\xae\xab\xcc\x52\xa6\xab\x05\x86\x6a\x64\xb9\x22\xb3\x2c\xf9\xac\x45\x92\x71\xde\xcc\x72\x29\x66\xa2\x7a\x37\xbb\x8a\xd3\xaa\x7c\xd4\x64\xf1\x87\x03\x15\x67\xd9\xd5\xab\x4d\xfb\x6c\x21\xae\x98\xef\x10\x59\xf1\xa7\x9c\x13\x7d\x61\x20\xc9\x30\x04\x7e\x1f\x09\x9f\x70\x12\x21\x3e\xe1\x02\x1b\xac\x31\x8c\xc7\x82\x81\x01\x8a\xb5\x61\x63\x8f\x8f\x82\xc1\xe6\x84\xfe\x55\xf5\x3c\xa6\xe8\x93\x06\x2d\x4c\xd4\x9e\x3d\x43\xb1\x25\x3a\x7e\x00\x9b\x3f\x58\x58\xa4\x5a\xca\xbf\x33\x77\x87\x2c\x2c\x2c\x8c\x08\x59\x68\x9a\x66\xa1\x33\x64\x21\x2d\x73\xa1\x23\x64\x01\x17\xbe\x50\x0d\x59\xe0\x4b\x59\xa8\x84\x60\x15\x1b\x23\x42\xc8\x67\x63\x72\x48\xf6\x98\x18\x0e\x91\x5c\xc7\x50\x08\x52\x12\x19\x83\x21\x0b\x4d\x49\x16\x44\x88\xc9\xa1\xf3\xa4\x90\xd2\x4e\x33\x8b\x21\xa5\x08\x41\x83\xab\x44\xe4\x98\xe0\xb0\x76\x2b\x22\xbd\x8b\x28\x93\x3c\xa9\xa3\x1f\xd2\xef\x0f\x55\xd7\xac\x00\xb5\xf7\x7f\x4d\x50\x88\x43\x1c\x33\x50\x84\xf7\x1e\xf5\x58\xed\x5a\x6f\xaa\xf3\x52\x00\xb7\x70\xcd\xfb\x11\xb1\xfb\xe8\x81\x92\xe5\xbc\x89\xb6\x3e\x11\xae\xd5\xb7\x53\xdc\x64\x9f\xbb\x98\x2a\xad\x3c\xa5\x91\xf8\xba\x14\xc4\xd7\xaf\x5f\x97\xeb\x00\xac\x19\x61\x80\x74\x9e\x30\x52\x55\x22\x38\x4d\xb9\x93\x19\xe0\x4a\x83\xb5\x71\x6a\xc3\xd7\x56\x55\x5d\x6c\xef\xdb\x99\x1e\xee\x9b\xe5\x7f\xa5\x08\x2c\xb7\x41\x1e\x83\x93\x87\x92\x50\x20\x69\x50\x92\x8d\xac\x1a\x4c\xfa\xec\x71\x6e\xb0\x32\x4e\x6d\xf8\xca\xaa\xca\x8b\x3d\x88\x3b\x9e\xc1\xfb\xa9\x7e\x8b\x0b\xee\xdb\xa2\x04\xf7\x6d\x53\x90\x2e\x8e\x0e\x4e\x81\xdc\x7b\x70\x7c\x8b\x15\x29\xbf\xe5\xb9\x4f\x9b\xe8\x5b\xcd\x73\xd1\xf1\x78\x25\x4f\xad\xae\xed\xd3\xce\x35\x72\x0f\xd5\xfd\x94\xf7\xdc\xf7\xbe\x66\xc4\xfb\x27\xba\x51\x94\xe0\x6e\x94\x16\xd4\xc5\xd1\xc1\x29\x90\x7b\x0f\x8e\x1b\x58\x91\xf2\x5b\x9e\xfb\xb4\x89\x6e\x68\x9e\x8b\x8e\xc7\x2b\x79\x6a\x75\x2d\xa7\xb9\x9a\x1b\x37\x34\x6f\x7a\x72\xd5\xc7\xe8\x7b\x19\x18\x9f\xf4\xeb\xde\x4d\x29\x88\x6f\xde\xbc\x29\xef\x0b\xb0\x66\x84\x01\xd2\x79\xc2\x48\x55\x89\xe0\x34\xe5\x4e\x46\x63\xd9\xa0\xc4\xa2\x13\x1b\xdd\xc6\x7c\xa2\xd0\xe0\x88\x71\x6a\xc3\xcb\x55\x81\x72\xd3\x2e\x16\x95\x01\x7d\x6b\xeb\xf9\x1d\xaf\xfe\x5b\x5d\x70\xdf\x91\x24\x9b\xad\xe0\xbe\xc3\x2d\xa1\x83\xac\x2e\xc7\x77\x05\x47\x4c\x45\x86\x74\x7c\x07\x5d\x8d\x70\x34\xa9\x99\xa6\xe0\x10\x32\xa6\xa3\x96\x6a\xfc\xae\xaa\x03\x4e\xb2\xab\xca\xda\xfb\xdf\xe0\x2c\x1d\xd2\xa3\xf5\xef\x59\x82\xfb\x9e\x2e\xc4\xea\x68\xc4\x18\xef\x93\xf1\x3d\xe4\x39\x78\x5d\xd6\xaa\xf7\x93\x7b\xae\xcd\xb5\x0f\x7d\xf2\x4f\xf2\x5b\x59\x14\xe6\x97\x3e\x16\x26\xae\x93\x50\xfc\x2d\xdb\xc9\x24\x78\xb5\x23\x1a\x07\x68\x61\x52\x4b\x38\xf1\xac\x32\x6e\x6e\xe5\x69\x52\xc9\x5b\xb7\xd4\x67\x7e\x2a\x93\x3a\x11\x0e\xac\x8d\x0d\xf2\x46\xc7\xbf\x86\x19\xe3\x32\xaf\xd1\x1f\x48\xa2\x89\x6a\x70\x3f\xe0\x2e\x80\x82\x3a\x0e\x45\x3d\xd2\x1f\x98\x62\x64\xcc\xcb\x06\x7f\x84\xce\x28\x38\xa7\x17\x34\xc3\xd1\xb4\x98\x41\x57\x2b\x77\x56\x99\xa2\xcf\x1e\x8a\x9b\xec\x73\x17\x53\xa5\xd5\xaf\xd6\x1d\x7f\xdc\x0d\xee\x1f\x59\xac\x87\x2f\xa5\x87\x87\x5c\xad\x6c\xde\xff\xa3\x70\xe2\xe8\x79\x60\x55\x6a\x52\x14\xb0\xe4\x76\x9
4\x74\x7b\x2a\x32\xd9\x9d\xce\xdc\x53\x77\x8d\x3c\xcb\x01\xb7\x51\x8c\x83\x70\x70\xb7\x71\x7d\x3a\x82\x69\x0e\x50\x11\x19\x8a\x00\x11\x21\x98\x0c\xe0\x08\x89\x54\x00\x46\x28\x22\x9a\xa4\x88\xdb\x6c\xa9\x80\xdb\xe8\x50\x1f\x74\x80\x38\x7a\xdb\x17\x03\xbc\x5c\x49\x31\x00\xa9\x50\x6d\x80\xcf\x4b\xaf\x04\xb4\x76\x79\x30\x01\xa3\xae\x7b\xe4\x83\xf9\x53\x04\x4c\xf6\x89\x78\x07\x85\x08\xd9\xc1\xdd\xc1\x17\x41\x2e\xb4\x85\x87\x5c\x05\x0f\xba\x4a\x1e\x70\x15\x3d\xc9\xd5\xf6\xdc\x41\x57\xcb\x73\xc7\xdf\x11\x1f\xb4\x07\x5d\x4d\xc1\xe3\x55\x49\xe5\x91\x2e\xe3\x11\x2e\xeb\xc9\xae\x96\xc7\x97\x3b\xb0\xdb\x31\xb7\xe0\xf9\x16\x26\xb9\xd3\x99\x7b\x8a\xae\x2e\x78\x97\x24\xc3\xbb\x77\x79\xdd\x09\x07\x27\x11\xe2\xe0\xee\xb2\xc1\x1a\xc3\x78\xec\xae\x81\x50\xe1\xee\x8c\x60\xb1\x25\xd3\x7c\x71\xcc\xf2\x42\x2a\xab\xeb\xb3\xf9\x29\xc0\x7e\x3f\xa0\xde\xb3\x02\xb4\x31\x72\x4f\x50\x88\x43\x1c\x33\x50\x84\xf7\x1e\xf5\x58\xed\x5e\x6f\xaa\xf3\x52\x00\xb7\x70\xcf\x7b\x13\x0b\xed\x7a\x93\x57\x4c\x3c\x3e\xbd\x77\x2f\x57\x11\x54\xcc\x3f\x92\xaa\x9e\xba\xb7\x3e\x00\x3a\x41\x8a\xfb\x24\x82\xde\xbf\xcf\x17\x04\x3c\x38\xc9\x88\x07\x77\x9f\x2d\xd6\x32\x8d\x9f\x4d\xf7\x2d\x85\x4f\xb1\xfb\xd3\xa3\xf7\xa9\x37\xd5\x19\x0f\x73\x5f\x4d\x21\xa9\x9d\xb8\xb9\xdf\xde\x4e\x82\x76\x93\xb8\x1e\xb3\x75\x6c\xc2\xde\x05\x1c\x19\xf4\x33\x79\x60\x05\x68\x63\xe4\x81\xa0\x10\x87\x38\x66\xa0\x08\xef\x3d\xea\xb1\xda\x83\xde\x54\xe7\xa5\x00\x6e\xe1\x81\xf7\x23\x62\xf7\xd1\x43\x79\x8a\x07\x4d\x0f\x9a\x17\xe8\x3d\xea\x83\xf8\xba\xd1\xe3\x99\xd4\xe1\x43\x2d\x11\x3e\x7c\x68\x1e\xc9\xc3\xe0\x00\x41\x8c\x85\x29\x19\x9e\xd7\x90\x0b\x15\xa1\x7f\x48\x39\x55\x4b\x5c\x80\x3c\xd0\xfc\xc3\xdc\x56\xc4\x3c\x51\xee\xe9\xa1\x19\xf3\x21\xf5\x59\x84\x34\xb0\x86\x8d\x9d\x33\xc3\x46\x94\x4f\x5b\xd2\x8b\x9b\xf9\x4f\x1b\x3d\x7e\x04\xe9\xfa\x21\x36\xb8\x1f\xcb\x12\xdc\x8f\xf6\xab\x47\x92\xd1\x8e\x1f\xc9\x8f\xca\x8f\x54\xc3\x7b\x04\xde\x53\x0d\x6a\x83\x1d\xb6\xdd\x21\x3a\xc4\x80\xc6\x91\x97\x31\xae\x23\xcb\xc8\x8b\x1a\xa4\xa3\xe2\xa9\xe2\x47\x28\xc9\x24\x23\xb8\x47\xb0\x1d\xc4\x68\xd4\xf1\xa3\x16\x8e\x49\x7c\x01\x7b\xc6\xf2\xff\x2d\x6c\xb6\x38\x75\xc8\x6a\xc6\xcd\xa3\x47\xac\xa5\x0d\xb2\x49\x6e\x5a\xac\x70\xc0\x2a\x69\xdf\x8f\xd4\x7e\xeb\xd7\x30\x18\xdc\xf9\xf7\x9e\x11\xce\xc7\x28\x02\x12\x0a\xee\x31\xac\x47\x39\x11\xed\xc7\x89\x6a\xdb\xf9\x38\x75\x6b\xbd\x6c\xa7\x51\xb4\x37\x5b\x30\xa7\xf4\x0a\x1d\x97\x90\x89\x8c\xa3\x0d\x11\x53\x15\x78\x7d\x48\x55\xf5\xbc\xdb\xc7\xfc\x96\x1e\x17\x16\xff\xd8\xfa\xd4\xad\xb4\xb6\xa1\xae\xec\xb1\xd9\x94\xbe\x4f\x7d\x73\xa3\x2e\xfb\x48\x39\x3b\xbd\xbd\x52\xb5\x83\x9f\xa0\x74\x04\x51\x48\x70\x4f\xe0\xf2\x3a\x83\x31\xe4\x30\x83\x51\x7d\x62\x7b\x6c\x05\x3f\x49\xdb\xb0\xd1\x55\x3b\xad\x4e\x7b\xb1\xa8\xb1\x9f\xf0\x9e\x65\xf4\x13\x7e\xf9\x4f\x6c\x1d\xbc\x94\x4c\x50\xb3\xb6\x17\xc1\xcc\x6a\x1f\x55\x70\x2d\xa3\xea\x3e\xbf\x8d\x72\xaf\x6a\x2f\xe2\x21\x95\xb7\x20\x77\x28\x5f\xdd\xa8\x7d\xeb\x27\xfa\xc4\xde\xa4\xb1\xf5\x7b\xb6\x2f\xd5\xd8\x87\xf9\x99\xf2\x67\x0f\xee\xfb\x1d\xf8\x27\x10\xb4\xd1\x0a\xee\xa7\x74\xdb\x8c\xc1\x1a\x81\x91\x93\x91\x72\x33\x47\x2d\xe6\xce\x96\xcf\x7e\x68\x10\xed\x9c\x0d\xfb\xfe\x89\x1f\xf6\x4f\x7e\x24\x2e\x27\x29\x95\xac\x34\x58\x1b\xa7\xdf\x4e\xca\x8b\xed\x77\x3b\xd3\xc7\x7d\xb3\x3c\x05\x41\x1b\xad\xe0\x9e\xa6\x35\x30\x06\x6b\x04\x46\x4e\x46\xca\xcd\x1c\xb5\x98\x3b\x5b\x3e\xfb\xa1\x41\xb4\x73\x36\xec\xfb\x29\x3f\x94\xa7\x6a\x9c\xa7\x8a\xe6\x29\x75\x9f\x07\x88\x9f\xf2\x4c\xb8\x34\xd9\x77\x63\xfb\x7e\x2a\xa5\xf3\x1a\x06\x83\xfb\xfc\xb5\xe2\x9f\x6d\x09\xee\x9f\x8d\x95\x2e\x48\xe7\x80\x51\xce\x94\x9f\x98\xa8\xce\x48\x40\xc1\x18\x4a\x
46\x50\x31\x84\x9a\x01\x34\x0c\x5a\x12\x4c\x8f\x29\xfa\xef\x37\x7b\x69\x75\xe3\x6f\x7e\x5a\x50\x50\xa5\xfe\x4c\x12\xdc\xcf\x38\x54\x49\xcd\x01\x3f\xc3\xb1\x03\x57\x9b\xae\x1e\x72\x93\xe5\x29\x26\x53\x6b\xdf\xcf\xeb\xfc\x17\x90\x76\xfc\x2f\xa9\xd5\x5f\xda\x79\x4a\x8e\x94\xbf\xe0\x80\xba\x6d\x07\xf6\xd3\x72\x50\x9f\xd6\xc1\xfd\x1b\x47\x9e\x4b\x3b\xc4\xbc\xca\x21\xf7\x20\x1d\x6a\x3f\xc2\xa1\xf7\x96\x1d\x66\x9f\xec\xb0\x7b\x26\x47\x6b\xff\xb8\xf6\xfe\xf7\x35\x23\x5e\x7b\x7a\xcf\x48\x82\x7b\x86\xaf\xdb\xa8\xcf\x9e\x91\xea\xbd\x7f\x76\x18\xaa\x28\x51\xed\x81\x9a\x2c\x4f\x31\x99\xda\xf3\xbf\x4e\xb4\xb8\xb8\x68\xd0\x62\xd3\x34\x8b\x0a\x2d\xa6\x96\x17\x05\x5a\xc4\x31\x16\x19\x2d\xf2\xd7\xea\x45\x44\x78\x0a\x59\x44\xa4\xe3\xc7\xbd\xe0\x16\x41\xa0\x8f\x28\xc3\xf8\xef\x34\x0d\x15\xb5\x59\x3d\xfa\x2f\xcf\x51\xf1\xfc\x5a\x91\xe0\x7e\x6d\xfd\x20\x94\xe4\xd7\xae\x33\x53\xea\x9a\x5c\x7b\x42\xf9\x57\x92\xa8\xc4\x2e\xb5\xf2\x2f\x54\xf6\x7c\xd2\xd2\xa9\x18\x5d\xca\x33\x0c\xa5\xf4\x5d\x67\x5a\xec\xb9\x14\x62\xf2\x09\x3c\x37\x71\x08\x55\xbe\xe7\x92\xb5\xe3\x9e\x9b\x84\xc1\xe1\x09\x0a\x4b\x0c\x93\x50\xba\x6e\x66\xce\xca\x96\x13\x7f\xae\x67\x03\x7d\x96\x7b\x6e\xb1\xf2\x8f\x1a\xc1\xbd\x40\x31\x14\x66\xdb\x07\xf5\x2f\xba\xa8\xac\xca\x94\x25\xf2\x36\x8d\xc9\x74\x06\xc2\xba\xda\x8b\x22\x25\x7d\x32\x2a\xca\xbd\x18\xd1\x99\x84\x89\x1a\xa9\xfe\xd8\x37\x08\x3a\x88\x26\x12\x7d\x69\x24\xd2\x97\x2f\xcd\x8a\x23\xb5\x2c\x51\x82\x70\xd6\xd0\x97\x5e\x14\x20\xfa\xd2\xf4\xd0\x45\xcb\x19\x6c\xb5\x72\x67\x95\x29\x2a\x13\x0f\xe3\x2e\x5a\x74\x9c\xe0\x57\x28\x86\xc2\xc8\xfb\xa6\xaf\xb0\x1a\x55\x91\x14\x0d\x96\x44\x5f\xd9\xbb\x88\x94\xa0\xce\x40\x75\x75\xb5\x57\x19\xee\x8b\x8a\x62\x4d\xda\x4e\xab\x33\x98\x38\x63\x0c\x87\x3d\xf0\xc0\xa4\x0f\xe2\x41\x0c\xfd\xab\xc9\x92\x10\xa2\x4b\x4b\x72\xe7\x91\x07\xe7\x97\xc8\x5a\x42\xf7\xd2\x52\x70\x04\x53\x42\xc4\x4c\x45\xac\xa4\x22\xb3\xcf\x19\x64\x17\x22\x83\xea\x77\xa9\x69\xe3\xdc\x59\x4e\x52\x9a\xa2\x59\x2a\x4d\xbc\xb4\x34\x90\x07\xd1\xe7\x99\x8c\xfa\xb3\xc9\x6b\x23\xc8\x5f\xdb\xf5\xbf\x4e\xbc\x85\x9b\xe6\xb5\xe0\x98\xc3\xf2\xd7\x58\x11\xbc\x6d\xce\xfd\x50\x7a\x6a\xc5\x72\x75\x6a\x0c\x6e\xf3\xd4\xea\x8e\xec\x67\xbf\x3c\x6f\x77\xdc\x7b\x19\x10\x1f\xc4\xeb\x8e\x74\x99\x84\xf4\x48\x97\xf9\x01\x2c\x2f\x27\x3d\xd2\xe5\xd6\x33\x5d\x0e\x6e\x99\xc3\x96\x59\x25\xba\x9c\xaa\x90\x3e\x09\x05\xdc\xa2\xa2\x5c\xa4\xed\xce\x12\xf5\xcb\xcb\x25\x9a\x47\xd6\x94\x44\x54\xa3\xbd\x94\xfa\x85\x7d\xb5\x7a\xe0\xb4\xe5\x1e\x96\x7d\xee\x81\x0f\xa8\x1b\x92\x45\xfb\x3e\x93\x37\x20\x96\xa6\x3a\xfb\xa1\xfe\x0d\x50\x99\x9d\x29\x1e\x01\x07\xd1\x2c\x91\x0b\xfa\x06\xe3\x9b\x37\x82\xc6\xa3\xa4\x32\x85\x42\x6f\x66\x45\xc5\x2a\x51\x1f\xcc\xa7\x6c\x9b\x66\xac\xb5\x95\x95\x95\x95\xa4\xad\xc4\xb9\x56\x34\xe3\xb8\x15\x90\xa8\x35\x1c\xb7\x32\xb9\x06\x59\x9a\x95\x8a\x86\x52\xeb\xb9\xa7\x36\xe1\x9f\x1e\x56\x57\x57\x57\x0d\x5b\x8d\x0d\xae\x8e\x8e\x2b\xe4\x5b\x05\xd1\xac\x29\xe4\x2b\xd5\x38\xda\x2c\x2d\x48\x33\x50\x56\xd5\x0e\x40\x6b\x56\x57\x49\x81\xb3\xf2\x2b\x05\xed\x39\xc7\xac\xca\xff\x74\x0d\xac\x18\x76\x5f\xfc\x34\xa8\xc0\x7f\xa3\x68\x08\x15\x26\x85\xfe\xdf\x19\x62\x7a\x05\x49\x93\x90\x15\x01\xf3\x09\x86\x8d\x48\x9d\xa1\xe8\x61\x04\x54\xc7\xb9\x79\x59\x28\x4f\x24\x5a\x12\x63\xe6\xe6\xe5\xea\xf2\x98\x63\x6f\x7e\x5a\xb0\xfc\x15\xe2\x2d\x48\xd4\xd2\x40\x35\xcd\xbf\x3d\x58\x0d\x32\x37\x6f\x2b\x1a\xca\x21\x7e\xf5\xeb\x58\xd5\xf4\x3d\xef\x40\xbc\x52\x83\xf3\xef\xde\xe1\xfb\x7d\xe7\x3d\xab\xef\x7c\x70\xef\xf2\xd3\x7e\x27\x75\xe5\x91\xc2\x9e\x77\x14\x4f\x0a\x79\x62\x49\xa3\x0c\xc2\x43\xeb\x30\xca\x40\x6e\xae\xaf\x67\xc2\x6f\xc4\x07\xc0\xd6\xd6\
xd6\xd6\xbc\x5f\x43\x41\xd6\x34\xcd\xda\x1a\xbd\x91\x48\x81\x09\x59\x5b\x4b\x67\xcd\x73\x5a\x8b\x35\xd6\x30\x00\xbd\x99\xa5\xec\x6b\xd3\x65\x3c\x15\x68\xb3\xdb\x73\x81\x15\xe0\x34\xd0\x7a\x16\x40\xeb\xeb\xe2\x0a\xd7\xd7\x7d\x70\xeb\xe6\x5e\xd7\x09\xa5\x53\x0a\xad\xa7\xc4\xeb\x07\x82\x0a\xe9\x75\x13\x85\x56\x4b\x03\xcd\x62\xab\x06\x0d\xe2\x37\x1b\xb7\x11\xc5\x7b\xbf\x41\x02\x34\x2e\x6f\x63\x63\x83\x17\xb9\xc1\x54\xc9\x46\xca\x80\xfa\x06\x1f\xd8\x08\xce\x6f\xd0\xb9\x98\x7c\xbf\xb4\x9d\x37\x17\x13\xd4\x74\x66\xe9\x46\x9a\xc2\xcb\x7e\x61\x60\x6f\x7a\xc8\x3b\x2b\xd2\x8d\xbc\x94\x49\xa9\x2c\xde\x7d\x43\xc3\x78\x26\xde\xfb\x4d\x12\xd2\x23\xdd\xe4\xe5\x6e\x6e\x26\x3d\xd2\xcd\xd6\x3b\xd9\x0c\x6e\x93\xc3\x36\x59\x25\xba\x99\xaa\x90\x3e\x09\x05\xdc\xa2\xa2\x5c\xa4\xed\xce\x12\xf5\x9b\x9b\x25\x9a\x47\xd6\x94\x64\x73\x7c\xba\x99\xf7\x30\x29\x25\x19\xf4\x33\xd9\x8a\xe2\xbd\xdf\x22\x01\x1a\xc7\xd9\xda\xda\xe2\xfd\x6e\x31\x55\xb2\x95\x32\xa0\xbe\xc5\x07\xb6\x82\xf3\x5b\x74\x2e\x26\x2f\x53\x10\x43\x73\xbf\x87\x46\x79\xd0\x29\xec\x77\xdf\x74\x56\xdf\xfa\xb6\x59\x10\x6d\x6f\xe7\x7b\xdf\x8e\x48\x82\xc4\x82\x43\x12\x4f\x81\x12\x9c\xdf\xa6\x13\x98\x2d\xa5\xdf\xe6\x3c\xb2\x09\x41\x63\x7a\x2c\xbe\x6d\xd2\x73\xf6\x98\x5e\xf7\x90\xa2\xbc\x64\xdb\xdb\xd4\xbd\x9c\x67\xd0\x3f\x50\x74\xfd\x56\x21\x1c\x3b\x3b\x3b\x3b\x6d\xc7\xce\xce\x4e\xd3\x34\x25\x47\x5a\xc7\x78\x8e\x1d\x10\x70\x80\xa0\xa3\xf5\x15\x60\xa4\x23\x67\xa2\x5c\x93\x76\x35\xbe\x03\xea\x68\x47\x93\x27\xca\xbb\x32\x2d\xe7\xed\x2a\xd7\xce\x8e\xbc\x8f\x9d\x2c\xe3\x5d\xd4\x90\x1c\xb3\xfb\x5d\xe9\x7d\x92\xa8\x93\x16\xdc\xfb\xb8\x5d\x60\xa8\x4d\x91\x35\xb1\x8b\xc6\xb0\x2c\xef\xdf\x07\xe7\x5b\x30\xce\xf6\xfe\x7d\x8e\x21\x46\x53\xc5\xb9\xe2\xff\x1d\xfa\xef\xa4\x5d\xf1\x33\x71\x7d\x00\x11\x0c\x49\x70\xfe\x43\x5a\x76\xf6\x91\x1d\x13\x6a\x1f\x5b\xa9\x96\xf4\x65\x1d\xda\xc8\xb6\x88\xc2\x0e\x89\xc8\xf3\xd4\x3c\x32\x59\x95\xe7\xfa\xc0\xef\xe4\x43\x6b\xe4\x0f\xc6\x23\xb7\x61\xe7\x93\x8b\xfa\xa0\x3c\x7a\x87\x7a\x5f\x83\xb9\xca\x89\x5c\x1d\xbe\x1e\x69\x8e\x76\xe8\x2e\x88\x60\x45\x7b\x77\x37\x38\xbf\x9b\xde\x4c\xf6\xd5\xec\xd8\x80\xf6\xed\xe2\x4b\xdc\xb5\x27\x53\xaf\x32\x76\x97\xdf\xf3\xae\xa9\x01\x63\x65\x1b\x35\x6b\x8b\x0d\x10\xa9\x7d\x94\xcb\xaa\x64\x93\x5d\xf3\x5e\xcb\x3d\xca\xf9\xf2\x15\x94\x27\x17\x5b\x13\xb7\x35\x6a\xbf\xf2\x62\x77\x95\xa7\x65\xab\x37\xa0\xef\xd3\xda\x03\x78\x84\x47\x28\x74\x10\xbf\x9f\xb9\xdf\x40\xc0\x44\x23\x38\xff\x5b\x7a\x30\x09\x93\x1a\x33\x30\x66\x25\xe5\x45\x2b\x1f\x82\x6a\xbf\xf1\x6b\x86\x4c\xd8\xc3\x6f\x0a\x76\xd2\x62\x86\x62\xb5\x62\x67\xc5\x29\x2a\x13\x0f\xe0\x2e\x8a\x74\x28\xdf\xe2\xba\x5c\x1f\x41\x04\x43\x12\x9c\xff\x98\x16\x9f\x7d\x64\xc7\x84\xda\xc7\x56\xaa\x25\x7d\x59\x87\x36\xb2\x2d\xa2\xb0\x43\x22\xf2\x3c\x35\x8f\x4c\x56\xe5\xb9\x3e\xf2\x4b\xfb\xd8\x1a\xf9\xa3\xf1\xc8\x6d\xd8\xf9\xec\xa9\x8f\x85\x1d\xb6\x3c\xb6\xc3\xc2\xe6\x0b\x9e\xd6\x36\xac\xab\xe8\x81\xcd\xb7\x6e\xcc\xce\xd5\xc7\x35\xe6\x1f\x0c\x0e\x14\xfd\x6e\x25\xb8\xdf\x1b\x2d\x6d\xf4\xbb\x38\x48\x84\xd3\x67\x42\x48\x10\x44\x92\x00\x52\x24\x21\x22\xdc\x97\xb7\x5d\x94\x5b\x6d\x0f\x34\xfd\xad\xb6\x91\x62\x64\xfc\x27\x0a\x1a\xff\x89\xfd\xa3\x91\x74\x65\x60\x18\xe8\x98\x40\x1a\x78\x22\x19\xac\xfb\xe0\xb2\x6e\x3d\xfa\x4c\x47\x6a\x8a\x2b\xf5\xa6\xba\xd6\xf3\x14\x56\xb1\x7f\x23\xff\x6b\xa0\x7b\x5a\xfd\x3f\x90\xa4\xa6\x06\xfb\xab\x39\xc3\xcc\x4a\x4c\xa4\x9a\x75\xfd\x11\x85\x8d\x58\xfb\x0f\x69\x80\x15\xc3\x84\xe1\x3d\x5b\x65\x23\x5a\xa9\x0e\x59\x50\x14\xad\x96\x47\x9d\xa9\xa5\xfe\x23\xf7\x63\x7b\x53\x5d\xcb\x79\xa6\xf1\xb2\xb4\x54\xff\xcd\xe3\xa3\xe9\xf8\x44\xe2\xd9\x88\x8e\x4f\x9f\xf8\x4b
\xe8\x27\xef\xc1\x88\x0e\xd4\xf4\x87\xbd\xe0\x3e\xe9\x50\xb4\xa2\xc3\x24\x07\x6b\x08\x93\x0f\xce\x11\x5c\xef\xff\xf9\xeb\x48\xff\x23\xff\x1f\x00\x00\xff\xff\xac\xd9\xf0\x1f\x3b\xc1\x00\x00") + +func fontsDohFlfBytes() ([]byte, error) { + return bindataRead( + _fontsDohFlf, + "fonts/doh.flf", + ) +} + +func fontsDohFlf() (*asset, error) { + bytes, err := fontsDohFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/doh.flf", size: 49467, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsDoomFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x58\x6d\x6b\xe3\x3a\x16\xfe\xee\x5f\xf1\x50\x02\x69\xd9\x36\x22\xb3\x9d\x32\xb3\x94\x92\x7d\x87\x85\x65\x06\x2e\xf4\x93\xb8\xaa\x9b\x28\x89\xb9\x8e\x53\x6c\x67\xa6\x05\xfd\xf8\x8b\xce\x91\x6c\xf9\x35\x6e\x67\xe8\x44\xc2\x96\x1f\x9d\xf3\x9c\x57\x69\x9b\x6e\x3f\xc5\x33\x7c\xc1\x1d\x96\xb7\x58\x7e\xc6\xf2\x2e\xfa\xd7\xb7\x6f\xff\xc7\xf3\x1b\xfe\x93\xc7\x59\x81\xef\x0b\x6c\x34\x1e\xf3\x44\x17\xb8\xdf\xbe\xfc\x58\xbd\xbe\x1d\x5e\xf6\x8b\x24\xde\x2e\xb2\xf4\x01\x58\x7e\xc1\xff\x4e\x19\x96\x5f\xbf\xde\x45\xcf\x71\xa1\x37\x38\x66\xf8\x47\xb2\xb3\x08\xff\x4d\x75\x96\xe1\x9f\xfb\xf8\xe5\x45\xa7\x29\x6e\xc5\xd7\xbf\xe2\xe6\x06\xd5\xb2\xdf\xca\x38\xdb\xc4\xf9\x26\xda\x26\xbb\x54\x97\xc8\x75\xaa\xe3\x42\xe3\xd3\x62\x69\xd7\x2d\x3f\xe1\xef\xa7\x9d\x85\xbe\x8d\xbe\xeb\xfc\x90\x14\x45\x72\xcc\x90\x14\xd8\xeb\x5c\x3f\xbf\x61\x97\xfc\xd0\x19\xca\x23\x0e\xc7\x4d\xb2\x7d\x43\xb9\x4f\x0a\x6c\x8f\x59\x79\x8d\xb8\x40\x7a\xcc\x76\x76\x2c\xf7\x3a\xa2\x05\x89\xce\xe7\x05\xb2\xf8\xa0\x2d\xc6\x4b\x1a\xaf\x59\x8c\x18\xeb\xe3\xe1\xa0\xb3\x12\x69\x92\xe9\x45\x14\xfd\xfb\xf5\x25\x8d\xb3\xb8\xb4\xbb\x1d\xb7\xd8\x26\x79\xc1\xef\xfe\x16\x59\xc2\x70\x83\x8b\x43\xbc\x4b\xd6\xc8\x4e\x87\x67\x9d\x5f\x60\x7b\xcc\xb1\x4d\x52\x8d\x64\xa3\xb3\x32\xd9\x26\x6b\xfa\x38\x8a\x01\xe0\x06\xc5\xfe\x78\x4a\x37\x88\xd3\x9f\xf1\x5b\x81\x67\x8d\xa7\x78\x7e\x4d\x1f\x65\xc7\x9f\xd1\x8c\x17\x95\x7b\x8d\x8b\x7d\x9c\x6f\x9e\xd3\x38\xfb\xe3\xc2\x12\xf0\x92\x27\x59\x59\x58\x1d\x62\xd0\xd3\x6b\x3c\x9f\x4a\xac\xe3\x6c\x5e\x5a\x98\xe2\x70\x2a\xf6\x7a\x13\x7d\x61\x84\xbd\x4e\x76\xfb\xd2\x4a\x1c\x63\xbd\x8f\xf3\x78\x5d\xea\x3c\xba\x1b\x79\x79\x8d\xec\x58\x22\xc9\xd6\xe9\x69\x93\x64\x3b\x6c\x74\xb1\xd6\xd9\x46\xe7\x45\xb4\xbc\xa5\xcf\x0e\xf1\x2b\x69\x8e\x54\x67\xbb\x72\x8f\x4b\xfd\xea\x17\x87\xa4\x15\x57\xf8\x0b\x62\x6c\x4f\x9b\x9d\xc6\x36\x5e\x97\xc7\x3c\x5a\x7e\x26\x84\x8d\xde\xc6\xa7\xb4\x64\x61\x0f\xc7\x8d\x26\xc5\x2b\x53\x45\xcb\x3b\x5a\xc6\x54\x5a\xf9\x1a\xb8\x51\x34\x5b\xf5\xfe\xad\x22\x28\xac\x22\x03\xd3\xf8\x51\x66\x15\x5d\xaa\xab\x55\x04\x80\x7f\x68\xa1\x5d\x7a\x09\x03\xfb\xe2\x11\x8f\xf4\x6a\x86\x9e\x01\xf5\x40\xa3\xa2\x3f\xfb\x4c\x19\x18\x03\x43\x9b\xda\xa7\xf6\xbf\x19\x79\x0e\xa3\x8c\x51\xa6\xc2\x0b\xb1\xeb\x0d\x14\x3d\x33\x30\x58\x45\x02\xca\x4a\x2e\x95\x82\xb4\xe2\x02\xc2\xbe\x53\xa6\x47\x32\x2b\x94\x52\xa4\x2b\x04\xad\x83\x1d\x69\x89\x1d\xed\xc4\x8e\x56\x2c\xa1\x04\x3c\x27\x0d\x21\x56\xb5\x38\x24\x8b\x72\xaa\x5e\x42\xe1\xca\x41\x28\x48\x21\x2d\xbb\x97\xea\x01\xb8\x5f\x45\x90\x4a\x29\x21\x45\xf3\x5b\x04\x92\x59\xd9\xaf\x56\x91\x21\x69\x66\xcd\x9f\xc0\x2a\x24\x3f\x0b\xcf\xfa\x77\x7f\x21\x95\x64\xf4\xd5\x2a\x52\xc4\x95\x84\x74\x8c\xf5\xfd\x5a\x55\xab\x0f\xc0\xf6\x63\x45\xa4\x81\x11\xf4\xa9\xc4\x13\xe6\x44\x95\xa1\xb7\x6c\x2e\x81\x6b\x2c\x18\x5b\x0a\xa3\x8c\x14\xa1\xb1\xc2\x69\x93\xc7\xc0\x3b\xbc\x0b\xd4\xf6\x77\x6b\x66\x0d\xd2\x9b\xec\x77\x7f\x5a\x04\x76\xcc\x54\xcd\x14\xfd\xa3\x4d\xe
9\x9f\xe1\xb7\xf5\x6e\xb3\xf6\x17\x53\x76\x6e\x07\x0f\x9c\xa7\x81\x3d\x6c\xd0\xd3\xe0\x3c\x6d\x50\xd7\x4a\x5a\x22\x8d\x23\x56\xcc\x79\x02\x41\xe6\x93\x30\x8a\xf7\x20\x27\x1b\x04\xa2\xbd\x40\x2e\xf2\xe4\x3c\x85\x07\x32\x03\x85\x90\x12\x3d\x41\xe3\x24\x10\x04\x21\x57\xd1\x93\xf5\x03\x31\x0f\x54\x59\x08\x08\xa5\x1c\x44\x0d\x32\xa4\x0a\x8d\xa6\x41\xce\x0c\xd2\x42\x2f\x48\x01\xe1\x80\x86\x54\xe1\xa0\xe3\xfd\x89\x13\x8b\xc2\x9e\x0c\xa1\x98\x13\x0a\x4b\xb7\x87\x39\x2b\x11\x21\x1a\x16\x8d\xdd\x19\x60\x89\x84\x9c\x24\x91\x03\xb2\xa1\x4f\x40\x4c\x08\x41\x2b\xce\x4c\x06\x52\x09\x2f\xda\x38\x47\xca\x09\xe2\xa8\x69\xf9\xcd\xc2\xfb\x8d\x7c\x97\xdf\x40\xe2\x91\x81\x28\x3d\x71\xb6\xa8\xc9\x3a\x6f\xb5\xc0\x01\x83\xcf\xec\x64\xaa\xd5\x1a\xc1\x32\x1b\x89\x9d\x81\x85\xdd\xf0\xe6\x10\x63\x1f\x62\x9a\xee\x71\x0f\xce\x57\x6c\xc6\x2a\x17\x36\x2b\xd4\x58\x36\x18\xc8\x10\x9d\x4c\x52\xeb\xe6\x4a\x80\xdd\x33\xdc\xfb\x01\x0f\x95\x5c\x14\xe4\xbd\xc1\x05\xde\xc9\xbb\xdd\x15\x7b\xad\x33\xba\x4f\x87\xb6\x62\x55\x7b\x56\x63\x3b\xd7\x2a\x07\x07\xf2\x43\x46\xa4\x7a\xf6\xe4\x0c\x87\x4b\x36\x9d\x15\x51\xa9\x6b\x56\xad\x6d\xb5\xda\xa5\x11\xba\x8b\x75\x69\xc9\x9e\x5c\x7b\x02\x9c\x27\x98\xc1\x20\xab\xd9\xac\x23\xc1\xe5\xab\xee\x93\x51\x07\xaa\x13\x11\xfc\x67\x02\x90\xae\x12\xd2\x6a\x1b\x64\xca\x56\x5e\xa7\xd4\x39\x89\xc0\x19\xad\x52\x24\x9c\x08\x57\xed\x54\x60\xb9\xb3\xf9\x03\xc6\x19\xd4\x3e\x71\x25\x99\xd7\x8c\xa9\xa6\x7a\x81\xe0\x81\x0c\x10\xe8\x68\xc9\x9e\x14\xf6\x9e\xda\x9a\x23\xf7\xca\xb0\x1d\xc7\x38\xe2\x56\x09\x4d\x46\x94\x9b\xbc\xc7\xfc\x41\xd6\xaf\x6b\x3c\x5c\x8d\xf7\x13\xdf\x07\x8c\x16\xb0\x3a\xeb\x1b\x9f\xda\x67\xae\x8b\x01\x5c\x23\x33\x29\x59\xfb\x2e\xd0\xee\xee\x1b\x29\x67\x6c\x50\xda\xa7\x27\x92\x3c\xc3\xaa\x26\xc7\x38\xf2\xa6\x99\x75\x26\xfe\x95\x3a\x5b\x19\x29\xd2\x38\xed\x5b\x5b\x39\x72\x17\xc0\xc2\xd1\x2c\x45\xe5\x9d\x35\xe3\x0d\xca\x3b\xed\xa4\xb7\x9e\xf4\xd6\x83\x34\x1e\xd7\x67\x03\x09\x8f\x35\xac\x62\x4f\xf6\x6f\x85\x8a\xa4\xc2\x76\xa6\xfd\x18\xcd\x02\xe0\xcf\xde\xef\xe1\xc3\x12\x89\xb9\x93\x48\xd4\x05\xe0\x3d\x12\xc1\xf9\x83\xe1\x5c\xce\x1c\x99\x09\x79\x49\xd1\xfe\x4f\x37\x37\x0b\xbb\x88\xc6\xc9\x5d\xc4\x94\x50\xa9\x27\x72\x24\x2f\xf5\x04\x6f\x77\xe2\x4b\xc1\xb9\x50\x19\x07\x9a\x64\x7e\x0e\x15\x8f\x84\x96\x37\xd3\x21\xc3\x81\x41\x48\xee\x7b\x24\xe5\xf6\xd1\x8a\x4b\x71\x05\xe9\xea\x7f\xd0\xdf\xc0\x17\xac\xdf\x29\xd1\x49\x7a\x32\x14\x7b\xc3\x40\xf4\x64\x3a\xe9\xd3\x5a\xb7\x49\x7d\xb2\x4f\xfc\x8e\xa5\x9e\xc1\xf7\x27\xa6\x6e\x26\x94\xcf\x47\xd4\x87\x54\x9d\x88\x13\xdc\xf5\x26\xa8\xba\x93\xb0\x37\xea\x97\xc0\xa7\x59\xd3\x33\xb8\x5a\xd0\x94\x00\x74\x56\xb4\xc6\xe4\xec\xdc\xff\xeb\x37\x1b\x3a\xa1\x9d\x9d\xcd\x86\x7a\xb7\xb0\x51\x04\x09\x31\x72\x90\x6e\xa9\x4d\x1d\x0f\x7c\x13\xef\x52\xe4\x65\x1d\x23\xd7\x83\xe1\xef\x1e\x35\x73\x3e\x4f\xe6\x75\xa7\x7d\xc5\x7c\x2d\xc6\x0a\x1c\x9a\x3d\x5e\x70\xa6\x70\xd2\x54\x45\xd2\x0c\xf5\x82\x0a\x41\x3d\xe4\x26\xc4\xbc\x5b\xa5\x11\x49\x7c\x03\xa8\xc4\xa8\x24\xd5\x7a\x53\x7b\x6b\xd3\xa1\x83\x4b\x9e\x55\xbb\x33\x7f\x87\x55\xbc\x9a\x82\xaf\x33\x38\x05\x4d\xb6\x0a\xfc\xfd\x17\xcc\xb0\x79\xfd\x41\xa4\x75\x73\xe6\x3e\x08\x1c\x8a\xb9\xf7\xe7\x19\x85\xc1\xd8\x11\x2e\x76\x02\x51\xbd\xa4\xbe\x51\xf3\x5d\x09\x57\xa3\x7b\x66\x2c\x08\xd8\x90\xed\xbe\x2b\xbd\x01\x11\xbb\x21\xe5\x8f\x5b\x96\xe6\x80\xa0\x27\x34\x48\x6a\x10\xd5\x20\xab\x8b\xd6\x6b\x46\x85\x0f\xb1\xdf\xeb\x0f\xcd\x73\x89\xa1\xc3\xd1\xb9\x4a\x36\x4d\x22\x17\xa5\x58\xb4\x3a\x12\xe3\x3a\x92\x8f\x7b\x28\x82\x56\x95\x33\x55\x3b\xc8\x94\x6f\xcf\xe7\xaa\x8a\x93\x6a\xeb\xa1\x70\xaf\x87\xfa\x92\xc6\x54\xb7\xa1\x66\xe0\x46\x07\x3d\xb5\x04\x2a\x
88\x55\x2b\xb8\x99\x12\x9d\xc3\xc7\x84\xf3\x09\x26\x04\x1a\x2f\xc3\xc3\x45\xb7\xd7\x05\xab\x52\x58\x23\x86\xa0\x21\x6e\x13\xba\x8d\xd3\xb5\x12\x77\xea\xdc\x62\x12\xe0\x03\x07\xa7\x68\x34\x9a\x6d\x2b\xbd\x8b\xb3\xa1\x8c\xd6\x31\xb7\xe2\x12\x2d\x82\x3b\x86\x01\xab\x85\xd7\x24\xfe\xd6\x5c\x20\xb8\xb1\x70\xd7\x81\xe1\x6d\xc9\x58\x5e\x69\x64\x98\xce\xf5\x47\x05\x46\x87\xcc\xe6\xb6\xe1\x2d\x08\xf5\x0b\xc2\x50\xc3\x20\x5c\x7f\x30\x3c\x74\x5d\xd8\x65\x66\xc5\x09\x77\xf8\x9a\x62\xfc\x78\xda\x06\xea\x39\xde\x4e\xee\x70\x1d\x10\x1c\xd0\xaf\xf5\xdc\x15\xd0\xaf\x34\x26\x1d\xa0\x8f\xe4\xce\x2e\xd0\x47\xc3\xbe\xbb\xbf\x81\x4b\xb9\xe6\x9e\xab\x5c\xf0\xc4\x90\x44\x9d\xd3\xe0\x2a\xfa\x33\x00\x00\xff\xff\x1c\xdc\xe5\xe3\x81\x1d\x00\x00") + +func fontsDoomFlfBytes() ([]byte, error) { + return bindataRead( + _fontsDoomFlf, + "fonts/doom.flf", + ) +} + +func fontsDoomFlf() (*asset, error) { + bytes, err := fontsDoomFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/doom.flf", size: 7553, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsDotmatrixFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x5c\x4b\x6f\xdc\x38\x0c\xbe\xcf\xaf\xe0\xc1\x07\xe7\xb0\x69\x9b\x62\x81\xdd\xdb\x00\xfb\x23\xf6\x48\xa4\x6d\x02\x04\xc8\xa6\x40\x1e\xfb\xf8\xf7\x8b\xd8\x7a\xf0\x25\x4b\xb2\x25\xcf\x4c\xa6\xc9\x34\xf9\x2c\x53\xa4\x48\x91\x14\xe9\xc9\xfd\xe3\xfd\xcd\xed\x00\x5f\x3e\xbf\x7f\xdd\x7c\x85\xcf\xf0\xf5\xf0\xe3\xe7\xeb\x5f\xb7\xaf\xcf\x0f\xff\x5e\xdf\x3f\xde\xc3\xb7\xff\xe0\x8f\xb7\xe7\xd7\x87\x17\xf8\xf3\xf6\xe9\xe9\xee\x19\xc6\xef\xff\x4c\x3f\x1c\x6f\xbf\xbf\x5c\x7f\x7b\xbb\xbe\xfb\xf1\x76\x75\x78\xbc\x7d\x79\x85\xe7\xbb\xbf\x1f\x5e\x1e\x7e\x3e\xc1\x2f\xf0\xdb\xa7\x9b\x2f\x9f\x7e\xff\xf5\x70\x80\xe9\x35\xf8\xd7\xfc\xeb\xf1\x83\xc1\x1e\x07\x18\x00\x61\xf0\xbf\x10\x78\xc4\xab\x6e\xb0\xfb\x56\x70\x15\x27\x29\x22\x14\x76\xf8\x00\x80\x30\x7d\x0f\x74\xf8\x00\x23\x5e\x4d\xdf\x9b\x60\x3a\xf5\x69\xe0\x77\x7c\xfa\x11\x21\xbc\x0f\xf3\xf0\x77\xf8\x9d\x67\xff\x1e\x60\x9c\x00\xff\xee\xe1\x11\xaf\xe8\xd7\xca\xd1\x89\x29\x93\x30\x7d\x2d\xc0\x04\x47\x72\xc1\xc3\x9e\xbb\xe9\xa2\x87\x91\x32\x18\xe0\x11\xaf\xcc\xd1\x40\x47\xa3\x49\x7b\x92\xd3\x18\x4d\x39\xf1\x22\x0a\x06\x85\x40\x29\xd8\xaf\x78\x14\x52\xae\xf8\x0c\x92\x15\x0f\x30\x3a\x4e\x18\x71\x74\xb7\x30\x18\x03\x8b\x04\x76\x20\xd2\x29\xc3\x48\xae\xe4\x70\xbb\xd2\x7d\x85\x36\xdd\xda\x06\x49\xdd\xf0\xa8\x37\xe4\xf0\x8c\x08\x79\xa2\x26\xd8\x5a\x79\xab\x45\xbe\x56\x8e\xf3\xf7\x0b\x74\xad\x28\x6d\x34\x69\xf3\xd1\xa6\x98\x16\x1c\x3c\xb0\x17\x94\x6d\xe5\xd9\xab\x28\xa7\x92\x86\x21\xf0\xc1\xe1\xb0\xa7\xc4\x68\xf2\x43\x57\x58\xe0\x68\x0c\x9f\xb4\x21\x61\x34\x19\xf7\xc6\x55\x0c\xa3\xc9\x61\xf0\x56\x8a\x71\x83\x93\x4a\x31\xd1\x1c\xee\xbc\x84\xad\xb6\x2a\x56\x6c\x18\xad\x25\x34\x4d\x22\xb1\x56\xe5\x62\x2e\x6c\x65\xbf\x5d\x91\xc1\x64\xaf\xb1\x38\x13\xf7\x5a\x49\xe4\x40\xb7\xd7\x14\x6d\xbf\x2a\x6b\x22\x47\x3e\xa0\xa4\x86\xa3\x09\x6b\xdf\x89\x2c\xfe\xe4\xc5\x34\x89\x2c\xc1\x1b\xc5\xf4\xbf\x90\xff\x77\x82\x2d\xaf\x17\x22\x56\x21\x6c\x7a\x3d\x97\x91\x95\x1a\x6d\x0d\x8c\x40\xbf\x8a\xb4\xb9\x75\xca\x62\xf8\x23\x68\xb3\x60\xca\x80\x87\x17\xb2\xe1\x1e\x9c\x4d\x80\xc3\x21\xb7\xa1\x70\x4c\x78\x08\x4c\xc0\x08\x33\xd0\xc3\x02\x74\xb0\x04\x35\x83\x19\x98\xe0\x34\xe3\x0b\xac\xf8\x3c\x00\x29\x4c\x73\xaf\x08\x13\x5e\xe8\xa2\xd4\xc1\xd1\x87\x31\xda\x00\x34\xc7\xda\x24\x26\xc9\xdd\xe3\x70\x04\x4b\x11\x6c\x46\x46\xbc\x01\x1c\xdc\xb5\x31\x65\xb8\x61\xad\x98\xc8\xd4\xe9\x2c\x88\x10\x8f\xb6\x4c\x79\xb
3\x6c\x39\x2c\x0d\x5f\x42\xcd\xa1\xb5\x84\xf4\x90\xc3\xa6\xe4\x27\x97\xb5\x62\x22\x17\x34\x6e\x14\x72\x86\xb2\x36\xca\xf4\x8b\xd8\xb2\x7e\x87\x31\x58\x10\xa1\xa6\x43\x88\xb8\xcc\x5d\x13\x11\x52\x6e\x31\x5a\x90\xda\xf4\x58\x42\x11\x02\x46\x76\xf4\xa5\x8b\x82\xcc\x12\x8b\xf4\x93\xb0\xce\x6d\x46\x8b\x52\x9d\x4b\xac\x08\xab\xb2\x46\xdb\xee\x9a\xd9\x32\x9a\x30\xa5\xcd\xa5\x6f\xb4\x37\x2d\xa3\x65\x76\x4b\x60\xe9\xf5\xa3\xef\x64\xe2\x07\x98\x19\x7e\xd6\x01\xf7\x13\xb3\x4a\x9b\xf1\x85\xd6\x46\x41\x8b\x15\xba\x30\x6c\x09\x15\x87\xe9\x25\x2c\x92\xa7\xbb\x0b\x4a\x79\x8f\x26\x44\x9a\xb9\xa0\xcd\x01\xa5\x60\x5b\xd9\xb1\x40\x64\x58\x54\x46\xd0\x62\x66\xe4\xc9\x89\x79\x92\x74\x74\xff\x9c\xf6\x82\xc5\x2c\x3c\x88\xb1\xb3\xcb\x5c\xf8\xe0\x70\xa8\x06\x11\x18\x09\x1d\x0f\x23\x3b\x10\x7b\x98\xa6\x6e\x9c\x48\xb8\xa2\xa6\x8c\xd5\x20\xc9\xa0\xe6\x04\x4c\xda\x1c\x3e\x76\x3f\x58\x9e\xc5\x31\x34\x2a\xc2\x5a\x95\x96\x8a\xd8\x6e\x12\x25\x6a\x5b\xd0\xa6\x9f\x32\xb6\x05\xc0\xc1\xc4\xd5\x92\x02\x33\x8f\xec\xba\x08\x8c\x06\x8c\xbc\xfd\xe1\x60\x59\xf4\x89\xa3\x2d\x38\x35\x1a\xcc\xd1\x5a\x9b\x49\x31\x49\xd0\xa3\x75\x74\x57\xbc\x13\xf2\x8c\xae\x8e\xce\xcc\x70\x86\xdd\x0d\x05\xa3\x09\x6d\x56\xba\x07\xc3\xc4\x6b\xc5\x04\xbe\x86\x2c\xfe\x60\x68\x2e\xd0\xd8\x36\xc6\xda\x23\x4b\x26\xfc\xaa\xf3\x28\x6b\x84\xd3\x85\x7c\x99\x58\x4a\xf1\xd9\x4f\xf3\xcd\x61\x9d\xec\x01\xe4\x92\xf1\x85\x7a\x00\x8b\xed\xed\x72\x5a\x99\x03\x9d\x38\x0b\x32\xe6\x2c\x81\xb3\xba\xef\x90\xba\x33\x56\x4c\xe2\x23\x29\x05\x95\x29\x79\x11\x96\xba\x9f\xd9\x60\xc9\x5e\x23\x6d\x56\x9f\x37\x0d\xe2\x51\x11\x12\xb6\x2a\x47\x59\x22\x3d\x8a\x24\x67\x25\x66\x39\x5c\x2b\x66\xc3\xbd\x69\xac\x95\x10\x36\x47\x64\xd5\x31\xa7\x48\x4c\x5a\x7f\xaa\xf2\x35\x79\x0e\x9b\x44\x8e\x46\x01\x25\xa1\xcd\x25\xff\xa6\xcc\xb0\x19\xdc\xb5\x4e\x6b\xd8\x9b\xa6\xcf\xa9\x24\xe6\x2c\x84\xd1\x84\x7d\x12\x24\x47\x37\x39\x56\xe7\x8c\x56\x9a\x21\x18\x75\xcd\x31\x3c\xfd\xc3\xe6\xa4\x5b\x5b\x8c\x06\xad\x36\x1a\xab\xf2\xdb\x67\x8b\x98\xc6\x9c\xfb\xc2\x5d\x03\x4a\x78\xa9\x94\xc4\xd0\x26\x75\x9f\x74\xb4\x6f\xca\x2b\xdd\xd3\xf7\xd3\xb9\xa0\x9c\x98\x72\xce\x90\xb5\x4b\x56\xdc\x05\xcd\x61\x78\x06\xcc\xb0\xce\x1d\x3d\x6d\x93\x02\x66\x13\xb8\x67\x01\x53\x26\xb5\xc1\xed\xd5\x1d\x2e\x72\x27\x14\x30\x89\xc0\x6a\xb8\x56\xcc\xfe\xda\x44\x13\x26\xee\x59\x68\xd3\xef\x18\x1d\x50\xa8\x92\xeb\xc4\xac\x3a\x6f\xae\xcf\x82\x38\xed\xfd\x03\x4a\x23\x6d\x9a\x44\x94\xe9\x6b\xaf\xb7\xd7\xde\x5c\xd1\x2a\xea\x97\xec\xd5\xc2\x55\x62\xc6\xd7\x5e\x2e\x35\x01\xf7\xd5\x26\x93\x72\x65\x08\x0f\x4f\xf8\x31\xdd\xfb\xf1\xdc\xd3\xf2\x94\x81\x72\x18\x1e\xfc\x93\x8c\x6f\xd5\x26\xcf\xf5\xbc\x36\xb9\x13\x0f\x4b\x9b\x81\xd1\x82\x91\xa6\x0d\x01\x0e\xf9\x12\x85\x31\x48\x4f\xb5\x19\x27\xed\xa2\xcd\xa4\x7e\xd0\x7c\xfc\x29\xa5\x88\x16\x55\x50\xb6\xb0\xe7\x29\xe6\xf9\xb8\xa0\x33\x69\xca\xf7\x7d\x2e\x28\x36\xae\x58\x9f\x70\xa0\xf5\x7c\x0e\x83\xbb\xaf\x0b\x5c\xce\x49\x90\x64\x19\x36\x02\x0a\x18\x46\xcb\x17\x4b\x5e\x60\x46\x6b\xc0\xfc\x02\x5b\x71\xb9\x37\xd5\x05\xa5\x9f\x1d\xb5\x19\x26\xec\x01\x93\x1e\x54\x53\x6d\xc6\x0e\x64\xb8\x74\x3c\xb0\x0e\x24\x02\x6f\xe3\xd2\x8c\x54\x7d\x24\x26\xf0\x7e\x3c\xc1\xf3\xc1\xe9\x36\xae\x84\x61\xfe\x10\xd2\xd9\xc3\xa8\x5c\x27\xc2\xf1\x20\xbc\xd5\xfc\x15\x9f\x3d\xa0\x4f\x3b\x78\xe5\x8b\x4f\xd5\x64\x61\xa7\x65\x05\xcf\xb8\x86\x01\xcc\xd1\x1d\xe0\xca\xad\x2c\xcf\x33\x0c\x16\xa7\x36\x35\x9a\x85\x1f\x34\xb3\x54\xde\xf7\x6b\x7d\xde\x54\xae\x56\xa6\x58\x0a\x6e\x52\x6b\x48\xc0\xe5\x25\x88\x5a\x31\x4b\x87\x87\xc8\x6e\xd5\xe8\x71\xb9\x4d\xa8\xd7\xca\x86\x6b\xeb\xff\xeb\xc4\x2c\x8a\x56\x66\x3b\x47\xda\xe1\x4a\x6d\xf6\x6b\x15\x15\x0f\x
3f\x30\x21\x4b\x8c\xb6\xa0\x76\xa2\x4e\x73\x99\x12\x44\x9e\xc1\x65\x31\x25\xe3\x33\x68\xec\x08\xde\xcd\x31\xdc\x84\x20\x62\x24\xaf\xcd\xe0\x7a\x31\xcb\xa9\x18\x89\xfe\x89\x8c\xb6\xec\x49\x85\x73\xf4\xb4\xe5\x70\xb1\x7a\x28\x9e\xcc\xba\x35\x6c\x74\xdf\x83\xa0\xc5\x44\x92\xb4\xc7\xae\xfd\x4d\x22\xaa\x3e\x2e\x68\xd8\xdc\xc9\x90\x8e\x6d\x9b\x60\x37\x93\x38\xc9\xf3\x4f\x79\x2f\x2c\xf8\x5a\xa3\x65\x37\x51\x58\xcd\x39\xa9\x46\xab\x8d\x36\x9c\x24\x11\x7d\x6c\x73\xf7\x9b\x95\x09\xcd\xa0\x86\x77\x31\xc3\xf3\x33\xda\xcc\x70\x1a\x37\x65\x6c\x73\xfa\xd1\x59\xaa\x78\x6f\x07\xef\x22\xe6\xa5\x7a\xda\x62\x31\xed\x9c\x76\xf5\x13\x7e\x02\x2e\x8b\x84\xfd\xc5\xcc\xf7\xcf\x10\x2c\x31\xb5\x2d\x9b\x30\x3b\xa0\x68\xda\x60\xd2\x86\x65\x78\x9d\x36\xf3\xfd\xb3\xf5\x62\x9a\xa7\xd3\x6c\x4e\x9b\x81\x57\x6a\x93\x08\x2b\x39\x54\xac\x08\x6f\x4d\x60\x6b\xca\x3a\xb8\x94\xef\x3d\xb4\x59\xd3\xf6\xa4\xb4\x47\xec\xf0\xf9\xcd\x64\xe5\x55\xc3\xec\xef\x02\x99\x9e\xb6\x80\x88\x4c\x33\x28\x2c\xf7\x26\x10\x51\x97\xe5\x69\xeb\x82\xdc\x8b\x8a\x19\x65\x61\x62\x56\xc3\x51\x95\xf4\xf4\x13\x17\x70\xa7\x80\x22\xf3\x42\x9c\x60\x51\x88\x9f\x74\x41\x18\x8f\x20\x65\xbc\x4d\xbf\xa9\x8f\x98\xb0\xaa\x1b\xaa\xdb\x9e\x51\x65\xa2\x91\xe9\x54\x26\x8c\x96\xa6\x40\xfb\x65\x41\x44\x48\x96\xa3\x83\x8e\x05\x69\xfd\x58\x6d\xcf\xf9\xee\x51\x54\x41\x95\x8d\x7f\x5c\xa3\xd5\xc7\x1c\x34\xff\x14\x86\x0c\x60\x8d\x03\xca\xb2\xd3\x47\xe3\x60\xc9\x9e\x4b\x0d\xb4\x27\xac\x4b\xc9\x6b\x20\xdc\x0f\x71\xf8\x00\x31\x1c\x0a\xd8\xf3\xc7\x60\x34\x61\x72\xac\x2a\x21\x12\xdd\xad\xe4\x04\x4c\x4e\xfc\x6b\x11\x0e\xe2\xcf\x6d\x94\x41\xae\xca\xfc\xa1\xad\xcd\x70\x57\xda\xaa\x87\xa2\xe1\xa8\x08\x6b\xc5\x49\x22\x6e\x2d\x6d\x0a\x36\x14\x41\x5a\x96\x19\x22\x71\xc3\x69\x4e\xf2\x6a\x5b\xd0\xe6\x42\x9f\x30\x4a\x2f\xfe\xe4\x5f\x0c\x05\xa2\xbf\xe9\xf9\x39\xbb\xfe\xa6\x17\x03\xc0\xec\x5c\x98\x7f\xac\xe3\xd2\x3e\x08\x58\x20\xa6\x11\x94\x9b\x3c\xc8\x58\x07\x6f\xcb\xe8\x65\xea\xce\x15\x41\xe5\xbc\xe8\x47\xf9\x16\xc5\x54\xea\x54\xda\x64\xa3\xdb\xb4\x71\xfd\x3d\x3d\xda\xb8\x95\x46\x7b\xb1\xb5\x20\x2e\x25\x59\x43\xc6\x52\xb0\x65\xb6\x36\x92\xf1\xb3\x3c\x88\x19\xff\x8e\x87\xff\x03\x00\x00\xff\xff\xd5\x8f\x77\xd0\x99\x5b\x00\x00") + +func fontsDotmatrixFlfBytes() ([]byte, error) { + return bindataRead( + _fontsDotmatrixFlf, + "fonts/dotmatrix.flf", + ) +} + +func fontsDotmatrixFlf() (*asset, error) { + bytes, err := fontsDotmatrixFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/dotmatrix.flf", size: 23449, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsDrpepperFlf = 
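Past the last font, stock go-bindata normally appends a name-to-constructor table and a public `Asset` accessor, which is how a consumer such as go-figure resolves a font by its `fonts/<name>.flf` path. That part of the generated file is outside this hunk; continuing the sketch above under the same assumption, with the registry contents inferred from the constructors seen so far:

// Assumed registry shape; the real table also lists the remaining fonts.
var _bindata = map[string]func() (*asset, error){
	"fonts/diamond.flf":   fontsDiamondFlf,
	"fonts/digital.flf":   fontsDigitalFlf,
	"fonts/doh.flf":       fontsDohFlf,
	"fonts/doom.flf":      fontsDoomFlf,
	"fonts/dotmatrix.flf": fontsDotmatrixFlf,
}

// Asset returns the decompressed contents of the named embedded file.
func Asset(name string) ([]byte, error) {
	f, ok := _bindata[name]
	if !ok {
		return nil, fmt.Errorf("asset %s not found", name)
	}
	a, err := f()
	if err != nil {
		return nil, fmt.Errorf("asset %s: %w", name, err)
	}
	return a.bytes, nil
}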
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x56\xdd\x6e\xe3\x36\x13\xbd\xe7\x53\x1c\xe4\x33\x3e\xed\xa2\x89\x98\x68\x93\xa0\x5d\x08\xaa\x8a\x76\x0b\xf4\xae\x40\x7b\x49\x60\xc2\x58\x94\x2d\x54\x96\x02\x89\xee\x6e\x00\x3e\x7c\x41\x52\x92\xf5\x67\xaf\x01\x5b\x7f\x3c\x1c\xce\x9c\x19\x1e\x4e\x5e\xe6\x91\xdc\xe0\x09\x8f\x88\xee\x71\x8f\x87\x67\xf6\x7b\x5d\x69\xe0\x33\x7e\x6b\x42\xfc\xa9\xde\xde\x54\x83\x0f\x32\xd7\xaa\x81\x44\x25\x0f\x0a\x45\x85\xba\x52\x68\x8b\x1d\x32\xfb\x50\x54\xd0\xfb\xa2\x45\xab\xdf\x4b\xf5\x31\x64\xbf\x1c\xf5\xbe\x6e\x3e\xe3\x8b\x6a\x6a\xfc\x2d\x0f\x87\xa2\x52\xd5\x2d\xf4\xc3\xd3\xfd\xa7\x87\xa7\x74\xbb\x0d\xf5\x51\x87\x79\x11\x32\xf6\xeb\x5e\x36\x72\xab\x55\xd3\x22\xf8\x5f\x00\x59\x65\x08\xfe\x1f\x40\x36\x0a\x65\x7d\x6c\xdf\xdd\x97\x3f\x82\x03\xaa\x5a\xe3\x5f\xd5\xbc\xa3\x95\xba\x68\xf3\x42\x65\xec\x6b\xa1\xf7\xd0\x7b\x85\x60\x13\xa0\x6e\x10\xe8\x20\x0c\x43\xfc\x75\xdc\xed\x54\xab\x8b\xba\x6a\x7f\x66\xec\xcb\xb7\xb7\x52\x56\xd2\xbe\xa2\xce\x91\x17\x4d\xab\x51\x16\x95\xfa\xcc\x6c\xe8\xb8\xc3\xcd\x41\xee\x8a\x2d\xaa\xe3\xe1\x55\x35\x37\xc8\xeb\x06\x79\x51\x2a\x14\x99\xaa\x74\x91\x17\x5b\x3b\x97\x49\x00\xb8\x43\xbb\xaf\x8f\x65\x06\x59\x7e\x95\xef\x2d\x5e\x15\x5e\x64\x70\xeb\xe6\x54\xf5\x57\xb6\xf1\x20\xeb\xd3\xcd\x5e\x36\xd9\x6b\x29\xab\x7f\x6e\x70\x77\x87\xb7\xa6\xa8\x74\x8b\x16\x12\xee\xe3\x2d\x5e\x8f\x1a\x5b\x59\x05\xda\x5a\x69\x0f\xc7\x76\xaf\x32\xf6\xe4\x0d\xec\x55\xb1\xdb\x6b\xeb\xaf\xc4\xb6\x67\x88\x3d\x5e\x18\xbc\x75\x04\x15\xd5\xb6\x3c\x66\x45\xb5\x43\xa6\xda\xad\xaa\x32\xd5\xb4\x2c\xba\xf7\xf3\x0e\xf2\x9b\x0b\x1c\xa5\xaa\x76\x7a\x8f\x0f\xea\x5b\x8f\xde\xd6\x87\x83\xaa\x3c\x2f\xed\x47\xfc\x00\xe4\xc7\x6c\xa7\x90\xcb\xad\xae\x1b\xd6\x19\xc8\x54\x2e\x8f\xa5\xf6\xce\x1e\xea\x4c\xb9\xb8\x5d\xe6\xf3\xba\xd2\xec\xe1\xd9\xc1\x3c\x91\xd6\xbf\x89\x59\xc6\x36\xe9\xf8\x97\x32\x10\x52\x66\x60\x52\x66\x88\xa7\x2c\xa6\x24\x65\x00\xfc\x08\xa5\xcc\x70\xc3\xdd\x17\x4c\xae\xfe\xe6\xde\x37\x64\xc8\xd0\x66\xf4\x00\x18\x98\x1e\x7a\x42\xa7\x0c\xc6\x90\x5d\xe3\x2e\x4e\x19\x27\xbb\x1e\x8c\xe9\x86\xc9\x8e\x24\x1c\xf6\x23\x07\x87\x45\xf0\x38\xe9\x8c\x78\x7f\xac\x8d\x18\x89\xbd\xf1\x50\xf0\x4d\xca\x04\x71\xb1\x19\x63\xac\xc7\xf6\x7d\xf8\xdb\x8b\xb5\x0d\x67\xda\x3a\xd6\x5f\x21\x48\xa4\x29\x23\xb2\x48\x01\xe1\x1c\xb4\x4c\xf8\x2b\x27\x7e\xf2\x9c\xb8\xa0\x94\x25\x40\x6c\xe7\xf1\x09\x0d\xde\x2f\x90\x81\x71\x64\xda\x77\x32\x8e\x06\x5a\xd0\xd0\xfd\x4f\x7e\xf6\xce\x33\x10\xf9\xe9\xe4\x27\x0f\x64\x77\x98\xfe\x62\x41\xe3\x34\x75\xe1\x61\x46\xdd\x64\x76\x67\x1a\x70\x89\x06\xb7\xb7\x17\x22\x0a\x26\xf4\xa6\x8c\xfb\x71\x57\x0d\xe6\x54\x08\xe4\xd7\x04\x92\x61\x85\x98\x88\x92\xe9\x0a\x2e\xb5\xe4\xbc\x88\x09\xc2\x63\xf8\x38\x06\x4f\x35\x78\xe8\xf8\xe7\x04\x84\x03\x4f\x3d\x4d\x63\x77\x1d\x13\x2f\xe4\x8c\x99\x99\xb1\x13\x26\xb1\xb7\xd0\x62\x5e\xd6\x31\xe4\xc2\xbe\x48\x4d\x8c\xd0\x06\xc7\xbd\x1d\x71\x66\xad\xd0\xf3\x06\xcf\x34\x71\xcc\xe8\xf3\x69\x99\x27\x68\x3a\x62\xba\x59\xb3\xc2\x8c\x11\xf7\x25\xc9\xe6\xee\xfb\x82\x58\x29\x8f\x59\xf1\x26\x2e\x04\x1a\x57\x67\x9f\xba\xd0\xa7\xce\x8f\xc5\x94\xac\x16\xc7\x8d\x4b\xbe\xe8\x48\x3f\xcf\x40\x5f\x47\x64\xc6\xae\x8c\x30\xa7\x8c\x9c\xcb\x1a\x7c\xd6\x62\x4b\xc0\xf9\xb5\x84\xdf\xaa\xe6\xbb\xd9\x77\x01\x99\x65\x49\xae\x60\xcc\x22\xfb\xae\xee\xbb\x61\xc4\xc4\xbb\x42\x1a\xd6\xeb\xc5\x10\x27\x6f\xce\x10\x30\x48\xe9\x7c\x07\xb9\x21\x3f\x97\xdc\x55\x0c\xe6\x3b\xe3\xe4\xf7\xa5\x93\x28\x97\x4d\x43\x43\x2d\x8c\xd4\xaf\x13\xd6\x41\x6a\x68\x96\x01\xf4\xfc\x0a\x78\xf6\x30\x72\x75\xc0\x4e\x43\x12\x93\x90\xc4\x99\x9c\x9a\x21\xfa\x85\x70\x4c\xf3\x05\x1b\xd9\x1a\xcf\x6b\x76\xc4\x0
5\x3b\x8e\x8c\x39\x0b\xe4\x65\xca\xe5\x54\xd0\xaa\xcc\x0c\xbb\x9e\x3a\x2d\x77\xdc\x9f\x24\x66\x25\x9d\xc1\xba\x20\x2e\x31\x86\xe6\xbb\x7e\x84\x1a\x62\xeb\x91\x27\xec\xb0\x59\xed\xf9\x28\xec\x09\xe2\x76\xbb\x4d\x93\x3f\x5c\x04\xce\x56\x9b\xf0\x54\x2c\x42\x70\x8a\x6b\x7a\x35\xea\xc4\x6d\x5e\x10\xfd\x76\x33\xeb\xb5\xe3\x9d\xea\x24\xa4\x77\xc9\x1a\x58\xa3\xbd\x17\xd2\xee\x94\xa4\x61\x67\x5a\x3b\xe0\x76\x62\xcc\x45\x72\xbe\x5d\xb8\x78\xd6\xa5\xcc\x7e\x12\x66\x71\x7a\x8f\x67\x58\xe9\x32\x3e\xe5\x66\x6d\x73\x8c\x2b\x68\x7e\xfa\x8c\xed\x70\x18\x04\xb6\x82\x0c\x85\x13\x0c\x61\x88\x8c\xfb\x6a\x15\xb3\xb5\x66\x76\xc2\xae\x12\x27\x76\x7a\xde\xdd\x22\x06\xe6\x6e\x45\x7a\xe6\x76\xfc\x5a\x61\x1f\x5e\xb0\x1a\xd7\x19\xd5\x89\xbd\xc4\xad\xaa\x8e\x3f\x7a\x46\x8d\x4d\xbc\x30\x3e\x12\x9f\x85\xea\x9c\x95\xb4\x2e\x80\xf0\xb4\x05\x82\x7e\x8f\x2c\xb5\x66\x84\x1d\xed\x25\x73\x91\xd8\xb5\x93\x78\x5a\x3e\x2b\x92\x73\x96\xd8\xbe\x74\x7d\xb1\xf6\x98\xde\x1f\xc7\xdf\x32\x41\xde\xcc\xac\x65\x9d\x35\x7e\x1b\xd7\xf8\x4d\xdb\xde\x95\xc6\x6f\xba\xe0\x20\x80\x6b\x04\x4c\x74\x67\x26\x38\x18\xa3\xa6\xca\xb3\x14\x1c\x0f\x1d\x7a\x04\x9e\x32\x3e\x69\x30\xd6\x9d\x9a\x96\x60\x97\xb8\x53\xa7\xc2\xfd\xf3\xb2\x85\xe1\x4e\x69\xc5\xb8\xb7\x36\x66\xfa\x5f\xf6\x2c\xc6\x66\x10\xc6\x1b\xe6\x63\xaf\xb8\xe0\x76\x16\x17\x1c\x8b\x76\x38\x4e\x28\x4e\xbe\xd3\x8f\xcc\x31\x6b\x67\xd7\x80\xb9\x74\x0e\xf8\xa5\x3b\xe8\xcb\xba\x00\x4d\x30\x17\x6b\x77\xba\xe4\xbc\x04\x56\x7b\x28\x84\xe3\xfa\x7e\xf8\xe9\xf9\xaa\xf8\xa3\x87\xc7\xab\x38\x88\xa2\xfb\xab\x78\x88\xa2\x4f\x57\xf9\x17\x45\x3f\x5e\xc5\x59\xf4\xf8\x7c\x15\x6f\xd1\x53\x74\x15\x77\xff\x05\x00\x00\xff\xff\xe3\x40\x2f\xeb\x45\x11\x00\x00") + +func fontsDrpepperFlfBytes() ([]byte, error) { + return bindataRead( + _fontsDrpepperFlf, + "fonts/drpepper.flf", + ) +} + +func fontsDrpepperFlf() (*asset, error) { + bytes, err := fontsDrpepperFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/drpepper.flf", size: 4421, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsEftichessFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x56\x5b\x6b\xdb\x4a\x10\x7e\xd7\xaf\x98\x68\x03\x92\x4e\xb0\x25\x27\xe4\xe4\x78\x5f\x8e\xd3\x92\xbc\xb5\x0f\x25\xf4\xa5\x2a\x2b\x63\x62\x22\xea\x44\x10\x9b\xd0\xc2\xd2\xdf\x5e\x76\xf6\xae\x9b\xe5\x58\x2d\xb4\x74\x0b\xf1\x74\xf4\xcd\xb7\xb3\xb3\x73\xd9\xf5\x66\x7d\xbe\x3c\x85\x4b\xb8\x84\xab\x39\x64\xf0\x5f\x10\xdc\x3d\x94\x5b\x58\x57\x4f\x3b\x28\xb7\x10\xaf\x12\x78\x57\xae\x1e\xee\x37\x70\xb3\xde\x95\x8f\xcb\x2f\xe5\x16\x66\xf3\xf9\x25\x4c\x26\xf0\xf1\xfe\x79\x5b\x56\x4f\x30\x9b\x66\xe2\xbf\x17\xf0\xb8\xfc\x86\x1f\x03\x60\x47\x2c\x00\x48\x0b\x14\x02\xde\xdc\xbc\xb1\x38\xfe\x0d\xaf\xb7\xab\xb2\x9c\x5c\x3f\xef\xca\xed\x2e\x44\x55\x94\xa7\x0a\x11\x08\x8c\x64\x9a\x1a\xa6\xc5\xcb\x66\x5b\x4e\x57\xd5\xa3\xc7\xe3\xad\x37\x55\x95\x4c\x20\xce\xb2\x44\x22\x24\xd1\xdd\xfd\x06\x28\xc4\x17\x17\x09\xcc\xcf\x61\xfe\x2f\x9c\x5f\xc1\x6c\x5e\x77\xa8\xb9\x62\x56\x44\x2c\xd1\x44\x8c\xc1\xed\xf2\x6b\x83\x28\x9b\xd9\xd8\x71\x0c\xc2\xed\x87\xeb\xf7\x6f\x6f\xdc\x88\x02\x3b\x39\x61\xa8\xe0\x3c\x00\x7e\x4c\xac\xed\x5e\x81\xf1\x73\xb1\x5f\x3c\xd5\x6b\xb1\x08\xcc\xbf\x31\xc4\xd7\x3a\x41\xf4\x3a\x48\x1c\xd5\xe3\x31\x56\x1b\x1d\xeb\x11\x58\x43\xe8\xa0\x4b\x73\x25\x70\x99\x81\x00\xa9\xfe\xc4\x21\xd7\x9f\xa0\x26\x58\x0c\xe3\x2e\x5d\x3a\x99\xe4\x35\xba\xdc\x1a\xeb\xaa\xd3\x4e\x99\x7a\xc8\xd9\xff\x5a\xc3\xfd\x1b\x1c\x63\xe9\xbb\x10\x3f\x29\x97\xce\x72\x30\x4a\x8b\x61\xea\x24\x89\x84\xb2\x1e\x0c\x4b\xdc\x9f\x1a\x06\xa6\xce\x5e\xd1\xa4\x67\x2f\xee\xd3\xd5\x79\x94\xcf\xf8\x13\xb7\x63\x98\xf2\x32\x75\xa0\xed\x7b\x29\x82\x56\x9e\xdf\xaf\xd0\x0c\x85\x71\xc7\xd5\xc8\x61\xd1\x8f\xd1\x87\x20\xe4\x53\x11\x15\xd1\x67\x02\x90\x17\xdf\xa3\x54\xa0\xa2\xfc\x25\x2d\x04\x2a\xcd\xff\x11\x15\x42\x08\x2f\xce\x22\x2e\x34\xf8\x49\x68\x10\x2c\x34\xd2\x1c\x99\x08\x07\x40\x54\x5c\x41\x95\x08\x54\x5c\x41\x96\x10\x02\x29\x6a\x72\xa9\xa9\x12\x85\xc9\x34\x46\x6a\x44\xf6\x73\x50\x4c\x8c\x21\x13\xe4\x90\x82\xb0\x23\x31\x4b\xa4\xe7\x31\xd6\x56\xab\x06\xc1\x44\x16\x99\x66\xb2\x27\x0e\xdb\x63\xd0\xae\x09\xc5\x9f\x46\x11\xe9\x1c\x50\x61\x10\xa2\x3a\x07\x02\xa4\x23\x7e\x66\x78\x76\x2a\xc4\xca\xae\xd2\x76\xf2\x90\xd2\x2e\xfc\xe9\xf3\x03\x2f\xb3\xe9\xc4\x5e\xe7\x39\x3d\xa3\xc6\x8e\x5a\x3b\xda\xb4\xeb\xda\x3a\xa2\x2f\xd4\xc4\x8d\x66\x7d\x14\xa0\xdb\xb6\x87\x10\x77\x4b\x79\x5d\xec\xb1\x63\xd6\x0e\xac\x1d\x6b\xb5\x93\x75\x83\x9d\x44\x26\xfe\x22\x30\x99\x3b\x2c\x44\xa6\x18\x9c\x6c\x76\xd2\x71\x48\x88\x3c\x0a\x4a\x0d\x85\x12\x87\x50\xe4\xc5\xd4\x66\x99\xbd\xa8\x9c\xa6\xb4\x9e\x65\xb5\x53\x53\x7b\x6a\x6a\x4f\xdd\x7f\xc1\x43\x2b\x6a\x48\x2f\xfa\xcb\xd4\xa9\x69\xcd\x71\x0f\x7f\xb8\xc6\x49\xea\x81\x1d\xb6\x4b\xe3\x54\x18\xce\x01\x4e\x54\x47\x3f\x58\xe3\x14\x28\xce\x01\x8d\x3a\x5c\xe3\x64\xec\x91\xb1\x1f\xb7\x07\x1f\xeb\x8c\x6d\x12\x62\x0c\x4f\xe5\x18\x56\x9d\x95\x10\x2c\x62\xa1\x51\xed\x9a\x10\xfc\x44\x88\xe9\x0b\x66\xe4\x3b\x8d\x45\x8c\x61\xaa\x07\x33\x36\x66\xa2\x7a\x00\x31\x4d\x04\x31\x99\xc6\x48\x0d\x11\xe6\xc4\x67\x22\xd8\x68\x88\xe9\x1c\x22\x49\xa8\x4e\x9b\x86\x46\x76\x25\x87\xc9\xbb\xb4\xf0\x90\x38\x85\xf5\x4b\x73\x62\x66\x5e\x35\x42\x54\xcf\x12\x04\xe8\xfc\xed\xb4\x53\x2f\x1d\x65\x57\x69\x3b\xf9\xd6\x90\xd8\x70\xb4\x24\x69\xeb\xab\x42\x54\x0f\xb0\xba\x13\x7b\x9d\xc7\x24\xd0\x76\xd4\xda\xd1\xa6\x5d\xd7\xd6\x2a\x7d\x14\x45\xd6\x47\xe1\xd9\xb9\x08\x4e\x79\x8b\xd8\x65\x67\x0f\xa5\x5b\x82\x53\xd3\x9d\x76\x38\xb3\x51\x54\x33\x7b\x60\x88\x4c\x31\x98\x07\xac\xf3\x02\x1d\x16\x22\x8f\x82\x52\x43\x61\xc5\xbd\x14\xaa\x90\xeb\x17\x25\x4b\xc9\xcf\xb2\xda\xa9\xa9\x3d\x35\xb5\xa7\xee\xbf\xe0\x31\x7b\xd1\x9f\xcc\x34\xe2\x00\xf
1\xbe\x1e\x31\xbc\x9d\xeb\x1d\xd8\x61\xbb\x34\x4e\x39\x9a\xb7\xf4\xeb\x34\x23\x33\x1d\x13\xe9\x8e\x47\xea\xaf\x15\x7f\x04\x00\x00\xff\xff\x0d\x6b\x9d\x8e\x3f\x16\x00\x00") + +func fontsEftichessFlfBytes() ([]byte, error) { + return bindataRead( + _fontsEftichessFlf, + "fonts/eftichess.flf", + ) +} + +func fontsEftichessFlf() (*asset, error) { + bytes, err := fontsEftichessFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/eftichess.flf", size: 5695, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsEftifontFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x57\xdd\x6b\xdc\xba\x12\x7f\xd7\x5f\x31\x2d\x86\xb5\x21\x89\x6c\x79\xbf\x7c\x29\xc5\xbd\x97\xf6\x69\xef\x5d\xb8\xb4\x0b\xe7\x44\xa9\x36\x84\x86\xee\x39\xa9\x0d\xdd\x9c\x43\x1f\x44\xff\xf6\xc3\x8c\x3e\x2c\x59\xde\xdd\x40\x8d\x23\x67\x3d\x3f\xcd\xb7\x66\xc6\x8f\x4f\x8f\xe2\x3e\x83\x05\xcc\xa1\x2a\x61\x29\x60\xcd\xd8\xc7\xaf\x87\x23\x3c\xf6\xdd\x33\x1c\x8e\x90\x3f\x14\xf0\xdf\xc3\xc3\xd7\x2f\x4f\xf0\xfe\xf1\xf9\xf0\xed\xfe\xcf\xc3\x11\xaa\xa6\x59\xc0\xf5\x35\xec\xbe\x7c\x3f\x1e\xfa\x0e\xc4\x4d\x89\x3f\xc5\x0a\xfe\xb8\xef\x88\xca\x40\xfd\xc2\x05\x00\x7c\x4f\xff\x30\x9d\x4a\x4f\x2e\x4d\xeb\xeb\x77\xc7\x87\xc3\xe1\xfa\xdd\xf7\xe7\xc3\xf1\xf9\x35\xbd\x9a\x49\x6e\x11\x0c\x31\x86\xd3\x8d\xe7\xd4\xfe\x75\x7c\x7e\xb8\xf9\xfb\xe9\x78\xb8\x79\xe8\xbf\x39\x3e\xd1\xf5\xef\xbe\x2f\xae\x21\x2f\xcb\xc2\xf0\x31\x8c\x3e\x7e\x79\x82\x7f\x41\x5e\xd7\x05\x34\x02\x9a\x25\x5a\x5e\x35\x63\x85\xd2\x2b\x57\xfb\x99\x2a\x1c\x23\xa5\xe0\xc3\xfd\x8f\x84\x51\x59\x0d\xbe\xd3\xe4\x84\x0f\xff\x7f\xf7\xbf\xff\xbc\x0f\x3d\x0a\xea\xd5\x2b\x45\x2f\xb4\x66\xa0\x7f\xc5\xd7\x83\x2c\x96\xb5\xe1\xdd\x32\xad\x5b\xb6\xd1\x2d\x03\x68\x59\x5e\xd0\x13\x17\xfc\x79\x7b\x77\x7b\x87\xff\x67\x30\xac\x9e\x4c\x08\xa5\x35\xde\x2d\x03\xad\xf1\x6e\xd9\xf5\x46\xe3\x3d\x60\x90\xb6\x81\x96\x71\x50\xba\x65\x52\x81\x6c\xd1\x2b\xbc\x65\xb0\xd1\x83\xac\xbc\xe0\xab\x96\x01\xe7\x08\xe5\x46\x11\xb3\x5b\x11\xb9\x2f\x90\xd0\xef\x56\xc8\xa3\x93\x01\x19\xf1\x2d\x83\xcc\xfe\x19\x96\x8a\x78\xa1\x75\x99\x5d\x40\xca\xb6\x65\xc4\x4c\x4a\x68\x59\x86\x86\xd3\x82\x32\x5b\x76\x03\x57\x2d\x83\x1f\xd0\xb2\x19\xec\x91\x97\x31\xd7\x5b\x8b\xa2\xb4\x56\x80\xda\x03\xd9\x02\x68\x00\x78\xba\x81\x07\x4b\x5e\x64\x2d\xdb\x05\xfe\x74\x6c\x0c\x13\xc3\xe2\xe4\x46\x4f\x41\x53\xc8\x16\xe7\x1c\x08\x03\x41\x06\x01\x07\x34\x49\x43\x09\xc8\x54\x2a\xee\xd4\x32\x0e\x44\xcf\x39\x6b\x33\x13\x6c\x43\x41\xd2\x6d\x0f\xe8\x6d\x9e\x23\x2c\xd2\x4a\x29\x45\xd6\xf2\x96\x29\x55\x48\xf4\x3c\xc5\xcd\xab\x76\x43\x62\xb8\x26\x15\x7a\x7c\x6c\x94\xe7\x30\x48\xd0\x80\xdb\xa2\xd0\x0f\xea\x7b\xbb\x7a\x48\x24\x28\xab\x01\xa9\x00\x06\x07\x83\x03\x06\x09\x39\xd9\x30\xcd\x22\xe0\x0d\xde\x8b\x5e\x81\x21\xf1\x83\xe4\x8f\x5f\xee\xe2\x94\x7a\xf3\xc6\x66\x53\x98\x1e\x46\x59\x13\x57\x74\x80\x8e\xfd\x10\xa4\xdd\xdb\xb7\x26\xe3\xa2\x18\x68\x1b\x03\x7a\x9f\x11\xbb\xbc\xb0\x62\x8d\x8b\xc8\x79\x79\xde\xa3\x6d\x99\x54\x6a\x15\xb8\x60\x94\x04\x3d\x90\x02\xdd\x28\x10\xca\x12\x0b\xf3\x30\xb1\x70\xae\x8a\x64\x51\x76\xe7\x90\xd3\x2f\x99\x04\x94\xf8\x78\x59\x85\x89\x29\x4c\xc8\x32\x9e\x00\xa5\x21\x71\xc8\x09\x8c\x8e\x42\x3b\xd2\x47\xab\xce\xe8\xc3\xa3\xe4\x36\x7c\x3e\x91\xd1\x58\x74\x27\x6c\x27\x84\x76\x8b\xd2\xe1\xe9\x22\x81\x44\xe8\x14\x3d\xe4\xd8\x1a\x0a\xbd\x06\x4d\xf5\x04\x20\x37\x9a\xda\x04\xf0\x18\x23\xc3\x3e\xd4\x94\xc5\x56\x9a\xa6\xf3\xa9\xc3\x7f\x50\xb0\xc7\x0e\x52\x0d\xd8\x6a\x2e\xa5\x85\xca\x01\x99\x04\x3f\x37\x01\x49\x2a\x80\x0f\xbe\x24\x13\xd
0\x83\xa9\xb3\x27\xf9\x5c\xad\xce\xf0\x01\x3e\xe1\x0a\x35\x5d\xec\x63\x16\xca\x54\x50\xf4\x18\x3e\x28\xfc\x49\x4c\xad\xe5\x26\xb4\x89\x37\x23\xcc\x6e\xba\xf0\x05\x28\x87\x73\xc8\x6e\xc0\x9a\x23\x8a\xad\xb6\x65\x12\x76\x54\x25\x0a\x30\xf5\xb0\x53\x81\x71\x23\x8c\x04\x3e\xa5\xbd\xb3\xd0\x54\x2b\x3a\x45\xfc\x44\x59\x34\x39\x99\xf9\x15\x51\x54\x2d\x4c\xb9\xa0\xf3\x27\x89\xb9\x94\xa3\x9a\xa6\x15\x50\x49\x1f\x56\xbb\xdb\xd4\x7b\x6a\x91\x19\xa4\x8d\x2c\x79\x84\x75\x0b\x65\xa3\xe4\xa4\x9f\x1a\x24\x50\x2d\x45\x85\xa4\xba\xba\x8b\xdb\x17\x75\x7f\xdd\x53\xc0\xf9\x20\x71\xe8\xd0\xc6\x02\xdf\xed\xa8\xf3\xf6\x94\x25\xe1\x79\xf4\xfd\x0a\x0b\x76\x3e\x2a\x93\xdc\x7a\xec\x0e\x3b\x8d\x8e\xfa\x60\xbc\x91\x34\x50\x45\xac\x1b\x26\xe3\xa6\xd3\xa1\x6e\x58\xde\x83\x01\x28\x7e\x69\x66\x83\x98\x07\x0e\x29\x1b\x2d\x43\x1e\x0e\x1a\xf0\x70\x0e\xb6\xe9\x87\x67\x0a\x65\xab\x4e\x75\xf1\xd9\x75\x30\x03\x22\xc8\x78\x2a\x18\x19\x35\x45\x21\x87\xfb\x61\x2a\xde\xa3\x69\xce\x8a\x29\x8a\xb4\xcd\x52\x6e\xf9\xc3\x0c\xf7\x14\x11\xc5\x3a\xef\xce\x88\x60\xe3\x89\x45\x7f\xd2\x13\xba\x51\x02\xe3\xb0\x06\xbb\x28\xfd\xdc\x2a\x77\x86\xb8\x1b\xc7\xd0\x6f\xe4\x9d\x8c\x37\x9a\xce\x23\x81\x36\x16\x7e\x14\x0a\x13\x19\xc2\x83\x17\x9f\x38\xdf\xbe\x39\x1a\x2e\x87\x89\x70\xdc\x1d\x5c\x8b\x88\x67\x45\x89\x0b\x77\xb3\xa2\x3d\x09\x9c\x54\x49\xc7\xe3\xbc\x50\x34\x4c\x9c\x6b\xc8\x31\x66\xba\x6e\x5b\xcc\xf9\x42\x48\xb6\x6f\xd5\xf6\xcc\xd1\xdc\xaa\xed\x89\x1c\xda\xc2\x76\x22\x82\xc1\x48\xa2\x4d\xa9\x77\x19\x86\xe4\x6a\x59\x26\x1f\x11\xd5\xb2\x72\xf3\x53\x78\x1e\x90\xb0\xf0\x55\x53\xed\x90\xcb\xad\x52\xea\x2e\xa9\x9a\xd5\x72\x19\x7d\x89\x04\x0c\xd6\xfe\x0b\x81\x24\x64\xa6\xa8\x05\xee\xae\xd6\xb5\x4d\x5f\xca\xdc\x51\xd1\xab\x1a\x41\xe5\xf3\xd2\x84\x54\x35\xb5\x9f\x2d\xcf\xe3\xe6\x0c\xf8\xe7\x17\xf0\x5b\x30\xe0\x92\x5f\xc6\x2d\x5f\x94\x30\x55\xb3\x62\x60\x3e\x83\x2e\xe0\xd6\x34\x39\xb9\xd9\xc9\x4d\x58\xbd\x9b\xb1\xba\x20\x81\xec\x8e\xe6\xc2\xec\xa7\x68\x16\x15\x65\x89\x67\xe6\xe2\x6c\x27\xca\x0a\x3d\xf9\x02\x9c\x70\x9e\xbc\x80\xab\x87\xa3\x70\x16\x37\x37\x73\xf9\xe4\xac\x27\xca\x85\xfd\x24\x9c\xa6\x2e\x19\xff\x7c\x7a\xef\xca\xc7\xc8\x8d\x2c\x53\x93\x8b\x28\x1b\x06\xca\x04\xfd\xe2\xe4\x26\xaa\x72\x94\x98\xd3\x55\x40\x54\xd5\x28\x31\x4f\xe1\xc4\x28\x31\x4f\xe1\xea\x51\x62\x9e\xc2\xcd\x5f\x54\xa5\x44\xb5\x62\xa0\xa4\xbc\x34\xb2\x89\x6a\x3d\xa4\xc5\x59\x5c\x33\xa4\xc5\x39\x9c\x28\x5f\x54\x21\x85\xa8\x2f\x95\x34\x21\xe6\xb6\xa6\xc3\x7e\x3f\x5d\x48\x85\x58\xb8\xcf\xd5\xd9\xec\x14\x64\xe9\x7a\xdc\xe7\x13\x93\x92\x10\x2b\x07\xf9\x79\x12\xb2\xbe\x54\xd4\x85\x68\x1c\x97\xed\x29\x2e\x75\x39\xf4\x45\x3b\x0f\xfc\x66\x3a\xc0\x55\x1e\x06\xaf\xae\xd2\x21\xed\x77\x22\x08\x53\x54\x61\xbf\x4f\xc7\x31\x51\xd7\xb6\xa5\xce\x66\x53\xd4\x39\x73\x7e\x98\xa2\x2e\x92\xb6\x14\x52\x97\x34\x83\xa2\xd4\x68\xaa\x12\xf5\x8a\xb4\x44\x81\x23\xc2\x9a\x4c\x98\xa5\x3b\x1a\x22\x6c\xb7\x63\xc2\xbc\xf2\xc3\xc8\xcf\xe9\xd9\x4b\xcc\x53\xf3\x87\x0e\x29\xe6\xa9\xf9\x21\x35\x35\x3f\xa4\x5a\xf3\x49\x74\x4a\x5d\x9e\xe9\xd9\x62\xde\x04\x5a\x8d\xfb\xb6\x58\x94\x81\x56\x29\xb5\x0a\xb4\x4a\xa9\xe2\xcc\x44\x20\x16\xb5\x4d\x7e\x45\xc9\x3f\x31\x84\x89\xc5\xc2\xcd\x69\xc4\x62\x02\xf2\x4f\x00\x00\x00\xff\xff\x10\xe7\x55\x45\x87\x16\x00\x00") + +func fontsEftifontFlfBytes() ([]byte, error) { + return bindataRead( + _fontsEftifontFlf, + "fonts/eftifont.flf", + ) +} + +func fontsEftifontFlf() (*asset, error) { + bytes, err := fontsEftifontFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/eftifont.flf", size: 5767, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + 
a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsEftipitiFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x95\x51\x73\xdb\x36\x0c\xc7\xdf\xf1\x29\xd0\x9e\x6e\x92\xb7\xd8\x12\x29\xc9\xb6\xda\xf3\x8d\xd9\xae\x7d\xca\xf6\xb0\x75\x7b\x89\x62\xca\xf5\xec\x45\xab\x13\x6d\x91\xd3\x35\x1b\x9b\xcf\xde\x03\x49\xc1\x92\xfc\x58\x5f\xe2\xfb\xfd\x41\x82\x20\x40\x90\xde\x1f\xf6\x72\x13\x60\x8a\x12\x17\x98\xe0\x12\xe0\xdd\x6d\xdd\xe2\xbe\xb9\x3f\x62\xdd\x62\xb4\x9d\xe0\x4f\xf5\xf6\x76\x77\xc0\x37\xfb\x63\x7d\xb7\xf9\x50\xb7\x28\x8a\x22\xc7\xe9\x14\x7f\xdf\x3d\xb4\x75\x73\x8f\x62\x26\x48\x8a\x1c\xf7\xbb\xf7\x76\x14\x50\x7f\xc5\x07\x11\xe3\xca\x02\x98\xf3\xe8\x67\x1f\x63\xbf\x5f\x5e\xb6\xdb\xba\x9e\x5e\x3e\x1c\xeb\xf6\xf8\xd2\x9a\xc2\x32\xf6\x33\x80\xe6\xb8\x95\x66\xbc\x92\x7a\x6c\x8f\xdb\xd9\xc7\x43\x5b\xcf\xb6\xcd\x5d\xb7\xce\xe0\xf3\x43\xd3\x4c\xa6\x18\x25\xc9\xc4\xad\xe3\x16\x7a\xb7\x3b\xe0\x2b\x8c\xd2\x74\x82\x85\xc4\x62\x8e\x72\x81\xa2\x18\x6f\xe8\xfc\x13\xe9\x2a\xd4\x93\x6e\x21\xad\xf1\xed\xe6\xd3\xd9\x42\x89\x38\xd5\xce\xd8\x22\xbc\xfd\xe5\xf2\xe7\x1f\xdf\xf4\x2b\x8a\xfa\xc5\x0b\x6d\x0d\xc6\x00\x9a\xaf\xa9\xf5\x29\x16\x04\x81\xea\xfe\x15\x5c\xdf\x28\x88\x26\x0a\x10\x95\x82\x8b\x0b\x0b\x4e\xd0\xfe\x15\xac\x8c\x59\x79\x56\x60\x14\xfc\x4a\xdf\x0a\x9a\x58\x41\xdc\x74\x33\x15\x7c\x43\xdf\x0a\x2e\x14\x04\x8e\x30\x50\x10\x07\x0a\x2a\x0a\x12\xa0\x82\xa0\x54\x10\x84\x6e\xf6\xb7\x7e\x8e\x82\xef\x98\x2e\x98\xa6\x4c\x33\x4f\x14\x0c\x4f\xc1\x12\x9e\x20\x98\x24\x53\xca\x94\x31\xe5\x4c\x73\xa6\x05\xd3\x92\xa9\x60\x7a\xc5\xf4\xda\x51\xac\xa0\x64\xdb\xca\x51\x69\xcd\xde\xf6\xbd\x27\x8d\x0a\xa2\x8d\x2d\x29\xeb\x38\x2e\x07\xfa\xda\x7c\x3e\x69\x05\x51\xd4\x25\xe7\x06\x7b\xce\x9a\x34\x55\xa4\x9a\x0e\x75\x37\x4e\xd1\x8c\x7e\xcd\x67\xe4\x8e\xed\xfa\x66\x6a\xd8\x44\xe7\x4b\xe7\xec\xc6\xed\x69\x1b\xa3\xe0\x5a\x1b\x37\x9f\x46\x29\x0f\xac\xe8\x8c\x0d\x2a\xb8\xd2\x37\xbd\xfd\xfa\x0c\x7c\x0e\x9d\xd5\xee\xb4\x3c\x45\x71\x89\x47\xc3\xc4\x0d\xe5\x72\x65\xc6\xe3\xd5\x28\x57\xab\xd1\x15\x22\xa4\x26\xa1\x4e\x55\x80\xe4\xd9\x4b\x4c\x81\xf9\xcd\x28\xa8\xa6\x76\x0a\x55\xae\x2c\xe3\x2e\x1e\xed\xd3\x59\xbc\xed\xe4\x65\x67\x85\x61\x35\xf0\x0a\x43\xeb\xe5\x92\xd3\xa7\x5d\x07\x0a\x0c\x75\xaf\xd1\xd4\xba\x25\x6d\xa0\xf4\xb5\x0b\x68\x72\x80\x86\x88\x6a\xb7\x3e\xb5\x3b\xba\x3b\xb5\x5a\x29\xdb\xb6\x01\xb7\xc5\x86\xe9\x3d\xd3\x96\xe9\x0f\xa6\x1d\xd3\x9e\xe9\x4f\xa6\x5b\xa6\x9a\xe9\x2f\xa6\x0f\x4c\x07\xa6\x3b\xa6\x7b\xa6\x86\xe9\x6f\xa6\x7f\x98\x1e\x98\x5a\xa6\x23\xd3\x23\xd3\x47\xa6\x7f\x99\x3e\x31\x3d\x31\xfd\xc7\xf4\xbf\x23\xe3\xff\xac\xed\x33\x8f\x3e\x7b\x9a\xcd\x86\x17\x85\x74\xbf\x9f\x48\xf7\xce\x9f\xe4\xa6\x7b\x15\x48\x34\x7d\xf1\x88\xfd\x2b\x65\xe8\xbe\xb9\x36\x14\xf3\x84\x4e\xc7\xfe\x91\x12\xf6\x7a\xd0\x7d\xb8\x32\xd6\xb0\xb4\xfe\x74\x98\xd6\x5f\x2c\x53\xaa\x9b\x3f\x50\x51\x48\xc0\x6a\x78\xa3\x45\x91\x02\x86\x63\x5b\x06\xb8\x1e\xdb\x72\xc0\xe7\xb1\x6d\x7e\x96\xb8\x28\x16\x80\xcd\x78\xde\xd2\xfd\xd6\x5a\xab\x7f\x00\xec\x93\x20\x8a\x82\xaf\x0e\x52\x5d\x64\x92\x00\x56\x9a\x72\xe2\x77\x43\x26\x02\x30\x1c\xdb\x24\xe0\x7a\x6c\x4b\x7d\x9d\xfb\xb6\x0c\x28\xe3\x2b\xe3\x2b\x22\x93\x9c\x22\xf5\x0d\x73\x58\x0f\x67\x2c\x6c\x11\x7b\x86\x02\x50\x3f\x8f\x9f\x0c\x29\x12\x57\xcd\xde\x31\x4b\x21\x5c\x35\x07\x36\xe9\xaa\x39\xb0\xa5\xae\x9a\x03\x5b\x76\xd6\x36\x52\x2c\x5c\x8c\x5e\xeb\x48\xb1\x74\x31\x06\xb6\xc2\xc5\xe8\xdb\x64\x72\xd6\x76\x52\xa6\x67\x3d\x25\x65\x46\xbf\x5a\xfe\xaa\x4b\x99\xd3\xaf\x19\xab\x39\xbd\x13\xac\x16\xd4\xef\xac\x96\xc3\x36\x96\xb2\xa0\xd7\xa3\x1b\x4e\x85\x7f\x2d\x6c\xe0\x54\x52\x90\x5d\x37\x96\x52\x10\x5
6\x19\x05\x61\x95\xdb\x65\x77\xbc\x6c\x3a\x27\xd7\xba\x1b\x5e\x90\x2b\xab\x25\xb9\xb2\x2a\xac\x6b\xcd\xae\x99\xa0\x0d\xfb\x37\x44\x66\x76\x0f\x4d\xa7\xec\x1e\x58\xd9\x3d\xb0\xca\xc9\x8f\xd5\x7c\x78\x45\x65\x56\xd0\x42\xfe\x51\x91\x79\x42\x0b\xb1\x12\xb4\x10\x2b\x39\xbc\xd0\x32\xb7\x51\x9f\xba\x61\x97\xeb\x53\x37\xfc\x25\x00\x00\xff\xff\x63\xa2\xf8\x2b\xe9\x0a\x00\x00") + +func fontsEftipitiFlfBytes() ([]byte, error) { + return bindataRead( + _fontsEftipitiFlf, + "fonts/eftipiti.flf", + ) +} + +func fontsEftipitiFlf() (*asset, error) { + bytes, err := fontsEftipitiFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/eftipiti.flf", size: 2793, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsEftirobotFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x57\xdf\x6b\x24\x37\x0c\x7e\xf7\x5f\xf1\x5d\x18\x18\x1b\xb2\x71\xb6\x70\xa5\xc7\xbe\xf8\x5a\xae\x4f\xd7\x06\x4a\x9b\x27\x73\xda\x10\x2e\xdc\xd2\xe4\x06\x6e\x43\xa1\x60\xfa\xb7\x17\x49\xf6\xcc\x78\x7e\xec\x6e\xe9\x92\xcc\xce\xac\x25\x59\x96\x3e\x7d\xd2\x3c\x3d\x3f\x7d\xf7\xd0\xe0\x7b\xbc\xc5\x76\x8b\x5b\xfc\x60\xcc\xef\x5f\x0e\x47\x3c\x75\x5f\x5f\x71\x38\xc2\x3e\x3a\xfc\x72\x78\xfc\xf2\xf9\x19\x1f\x9e\x5e\x0f\x2f\x0f\x7f\x1e\x8e\xd8\xbe\x7b\xf7\x16\x9b\x0d\xee\x3f\x7f\x3b\x1e\xba\xaf\xd8\xde\xdc\xf2\xe3\x76\x8b\x97\x87\xbf\x65\xd5\x80\xfe\xc7\x07\x80\xdf\xcb\x8d\x49\xf3\xdd\x67\x9f\x24\xd7\xab\xf7\xc7\xc7\xc3\x61\xf3\xfe\xdb\xeb\xe1\xf8\x7a\x25\x3f\xb5\xd1\x67\x09\xc3\x32\x6a\xe9\xa6\xb7\x14\xfe\x7a\x3e\x1e\x6e\x1e\xbb\x97\xca\x4e\xf5\xf9\xb1\xeb\xdc\x06\xf6\xf6\xd6\xa9\x84\x1a\x3a\xf7\x59\x93\xb1\xb4\x6f\xc9\x15\x43\x17\x04\x42\x64\xf0\xf3\x6f\xef\x7f\xfd\xe9\xc3\x38\xa2\xa0\x37\x6f\x48\x7e\x48\xc9\xe0\x12\x4b\x17\xec\x65\x9a\x30\xf9\x0b\x06\x84\x60\x2c\x5c\x30\x89\x52\x7e\xf4\x14\x83\x01\x10\xe4\xca\xeb\xce\x3a\xbe\x6f\x30\xbd\xf6\x42\x50\x49\x80\x73\x4b\x7c\x4b\x09\x49\xfe\x83\x81\x85\x93\xff\x60\x36\x9e\xd2\x26\x51\xdc\x8c\xd5\xb2\x5e\x16\x0c\xc6\x83\xd8\x99\x48\x40\x0c\xc6\x12\x91\x0f\x46\xac\xa9\xc7\xea\x34\x39\x0f\x71\xcb\xc3\xab\xae\x57\xef\xbd\x25\x57\xcc\x8b\x02\xe9\x29\xd0\x39\xd9\xc4\x59\x79\xf6\xe8\xf6\xad\xe3\x6d\xc8\xc7\x58\x2b\x88\x99\x60\xf4\x94\xc3\xa5\xf7\x35\x18\xc8\xe6\x09\xa9\x91\xf8\x35\x7c\x1f\x9b\x60\x10\x29\x66\x2f\x79\x21\x22\x98\x26\x21\x05\xd3\x48\x94\x1b\xcf\xf7\x9e\xbc\x5a\xba\xe1\x3f\x96\x8e\xfb\xd6\x73\xa4\x1c\x9f\xce\xca\xa9\xda\x7d\x44\x1f\xa5\xe1\xa6\xfe\x49\x8f\x06\x09\xb4\x04\x85\x7f\xd3\xa8\x50\x9c\xa9\x55\x97\x1c\xc4\x60\x7c\xdb\x5b\x1d\xbe\x18\x84\x1a\x7b\x37\x59\x3b\x61\x69\xf0\x8f\x74\x71\x96\x21\x68\x86\x46\x27\x1a\x6f\xad\x20\xf0\x12\x35\x8b\x5b\x51\x8e\xe4\xeb\xad\xd5\x3b\xb1\x33\x8e\x6b\x06\xed\xd8\x1e\x89\xbd\x44\x48\x62\xd6\xaa\x12\x15\xc9\xca\x1e\x89\xa0\x0f\x86\xc8\x65\x50\xcc\xec\xe5\x64\xc1\xab\xbd\xce\x9d\xb1\x07\xb1\xe1\x18\xc4\x7e\x6a\x6f\xc0\x11\x1f\xcf\x76\x2c\xb4\xb8\x29\x15\xdf\x20\x6b\xce\x89\x12\xc5\x95\xa0\xd8\x8e\xc3\xe1\x97\xed\xf5\x91\xcb\xcb\x12\x60\x5f\x0b\xcd\x92\x5a\x67\x76\x79\xb9\x40\x68\xa8\x0c\x0f\x2f\x95\xd1\xd7\x44\xcd\x16\xa3\x6f\x1a\xa0\xa6\xf6\x46\x98\xc3\xb4\x94\x22\x3b\xde\xe8\x0e\x3d\x30\xc2\x08\xaf\xd7\x19\x71\x0a\x38\x92\x68\x35\x99\x5c\xf8\x69\xec\x00\x65\xbc\x91\x02\xce\xde\x39\xde\xb8\x89\xd4\xc7\xad\x14\x7c\x61\x27\x68\xf4\xbb\x8c\x50\x22\x75\x24\xf6\xe8\x1b\x9c\x51\xc6\x11\x05\x6e\x17\xaa\x70\x07\x55\xa0\x01\xd4\x63\xef\xb5\x76\x13\xf7\x17\x61\x95\x25\xc0\xf6\x6c\x06\x61\xb3\x8b\x6c\x17\xe3\xd9\x7a\x76\x0e\x54\x00\x3c\xf3\x7e\x59\x81
\xb2\x42\x9c\x94\xef\x5c\xa1\xb4\x15\xa9\xa5\x85\x1d\x50\x04\xb4\xf5\xe0\xa3\x50\xe4\x89\x80\xf6\x5d\x4a\xe5\xdc\xac\x4b\x65\xc2\x91\x25\x70\xc5\xb3\x9c\x25\x9b\x39\x64\x1a\x93\xdc\xd8\xe0\xa0\x95\x9a\x3c\x97\x97\x05\xc1\xea\x11\xa3\xb4\xc3\x4a\x41\xe5\x71\x36\x41\x28\xa7\xdb\x6f\x5a\xf5\x99\x39\x4c\xfd\x46\xf1\x1d\xa3\x13\x4e\xd4\x22\xf2\x51\x63\x2c\xc7\x8d\xb1\xa8\xc5\x89\xda\x0c\x9b\x35\x1c\x16\x8e\x5e\xb0\x89\x8c\xcd\x3b\x94\xd8\xfb\xc5\xec\x5e\xb0\x43\xdf\x3e\xff\x39\xbd\xc3\x5a\x70\x55\xc1\x67\x44\x46\xba\xda\x68\x57\x24\xb7\x0e\xe9\x49\xbb\xcb\x39\x11\x04\xe8\xcd\xbc\x01\xce\x60\x97\x72\x7c\x3f\x26\x5c\x86\xd3\xa2\x10\xfd\x09\x5c\x51\x41\x4a\x51\x22\xca\x6a\x2d\xb0\x2f\x4d\x6d\xd4\xd7\xa6\xbb\x15\xeb\x28\x6d\x53\xa7\x22\xd6\x99\xba\x57\x34\xa8\x57\x51\x9d\x21\x0c\x3c\x36\x4d\xc2\x50\xf8\xb6\xa2\xca\xc5\xee\x58\x0a\xbb\xd4\x75\xa3\x47\x6b\xa0\xa8\x96\xee\xca\xb3\xda\xb8\xa4\x32\x7e\x22\x72\xf8\xa3\xc2\x06\x98\x7a\x5f\xb9\xa1\x43\x12\x9a\x5c\xc0\x52\x2f\xbd\xed\x82\x90\xeb\xdd\x7e\xb7\xdb\x5d\x07\xb3\xdb\xa1\x43\xc7\x31\xdd\x81\xae\x44\x33\x55\xc9\x58\xec\x34\x2b\x9d\x47\xc6\x6b\xde\x47\x9e\xd4\xcb\xe5\xc1\x6f\x69\x42\x82\x36\x5c\xfa\x34\x23\x81\x31\x5f\x14\xd1\x98\xe1\xbc\x38\x4c\xc9\xdc\xc9\x3c\x14\x2b\x6e\xa3\x92\xd0\x3c\xe8\xc9\xe8\x0a\x1d\x52\xa6\xb3\xc7\x92\x7f\x52\x20\xd1\xd6\x13\x1c\xaf\x5d\xb7\x85\xae\x25\xc7\x99\x61\x66\x63\x4a\x35\x37\xe8\xb6\x51\x5c\xf1\xeb\x27\x2d\x3c\x9e\x52\x15\x94\x32\x4d\xf4\x7c\x3e\x66\xf1\x85\xe5\x94\xa7\xe5\x09\x67\x17\x12\xce\x7b\xb5\x6b\x9c\x7d\xae\x69\x8c\xf1\xd0\x4f\x8c\x05\xf2\xfb\xb6\x1c\x82\x67\xf3\x58\xd7\xea\x0a\x94\xd6\x3b\xd8\x09\xec\x2c\x8c\xb8\x8b\xa2\x51\xdf\x8c\xbc\xbc\xab\xad\x8b\x0e\xe0\x40\xcc\x04\xb0\x98\xcc\x3e\xe3\xcd\x89\x8c\x5b\x6a\x95\x16\xdc\x68\xd4\x2a\x69\x38\x83\x9d\xb1\x67\x39\x19\x7f\x14\x9e\x3d\x01\xdb\x2c\x7a\xbf\x16\x1a\x54\x6f\x45\x34\x52\x28\x2a\x9f\x96\x88\xa0\xff\xe2\xb7\x6b\x0a\x26\xe2\x9e\x4b\x6d\x5a\xb8\xa7\x9c\x71\xf0\xe5\xf5\x65\x29\xf8\xd0\x8e\x94\x07\xf4\x89\x55\x2d\x62\x65\x69\x6d\x6b\x56\xdb\x21\x93\xbb\x04\xb7\x7f\x71\x9c\xa1\x36\x71\x22\xd3\x40\xb3\x99\x65\xf9\x4b\x58\x9b\xa7\x18\x27\x4f\x18\xfb\x27\x07\x15\x9f\x6c\x74\x9a\xc0\xa6\xc9\x3e\xcf\x6f\x7a\x5f\xf9\x6d\xff\x3f\x4d\xbb\x33\x85\x73\x23\x48\x56\xb8\xbc\x0f\xe7\x10\xdf\xe1\xee\x2c\xf1\x9e\x10\x5d\xa9\xb3\x22\x7a\x0a\xa2\xe7\x06\x7a\x40\x4c\x4b\x5d\xb2\xc2\xbf\x01\x00\x00\xff\xff\x62\x49\xb5\xa2\x76\x13\x00\x00") + +func fontsEftirobotFlfBytes() ([]byte, error) { + return bindataRead( + _fontsEftirobotFlf, + "fonts/eftirobot.flf", + ) +} + +func fontsEftirobotFlf() (*asset, error) { + bytes, err := fontsEftirobotFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/eftirobot.flf", size: 4982, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsEftitalicFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x58\x5f\x6f\xdb\x36\x10\x7f\xd7\xa7\xb8\x16\x02\x28\x01\x8a\x29\xd1\x7f\x14\xaf\x45\xa1\x6e\x68\xdf\xb6\x00\x43\x97\x27\x21\x74\x10\x34\xa8\xb7\xd4\x02\xea\x6c\xe8\x03\xd1\xcf\x3e\xf0\x8e\xa4\x48\x91\xb2\x1d\xd4\x28\xc4\xd8\xfa\xf1\x78\xf7\xbb\xbf\xec\xe3\xd3\xa3\xb8\xcf\x61\x0d\x2b\x68\xd6\x50\xc3\x75\x96\x7d\xfa\xb2\x3f\xc2\xe3\x70\x78\x86\xfd\x11\x8a\x87\x12\x7e\xdf\x3f\x7c\xf9\xfc\x04\x1f\x1e\x9f\xf7\x5f\xef\xff\xd9\x1f\xa1\xd9\x6e\xd7\x70\x75\x05\xb7\x9f\xbf\x1d\xf7\xc3\x01\x9a\x45\xad\xbf\x8a\x16\xfe\xbe\x3f\xe0\xdb\x0c\xe4\x4f\x7c\x00\x80\xef\xf0\x8f\x4c\xc5\xa7\x47\x1f\x85\xcf\xd7\xef\x8f\x0f\xfb\xfd\xd5\xfb\x6f\xcf\xfb\xe3\xf3\x6b\xfc\x89\xf5\xdc\x20\x32\x8d\x21\x49\x0b\x27\xa9\xfb\xf7\xf8\xfc\xb0\xf8\xef\xe9\xb8\x5f\x3c\x0c\x5f\xad\x9c\xe0\xf3\xeb\x30\x94\x57\x50\xd4\x75\x49\x72\x48\xd0\xa7\xcf\x4f\xf0\x0b\x14\xcb\x65\x09\x5b\x01\xdb\x8d\xb6\xbc\xd9\x4e\x15\x8a\x3f\x85\xdc\x31\x59\x5a\x41\x52\xc2\xc7\xfb\xef\x91\xa0\xba\x19\xb9\x53\x48\xc2\xc7\x3f\xdf\xff\xf1\xdb\x07\x9f\x51\x90\xaf\x5e\x49\xfc\x41\xa9\x0c\xd4\xcf\x70\x3d\x9e\x95\x69\xc5\xf2\x4e\x2f\x39\xe8\x25\x07\xbd\xe4\xa0\x17\xfd\x84\x0e\x5f\xf2\x56\x2f\x9c\x23\x06\x5f\x16\x25\x2e\x30\x62\xa0\xcb\x78\x8b\xc0\xdc\xee\x36\xbf\x1b\x18\x98\x5f\x24\x6f\xf5\x3f\x23\x91\x73\xfd\xeb\x15\xe7\xfa\x9f\x13\xe9\xed\x93\x74\x78\xc5\x40\xf2\x2e\x03\xd9\xc3\x6e\xa1\xcf\x92\x52\x56\x4c\xe3\x39\x0f\x94\x28\x4a\xdc\x80\xca\x72\x5e\x94\x9e\x12\x12\xe5\x17\x43\x09\x5d\x56\xb1\xe1\xb6\xed\x32\x25\x0f\x2a\xb0\x43\xe2\x5e\xb4\x02\x8d\xc0\x77\x56\x15\x54\xe4\x0d\x43\x48\x8e\xf2\x11\xa1\x94\x87\x31\xd2\x94\x42\x3e\x89\xb3\x9c\xac\xac\xde\x30\x02\x2e\xa0\xea\x32\xf8\x0e\x5d\xc6\x60\x17\x1d\xa3\xbf\x48\xde\xa2\x91\x20\x8d\x31\x21\xd9\xc1\x43\xdb\xc8\xd9\xc8\x80\x15\x21\x25\xd2\xc4\x7d\x37\xf8\x8f\xa2\xcc\xbd\x63\x8d\x43\x46\xea\x42\xff\xa1\x5d\x15\x83\xbe\xcb\x38\xd4\xc0\xbb\xac\x37\xfc\xc3\x94\x20\xc6\x8d\xc9\x5d\x96\x4f\x35\xd7\x91\xac\x8f\x90\x00\x25\x0a\x7c\xd0\x32\x0a\x29\x65\xeb\x7c\x6f\x0e\x44\xfd\x41\x43\x2b\x86\xe6\xf0\xb7\x48\x36\xfa\x7d\x0c\x14\x4b\xda\x02\x57\x3a\xbc\x62\x03\x4f\x8b\x25\xa1\x9c\x62\x49\x49\x34\x47\x0b\x9c\x86\x32\x54\xac\xed\x32\x3e\x68\x80\x72\xef\x7d\x11\x12\xf8\x98\x14\x91\x83\xc8\x4c\xe0\x83\x36\xb3\x62\x03\x28\xa7\xfa\x44\x1f\x30\xef\xf5\x6b\x48\x0b\xf3\x1d\x5d\x94\x7e\xac\x44\x6f\x6e\x27\x61\xfc\xf6\xad\xf6\x71\xdf\x07\xf1\x05\xc6\x0c\xb2\x03\x23\x04\x17\x5f\x35\xcc\x95\xbe\xd7\x3e\x7c\xf7\x0e\xc3\xc1\x8f\x15\x6b\x1e\xa0\x7d\x00\xd0\x1a\xc3\x72\xb0\x0a\xf9\x61\x41\xe8\x8a\x21\xdf\xa0\x8a\x01\x19\xcd\x75\x15\x6b\xf3\xa9\x27\x09\xbc\xc0\x50\xc3\x13\x50\xbd\x43\xa8\x5e\xa8\xc4\xa2\x34\xd0\x3e\xa8\x0c\x21\x1a\x9d\x8a\xb6\x6a\xe7\xa1\x37\x78\xda\x69\xe0\x8e\x56\x26\x3c\xe2\xa3\x09\x88\x61\xa4\x17\x48\x50\xe8\x07\x31\xc8\x11\x8a\x58\x3e\xe6\xea\x9c\x8e\x07\x13\x31\xa1\x8e\x60\x24\xea\xda\x89\x12\x77\x40\xa7\x1f\x24\x4f\x4b\x04\x8b\xe4\xf6\xe4\x49\xd6\x4a\x2a\xf2\x1a\xa5\x85\x68\x58\x1f\x24\x99\x3d\x1a\x0c\x3f\x9c\x4a\x2c\x56\x65\x14\xd9\xf7\xe7\x8e\x96\x86\xa0\x89\x31\x23\xae\xaf\x98\xc1\xe2\x5f\x46\x55\x1e\x18\xe5\xed\xb3\xdb\x94\x3d\x41\x29\xbb\x49\x85\x7b\x12\xf5\xcb\xa4\xdb\xb4\x7e\xf9\x61\xad\xc8\x5b\xd6\xc0\x94\xb7\x52\x12\x4d\xa6\xcd\x4b\x74\x94\xed\x64\x1f\x15\x31\x13\x2d\xf3\xbd\x6e\x44\x4b\x69\xea\x3b\xd8\x20\xf4\x02\x2b\x2c\x31\xc4\x95\x09\x18\x0e\x7f\xcd\x39\x38\x04\x2a\xb8\x45\xa0\x4a\x01\xc1\x41\x5b\x07\xb6\xf0\x43\x58\x9f\xa7\x2e\x53\xa0\x38\xc5\x34\x07\xd2\xb9\x62\xf2\x20\xfb\x68\x8b\xdd\xe1\x36\x28\x20\xc1\x73\xc9\x63\xb9\x96\x40\x1d\x03\x2a\x46\x1b\x2a\x8
6\xa9\x19\xe9\x64\x13\xde\x26\xb2\x26\x11\xe7\x20\x5c\x89\x78\xee\xca\x21\xd5\x43\xc0\x72\x8a\xdb\xac\xaf\x43\xe7\xb9\xce\x90\x53\x26\xd1\x1a\x08\x33\xb1\xc3\x17\xfd\xfc\xb4\x14\x2d\xd6\xdf\x3a\x87\xf4\x57\xb0\x75\x3d\x31\xa8\xd8\x23\x2a\x36\x50\x58\xb6\x81\x70\x34\x15\x33\x78\xc0\xfe\x17\xb4\x37\xdb\x1d\xf4\x6e\xfd\xa2\x77\x71\x1a\xb4\x0f\x90\xdc\xeb\xb4\x2a\xe0\x37\x92\x33\xe8\x10\x91\x45\x28\xc7\x35\x05\xd4\x44\xb6\x60\xa6\x8e\xb4\x1c\xdf\x1a\xd6\x65\x52\x37\xa4\xa9\x35\x98\x8a\x87\x70\xe0\xc1\xd7\x38\x05\xd2\x44\xc5\x93\x44\xd3\x98\x88\x08\xd3\x7e\x39\x9f\x60\xec\x4b\xad\x2e\xe7\x7d\x1f\x4d\xbf\x56\x8b\xd4\xf4\xe4\xac\x00\x0a\xaf\x9e\x21\xf1\x07\xa7\xec\x2c\x50\xcb\x4c\x75\xbf\x24\x2b\x91\x5d\x76\x0a\xc1\xda\x83\x10\xdf\xae\xa4\x88\x90\x58\x0a\x54\x54\x82\x52\x23\x76\x2f\x14\x0f\x0c\x03\xb3\x9c\x09\x32\x62\x6c\x3e\xc0\xe9\xda\xc0\xa3\x76\xec\x46\x59\x02\x28\x57\x02\xc3\x40\x74\x10\x02\xdd\x45\xdd\x9a\xd2\x87\x74\xed\x6f\xb5\x91\x15\x3b\xc4\xfe\x23\x44\x7f\x8b\xd9\x5b\x3a\x2f\xc6\x19\x08\x24\x62\xd2\xcc\xec\xc4\xc5\x55\xde\x65\xbd\x32\x63\x57\x17\x75\x58\x30\x8d\x78\x52\xb1\x83\x89\x4b\xf5\xfa\xc1\x69\xec\x42\xe5\xb5\x78\xce\x7b\x4e\x95\x05\xc0\x5b\xac\x02\x45\x29\x29\x8c\xf5\xe8\x04\x76\x76\x9a\x19\x0d\x2c\xb8\x22\x2c\xf6\xa7\x74\xb1\xb7\x48\x6c\x0b\x60\x1b\x88\xeb\x20\x51\x0f\xd3\xcf\x9b\x9b\x93\xb5\x27\x78\x9d\x08\xdb\x1b\xb8\x39\x15\x16\xae\x6a\x9b\xe1\x93\x93\xee\x3a\x37\xa9\xb9\x72\x8b\x6e\x36\x35\xee\x38\x77\xc5\x6d\x36\x0d\xe2\xec\x6d\xd1\x65\xbc\xad\x07\x63\x69\xd2\xe0\x75\xa2\x3b\xe1\x55\x6d\xa6\x3b\x35\x9b\x0d\x7e\xa3\x9b\xac\x77\x8f\xa6\xe8\x9f\x8a\xbf\x76\x4c\x14\x25\x6a\x94\x53\x8f\xc8\xc1\x77\x78\x73\xbd\xb4\xf9\x59\x60\xea\xc5\x8d\xa4\xd9\x0a\xfc\x93\x1a\xd5\x99\x89\xba\xd9\x2e\x49\x49\x7e\x11\x78\x85\x96\xdc\x5d\x26\x79\x8d\xe0\xfe\x32\xc9\x9b\x17\xc6\x72\xb3\x6d\x3d\xe7\x9d\x15\x7f\x6d\x9b\xb5\xe9\xd7\x78\x82\x9d\xa1\x06\x6c\x6c\xf6\x9c\x60\xa0\x77\x02\xb6\xd9\xc9\xeb\x84\x29\x1a\xa2\xc6\xd0\x93\xc4\xfd\x99\x9b\x82\xa8\x9b\xb1\xa5\x9e\x07\x0b\x8f\xfb\xb3\xe0\xa5\x4f\xa7\x81\x79\xf7\x10\x39\xa5\x53\xd4\xe8\x5a\xab\xf7\xec\xcd\x41\xd4\x6b\x2f\x5e\x92\x35\xcd\x57\x43\x7b\xd5\xa9\x7c\x42\x68\x8b\xea\x5a\x6d\xcf\x08\x25\x57\xf0\x9e\xbf\xe4\x32\x20\x9a\xda\x19\x78\xea\x42\x20\x9a\x26\xb3\xf6\x9d\xc6\x09\x67\xdb\x69\x9c\x76\x05\x65\xc1\x69\xdc\xea\xf2\xfa\x2c\x1a\x4d\x98\xec\xfb\x4b\x46\x7c\xd1\x5c\xe3\xff\xbf\xf1\xcb\xc0\x5b\x32\xeb\x22\xb0\xa8\x5f\xd8\x2a\x84\x58\xbe\xb0\x98\x0b\xb1\xb2\x05\x0f\x76\xbb\x64\x83\x11\x02\x63\x12\xa3\x81\xb1\x19\xc8\x66\x9c\x5e\xee\x66\x20\xed\x08\xf9\x31\x03\xb9\x3e\xd7\xec\x84\xd8\x8e\x52\x66\x20\xcb\xda\x31\x32\xb6\x14\x0d\x34\x13\x73\xd5\x2e\x8a\x90\xb5\x65\x33\x19\xad\xc7\x11\xdd\x16\x9e\xa5\x98\xd2\x34\x9d\xbe\xc5\x72\x39\xa5\x29\x86\xac\xa6\x34\xc5\x90\xf5\x94\x83\x18\xb2\x09\x74\x49\xcc\xe0\x62\xd9\x06\xba\x24\x21\x23\xd9\x6c\x4e\xca\x36\xd0\x25\x05\x59\x35\xfe\x80\xfd\xe3\xe4\x80\x2d\x56\x11\x89\xd3\x69\x45\xac\x22\x12\x63\x48\x44\x62\x0c\x59\xa7\x63\xcd\x87\x6c\xce\x4d\x4e\x62\xb5\x1d\xa7\x7d\xcb\x74\x62\x7a\x12\x6b\x0a\x38\x1a\x47\x0d\xdd\x49\x5c\xe3\x0d\xe9\x77\xb3\x43\xba\x58\x8b\x8b\xa6\x36\xb1\xb6\xb3\x8a\x64\x2c\x3d\x64\x8b\xb5\x25\x42\xde\xdc\xa4\x21\xff\x07\x00\x00\xff\xff\x8b\x4c\x56\x74\xb9\x1a\x00\x00") + +func fontsEftitalicFlfBytes() ([]byte, error) { + return bindataRead( + _fontsEftitalicFlf, + "fonts/eftitalic.flf", + ) +} + +func fontsEftitalicFlf() (*asset, error) { + bytes, err := fontsEftitalicFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/eftitalic.flf", size: 6841, 
mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsEftiwallFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x58\x6b\x6f\xdb\x36\x17\xfe\xae\x5f\x71\x1c\x19\x15\xa9\x8a\x64\xe2\xf6\x6d\x9a\x20\x0e\x94\x77\x68\xd1\x0f\xcb\x0c\x0c\xdd\x56\x60\x5a\x8e\x32\x43\x5e\x84\x26\x26\x60\xbb\x41\xd6\xb2\xf9\xed\x03\xa9\x1b\x25\x5f\x55\xd2\x86\xad\x23\xf1\x3c\x3c\x97\x47\x87\x97\xd9\xfd\x6c\x74\x3b\x84\xd7\xf0\x1a\x5e\x8d\xe0\xcd\x08\xde\x7a\xde\xc7\xbb\x7c\x09\x33\x39\x5f\x41\xbe\x04\x32\xa5\x70\x9d\x4f\xef\xb2\x7b\x78\x37\x5b\xe5\x0f\xb7\x9f\xf3\x25\x9c\x9c\x9d\xfd\x0f\x18\x83\xdf\xb3\xc5\x32\x97\x73\x38\xe1\x46\x1c\x8d\x60\x96\xfd\x6d\x9e\x7a\x80\x0e\x0d\x00\x44\x6a\x2e\x3c\xb5\x3e\xfa\x5a\x53\xe6\xf7\xe8\x6a\x39\xcd\x73\x76\xb5\x58\xe5\xcb\xd5\x91\xb9\x15\x24\xa2\xec\xe1\xe9\x3e\x05\x12\xaf\x91\xe2\x2f\xcb\xd5\x94\x3f\xde\x2f\x73\x3e\x95\x0f\x15\x4e\xab\xfd\x5f\x4a\xca\x80\x1c\x1f\xd3\x02\xa7\x00\xfa\x98\xdd\xc3\x39\x90\x57\xaf\x28\x9c\x8d\xe0\xec\x0d\x8c\x4e\xe1\xe4\xac\x6b\xd0\x7a\x23\x98\x06\x48\x2b\x20\x44\x78\x7f\xfb\xb4\x06\x74\x7c\xd2\xc4\x4e\x99\x20\xbc\xff\xf5\xea\x97\x9f\xde\xd9\x11\x05\x1c\x0c\xd0\xdc\x50\xca\x03\xe5\x12\xeb\x66\x2c\x6f\x38\x1c\xc6\xd6\x0f\x63\x2c\x8e\xbd\xca\xf6\xcd\x57\xac\x6c\x55\xc7\x44\xe8\xc7\x8f\x76\xc7\xc7\x6e\xc7\x61\xf5\xd5\x02\x2f\xba\xf2\xa2\x77\x08\x40\x00\x20\x8c\x3d\x7d\xc9\xcd\x8d\x46\xb7\x19\x46\x24\xd6\x00\x42\x0f\x9b\x18\x49\xa4\xa2\xbc\xd1\x52\x6b\x14\x0b\x72\x69\x01\x51\x8f\x44\x8d\x40\xca\x10\xd0\xb6\x96\x56\x4a\x01\x10\x20\xd0\x0f\x74\xfe\x28\x00\xd3\xda\x01\x80\x7e\x64\x75\x8f\x63\x2f\x2a\xae\x78\xec\x29\xf8\x90\xc3\x00\x54\xec\xa5\xe6\x16\xc1\xaa\x27\xc0\x96\x8e\x48\xcd\xbd\x40\xa3\xdb\x36\x63\xe5\xa8\x94\x09\x54\xd6\xd2\x64\x2d\xf8\x85\x67\x60\x25\x49\x00\x12\xca\xa0\xfa\x5c\xc6\x1e\x28\x43\xcd\xc1\x60\x3d\x81\x1b\xfc\xd8\xdc\x8c\xd1\x57\xcb\xcf\x30\x93\x0b\x90\xab\xbb\x6c\x01\x41\x36\x5b\xe5\x81\x29\x19\x4b\xcb\x95\xcd\xcd\x72\x70\x73\xb3\x38\x57\x67\x2c\xf6\x2e\x6a\x3f\x18\xa1\x68\xf2\x5b\xf5\x30\xee\x68\xcf\xfa\x7b\x63\x9c\xd9\x5e\xe2\xea\xfc\x6d\x6c\x4d\x52\x37\x36\x93\x69\x6d\x5f\x68\x72\x18\x16\x49\x09\xa1\xa4\x51\x41\x6d\xfd\x47\x40\x02\x0d\x0b\xda\x4b\x39\x29\xd5\x27\x52\x76\x43\x51\xbf\x7a\x9c\x73\x5b\x26\x1c\x38\x2d\x65\x03\x40\x74\x06\xfa\x00\x48\x90\x6e\x00\xcc\x15\x40\x02\x73\xb5\xc0\x11\x20\x84\xd0\x0d\xe0\x09\x9e\x5c\x63\x30\x71\x03\x98\xb8\x66\x01\xe4\x41\x00\xa5\x70\x54\x09\x69\xf9\x9a\x9a\x32\x59\x50\xf8\x20\x25\xd3\xd6\x95\xf6\xbe\xb1\x7f\x2c\xf2\x55\x06\xff\xca\x2f\x0b\x78\xc8\x96\xcb\xdb\x7f\x32\xb8\xcb\x16\x99\x5d\x71\x9d\xde\x58\x80\x79\x63\xb4\x9e\x18\x04\x26\x58\x16\x22\x13\xa7\xaf\xf0\x75\x6b\x9c\xdc\xac\xdf\x55\x3b\xf7\x95\xce\xca\xfa\xee\x1f\xe3\x18\x35\xc9\xb0\xff\x0b\x97\x58\x00\x90\x6e\xe5\x48\x3d\xb1\xfc\x68\xad\x88\xa2\xc8\x0d\x40\x4a\xe9\x06\x30\x1e\x8f\xdd\x00\x5e\xbe\x7c\xe9\x06\xe0\xfb\xbe\x13\x00\x0b\x9f\x43\xe6\x64\x41\x18\x86\x6e\x2e\x28\xa5\xdc\x5c\x48\x6f\x02\x37\x17\xc4\xe9\xe9\xa9\x13\x00\x8a\x53\xb7\x18\x50\x4a\xdd\x00\x08\x21\x6e\x00\x4f\x4f\x4f\x0e\x00\x4d\xa1\x36\x32\xab\xe7\x0c\x76\xb8\x05\x71\xef\x7a\x80\xdd\x18\x38\x16\x94\xcb\x4f\x17\x6e\x00\x41\x98\x3a\x01\x24\xec\x86\x09\x27\x0b\x6e\x6e\x6e\xdc\x5c\xa0\xca\x91\x48\x89\x12\x6e\x00\x2f\x5e\xbc\xe8\x07\x40\x3a\x00\x48\x7b\xf0\xa0\x75\x38\x10\x7b\xea\x67\x29\x3f\xc3\x47\x3d\x6f\xa6\x5c\xcb\xcd\xf3\x28\xa8\x36\x02\x4a\x59\x13\xa3\x05\x50\xa9\xc3\x87\x6c\x91\xa9\x96\xb2\x6a\xab\x96\x5b\x30\x5b\x15\xa2\xa0\x19\x5b\xe9\x75\x0c\x5f\x57\x6f\x01\xe8\x48\x1b\x65\x7d\x
a1\x01\xe3\x67\x01\x11\x87\xe4\x39\x2e\xf6\x69\x48\x20\x41\x14\x40\xb1\xda\xb6\x26\x88\xbf\xa1\xa8\x2d\x08\x12\x91\x16\x61\x22\x66\x81\x66\x36\x81\xe6\x28\x41\x77\xc7\xc1\x00\xbb\xdb\x26\xb5\x81\xb4\x49\x92\x94\x4f\x84\x10\x2d\xfa\x16\xcb\x3e\x65\xe0\x61\x2d\x07\x13\xa9\x2f\x3b\xe9\x54\x1d\x78\xc5\xc7\xe3\x31\x6f\xe4\x6f\xdf\x25\xc8\x6f\xdf\x77\xf0\x81\x98\x4f\xa5\x10\x70\x53\x99\x78\x59\x99\x02\x00\x72\x09\x17\xb4\xac\x54\x07\x31\x32\x8a\xaa\x95\x86\x91\x85\x21\x54\xb2\x87\xd2\xed\x15\xb0\x8c\xc2\xc8\xe6\x21\x0c\xdf\x1a\x91\x20\x9d\x48\x39\xac\x90\x0c\x90\x41\xb2\x56\x5d\xea\xa8\x35\x3f\xa2\x42\x85\xfd\x5e\xb0\x76\x9d\xfc\x01\x80\x56\x3c\x10\xaf\xaf\xaf\x11\xfb\x00\x74\x96\x7a\x5c\x29\xc5\x7b\x58\xd0\x9c\xe5\x68\x05\x0e\xc0\xcf\xcf\xcf\x2d\x80\x73\x03\x70\xae\x7b\x1c\x5a\x64\x1e\x1f\x5b\x67\x49\xe4\xf8\xf9\xb8\x97\x0b\x22\xf1\xed\xb3\xa2\xbd\x9c\xc0\x26\x11\xb1\x07\x32\x80\x24\x1a\x73\x01\xa9\xec\x93\x46\xb4\xd3\x06\x02\x13\x48\xc3\xc3\xb3\x80\xb5\x19\x46\x0e\x92\x84\x09\x91\x1e\x9a\x05\xbf\x13\x03\x1f\xe0\x02\x43\xbc\x2c\xe5\xa1\xdf\x06\x60\x6f\x59\x1b\xc0\x87\x6e\xf3\x63\xcf\x1f\x4b\x39\x19\x57\x6d\x22\xe5\xd8\x37\x3d\x75\x41\x29\xd1\x74\x41\xf1\x9b\xdd\x15\x69\xf6\x31\x5b\x32\x13\xe8\xb8\xf4\xa2\xf7\xa0\x3a\xb7\xaa\x56\x32\xd0\x59\xc9\x7c\x9a\xf4\x58\xc9\x18\x80\xa8\x05\x20\x69\xbd\x14\xd2\x1b\xd2\x60\x1f\x3b\x09\x45\x42\x77\xb9\x90\xca\xa0\x03\x20\x6d\x80\xd4\x70\x23\xa8\x64\x56\x02\xb0\x83\xe9\xcd\x03\xf4\x31\xe5\xb5\xac\xfe\x94\x20\xff\x52\x3b\xb8\x51\x7f\x4b\x6e\xf8\x9c\xd7\xb2\xe6\x86\x3f\x01\x9f\x96\xb2\xf4\x27\xcc\x27\xe8\x33\xe6\x6f\x65\xa7\xc6\x2f\x8e\xb3\xfa\x54\x29\xbb\xa8\x44\x0c\x99\xb2\x00\x8d\x0b\x74\x0f\x40\x3b\x0d\x04\x91\xda\x79\x68\xce\xeb\x21\xf6\xe6\x73\xc6\x88\x44\x94\x94\xb1\xf9\x5c\x83\x94\x9f\x9d\x17\x00\x56\x3d\xb6\xc6\x69\x33\x76\xc3\x64\xd9\xb1\xb4\xa5\x00\x1d\x3b\x0f\x3d\x6e\x69\x18\x6b\xaf\xbd\x35\x63\x39\xdf\x6b\x01\x74\x2c\x88\x6b\xca\x1b\x0b\x38\xef\xb7\xfa\x47\xbf\x1d\x03\xca\xc8\x4e\x0b\xcc\xe1\xa6\x0d\x10\xa9\xf6\x69\x40\x0c\x71\x3f\x17\xa6\xd3\x69\x0b\x20\x0c\xb7\xc7\xe0\xbf\x00\x00\x00\xff\xff\x02\x9d\xcc\x12\xdf\x1a\x00\x00") + +func fontsEftiwallFlfBytes() ([]byte, error) { + return bindataRead( + _fontsEftiwallFlf, + "fonts/eftiwall.flf", + ) +} + +func fontsEftiwallFlf() (*asset, error) { + bytes, err := fontsEftiwallFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/eftiwall.flf", size: 6879, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsEftiwaterFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x56\x4f\x6b\xeb\x46\x10\xbf\xef\xa7\x98\xf7\x58\x90\x16\x6c\xaf\xdd\xb4\xb8\x81\x24\x6c\x5a\xd2\x93\xdb\x43\x69\x7a\x12\x8c\x8c\xb1\x1b\xd1\x24\x1b\x22\xa7\x27\x7d\xf8\x32\x33\xbb\xd2\xae\x24\x97\x86\x27\xfc\x1e\xd2\xfc\xe6\xef\x6f\x76\x66\x73\x7a\x3e\x7d\xb7\xd7\xf0\x3d\x5c\xc1\x66\x0d\x6b\xf8\x51\xa9\x3f\x9e\x9a\x16\x4e\xfe\xf5\x0c\x4d\x0b\xe5\xc1\xc0\xaf\xcd\xe1\xe9\xf8\x0c\x0f\xa7\x73\xf3\xb2\xff\xbb\x69\x61\x73\x7d\xfd\x03\x2c\x97\xf0\xe7\xf1\xbd\x6d\xfc\x2b\x6c\x56\x6b\xfa\xbc\x5a\xc3\xfe\xe3\x2f\x46\x15\xe0\x37\x3c\x00\x60\x6b\x7e\x51\xdd\x34\x7a\x78\xbe\xde\xb7\x87\xa6\x59\xde\xbf\x9f\x9b\xf6\xfc\x95\x24\x1d\x8c\x9e\xa2\xb2\xf2\xd2\x29\xc2\xc4\xd3\xaa\xf7\xe4\x5a\xff\xf6\xd4\xec\x57\xc7\x8f\x77\xff\x76\x5c\xfd\xf3\xdc\x36\xab\x83\x7f\x49\xfd\xfc\xe4\xbd\x59\x42\xb9\x5e\x1b\xf1\x23\x8e\xfe\xef\x33\xd6\x2d\xb1\x2e\xd0\x44\x47\x9f\x20\x84\x75\xe1\x97\xdf\xef\x7f\xfb\xf9\x81\x98\x05\xfc\xf2\x05\x99\xe2\xae\x53\xf0\x19\x4f\xff\x11\x43\x69\xed\xd2\x7f\x4e\x01\x82\x53\x60\x8c\x53\x65\xa9\x9d\x02\x0f\x24\x04\xa7\x1e\x1f\x45\x0b\x44\x80\xfc\x73\x0a\x96\xc6\xd0\xcf\xa9\x65\x59\xd2\x8f\x55\xe4\x61\x77\xaf\x48\xfe\x4a\x74\x6a\x85\xc6\x12\xf8\x28\x1e\xc0\x29\x6f\xb7\x4e\x59\xeb\xe5\x33\x08\xa1\xa4\xe0\xb8\xed\x85\xe8\x94\x2d\x46\xc1\x9d\x02\x6b\x35\x25\x09\x24\xae\x2a\xd1\x24\x7b\x4d\x1f\x4a\x73\x0d\x60\x6d\xcc\xff\xee\x26\x73\xc1\x91\x80\x8c\x97\x5d\xcc\x39\x68\xb2\x13\x09\x1a\x53\x62\xbd\xa0\x16\x85\x9a\x84\xda\xe7\x42\x90\x8a\x06\x21\x75\x8e\x09\x05\xa6\x14\xcb\xc8\x4f\xe4\xba\x18\xb8\x8e\x16\x64\x80\x22\xc6\x24\xb5\x04\xc0\x68\xd0\x37\x83\x42\x17\x1d\x4c\x62\x48\x7c\x63\x90\x8b\x62\x4e\xfa\xe0\x9c\xd8\x4e\x6c\x06\x00\x11\xd9\xd9\x5c\x56\x66\x67\x46\xda\x02\x04\x11\x66\x59\x41\xa0\x67\xcc\x91\x08\xa9\x32\x5b\xb0\xd0\x5a\xa7\x6e\x6e\xa4\x8f\x99\x26\x62\xce\xbb\x74\xf6\xee\x8e\x28\x86\x3c\xb5\x22\x10\x06\x72\x68\x03\x2f\x81\xfb\x83\x64\x8d\x8b\x84\xfb\x00\x5a\xac\xc5\x2e\x6f\x4c\xb4\xc4\x60\x69\x72\x50\x4b\x4b\xa5\x72\x84\x0c\x14\xc3\x8a\x21\x9b\xf2\x24\x56\xbb\x60\xa5\x61\xc6\x65\x00\x61\x0c\x4a\x2e\xd2\xdc\xb4\x83\xa1\x29\xb3\x25\xe4\x73\x9c\x4e\x8e\x8c\x06\xcb\x57\x44\x5b\x72\x18\xb6\xa4\x5f\x57\x93\x53\x62\x42\xa9\x19\x20\x21\x2a\xbb\xe0\x28\x75\x7f\x26\xf3\xec\xaa\x3a\xc0\xb3\x04\xcf\x8e\x45\xce\xfe\x84\x8d\x19\xcb\xda\xcc\x59\xd6\xd5\x85\x49\xc8\xe6\x07\x47\x0d\xd5\x90\xc5\x8b\x4c\xd6\xb3\x03\x9c\x81\x8b\x62\x0c\xf6\x70\x50\x48\x54\x06\x1a\x25\x38\x0d\x03\x9d\xeb\xaa\x2f\x97\x93\x63\x17\x35\x93\x08\x73\x59\xdb\x45\x31\x77\x0c\xc1\xa9\x0e\x3b\xa7\xba\x4e\xcb\x1b\xb9\xa2\xc2\xaa\x8a\xcb\xe3\x51\x9a\x68\xeb\xae\x8b\xda\xa4\x55\xd4\xb4\xe6\x86\x9d\x99\xfe\x77\x7b\x7b\xcb\x3e\x29\x39\xa7\x20\xdd\xac\xd0\x77\x62\xca\x57\x7a\x9a\x4c\xba\x2c\x20\xd9\x25\xd9\x16\x91\xad\x17\xf7\x61\xe6\x2e\x18\x11\x50\x4c\x36\x55\x2d\xbd\xcc\x62\xcc\x64\x36\x0c\x40\x38\x39\x30\x3d\xad\x1e\x02\x49\xe9\x2c\xe5\xc2\xd1\x1c\x2d\x48\x5c\xe9\x9c\xe2\xe9\x38\xc6\x94\x20\x38\xaa\x37\xf9\x0c\xf5\xb8\xc0\xd5\xa8\x7a\x18\xed\xdf\x8b\x80\x31\x97\x2c\x24\xed\x14\xe8\xf4\x68\x89\x33\xc3\xf1\xea\x18\x6a\xd9\x4d\x6b\xe9\x79\x9c\x36\x8a\xb7\x39\x5d\xe5\x55\x69\x8b\xbc\x27\x01\x12\xb0\xec\xe1\xd4\x92\x43\x9a\xfc\xd2\x9b\xb8\xec\x8b\x64\x39\x27\x91\x2c\xbe\x78\xcf\x94\xe1\x9e\x99\xf6\x84\xa9\xc0\x30\x23\x4e\x69\x13\xee\x99\xe0\x73\x5b\xd1\x9f\x24\x7a\xb8\x77\xc0\xfb\xcb\x37\x88\x47\x7f\x79\xc1\x79\xf0\x97\x77\x4a\x70\x3b\x3f\x40\x11\x9b\xf6\x3c\x20\xb3\xf4\xcf\x5e\x67\xbc\xee\x9c\xfa\x37\x00\x00\xff\xff\x14\x6c\x6e\x6d\x0e\x0c\x00\x00") + +func fontsEftiwaterFlfBytes() ([]byte, 
error) { + return bindataRead( + _fontsEftiwaterFlf, + "fonts/eftiwater.flf", + ) +} + +func fontsEftiwaterFlf() (*asset, error) { + bytes, err := fontsEftiwaterFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/eftiwater.flf", size: 3086, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsEliteFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x56\xbd\x6e\xdb\x30\x10\xde\xf9\x14\x37\x64\x0d\x6d\x07\x09\xda\x78\xa9\xbb\xa4\xc8\xd0\x87\x50\x6d\x29\x76\x7f\xa2\x40\x56\x6b\x74\xd3\xc0\x41\x83\x06\x4d\x7a\x80\x8c\x7c\x01\xed\x7a\x14\x3d\x49\x71\xe4\x91\x3a\x32\x2e\x0a\x14\x05\x9a\x41\xb0\x21\x93\xf2\xf1\xf8\xdd\x77\xdf\x1d\x99\x7d\xcd\xae\x92\x0b\xb8\x81\x1b\x58\xad\xe0\x72\x05\x6f\x60\x89\x1f\x71\x97\x3f\x96\xf0\xfe\x7b\xb9\xcf\x8b\x35\xbc\x13\xe2\x63\x5e\xa4\x70\xff\x98\xe5\x6b\x21\xf6\x65\xf9\x74\x5c\x2f\x16\xa7\xf4\x93\x4c\x8a\xed\xfe\xf0\x23\x95\x79\xf1\x80\xf3\xc5\xd5\x72\x75\xb5\x7c\xbb\xba\x5d\x5e\x5f\x5f\xdf\xdc\x2e\xd0\x14\x2d\x4f\x27\x59\xe4\x3f\x8f\xc9\x56\x6e\xf3\x6f\x8b\x72\x9f\xee\x8a\xe4\x94\xe5\x8f\xe5\xf1\xb2\xdc\x65\x32\x39\x3e\x09\x71\x77\xff\xc1\xec\xba\x2d\xd2\xa4\x4c\x77\x70\x3a\x94\xfb\x35\x90\x87\xa7\xa4\xcc\x8b\xcf\x5f\xcc\xf2\xec\xf0\x80\x4b\x2f\xd3\xdd\xa1\xcc\x0b\x71\x01\x70\xb1\xf9\xdd\x73\x23\xc6\x4e\x8d\x9d\x02\x1c\xd4\xe6\xdb\xe0\xb0\x1d\xbb\x7a\xe8\x37\x42\x8e\x5d\x05\x1b\x01\xe6\x67\x23\xfc\xe7\x3f\x0e\x01\xe0\xe5\xe3\x15\xe1\xfb\x07\x11\xda\x9c\x8c\x9d\x1a\x7a\x70\xd9\xc0\x10\xf1\x87\x32\x56\x9b\xa9\x7f\x43\x16\xda\x8c\x1a\x22\xc4\xb1\xe2\xdd\x9d\xf5\x38\x76\xda\xbd\xf1\x3e\xc7\x4e\x4d\x82\x50\xce\x6f\xbd\x11\x43\xef\x6c\x2a\x72\x0e\xe7\xfc\x36\xe4\xb4\xb6\x53\x45\xfe\x5a\xe7\xd2\xca\x8c\x39\x43\x4f\x38\x9d\x80\x02\x93\xa4\x06\x72\x35\x49\x13\x9c\xb7\xc6\x99\x49\x6e\x54\x05\xdf\xea\x39\xa0\x01\x24\x99\x28\x94\x37\xca\xdc\x38\xb3\xd6\x3a\xc0\x6a\x57\x10\xa1\x67\xa1\x92\xad\xe5\x80\x43\x66\x15\x25\x19\x24\xc6\x19\x18\x60\x2c\x1b\x40\xb4\x29\x9e\x9d\x10\xcd\x1f\x32\x01\x32\x48\x9d\xb7\x76\x2f\xaa\xf8\x45\x63\x43\xe7\x00\x87\xde\x90\xa5\xa7\x0c\x00\x6f\x09\x01\xf1\xb4\xc6\xee\xde\x32\x52\x00\x10\x60\x4d\x7b\x69\x13\xad\x9f\xb6\x6e\xdf\x3a\xe2\x76\xac\x9e\x6d\xa2\x90\x1a\xd6\x96\x1a\x9b\x86\x49\xaa\xd6\x5e\x4d\x78\x64\x20\x2a\x23\x7d\x96\xf2\x86\xa9\xa9\x7a\x0e\xa5\x05\x2c\x24\x57\x3e\x32\x20\x16\x97\xa0\x0f\x04\x34\xf4\x12\xec\x26\xa4\x6d\xae\x69\x1d\x96\xc0\x14\xe6\xa4\x0c\x08\x89\xe7\x3c\x02\xb8\x9c\x85\xac\x02\x11\x81\x8c\x4c\x0b\x27\xd4\xad\x43\xed\x5c\xc7\x7e\x09\x9b\xe9\x95\x60\xe3\xd5\x34\x02\xd6\x4c\x14\x67\x42\xf2\x4e\xe2\xc4\x57\x91\x1b\xd6\x4d\x58\xc4\xca\xa0\xf2\x01\x56\x5c\x2c\x7a\xe8\x31\xb7\xf6\x3c\x01\x24\x55\x46\x95\xde\xda\xf4\xb3\x9e\x86\x30\x86\x5e\x86\x87\x92\xd5\x12\x51\x3a\xd5\x40\x2d\xc3\x0a\x77\x4d\xc0\xa8\x2e\xaa\x70\x16\xa7\xa3\xd4\x01\xb3\xa4\x13\x34\x88\x7a\xa5\x74\xbd\xb7\x8a\xfb\xe4\xcb\xea\x84\xb8\x38\x79\x6b\xb3\xab\x68\xef\x9a\xd2\x40\x22\xd6\x7e\x62\xa2\x8f\x1b\x0f\xae\x36\x6a\x54\x53\xc6\x75\x70\x72\xbf\x90\x96\x07\x17\x17\x1b\xf5\x0c\xd4\x75\x6b\x29\xd2\x86\xac\x78\x29\x31\xc5\x52\x8b\xf9\x91\xf6\x88\x09\x8a\x8c\xc4\xca\xc4\x68\x1a\x75\x28\xda\xa0\x33\x05\xea\xf5\x45\x18\xc9\x38\x24\xd3\xcb\x90\x1a\x8e\xa5\x03\x1c\xbc\xc6\x3d\x91\x49\x57\xa7\x94\x49\xed\xaa\xd1\x87\x48\x82\x23\xaf\x54\x71\xca\xa0\x56\xb1\xea\xea\x98\x18\xcf\x33\x4f\x5a\x35\xf4\x12\x2b\xcd\x38\xb5\x07\x4f\x78\xb0\x19\x2a\x6d\x99\x07\xea\xe6\x87\x7a\x45\x46\x14\x89\xff\x33\x3c\x33\x6d\xb7\x9c\xef\x1b\xf3\x7d\x63\xbe\x6f\xcc\xf7\x8d\xf9
\xbe\x31\xdf\x37\xe6\xfb\xc6\x7c\xdf\x78\xfd\xf7\x8d\xbf\x1c\xfe\x0a\x00\x00\xff\xff\x36\xe3\x42\xc2\x8d\x13\x00\x00") + +func fontsEliteFlfBytes() ([]byte, error) { + return bindataRead( + _fontsEliteFlf, + "fonts/elite.flf", + ) +} + +func fontsEliteFlf() (*asset, error) { + bytes, err := fontsEliteFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/elite.flf", size: 5005, mode: os.FileMode(420), modTime: time.Unix(1559099119, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsEpicFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\xdd\x6e\xdb\x36\x14\xbe\xd7\x53\x7c\x08\x02\x94\xc4\x92\x12\x4e\x8b\xa2\xd9\x95\x87\xa1\xbb\x1b\xb0\x07\x20\xc0\x29\x36\x6d\x0b\xb3\xe5\xc0\x92\xd7\x06\xe0\xc3\x0f\x3c\x3c\xa4\xfe\x63\x3b\xe9\xdd\xec\xd6\xd6\x11\x75\xc4\xf3\xff\xf1\x50\xca\x6a\xbb\x7a\xc8\x6f\xf1\x88\xaf\x98\x7d\xc2\xec\x0b\x66\x5f\xb2\x6f\xcf\xc5\x02\x4f\x2f\xf8\x7d\x9b\x1f\x97\x16\x7f\xe6\x87\xba\x28\x2b\xcc\x1e\xd4\xe3\xe7\x2c\xfb\xa3\x58\x6f\x6d\x8d\x83\xdd\xda\xbc\xb2\x78\xf8\x38\xc3\xfd\x3d\x66\x0f\xf8\xed\xb8\xc6\xec\xf1\xf1\x73\xf6\x97\x3d\xec\x8a\xaa\x2a\xf6\x25\x8a\x0a\x1b\x7b\xb0\x4f\x2f\x58\x17\xff\xda\x12\xf5\x1e\xbb\xfd\xb2\x58\xbd\xa0\xde\x14\x15\x56\xfb\xb2\xbe\x43\x5e\x61\xbb\x2f\xd7\xfe\x58\x6f\x6c\x46\x0c\x85\x3d\x7c\xa8\x50\xe6\x3b\xeb\xe7\x78\xde\xe6\x0b\xbb\xc4\xbe\x44\x8e\xc5\x7e\xb7\xb3\x65\x8d\x6d\x51\xda\x8f\x59\xf6\xed\xc7\xf3\x36\x2f\xf3\xda\x4b\xdb\xaf\xb0\x2a\x0e\x55\xb8\xf6\x6b\xe6\x6d\xc3\x3d\x6e\x76\xf9\xba\x58\xa0\x3c\xee\x9e\xec\xe1\x06\xab\xfd\x01\xab\x62\x6b\x51\x2c\x6d\x59\x17\xab\x62\x41\x37\x67\x39\x00\xdc\xa3\xda\xec\x8f\xdb\x25\xf2\xed\xf7\xfc\xa5\xc2\x93\xc5\xdf\xf9\x87\x3b\xba\xa9\xdc\x7f\xcf\x6e\x03\x53\xbd\xb1\xb8\xd9\xe4\x87\xe5\xd3\x36\x2f\xff\xb9\xf1\x0e\x78\x3e\x14\x65\x5d\x79\x1b\x72\xd0\xe8\x1d\x9e\x8e\x35\x16\x79\xf9\xa1\xf6\xd3\x54\xbb\x63\xb5\xb1\xcb\xec\x31\xcc\xb0\xb1\xc5\x7a\x53\x7b\x8d\x73\x2c\x36\xf9\x21\x5f\xd4\xf6\x90\x7d\x7d\xe5\xe2\x1d\xca\x7d\x8d\xa2\x5c\x6c\x8f\xcb\xa2\x5c\x63\x69\xab\x85\x2d\x97\xf6\x50\x65\xb3\x4f\x74\xdb\x2e\xff\x41\x96\x63\x6b\xcb\x75\xbd\x81\xb0\x3f\x22\x73\xdb\x69\x95\xc4\x2f\xc8\xb1\x3a\x2e\xd7\x16\xab\x7c\x51\xef\x0f\xd9\xec\x0b\xcd\xb0\xb4\xab\xfc\xb8\xad\x83\xb2\xbb\xfd\xd2\x92\xe1\x29\x54\x91\x2d\xb8\xd2\xeb\xd7\x99\x37\xcb\x6e\x6f\xe7\xa7\xbe\xf3\x0c\x06\xf3\x4c\x40\xce\x33\x07\xd7\xfe\x11\x46\xc6\xab\x44\x01\x81\x3b\xde\x40\xf7\x40\x3b\xa7\x40\x57\x3d\xc3\xb9\x47\x22\x0c\x7f\xfd\x89\x80\xa4\xaf\x3f\x31\x0e\x8e\xbe\x24\x3a\x32\xca\x70\x45\x18\x39\x76\x05\x8e\xff\x85\xd9\x8c\xa4\x6f\x12\xd8\x92\x4a\xf7\x90\x18\xe3\x8c\x33\xc6\x78\x63\x60\xfc\x47\x79\xcb\x05\x8d\x92\x00\xff\x81\xb7\x52\x69\xcf\x2b\xbd\x57\xb4\x09\x9f\xe0\x11\xaf\x67\x47\xca\x3c\xd1\x4c\x28\x3d\xcf\x84\x04\x14\x14\x8d\x28\x04\x77\xf9\xa3\x27\xe8\x88\x79\x46\x47\x21\xe7\x99\x56\x1d\x57\x45\x5f\x45\x47\x29\x63\x34\x51\x02\x5a\x41\xd2\x98\x06\x4f\xe6\x0f\xda\x4b\xf4\xd3\xfa\x51\xb2\xcd\xcf\xe8\xc7\xbc\xea\x4a\xab\x46\xdb\x6e\xf4\x39\x8c\xd3\x3f\x1c\xb4\x20\x47\xb2\xee\xfe\x6e\xe1\x39\xd8\xf9\x7c\x06\x0d\x1d\x74\x63\x4f\xa5\xec\x21\x1e\xdd\xe1\x91\x88\x21\x6c\xce\x78\x6e\xa3\x92\x2b\xe6\xed\x60\x7a\x0e\x1d\xfc\x1b\x67\x8a\xae\xf5\xb1\xf7\xe1\x8f\x39\x92\x52\x24\x8c\x37\x5e\x67\xf1\xc1\xe1\x5a\x75\x92\xa5\x97\x2b\x21\x41\x99\x24\x4b\x39\x83\x90\x72\x85\x82\x24\xbb\x0c\x1d\x92\x12\x72\x98\x2b\xaf\xfd\x74\x82\xd3\xcf\xac\x2e\x11\x92\x35\x66\xad\x7c\x9d\xf9\x22\xf9\xad\xe2\x1f\x64\x36\x70\x66\x66\x7b\x62\x2c\xb3\xb9\x9a\xc0\x45\x08\x04\x20\x12\x3e\x5a\x8c\x44\x74\x3b\x91
\x42\x41\xc9\x40\x7a\x31\x0c\x55\x10\xc6\xc8\x00\x58\xa6\x67\x3c\x6b\x1d\x8b\x87\x62\x1e\x42\x2e\xb9\x74\x28\x40\xa3\x84\x21\x0c\xe1\xb2\x31\x6a\x42\x6b\x05\x13\xb5\x26\xf3\x82\xd6\xc1\x2d\x08\x69\xed\x53\x38\xb8\xc6\x13\x21\xa1\x05\xe5\x83\xf1\x2e\x8c\x80\xa2\x7a\x5a\xb3\x88\x46\x86\x8e\x2e\xd4\xa1\x9c\x09\xb4\x64\x70\xbe\x37\x1d\x22\xb9\x56\x7a\x06\x45\xf5\x4e\x20\xc0\x32\xd4\x08\x52\x99\x84\x2c\x60\xfd\x39\x86\x92\xc1\x54\x31\x9a\xc6\xdc\x4a\xe5\x44\xdc\x22\xc5\x33\x81\x6f\x4c\xf5\x7e\x39\xf5\x42\x6d\x8c\x57\x92\x42\xed\xab\x8f\xc8\x68\x31\x43\xaf\x6e\x19\x24\xd9\x20\x43\xe4\xa4\x41\x49\x06\x54\x92\x21\x46\x65\xb8\x60\xbb\x46\x54\x82\x26\x16\xc4\x10\x20\x48\x8f\xcb\x98\x0e\x8c\xe4\x88\x93\x2b\x55\x72\x6a\x83\xcd\x2a\xe5\x21\x97\x04\xb4\xe9\x15\x45\xdb\x0e\x24\x3b\x48\x46\x47\xb7\x50\x82\x03\x86\x0b\xec\x98\x92\xd1\x9f\xc2\xc5\x29\x52\x6a\x7b\x06\x77\x5e\x3c\xc6\xa1\x64\x70\x3a\x0a\x46\x63\xb7\x74\xc1\xd0\x44\x1f\xcb\x08\x3c\x11\x77\xa2\x25\x71\x6d\x08\xcb\x24\x27\x54\xb3\x26\x25\x54\x6b\x0e\x0d\x8c\xca\xc1\x59\x97\x13\xad\x45\x2d\xae\x6a\xe3\x02\xe5\xa8\x82\x46\xf5\x5b\xa2\x13\x11\xa1\xa5\x2a\xc2\xac\x5f\x92\x52\xf5\xb5\x8a\xaf\x5d\x7b\xa9\xb4\x79\x34\xa2\x46\xd3\x4d\xb0\x98\xa8\x96\x61\x9d\xfd\xd4\x0a\x3a\x88\x75\x70\x42\x48\xd5\xac\xf6\x0c\x2c\xc1\x54\x8a\x7b\x5c\xeb\x87\x59\x30\x5e\xfb\x0d\xcc\x83\x01\x3d\xe5\x1a\xd7\x25\x8f\x36\x0c\x92\x2c\x76\x14\x7e\x2a\x67\x37\x59\x97\x62\xb4\xb4\x49\x46\x30\x82\x64\xc4\xb6\x45\xc4\x40\x39\xc8\x98\xcf\x4e\xb1\x91\xa3\x32\x5e\xc3\x30\xc7\xcc\x93\xa4\x08\x0e\x6b\x96\xab\x69\xe0\xe7\x25\x51\x37\x6a\x72\x38\x1a\xaf\x38\x20\xae\x83\xae\xe3\x4c\x45\xbc\x62\xa2\x2e\xcf\xc2\xe2\xa8\x31\x2d\x85\xc9\x57\x17\xda\xf1\x2e\x19\x32\x91\x2a\x4e\xfc\xae\x78\x30\xaf\x27\x75\xca\x41\x73\xaa\x7d\x48\xa2\x1d\xa3\x6e\x2f\x1f\xdf\x9f\xbb\x26\x7e\x08\x49\xa9\x46\x15\x03\x87\x18\xe9\x24\x47\x48\x13\xda\x15\x9e\x61\x2c\x1e\x03\x19\x48\x32\x12\x8e\x38\x24\x1c\x19\x21\x9d\xf6\x31\x92\x69\x81\x56\x23\xf1\x68\x7c\x05\x6e\xcf\xc9\x2b\x9a\xdb\x44\xea\xd6\x52\x0d\x22\xd5\xa0\x07\x15\xce\x73\xee\xcc\xe7\x99\x33\x2a\x86\x71\x42\x86\xe0\x65\xb0\x9f\x36\x3f\xab\x06\x39\xaf\xe8\x13\x72\x85\xb6\x10\xa1\xda\xc2\xff\x80\x8e\x46\xba\x41\x39\x9e\xc6\xab\x96\x1d\xa2\xc9\x2b\x6f\x93\x88\x4d\xae\x8e\xb3\x09\x0d\x9d\x4a\x5e\xc7\x86\x58\x12\x77\x92\x21\x07\xb9\x7b\x1a\x77\x7b\x00\x32\x24\x4f\xd6\xc7\xb0\x06\x93\x0c\xc8\x34\x85\x91\xa9\x3e\x68\x8a\xf7\xd5\xf9\xf9\x76\x28\x9d\xec\xf0\x19\x16\xed\xd0\xea\x9d\x76\xc4\xed\x1e\x85\x46\x24\x3b\x68\x41\x64\x3b\xf4\x1b\x31\xd1\xf4\x1f\x3f\x70\x06\xb6\x7b\xae\xc1\x93\x88\x9f\x88\x25\x5d\x52\x1a\x31\xb6\x6d\x4d\x11\x9b\xc0\xc4\xf7\xe7\xd5\x05\x32\x3a\x3d\x12\xf5\xd4\x2a\x3e\x18\x89\xdd\x37\x77\xda\x6f\x90\x61\x92\xf2\x02\xa9\xce\x9b\xea\x4f\x98\x70\x91\x1d\x82\x75\x0b\x1a\xfb\xee\x4c\xf1\xa3\x10\x93\x1e\x51\x08\xca\x27\xcf\x1b\x76\x7d\x72\x1a\x4b\x2e\x92\xd1\xf6\xca\x78\x52\x8c\xfa\xaa\xc9\xdd\x33\x36\xbd\x48\x9b\x5e\xa6\xe2\xfe\x57\x9c\xc4\xdd\x58\x1c\xa9\xfa\x62\xa8\xc7\x8e\xa2\xa9\x95\x41\x4f\x4f\x5b\x34\xcd\x84\x0e\x6b\x45\x6c\xcf\x53\x7f\xce\x06\xa7\xbd\xa6\xe6\xbd\x75\x67\xe1\x69\xab\xd6\xd4\xa4\x64\xab\xdd\xe8\xb1\x9d\xdd\x1d\xd5\xa8\x59\x16\x4a\xf3\xe0\x59\xbf\xf3\xde\xb3\x97\xcb\x89\x2e\xac\xc8\xce\xa3\xbf\x98\x4f\xd3\x3f\xd7\x16\xfe\xda\xc2\xbf\x4d\xc6\xb5\x85\xbf\xb6\xf0\xd7\x16\xfe\xda\xc2\x5f\x5b\xf8\x6b\x0b\x7f\x6d\xe1\xff\x07\x2d\x3c\xc2\xeb\x17\x11\xde\x12\x49\xf2\x84\x0a\xab\x8f\xe6\xf5\x48\x8a\xc0\x32\x7c\xa8\x3e\xf8\x73\x84\xd6\x1f\x21\xf4\xc6\xa2\x45\x61\x2d\xe3\xd7\x79\x32\xbc\x03\x73\xa1\x89\x77\xf4\x72\xcc\xd0\x20\xbd\x24\x1c\x4a\x4
b\x6f\x78\x95\xd1\x46\xf9\x76\x4b\x91\xeb\xde\xd8\x67\xb3\x46\x88\xe5\x2f\xa3\x91\x53\xe8\x37\xda\x76\x9c\xd1\x27\x5f\x22\x63\x14\x61\xcf\x58\x13\xc6\x64\x5c\x84\x1a\x6f\x94\x71\xf5\xd5\xf4\x1a\x3d\xdc\x1b\xa5\xfd\x4e\x18\x55\xcd\x68\x60\x70\x71\x17\x74\x7a\x8d\xfe\x2f\x00\x00\xff\xff\x8c\x73\xa3\xb7\x7e\x26\x00\x00") + +func fontsEpicFlfBytes() ([]byte, error) { + return bindataRead( + _fontsEpicFlf, + "fonts/epic.flf", + ) +} + +func fontsEpicFlf() (*asset, error) { + bytes, err := fontsEpicFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/epic.flf", size: 9854, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsFenderFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x18\xdb\x6e\xdb\x36\xfb\x9e\x4f\xf1\x21\x48\x41\x1b\x3f\x45\xfd\x2e\xd6\x75\x5b\x8a\x41\x37\xdb\x0b\xec\x56\x17\x52\x6c\xd9\xd6\x6a\x4b\x85\x2c\x23\x2d\xc2\xbc\xfb\x40\x7e\x3c\x8b\x52\xec\x06\x31\x4f\x22\xbf\xf3\x89\xdc\x9f\xf6\x1f\xeb\x47\xf8\x0c\x9f\x60\xf3\x2b\x64\x1b\xd8\x7c\x24\x7f\x37\xdd\xae\x19\xe0\xf9\x07\xfc\xb3\xed\xfb\xb1\x19\xe0\xb7\xfc\xf7\x5f\x60\xf5\xef\xd7\xa1\x1e\xc7\xa6\x2b\x4e\xf5\x0b\x3f\x34\xfd\x70\x68\xc6\xfe\xa5\xe3\xcd\xee\xba\x26\x40\xfe\xfa\xfe\xed\x54\x77\xf5\xd8\xf6\x1d\xf4\x7b\xd8\xb7\xc3\x65\x84\x53\xdb\x35\x7f\x10\x89\x05\x32\x78\x38\xd7\x87\x76\x0b\xdd\xf5\xfc\xdc\x0c\x0f\xb0\xef\x07\xd8\xb7\xa7\x06\xda\x5d\xd3\x8d\xed\xbe\xdd\xaa\xc3\xa4\x06\x00\xc8\xe0\x72\xec\xaf\xa7\x1d\xd4\xa7\x97\xfa\xc7\x05\x9e\x1b\xa8\x6a\xca\xd4\xa1\xae\x7f\x21\x8f\xb8\x69\x3c\x36\xf0\x70\xac\x87\xdd\xf3\xa9\xee\xbe\x3e\x40\x96\xc1\xb7\xa1\xed\xc6\x0b\xd4\x17\xa8\x41\xad\x32\x78\xbe\x8e\xb0\xad\x3b\x3a\x4a\x30\x97\xf3\xf5\x72\x6c\x76\xe4\x33\x42\x38\x36\xed\xe1\x38\x4a\x8a\x6b\xd8\x1e\xeb\xa1\xde\x8e\xcd\x40\x3e\x2d\x7c\x64\xd0\xf5\x23\xb4\xdd\xf6\x74\xdd\xb5\xdd\x01\x76\xcd\x65\xab\x44\x76\x21\x9b\xff\xab\x63\xe7\xfa\xbb\xe2\x1c\x4e\x4d\x77\x18\x8f\xb0\x6a\xbe\x9b\xcd\xdb\xfe\x7c\x6e\x3a\x14\xcc\x65\x0d\xff\x83\x1a\xf6\xd7\xdd\xa1\x81\x7d\xbd\x1d\xfb\x81\x64\x1b\x05\x61\xd7\xec\xeb\xeb\x69\x44\x62\xcf\xfd\xae\x51\x8c\x8f\xc7\xf6\x02\xfb\xbe\x1b\x61\x75\x6a\xbf\x36\xf0\x90\x9d\x61\xf3\xe9\x01\xfa\x4e\xc1\xad\xbb\x9d\x82\xbb\x26\x9b\x8f\x0a\x0a\x4a\x5a\x92\x1f\xa0\x25\x40\x1e\x1f\x1f\x8b\xe5\xa6\x20\x42\x80\xdf\x00\x04\x23\xd9\x14\x84\x52\xa0\x14\x67\x66\xf5\x96\xbe\x20\x20\x40\xc8\x05\x2a\xff\x60\x7e\x3e\x3d\x88\x5f\xa0\x20\x9c\x0a\x2a\xa8\xa2\xca\xae\x41\x25\xa8\x60\x78\x00\x57\x0b\x02\x76\x9f\x3d\x5b\x90\x0a\x00\x0c\x33\x42\x63\x12\x1a\x86\xea\x91\x59\x00\x66\x91\x7b\x03\x89\x9c\x32\x44\x8d\x74\xf2\x8a\x32\x60\x86\x94\x82\x54\x8c\x89\x84\x60\x94\xc4\x3c\x09\xa6\x1a\x49\x81\xdc\x64\x78\x53\x2d\xf2\x24\xb8\x06\xa5\xdb\x82\x54\x42\xb3\xad\xd9\x2b\x08\x30\xd9\x72\x2b\x3d\xbb\xd5\x90\xc2\x40\x20\x57\x0c\xa1\x66\x99\xc8\x32\x2d\x27\xa5\x02\x10\x40\x43\x91\x23\x72\xb5\x59\x64\x76\xec\x41\xf7\x70\x4c\x1a\x86\xc8\x1c\x77\xde\xb1\x0c\x31\xc7\x80\x96\xc0\x71\x1e\x09\x0b\x00\x28\x55\xbd\x96\x2d\x45\x65\xcb\x4e\x59\xd4\x8c\x09\x72\x2a\x75\x88\x12\x16\x89\xbe\x62\x8c\x25\x55\xa8\x0d\x86\x6a\xbb\x09\x3a\x2e\xac\x8e\x6c\xa7\x68\x11\x4c\x89\xd6\x18\x1d\x47\xeb\xc8\x73\xb5\x6f\xb5\xe2\x9c\xa7\x50\x31\xaa\x4f\x82\xb5\x57\xb9\x22\xfc\x15\xca\x39\xf7\x6c\x2d\x30\x53\x00\xc8\x8d\x59\xe7\xb9\xb1\xf0\x3c\xe7\x92\x4e\xee\xa0\xc0\x74\x18\xc2\x52\xd0\x84\xd0\xde\x89\x1e\x22\x65\x34\xa1\x86\x2f\x51\xc3\x18\x63\xd6\xb3\x10\x90\xe6\x4f\xae\xa8\xf3\x95\x98\x3f\xaf\x82\x43\x8e\x4b\x52\x72\x72\xa0\x25\x88\xbd\xe2\x2d\x3
a\xe6\x3b\xad\xa0\x1e\x3a\x2d\xcf\xf5\x3a\x5b\xad\x20\x58\x93\x34\x04\x44\xf8\xa3\x34\x18\x3c\x62\x24\xe1\x82\xa2\xb6\x45\x30\xd6\x08\x81\x6e\xe6\x43\x2b\x59\xd8\x43\xe3\x3d\xda\xc3\x98\xf6\x36\x70\x33\xf3\x2d\x32\x47\xed\x97\xcc\xfa\x1b\xb5\x04\x47\x1e\x98\x06\x17\xa2\x0a\x81\x27\x85\xac\x14\x46\x5d\xb8\xc5\x11\xe7\xa1\x4c\x02\x21\x5b\xef\xe4\xda\x2b\x85\xf2\x46\x6b\x79\xd2\x63\xd2\x09\x42\x22\xe3\xa5\xff\x21\xcf\xa1\x2c\xdd\x5c\xda\x3f\xc7\x05\x6b\x42\x80\x3b\x0a\xc2\xf5\x54\x2e\x70\xdf\x01\x62\x87\xb0\x56\x29\x34\xc7\x36\x77\xd8\x88\xf0\xf4\xf4\xf4\x04\xf1\xba\x72\x3d\x6e\x32\x52\x0a\xb0\x91\x22\x65\xce\xd7\x66\x46\xca\xee\xf8\xac\xa9\x6a\xda\xb8\x4f\xc2\xdc\xd0\xd2\x35\xeb\xff\x08\x8d\xba\x73\x5c\xf3\xa6\x16\xed\x2a\x38\x2e\xb9\xb8\x1d\x1a\x07\x0b\x2d\x48\xc4\x36\xac\xc6\x20\xc8\x54\x60\x54\x97\x2b\xdc\x09\x4a\xe8\x08\x85\xc0\x9c\xb3\xbe\xc7\xa9\xdc\x5b\x19\x2a\xac\x46\xa9\x1f\xf5\xad\x3e\xe5\x90\x2f\xc4\x4d\x4a\x91\x2b\x67\xff\xb3\x43\x81\xa1\x79\x41\x6e\xf7\x40\xa3\x68\x68\x0b\x72\x53\xfc\x68\xdf\x14\xc2\x86\x53\x21\xbe\x7c\x01\xab\x05\xe3\x1a\x72\x6f\xe4\x15\x09\x68\x81\xf2\xd2\x23\x6d\x6b\x0b\x66\x5b\x62\xea\xd2\x1a\x28\x4b\x9e\xe7\x22\xb0\xb1\xd4\x8c\xdb\x59\xec\xb9\xd1\xcc\xa2\xf0\x30\xf8\x20\xcb\xd2\x87\x5f\x96\x3e\xf8\x72\x02\x3d\x9c\x18\x5b\x74\x31\x10\x62\x03\xbc\xd7\x16\x6d\x15\x10\x38\xae\x3d\x17\xf8\xc9\x14\x44\xc2\x4f\x84\x4b\xc3\x9a\xd1\x78\x6c\x78\xd6\xf4\x95\xe5\x52\xb4\xba\x89\xc4\x9b\x8d\x88\xdb\x78\xe2\xc7\x39\x8b\x80\x3b\xf8\x8b\x69\xfa\x1e\x3f\x91\x05\x99\xe0\xef\xfa\x89\x1f\x12\x02\x03\x4c\x8c\xab\x1b\xa2\xbc\x4e\x4a\xd2\xeb\x0a\xa2\x53\x94\x29\x6c\xe4\xcc\x78\xa3\x9c\x99\x02\x47\xb6\x65\x9e\xb0\xbb\xc8\x08\x8d\x2b\x06\x34\x87\xa4\xe6\xce\xe6\x15\x02\x8d\x04\x11\xe6\x06\x4d\x94\xf2\xe2\x69\x41\x28\x92\x5a\x59\x38\x66\xd7\x9f\x26\x84\x80\x84\xed\xb2\xeb\x72\x08\x99\x83\xf6\x53\x6a\x13\x7e\xcd\x68\x65\xeb\x04\x6b\xcb\x46\x1e\x67\xab\xd0\x96\x84\x5f\xf8\xa6\xfa\x99\x02\xde\x95\x1d\xae\xde\xd0\xa2\xd0\x25\x87\xd6\xa8\xe7\x5f\x51\xc9\xeb\x5d\x53\xa7\x7d\x50\x75\x42\x50\x06\x71\xed\x6e\x20\xfc\x5b\x4e\xb4\x31\x5d\x3f\xcd\xb7\x52\x4a\xb6\x38\xac\x50\x49\x0b\x4d\x52\x9c\x78\x87\xe1\x48\x9d\x49\xa8\x15\x26\xbe\x85\x8c\x60\x16\xfd\x91\x1f\x74\x5c\x11\x33\x1f\x13\x62\xae\x25\x7e\xe6\xe9\x51\x52\x91\xbe\xf3\x39\x47\xf2\xb2\x8d\x21\xde\x15\x03\xef\xb0\x31\xd1\x30\x02\x61\xda\x84\x5c\x46\x80\xb4\x39\xc8\x7b\xe3\x93\x15\x02\x06\x5e\xf0\xe6\xe8\x0e\x33\xf4\xcf\x22\xf6\xaf\x5e\xc1\x65\x0e\x2a\x14\xe6\x5d\xf2\xf7\x6a\xfe\x34\xff\xfa\x6e\x7e\xe3\xd5\xd9\xcd\x29\x5d\x70\x04\x9b\x4f\x63\x8a\xc3\x04\x89\x35\x4e\x15\xd5\x38\x5c\xa5\x27\xbe\x1c\xfe\xab\x09\xc5\x37\x10\x1e\x83\xd2\x4a\x5b\x33\xb6\x72\x37\x06\xfc\x77\xe9\xfb\xdd\x02\x23\x49\x24\xa9\xb4\x3e\xe0\xce\xf2\xf4\x66\xdb\x98\x7f\x64\x88\x35\x8d\x15\x41\x64\x1b\xe0\x7c\xd3\x2f\x04\x97\x99\x42\x27\xab\x7c\x2f\x73\xa6\x0a\xce\x58\xdd\x23\x49\x35\xc3\x94\xab\x06\x8c\xb7\xc7\xc9\x63\xca\x94\xeb\x57\xe6\x61\xb2\xa2\x74\xad\xef\x9f\x33\x91\xc2\x65\x27\x5b\x86\x2b\xdc\xe1\x1a\x40\x18\x6c\xd2\x91\x22\x92\xc6\xcc\xbd\x44\x41\xba\x4f\xc5\xba\xbc\x50\x89\xc9\x96\x1c\xf9\xb2\x34\x62\xc0\x8a\x22\x53\xb5\xd8\x84\x1d\x14\x11\x89\x12\x62\xc6\x98\x93\xb4\x61\x09\x81\xaf\x3a\x73\x29\xf2\x7d\x49\x19\x6b\xd1\x1b\xcc\x90\x01\x88\xe0\x79\xc6\x8f\x50\x46\x71\x58\x3b\xa0\x84\x72\x2f\xc9\xfb\x8e\xfe\xfa\x2a\xa7\xaf\xaf\x72\x55\xb6\x6e\x66\xbe\x85\x27\x84\x48\xff\xd4\xc7\xb7\x37\xdc\x2b\x7b\xb9\xf4\xf6\xe6\x66\xe6\x5b\x4c\x00\x4e\x3f\xc0\x07\x28\x88\x6c\xe3\x3d\xd3\x13\xea\xed\xc6\x7b\xc0\x31\x0f\x26\x6a\x66\x9f\x4b\x0a\xe2\x3f\x96\xb8\xb7\x91\xe9\xcb\x48\x
34\xd3\xcf\xae\xca\x63\x52\x0f\x42\xe2\xd6\xb7\x36\x07\x26\x75\xf8\x66\x30\x92\x53\xf3\x84\x65\x3f\xdf\x5f\x85\x68\x30\x3f\x1d\x29\x53\x64\x38\x37\xbe\xb9\x8a\x48\x4b\x14\x1f\x9b\xc2\x35\x23\x94\xe9\xb5\xb0\x28\xc8\x7f\x01\x00\x00\xff\xff\x82\xd6\x40\xe2\xfd\x1b\x00\x00") + +func fontsFenderFlfBytes() ([]byte, error) { + return bindataRead( + _fontsFenderFlf, + "fonts/fender.flf", + ) +} + +func fontsFenderFlf() (*asset, error) { + bytes, err := fontsFenderFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/fender.flf", size: 7165, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsFourtopsFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x55\x7f\x8b\xdb\x38\x10\xfd\x5f\x9f\xe2\x5d\x08\x24\x4b\x57\xd1\xa6\xcd\xfd\xb1\xa5\x1c\x2a\xdc\xf5\x03\xdc\x5f\x7b\x20\xd0\x2a\xb1\x92\x98\x3a\xf6\xe2\x1f\x74\x0b\xc2\x9f\xfd\xd0\x48\xb2\x95\xb4\xdb\x83\x63\xbd\x8e\x3c\x9a\x79\x7a\xf3\x66\x3c\x3e\x56\xc7\xf7\x66\x89\x1d\x3e\x60\xbb\xc5\x03\xb6\x3b\xf6\xa5\x19\xda\xbe\x79\xe9\xb0\xff\x8e\xbf\x4d\x5d\x98\xaa\xf2\xbf\x5d\x73\xc1\x4e\x3c\xee\xf0\xa9\xa5\x07\x79\x2a\xdb\xde\xec\x37\x43\x77\xd8\xd8\x62\xf8\x83\x7d\x29\x4f\x95\xed\xd1\xda\xca\x9a\xce\xe2\xfd\xe6\x01\x9c\xe3\xf3\x70\x1a\xba\x1e\xbf\xdf\x63\xfb\xf8\xf8\x81\xfd\x69\x7a\xfb\x11\x8f\xf8\xfc\xd2\x7a\xc3\x8e\xb1\xbf\x5e\x5f\x2a\x53\x9b\xbe\x6c\x6a\x34\x47\x1c\xcb\xb6\xeb\x51\x95\xb5\xfd\xc8\x3c\x39\x70\x2c\x2e\xe6\x54\x1e\x50\x0f\x97\xbd\x6d\x17\x38\x36\x2d\x8e\x65\x65\x51\x16\xb6\xee\xcb\x63\x79\xa0\x60\x66\x00\x80\xa3\x3b\x37\x43\x55\xc0\x54\xdf\xcc\xf7\x0e\x7b\x8b\x67\xb3\xba\xa7\xa0\xba\xf9\xc6\x96\xc1\xa9\x3f\x5b\x2c\xce\xa6\x2d\xf6\x95\xa9\xbf\x2e\x3c\xd1\x97\xb6\xac\xfb\x0e\xa6\x83\x01\x59\xef\xb1\x1f\x7a\x1c\x4c\xbd\xea\x3d\x4c\x77\x19\xba\xb3\x2d\xd8\x2e\x20\x9c\x6d\x79\x3a\xf7\x9e\xb1\xc1\xe1\x6c\x5a\x73\xe8\x6d\xcb\x3e\xfc\x62\xf3\x1e\x75\xd3\xa3\xac\x0f\xd5\x50\x94\xf5\x09\x85\xed\x0e\xb6\x2e\x6c\xdb\xb1\xed\x96\xc2\x2e\xe6\x95\x32\x47\x65\xeb\x53\x7f\xc6\xda\xbe\x26\xe7\x43\x73\xb9\xd8\x3a\x08\xd3\xdd\xe1\x1d\x0c\x8e\x43\x71\xb2\x38\x9a\x43\xdf\xb4\xec\x21\x1c\x5c\xd8\xa3\x19\xaa\x3e\x90\xbd\x34\x85\xa5\xc4\xfb\x73\xd9\xe1\xd8\xd4\x3d\xd6\x55\xf9\xd5\x62\xc1\x2f\x78\x58\xa0\xa9\x09\xd6\xd4\x05\xc1\xde\xb1\xed\x8e\x40\x82\xd0\x9e\xfd\xd5\xa9\x8c\x2d\x97\x32\xff\x97\xcc\xd1\xb5\x91\x8c\x1e\xdc\xcd\x2e\x36\xd8\x2c\x25\xe3\x8e\x3b\x3e\xfd\x60\x85\x55\xf2\x5e\x8f\x92\xe9\x3b\x5a\x4b\xb6\x81\x90\x6c\x29\x96\x92\x09\x78\x44\x78\xaf\x65\xf4\x5a\x6b\x5a\x4b\xe6\x7d\xe2\x25\x99\x8f\x70\x08\xff\x50\x52\x32\xe5\x17\x2e\xfc\x0b\x90\x41\x48\xc6\x9f\xb8\x7f\x54\x13\x28\xb1\x7a\xc7\x25\x5b\xae\x96\x93\x31\x5d\x42\x06\x13\xe3\x9c\xc7\x55\xee\x11\x93\x85\x07\x86\x80\x07\xf6\x87\x02\xc4\x67\x1c\x15\xf1\x01\x88\x86\xd2\x5a\x84\xdd\xe0\x20\x88\xa9\xbf\x69\xa7\x53\x94\x18\x95\x64\xb8\x17\x92\x09\x7d\x63\xc4\x27\xc9\x94\x16\x33\xbe\x0f\x15\x31\xd6\x45\x64\x2f\xe6\x38\x4a\xf6\xec\x43\xf4\xec\x2d\xbc\xf1\x1f\x6f\xcc\x20\xc6\xf1\x27\xbc\xd7\xe3\x5d\x3c\x51\xe5\xe1\x4a\xb2\x67\x9e\xdc\x83\x31\x08\x10\x35\x48\x6a\x90\x64\x5e\x0f\xef\xa7\x52\x2d\x66\x0d\x27\x21\xa7\xdd\xe4\x9d\xf2\xa4\x53\x7f\x9b\x4f\xf1\x3a\x2e\xbd\x8e\x6b\xa7\x7d\x57\x29\xad\x43\x1d\x00\x44\xf5\x15\xd1\x17\x5a\xab\x90\x0b\x08\x16\xb3\x20\x4a\x32\xc7\xf9\x27\xc9\x5c\xd2\x24\x95\x28\x14\x28\x94\x27\x97\x30\x95\xce\x85\x98\x54\xb8\xa4\xaf\xf3\x69\xb8\xb9\x44\x99\xf1\xa6\x05\xc8\x42\x8e\x6a\x3a\x9b\x22\x02\x36\xe7\x2e\xad\xe3\xc6\xe8\xc6\x1f\xfb\x62\x0c\x56\x32\x2b\x9d\x57\x9b
... remainder of gzip-compressed fonts/fourtops.flf payload elided ...")
+
+func fontsFourtopsFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsFourtopsFlf,
+		"fonts/fourtops.flf",
+	)
+}
+
+func fontsFourtopsFlf() (*asset, error) {
+	bytes, err := fontsFourtopsFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/fourtops.flf", size: 2775, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsFuzzyFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff ... gzip-compressed fonts/fuzzy.flf payload elided ...")
+
+func fontsFuzzyFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsFuzzyFlf,
+		"fonts/fuzzy.flf",
+	)
+}
+
+func fontsFuzzyFlf() (*asset, error) {
+	bytes, err := fontsFuzzyFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/fuzzy.flf", size: 5959, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsGoofyFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff ... gzip-compressed fonts/goofy.flf payload elided ...")
+
+func fontsGoofyFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsGoofyFlf,
+		"fonts/goofy.flf",
+	)
+}
+
+func fontsGoofyFlf() (*asset, error) {
+	bytes, err := fontsGoofyFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/goofy.flf", size: 8117, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsGothicFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff ... gzip-compressed fonts/gothic.flf payload elided ...")
+
+func fontsGothicFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsGothicFlf,
+		"fonts/gothic.flf",
+	)
+}
+
+func fontsGothicFlf() (*asset, error) {
+	bytes, err := fontsGothicFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/gothic.flf", size: 8772, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsGraffitiFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff ... gzip-compressed fonts/graffiti.flf payload elided ...")
+
+func fontsGraffitiFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsGraffitiFlf,
+		"fonts/graffiti.flf",
+	)
+}
+
+func fontsGraffitiFlf() (*asset, error) {
+	bytes, err := fontsGraffitiFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/graffiti.flf", size: 6271, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsHollywoodFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff ... gzip-compressed fonts/hollywood.flf payload elided ...")
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5b\x4b\x6f\xe3\xc8\x11\xbe\xeb\x57\xd4\xa1\x01\x4a\x00\xa9\x9e\xdd\x24\x58\x24\x03\x03\x02\x92\x53\x80\xc4\xa7\x9c\xc2\xa0\xe5\x4c\x34\x19\x2f\x66\xed\x8d\x1f\x59\x0c\xc2\xf0\xb7\x2f\xba\xfa\x51\x8f\x6e\x4a\xa4\x46\xf6\xd8\x80\x25\x56\x89\x62\x7d\xf5\x2e\x36\x5b\x1f\x3f\x7f\xfc\xfe\xc6\xc0\x77\xef\xe0\x07\xf8\xfe\x37\xf0\x0e\x7e\x58\x7d\xba\xff\xfc\xf9\xcb\x2f\xf7\xf7\xff\xda\x7e\xfc\xfc\x11\xfe\xf9\x05\xfe\xfc\x7c\x73\x07\x7f\xbc\x79\x80\xf5\x8f\x1f\x76\x3f\x3e\xff\xfb\xf9\xf0\x74\xd8\xfe\xe7\xf9\xf6\xa7\xed\xf3\x87\x9f\xb6\x87\xc7\xcd\xea\xbf\x87\x87\xc7\xdb\xfb\x3b\xf8\x6e\xfb\x0e\xba\x0e\x7e\x67\xff\x72\xf3\x60\x7f\xff\xdb\xd5\xea\x6f\x8f\x87\x47\x78\xfc\xf9\xe6\xee\xf6\xf1\x13\x7c\xf8\x74\xf3\x70\xf3\xe1\xe9\xf0\x00\x8f\x87\x27\xf8\xe5\xf6\xe9\x13\x74\x7f\x82\xfb\x9f\x9f\x6e\xef\xef\xfe\xb0\x82\x73\xfe\x1c\x1d\xad\xfe\x0e\x57\x70\xd3\x00\x40\x0f\x57\x70\xf0\x07\xf0\x0f\xb8\x82\x5b\x3c\xfa\x1f\x5c\xc1\x3d\x1e\x0d\x70\x05\xcf\x78\xf4\x7f\xb8\x82\x3b\xfc\xf2\x08\x57\xf0\xd7\x95\x80\x60\x76\xd3\xa4\xe1\xa4\x01\x03\x90\x48\x24\x12\x19\x89\x40\x66\xc2\x93\x8c\x08\x24\x4c\x93\x02\x87\xe1\x38\x0c\xd8\x26\x93\x9e\xc8\xb0\x02\x11\x71\x24\x02\x49\x22\x50\xd2\x35\xcc\x87\x05\x25\x2c\x13\x2d\x63\x6c\x03\x08\xc6\x78\xc3\x22\x0e\x13\x70\x84\xcb\x44\xe9\xc6\xe8\x4b\x9f\x38\x52\x3e\x50\x7e\x88\xa7\x19\x69\x03\x00\x32\x8b\xe9\x6c\xd3\x75\x9d\x7f\x35\x64\x0b\x3c\x23\x9a\x87\x9f\x11\x85\xf3\x33\x24\x1e\xa3\x6d\x53\x98\xeb\x98\xeb\x7c\xac\x5a\x0e\xdf\x36\xb0\xb7\x0d\x83\x1f\xac\xe8\x36\xe4\xc6\xb5\x73\xd6\x01\x73\x65\x40\x06\x1b\x48\xee\x8c\x0c\xdb\x24\xf8\xf8\x15\xd7\x46\x46\x00\x16\x54\x3a\x1f\xbe\x61\xf1\x9d\x62\x3b\x72\x0d\x5c\x47\xd7\x27\xab\xe1\x21\x4a\x34\xd1\x9a\xd7\x46\x08\x60\x81\xc6\x82\x6c\x77\x24\xc3\x04\x02\xd8\x76\xe3\x86\x50\xbc\x77\xdb\x31\x20\x71\xae\xeb\xc6\x21\xca\x6d\x47\x3c\x7d\x70\x0e\xc9\xfd\x16\x3f\xdd\x6f\x4d\xe1\x50\x65\x0d\x65\x09\x84\x65\x4c\xb0\x22\x06\xb8\x09\x81\x1c\xce\x3b\xf1\xaa\x22\xc0\x68\xf7\x4b\xdf\x53\xfe\x06\x2a\x79\x3d\x51\xc1\xe5\x44\x45\x7f\xb7\xca\xb3\xd2\xad\xc7\x8a\xcb\x71\x80\xd0\xc0\x86\xe5\x16\x06\x59\x8e\xcc\x18\x72\x39\x2c\x73\x00\x72\x2a\x00\xa4\x58\xfc\x2a\x80\xf5\x20\x6c\xd1\x3f\xbd\x97\xe9\x76\x2b\xe3\x9c\xeb\xad\x1b\xf1\xb0\xb3\x4d\x4f\x41\x08\xbd\x01\x2a\x4b\x1c\x08\xc3\x30\x37\xe6\x63\x91\xf1\xe2\xc0\x3a\xe7\xca\x98\x87\xf9\x31\x6f\x8e\xbd\xc6\xd0\xf3\xaf\x8d\x99\x08\x2e\x82\x4a\x48\x09\xa8\x09\x38\x5d\x82\x99\x78\x30\xab\x40\x7b\x87\xd6\xff\xcd\xb5\xff\x82\x3f\x15\xff\x8f\xf5\x4b\xd6\xa7\x30\x7e\x09\x30\xeb\x53\x89\x88\xfd\x52\x14\x60\x22\xbe\xae\x7d\x3a\xd5\x41\x7c\x0e\xf6\xaa\x02\x03\xe4\xb0\x4f\x74\xea\x28\x8c\xa6\xbe\x07\xac\x9f\xf8\xea\xeb\xb0\xfa\xca\xda\xa2\xcb\x4b\xa5\x1a\x9b\x09\xd0\x2c\x29\x8d\x2f\x61\xd4\xdc\x46\xa0\x9e\x6f\x44\x53\x63\xfd\x2b\x45\xe6\x65\x6d\xc9\x0d\x69\x5c\x37\x92\x15\xcd\x1a\xc8\x84\xf1\xeb\xce\x8e\x29\xe0\xfc\x71\x8e\xbf\x70\xec\x4f\xf3\xb0\xc6\x60\x39\x53\x1a\x49\x59\x48\xd5\x90\x85\x2e\x8e\x90\x36\x54\xc9\x42\x6b\x60\x86\xf4\x7f\xef\x03\x2c\x92\x13\x8a\x18\x77\xb1\x51\x46\xd2\x03\x8c\x51\x86\x34\x35\x5b\x7a\xa6\xcb\xa2\xb3\x83\xd1\xd5\xb9\x9f\x76\x68\x1b\xeb\x5c\x2e\x61\xd1\x89\xd2\xbb\xd2\xb9\xd2\xb7\xd2\x97\x8a\x3a\x2b\x49\xf6\x7c\x4a\x71\x8e\xca\x56\xfa\xe2\x86\xa2\xf1\x25\x2d\xb8\x08\x34\x65\xb6\x93\xa0\x53\x26\x27\xd0\x94\xd9\x2f\x05\x9a\x8a\x72\xbe\x50\xc8\x1d\x67\x59\x97\x8d\x29\xc3\x4c\xdb\x63\xda\x30\xdb\x3a\x3b\xf6\x7c\x96\x89\x69\xc5\x32\x7e\x94\xc0\x66\xce\x7f\xb2\x36\x5c\xc0\xe6\x2e\xd9\x50\xda\x7c\x03\xaf\x62\x73\x82\x4b\x40\x19\xc4\x28\x2c\xb4\xd
a\x0c\x23\x1c\x67\x40\xc0\xa1\x68\x10\xc5\x71\x12\x7c\xfc\xdd\xf7\xd1\xd4\xb2\xcd\x75\xec\x23\xa2\x7b\xe4\xf7\x33\x2e\x08\x36\x94\xd7\xc6\x1c\xb9\x20\x6b\xc8\xe9\x52\xd1\x52\x2e\x54\x23\x8c\x38\x3f\x46\xf7\xb1\x5a\xe3\x38\x45\x56\xc8\x6a\xd3\x01\x0f\x0c\xd6\x38\xf8\x20\xcf\xe6\xaa\x38\xaf\x74\xe1\x8f\x26\x96\x3c\xa9\xb0\x81\x8a\x89\x24\xa9\x46\x5b\x7d\x5a\xa7\x3e\x67\x53\x1f\xa7\x24\xd4\xd2\xb4\x49\x4b\x12\x66\xaa\x3a\x19\xa0\x50\x62\xcd\xaf\x6c\x7d\x3e\x74\x23\x41\xf3\x3d\x1f\x0f\xaf\xa5\x3a\x7c\x48\x28\xb2\xb5\x30\xeb\x79\xf3\x0c\x2e\x99\xb0\x79\xc6\x36\xb0\x11\xf3\xcc\xbe\xeb\xf6\x9d\x9a\x67\x00\x2e\x36\xcf\x40\x1a\xda\xe9\xcf\x36\xfc\xbe\xd8\x83\xb1\xb4\xde\xd0\xe2\xed\x2d\x2d\x40\xd8\x7d\xd7\xb5\x16\x12\x23\xde\xad\x07\xc1\xeb\x36\x2a\xbc\x46\xc8\x5a\xec\x1c\x86\xe6\xb8\xe2\x2c\xb4\x31\x28\xc0\xc0\xac\x1c\x38\xeb\x50\xb9\x32\xec\x54\xeb\x14\xf2\xf4\x1e\xc1\xc7\x6f\x4d\xa0\x5b\xc2\x13\x4c\xa5\x85\x52\x01\xe5\x6d\xf3\x48\x94\xc4\xc7\xaf\x10\x89\x0c\x4e\x22\xec\x3c\xf1\xd6\xb1\xbc\xa6\xcd\xe3\xbb\xb2\x39\xbe\xbf\x71\x9b\x23\x08\xba\xb0\xaf\x2a\x7e\xc4\x82\xa3\x36\x9f\xc4\xf2\x92\x36\x67\x98\xa1\x55\xa1\x12\xd3\xb3\x6b\x39\xaf\x00\xdf\x4a\xce\x2b\xdb\x9c\x62\xa6\x16\xe7\x2e\x33\xc2\xa7\x4d\xc8\xd8\x17\x88\x73\x57\x39\x2d\x49\xdb\x94\x91\x6e\x1b\xd0\x66\xc7\x57\x69\xf7\x88\x4e\x94\xc6\x26\x0b\x52\xe5\xb1\x0a\x72\x21\x53\xab\xe4\x94\x3e\x62\x91\x8b\x25\x9e\xa4\x99\x1a\x2a\xa0\x64\xf1\x59\xb7\x96\xa0\x57\x31\xcd\xe2\x48\xd0\x71\xd0\xe0\xcc\x75\x1b\xc1\x14\x5f\xa6\x52\x52\x53\xc9\x95\x5a\x79\x03\x68\xc5\x54\x28\xd5\x51\x7e\x9d\x1f\xf4\x99\x21\xb2\x9c\xee\xb3\x78\xab\xc0\xbd\x91\x6e\xc9\xb3\x3f\xfa\xc8\x10\x1e\xe9\x19\x9a\x1c\x54\xbd\x6b\xa7\x11\x9e\x97\xce\xa0\x03\x0a\xc0\x8e\xac\xfe\xc0\xd8\xd9\xa6\x1b\x67\x07\x94\x1d\xfb\x56\x64\xb9\x07\xbf\xef\xbd\x37\x36\x13\x90\xce\x08\xa8\xda\x89\x29\xb3\x9d\x4c\x6d\x8b\x77\x6b\x3c\xb7\xfd\x30\xe4\x9d\x20\x93\xbb\x01\x3b\x66\x5d\x44\x66\xd4\xd2\xfb\x54\x7e\x2f\xe3\xce\x0b\x2d\x28\x14\x03\xad\x96\xae\x58\x7e\xcc\x2b\x54\xb2\x15\x85\x8e\xab\xf3\x8a\x9d\x22\x16\x04\x35\x57\x14\x43\xc5\x1b\x9a\x88\xea\x53\x28\x93\xfa\xe6\xbb\x33\xb7\xb9\x53\x36\x6f\x60\xa3\x6c\x1e\x43\xe5\xcd\xd9\xfc\x3d\x97\x2a\x1b\x73\xad\x2f\x7f\xcb\x29\xd4\x15\x17\xde\x14\xd7\xb8\x74\x9c\x3b\x5d\x34\xd7\xbe\xb6\xbb\xa2\xdd\xda\x7d\xe7\x5c\x65\xe6\xff\x46\xe3\x83\x2b\x4f\x4b\x85\x9e\x07\x05\x2b\x10\x45\x70\x83\xba\xcb\xa2\x82\xa8\x38\xf5\xc1\xe1\x02\x81\xa2\x94\x28\x14\xb0\x51\xd7\x78\x4f\x6e\x21\xaf\xe2\x7b\xda\xd2\x32\x3e\x82\x76\x34\x2f\xc0\x6e\xc5\x88\x0a\x88\x93\xb4\x06\x38\x81\x73\x02\x2e\x04\x52\xa0\x26\xf4\x1c\x3c\x53\x82\xe9\xc0\x75\x21\x55\x84\x4a\xf5\x96\xb9\x94\x9d\xf9\x65\x3c\x35\x30\x80\x50\x0f\x70\x0b\x4b\x54\x82\x5d\x70\x88\x60\xe9\x5a\x76\x10\xd7\x0a\x9f\x0f\x89\xb5\x6e\x93\x22\x53\x25\x73\x36\x4b\x79\x29\x7a\xa7\x57\xb0\x7b\x35\x11\xf4\x7a\x18\x80\x32\x6b\x55\xca\x2a\xe1\x27\x48\x96\xa6\x72\xc0\x0f\xd3\xbd\x98\x57\x84\x31\x85\x25\x29\x06\x76\xab\x40\xd0\x8d\x5b\x48\xc7\x38\x3a\x9e\x1b\xdd\xe9\xcf\xc8\x87\x01\xb4\x7a\xc7\x57\x4f\xf3\x46\x8e\xc4\x33\x6c\x6b\x47\xe0\x71\x4e\xe0\x49\x0e\xe0\x13\x01\x14\x56\x3e\xf9\x2b\x1f\xff\xe9\x27\x00\xea\x79\x3d\x29\x62\x76\x2b\x33\xb0\x57\x18\xd8\x2b\x0c\xec\x55\x6e\xa0\x10\x56\x88\xcb\xbe\x95\x5d\x3e\x72\xaf\x53\x62\x01\xdf\x90\x00\xfc\x19\xa8\x66\x21\x53\xb3\x10\x44\x10\xa9\x9f\x8a\x4a\x49\x8c\x59\x79\x42\x3a\xf1\x94\xc4\x38\x3b\xc4\x9d\x29\x3e\x6e\xd0\x12\x29\x9a\x06\xbe\x92\xba\x78\x89\xfb\xc4\x03\x87\x72\xe9\xbb\x38\x30\xb4\x41\x20\x8b\x37\xbb\x15\xad\xbf\x73\x64\x06\x2f\x8d\xf8\x3d\xee\xbd\x78\xee\xaf\xff\xb9\x3f\x4f\x1c\x53\x5a\x
86\x08\xdd\xe4\xae\x87\x85\x62\x1d\x27\x3a\x77\xea\x3a\x52\x79\xfd\x09\x0b\x1c\x36\x4e\xd8\xc6\xcf\x1c\x5c\x38\x70\xe9\x42\x3c\x4c\x5d\x5a\x13\x32\xa3\x67\x50\x71\xdc\xe2\x30\xbc\xf5\x41\x74\xf8\x00\x64\xf6\x35\xeb\x60\x2a\x67\xd4\x56\x38\x98\x85\xf8\xac\xc1\x00\xca\xb5\x8d\xca\xca\x46\xb4\xd9\x84\xc4\x85\x5d\xa5\xfc\x7c\x6e\x1c\xa5\x55\xdd\x5c\xa6\xcf\x8a\xa3\xd9\x90\x6d\xb3\x91\x2c\x7c\xb4\xc2\x58\xb8\xd1\x50\x8c\xb3\xac\xcd\x45\xc6\xda\xc9\xf9\x5b\xdc\x8a\xe1\xc0\xd2\x28\x10\xfa\x66\xad\x06\x7f\x31\x47\xdb\x8a\xac\xca\x69\xd6\xcc\xab\x99\xca\x26\x59\xad\xee\x5a\x4f\xb1\xd3\x81\xcc\x6d\xc8\xef\x06\x58\xfe\xf2\x04\x16\x19\x1c\x8f\xcb\x14\x9e\x97\x38\x13\xef\xd7\x29\xd2\x52\x06\xa0\x3e\xd3\xe7\x57\x5c\xb2\x9c\x71\x5d\x04\x97\x34\x92\xf8\x8a\x95\x78\x54\x9c\xc9\xda\x22\x6f\xf7\x4f\x44\x8f\x3f\x97\xaf\x21\xaa\x15\xc4\x56\xaf\x1f\xea\xd5\x43\xbd\x76\x98\x56\x0e\x27\xcd\xb0\xf4\x86\x6c\x5e\xe5\x07\x59\xd1\x00\x44\xc5\x4d\xee\x2c\xa5\xcd\xae\xfc\x13\x40\x8f\xb0\x5a\xe7\x54\x95\x4a\x41\x0d\x29\xf5\x78\xb5\x45\xff\x0b\x86\x6e\x93\xcb\x40\xcc\x55\x93\x81\x95\x30\x05\x40\x91\x79\xdf\xa8\x77\x52\x66\x3a\xf9\x54\x64\x96\x84\x8a\x8d\xaa\x86\x9b\xc5\xe4\x40\xe3\x1f\xc7\x2b\x58\xf2\xa6\xa8\x44\x5f\x34\x80\x82\x55\xed\x0a\x8b\x5d\x0c\x27\x5d\x5c\x14\x7d\xc2\xe9\x5b\x5d\x05\x8a\xb6\xea\x3c\x52\x58\x6f\xc3\xed\x26\x0a\x59\xd1\x2a\xe7\x93\x17\x8b\x3c\x68\xf2\xb3\xcf\xcb\x4c\x6d\xb5\x5a\x86\xbf\x55\xe8\x5e\xab\x96\xcd\x3e\x8e\x91\x71\xa1\x09\x7e\xe2\x68\x8b\x07\xed\x6e\x15\xd6\x31\x70\x6d\x67\x48\xc5\xdd\x0d\xba\xd3\x57\x8f\x16\x79\xdb\x0b\xdc\x42\xcb\x84\x0e\x96\x0b\xe6\xc2\x1b\x28\x00\x9c\x94\x70\x52\xe3\xa0\x72\xd8\x2f\xd3\xe7\xd5\x2c\xe8\x83\x50\x7f\xd7\xda\xbb\x65\x1a\x97\x28\xce\xe0\x90\xb7\x05\xfd\x32\x13\xe1\xb1\x43\x2a\x53\x38\x9a\x84\x24\x71\xb1\x8a\x3a\x3f\x24\xb6\x27\xae\xa0\xc5\xb1\x0d\xb9\x90\xf7\x55\xc7\x67\xd8\x66\xeb\xc2\xb5\xd3\x5d\xf4\x90\x4e\x42\xfa\xd5\x7e\xa8\x01\x46\x3c\xc8\x35\xfc\x57\x44\x71\xe7\x7c\x56\x20\xef\x14\xe6\x54\xa0\xd5\x6e\x61\xbe\x04\x52\xec\xfe\x2c\x00\x5d\xf0\x97\x24\x30\xf0\x35\x9b\xb6\xeb\xf6\x62\x1f\x7b\x5e\x0c\x09\xc3\xaa\xdc\x78\x36\xdf\xa2\xa8\x71\xdc\xf1\xeb\x75\x1d\xf0\x49\x42\x5a\x79\x99\xfd\x83\x88\x05\xbf\x58\x3b\x59\xcb\x43\x23\xe1\x7d\x04\x8e\x2d\x42\x54\x56\xa3\x2a\xc4\x45\x20\xe4\x7c\xe4\xb7\xcf\xe7\x40\xa8\x1d\x66\xe1\xad\xec\x1e\xac\x75\xb1\xc6\xc5\x7f\x2f\x53\x3f\x9c\x59\x70\xa9\x3c\x9d\x31\x3a\x4e\xe7\xc2\xb1\xbd\xf4\x27\xcd\xcf\x3e\x89\xc2\x2f\x1d\x01\x13\xf6\x68\xbb\xce\xb9\xb4\x0f\xb3\x65\x51\x30\xf3\x6e\xfa\x3c\x7b\xb8\x6e\x1c\xbb\x4e\x28\xff\xba\xdb\x07\x8a\xc5\xeb\xf9\xbc\xdd\xea\xd7\x00\x00\x00\xff\xff\x0b\x5d\x3b\xd8\x2d\x3d\x00\x00") + +func fontsHollywoodFlfBytes() ([]byte, error) { + return bindataRead( + _fontsHollywoodFlf, + "fonts/hollywood.flf", + ) +} + +func fontsHollywoodFlf() (*asset, error) { + bytes, err := fontsHollywoodFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/hollywood.flf", size: 15661, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsInvitaFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x58\xdd\x6e\x23\x37\x0f\xbd\xf7\x53\xf0\x82\x80\x35\x80\xc7\xcc\x06\x8b\xe0\xfb\x80\xa2\xf0\x6d\xdf\x61\x00\xd6\xdd\x24\x4d\xb0\x69\xbc\x48\xbc\xed\xcd\x3c\x7c\x21\xfe\x48\xd4\xfc\xd8\x69\xbb\x8b\xcd\x8c\x66\x4c\x1d\x1e\x91\x47\x94\x34\x8f\x2f\x8f\xb7\x47\x84\x3b\xf8\x0c\xb7\x37\x70\x03\xb7\x9b\xe7\xd7\x3f\x9f\xcf\xc7\xfd\xe3\xcb\x23\xdc\xdc\xd1\xff\x3f\xc3\xb7\xaf\x77\xff\xfb\xf4\xe9\xfd\x70\xfc\x72\xbc\xdf\xdf\xbf\x1d\xbf\x3e\xec\x1f\xee\xbf\x6f\x8e\x2f\xc7\xf7\x1d\x9c\x9f\x1e\xe0\xcb\xe9\x8f\x6f\xdf\xcf\x0f\x6f\xf0\x74\x7c\x87\xdf\x1e\xce\xda\x7c\xbd\xff\xeb\xed\xf9\xfc\xfc\xfa\x3b\x9c\x9f\x8e\xaf\xf0\x0b\xdc\x9f\x60\x03\xf2\xef\x20\x77\x44\xb9\x23\xca\x33\xa2\xbc\xcf\x37\x38\x6c\xe4\x06\x07\x35\x24\xb5\x44\x52\x13\x20\xc5\xc0\x13\x56\xb0\xa6\x21\xad\xc3\x86\x08\xad\x65\x1d\xed\x52\x0c\xf3\x73\x4f\x7d\x4f\x3d\x1e\x36\xd6\x80\xe6\xd7\x79\x4b\x9b\xcc\xc4\xf6\x3a\x65\x3e\x9d\x7a\x82\x01\x40\x7c\xa5\x6c\xd0\x21\x04\xc2\x0d\x4c\x64\x91\x1b\xa9\xb3\x41\x8a\x6d\x8e\x03\xa5\x0e\xd7\xc6\xc6\x0c\xd9\x38\x99\xdf\xdd\x00\xa8\x3e\x07\x36\xcb\x84\xde\xc3\xfc\x1c\x36\x40\x80\xde\xc6\x02\x69\x57\xcb\x08\xec\x7b\x4b\x4a\xb6\xd6\xbc\x48\x4b\x28\x69\x4b\xba\xa7\x4c\xa1\x19\x90\x43\xf4\x7b\x70\x3f\x15\x24\xa0\x44\x18\xde\x6b\x73\x39\x30\xf5\x8e\x3c\x10\x63\xf6\x4b\x03\xe0\xec\x77\x6b\x08\x45\x35\x1f\xd5\x7a\x2c\xc6\x7e\x53\x32\xed\x15\xf5\x4a\x93\x60\x80\x33\xc7\x10\xf2\x6c\xab\xf1\xc3\x29\x01\x6c\x02\xba\xc1\xd3\xe4\x8d\xc5\x44\x03\x89\x26\x0b\xf1\x6b\x88\x13\x96\xee\x52\x14\x21\x79\x26\x91\x07\x4a\xa6\x6b\x68\x57\x04\xea\x7d\x3d\xeb\x04\x05\x0d\xc1\xc0\x66\x73\x86\xcd\x68\xeb\xca\xea\xfb\x6d\x7e\x43\x2c\xf9\x76\x6b\xbc\xd2\x0d\xa0\x4f\xce\x73\x55\xc5\xec\x48\x32\x2c\x73\x22\x69\x6e\xd9\xce\x49\x32\x97\xdf\xa4\x3f\xab\x7f\x66\xe6\xf5\x39\x03\xb8\xd2\xed\x32\x49\x33\x2a\xa1\xff\x60\x24\xb9\x4e\x51\x0d\xc9\xde\x22\x99\x2e\x93\xb4\x6e\x9e\xf2\xc4\xbc\xab\x6e\x11\xd6\x42\x02\x45\xcb\x5b\x11\xd3\x16\x97\xa6\x37\x84\x89\x72\x2a\x32\xce\x12\xc4\xa9\xf6\xd4\x53\x2e\x49\xf8\x13\x6a\x65\x1a\x70\x6d\x32\x69\x64\x3d\xbe\x8b\xb2\xcf\x4f\x83\xb9\xfe\x19\x7d\x88\x0b\x78\x16\xb8\x1c\x37\xd7\x92\x0f\xfc\xd4\x54\x9c\x65\x09\x7a\xe0\x28\x97\xd4\xfc\x66\xc8\xaa\x20\x5c\x8f\x77\x55\x76\xda\xe5\xee\xa3\x87\x8e\xfa\xbe\xd7\x87\x4e\x63\x3e\xb2\x14\x59\x9a\xcf\xb9\x16\xae\xc5\xab\x6b\x43\x06\x4c\x50\x10\x45\xae\x60\x90\x69\x09\x53\x40\x3b\xb0\xa0\xf8\xaa\xa2\x32\x0f\xcb\x4a\x10\xa1\x42\x24\x36\xa3\x7f\xc1\xd1\x2e\x92\x0a\x99\xf5\xba\xce\x25\x7b\x58\xe6\x08\x0e\xdb\xd5\x35\xcc\x89\x01\x74\x32\x87\x7c\x0d\x9a\xf1\xec\x66\x2b\x40\xcb\xd4\x51\x9d\x6a\x19\x55\x66\xb4\x33\x5c\x4d\x11\x16\xe0\xc6\x0f\xfc\x23\xbe\x14\x02\x6e\xbf\x90\x16\x08\xe1\xab\xd3\x7e\x95\xaf\x5c\x1b\xc2\x14\x18\xf7\x7d\x4f\x3e\x5c\xa5\x9c\xab\xcf\x9c\xf3\x94\x74\xcb\xda\x72\x25\xf8\xb5\x94\x90\x2f\xea\x52\x46\xd9\x23\x0c\x0b\x4b\x6c\x5d\xe2\x22\xde\x14\xb0\x22\x46\xc8\x3a\xa6\x89\x83\x5a\xc1\xf2\xa5\x84\xa0\x6b\x02\x90\xa0\x19\xfe\xb0\x30\xf8\x15\x79\x29\x08\x2a\x13\x4d\xc7\xce\x72\x01\xa5\x1a\x6b\x5b\x75\xd5\x24\xa9\x8b\xec\x1a\x7e\x34\x02\xd0\x58\x19\xc2\x98\xff\xb0\x72\x1c\x49\xa6\x7e\x61\xb9\x6d\x44\x38\xd7\xc0\xa2\x03\x98\x38\x88\xb2\x1d\x43\x34\xd5\x01\x5e\xd3\x80\x2f\x09\x69\xe7\x53\x97\x2c\x61\xae\x59\xf2\x6d\x28\x87\x34\xb6\x40\xb1\x0e\xb8\x58\xbd\x0e\x48\xba\xa9\x94\xc2\x3a\xe4\xae\x3c\x4c\xd3\x56\x41\x03\x66\x80\xac\x04\xd5\xa8\x32\x64\x2d\x7f\xc5\xac\xbb\x50\x9e\x6b\xa5\x2a\x04\x95\xd2\xc0\x70\xb5\x3c\x87\x4d\x55\x2e\xa5\x01\x2c\xa8\x32\x14\x11\x9c\x23\xc5\xd8\x79\xf9\xb0\x79\x53\x0c\x4b\x3e\xd1\xd5\xa
f\x4f\x99\xdc\x02\xa4\xef\x34\x82\x70\xa6\xa5\x83\x22\xc7\xfa\x60\xf1\x4b\x7c\x19\xb4\x60\x76\x51\x89\x63\x45\xcc\x1a\x0c\x9a\x1b\x97\x48\x06\x8e\x2d\xe2\x38\x01\x9d\xe0\xb6\xd0\xb4\x82\x1e\x1d\xd4\xe9\xed\x53\x43\x4d\xc7\x58\x3b\x46\xae\x69\x82\xa5\x09\xe3\x07\xa5\x4a\x57\xab\x51\x64\xdb\x84\x55\xca\x06\x39\x6c\xb5\x9c\xd5\xb9\xb0\x0d\x05\x57\x66\x94\x79\x99\x36\x7e\x46\xec\xca\xb1\x2e\x95\x35\x54\x2d\xd0\xf7\xe9\x57\x0f\x5c\xc4\x30\x99\xc6\x87\x72\x68\xc0\xc1\xba\x0e\xd6\x6d\xf0\x03\x58\xbb\xa9\xc3\xb2\x51\xff\xc0\xd9\x0c\x78\xf5\x6c\xa6\xa3\x42\x2a\x7e\xca\xd9\x6b\xf1\x6c\x16\xfe\x73\xbe\x86\xbd\x18\x22\x0e\x88\xcd\xee\x0c\xeb\xc6\x01\xd6\x36\xba\xf9\xca\xba\x22\x27\x3e\xac\x6c\x74\x49\x73\x4e\x79\xd7\x46\x61\xcf\xdd\xc4\xc4\x2f\xac\x9b\xed\xf0\xae\x19\x89\x62\x71\x22\x77\x1a\x46\xb8\x59\xe6\x87\x19\x92\x13\x2d\xf2\x2b\x8b\x81\x6f\x40\x4c\x33\x64\xc8\xd4\x99\xec\x08\x60\x46\xa6\x2c\xca\x5e\xc9\xc4\xc7\xbe\xa7\x5a\x00\xc3\xa1\x5f\x26\x2a\xe9\x19\xa6\x5b\x22\x93\xaf\x3b\xfd\xc0\x92\x33\x94\x18\x2e\x31\xc6\x9d\x67\xcb\xd5\x23\x01\x56\xf7\xb5\x00\xcf\x08\xe4\x68\x90\x47\x78\x35\x5b\x1a\x87\x44\x35\xbe\xad\x82\xeb\x5b\x99\x7d\x87\x0d\x11\xb8\x02\x26\xa6\x7e\x35\x3b\x58\x14\x4a\x55\x53\x66\x96\xb8\x5b\x19\x3b\x4c\x5b\x65\x09\x06\xe2\xae\x84\x3f\xae\x40\x33\x0f\x7a\xfa\x93\x5c\x51\x02\xf3\x36\xa3\x81\x57\xb9\x22\xeb\x89\x7c\x35\x99\x32\x6a\x62\x2b\x37\x97\x46\x6d\xe7\xdf\x8b\xee\x18\x58\x53\xb2\x9a\x3a\x33\x03\x90\x10\x26\x5a\x3c\x26\x36\xa9\xd3\x83\xa5\xca\x81\x16\x4c\x1b\x9d\x23\x5c\xd5\xf9\xf2\xc4\xd8\x86\xcc\x4c\x3a\x2c\x7c\xe8\x42\xe8\xe1\xe3\x1f\xba\x4a\xb1\x2b\x1f\x99\xe6\x8d\x56\x38\xd7\xbe\x8f\x41\xdf\xd6\xff\xcb\xdf\xc7\x24\xdc\x72\xde\x87\x5f\xfb\x6d\xab\x4f\x8c\xd3\x73\xaa\xde\x83\x2d\x5d\x5b\xde\xfe\xb0\x53\x6f\x0e\x35\x6f\xff\xf3\x96\x94\x39\xef\xad\xb7\x3f\x76\x07\xa4\x2f\xf7\xb0\xd7\x2f\x4e\x97\xe4\x6e\x46\x17\x2a\x41\x31\xba\x30\x71\x66\xc7\xeb\x66\xd3\x4a\xf1\x13\x00\xc5\x4f\x00\xcb\x01\xce\xa0\x7f\x07\x00\x00\xff\xff\xd3\x8b\x3b\xc6\xa3\x17\x00\x00") + +func fontsInvitaFlfBytes() ([]byte, error) { + return bindataRead( + _fontsInvitaFlf, + "fonts/invita.flf", + ) +} + +func fontsInvitaFlf() (*asset, error) { + bytes, err := fontsInvitaFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/invita.flf", size: 6051, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsIsometric1Flf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\xdf\x6f\xdb\x36\x10\x7e\xd7\x5f\xf1\x3d\x54\x90\x0d\xd8\x64\xdc\xa5\x80\xc3\xc2\x83\x1f\x86\x6e\xdd\xba\xa2\x40\xf6\x28\x8c\xb2\x1d\x3a\x33\x92\x48\x85\xad\xb4\x0e\x40\xe8\x6f\x1f\x28\xfe\x10\xe9\xa4\x8b\x85\x38\x4e\x30\x88\x6d\xd1\x1c\x7f\x1c\x3f\x7e\xbc\xe3\x9d\x0e\x59\x5e\x2f\xdf\xce\xde\x60\x34\xaa\xff\x8e\x31\x1c\xe1\xed\x4f\xd1\x6a\x53\xdc\x88\x72\xbd\x5a\x8c\xc8\xf2\x7a\x19\x45\x1f\x56\x97\xd7\xa2\xc4\xa2\xc8\xbf\x89\xf5\x66\x55\xe4\x98\xdf\xe1\x0f\x91\x97\xf8\x3c\xdb\x6c\x44\x8e\xde\x95\xc8\xcb\x7c\xba\xb8\x9b\x8b\xf5\xe6\xeb\x6c\x21\x48\xb1\xbe\xec\x0f\x30\x1e\x8e\x4e\x86\x67\xa7\x03\xcc\x67\x1b\x71\x11\x15\x39\xca\x7f\x04\x96\x45\x5e\x6e\xf0\xb5\xd8\x94\xe2\x42\x69\xfa\x24\xf2\x5c\xac\x4b\x9c\x97\xc5\xe2\x8a\x45\xd1\x87\x75\x71\xc3\xb0\x51\xd2\x74\xf9\x7d\x45\x6e\xbf\xcd\x48\x7e\x8d\x5e\x30\xaf\x1f\xfd\x32\x2b\x05\xc3\xe8\x1d\x7e\xbf\xbd\xc6\xe8\xec\xec\x14\x27\x27\xec\xe4\x94\xbd\x7d\x87\x5f\xff\xfc\x2b\x8a\x7e\x13\x6b\x81\xd9\x5a\x40\x9d\x46\x6f\x4a\xf0\xb9\xc8\x87\x4b\x7d\x9c\x8f\xc9\x0d\x66\xcb\xf5\x6c\x75\x31\xc0\x6a\x89\xbb\xe2\x16\xdf\x67\x79\x3e\x33\xe7\x2c\x15\xd6\x9b\x01\xe6\x22\xba\xb9\xc3\xe5\xad\xd8\x94\x04\x1f\x2d\x6c\x75\x0e\x47\x93\x39\xd1\x5c\x2c\x8b\xb5\x20\x51\x34\x3c\x68\x8b\x22\xa8\x46\xb6\x71\xdd\xb6\x04\x6d\x5a\xb3\xac\x56\x33\x88\x5d\x6b\xa3\x87\x78\xcb\x22\xa3\x26\x01\xfa\x09\x90\xf6\x5b\xe8\x61\x3d\x20\x53\xff\x8c\xa2\x41\xbc\x8d\xfb\xdc\xb4\x3e\xdc\x99\xf1\x09\x02\x39\x72\x08\xac\x51\x02\x38\x47\x89\x02\x0b\x5c\x19\x45\x3d\x70\x00\x9c\xa3\x17\x6f\x6b\x4d\xbd\x38\x8e\xab\xbf\xc7\xe3\x2f\xd5\x78\xfc\x45\xb6\x80\x24\xab\xc9\xcf\x20\x93\x61\x85\x38\x8e\xfb\x5a\x13\x63\x04\x84\x0d\x52\x90\xa4\x85\xa6\x8c\x80\x0e\xea\x95\x4c\x6b\xca\xde\xc7\x2c\x4b\x09\xb2\x61\x82\x36\x90\x00\x59\xaf\x21\x34\x61\x31\x4b\x14\xe1\x59\xb6\xcd\x08\x86\x93\xc9\xe4\x41\x48\xf7\x9c\xc5\x11\x99\x91\xc9\x64\x32\x04\x49\xb6\x49\xad\x08\x14\x19\xcb\x08\xe7\xe4\x7d\x2b\x44\x00\x53\x8b\x12\x96\x20\x8d\x40\x2e\xc6\x73\x6d\x3f\x84\x64\x2d\x0d\x32\x21\x66\xa5\xd2\x11\xbd\x31\xdd\x6f\xa6\xc7\xfd\x71\x1a\x3d\xf8\xa7\xeb\xee\xba\xff\x27\xdd\xda\xd2\x39\xe7\xf5\xff\x46\xa6\x29\x90\x36\x32\x65\xcc\x74\xd4\x32\x65\x34\x35\x1d\x4a\x56\xa3\x95\xe9\x98\x46\x76\x34\x65\x29\xe7\xe9\x34\x42\x4a\x39\x4f\x59\x4a\x19\x05\xa8\xdd\x2f\x65\xb5\x68\xf7\x83\x1e\x6d\xf6\x37\xa2\x95\x95\x0e\x5a\xe3\x39\x24\x60\xce\x43\xc0\x92\x73\xa9\x00\xeb\xd1\x06\xb0\x1e\xf5\x00\xbb\x51\xbb\x9f\x1a\xd5\x08\x0d\x9e\xaa\x32\x4f\xc8\x81\x00\x53\x8d\xc2\x31\xac\x36\x6b\x18\x36\x53\x6b\x96\x2c\x60\xad\x1d\x0e\xb0\xed\xb0\x80\xeb\xd3\x3f\x1f\xc3\xd4\xed\xe1\x03\x6e\x18\x56\x53\x43\x86\xed\xa5\xbf\x10\xc3\xff\x6d\xc3\x66\x34\x60\xd8\x72\xe8\x18\x36\x1c\x1e\x89\xe1\x7d\x9c\xae\x01\xec\x43\xb2\x72\x08\xb8\x69\xf7\xe4\x67\xb3\x61\x1e\xda\x30\x7d\x84\xe1\xfb\x26\xd1\xf6\x95\x08\xae\xc4\xbd\x32\x0e\xb0\x59\xef\x18\x56\x60\x95\x8e\x86\x61\xad\xe3\xd9\x9e\xb5\xf0\x02\x34\xfc\x90\x6d\xbb\xd4\x31\x69\xa9\xd7\xb0\xc0\xb9\x42\x6a\x58\xa4\xa9\xbf\x65\xe3\x43\x30\x0c\xfb\x74\xa4\xfe\xd8\x03\x97\x1f\xa2\xd9\x81\xe3\xe3\x51\xf3\x1a\x40\x54\x7b\xb6\x45\xe4\xdd\xa2\x01\x11\x5e\x42\x1a\xdc\xc1\x8f\x8d\x72\x57\x3c\xd4\x8d\x73\x9e\x72\xff\xc6\x55\x6b\x6e\x5c\x32\x59\x55\xb2\x72\xfa\x24\x93\xd0\x39\xfb\x7e\x32\x52\xf5\x02\xde\x03\xec\x83\xf5\x81\x5a\x90\xd6\xa4\x3c\x73\xa2\xde\x73\x98\xfa\x7e\xe7\xfb\x5c\x68\x25\xce\xd7\x9c\xe9\xed\xcd\x9a\x3d\x86\x01\x24\x7d\x99\x32\x2a\xeb\x87\xdd\xb1\x06\x19\xb2\xc6\x69\x55\x85\x7e\x12\xbc\xf5\x07\x09\xff\xad\x01\x03\x01\x60\x19\x38\xb6\x64\x32\x00\x2c\x59\x08\x5
8\xb2\xe3\xe6\x2b\xfb\x84\xff\xb6\xd1\xf4\x08\x09\xd6\x53\x32\xc2\xa3\x06\x27\x17\xe0\x7d\x67\xd9\x71\x98\x57\x99\xc2\xfe\x98\x61\xc9\x42\xc0\x92\xc9\x00\xb0\x64\x32\x60\x78\xcf\xa7\xaa\x15\x60\x34\x80\x79\xcd\x30\x1c\xe0\x54\x33\x0c\xcf\x86\x1f\x4d\xb0\x0e\x6c\xc3\xe1\xbb\xe9\x77\x34\x57\xba\x13\x65\xa9\x6f\x12\xda\x2d\x5d\x82\xd5\xc4\x96\xdd\x48\xf6\x04\x1b\x7e\x24\x7a\xf9\x1b\x5a\xb1\x89\x5e\xf5\xfe\xf4\xb8\xaf\x44\x3b\xc0\xb5\x3a\x1b\x01\x8d\x05\xba\x28\xa8\xe5\x06\x70\x1d\x65\x42\xc0\xcc\x66\x34\x7a\xbf\xaa\xb2\x5f\x05\x4f\x60\x98\x53\xee\x03\xa6\x8d\x0d\xd6\xa2\x99\x61\x18\x76\x33\x9a\xd4\xc6\x67\xd8\xcc\x38\x20\xc3\x72\xe7\xab\x22\x88\x6c\x0f\xcb\x2e\x34\xab\xc4\x86\xfa\xa1\xb9\xe6\xaf\xaa\xa8\x4d\x68\xaa\x4a\x27\x38\x78\x6a\x42\x73\x20\xc0\xc6\x07\x59\xe8\x74\x1e\xe0\xe7\x71\xba\x96\xaf\x84\x17\x9a\x4d\x89\x3a\x0d\x18\xb6\x56\x6a\x9c\xae\xaa\x52\x6d\xa5\xd3\xe8\x38\x5f\xe6\x8c\x85\x81\xc3\x75\xd4\x36\xcc\xd8\xa7\xf3\xc0\x24\x46\x67\x67\xa7\xcc\xb7\xe1\x1a\xff\x8e\xd3\xed\x6b\xc3\x5d\x41\xeb\x98\xd9\x40\x57\xd0\x7a\xee\x14\xbc\x2b\x68\xbd\xec\x37\x43\x57\xd0\xea\x0a\x5a\x5d\x41\xab\xcd\x8d\x77\x05\xad\xae\xa0\xd5\x15\xb4\x5e\x41\x46\xd8\x15\xb4\xba\x82\x56\x57\xd0\xea\x0a\x5a\x5d\x41\xab\x2b\x68\x75\x05\x2d\x03\xf8\xf5\xfc\x1e\xd4\x0b\x74\xff\x1b\x00\x00\xff\xff\x1f\x45\xa0\xac\x57\x2d\x00\x00") + +func fontsIsometric1FlfBytes() ([]byte, error) { + return bindataRead( + _fontsIsometric1Flf, + "fonts/isometric1.flf", + ) +} + +func fontsIsometric1Flf() (*asset, error) { + bytes, err := fontsIsometric1FlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/isometric1.flf", size: 11607, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsIsometric2Flf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\x5d\x6f\xdb\x36\x17\xbe\xd7\xaf\x78\x2e\x2a\xc8\x06\x6c\x32\xce\x9b\x02\x0e\x0b\xbf\xf0\xc5\xd0\xad\x5b\x56\x14\xc8\x2e\x85\x51\xb6\x43\x67\x46\x12\xa9\x90\x95\xd6\x01\x04\xfd\xf6\x81\x12\x3f\x6d\x2f\x89\xe2\xc4\x1d\x06\xb1\x2d\x9a\x43\x93\x47\xcf\x79\x74\xbe\x7c\x90\xe5\xed\xf2\x74\xf6\x0e\xa3\x51\xfd\x77\x8c\xe1\x08\xa7\xff\x0b\x56\xeb\xec\x4e\x14\xf9\x6a\x71\x4a\x96\xb7\xcb\x20\xf8\xb8\xba\xbe\x15\x05\x16\x59\xfa\x4d\xe4\xeb\x55\x96\x62\xfe\x80\xdf\x44\x5a\xe0\xf3\x6c\xbd\x16\x29\x7a\x37\x22\x2d\xd2\xe9\xe2\x61\x2e\xf2\xf5\xd7\xd9\x42\x90\x2c\xbf\xee\x0f\x30\x1e\x8e\x4e\x86\xe7\x67\x03\xcc\x67\x6b\x71\x15\x64\x29\x8a\xbf\x04\x96\x59\x5a\xac\xf1\x35\x5b\x17\xe2\x4a\x6a\xba\x10\x69\x2a\xf2\x02\x97\x45\xb6\xb8\x61\x41\xf0\x31\xcf\xee\x18\xd6\x52\x9a\x2e\xbf\xaf\xc8\xfd\xb7\x19\x49\x6f\xd1\xf3\xce\xf5\x83\x9f\x66\x85\x60\x18\xbd\xc7\xaf\xf7\xb7\x18\x9d\x9f\x9f\xe1\xe4\x84\x9d\x9c\xb1\xd3\xf7\xf8\xf9\xf7\x3f\x82\xe0\x17\x91\x0b\xcc\x72\x01\x69\x4d\xf3\x50\x82\xcf\x59\x3a\x5c\x36\xe6\x7c\x8a\xee\x30\x5b\xe6\xb3\xd5\xd5\x00\xab\x25\x1e\xb2\x7b\x7c\x9f\xa5\xe9\x4c\xd9\x59\x48\xac\x77\x03\xcc\x45\x70\xf7\x80\xeb\x7b\xb1\x2e\x08\x3e\x69\xd8\xd2\x0e\x43\x93\xb2\x68\x2e\x96\x59\x2e\x48\x10\x0c\x5f\x75\x05\x01\xe4\x22\x9b\xb0\x5e\x1b\x82\x36\xcb\x5e\xab\xd5\x0c\x42\xb3\xda\xe8\x21\xce\xb5\x40\xa9\x89\x80\x7e\x04\xc4\xfd\x16\x7a\x58\x0f\x48\xe4\x3f\xa5\x68\x10\x6e\xc2\x3e\x57\xab\x0f\x63\x33\x2e\x20\x90\x22\x85\x40\x8e\x02\xc0\x25\x0a\x64\x58\xe0\x46\x29\xea\x81\x03\xe0\x1c\xbd\x70\x53\x6b\xea\x85\x61\x58\xfd\x39\x1e\x7f\xa9\xc6\xe3\x2f\x65\x0b\x48\x65\x35\xf9\x3f\xc8\x64\x58\x21\x0c\xc3\x7e\xa3\x89\x31\x02\xc2\x06\x31\x48\xd4\x42\x53\x42\x40\x07\xf5\x4d\xd6\x68\x4a\x3e\x84\x2c\x89\x09\x92\x61\x84\x36\x90\x80\xb2\xbe\x43\x68\xc4\x42\x16\x49\xc2\x93\x64\x93\x10\x0c\x27\x93\xc9\x5e\x48\x3b\xc1\x62\x88\x4c\xc8\x64\x32\x19\x82\x44\x9b\xa8\x56\x04\x8a\x84\x25\x84\x73\xf2\xa1\x15\x22\x80\xc9\x4b\x11\x8b\x10\x07\x20\x57\xe3\x79\xe3\x3f\x84\x24\x2d\x1d\x32\x22\xea\xa6\xd4\x11\xbc\x53\xdb\xef\xa6\xc7\xfd\x71\x1a\xec\xfd\xd3\x6d\x77\xdb\xff\x91\xed\xc6\xd3\x39\xe7\xf5\xff\x4a\xa6\x31\x10\x5b\x99\x32\xa6\x36\x6a\x99\x32\x1a\xab\x0d\x29\x53\x46\xcd\x89\x69\x20\x45\xde\x9c\xe0\x3c\x9e\x06\x88\x59\x2c\x4f\x20\xa6\x9c\x53\x79\x3e\x66\x4c\xfe\x68\x9f\xa7\x95\x99\xe7\x35\x77\x1d\x3c\xb1\xbe\x60\x00\xeb\xa5\xe4\xba\x32\xb4\x05\x2c\x9f\xe1\x02\x2e\x39\x2f\x2d\xe0\x1a\xb4\x05\xdc\x88\x06\xb0\x12\x0d\x60\x25\xfe\x33\xe0\x1d\x86\x3d\x0b\xb5\x3a\x0b\x58\x89\x06\xb0\x14\xa5\x0e\x05\x58\x6a\xa7\x96\x61\x69\x9b\x0b\x38\x36\x97\x5e\x0c\xf8\x50\x86\xf5\x5b\xb4\x80\x5d\x86\xdf\x00\xf0\xd3\x0c\x73\xca\x5d\x86\xd5\x09\xeb\x12\xcd\x09\xeb\x12\x2e\xc3\x3f\xc4\x25\x9e\x06\xac\x1e\x48\x2d\x9c\x5a\x76\xe0\x34\xe7\x5f\x25\xe8\x5e\x00\xf8\xb1\x2c\xc1\x63\x2f\x4b\x1c\xc3\x25\xbc\xb4\x66\x39\xd0\xe7\x5d\x1f\x96\x77\x5d\xc0\xb1\xf6\xf2\xb7\x4f\x6b\xfb\x24\xcf\x14\x8f\x79\x6a\x9e\x5b\xf3\xca\x1c\x23\x63\x15\x95\x4d\xde\x40\x55\x49\xd4\xca\x00\x87\x42\x4d\x8f\xc3\xb6\x01\xe7\x43\xf3\xb0\x78\x50\x3c\x24\x1e\x10\x07\x07\x2c\x0c\x45\xab\xe5\xc5\x03\xe1\x65\x45\xc5\xde\x16\x9e\xed\x77\x5b\xa2\xe9\x9e\x1b\xb9\x64\x8f\xc9\x9c\x7b\x32\x8d\xe5\xe7\x9c\x97\x32\xc7\xe9\x77\x2b\x97\xfb\x6e\xab\x8a\x56\xee\xbb\xad\xaa\xc3\x4a\x56\x2b\x59\x1b\xab\x2b\x80\xa2\xf6\x6d\x2b\xc0\xe3\xd1\x53\xfa\x15\x40\x32\xe8\x45\x4f\xc9\xca\xd8\x09\x77\xc6\x58\xc9\xdd\xe8\xa9\xf9\x73\xa2\xc7\x04\xcb\x2b\xe6\xa7\x76\xe1\xce\xf9\x16\x60\xb9\x3c\xc0\x55\x75\x4c\xc0\xcf\xab\xb1\x6e\x42\xa5\x38\x4a\x42\xf5\xe1\xfa\x60\x1d\xa
8\x2a\xa8\xbd\xca\x49\xbd\xba\xb9\x05\xc1\xa6\x50\x9f\x51\x9f\xcf\xc7\xf2\xd3\x23\xe1\x74\xec\x16\xd7\x6d\xe0\x2c\x93\x87\xbd\xfc\x27\xca\x3f\xe7\x74\x3b\x81\x39\xfd\x8a\x4c\x60\x55\x55\xbd\x3c\x81\xbd\x72\xf9\xdf\xc3\xa0\xdf\x60\xf9\xde\x6a\x1b\x1c\xbf\x5c\x1d\x90\x71\xdb\xf6\xe4\x6e\x79\x77\x0a\xad\x32\x28\xf6\x2b\xaf\xc9\x26\x5e\xbe\x71\x3d\xf8\xe9\x9a\xd6\xb6\x5f\x71\xf3\x41\xf3\xc9\x1b\xe7\x83\x76\x41\xf6\x3c\x03\xf4\x97\x86\xfa\x13\x55\xa7\x8d\x01\x56\x6e\x8e\x36\x75\x5b\x1b\xc0\x74\x02\x69\xf4\xd7\xee\xde\x26\xe8\xb6\x5e\x19\x8d\x7d\x80\x52\xf0\x18\x56\x1b\x96\x61\xc3\xa1\x66\x58\x73\x78\x70\x11\xde\xe9\x73\x76\x1a\x9b\xfd\x1b\x86\x22\xe3\xa8\xaa\xa8\x71\xd5\xec\x48\x92\x34\x73\x5c\xb7\x82\x25\x2b\xab\xca\x69\x9f\x76\xfa\x29\x58\xad\x7a\xc3\x86\xc7\xf4\xc9\x6e\x76\xbb\x92\x34\xca\x7d\x03\x76\x25\x6b\x8b\x31\x44\xbf\x9c\xc6\x0a\xb7\xab\xf4\xdb\x4a\x9d\x9c\x9f\x1b\xfa\xda\x5c\x1d\xfa\xae\x4c\x19\x6d\x50\x99\xaf\xe3\x35\x4a\xd3\x8c\xd1\x1a\xa7\xdb\xc8\xa2\x64\x7e\x73\x5d\x32\x3f\x97\x95\x5b\x7e\x50\x1e\xda\x2a\xb8\xdc\xe8\x66\xc6\xed\x6d\x2e\x2e\xdd\xde\x86\x8d\xce\xcf\xcf\xbc\x6a\xa1\x8b\x87\x1b\x59\xcf\x75\xdc\x6e\x8a\xf5\xc2\xaf\x04\xdd\x14\xab\x9b\x62\x75\x53\xac\x6e\x8a\xd5\x4d\xb1\xba\x29\xd6\x56\x2f\xb7\x4f\xee\xa6\x58\xdd\x14\xab\x9b\x62\x75\x53\xac\x36\x5c\x77\x53\xac\x6e\x8a\xd5\x4d\xb1\xba\x29\x56\x37\xc5\xd2\x1b\xdd\x14\xeb\x28\x53\xac\x7f\xcf\x6f\x3c\xfd\x80\xed\xbf\x03\x00\x00\xff\xff\x98\xa5\xd7\x37\x41\x2d\x00\x00") + +func fontsIsometric2FlfBytes() ([]byte, error) { + return bindataRead( + _fontsIsometric2Flf, + "fonts/isometric2.flf", + ) +} + +func fontsIsometric2Flf() (*asset, error) { + bytes, err := fontsIsometric2FlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/isometric2.flf", size: 11585, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsIsometric3Flf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\x5d\x6f\xdb\x36\x14\x7d\xd7\xaf\x38\x0f\x15\x64\x03\x36\x19\xa7\x29\xe0\xb0\xf0\xe0\x87\xa1\x5b\xb7\xac\x28\x90\x3d\x12\xa3\x6c\x87\xce\x8c\x24\x52\x21\x2b\xad\x03\x08\xfa\xed\x03\x3f\x24\x91\x8e\x93\x58\x89\xd3\x0e\x9b\x98\x04\xc9\xa5\xc9\xcb\xc3\xc3\xfb\x85\x8b\x2c\xaf\x97\xc7\xb3\x37\x18\x8d\xf4\xf7\x18\xc3\x11\x8e\xdf\x06\xab\x75\x7a\x23\xf3\x6c\xb5\x78\x4b\x96\xd7\xcb\x20\xf8\xb0\xba\xbc\x96\x39\x16\x69\xf2\x55\x66\xeb\x55\x9a\x60\x7e\x87\xdf\x65\x92\xe3\xd3\x6c\xbd\x96\x09\x7a\x57\x32\xc9\x93\xe9\xe2\x6e\x2e\xb3\xf5\x97\xd9\x42\x92\x34\xbb\xec\x0f\x30\x1e\x8e\x8e\x86\xa7\x27\x03\xcc\x67\x6b\x79\x11\xa4\x09\xf2\xbf\x25\xbe\xa4\xeb\x5c\x69\x38\x93\x49\x22\xb3\x1c\xe7\x79\xba\xb8\x62\x08\x82\x0f\x59\x7a\xc3\xb0\x56\xe2\x74\xf9\x6d\x45\x6e\xbf\xce\x48\x72\x8d\x9e\xb7\xb0\x1f\xfc\x3c\xcb\x25\xc3\xe8\x1d\x7e\xbb\xbd\xc6\xe8\xf4\xf4\x04\x47\x47\xec\xe8\x84\x1d\xbf\xc3\x2f\x7f\xfc\x19\x04\xbf\xca\x4c\x62\x96\x49\xa8\x6b\x60\x99\x26\xf9\x9a\xe0\x53\x9a\x0c\x97\xe6\x1e\x1f\xa3\x1b\xcc\x96\xd9\x6c\x75\x31\xc0\x6a\x89\xbb\xf4\x16\xdf\x66\x49\x32\xb3\x17\xcc\x15\xc8\x9b\x01\xe6\x32\xb8\xb9\xc3\xe5\xad\x5c\xe7\x04\x1f\x35\x6a\x79\xa1\x2f\x50\xf3\x63\x94\x63\x2e\x97\x69\x26\x49\x10\x0c\x0f\x3a\x82\x00\x6a\x90\x4d\xa8\xc7\x86\xa0\xcd\x68\xb6\x69\x35\x83\xb0\x1e\x6d\xf4\x10\x67\x5b\x60\xd5\x44\x40\x3f\x02\x78\xbf\x85\x1e\xd6\x03\x62\xf5\x63\x15\x0d\xc2\x4d\xd8\x17\x76\xf4\x51\xdf\x19\x67\x90\x48\x90\x40\x22\x43\x0e\xe0\x1c\x39\x52\x2c\x70\x65\x15\xf5\x20\x00\x08\x81\x5e\xb8\xd1\x9a\x7a\x61\x18\x96\x7f\x8d\xc7\x9f\xcb\xf1\xf8\x73\xd1\x02\x52\x51\x4e\x7e\x02\x99\x0c\x4b\x84\x61\xd8\x37\x9a\x18\x23\x20\x6c\xc0\x41\xa2\x16\x9a\x62\x02\x3a\xd0\x3b\x99\xd1\x14\xbf\x0f\x59\xcc\x09\xe2\x61\x84\x36\x90\x80\x42\xef\x21\x34\x62\x21\x8b\x14\xe1\x71\xbc\x89\x09\x86\x93\xc9\x64\x27\xa4\x7b\xce\x52\x13\x19\x93\xc9\x64\x32\x04\x89\x36\x91\x56\x04\x8a\x98\xc5\x44\x08\xf2\xbe\x15\x22\x80\xa9\x4d\x11\x8b\xc0\x03\x90\x8b\xf1\xdc\xd8\x0f\x21\x71\x4b\x83\x8c\x88\xdd\xa9\x74\x04\x6f\xec\xf4\x9b\xe9\xf7\xfd\x73\x1a\xec\xfc\xea\xa6\xbb\xe9\xff\xc8\xb4\xb1\x74\x21\x84\xfe\x6d\x65\x0a\x50\xde\xc8\x4a\x64\x8c\x57\xb2\x16\x29\x57\x13\x4a\x36\x62\xa9\x57\x4c\x03\x50\x21\x28\xa3\x76\xc5\x34\x00\x07\x38\xe3\x94\x51\x21\x38\x55\xeb\xf5\x04\xa3\xcd\x79\x66\x85\x73\x7e\x33\x61\xf1\x70\xb5\x57\xcb\x15\xe0\x6a\x58\x59\x67\x86\x03\x00\x2e\x5c\xc0\x25\x65\x1e\x60\xb5\xca\x01\xac\x44\x17\xb0\xbe\xd2\x83\x80\xf7\x60\x98\xc2\x07\x5c\xaf\xaf\x44\xa5\xa3\x01\x6c\x74\xd4\x80\xed\xaa\x1a\x30\xc7\x8b\x00\x3f\x83\x51\x73\x6a\x03\x50\xe9\x74\x18\x3d\x14\xc0\x6d\x0b\x68\xc3\xb0\xd8\x62\x98\xfa\x17\xd0\x2b\x3c\x93\x70\x19\xb6\x53\x07\x36\x09\x1f\xae\x0f\xd6\x87\xea\x03\xf5\x61\x7a\x20\xe1\xbb\x99\xef\x64\xbe\x8b\xf9\xe0\x0e\xc8\xa5\xf0\xdd\x4b\xe9\x77\xe3\x01\x68\x59\x1e\xd0\x5a\xef\x01\x16\x82\x7a\x01\xc4\xbf\xbd\xd1\x50\xdd\x5f\xed\xad\xec\xdb\x02\xe6\xf8\x3e\x01\xcc\x48\x1e\x78\x8f\x6b\x2f\x2e\x68\x26\x6b\xb5\xdc\x52\x2a\x84\x77\xa8\x31\x93\xea\x86\x86\x61\x4b\x48\xc5\x6f\xf3\xdc\xc1\x0e\x47\x72\xdd\xbe\xc2\xe2\x41\xf1\x90\x38\x40\xb6\x2e\xef\xc2\xe0\x8e\x1d\x55\x90\x3c\x44\xbb\x00\x3d\xf6\xb6\x45\x23\x17\x40\xc1\x1e\x94\x85\xf0\x64\xfd\xb6\xa2\x60\x85\x8a\x6e\xce\xdb\xaa\xe1\xbe\x6d\x59\x96\xe5\xc1\x92\xd3\xa3\x72\x75\x39\x27\xb6\x73\xbc\x56\x6c\xdf\x37\x74\xfa\xde\xa3\x19\x74\xbc\xc7\x30\xda\x78\x4f\x21\x14\xa3\x6e\xec\x67\x8c\x15\x70\xbd\xa7\x2c\xb9\xe7\x3d\xdc\x21\xe4\x39\x0c\xb7\x76\x77\x27\xdc\xe9\xcc\x66\xfd\xdf\x01\xac\x86\x07\xb8\x2c\x7d\xc0\x2f\x32\x89\x97\x17\x58\x3b\xb3\xeb\xab\xa
5\xff\xa7\x92\x53\xe5\xe0\xd4\x09\x94\x4d\x61\xf5\xba\xc9\x69\xdb\x9d\x7e\x6c\xf1\x5a\x31\xf7\xb2\xca\xe4\x69\x80\x14\x3e\xc0\xd7\x0c\x60\x07\x29\xa5\x9e\x60\xf4\xe1\xea\x5a\xc3\x71\xac\xb5\x2e\x78\xf7\xcb\xa6\x4f\x17\x56\xdb\x55\x76\x0d\xbb\x7a\x05\x73\x60\xf5\x06\xf6\x38\x27\xa9\xd9\x5b\xd4\xa7\xd7\xc1\x6d\x8f\xc7\x6f\x19\xaa\xdc\x7c\x6a\xf7\xbe\xb2\xe7\xef\xf6\xae\x2d\xf8\x5b\xe8\xb7\xc0\x37\xd8\x2b\xa8\x66\x9f\xc9\x1c\x35\xf2\x5a\xb4\xb2\xce\x23\xb5\x0d\x68\x93\x6e\x9c\x4a\x5b\xf4\xbe\xb4\x0a\x2e\x5c\x5a\xcd\xa7\x0d\xad\xf6\x53\x8f\x56\x3b\xd1\xd0\x6a\x88\x73\x68\xe5\xec\xc0\x15\x6a\x9b\x2a\x46\x73\x03\xb7\xfc\x62\x8a\x30\x1d\x04\x14\x3b\x2a\x5d\xd5\x41\x40\x37\x2c\xcb\x82\x15\x55\x10\x70\x14\x3e\x24\x0b\x51\xec\xef\x53\x06\xbc\x0f\xf5\xbe\xd4\xc0\x76\x41\x57\x2b\x0d\xe4\x83\xf8\xd4\xae\x80\x5a\xf8\xf1\xa9\x96\x8d\xa8\x4b\x40\x27\x3e\x29\x94\xa6\xe0\x32\x0f\x57\xb0\x82\x36\x5c\x2a\xd9\x29\x9f\x4d\x41\x44\x7d\x2e\xe9\x63\x5c\xb6\xcf\x00\x0d\x53\x95\xc8\xbc\x7a\xe5\xec\xdc\xad\x57\xd8\xe8\xf4\xf4\xc4\xcb\x00\x8e\xf7\x54\x13\xfb\x59\x6b\xd7\x83\x7a\x66\x99\xdf\xf5\xa0\xba\x1e\x54\xd7\x83\xea\x7a\x50\xfb\x57\x7a\x5d\x0f\x6a\xdb\x95\xba\x1e\x54\xd7\x83\xda\xe9\x3d\x5d\x0f\xaa\xeb\x41\x75\x3d\xa8\xae\x07\xd5\xb2\x94\xea\x7a\x50\x3b\xb9\xec\x7a\x50\x5d\x0f\xaa\xeb\x41\xfd\x6f\x7a\x50\xff\x9e\xff\x36\xfa\x01\xd3\xff\x04\x00\x00\xff\xff\x92\x72\x45\x3e\xb6\x2c\x00\x00") + +func fontsIsometric3FlfBytes() ([]byte, error) { + return bindataRead( + _fontsIsometric3Flf, + "fonts/isometric3.flf", + ) +} + +func fontsIsometric3Flf() (*asset, error) { + bytes, err := fontsIsometric3FlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/isometric3.flf", size: 11446, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsIsometric4Flf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\xdf\x6f\xdb\x36\x10\x7e\xd7\x5f\xf1\x3d\x44\x90\x0d\xd8\x54\x9c\xa5\x80\xc3\xc2\x83\x1f\x86\x6e\xdd\xb2\xa2\x40\xf6\x28\x8c\x92\x1d\x3a\x33\x92\x48\x85\xac\xb4\x0e\x20\xe8\x6f\x1f\xf8\x43\x24\x65\x3b\x35\x94\xc4\xce\x3a\x88\x6d\xd1\x1c\x4d\x1e\xbf\xfb\x78\xbc\x3b\x1f\xb2\xb8\x5b\x9c\x25\x27\x18\x8d\xe4\xdf\x31\x86\x23\x9c\xfd\xe4\x2d\x57\xd9\x3d\x2f\xf2\xe5\xfc\x9c\x2c\xee\x16\x9e\xf7\x61\x79\x73\xc7\x0b\xcc\xb3\xf4\x2b\xcf\x57\xcb\x2c\xc5\xec\x11\x7f\xf0\xb4\xc0\xa7\x64\xb5\xe2\x29\x7a\xb7\x3c\x2d\xd2\xe9\xfc\x71\xc6\xf3\xd5\x97\x64\xce\x49\x96\xdf\xf4\x07\x18\x0f\x47\xa7\xc3\x8b\xf3\x01\x66\xc9\x8a\x5f\x7b\x59\x8a\xe2\x1f\x8e\x45\x96\x16\x2b\x7c\xc9\x56\x05\xbf\x16\x9a\x2e\x79\x9a\xf2\xbc\xc0\x55\x91\xcd\x6f\xa9\xe7\x7d\xc8\xb3\x7b\x8a\x95\x90\xa6\x8b\x6f\x4b\xf2\xf0\x35\x21\xe9\x1d\x7a\x8d\x75\x7d\xef\x97\xa4\xe0\x14\xa3\x77\xf8\xfd\xe1\x0e\xa3\x8b\x8b\x73\x9c\x9e\xd2\xd3\x73\x7a\xf6\x0e\xbf\xfe\xf9\x97\xe7\xfd\xc6\x73\x8e\x24\xe7\x10\xd6\xa8\x43\x09\x3e\x65\xe9\x70\xa1\xcc\xf9\x18\xdc\x23\x59\xe4\xc9\xf2\x7a\x80\xe5\x02\x8f\xd9\x03\xbe\x25\x69\x9a\x68\x3b\x0b\x81\xf5\x7e\x80\x19\xf7\xee\x1f\x71\xf3\xc0\x57\x05\xc1\xc7\x1a\xb6\xb0\xc3\xd0\xa4\x2d\x9a\xf1\x45\x96\x73\xe2\x79\xc3\x57\x1d\x9e\x07\x31\xc8\xda\x97\x63\x4d\xd0\x66\xd8\x6d\x52\xcd\xc0\x37\xa3\x8d\x1e\xe2\x6c\xf3\xb4\x9a\x00\xe8\x07\x40\xd4\x6f\xa1\x87\xf6\x80\x58\xfc\xd3\x8a\x06\xfe\xda\xef\x33\x3d\xfa\x30\x36\xe3\x12\x1c\x29\x52\x70\xe4\x28\x00\x5c\xa1\x40\x86\x39\x6e\xb5\xa2\x1e\x18\x00\xc6\xd0\xf3\xd7\x52\x53\xcf\xf7\xfd\xea\xef\xf1\xf8\x73\x35\x1e\x7f\x2e\x5b\x40\x2a\xab\xc9\xcf\x20\x93\x61\x05\xdf\xf7\xfb\x4a\x13\xa5\x04\x84\x0e\x22\x90\xa0\x85\xa6\x98\x20\x1c\xc8\x9d\x54\x69\x8a\xdf\xfb\x34\x8e\x08\xe2\x61\x80\x36\x90\x80\x52\xee\x21\x61\x40\x7d\x1a\x08\xc2\xe3\x78\x1d\x13\x0c\x27\x93\xc9\x4e\x48\x5b\x8f\xc5\x10\x19\x93\xc9\x64\x32\x04\x09\xd6\x81\x54\x84\x10\x31\x8d\x09\x63\xe4\x7d\x2b\x44\x00\x15\x9b\x02\x1a\x20\xf2\x40\xae\xc7\x33\xe5\x3f\x84\xc4\x2d\x1d\x32\x20\x7a\xa7\xd0\xe1\x9d\xe8\xe9\x93\xe9\x71\x7f\x9c\x7a\x3b\xff\x74\xd3\xdd\xf4\xff\x64\x5a\x79\x3a\x63\x4c\xfe\xaf\xe5\x10\x08\x23\x2b\x0b\x91\xd2\xa8\x96\xa5\x18\x46\x62\x42\xc8\xf5\xa7\x62\x62\xea\x21\x64\x4c\x7d\xca\x22\x1a\x4d\x3d\x44\x8c\x45\x21\xc4\xa7\x21\x0d\xeb\xf3\xc4\x24\xa5\x61\x7d\x9e\x56\x69\xcf\x97\x3a\x2c\x1e\xad\x03\x38\x34\xe0\x52\x00\x96\x60\x1d\xc0\x6a\xc2\x01\x1c\xd5\xe6\xd4\xe7\xd5\xe6\x18\x3c\x00\xaa\xea\x75\x01\x2b\x0e\x2d\x60\x8d\xc2\x02\x56\x2c\x59\xc0\xb0\xe7\x39\x13\x1a\xb0\x99\x38\x1c\xc3\xdb\x80\x59\x93\x61\xb5\xca\x05\x1c\xbe\x29\xc3\x3b\x5c\xa2\xc9\xb0\xf4\x02\x17\xb0\xd9\x6f\x7c\x24\x6c\xc1\x70\x3d\x8e\x61\x80\x79\x84\xda\x00\x17\xa2\xc1\xa3\x57\x6d\xe1\x3b\x98\x4b\xb0\x88\x35\x18\x66\x2c\xdc\xc3\xb0\xeb\x12\x62\xe2\xc5\x51\x22\x44\x13\xb0\x59\x5f\x9b\x23\x74\x34\x18\x45\x78\xb8\xb0\xb6\xe1\x11\xda\x00\xa3\x4e\xf0\x63\x45\x79\x5a\xd4\x3c\x4c\x92\x29\xf6\x69\xfa\x25\x34\xb9\x2f\xa4\x61\x55\xc1\x50\xeb\x18\xbe\xe9\xaa\x16\x96\xb7\xdb\x0f\x9e\x8b\x52\xac\xb3\x28\xf5\xba\x1a\xa5\xb9\xdf\xa7\x50\xd6\xa8\xb6\x60\x3d\x81\xf2\x15\x2e\x9f\x6d\x5c\xbe\x18\xe6\xf2\xab\x92\x96\x55\x55\x55\x4a\x5f\x09\x94\xb4\xb4\xfa\xb7\x64\xc6\x1a\xb2\xd4\x51\x6e\x01\x76\xc1\xba\x40\x5d\x90\xee\x73\xb2\xd1\x55\xfe\x6c\xae\xd2\x8d\xaa\xee\x25\xbb\xc1\xc9\xf5\xc2\x56\x6f\xbc\x6c\xb0\x56\x1a\x59\x81\x2a\x69\xe9\xb2\xc6\xca\x06\x6b\x40\x58\x55\xce\x93\x71\x8d\xc2\xa1\x2a\x81\x56\x80\xe5\xbd\xb9\x6f\xbc\xa4\xa5\x05\x2c\xef\xd5\x01\x2c\xef\xf5\xb8\xa5\xcb\xee\xc4\x1a\x3d\x33\xb1\xee\x8f\xa2\x9b\x0f\xed\xe8\xc
5\xa2\x86\x78\xa0\x3c\x65\x03\xbb\xfb\x50\x36\x1f\x8b\xfb\x60\x7e\x94\x6a\xd6\x0d\x55\x0e\x60\x15\x9a\x2c\x60\x25\x5b\x46\x55\xa8\xaa\xf6\x85\xaa\xe7\x32\xac\x93\xbe\x0b\x38\xc2\xde\x5a\x8b\xbd\xac\x12\xd8\xe7\xc3\x2a\x17\x59\x05\x6e\x1c\x85\xcd\x62\x9b\xc9\xd6\x25\x40\x9a\xe8\xc6\xec\xba\x1c\xb7\x0e\xb0\x95\xca\x5e\xe4\xc3\xdf\xcf\x5e\xf6\x00\xe7\xf8\xd0\xfd\xbe\xf0\xe6\x51\xe2\x69\x03\x34\xe2\x3a\x23\xea\x64\x6a\xb2\xa2\x92\x6d\x98\x53\x1e\xab\x36\x19\x86\x45\xda\x71\xa3\x84\xc8\xd5\x6d\x19\x66\x1b\x00\xc3\x66\x1c\x96\x2b\x2c\xc3\x6a\x85\x5b\xcb\x34\x18\xd6\x2b\x9e\xcd\xb0\xc6\x0b\x27\x13\x45\x1b\xb2\xcd\x74\x3b\x64\x4d\x92\x90\x45\x61\x23\x41\x3b\xa9\x59\xf2\xe5\x14\x34\x55\xf5\xd2\x82\x66\xdb\x25\x5e\xcd\x00\x73\x45\x94\x36\x1f\x9d\x31\xe0\x35\x1e\xdd\xde\xa8\xb0\xf5\x0d\xca\x56\x59\xaa\x5b\xdd\x4c\xcd\x7a\x38\x8f\xae\x92\x5e\xe9\x26\x1e\x7b\xde\x41\xbe\xa4\x53\xda\x4c\x1c\x6a\xa2\xf6\x61\x4a\x2f\xaf\x6a\x97\x90\x80\x47\x17\x17\xe7\x2e\x60\x85\xdf\x01\xec\x3e\xb2\xef\xfa\x70\xd7\xdb\x7a\xe3\x6a\xa0\xeb\x6d\x75\xbd\xad\xae\xb7\xf5\x82\x7a\xab\xeb\x6d\x75\xbd\xad\xae\xb7\xd5\xf5\xb6\xd0\xf5\xb6\x8e\x9c\x58\xbb\xde\x56\xd7\xdb\xea\x7a\x5b\xcf\xf5\xe1\xae\xb7\xd5\xf5\xb6\xba\xde\x56\xd7\xdb\xfa\x61\x7a\x5b\xff\x9d\xdf\x8e\x7a\x83\xe9\x7f\x03\x00\x00\xff\xff\x2b\xc1\x51\xba\x6d\x2d\x00\x00") + +func fontsIsometric4FlfBytes() ([]byte, error) { + return bindataRead( + _fontsIsometric4Flf, + "fonts/isometric4.flf", + ) +} + +func fontsIsometric4Flf() (*asset, error) { + bytes, err := fontsIsometric4FlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/isometric4.flf", size: 11629, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsItalicFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x55\xcd\x6a\xdc\x30\x10\x3e\x47\x4f\xf1\x1d\x04\x91\x21\xf6\x6c\x93\x9e\x4a\x29\xa2\xf7\x5e\x7a\x28\x2d\x18\x06\x67\xe3\x6d\xdd\x6c\xd6\x90\x4d\x68\x03\x7a\xf8\xa2\x91\x6c\x4b\xb2\xd3\xc0\xae\x25\xcd\x9f\x66\xbe\xf9\xd1\xe1\x78\xb8\xee\x34\xde\xe3\x06\xef\x76\xd8\xe1\x46\x0d\x4f\xdd\x71\xd8\x37\x87\xe3\xe1\xe2\xe2\x5b\xff\x78\x1e\xc6\x13\xae\xd5\xed\xcb\x07\xe0\x73\x77\xc6\x97\x7e\xf8\xdd\x3f\x02\x78\x90\x8d\x1d\x4e\x87\xb1\xf9\x33\x9c\x9a\xa7\xe7\xbe\x39\x1d\x01\xdc\x76\x67\x7b\xd7\x3d\xf4\xf7\xcd\xfd\xd3\xaf\xe6\xdc\xab\xc3\xf0\xb7\xbf\x83\xb7\xf0\xf5\xa5\x3b\xe1\xc7\xf8\xbc\xbf\x07\x5e\xfc\x62\xf7\xe7\xe6\xf9\xb1\xff\x39\x9c\xba\x66\xdf\x29\x6d\xe3\x0f\xd6\x2a\xc0\x2a\x07\xab\x1a\xc8\xde\x2a\x22\x58\xa5\xb5\x1c\xe3\x47\xbe\x72\x62\xc7\x8e\x61\x55\xed\x6a\x57\x63\xa6\x5b\x05\x27\x74\xe3\x9c\x16\x31\x57\x41\x88\x93\xb6\x55\x3b\x6f\x97\x76\x99\x49\xab\x4c\xe5\xc9\xe6\x7b\xbc\x4a\x1c\xf0\xf7\xc7\xbf\x10\xb4\x88\x08\xa9\x9d\xd5\x5b\x39\xfb\x5b\x48\xcf\x44\x0d\xf8\xc8\x5a\xf2\x5f\x6a\x75\xa4\xc8\x22\x1c\x76\x2c\x44\x07\x3d\x11\xad\x92\xbd\xb6\xea\x4a\xcb\x7e\x36\xc3\xac\xe7\x7d\x94\x0c\x7b\xed\xe1\xd2\xe1\x18\xa3\x13\x1f\x13\x4f\x00\x66\x89\x89\x00\xef\xa4\x61\x26\x64\x80\xf9\x53\x2d\x5a\x09\xcc\x41\x87\x25\x2c\xe6\x05\x93\x94\xc1\xf2\x45\x86\x22\x53\xbc\x2b\xd1\x60\x31\x82\xb5\x46\x30\x1c\x58\x66\xe1\x79\x26\x07\x1e\x47\xbf\x32\x2d\x39\x19\xae\xd6\x5a\x31\xd6\x48\x4d\x23\x8d\x05\xd6\xa4\x05\x36\x11\xae\xe6\xfc\x0a\xaa\x92\xb3\x36\xe2\x5b\xe4\xa0\xae\xd3\x54\xce\x82\x94\x48\xb3\x54\x0c\x2a\xff\x6d\xc2\x5e\x2e\x5b\x7e\x4b\x10\xc4\xbe\xe2\x0d\xe0\x32\x3f\xe7\x8c\x05\xfc\x7d\xa8\x79\xc6\x26\x7e\x84\x00\xdb\xda\x88\xda\x45\xbe\x03\xb4\x86\x05\x55\x2a\xb4\x67\xa6\x0e\x65\xf4\x8a\xe9\x05\xe6\xa2\x94\x82\x42\xc5\x53\xe2\x36\x6a\x0d\x14\x62\xce\x5a\x5a\x16\x8a\x15\x94\xdd\x19\xb9\x34\x9b\x5c\xdf\x39\x55\x9c\xc9\x2a\x75\xd6\x74\xe4\x62\x15\xb9\x4d\x6f\xc9\x05\xa4\xe0\x4a\x6f\xdf\x6c\x9d\x49\x20\x14\x1c\x01\xfa\x2d\x0b\xed\x1b\x16\xcc\x86\x05\x5f\xee\x98\xfa\x27\x6f\x92\xd0\x41\x92\x2d\x03\x6c\x23\x37\x01\xbb\x6e\x7c\x04\x35\xcf\x76\xdb\xb8\x9b\x98\x16\x47\x2b\x70\x82\xf5\x36\xf4\x21\xda\x95\x76\xbc\xd2\x7b\xa0\x8b\x1a\x5b\x5a\xbb\x98\x2d\x56\xc9\xd8\x76\x9c\x56\x87\x55\xc1\xf8\x32\x73\x05\x33\xed\x64\xc0\xe7\x92\xd4\xa6\x8f\x46\xf2\x74\x28\x5e\x6c\xb6\xe5\x60\x0f\x52\x52\xf9\x54\xa8\x0a\x74\x55\x41\xe4\xe9\x1d\xc8\x6a\x50\x22\x32\xd9\xec\x0b\x74\x6d\x95\xa9\xd3\xd9\x2a\x1d\x56\x8c\xe9\xa0\x16\x92\xed\x0d\x49\x3e\x52\x2f\xa8\xf4\xe2\xb2\x98\xd9\x73\xfe\x2f\x65\x11\x16\x53\x19\x0c\x99\xad\x08\xb1\x0a\x46\xd0\xa4\xaa\x0c\x06\x5b\x9e\x60\x05\x12\x96\xce\xac\xa6\x11\xbe\xc2\x99\x0a\xe2\x1a\x0f\xed\xc9\x5c\xe9\xc2\x0b\x2e\x9f\xab\x20\xbc\x91\x3f\x29\x85\x96\x56\x76\x45\x76\x12\xce\xa5\xab\x12\x20\xbd\xe1\x6f\x80\x67\x29\x29\xaf\xf3\x11\x61\x15\x82\xc3\xf2\x17\x82\x87\xe1\x13\xc2\x9a\xb8\x42\x2d\x21\x43\xcc\x2a\x8c\x3c\xbe\xfe\x3a\x8c\x1c\xb9\xaf\x0e\xa5\x11\x18\xff\xdf\xf7\x56\xd5\x5c\xaf\xc1\xaa\xeb\x8d\x62\xaf\xb1\x92\x9c\xde\x8c\xf8\x40\x61\x7a\xa1\x68\xbe\xe3\x5f\x00\x00\x00\xff\xff\x4a\x0e\x3c\xe0\x68\x0a\x00\x00") + +func fontsItalicFlfBytes() ([]byte, error) { + return bindataRead( + _fontsItalicFlf, + "fonts/italic.flf", + ) +} + +func fontsItalicFlf() (*asset, error) { + bytes, err := fontsItalicFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/italic.flf", size: 2664, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var 
_fontsIvritFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x5a\x7b\x6f\xda\xc8\xfa\xfe\x9f\x4f\xf1\xfc\xaa\xea\xd7\xe4\xc8\xc1\x81\xb6\x69\xab\x2d\x51\x68\x43\x1a\x76\x73\x3b\x81\xa6\xda\x23\x4b\x53\x03\x43\xf0\xc6\xd8\xac\x6d\xd2\x66\xe5\x0f\x7f\x34\xf7\xf1\xd8\x24\xb4\x3d\x68\x1b\x5f\x98\x79\x9f\x77\xde\xe7\xbd\xcd\xb0\xf3\x78\xde\x0d\x9f\xe3\x00\xaf\xf1\xe6\x00\x9d\xd7\xe8\xbc\x42\x07\x9d\x83\xee\x9b\x4e\x6b\x78\x9f\x45\x05\x76\x4e\xe9\x24\xa3\xdf\x76\xf1\x39\x89\xa6\xe9\x8c\x62\x9e\x26\x05\xc2\x3c\xa7\xcb\x49\x4c\x67\x98\x3c\xe0\xf7\x74\x91\xe0\x63\xfa\x2d\x4c\xf0\x7e\xca\x2e\x47\xd3\x69\x14\xb7\xd3\xec\xf6\xb0\x75\x16\x16\x51\x82\xe9\x22\xcc\x72\xcc\xb3\x74\x09\x8c\x8a\x30\x99\x85\x19\x9f\xf9\xa9\x8d\x8f\x8b\x70\xb5\xa2\x71\x8c\xff\xc7\x30\x4c\xd8\x63\xd4\x12\x90\xf6\xac\xdf\x69\xb6\xce\xc3\x98\x2e\xf9\x2c\x3a\x0b\xe3\x28\x5c\xe0\x24\x8b\xe8\x8c\x26\x13\x9a\xdd\xe2\xfd\xed\x5c\x3c\x1d\x25\x0f\xdf\xdb\xd3\xbc\x3d\x5b\xb7\xe9\x6c\x7d\xd8\x82\xfc\x7c\xce\x29\x9e\x45\xf1\x82\xcb\x6e\xcf\xe3\xe9\x33\xcc\xd3\x0c\x12\xeb\x8e\x3e\x4c\x52\xa6\xd5\x32\x5c\xad\xa2\xe4\xb6\x3a\x6d\x9d\xbb\xd3\x3e\xb7\x47\xed\xbd\xbc\x78\x88\x69\x6d\x2a\x76\x9e\xcd\xf9\xe8\x67\xc8\xa7\x59\xb4\x2a\x76\xab\xc2\xde\xbe\x7d\xfd\x6e\xef\xad\x11\x35\x1c\x5d\x42\xbc\x43\x41\xbf\x17\x7a\xf0\x65\x86\x75\x4e\xf1\x79\x7c\xb2\xf7\xb6\xf5\xa5\x7f\x7d\x31\xbc\xf8\xf4\x7f\x38\x19\x7e\x62\x0c\xe4\x08\x33\x9a\xbc\x28\x30\x89\x66\x51\x46\xa7\x45\x94\x26\x61\xfc\x1b\x8a\x45\x94\x23\xca\x91\x17\x59\x34\x2d\xe2\x07\x64\xd1\xed\xa2\xd8\x2b\xd2\xbd\x98\xce\x0b\x68\xe1\x3b\x93\x07\xcc\xe8\x3c\x5c\xc7\xc5\x2e\xe8\x3d\x4d\xb8\x2a\xc5\x82\xc2\x30\x16\x4e\x0b\x9a\xe5\xed\xd6\x3c\xba\x8d\x69\x81\x8c\xc6\x34\xcc\x29\xba\xed\x2e\xf6\xf6\x70\x91\xde\xd3\xe5\x84\x66\xe8\xbc\x7b\x77\xd0\x6a\x9d\xa7\xb3\x68\x1e\x09\x87\xb8\x0a\xd7\x31\x3e\xac\xb3\x22\x4d\xf0\x3e\x4f\xe3\x35\xd3\xee\x88\x86\x59\xb1\x88\xa3\xe4\xae\x9d\xd0\xe2\x10\x9d\xae\xff\xee\x00\x45\x8a\x28\x99\xc6\xeb\x19\x45\x42\xbf\x61\x15\x66\xe1\x92\x16\x34\x6b\xe5\xeb\xd5\x2a\xcd\x0a\x21\xf0\x64\xf8\x89\x69\x10\x26\x33\x76\xfb\x25\x4a\xda\xc0\x79\xf8\x80\x30\xce\x53\x4c\x28\xf2\x98\xad\x32\x7e\xc0\x52\x69\xc1\x56\x33\xa1\x45\x41\xb9\x11\x5b\xe9\x9c\x8b\x9f\xaf\xe3\x78\xef\x5b\x34\x2b\x16\xfe\x1d\xcd\x12\x3f\x5f\xae\xf3\x05\xc2\xb8\xa0\x59\x12\x16\xd1\x3d\xcd\x3d\x4c\xd6\x85\xb2\x0c\xd2\x75\xb1\x5a\x17\xcc\x9e\x17\x97\x63\x66\x93\xe4\x96\xce\xda\x2d\x3c\x3f\x6a\xfa\x77\xd4\x02\x08\x8e\x5a\x28\x51\x9a\xbf\x84\xfd\xdd\x21\xbb\x47\xdc\xf6\x72\x10\x1f\xb6\x83\x12\xfc\xf5\x0d\x6e\xc0\xbf\x7e\x8e\xea\x55\x4d\x60\x53\xd8\x7f\xfc\x2d\x29\x51\x96\x28\x05\x12\x01\xda\x6d\xf6\x4e\x3e\xf0\xb1\x25\x9f\x53\x92\xb2\x24\xa5\x91\x64\x89\x23\xe2\x6d\x89\x92\x5d\x7d\x10\xae\x5e\x40\x08\x02\xae\x17\xe0\xf3\xef\x49\xa9\x14\x06\x21\x62\x19\xbe\xf8\x0a\x3e\x7c\x2e\xc3\x87\xcf\x35\xf1\x89\xaf\x17\x69\x70\x88\x52\x7a\x07\x04\xbb\x90\x33\x08\x02\x3f\xe0\x16\xda\x21\x87\xc0\x7b\xf6\x36\x20\x84\xf8\x81\x6f\x29\x6b\xec\xb9\xc3\xcd\x54\x0a\xc0\xe7\xd6\x5f\x83\x23\x55\x11\x66\x47\xed\x82\x80\x04\x6c\x28\x11\x2b\x0f\x10\x28\x03\xd4\x2e\x3e\xf1\x95\xd1\x85\xee\x4c\x2f\x0e\x10\x40\x1a\xc6\x67\xf6\x08\xf8\x98\xc0\xb7\x0c\x6c\xa6\x41\xb3\xa6\x29\xd3\x7c\x59\xfc\xd8\xe4\xc8\xc9\xee\x5f\xc7\x00\x2e\x82\x1e\xc6\x3e\x02\x81\x7d\x04\x02\xf3\xa3\x6d\x11\x2a\x0e\xaa\x4d\xaa\x8c\x5a\xe1\x1b\x92\x6f\xd4\x96\x4e\xa4\x7d\x05\xc7\xca\xfe\x3a\x1c\x88\x34\x34\xe7\xda\x55\x4c\x78\x91\x1d\x39\x56\xfc\xe8\x41\x0a\x81\x2d\x53\x92\xc8\xbc\x57\xc8\x65\x8e\xec\xd7\x8c\x50\x99\xac\x6d\x24\x97\xc3\x18\x09\x94\x05\x77\x25\x62\xb3\x7a\x8a\x
4d\x11\x7e\xd6\xad\x92\x69\xa8\x75\xb9\x75\x95\x67\xb7\xa5\xb3\x8c\x27\xf0\xa1\x43\x89\x47\x9c\x10\xf3\x42\x5b\x79\x47\xd9\xa0\xd9\xb6\x95\x95\xa3\xd4\xd4\x42\x73\x2b\x44\x57\x49\x75\x59\x15\x41\xec\xf0\xfb\x38\x72\xa3\x4b\xd8\x53\x3c\xad\x4d\xc3\xe4\x9a\x7f\x36\xfa\x6a\xf3\xa0\x6a\xc8\xd8\x19\x42\xae\x5b\x25\x81\x40\x86\x72\x2d\x7e\x1b\x62\xea\x89\xe8\x72\xd2\x4b\xc0\xd3\xa9\x42\x35\xeb\x53\x9c\x28\xbf\x09\x14\x0b\x3a\xe9\x32\x63\x93\xdd\x5a\x11\xd0\xc6\xe4\xae\x2e\x51\xd8\x72\xc8\x57\x15\x32\xd8\xd1\x51\xc6\xcd\x2b\x95\x0d\xa4\x53\xa9\xe8\x36\x29\xc0\xe7\x79\x4d\x09\x65\x0f\x2a\x94\x24\x80\x70\x0a\x63\xa5\x0d\x0e\x2d\x3c\xa3\x84\x15\xf9\x4f\x39\xb4\xb0\x80\xc0\x52\xea\xeb\x18\x53\xdf\x06\x9b\x42\x19\x35\xbc\x5a\xa6\xd9\x10\xca\x9a\x57\x68\x5e\x79\x8e\xae\x22\x3f\x95\x44\x60\xa9\xad\x53\x83\x16\x43\x4a\x34\x87\x52\xd3\x9a\x55\xd7\x60\x27\xc8\x3a\x32\xcc\xc0\xfa\x4a\xf9\xf7\x32\x61\x9a\xa4\x59\xf7\x36\x99\xa2\x54\x5d\x54\x65\xd2\xc6\xd3\x61\x40\x94\xfe\x52\x2d\x82\xad\x73\xb9\x08\xb8\x92\x05\xa0\x28\xca\x2f\x44\xd0\x95\x68\x4b\xba\x48\x69\xbb\x94\x5e\xa1\x22\xa1\xe9\xee\x09\x62\xf8\x3f\x61\x0b\x56\x96\xa5\xa2\x81\xaf\x75\x86\xa9\x29\xa8\xda\xa8\x66\xe1\x40\xaf\x14\x81\x9e\x1f\x18\x0b\x07\x35\xfc\x5f\x2b\x7e\x8d\xfe\xac\xe2\x07\xa6\xae\x6d\xf2\xaa\x2d\x91\x2b\x51\xbc\x1d\x32\xde\x43\xaf\x79\xe3\x64\x5f\x57\xb4\x60\xeb\x8a\x66\xa5\x57\xab\x29\x82\x2a\x9c\xd6\x5d\x43\xa3\xd4\x1c\x0c\xdb\x59\x9b\x08\x3f\x93\x3d\x5d\x60\x52\x2f\x7b\xd0\x45\x31\xc0\x0d\x74\x73\x17\x38\x85\xb1\x22\xa8\x2a\xcb\x11\xe7\x48\x74\x84\xd6\xe5\x5a\xa2\x95\x50\x2d\x4e\xb7\x60\x3a\x31\xbb\x21\x24\x9a\x10\xa5\x8c\x9e\x78\xa3\xbb\x9d\xc7\x8d\xca\x3e\xaa\x97\xd9\xd4\xe6\x57\x42\x4f\x45\x9e\x48\xa6\xe5\xa6\xf6\xbb\x64\x73\xb4\xc1\xb4\xa9\x94\x91\x94\x79\x02\x6d\x18\x37\x31\x28\x3f\xa9\xb7\xeb\x3a\xa4\x05\x02\x33\x38\xaf\xfb\x81\x50\x52\xee\x13\xaa\x97\x86\x4e\xfd\xb1\xbb\x5a\x1f\xe0\x34\x19\x10\x58\x4d\x1b\x13\x23\x8d\xc8\x4d\x9f\x5d\xac\x77\x2c\x2f\xf5\x1a\xf3\xbd\xc9\x7c\xb5\x8e\xcf\x54\xd7\x76\x53\x75\xb5\x80\x4d\xc5\x51\xb0\xa6\xb2\x3a\x54\xc2\x64\x7d\xa2\x94\xdb\x5a\xe1\x46\x4c\x04\x3a\x83\x6d\xc2\x34\x83\x75\x35\x55\xb5\x54\xe5\xbc\x5a\x69\xda\xd6\xaa\xd2\x47\x4c\xf3\xf3\x94\x55\xad\x12\xd1\x50\x45\x4d\x97\xb9\x79\x8f\xa2\x2d\xa8\xfa\x54\xa9\x8e\xae\xa2\xbe\xf2\x58\x5b\x25\x89\xee\x96\x4d\xe8\xf4\xdb\x50\x2d\x5d\x35\x1a\x95\xa9\x78\x32\x37\x98\xbd\xec\xaf\x4e\xc5\xa8\x2e\xdf\x29\x92\x0d\x2d\x32\x7e\xd8\x86\x8e\x9b\xb8\x75\xeb\x89\xed\xc4\x13\xc8\xba\x6e\xb5\x9d\x8a\xf9\x73\x7e\x23\xf2\x64\xd5\xb9\x89\xce\x77\x2f\xec\xae\x55\x63\xd5\x03\xea\xc8\xf4\x5f\x2a\x08\xd5\x51\x0b\x77\xcc\x8a\x73\x9b\x4d\xa6\x6e\x51\xed\xa8\x60\x33\xeb\x8d\x9a\xd2\xec\x91\x06\xf1\x91\x90\xc5\x16\x85\x83\x17\xaa\xc6\xc9\xd6\xfc\x8a\x88\x8a\x94\x8a\x20\x47\x96\x9b\x3e\x1a\x4a\xdf\xa1\x0c\x83\x86\x8a\xf7\x43\xeb\xaf\x65\x03\x8b\x1e\x22\x3b\x11\x5f\x96\x3b\x55\xed\x5c\x6b\xcb\x83\x11\xb5\x5b\x13\x05\xee\x3d\xde\x57\x8e\xd4\xf4\x81\xd3\xc6\x28\xd5\xb1\xaa\x8c\xa7\xaa\x62\x45\xca\x21\x0e\xad\x67\xbe\x13\x93\x55\xce\x2f\x45\x99\xf3\x55\x65\x6b\xbe\xd6\xfa\xa5\x1d\xb2\x4b\x54\x62\xf2\x49\xe0\xc4\x1f\x43\x68\x68\xf3\x1a\x26\xdb\xad\xe6\x53\x7b\x01\x3d\xd9\xe4\xce\xed\x37\x12\x4d\xc8\xdb\x17\xd0\x47\xd4\x7e\xe2\xc8\x64\x7b\xb5\x1b\xc2\xaa\x5e\x00\x75\x46\x2f\x03\xf5\xa6\xe4\xa1\x2f\xb3\xc6\x51\xab\x73\xb0\x0f\x5c\x5c\xee\x7d\xb8\x1e\xf4\xff\xc0\xe8\xaa\xff\x71\xb0\xf9\xa4\xb9\xf3\xe6\x25\x30\xba\x3c\x19\xe3\xf4\xcf\xab\xd3\xc1\xc5\xb6\xad\x0b\xff\xb6\x76\x70\xd1\x79\x77\x00\x9c\xf5\xc7\xc3\x0b\x7c\xec\x5f\x0d\xc7\xfd\x33\x9c\x0d\xc6\xe3\xc1\x35\xfa\xf8\x32\
x1c\x9f\xe2\x78\xd8\x1f\x5c\x0f\x46\xc3\xd1\xaf\xb9\x52\xb7\xf3\x6a\x03\xce\xe5\x76\x38\x5b\x7a\x5d\xb7\xbb\xbf\x01\xe7\xf3\xe3\x38\x3f\xe8\xa0\xdd\xee\x4b\x85\x33\x3a\xef\x9f\x69\x94\xd1\x69\xff\xfa\x0a\xa3\x9f\xf5\x84\x6e\xf7\x6d\xa3\xd8\x2d\xc9\xd8\x32\x40\xba\xaf\x0e\x1a\x51\x7e\x9c\x8a\xc7\x22\xa9\xfb\xba\xdb\x88\xf2\x6b\x44\xb8\x6b\xd9\xff\xbe\xff\xfa\x78\x1f\x38\x1d\x7c\xb8\x1e\x7c\xd1\xf6\x3a\x1b\x9c\x34\x97\xb6\x52\x57\x24\x41\x03\x36\x6c\x71\xb9\xdc\x8e\x2b\xf7\xc3\x60\x2c\xa9\xd5\x1d\x4c\x29\x6e\x4b\xb3\x78\x95\xc3\x45\xe0\xa1\x24\x6a\xf7\x44\xec\x9a\x5f\x01\xea\xba\x40\x9f\x86\xe7\x83\x33\x59\x75\xf4\xd1\x5f\x59\x05\xb2\x71\x74\x9d\xe0\x3f\xa6\xd4\x4e\xec\x38\xc8\x4b\x17\xe4\xb8\x7f\xc6\xd7\x23\x55\xd3\xd9\x02\xfa\xa0\xd8\x2a\x48\xee\x3d\x29\xd1\x04\xf2\xca\x05\x39\x1d\x34\x20\xe8\xa3\x68\x02\x7b\x3b\x67\xee\xeb\xe7\x34\x1a\xe1\xb5\x8b\x70\xd3\xbf\xa9\x9c\x73\x55\x9b\x6e\x73\xad\x14\x74\x2e\xea\xc0\x15\xf5\x9f\xfe\x9f\xc3\x8b\x96\x52\x97\x0b\x0c\xa4\x17\x49\x93\xea\x9e\xcf\xbd\xaf\xb6\x80\x16\xc8\x9b\xba\x45\x5c\xa3\xb7\xc5\x21\x56\xdd\x0c\x5b\x9a\xe4\xad\x0b\x31\x96\x10\x26\x0d\x95\x6d\x94\xfc\xe7\x03\x23\xd8\x42\x21\x7a\xcf\x57\x4a\xbd\xca\x46\x3f\x7d\xe7\x02\xfd\x79\x79\xdc\x68\x7b\x53\x70\xaa\x57\x23\xaa\xef\x8a\x3a\x19\x5e\xf4\xcf\xf0\x47\xff\xa4\xd1\x23\xdb\x4f\x7b\xa4\xeb\x9d\x12\xe8\x83\x0b\x64\x41\xc0\x20\x04\xb6\x20\x2b\xac\xb4\xcb\xda\xe9\x4d\x0b\xff\xe8\x0a\x3f\xeb\x9f\x0f\x8e\x5b\x28\x7b\xa5\xb3\x00\xa5\xbf\x3e\x14\x32\x3f\xbe\xd4\x7e\x7d\xd1\xe2\x8f\x9b\x8d\x74\x3e\x38\x37\x46\xb2\x5c\xe8\x57\xe9\x1d\xb8\x70\x15\xa0\x27\x5d\xd5\x80\x94\xa5\x8b\xa1\x20\x4e\x9a\x57\x74\xf1\xf9\x62\xab\x18\xb6\xfc\x4b\x08\x1c\xd4\x72\xbf\x10\xa5\xcb\xaf\x11\x66\x88\xb5\x68\xad\x1c\x49\x70\x81\xb5\xa4\x3f\xea\x9f\x0f\xfe\x38\xdd\xde\x0e\x44\xee\xf3\x9b\xfc\x46\xa3\xd4\x32\xbe\xc8\x3c\x32\x66\xc5\x86\x4f\x9f\x45\xf3\x7b\xad\x7d\x80\xaf\x96\x5b\xba\x87\xd9\x5c\x7a\x2d\xd5\x0b\x2b\x5f\x99\x5c\x0c\x75\x38\xa3\x8f\xcb\x89\x39\x50\x23\x65\x59\xa9\x00\x56\x58\x29\x84\x5a\x9e\xbf\x72\xf3\xbc\xed\x90\x01\x6b\x12\xad\xb8\xaa\x04\x56\xb3\xa7\x0c\x6a\x79\x5e\xac\x61\x3c\xea\x1f\x0f\x2b\x86\xe2\xa9\xcd\x72\x43\xdf\x57\x5b\x29\xdf\x37\xbb\x2a\xa8\x72\x5d\xda\x20\xb5\x0a\xa0\xc4\xab\xe4\xd9\x56\xe6\xb7\x02\xd7\x3a\x20\x26\x8c\x0e\xb9\x79\x53\x7c\xd4\xb7\xb8\x1c\xaa\x56\x07\xfe\x7d\x79\x52\xa5\xc3\x29\x8c\xd6\x92\x4a\x52\x3e\xbe\x8e\x5a\x05\xb8\x1e\x8c\x4e\x37\x4b\x77\x68\xad\x32\x5c\x77\xa7\x5a\xda\x1f\x9d\x0a\x67\x45\x7d\xef\x8d\xca\x99\xbc\x79\xf2\x79\x96\xb3\x7e\xa4\x27\xb5\xc3\x00\x8d\x57\xab\x0d\x63\x5e\xe2\xed\xe5\xc0\xf6\xde\x6a\x08\x92\xca\x0f\x3e\xa4\xa1\x6e\x0a\xa0\xee\x9b\x6e\x07\x18\x8d\xfb\xd7\xb8\x3c\xc1\x71\xff\x66\x78\x5c\x39\x8c\xf8\x81\xff\xf9\x63\x6f\xff\xfb\xfe\xfe\x7e\x17\x68\x85\x31\x5d\x2d\xd0\x43\xe1\x61\x42\x0b\xff\x9e\x16\xe8\x61\xea\xe1\x36\x5a\xd2\x18\x3d\xcc\x3c\xcc\xc2\x98\xbf\xcd\x3d\x2c\x28\x7a\xb8\xf7\x70\x1f\xde\xa3\x87\xb5\x87\x7f\xc2\x87\x28\x41\x0f\xff\x30\xf9\x0b\x3e\xec\x2f\x0f\x05\xbf\x79\xf0\xf0\x90\xce\xd0\xc3\xc2\xc3\x5d\x38\xf7\xa7\x8b\x70\x8e\x1e\xe6\x1e\xe6\x51\x12\xc6\xec\x1d\x7a\x88\x3d\xc4\xe1\x92\xb2\x71\x77\x1e\x96\x74\x89\x1e\x92\xa3\x96\x18\x22\x1e\x53\x0f\xc9\x9a\xa1\x4c\xd4\x54\xf1\x18\x79\xc8\xc3\x25\xbd\x63\xfa\x7f\xf7\x20\x55\xb9\xf5\xb0\xa2\xfe\x9c\x69\xba\xf2\xb8\x6d\xc4\x9c\x15\x7b\xf3\x9b\x87\x22\x0f\x67\x11\x7a\x58\x2a\x61\xea\x45\xdb\xc3\xdf\x29\x53\x89\x7a\xc8\x68\xce\xa4\x66\x1e\xf2\x45\x94\xf8\x39\x97\x1c\xe2\xa8\x55\xf0\x95\x7b\x98\xa6\xcb\x65\x88\x1e\x5e\x30\xb4\x2c\xe2\xeb\xf4\x3d\xe4\x74\x19\x4d\xd3\x38\x65\xc3\xbf\x7a\xc8\xe3\x90
\xcb\xf9\xdb\x43\xb8\x4a\xf3\x22\x4b\x57\xdc\x84\xdf\xb8\x5e\xa3\x22\xcc\x90\xce\x71\x1c\xde\x47\x6c\xfe\xbf\xf0\xb3\x1f\x4d\xe9\x4b\x8b\xd2\xd0\xa6\x74\x62\x28\xbd\x35\x94\xce\x24\xa5\x0b\x45\xe9\x7d\x23\xa5\x53\x45\x69\xa1\x28\x7d\xa8\x50\x7a\x57\xa5\x74\x6e\x28\x8d\x15\xa5\xcb\x0d\x94\x26\x9b\x29\xa5\x9a\x52\xf2\x18\xa5\x6b\x43\xe9\x5f\x2e\xa5\xdf\x14\xa5\x7f\x6f\xa0\x34\xd7\x94\x7e\xff\x69\xe3\x5b\x34\xfc\x6f\x29\xfd\x6f\x00\x00\x00\xff\xff\x0a\x39\xf3\x83\xf4\x2a\x00\x00") + +func fontsIvritFlfBytes() ([]byte, error) { + return bindataRead( + _fontsIvritFlf, + "fonts/ivrit.flf", + ) +} + +func fontsIvritFlf() (*asset, error) { + bytes, err := fontsIvritFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/ivrit.flf", size: 10996, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsJazmineFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\x4f\x8f\xdb\xb8\x0e\x3f\x8f\x3f\x05\x51\x3c\x40\x78\x40\x1f\xf1\xda\x93\xd6\xa7\x7c\x04\x5f\x7d\x73\x5c\x39\x9d\x6c\x33\xc3\x6e\x26\xd3\xc5\xee\xa7\x5f\x88\x14\x65\xc9\x96\xed\x78\x66\x80\x4d\x1b\x8b\x56\x6c\x92\xa2\xf8\xe7\x47\xcd\xe9\x72\xfa\x7a\xfc\x0f\x7c\xf9\x3f\xff\xff\x0a\xff\xfb\x02\xbf\x55\xd5\xc3\xc3\xc3\x03\xc0\xef\xc7\xbf\x9f\xce\xcf\x03\x9e\x2e\xa7\xaa\x7a\x00\xfe\xfc\x39\x5c\x2e\x9f\xe1\xf6\x38\x00\xd0\xf5\xfc\xfd\xfc\x7c\xbc\xc0\x75\xf8\xe3\x75\x78\xb9\x0d\x57\x78\x3a\x7f\x7f\xbc\xc1\x33\xdd\xe0\xf1\xf8\x6b\x80\xcb\xf9\xc7\xe0\xf4\x4d\xff\xce\xcb\xf9\x3b\x9c\xc1\x9d\x9d\xbb\x0c\x0e\x4e\x74\x85\xc7\xe1\xfa\x19\xfa\xd7\x9b\x9f\x1e\xbe\x9d\xdd\xe0\xe0\x46\xf0\x74\xfc\x31\xc0\xd1\xab\x01\x27\x7a\xbe\x01\xbd\xde\x80\x4e\x70\xbe\x21\xbc\x90\xe7\x74\x1d\x10\xea\xff\xb2\xa2\xc2\x1d\x7e\x1d\x9f\x7e\xfe\x75\x3d\x1c\xbf\xbd\x60\xff\x8a\x83\x7b\xad\x2a\x80\xc3\xe2\xb7\xae\xe3\xd7\x4f\xc0\xa1\xb2\x76\x7e\xe9\x8c\x52\x88\xe8\x1f\xae\xe5\xc2\xaf\xf8\x9f\xc0\xff\x66\xc0\x80\xce\xcc\xaf\xb5\xbe\xa5\x57\xf9\x21\x3e\x91\x13\x68\x11\x2d\x7a\xd1\xb6\x33\xd6\x14\x67\x6a\x44\xff\x5f\xd8\x45\xbe\x29\xeb\x74\xb4\x81\x6f\x23\x4c\x5a\xab\xf3\xad\xdc\x13\x09\x4f\x0b\xc2\x12\x13\xd6\xa9\xae\xae\x07\x40\x12\x7b\x20\xc9\x8a\xfd\xe8\x09\x1e\xbd\x14\x32\x40\xfe\x19\x7f\x1f\xec\xc6\x1c\x57\x74\xcd\xcd\x60\x83\x19\xa8\x21\xf4\x44\x4b\x88\x62\x6a\x02\xe0\x99\xae\xb5\x0d\x4b\x15\x95\x83\xce\x73\xd6\x87\x8a\x37\x47\xf7\x66\x76\x89\x9b\x19\x77\x94\x57\x07\x8e\x99\x5b\x13\x36\x38\x1b\x44\x15\x68\x51\xe4\x77\x24\x7c\xe2\xfa\x0e\x87\x8a\xc2\x33\x5d\x2f\x92\x3a\x2b\x02\xb3\x01\x79\xc0\x06\xd8\x54\xbc\x88\x74\x09\x2b\xfb\x48\x68\x59\x4d\xb1\xc2\xa1\xc2\x46\xf6\xf1\x13\x58\xf8\xe4\xef\xf3\xfd\x9b\xee\xe3\x9c\x61\x64\x4c\x96\x77\x6e\x3a\x2f\xc6\x2d\x33\x5c\xb9\x48\xf8\x34\x6c\xa6\x2c\x68\x72\x1d\xf2\x91\xfc\xa7\x30\x3f\x91\xbd\x4b\x07\xb5\x46\xae\x43\x60\x8e\x41\xda\x9a\x3f\x8b\x66\x46\xe3\x13\x27\x6e\x5c\xf2\x67\xa4\xa6\x25\x14\xbf\x41\xe2\x6c\x02\xd8\x18\x26\xd0\x19\x76\x02\xcb\x21\x12\x22\xb0\x09\x61\x8d\x2b\x61\xed\xd9\x46\xa7\x2e\x5c\xea\x42\x86\xca\xb5\xf1\xb7\xea\x8e\x22\x12\xfd\x10\xdd\x5d\xc2\x2c\xec\x01\xca\xe7\xce\x85\x26\xac\x51\x58\xfb\x89\x5e\x7f\xaa\xf7\x2c\x34\x6e\x0f\x5a\x21\x9c\x11\x82\x2d\x07\x87\xaa\x49\xfd\xc4\x42\x4a\xd4\x1c\x8d\x1b\xac\x49\xdf\xb7\x3a\x63\x7f\x16\x4c\xe4\xe3\x73\xaf\xd6\xf8\x73\xdc\x79\x35\xa8\xce\x98\xc0\xda\x67\x91\x37\xb0\xa6\x64\xd5\x1f\xec\xaf\x80\x8d\xa4\x34\x6f\x44\x9b\x7b\xf0\x3b\xb5\x5e\x60\x14\xfd\xe1\x4d\xb6\x9e\x05\xf9\x76\xcc\x6f\xbd\xb2\x9c\xaa\x82\xad\x83\xa9\xd5\xd2\xde\xb7\x43\x9e\xc7\x90\xe8\xd9\xdb
\xeb\xac\xdc\xad\xe6\xdf\x72\xce\x9b\xde\xaf\xe5\x3e\xd1\x24\x56\x1c\x52\x4d\x82\x2b\xa3\x04\x22\x4a\xb9\xc4\x10\xea\xe2\x11\x8b\x0c\xd3\x3d\xeb\xba\x31\x1a\x90\x1a\x09\xc6\x66\x56\xb2\xc7\x42\x81\x9b\x9e\x06\x4e\x58\x7b\x6d\x50\x32\xa3\x0b\x99\x11\x2c\xb1\x0c\xcf\x0e\x71\xa7\xa7\x45\x9d\x30\x5a\x0f\x1b\x2d\xba\xbe\xd0\x0a\x49\x3e\x77\xc4\xe2\x2b\xb3\x42\x81\x1d\xa3\x25\x49\x7b\x89\xb0\x4c\x1c\x7b\x84\x26\x0d\xab\x59\x83\xac\xaa\x2c\x73\xbd\x52\x21\x4d\x53\xb6\xa0\x64\x45\x29\xb5\x50\x48\x84\xc7\x98\x57\x32\x62\x7f\x7e\x25\x0a\x7e\x63\xbd\xee\x2a\xa3\xb3\x53\x61\x1c\x98\x5e\x71\x71\x22\xbc\x63\x93\x47\xad\x51\x7d\xb4\x27\x92\x9f\x9a\xa9\xfa\x3b\xab\x42\x0c\x0f\xab\xeb\xb0\x81\x75\x9c\x99\x13\x75\xee\xf3\xdb\x45\xbb\x60\xeb\xb1\x60\x64\xb9\x8b\xb5\xb6\x1a\xa7\x91\x40\xcc\x7d\xc5\xab\x08\x94\x79\x03\x71\xa9\xb5\x13\x0f\x99\x53\xa2\x7c\xea\x92\x13\x5f\x39\x54\xa2\xda\xfc\x2b\x2f\x8d\x4d\x8e\x84\x40\x00\x77\xa5\x6b\x40\x05\x71\x1b\x0a\x1d\x0b\x25\x0b\x61\xdf\x20\x4b\xbd\x51\x73\x8f\x1e\x6f\x53\xa2\x96\x36\x60\xb5\x63\xa1\xcc\x33\x56\xc6\x0c\xa0\x2c\xe7\xb1\xc8\x92\xad\xd3\x03\x80\x63\xbb\x74\x3d\x03\x0a\xcf\xb0\x23\x93\x6c\x74\x4e\x89\xcb\xac\x98\x3d\x95\x11\x45\xa8\x84\xc0\xac\xeb\x95\x6b\xd7\xdb\x34\xc4\xa6\x7b\xba\xc7\x23\xcb\xc4\xae\x64\x39\xcb\x5e\x50\xc8\x5e\x69\x0c\x65\x54\x3d\x05\x15\x45\xd3\x8c\x0b\x18\xd5\x9c\x52\x0e\x95\xea\x7a\x3b\xa6\xfc\x46\x97\x51\x6f\xa5\xc8\x37\xe6\xe1\xbb\x63\xab\xb8\x13\x9a\xb6\xe6\x88\x71\xef\x4e\xa4\xe5\xde\x16\xdb\xb2\x3d\xed\xd8\xd4\x21\x3f\xde\x6b\x92\x80\x2a\x84\x4c\xc7\x41\x66\x42\xf7\x2b\x7d\xb4\x37\x8e\x51\x20\x93\xb7\xe9\xe5\x0d\x8d\x52\x12\x31\x69\x36\x0e\xa4\xeb\x55\xa6\xc7\xae\x41\xa8\x47\xaf\x26\x5a\x29\x2b\x54\x4b\x75\x3c\x49\xce\x41\xef\x51\xf1\xde\x19\xc5\x80\x8c\x8f\x15\x36\x08\x56\x96\x52\xc6\xcd\xb7\xe6\x8a\x2d\x71\xb2\x34\x12\x53\xa9\xa1\xcc\x3b\xb7\x3b\x05\x8c\x2e\x74\x5e\xba\x04\x5d\x80\x0b\x6d\x81\xbb\xa7\xbb\x4b\x9b\xa3\xfb\x86\x84\xe7\xf4\x04\x83\x55\xc4\x18\x35\xa4\x88\x00\x02\xa9\x78\x35\xe0\xb7\x88\x61\x15\xc5\xd6\x33\xcb\x66\x5e\x33\xae\xbd\x74\xcc\x52\x18\x42\xc9\x9d\xe9\x8a\xaa\x03\x3a\x23\x88\xde\x3b\xd5\xd8\xc1\x42\x8a\x7a\x17\xa8\x82\x86\x33\x0f\xdf\x33\xd0\x92\x5d\x19\x59\xb1\x13\xcd\x7a\x1b\x55\x64\xf9\xcc\x23\x27\x62\x66\x43\x45\x23\x0b\x18\x67\x0b\x99\x15\x80\xe9\x72\xf9\xda\x7d\x14\xb0\xa4\x35\x73\x34\x4a\xec\x6d\x1a\x92\xa4\x1d\x09\x66\xfd\xfe\x64\xb9\xae\x75\xb4\x75\x04\xc8\x2d\xe9\x39\xec\x06\x6b\x8e\xe6\x58\xcf\x7c\xa9\x6b\x60\xbc\xcb\x87\x34\x79\xcc\x02\x73\xdb\xae\x2b\xde\x20\x40\xd7\x9b\x03\xc7\xce\xef\xad\xde\x10\x89\x0d\x54\xc4\xa9\x3a\x50\x74\xdf\x09\xd8\xe6\x2b\x82\x7a\xc3\x99\xc1\xfa\x02\xf8\xb8\x50\x56\xa2\x87\xc3\xa1\xf9\xb7\x9a\xad\xd6\x4f\xbb\xcb\x20\xbd\x08\xd4\xa7\xa9\x65\xa4\x88\x5a\x8a\x27\x2a\x36\x1a\x70\x46\x49\x01\xdc\x80\x36\x85\xe3\x08\x17\x99\xc7\x5e\x30\x1d\x73\x20\x5f\x2a\x48\xfb\xbd\xea\xc3\x12\x41\xee\x55\xca\xda\x42\x60\x6d\x21\xb0\x56\xfc\xfa\x91\xb1\xb0\xd0\xfd\x15\x2c\x3c\xb2\xed\x4c\xa1\xbf\xd9\x3e\x9f\x59\xd1\xb5\xed\x51\x6b\xa9\x69\xfb\x7d\xf9\x50\x9a\x43\x05\x1f\x3e\x72\x6c\x93\xdc\x65\x43\x5a\x93\xef\xca\x2c\x2b\xa8\xf4\xbd\x3e\x10\x59\xb7\xf1\x88\xb2\x47\x85\x84\x6d\x13\x01\xe1\xc6\xb9\x45\x16\x67\x53\x5a\x70\x5b\x14\x83\x0d\x46\x49\x1e\x2f\xa8\x30\xc3\xdf\x88\xdb\x26\x08\x34\xa7\x57\xd7\xd4\x11\x00\x05\x80\x18\xc0\x9c\x33\xf2\x27\x25\x32\x3e\xdb\xdc\xdb\x43\xee\xdf\x89\xbb\x73\x7c\xc9\x0d\x63\x53\x84\x4e\xca\x92\xe6\xca\xbb\x0a\x1c\x6a\xc0\x2a\xc6\x01\x39\x3b\x02\x85\xdb\x53\xe4\xd9\x89\xd7\xd7\x39\x42\x4a\x33\xeb\x34\xeb\x66\x99\xb6\x8b\x87\xfe\xf2\xa8\xfc\x59\x8e\x02\x8a\x6e\x7
1\x86\x1e\x31\x1e\x90\xe0\xdc\xed\x59\x49\xdf\x8f\x98\x1d\x98\x71\xa4\x0a\x69\x3a\x4d\xd8\x85\x7f\xff\xf6\xe4\x3f\x01\x00\x00\xff\xff\x6f\x29\x93\x28\x3c\x20\x00\x00") + +func fontsJazmineFlfBytes() ([]byte, error) { + return bindataRead( + _fontsJazmineFlf, + "fonts/jazmine.flf", + ) +} + +func fontsJazmineFlf() (*asset, error) { + bytes, err := fontsJazmineFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/jazmine.flf", size: 8252, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsJerusalemFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x59\x6d\x6f\xe3\xb8\x11\xfe\xce\x5f\xf1\x60\x11\x34\xf6\x35\x31\xcf\xb7\xbd\x3d\xdc\xde\x66\xa1\xa2\x7b\xd7\xf6\x80\x7e\x28\xae\xd8\x7e\x21\xc0\xa5\x2d\xca\xd2\x45\x96\x5c\x49\x4e\xe2\x05\xd1\xdf\x5e\xf0\x4d\x22\x65\x39\xf6\x6e\x13\x04\x12\x65\x91\x33\xc3\x99\x67\xde\xa8\xac\xcc\xbe\x13\x57\xf8\x01\x6f\xb0\xfc\xf6\x5b\x2c\xbf\xc7\xeb\x25\x96\xe4\x57\xd9\xec\x5b\x51\xca\x2d\x56\x07\xfc\x55\xa6\xa2\x2c\x44\x8e\x5f\x9a\x42\xa6\xb2\x5a\xc9\x66\x83\x5b\xac\x44\x2b\x53\xd4\x15\x7e\xeb\x44\x95\x8a\x26\x35\x73\x17\xf8\x4b\x2e\x76\x3b\x59\x96\xf8\x03\xfe\x2e\x2a\xfd\x58\x90\x7f\xee\x65\xdb\x15\x75\xd5\x42\x54\x29\xd6\xf5\x76\x2b\xab\xae\x45\x23\x37\xa2\x49\x8b\x6a\x83\xdf\x3d\xc3\x45\x56\x66\xe8\x6a\x6c\x32\xcb\x2c\xa9\x0e\x4f\x8b\x75\xbb\x48\xf7\x0b\x99\xee\xc9\x3f\xea\xb4\xc8\x0a\x99\x22\xab\x1b\x64\xc5\xa6\x94\x1d\xbe\x5b\x2c\x0d\xeb\x52\x56\xd5\xc0\x7d\xf9\x06\x1f\xe4\x1a\xcb\x1f\x7f\x7c\x4d\x3e\x88\x4e\xbe\xc5\xf2\x35\x7e\x91\x2b\xfd\xcb\x9f\x08\xf9\xf9\x69\x57\x8a\x4a\x68\xa1\x50\x67\xc8\x8a\xa6\xed\x50\x16\x95\x7c\x4b\xb4\x4e\x70\x8b\x57\x5b\xb1\x29\xd6\xa8\xf6\xdb\x95\x6c\x5e\x39\x86\xa5\x44\x91\xca\xaa\x2b\xb2\x62\x6d\x16\x13\x01\x00\xb7\x68\xf3\x7a\x5f\xa6\x10\xe5\xa3\x38\xb4\x58\x49\x7c\x12\xd7\x37\x66\x51\x55\x3f\x92\x2b\x3b\xa9\xcb\x25\x5e\xe5\xa2\x49\x57\xa5\xa8\xee\x5f\xe1\xf6\x16\xbb\xa6\xd0\x9a\x10\x2d\x04\xcc\xaf\x37\x58\xed\x3b\xac\x45\x75\xdd\x69\x32\xed\x76\xdf\xe6\x32\x25\x3f\x58\x0a\xb9\x2c\x36\x79\xa7\x25\x16\x58\xe7\xa2\x11\xeb\x4e\x36\xe4\xcd\x33\x2f\x6f\x50\xd5\x1d\x8a\x6a\x5d\xee\x8d\xa6\x53\xd9\xae\x65\x95\xca\xa6\x25\xda\xe2\xb8\x05\xb6\xe2\xc9\x6c\x1d\xa5\xac\x36\x5d\x8e\x99\x7c\xf2\xb3\x9d\xa9\xcc\xeb\x76\x8e\x3f\x42\x20\xdb\xa7\x1b\x89\x4c\xac\xbb\xba\x21\xcb\xef\x0d\xe7\x54\x66\x62\x5f\x76\x56\xda\x6d\x9d\x4a\xb3\xf3\x2e\x2f\x5a\x64\x75\xd5\x91\xd7\x4b\x33\xcd\xea\x52\x0b\x18\xd1\x25\x4b\x2b\xbf\xd1\x05\x1a\xbd\x89\xdb\xae\xbe\x2d\x65\xd6\x61\x16\x18\xb9\x6e\x50\x8a\x4e\x13\xa8\xca\xc3\x9c\x90\xbf\xc9\x55\x23\x1f\x71\x2f\x0f\xab\x5a\xe3\x6f\x2b\x76\xed\x5b\x42\xfe\x5c\x96\x58\x8b\x5d\xd1\x89\x12\xa5\xec\x3a\xd9\xb4\x8e\xb4\xd6\xbf\xac\x36\x65\xd1\xe6\x83\x82\x9c\xa8\xa2\xd3\x84\x16\x80\x5e\x6e\x05\x6d\x41\x34\x5c\xdb\xc3\x76\x55\x97\x2d\x44\x23\x0d\x85\x56\x6c\x25\x8a\x4a\xff\x5b\x09\xe8\xcf\x8e\xa6\x7c\x5a\xcb\x5d\xe7\x08\xd6\xad\xd4\xa8\x78\xac\x90\xcb\x46\x2e\xc8\xbf\x72\x89\x6f\x1c\xb1\xc8\xec\xbf\xca\x47\xbd\xb8\xed\x44\x83\xd9\x6f\xfa\x5a\x67\xf8\x20\x1e\x8a\x74\xbe\x20\xff\xce\x0d\xe6\x2a\x07\xf6\x1b\x74\x87\x9d\xc4\x7f\xb5\xdf\x09\x6b\xb4\xd5\x01\x45\xd7\x4a\xeb\x36\xad\x94\x56\xed\x65\xd1\x76\x6f\x09\x11\xa5\xdc\xe5\x00\xee\xd0\x41\xe1\xb3\x38\x14\x15\xcc\xe3\x67\x28\x94\x62\x2b\x53\xf3\x74\x0f\x85\xe1\xdd\x06\x0a\x6d\x5e\x54\xb4\xd5\xbf\xdc\x41\x90\x95\xec\xe8\x83\xec\x70\x87\x35\x14\xd6\xb9\xec\x60\xa7\xfe\x0e\x85\xad\xdc\xba\xa7\x0a\x0a\x3b\x79\xa0\x99\x3c\xe0\x0e\x3b\x28\x74\x22\xa3\xad\x
c8\xcc\xdb\x1b\xb2\x29\xb6\x5b\x59\xea\x71\xaa\xdf\x39\x2a\xb8\xc3\x01\x0a\x59\x51\x89\x12\xaf\x70\x87\x3a\x7a\xfa\x09\x0a\x37\x33\x0d\x18\x31\xd7\x73\xaf\x49\x2a\x4a\x27\x77\x0b\x85\xc3\x3e\xf5\x64\x72\x28\x54\xfb\xca\x3d\xad\x34\x8f\xcf\x42\xa4\x85\x7e\xda\x42\x61\x31\xdb\xc9\xa6\xa8\xd3\x39\xee\x40\x49\x2e\x0e\x6e\xe6\x03\x14\xee\x45\x46\xd7\xb9\xc8\x70\x87\x2c\xe2\x5f\x44\x4f\x0b\x28\xfc\x04\xff\x77\x87\x4f\xe4\x41\x3c\xb8\xf1\x3e\x98\x89\x3b\x94\x5a\x8d\x62\x2b\xef\x73\xfd\xf4\xa4\x79\xec\x33\x37\x55\x42\x81\x06\x64\xfe\x63\xe0\x21\xda\x4e\x36\x45\x7b\x8f\xd9\x37\x73\x14\x1a\x1b\x11\x1c\xec\x64\x85\x46\xb6\xb9\x5d\xd6\x40\xe1\x7a\x26\x76\x75\xdb\x2d\xf4\xa6\x1e\x09\xb9\x4a\x8e\xff\x13\x02\x20\x21\xe0\x48\x88\x82\xf2\x17\xae\x12\x32\xe3\x73\xfb\xd6\x5c\xdd\x2c\x3d\x6f\x06\x05\xfd\xea\x23\x3e\xea\x1f\x71\x85\xf0\x16\xac\xf0\xcb\x00\x6e\xfe\x0d\x09\x05\xa5\xa0\x0c\x3f\x0e\x2c\x16\x00\x57\x76\xac\xff\xf4\x18\x50\x5c\x29\xae\x7a\x7a\x11\x51\x7d\xe3\xe6\xa6\xa0\x90\x10\x0a\xae\x05\x65\x9c\x83\x69\xd9\x00\xaa\xdf\x71\x35\x88\x61\x24\x07\xe7\x66\x4f\xd4\xbc\x07\x05\xd5\xbf\x53\x50\x2d\x0a\xe5\xd4\x6f\x37\x96\xdf\x70\xe3\x4e\xf6\x19\x38\xe6\xb0\xcb\x38\x18\x65\x5a\x5d\x33\xfe\x1e\x78\x97\x10\x30\xce\x39\x65\x34\x58\xeb\x95\x3b\xba\x58\x1d\xce\x13\xa2\xa8\x67\x65\xd9\x58\x79\x8c\x0d\x10\x5f\xc1\x38\xeb\xa7\x72\xb3\x7d\x06\xe6\x94\x10\x5f\x29\xa7\xd1\xce\xe1\x17\x51\xa6\x39\x30\x58\x0d\x51\xad\x11\xa6\xdf\x33\x1a\xcc\xeb\xa5\x8f\x07\x81\xf1\xbc\xe5\x06\x53\x85\x93\x8f\x2d\x6f\xfe\xcc\x58\xe9\x27\xc0\x2e\xd3\x4f\x56\x6a\x3e\x8c\x15\xe7\x9a\x9c\xb2\xa4\xc7\x96\x1f\x0b\xe5\x4d\xeb\x18\x28\x33\xb0\x2b\xaf\x30\x2d\x94\x99\x6e\xf6\x93\x10\xb5\x30\x52\x38\xc5\x81\x52\xa7\x4e\x4a\x7b\x74\xd9\x15\x66\x8b\x67\xcc\x39\xf2\x95\x00\x38\x1e\x2d\xce\x96\xde\xc9\xb8\xe5\x6c\x40\x33\x16\xd5\x11\xa5\x83\x3f\xf6\x4e\x39\xe6\xc1\x1d\x0f\xbd\x79\x0b\x08\xed\x0e\x86\xb4\x76\x0c\x3a\xd2\xcb\x91\x3a\x43\xe5\x59\x8f\xd0\x16\x66\xee\xd5\xdc\x1b\xe5\x58\xc6\xd0\xb1\xcc\xc8\xfa\xf5\x30\x72\x54\x7b\xa0\x8c\x90\x72\x62\x1b\x30\x08\x08\xf7\x73\x5e\x0c\xbb\x69\xa0\xf7\x67\x43\xe8\xda\xeb\x7c\xe6\xf4\x31\xa9\xea\x29\x35\x58\x84\xfa\x08\x61\xee\x86\x34\xa7\xd3\xa8\x0a\x4d\x6d\x43\x44\x64\xf3\x4b\xf8\x4f\x40\x25\x58\x76\xe3\x25\x9a\x84\xca\x08\x84\xd3\x70\xec\x37\x69\x03\xbd\x77\x44\x63\xa7\xc1\xf5\x14\x3c\x4c\x82\x11\x57\x93\x41\xca\xaa\xc7\x45\x21\x66\x43\xc9\x29\x5f\x3d\x72\xd1\x73\xbe\x3a\x0a\x72\x4c\x87\x76\xc7\xb6\xd7\x41\x98\x97\x3c\x75\x33\xcf\x4e\xb4\x29\x80\x18\x3d\x4e\xe4\xa5\x3e\x38\xc1\x19\xd9\xe3\x4d\xef\x8b\x7f\x72\xca\xc1\xcc\xfb\xa9\xb1\x83\x95\x98\x39\x24\x8e\xa9\x19\x47\x80\xc3\x0e\x43\x4f\x58\x8f\x9d\x3f\x5a\x1e\x16\x49\xbd\xce\x9e\x77\x07\x03\x27\x6d\x31\x1f\x42\x2e\x73\x07\x1b\x73\x0d\x4f\xb7\x15\xef\xa5\xee\x15\x3b\x13\x15\x30\x62\x3b\x8a\x5c\xcf\xf2\x1f\x80\xe6\xed\xad\xa3\x40\xc4\xff\xb2\xa8\x84\x41\x7e\x1f\x65\x3c\x21\xae\xf0\xac\x3b\x1e\xef\xdf\xd5\x38\x41\xe4\x3d\xc9\x1f\xfd\xec\xf1\xb6\x8d\x95\x6d\x34\x8e\xf2\xd4\x04\x1a\x6d\xe0\x73\x09\xdc\x65\xf3\x80\xe7\x91\xab\x70\x37\x70\x69\x11\x17\xe5\x8a\xa8\xbc\xd1\xd9\xcb\x16\x10\xd7\xc6\x3b\x15\x16\xd6\x7c\x5c\x05\x68\x3b\xda\xaa\x33\xcc\xf1\xe0\x9c\xa9\x7c\x3e\xf5\xc6\xd2\xa5\x84\x15\x99\x51\x2f\x7c\x9f\xda\xc7\x89\xfd\x84\xc6\x99\xdf\x36\x98\x27\xc1\x7a\x8d\xb3\x53\x16\xfb\xea\x5c\xfb\x0c\xe2\x9d\xa3\xa1\xcf\xa3\x67\x10\x77\x01\xff\xd0\xe7\xbf\x80\x3f\xde\xc1\xef\xff\xcc\x7a\xea\x13\x28\xfb\xb2\x04\x3a\x44\xe8\xa1\xb2\x83\xcb\xd7\xc3\xe0\x44\xad\x77\xc2\x63\x2e\xd6\xbf\x8f\xf7\xb0\x28\x66\x26\x78\xda\x20\xae\xc7\x3e\x11\x33\x7c\x84\xaf\x55\x59\x9c\x8c\x47\
x95\x67\x40\x30\xa2\x19\x93\x8d\x29\xc7\xc4\x8f\xe8\xc7\x0e\xc7\x79\x4f\xd6\x13\xf4\xfd\x84\x8f\xf0\x27\x1c\xce\x56\x44\x4e\x22\xbf\xf6\xa3\x2f\xbd\xce\x68\xda\x1b\xca\x55\x56\x93\x8d\x4c\xe8\xaa\x43\xe2\xf6\xe1\x58\x4d\x75\x17\x8a\xf7\x39\x7e\xd0\x9c\xd7\x99\xd3\x96\xd3\x13\xf3\x1a\x1a\x6f\xae\xe7\xc1\xc7\x0d\x49\x5f\x5f\x04\x75\x04\x35\x10\xa7\xcc\x48\x7a\x85\xf1\xf5\x6c\xcd\x7f\x3c\x18\x97\x18\xcf\x55\x47\x71\xef\x35\x06\x71\x04\xe4\x08\xcc\xfd\x03\x35\xa5\xd8\x50\x5c\x6b\x5c\xc7\x60\x89\x3a\x30\x97\x8c\x54\x5f\x5a\xba\x08\x1f\x64\xd2\xb1\xc9\xa2\xc5\x3e\xb6\x28\x3b\x52\x3d\x2b\x97\x5b\xec\x96\xa1\xb8\xb3\xbf\x11\x28\xa2\x73\xba\xf0\x51\x11\xc5\x80\xa0\xeb\xae\x0d\xad\x53\xa5\x4a\x5c\x55\xf2\xa1\x78\xf6\xc9\xcb\xea\xac\xcf\x83\x38\xee\xa7\x62\xfb\xf5\x64\x18\xa2\x42\xd4\x09\xe4\xf5\x4d\x27\xfb\x08\x84\xdd\x9d\xc1\xaa\xab\x62\x99\x4b\x25\xe0\x9c\xe1\xd3\x40\x66\x2a\x1b\x85\xf9\x1b\xbe\xc3\x75\x75\x6a\x78\x7b\x76\xc5\xe4\x2d\xf0\xb1\x50\x73\xae\x15\xed\x5b\xe3\xa0\x33\x0e\x87\xc7\x7d\x71\xd2\x67\xef\x3e\x59\x8f\xcd\x30\x34\x31\x7d\x17\x33\x6e\x63\xc6\xd8\xe0\x63\x7b\x2e\x54\x8c\x8d\x67\x86\xe1\x99\x0b\xc2\xc6\x8c\xf3\x85\x0f\x1f\xbd\x40\x43\x34\xe7\xda\x2c\x36\xc4\x78\xbb\xc4\xde\xf4\x35\x7a\xe3\xbd\xde\x54\x6c\xe8\x23\x11\xf9\x98\x5e\x48\x70\xa0\xe8\x11\x08\xef\xf2\x27\xce\x28\xc6\x22\x06\x14\x99\xde\xe7\x00\xe8\x10\xd1\x27\x44\xb4\x77\xd3\x74\xf9\xd4\x15\x04\x7c\xf4\xbd\xc4\x44\xf4\x38\xe5\x9a\x71\x73\x17\xb5\x79\x17\x63\x02\x67\x30\xc1\xd5\x74\x6a\x9e\x4a\x7f\x0a\x2e\xfd\x99\x42\x8f\x3d\x53\xe8\x9c\x75\xb4\x89\xd2\x7a\x4a\xfe\x20\x48\x5d\xec\x68\x43\x1a\xf1\xc9\x83\xd8\x48\xe0\xc2\xc1\x57\x40\x54\x87\xb1\x01\x4f\xf4\x64\x31\xd3\x47\x35\xde\x9f\x5a\xd1\xff\x1f\xa2\x01\xea\x99\x23\xcf\x7c\x6e\xf2\x80\x0a\x87\x5c\x4d\xd7\x5b\x2e\x9d\x24\x43\xdb\x6d\x70\xf0\x0e\xef\x30\xb4\x3d\x18\xce\x2f\x8f\xcf\x9a\xa3\x03\xae\x9e\xaa\x3b\xd0\xf1\xa5\x87\xa7\xf3\x1e\xef\x87\x93\x5f\x93\x75\x93\x17\xfa\x92\x91\xbc\xd0\xa7\x8c\xe4\x85\xbe\x65\x24\x2f\xf4\x31\x23\x79\xa1\xaf\x19\xc9\x0b\x7d\xce\x48\x5e\xe8\x7b\xc6\x64\xfb\x31\xe3\x73\xee\x0e\xbf\x28\x67\x51\x43\xa6\xd1\x72\xba\x7d\x3a\x5a\x1f\xf4\x71\x97\xb4\x8f\xc3\x7a\x98\xf5\x5f\xda\x7e\x4e\xf1\x77\x87\x50\xb3\x61\xfd\xcd\xb9\x03\x8b\x29\xf9\x2f\x3a\xff\xbc\x4c\xfe\x49\xfe\x41\x85\x6b\x98\x86\xc7\x10\x8a\xb9\x67\xa5\x0b\x52\x17\x44\x12\xf2\xbf\x00\x00\x00\xff\xff\xa5\xc8\x5b\x7a\x5c\x20\x00\x00") + +func fontsJerusalemFlfBytes() ([]byte, error) { + return bindataRead( + _fontsJerusalemFlf, + "fonts/jerusalem.flf", + ) +} + +func fontsJerusalemFlf() (*asset, error) { + bytes, err := fontsJerusalemFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/jerusalem.flf", size: 8284, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsKatakanaFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\xdb\x8e\xdb\x36\x13\xbe\x8e\x9f\xe2\x03\x46\x97\xff\x2c\xfe\x0d\x8a\xb6\x29\x82\x60\x93\xa6\xc7\xf4\x98\xa6\x69\xd3\x3b\xee\x46\x5e\xab\x5e\x4b\xad\x65\x35\xf0\xdb\x17\xb6\x25\x72\x86\x1c\x4a\x32\x36\xcd\x06\x16\x49\x49\x9c\xc3\x37\x47\x2a\xcb\xbb\xe5\x43\x57\xe0\x53\x7c\x82\x87\xff\x07\x5f\xe2\xe3\xc5\x0b\xb7\x73\x6b\x57\xbb\x8b\xe5\xdd\x12\xd7\x7b\xbc\xae\xea\xba\xdc\xe3\xd5\xca\x55\x78\xdc\xb6\xcb\xaa\xda\x5d\x95\xdb\xaa\xbd\xb8\x69\x2f\xba\xcd\xf5\x45\xf9\xb6\x7b\x82\xe7\x6e\x57\x7e\x86\xa7\xdd\x6d\xd7\xee\x70\x79\xf9\x3f\x5c\x3e\x7a\xf4\xd1\xe2\xda\xb5\xe5\x5b\x34\x35\x9e\xb9\xba\x2e\xb7\x07\x6a\x2f\xf7\xae\xc6\x9b\xa6\xbb\x59\xe3\xf1\xfe\x30\x5c\x1d\xc8\x6c\xcb\xdb\xaa\x76\x17\x37\xee\xc9\xc2\xf1\x53\x54\xfc\xec\x41\xc7\x9f\xa3\xe4\xe7\x0f\x1a\xfe\x02\x6b\xc7\x5f\x62\x5d\xf1\x57\x58\x77\xfc\x35\xd6\x25\x7f\xf3\x60\xdd\xf0\xb7\x68\x1d\xbf\x40\xbb\xaa\xf8\x3b\xb4\x1d\x7f\x8f\xb6\xe4\x1f\xd0\x36\xfc\x23\x76\x8e\x7f\xc2\xe2\x66\x55\xf1\xcf\xd8\xb5\x1d\xbf\xc4\xae\xe4\x5f\xb0\x6b\xf8\x15\x6a\xc7\xbf\xa2\xae\xf8\x35\xea\x8e\x7f\x43\x5d\xf2\xef\xa8\x1b\x7e\x83\x95\xe3\x3f\xb0\xaa\xd8\x61\xd9\xf1\x35\x56\x25\xdf\x60\xd5\xf0\x5b\x6c\x1c\x97\x8b\x4d\xc5\x4b\x6c\x3a\xbe\xc5\xa6\xe4\x15\x36\x0d\x57\xd8\x3b\xfe\x13\xfb\x8e\xd7\x28\xf9\x0e\xfb\x86\x37\xd8\x3a\xae\xb1\xad\xb8\xc1\xb6\xe3\xbf\xb0\x2d\xf9\x6f\x6c\x1b\xde\xe2\x9d\xe3\x16\x15\xef\xf0\xae\xe1\x0e\x8b\x9a\xff\x59\x14\x45\x71\x35\xe3\x72\xb5\x20\xa2\x42\x5f\x41\x38\x5c\x81\xf8\xfe\xe1\xce\x71\x01\x7f\x1b\x7e\x0b\x4e\xdb\x0a\x1c\xff\xd4\x0c\x63\xb3\xc3\x94\x0e\xfb\x0b\x39\xa3\xd3\xdf\xe4\x3d\x39\x0b\x04\x8f\xef\x1d\xb7\x1c\x9e\x83\xfc\xec\xf8\xa2\x7f\x8a\xe8\x29\xc5\x4f\x21\x54\x1e\x5e\xa4\x5e\xd1\xa3\xf2\x08\x64\xbc\x34\x1e\x10\x1a\x48\xc3\xe3\x27\x54\xf6\xd2\x20\xa8\x22\x24\xec\xe1\x87\x24\xd3\x1b\xe6\xf8\x94\x12\x09\x23\xfb\x79\xd9\xac\xeb\x89\x5b\xcf\xbe\xa7\x9e\x0c\x42\xae\x9e\xd9\x89\x93\x57\xb5\xd7\xdd\x18\x7a\x84\xbc\x61\x4e\x1c\xbd\xed\xbd\xb7\x4c\x18\x57\xbc\x67\xfa\x4d\x40\x5d\x8e\x92\x90\x7e\x0e\x39\x66\xc1\xc9\x81\x99\x50\xf0\x63\xe0\x68\x3f\x3f\x8b\xa3\x0a\xb6\x93\xb6\x9e\xb6\x07\xc3\xf2\x3c\x01\xae\x37\xa3\x15\x6c\x14\xa3\x4b\x9e\x89\x35\x53\xd6\xa2\x34\xd8\xb4\xff\x9e\x36\x48\xf1\xec\x89\x82\x2c\x8d\x5a\xa9\x74\x14\x97\x41\x2d\x31\x23\x4d\xef\x2c\x82\x30\x95\xb6\xd2\x80\xe6\x4c\xd9\x99\x96\x26\x90\x31\xf2\x4a\xff\x62\x5e\xa9\xb3\x24\xb4\x54\xb6\x08\xda\x76\xb6\x33\x9f\x94\x70\x8e\xff\x99\xb3\x19\x12\x5a\x56\xbe\x97\xca\x61\x0b\x9d\x87\x21\xcc\x52\x98\xb9\x9f\xad\x94\xa3\x69\x44\xa4\x49\x9d\x82\x91\x49\xaf\x66\xfe\x89\xf3\xce\x44\x1e\x9a\x62\x62\x89\x33\x2f\x96\x4e\x99\x21\xf2\x08\xc1\x3d\xbe\x97\x33\xdb\xb1\xa8\xa6\xb3\x28\xee\xc7\x3c\x55\x19\x9b\x64\xe8\x05\x11\x28\x92\x4c\xb9\xab\xca\xa2\x4a\x6c\x9d\x92\x07\x36\x24\xb2\xdf\x50\x7f\xa0\x4a\xce\xe4\x54\x10\x26\x1d\xab\x3a\x3b\xa8\xaa\x2f\x75\x0b\xf0\x12\x09\x90\xc6\x35\x08\x6a\x47\x89\x46\x45\xed\xf4\xc2\x00\x3e\x62\x44\xb9\xf7\x44\x35\x11\xdd\x94\xc0\xef\xa4\x83\x2e\x3a\x94\xd3\x28\x2b\x10\x49\xe8\xe4\x42\xfa\x72\xdf\x6a\x09\xe3\x5b\x8c\x48\xd7\xd9\xde\x31\x33\x9d\xa0\xd9\xc4\x24\x9d\xa0\x76\x41\x21\x39\x09\xa3\xd3\xfb\x30\xfa\x28\x23\x52\x95\xed\xbc\x60\xc9\x7b\x97\xed\x1c\x67\x2c\xfc\x7e\x8b\x11\x21\xe7\x5d\x94\x18\x50\x2e\xc8\xf6\xe9\xa4\x61\xd2\x8c\xa0\x4e\x12\x50\xbe\xa2\xc9\x0b\xfa\xc2\x36\xca\x38\x7e\x1e\xf1\x53\xb9\x4c\x48\x2f\x64\xcf\x26\x9e\x21\x72\x84\xbf\x24\x29\x46\x99\x8d\xb2\x86\x92\x1a\x66\xf7\x60\x70\x23\x2b\x4e\x14\x60\xb1\x30\xa4\x6c\x21\xa1\x92\x40\x49\x98\x24\x07\xe5\xce\xd2\x9b\x65\xec\x0e\x87\x3
5\x11\x9a\xe7\x32\x40\x88\xb8\x5e\xcf\x39\xd9\x2f\x97\x31\xa5\x27\x68\xa8\x54\x05\xfc\x2f\xa0\x1a\xa4\x2f\x26\xa3\x33\x9b\xee\x0d\x37\x36\x8d\x2e\x4e\x91\x43\x23\xe1\x4f\xe2\xe9\x09\x73\x56\xfa\x9e\x25\x14\x54\x7b\x60\x20\x61\x55\x3e\xb5\xc8\x81\x34\x81\x58\x06\x8a\xb1\xbc\x17\xb7\xd2\x14\x19\x36\xc0\x4c\xca\xb4\xe7\x57\x3e\xc5\x48\x9c\xde\x84\xae\x43\xb5\xcd\xc0\x9d\x40\xa7\x48\xbf\xe7\x88\xb6\x1e\xf4\x80\x91\x4c\x3f\x3a\xf3\xca\x06\x29\xdd\x1b\xd9\x64\x78\x79\xce\x48\xaa\x9b\xd6\x1d\x68\xe0\x10\x15\x90\xe0\x9a\x01\xcf\x5c\xf1\x57\x4d\xca\x8c\x31\x91\x28\x1c\xa7\x43\xda\x9f\xf1\x21\x62\x11\x83\x5e\xdc\x6f\xe6\x9d\xce\xfe\x90\x18\x8e\x44\xc9\x35\x98\x2e\x7c\x2a\x1b\x28\x4a\x73\x8c\x4d\x91\xfa\xfc\x54\x11\x9d\xaa\xa7\xda\x55\x93\x22\x6a\x79\x58\x9a\x1b\x49\x92\x02\x92\x46\x16\x53\x49\xa4\x97\x6f\x3a\x2d\x52\xd2\x6e\x0a\x77\x50\x3e\x31\x1e\xdb\x73\x92\x88\x8a\xef\x5c\x27\x85\xf8\xc8\x27\x6c\x22\xb2\x43\x31\x7c\x5a\xf5\x05\xda\xef\x18\x7d\x34\xbf\x14\x18\x67\xba\xa8\x5f\x84\x91\xa0\x2d\x68\x2c\x34\x68\x1e\x1a\x14\x9e\x50\xda\x98\xe6\xaa\x34\x65\x0d\xa2\x1a\x89\xfc\x02\x63\xad\x99\xf8\x08\x28\xfb\xc7\x00\x1d\xc5\xfd\x70\x44\x7b\xda\xd8\x72\x65\xd6\xd7\xa9\xc5\xdc\x62\x3b\xc5\x28\xeb\xa4\xa3\x75\x73\xac\xaa\xdb\x35\x50\x1d\x58\x60\xd6\x46\xf5\x81\x90\x8c\x1a\x98\xeb\x42\x94\x88\x64\x30\x9f\x3c\x1a\x6a\x4d\x44\xe1\xb4\x46\xd5\xfc\x0f\xa9\x30\x2a\x1c\xa4\x4b\xdf\xf8\x82\x42\xa2\x12\xe5\xdb\x7f\x32\x89\xc4\x8e\x24\xc5\x74\x25\xf0\xb8\x84\xb6\x73\x6e\x1a\x2f\xb4\x61\x73\x47\x17\x32\x8c\x4c\x93\xfd\xc6\x98\xc9\x32\x59\x55\xf7\x11\xba\x21\x34\x8d\xe9\xf7\x48\xff\xa7\x71\xc3\x8c\x95\x93\x6c\x9d\xb0\xe3\x24\xd1\xc8\xbb\xf7\xb8\x12\x64\x76\x6d\x49\xb3\x29\x71\xd7\xe7\x52\xdc\xa3\xd9\x2c\x46\x7f\x1f\xe6\x05\xf9\x9f\xa7\x62\xa4\x68\x2d\xbe\x29\x45\xfd\x68\x11\x7e\x28\x92\x75\x5f\x41\x55\x34\x87\x91\x8c\xfb\x2a\xf3\xf4\x5f\x69\x21\xcb\x81\xf8\xe8\x19\x10\x9d\x3f\xbb\x5a\x44\xff\x3e\xd4\x8d\x7f\x03\x00\x00\xff\xff\x28\xc0\xd7\xf9\x8d\x20\x00\x00") + +func fontsKatakanaFlfBytes() ([]byte, error) { + return bindataRead( + _fontsKatakanaFlf, + "fonts/katakana.flf", + ) +} + +func fontsKatakanaFlf() (*asset, error) { + bytes, err := fontsKatakanaFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/katakana.flf", size: 8333, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsKbanFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x58\x5b\x6b\x1c\x47\x13\x7d\x9f\x5f\x71\xf8\x90\xa9\xd5\xb7\x3b\xd5\x91\x13\x1c\x0c\xb2\x32\xcf\xb9\xbc\x04\x42\x1e\x32\x61\x24\x13\x39\x16\x04\x3d\xc4\x26\x44\x6c\xeb\xbf\x87\xae\xaa\xbe\x4c\xcf\x4c\xef\x2a\x6f\x31\x78\xfa\x56\x7d\xba\xfa\xd4\xa5\x6b\xf5\xe1\x8f\x0f\xaf\xef\x2e\xf0\x35\xde\xe0\xf5\x17\xe8\xaf\xf0\x65\xf7\xdd\xfb\xbb\x47\xbc\x7f\xc2\x8f\x77\x8f\xbf\x3d\xe1\xdb\xbb\x7b\xfc\x7c\xff\xf0\xf8\xe9\xf3\xfd\xc3\x63\xf7\xc3\xdd\x13\xae\xde\x1c\x70\xf5\xf6\xed\x57\xe8\xf1\xfd\xfd\xc7\x87\xdf\x3f\xe2\xa7\xc7\x87\xbf\xee\xff\xfc\xf4\xf0\xf9\xa9\x43\x77\x71\x81\xa1\xfd\x19\x3a\xf6\x8c\xa1\xf3\xde\x63\xe8\xc8\x13\x86\x0e\x1e\xe1\xcb\xc8\x33\x40\x10\xfd\x9f\x74\x57\xff\xab\x8c\xfe\x0b\xfd\xe7\x23\x8e\xcf\xd2\xbf\xbe\xd9\xef\xdf\xed\xf7\xd7\x37\x71\xfe\xf9\xd8\x9c\x2f\x70\xd2\x60\xe8\x30\xed\xf7\xd3\x41\x15\xf5\x32\xe3\x7d\xdf\x8b\x8a\x78\xe5\x55\x7b\xc0\x3a\x87\x7e\x9a\xfa\x6a\x7f\xf8\x77\x90\x63\x01\x67\x33\xce\x96\x9c\xc9\x38\x13\x76\x40\x10\x4b\xfb\xc3\xd2\xa8\xed\xe8\xd2\x38\xa0\x39\x1a\x47\xd1\x0a\x18\x47\x0c\xdd\x38\xf6\x6e\x0c\x1b\xa9\x4d\x95\x0b\x7b\x9d\x5e\xa3\xfe\x8e\xa3\x0a\x0a\x4e\x3c\x76\x14\xbb\xf8\xfa\xeb\x9c\xaa\x3b\x33\xc0\xd0\x89\xfa\x30\x6a\x31\xda\x45\xaf\x6f\xfa\xfe\xff\x7d\x7f\x7d\x63\x97\x1f\x57\x64\x13\xf3\xfb\xd6\x05\x0e\xad\xc5\xe6\xd5\xb9\xb5\xe8\x4e\xfb\x97\x50\x3f\x1a\x59\xc2\xc1\x56\x8b\x31\xd8\x2a\x6e\x0c\xfb\x84\x5d\xa7\x24\x63\xad\x39\xb8\x9e\x66\x3b\xe4\xa0\x1d\x70\xa9\x6e\xe2\x8c\xf2\x00\xb3\x13\x91\xe3\x34\x4d\x61\xc7\x24\x6d\x77\x3b\x93\x41\xb4\xe4\xe5\x65\xb1\x91\xa2\xb5\x0e\xa5\x56\x1a\x73\xd3\xe4\x03\x4c\xdf\xf7\xbd\xfa\xb3\x88\x90\x6a\x25\x67\x98\x97\xdc\x4a\x3b\x69\x20\x88\x25\x45\xeb\xcb\xa4\xa8\x40\x13\xe6\xcc\xe1\x60\x0b\xea\x0f\xbb\x9d\x44\x49\x00\x94\x8d\xbb\x5d\x64\xce\x29\x73\xf1\xc4\x5b\xd8\x02\xe0\x0e\xe5\x09\x3b\x65\x01\x16\x92\xf0\xf1\xc4\xca\x46\xea\x82\x79\xde\x39\xf5\xe8\x75\x5b\x35\x8c\x8c\xd1\x45\x4d\x6c\x3c\xa5\x8d\x22\xb5\xe8\xa5\xcf\xba\x0c\x55\x32\xd7\x6d\x17\x1c\xba\x77\x98\xb7\x69\xf1\xa6\xb5\xf3\x9b\xd6\xe2\x70\xca\xed\x7d\x4e\x62\x9a\xfd\xa2\x73\x44\xde\x99\x88\xc8\x4b\x36\x0c\xe9\x1c\xec\x3d\xa3\x4e\xc9\x45\x70\x93\xf7\x2a\x9f\x8c\x97\xe2\x20\x01\xe9\xbc\xd2\x1c\xf0\x98\x3d\xa1\x05\x0a\x04\x11\x22\xd6\x1d\x24\xf3\x14\x3d\x36\xed\xd1\x73\x01\x88\x86\xe1\x34\x66\xae\x90\xab\x41\x53\xe1\xa4\xe4\x8b\x15\x16\x50\xa2\x0c\xc4\x40\x62\xc1\xa3\x38\x4c\xfb\x0a\xca\xec\xcf\x02\x2d\x30\x0b\xc8\x02\x51\x01\x6b\x8c\x79\x16\xce\x94\xae\x73\x1a\xd4\xc9\x9c\x1a\x27\xca\xa9\xdd\xab\xcd\x29\x20\xdf\x92\xbc\xe4\x06\x54\x53\x9c\x34\x5e\xb8\xd7\x3a\xf6\x46\x96\x2d\x77\xc7\x46\xda\xb8\x29\xeb\xb1\xd2\xf3\x1e\xd1\xae\x44\x34\x3f\x3b\x5d\xca\x04\x82\x2c\x65\x9b\x7a\x2e\x6c\x6a\xa1\xa3\x17\x3a\x23\x5c\xd2\x42\xe1\x10\x5b\xfd\x73\x1d\x45\x6f\x65\x44\x79\x0b\x6e\x49\xa3\x3e\xd8\xcf\x06\x08\x35\x58\xa4\x8e\xe1\xcf\xa2\x9f\x85\x4f\x32\x34\xc4\xa7\x24\x80\x71\xea\xa7\x13\xd9\xe4\xdb\xf1\x2d\xbe\x98\x78\xa3\x99\x4d\x80\x18\x7a\x31\xbe\x35\x26\xd5\x17\xeb\x40\xdc\x8e\xef\x22\xbc\xa5\xab\x9b\xff\x55\xd4\x9c\xa7\x29\x31\x34\x58\x61\x51\x73\x86\x67\x6f\xa6\x4e\x7d\xff\xe2\x3c\x67\x6d\x73\x19\xbd\x41\x6d\x8e\x72\xb9\xb4\x05\x39\x02\x7f\xde\x80\x44\x98\xf4\x39\x24\x3e\x9d\xde\x84\x02\xcb\x44\xe6\xef\x8d\xae\xb1\xba\xc9\x69\xcc\x16\x65\xb2\x40\xab\x2f\x7c\x52\xad\xd6\x5a\x60\x19\xa8\xb0\x1a\x5f\x02\x09\x87\x95\x47\x6f\xf6\x18\x6e\x82\x62\x01\xbc\x04\xaf\x0f\x58\x1c\xb2\x3c\x68\x75\x6c\xb7\x88\x16\x0e\x48\x4b\x76\x7d\xce\x37\x58\xa4\x9b\x15\xa2\xdb\x68\x2f\x30\x9b\x24\x72\xca\x49\x94\x3d\xcd\x41\xa0\xde\x17\xf
7\x5a\xe6\x6a\xa7\xae\x5f\xd0\x28\x58\xc6\xd6\xe2\xaf\xad\x45\xe0\x6f\x2b\xfe\xb4\xa0\x75\xf2\xdb\x65\xe8\xe6\x3a\x2c\xda\xa1\x9b\x5a\xa8\xb7\xed\x23\x13\x98\x3c\xa5\xa1\x47\x14\x58\x8d\xa9\x43\x7b\x9a\x90\xcb\x18\xae\x5f\x9d\x34\x1b\x1e\x1d\x8e\x99\x8c\xaa\xda\xc4\xdc\xb1\x2e\x78\xaa\xf4\x55\xcc\xb2\x61\x89\xdf\x10\x15\x29\x2c\x23\x6d\x28\x25\x6d\x36\x3d\x73\xcc\x54\x4c\x79\x5e\xbc\x27\xd6\x0d\xfa\xea\x34\x9f\xc2\x75\xdd\x82\xc3\xf8\x97\xe9\x66\x99\x36\xfb\xaf\x9f\x57\x5c\xe5\x52\x05\xb0\x54\x43\x6b\x49\xfd\x91\xa0\x71\x43\x14\xd5\xf0\xde\x0e\xe2\x5a\xa1\x15\xbb\xa5\x5a\xa1\x2c\x59\x8a\xb2\xa5\x19\xb9\x51\x90\xf3\x3b\x76\xba\xe8\xb1\xdf\x23\x5a\xc7\xe5\xf1\xbc\x0d\xb6\xd3\x54\x46\xab\x8a\x23\x1f\xc9\x34\xaf\x77\xce\x51\x9c\x1a\xbf\x89\x37\x15\xcf\x50\x81\x7f\x2e\xc8\x2b\x0c\x51\x0e\x38\x0e\x4e\xbc\xb0\x35\x32\xa3\xb2\xc4\xcb\x8c\x52\x79\x4a\x0c\x26\xb2\x3f\x7b\xe5\x4a\xc5\xc7\x52\x63\xdd\x61\x0b\xa5\xf8\x44\x84\xfb\x1c\xe1\xd9\x4e\x64\xe5\x6a\xfd\xa7\x1a\xf3\xdf\x00\x4c\xb9\x4e\xc9\x61\x59\xd6\x82\x98\x15\xc4\x82\x38\xbb\x22\x9b\x1d\x4c\x33\x2a\x2b\xa7\x5c\x38\xb5\xaf\x98\x52\xa1\x3c\x06\x5a\x87\xc8\xfb\xc9\x56\x78\x94\x24\xcd\x63\xb2\xf0\x37\x94\x7f\x15\xf0\x29\x25\xd0\x5a\xfa\xde\xe4\x76\xc5\xe0\x96\xa4\xce\x30\xb8\xa2\x71\x8c\x2c\x89\x86\x5c\x99\xa4\x57\x95\x7c\x0d\xb1\x8e\x36\x53\xcf\x30\x8b\x3a\x42\x1f\xeb\x58\x45\x68\xe4\xc6\x1a\xa2\x46\x99\x8d\x96\xe6\xe3\x74\x4d\xf3\x0f\x9f\xbd\x76\xee\xea\x0d\x0f\x3d\xfb\xde\xcc\xb9\xd0\x21\x5a\x57\xca\x2c\x42\x48\x3f\xbd\xac\x62\x88\xf5\x82\xdf\x52\xea\xd8\x7a\x7c\xe3\x93\x21\x3b\x16\x43\x05\x78\x6e\x01\xbc\x6a\x2d\x6e\x2c\xfc\x17\x17\xff\x09\x00\x00\xff\xff\xce\xd1\xdd\x67\x19\x18\x00\x00") + +func fontsKbanFlfBytes() ([]byte, error) { + return bindataRead( + _fontsKbanFlf, + "fonts/kban.flf", + ) +} + +func fontsKbanFlf() (*asset, error) { + bytes, err := fontsKbanFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/kban.flf", size: 6169, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsLarry3dFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x5a\x5d\x6b\x5c\x39\x0f\xbe\x9f\x5f\xa1\x8b\xc0\x69\xe1\x10\xbd\x4d\xfb\x2e\x14\x86\x61\xa0\x17\x85\x65\xae\xf7\xca\xa0\x33\x6d\x27\xd9\xb2\xd3\xa4\x24\xcd\xee\x06\xf2\xe3\x17\xdb\x92\x25\xf9\xf8\xcc\x67\x92\x0d\x5b\x7b\x4e\xc6\x8f\x25\x4b\x8f\x3e\x7c\xae\xb7\xd7\x57\xeb\x0b\xf8\x08\xbf\xc1\xfb\xff\xc1\x3b\xf8\xff\x6c\xbb\xbe\xbf\x7f\x7a\xff\xed\xf2\x7a\x7b\x0d\x5f\x9e\x60\x15\xa7\xf0\x79\xb3\xfd\xb2\xb9\xbf\x81\x37\xe9\xe9\xcd\x72\xfd\xf7\xc3\xe5\xd7\xbb\x1f\x6f\x67\x6f\x1e\x7e\xdd\x6d\x37\xb7\xb0\xfd\xfe\x65\x73\xbf\xde\x6e\x9f\xe0\xfa\xfe\xee\x07\xfc\xfe\xb8\xbe\x85\x4f\xeb\xfb\xee\x01\x7e\x3e\x5e\x5f\x3f\xc5\xd5\xde\xce\x7e\xfd\xb3\x59\xff\xb5\xf9\x16\x97\xfd\xbc\xdd\xdc\xde\xc2\xa7\x3f\xd7\x3f\x7f\x6e\xb6\x5b\x98\xdf\xdc\x7c\x5d\x3e\x7e\x7f\xfc\x7a\xb9\xf9\xf6\xb8\x98\xfd\xb1\xb9\x7f\xf8\x7e\x77\x0b\xef\x2e\xaf\xe0\x0a\xaf\x3e\xe0\xc7\x0f\xb3\xd9\xc5\xc5\x05\xf0\xcf\x72\x06\x3a\x5b\xce\x40\x67\x71\x52\x66\x69\x22\xb3\x3c\xe1\x19\x4f\xf2\x4c\x26\x69\x56\x26\x71\xb6\x5c\xce\x80\x88\xd7\xc2\x00\x21\x8f\xe2\x20\xe4\x55\x78\x18\xbf\x15\x20\x50\xc8\xdf\x0f\x18\x28\xe4\x95\x02\x12\xea\x9a\x66\x94\x97\x76\xab\x5b\x00\x0a\x71\xb5\x8c\x81\x84\x48\xc8\x3b\x65\x41\x79\xa7\x32\xd3\x85\x1d\xca\x68\x12\x67\x0a\x1b\x1f\x52\x06\x4e\x9f\xc4\x6d\xc4\x41\xfc\x4d\x52\x05\xa4\x55\x7e\xbe\xa2\xb4\x12\x86\xf8\x77\xe9\x79\x92\x19\x09\xf3\x56\x45\x48\xde\xac\x87\x85\xd1\x54\x15\x2b\xc2\x8b\x22\x02\x76\x44\x43\xd0\xcf\x03\x6f\x25\xee\x91\x08\x42\x56\x49\xd6\x76\xde\x04\x0c\x81\x80\xd5\x3c\x04\x52\xe5\xb3\xfa\x15\x2c\xfe\x93\x84\x8c\xeb\x23\x20\xcb\x88\x3c\x91\x6d\xe6\x09\xcf\xd2\x84\xb2\xf8\x90\xff\x96\x01\xd2\xa4\x3a\xe1\xb1\xb0\xac\x73\x55\x39\x3e\x47\x0d\x06\x9e\x3d\x63\x1c\x3d\xcb\x17\x03\x22\x44\xf9\x31\xc9\x19\xff\x34\xac\x16\x00\x73\x5a\xce\xe0\x39\xab\x80\x30\x6b\x3b\x20\x4f\x6a\xc0\x11\x7c\xdb\x86\xf1\x42\x20\x93\x1d\x89\x71\x19\xcb\xba\x68\x5a\xae\xb3\xe1\x64\x0c\xf9\x53\xec\x64\x71\x0c\xd0\x63\xe7\x7c\x45\x9c\x65\x28\xde\x32\x04\xe2\x73\x1a\x42\x65\x31\x76\x58\x6d\x9f\x2d\x23\xc4\x7f\xf3\x52\x43\x1c\x67\xc7\x1b\x42\xdc\x43\x3e\xa8\x40\x7c\x2c\xd5\xe2\xd5\xf6\x2b\x4f\x80\xe2\x06\x03\x74\x20\x3e\xb0\x80\x78\x02\xe2\x00\x3d\xf4\xce\xfa\xad\xe9\x3b\xe5\xbb\x71\x05\x26\x27\x91\xf4\x42\xd9\xb8\xd9\xff\xa2\x77\x45\x80\x78\xbe\xc9\xce\x19\xc0\x10\xcc\x21\x60\xf2\x51\xf5\x7f\x86\x89\x6a\x8a\x36\x90\xcf\x9f\x9f\x4e\x1c\x81\x1b\x52\xfe\x91\xbd\x52\xd9\x6a\x32\xc6\xfd\x2b\xec\xdf\x1a\x2f\x88\xee\xa9\x2c\x41\xa4\x3e\xaa\xfe\x99\x8f\x3c\xfb\xb3\xf1\xe5\xec\xa0\x71\xd1\x3c\xd8\x65\xc5\xc6\x8c\x1d\xff\x20\x13\x7c\x00\xf9\x4f\x68\x5f\xf9\x27\xa8\xec\x49\x13\xfb\x4e\xa6\xe9\x32\xd4\x73\x50\x49\x66\x25\x51\x45\x31\xc5\x02\x8c\x01\xec\x52\x31\x79\x89\x54\x24\x0a\x50\x64\x8a\x64\x8d\x80\x85\xdd\xe2\x98\xa5\x92\xd3\x35\x54\x53\xd3\xfa\x38\xb4\x4c\xa9\x91\xc2\xca\x40\x52\xa0\xe2\x4d\x90\x9e\x9c\xa2\x46\x17\xc6\x4c\xf8\x94\xb3\xf2\xbc\x13\x72\x88\x29\x40\xd0\x0b\xfb\xb0\x17\xab\x36\x73\x40\xd8\xcf\xa9\xc2\xe9\x09\x3d\xce\x83\x82\x13\x61\xc1\x8e\xca\x1f\xf2\x11\x06\x2b\xb0\x97\xb8\x25\x72\x33\x78\x0b\x68\x3e\x56\x13\x28\x19\x32\xc9\x2d\x88\x69\x33\xa7\x6b\xb8\xf6\x75\x00\x71\x76\xc4\x0e\x3b\x76\xc0\x2e\xfe\xa6\xe3\x4c\x23\xe6\x37\x75\xb9\x18\xd3\xc6\x00\x63\xaf\xb0\xb6\x03\xc6\x05\x8d\xed\x2c\x80\xe0\x05\x6c\xc7\x53\xb1\xa0\x39\xb8\x92\x72\xf4\xec\x8a\x71\xd9\x1e\xd5\x19\xbd\x3b\x36\xf2\x80\x86\x7b\x98\x4f\x25\xae\x49\x3a\xc2\xae\x21\x41\xa6\x04\x98\xd1\xf7\x8e\x5b\xa8\xe8\x47\x74\x53\x2f\x44\xc5\xf9\x81\x89\x34\x0d\xe2\x72\x73\x80\x39\x2b\x62\x48\xbf\x29\xda\xc6\xc0\xfd\x2
c\x71\x7b\x47\x64\xb5\x7a\xa0\xe2\x2d…\xff\xff\x1d\x76\x41\x67\x61\x2c\x00\x00")
+
+func fontsLarry3dFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsLarry3dFlf,
+		"fonts/larry3d.flf",
+	)
+}
+
+func fontsLarry3dFlf() (*asset, error) {
+	bytes, err := fontsLarry3dFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/larry3d.flf", size: 11361, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsLcdFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57…\xff\xff\xc6\xce\x60\x5b\xbb\x13\x00\x00")
+
+func fontsLcdFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsLcdFlf,
+		"fonts/lcd.flf",
+	)
+}
+
+func fontsLcdFlf() (*asset, error) {
+	bytes, err := fontsLcdFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/lcd.flf", size: 5051, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x5c\xdd\x6f\xda\xd8\xb7\x7d\xe7\xaf\x58\x0f\x96\x7e\x2f\xd3\x26\x38\x09\x49\xa4\xab\x2b\x28\x71\x13\xab\x04\x67\x0c\xb4\xd3\x27\x8b\x36\x4e\x83\x86\x40\x14\xc8\x8c\xfa\xdf\x5f\xf1\x65\xef\xaf\x63\x6c\x02\xd1\xd5\xaf\xf3\x30\x26\x80\xcf\x3a\xfb\xec\x8f\xb5\xd7\x39\xe6\x61\xfc\xe0\x0f\x3d\x5c\xa0\x01\xff\x1c\xc7\xa8\x1f\xe3\x18\x67\xe7\x8d\x5a\x27\x1d\x4e\xf0\xe3\x37\xae\xc7\xe9\x64\x82\xf6\xe3\xf0\xf9\x39\x1d\x8f\x71\x7a\x74\x79\x82\x0f\x1f\xf0\x63\x38\x4b\xef\x31\x9d\xe0\x9f\xe1\xcb\x68\xfa\x3a\xc3\xc7\xd9\xe8\xd7\x7f\x66\xb5\x70\xf2\x73\xfc\x7a\x9f\xce\x10\xf6\x22\x74\x86\xf3\xd1\xe4\x43\xbd\xf6\x30\xfa\x35\x4e\xe7\x78\x49\xc7\xe9\x70\x96\xc2\xff\x58\x5f\xdc\xa2\xee\xa3\xf5\xfa\x0b\xf5\xcb\xcb\xd3\xda\x5d\xfa\xf2\x34\x9a\xcd\x46\xd3\x09\x46\x33\x3c\xa6\x2f\xe9\x8f\xdf\xf8\x35\xfa\x27\x9d\x60\x3e\xc5\xd3\xf4\x7e\xf4\xf0\x1b\xf3\xc7\xd1\x0c\x0f\xd3\xc9\xfc\x0f\x0c\x67\x18\x4f\x27\xbf\x16\xff\x9f\x3f\xa6\xb5\xe5\x07\x46\xe9\xcb\x7f\x66\x98\x0c\x9f\xd2\xc5\x3d\x9e\xc7\xc3\x9f\x2b\x84\x43\xfc\x9c\x3e\x3d\xa5\x93\x39\xc6\xa3\x49\xfa\xb1\x56\xbb\x5d\x7d\xfa\x7e\x31\xbd\xbb\xe1\xeb\x18\x9f\x5e\x5f\xe6\xd3\x09\xfe\x67\x36\x1d\xbf\xce\x47\xd3\x49\x33\x1d\xbe\xcc\x1f\xc7\xa3\xc9\xdf\x1f\x27\xe9\xfc\x7f\x51\xf7\x8f\x2e\x1b\x0b\x20\xa3\xd5\xec\x30\x49\xff\xc5\xf3\xf0\x65\xf8\x94\xce\xd3\x97\xda\xec\xf5\xf9\x79\xfa\x32\x5f\xdd\xf0\x73\x78\xbd\x98\xeb\x70\x72\xbf\xb8\xfc\x36\x9a\x7c\x04\x6e\x87\xbf\x31\x1c\xcf\xa6\xf8\x91\x62\x36\x1e\xfd\x7a\x9c\x8f\x7f\xe3\x69\x83\xe2\x61\xfa\x82\x1f\xe9\x7c\x9e\xbe\xe0\x75\x96\xd6\xa6\x0f\xcb\xdb\x3f\xbc\x8e\xc7\x1f\xfe\x1d\xdd\xcf\x1f\x8f\xfe\x4e\x5f\x26\x47\xb3\xa7\xd7\xd9\x23\x86\xe3\x79\xfa\x32\x19\xce\x47\xff\xa4\xb3\x3f\xf0\xe3\x75\x8e\xfb\xf4\x61\xf8\x3a\x9e\x63\xfa\x3a\x7f\x7e\x9d\x2f\x66\xde\x8d\xfa\xf8\xf9\x38\x9c\xfc\x4a\xef\x3f\xd6\x6a\x58\xfd\xf3\x00\xaf\x59\xcb\xaf\xb1\x7e\xb1\xbc\x5e\xbf\x58\x5d\xaf\x5e\xac\xaf\x97\x2f\x36\xd7\x8b\x17\xd9\x35\x9a\xb5\xfc\x1a\xcd\x66\x36\xd0\x7a\x98\xe4\x68\x33\xc8\xf2\x6a\x79\xb9\xba\x5a\x0f\xb0\xbe\xfd\xfa\x6f\xab\x9b\x67\xb7\x06\xd8\x8d\x93\xa3\xd5\x97\xd7\x37\x21\x37\xc7\xfa\x56\x4b\x9c\x19\x9a\xfc\x56\x4b\x9c\x5e\x7e\xbb\xfc\xce\xf2\x05\x7b\xb5\xf8\xfe\xfa\x0f\xeb\xf1\xb0\x19\x7d\xf3\x1f\x99\xde\xfa\xfd\xd5\x74\xc8\xfb\xcb\x21\xe8\xfb\xf9\x98\x14\x2b\xff\x47\x81\xe4\x28\x08\x04\xb0\xf1\x97\x97\xf9\x22\x92\x91\x09\x8c\x0d\x8e\xdc\xd4\xd4\x40\x0a\x42\xbe\x86\xcb\x2f\xb1\xb5\xe4\xd3\x25\xd8\x88\xf3\xac\x3e\x93\x8f\xbd\xb9\x0b\x5d\x6b\x32\x45\xdb\x0e\x0c\x98\x47\x8d\x97\xcd\x84\xb8\x03\x98\x75\x32\xc8\x19\xfe\x0d\x5e\xf2\x2e\x1d\x93\xdd\xdf\x00\xb0\x71\xbe\xec\x5e\x9b\xf0\xf1\x94\x3d\xdf\xe6\x7c\xf6\x40\x64\x19\x41\x8d\x98\x5d\x6f\xa6\x6a\xbe\x43\xc1\x19\xee\xc4\x57\x98\x05\x2c\x84\xa9\x28\x0e\x0a\x84\x8c\xcd\xe7\x5f\xe8\x5c\x3a\x96\xd7\x2e\x8b\x3c\x61\xc8\x68\xe2\x9f\xd9\x8c\x4d\xd6\x74\x07\xe7\xca\x12\x88\x9a\x2f\xf1\x2c\x3b\xf0\x29\x54\xb0\x90\xa6\x3e\xe0\x1a\xb8\xdc\x95\xe7\xe5\xe6\xce\x4d\x9d\x9b\x59\x9a\xb8\xe8\xa5\x67\xce\x44\x5b\xa1\xc4\xad\xf2\x81\xdd\xff\x5b\x0d\xb7\x1a\x86\xdc\xde\xed\x14\x9b\x7f\x12\x9b\x99\x71\xc8\x72\x53\x07\x94\x0e\xb9\xf9\x94\xe7\xa1\x68\x36\x50\x19\x87\xa5\x1b\x59\x7a\x72\x97\xcb\x5c\xd0\x93\x89\x56\xe6\x05\x6b\x20\x59\x2f\x37\xce\xce\xeb\x25\xcb\xa5\xdb\xeb\xa5\x9e\x44\x56\x27\x88\xeb\xca\xa0\xa7\x55\x64\xf5\x75\xd0\x92\xc6\x0b\x58\xe6\x7d\xce\x9c\xc9\x00\xe4\x51\xcb\x57\x95\xbe\xcf\x97\x18\xb2\x90\xf1\x0a\xea\x2c\xa0\x3c\xb0\x1d\xcb\xc6\x42\x9a\x05\xb4\x0c\x67\x16\xcc\x45\x8c\x81\x55\x4d\x23\x4f\x88\xa4\x46\xeb\x25\x58\x6a\xe5\xd3\x5
5\xf3\x2d\x66\x0c\x34\x96\x04\x4f\x60\x28\x04\x08\xe2\x10\x20\x75\x92\x41\x70\x23\xb0\x2d\xe1\xca\x33\xac\xbc\xf0\x62\x0e\x5e\x48\x44\x14\xab\x18\x2e\xb6\x84\xc7\xca\x0a\x8d\x61\xcd\x68\xf2\x38\xe6\x51\x27\xdd\xce\xe5\x75\x3b\x86\x5d\x1e\x58\x7b\xf4\x7a\xbb\x6e\x80\x66\x78\xcf\x63\xb9\x84\xd4\x92\x9a\x3d\x23\x15\x53\x50\x49\x9a\x96\x0f\xcf\x93\x24\x41\xf2\x13\x9b\x7e\x5a\xf7\x16\xcb\x43\x19\x0e\x6c\x56\x54\x32\x5e\x95\x11\x8b\xdd\x76\x33\x43\x8b\x93\x90\x59\x1b\x4e\xe9\xf6\xd2\xdc\x66\x9c\x56\xf2\x4c\xb9\xdd\x16\x8e\xd0\x34\x5b\x09\x95\x87\x5d\xad\x04\x49\xf1\x84\xdc\x48\x7e\x57\xb3\x67\xc6\xed\x4a\x3e\x46\xcc\x97\x83\xc8\x92\x13\x87\x03\x52\x38\xb2\x30\xc9\x09\x3f\x2b\xbb\xf4\xd3\xcc\x35\xe8\xbd\x59\x46\xcb\x91\x10\xb2\xbc\x4b\x1c\xd3\x9b\x64\x03\x2a\x60\xfb\x2c\x9f\x26\x00\x92\xdb\xb1\x9f\xf2\x59\xbe\x98\x90\x3c\xbd\x99\x31\x5b\x02\xa3\xf7\x73\x71\xf3\x1d\x2d\x00\x08\x1f\xd9\x97\x05\xaa\xd6\x75\x9e\xa0\x40\x1b\x15\xd3\x0e\xee\x26\x65\xcf\x40\x68\x59\x2d\xa4\xc6\x7b\x22\x18\xe0\x62\x04\x34\xc1\xd8\xd1\x27\x78\x0f\xa9\xf2\x23\x28\x81\xd6\x5a\x8c\xaa\x49\x46\xe7\x58\x72\x49\xf8\x82\xc8\xf4\x4d\x52\xbb\xd9\xa9\x70\x85\x46\xba\x65\x21\xc1\xe0\xfd\x92\xa8\x54\x6e\x42\x61\x2c\xc1\x1b\x39\x9e\xbd\x04\xb2\x85\x97\x65\x5b\xb6\xef\x3b\x2e\x81\x27\x4d\x2c\x18\x07\xa4\x56\xb2\xfe\x9a\x66\xfe\xb0\xda\xdf\x62\xe9\x42\xce\x9a\xfa\x7b\x93\xe8\x52\xd2\xf9\xb4\xfb\x59\xb3\x2f\x2d\x5d\x94\x03\xc6\x63\xc2\x06\x96\x28\xc1\xee\x6d\xc0\xca\xd0\x6f\x8e\x6d\xef\xf4\x7b\x87\xaa\x09\x8b\xf2\xe8\x1c\x59\x18\x1d\x65\x67\x9e\xbb\x4b\x42\x65\x4a\xa1\x52\xe6\x77\xf6\xbc\x43\xce\x7c\x2f\x84\x85\x53\xf8\xa2\x22\xc5\x32\x83\x4c\x56\x6f\xef\x82\x6d\xa1\x2b\x29\x90\xec\x38\xc7\x65\xcb\x2f\x56\xbf\x54\x6a\xae\x48\x50\x70\x80\xf6\x9b\x98\x54\xa5\x1f\x15\xe3\xe6\x4e\x85\x20\x76\x0c\x45\x09\xc2\xa2\xc1\x58\x80\x34\x28\x9d\x08\xe0\x00\xa7\x01\x62\x1b\x48\x1b\xa8\x3b\x83\xea\x92\x06\x2e\x54\xa8\xb2\xb7\xb7\xd4\xbe\x96\x4f\x95\xd5\x78\x7d\x85\x29\xfb\xbf\xcd\x89\x0d\xb6\xb9\x93\x24\x0b\x4d\x7f\x77\xb5\xc8\xea\x9b\x9c\x61\x71\x82\xc5\xca\xbf\x88\x70\xb9\x01\xc6\x9a\x3e\xe6\xd8\x4d\xf2\x92\xf4\xd9\xe4\x06\xc4\xec\x84\xd2\x11\x2c\x24\x59\x91\x19\xd9\x33\x11\xf4\x8d\xe5\x7c\x26\x31\xb8\x66\xb9\x6d\x26\x14\x5f\xf2\x2e\x7b\xa7\xca\xb7\x76\x79\x69\x95\x1b\x9d\xd4\x4b\xd9\xdf\x53\x3a\x09\x99\x17\x5d\x20\x63\xad\x2c\x90\x16\xed\xdf\xce\x64\x64\x41\xb5\xeb\x69\x53\x24\x31\x96\x95\x08\xa7\x2b\xcb\xa8\x74\x91\x2d\xd7\x60\x94\x9d\x31\xc0\xab\xc9\x5b\x66\xcc\x86\x02\xf8\x6a\x56\x92\x58\xde\xd2\x5c\xf2\xb4\xc4\xa9\x4a\x76\xeb\x35\x30\xd5\xf3\x26\x52\x16\xb4\x34\x41\xbb\x89\xcc\xf8\xa0\x62\x2a\x44\xd4\x52\x4c\xf5\x6d\x49\xde\x61\x7a\xa3\xaf\x77\x99\x1e\xd6\x4a\xcb\xec\x94\x6b\x0f\x7b\xf6\x6e\xc8\x24\xe9\xb0\x78\xe1\xf9\x94\x3c\x51\x30\xc6\x21\xcd\x5b\x6e\xbf\x2d\x7f\x29\xeb\x65\xe1\x26\x33\xb7\x06\xaf\xa2\x54\xb7\xb1\x4f\xdc\x28\x4b\x52\x4a\x62\x68\xbb\x84\x43\x70\x2b\x5a\xf7\x2c\x63\x42\xd6\x5d\xbe\xcd\x84\xd9\x5f\xd5\x1f\x3d\xe1\x20\xa6\x9b\x58\xce\xb2\x95\x5f\x8a\x51\xca\xb2\x46\xfa\x52\x82\x3b\x90\xf7\xca\x71\xed\x0c\x45\x06\x55\xd1\x2b\xca\x81\x9d\x14\xb7\x6d\x95\x78\xc6\x89\x0d\xea\x81\xdb\x14\x39\xc1\xa9\x0a\xb4\xc9\x43\xd7\x61\x67\xb4\x95\x4a\x95\xa4\x8b\x27\x41\xe6\x96\xc5\x77\x55\x14\x9c\x00\x54\xae\xe6\x1d\x2e\x95\x75\x76\x96\xc1\xd9\x66\x1e\xc0\x05\x34\xb1\x88\x59\x49\xa4\x12\x8e\x16\x70\xca\xb3\x47\x6f\x27\xd5\x68\x27\xe6\x61\x9a\x58\xb4\x5f\xb2\x89\xe6\x7d\x5f\x53\xb4\xcc\xcd\x1a\x6f\x46\xb7\xae\xb1\xeb\x83\x36\x20\x07\x30\x1b\xa0\x0d\x14\x06\xe0\x7c\xb8\x2d\x78\xaa\x44\x89\xe2\x72\x87\x51\x9f\x4a\x01\x38\x0c\xa3\x29\x06\xa0\x3a\x68\x
d6\x3e\xb3\xa0\x49\x76\x3c\x6f\x93\xb8\x8e\x0e\x6e\xdd\x3e\x77\x69\x08\x4d\xf3\xe8\x20\x68\xf7\x2a\x74\x92\xaa\x8a\xbc\x95\xf7\xc5\x19\x45\xd0\x71\xf9\xae\xb8\x7c\x57\xee\xa6\x83\x9b\x40\x73\x28\xc1\x96\x19\x6d\xca\x4f\x16\x1e\xec\xb8\x67\xee\x79\x45\xe5\x55\x88\x0e\x2a\xd5\x31\xa2\x65\x4a\x4e\xdb\xf6\x52\x1c\x38\x14\xb9\x28\x15\xaf\xbb\x48\x97\xc5\x00\x8a\xe3\xf5\x1d\x00\xe0\xf0\xdd\xe7\x7f\xdf\x12\xec\xda\x7f\x7b\x2c\xa6\xb7\x71\xcb\xe2\xf0\x30\x62\xdf\x19\xfd\x4b\x20\xf5\xc6\x31\xd0\x8d\x3e\x7c\x8a\x83\xd6\x17\xf4\xee\x5a\xed\x20\x0b\xa2\x83\x3f\x41\x51\x6f\xd4\x81\xb0\xfb\x35\x88\xfb\xc1\x15\x82\xbf\xda\x9d\xd6\x6d\xab\x1f\x46\x5d\xdc\xb6\xe2\x2f\xee\xee\xeb\xad\x0d\x6c\xbd\xe1\x03\xed\xa0\xdb\x47\x2f\xbc\xee\x1a\x4d\x0d\x44\xf2\x95\x84\x9b\xd7\x18\x4e\xef\x79\x25\x93\x7e\xc9\x8e\x81\xd5\x1b\x27\xc0\x5d\x34\xe8\x5e\x49\x20\x12\x8c\xf4\x0c\xcb\x37\x34\x17\xde\x30\x1e\x90\x5d\x36\xc9\x25\x69\x63\x69\x13\x9f\x25\xd0\x53\xa0\x3d\x88\xe3\xa0\xdb\xfe\x6e\x61\x05\x67\xe8\x39\x68\xf0\x13\x45\x89\x3c\x71\x65\x66\x70\xf1\x47\x70\x4a\xc1\xb6\x2c\xe9\x40\x72\xa5\x79\x2d\xaf\x37\xce\x80\xef\x41\xd7\x84\x5f\x65\x37\x24\x31\xcf\xc3\xd1\x58\x73\x69\xff\xb2\xf7\x03\x27\x9d\x6b\x98\x0d\xe0\x53\x1c\x7d\x09\xba\xf8\xd4\x8a\xab\x50\x10\xae\x62\x78\x15\x29\x48\xbd\x71\x0e\xf4\x82\xf6\x32\xfe\x98\x8d\xf4\x46\x04\x8a\x78\x1d\xb3\x8b\x6a\xbf\xdc\xc4\x52\xf6\x0e\x0b\x48\x17\xc0\x55\xd8\x0a\xe2\xa0\x17\xf6\xca\x51\x88\xf5\x0d\x68\x26\x02\x78\x36\x62\x6b\xc0\x5f\x1b\x8c\xc1\xa0\x10\xf5\xc6\x25\xd0\x8e\xee\xbe\xc7\xe1\xf5\x8d\x4c\x22\xcc\x4f\x79\xb4\x82\xa7\x95\xcd\x54\x92\x7c\xf3\xbb\xc9\x1f\xf0\x31\x1b\x2f\x6a\x46\x7e\x0c\x50\x94\x24\x3e\x24\x00\xee\xbf\x64\xd6\xcb\x49\x9d\x1f\x03\x9f\x83\xdb\xb0\x1b\x76\x03\x44\xf1\x55\xd8\x6d\x75\x10\x76\xaf\xc2\x76\xab\x1f\xc5\xdc\x0c\xc5\x9b\xde\x9c\x99\x2a\x89\x18\x74\xb7\xc9\xac\x9c\xc5\xd6\x3f\xaf\x03\x9d\xe0\x73\xff\xc3\x5d\x14\x76\xfb\x61\xf7\x1a\x57\xd1\xe0\x53\x27\x40\xab\x7b\xdd\x09\xf0\xe7\x20\xea\xeb\x2a\xa2\x71\xbb\x2b\x2b\xaf\xa3\x56\x65\x65\x9e\x5e\x92\x78\xd6\xcf\x7d\x2c\x9f\x55\xd4\x09\xa8\x59\xf0\xd2\xfd\x04\x8b\x90\x30\xed\xe3\x12\x35\x0d\xe3\x04\xe8\x45\x9f\xfb\xb8\xf9\x7e\x77\x13\x74\xdd\xa4\xbd\x66\x62\xd8\xb6\x99\xe6\xba\xdb\x62\xe4\x53\x20\x0e\xae\xc3\x5e\x3f\x88\x03\x59\xf3\xaa\xc5\x8d\x28\x65\x3c\x6e\x12\x7d\x7c\x96\x17\x3e\xba\xc6\x78\x73\xdc\x9c\x01\xb7\xad\x76\x1c\x75\x6b\xac\x36\x34\x0d\x62\xc1\x59\x29\xef\x9d\xf8\x3e\x46\x91\x56\x66\x2d\x6a\x03\xb8\x0a\xae\xe3\x20\x20\x66\xcd\x6d\xa6\xbd\x9d\x70\x38\xc5\x96\xaa\xf4\x75\xf5\xf3\x73\xe0\xae\x33\xe8\x7d\xb8\x0d\xbb\x83\x9e\x72\x6e\xbe\x27\x2c\x33\x41\x92\x09\x6c\x30\xf7\xdd\x13\x26\xb4\x59\x75\x17\x70\x4a\xd8\x4b\x74\x17\x40\x6f\x70\x17\xc4\xbd\x76\x1c\xde\xf5\xd1\xff\x16\xd5\xe4\xa2\x18\xd4\xca\xd0\x12\xe4\xc0\xc6\xa0\xda\x36\x97\x62\xf4\x9b\x38\x08\x98\x5b\x50\xee\xe6\x89\x9a\xc9\x2b\x26\xaf\x97\xdc\x9b\xf8\xd4\xc5\xab\x66\xad\x7e\x71\x0c\xb4\xda\x83\x7e\x80\x56\x7b\xc1\x7b\x35\xab\x38\x54\xdb\x5f\xbf\xa8\x03\xb7\x61\x3b\x8e\x6c\xda\x28\x17\x4d\x90\x48\x69\x15\x9a\xf3\x64\xf7\xa3\xd4\x81\x3c\x94\x09\x61\x54\x0d\xd0\x85\x0f\xdc\x85\x9d\x76\x1c\x7d\xd3\x10\x2d\x39\x8b\xd7\x5a\xeb\x11\x5b\x23\xa3\x54\x69\x0e\xeb\x17\x27\x0b\x9b\x5d\x5d\x75\x02\x5c\x45\xd9\x62\x35\xc5\x45\xa6\x14\x93\xdc\x92\x2d\x8c\xfc\xf0\xfa\xc6\x0b\x1a\x1f\x5c\x85\x9d\x4e\xab\xa6\x57\xa0\xdc\x95\x7c\x3a\x73\x6d\x86\xc5\xdd\xcf\xb8\xb7\x47\xdd\xa0\xf8\x3b\x35\x96\x8e\x37\xe6\xd2\x4f\xe6\xd4\x98\x71\x1a\x8b\x74\xdb\x6b\x0f\x3a\x5b\x79\x4a\xa5\xc7\x92\x58\xcc\x21\x2f\xa7\x0e\x75\x52\x61\x23\x08\xcf\x81\x25\x33\xac\x4c\x50\xf8\xe3\x31\
x3c\x26\x93\x23\xde\x93\x56\xa5\x30\x28\x14\x45\x56\xc0\x2f\x80\xaf\x83\xce\x75\x2b\xc6\xe7\xb8\xb5\xea\x04\xa2\xee\x02\x70\x2b\xee\x07\xb1\x0a\x5e\x4f\xf7\x36\xf4\xca\xcb\x52\x16\x81\xa7\xf6\x34\xc4\xd4\x28\xcd\xe5\xf5\x80\x74\xb3\x3a\x49\x3a\x50\x59\xff\x96\x33\xbd\xb4\x67\x7a\xd3\xea\x7c\x76\xdc\xd0\x9e\xa5\x07\x3d\xc9\x6c\x95\xe8\x1c\x41\xfd\x5f\x13\x1b\x45\x3d\xac\x3a\xa7\xf1\x38\xa6\x77\x79\xac\xa7\xb7\xac\x3d\x9b\xa5\xec\x19\x5f\xe6\xab\xc9\x27\xb5\xfa\x00\x4d\xc4\x7a\x45\xe9\xdf\xe4\x9a\xba\x57\x15\xa5\xd7\x75\xfb\xca\xae\x27\x4f\x45\xa5\x3f\x07\x41\xcf\x0a\x35\xcf\x71\x1c\x8d\xc4\x60\x72\x24\x9a\x2c\x2e\x37\x5a\x62\xa3\xae\xbc\x97\x3e\xd0\x69\xf5\xc3\x2e\xda\xad\xbb\xb0\xdf\xea\xa0\x13\xf4\xfb\x41\x8c\x16\xbe\x85\xfd\x1b\x5c\xc7\xad\xaf\x01\x63\x43\x8e\x13\x15\x49\xc1\x2e\x3b\x15\x6b\x55\x55\x24\x96\x33\x78\xe3\xe5\x49\x31\xbe\x25\x6b\xa0\x24\xdd\xb5\x1f\x71\x28\x7c\xa7\xc5\xf8\xda\x61\xdc\x1e\xdc\x7e\xee\x04\x7f\xe5\x46\x4c\x0a\x24\x54\xb5\xca\x42\xfc\x92\xd9\x3e\xb1\xf4\xa9\x6d\x8d\xde\xe5\x59\x31\xe8\x7e\xd8\xb9\x62\x8b\xce\x79\x98\xe8\x81\x20\x0f\x0f\xed\x1f\x6f\xa3\x18\x6f\x45\x09\x46\xd6\x53\xb9\xe4\xfb\xc1\x7c\x5e\x8c\x39\x5e\x94\xdd\xd6\xa7\x88\x46\x57\xb1\x63\xbc\x07\xe8\x0b\x17\xe8\xc0\xca\x69\x52\x65\x71\x68\x2d\xeb\x0f\x73\x12\xcc\x73\x94\xfe\x2c\xf4\x23\xa4\x66\x76\x2d\x38\xa9\x9c\xcd\xea\xd2\x31\xab\xf6\x3a\x46\x05\xe3\x34\xe7\xe6\x7e\xb6\x8f\xab\x73\x72\x0e\x30\xf6\x5e\x12\x26\x2b\x92\x77\xd7\x88\xfd\xe3\x63\x07\xe2\xc0\x99\x95\x79\x13\xcb\x7f\x99\x88\x48\x10\xee\xfe\x63\x87\x7d\x6e\xff\xb8\x5e\x8c\xd3\xc8\xce\x60\xe9\xf9\xbd\x70\xba\xaa\x5c\xb0\x5b\x96\xe6\x25\x43\x2f\xb1\x76\x64\xdb\x85\x8b\x82\xd1\x3f\x76\x95\xbe\x60\xa7\xac\xf7\x3e\x98\x5d\xe5\x30\x54\x8e\x9b\x18\x4a\xda\x76\xb9\x81\xd4\x16\xc3\x09\x4c\x17\x58\xe2\x72\x55\xbc\xb0\x12\x8d\xe0\x16\x64\x68\x58\x58\x57\x7d\x92\xd4\x3f\x76\x55\xb8\xd0\xed\xa0\xdc\x3d\xd5\x4e\xdb\x01\x40\xba\x4a\x5a\xe8\x74\x48\x97\xce\x78\x38\x8c\xae\x0a\x16\xf4\x6f\xcc\xd6\x3b\xeb\x24\x8c\xd6\xdb\x50\x52\x20\x05\x2f\x21\x79\x15\x9f\xdf\xf3\x8f\x5d\xa5\xa8\x5b\x99\x79\x49\xbd\x49\xaa\x3c\x89\xe3\x37\xdf\x2a\xc5\x73\xdd\x55\x88\x22\x5d\x88\x44\x77\x60\xc8\x17\x7b\x3b\x0b\xeb\xd7\x5d\x85\x27\xb2\xe2\x59\x6e\x0f\x1e\x10\x97\xab\xd0\x44\x8e\x38\xae\xa0\xfb\x28\xe7\xa3\xeb\xb9\xc3\x21\x13\xbf\xee\x2a\x30\x91\x76\x46\xe6\x8b\xaa\x09\x38\x30\x4e\x57\x51\x89\xac\xbc\xe3\xa8\x83\xef\x73\x70\xc7\xaf\x9f\x01\xb7\x83\x4e\x3f\xbc\xeb\x84\xed\x96\xde\xab\x86\x90\xf0\x9a\x05\x9b\x1f\x89\xfa\x61\xc4\x12\x9a\xbe\x5f\x77\x95\x92\xb5\xb5\x7a\xfd\x38\xfa\xa2\xe8\xbc\xa6\xf2\x32\xcf\xd0\x4c\x43\x30\x53\x33\x81\x66\x9b\x44\x9e\x86\xc8\x17\xc2\x61\xbf\xcd\x04\x5c\x65\x66\x50\x25\xe7\x90\x69\xed\x2b\xb6\x5d\xa5\x65\x50\x25\xe7\x1c\x00\x97\xab\xa6\x0c\x76\xca\x39\xf9\x2a\x1d\x22\x3e\x7c\x57\x41\x19\x54\x8f\x65\x65\xb2\x7d\x63\x75\x15\x99\xef\xae\x05\x07\x5d\xf1\x4c\x2e\x06\x13\x9b\x38\xa1\xe5\x90\x68\x23\xd8\x54\x1b\xb0\x0a\x9f\xab\xd8\xf4\x6f\xa2\xb8\xcb\xe3\x8d\xf6\x31\xe5\x1e\xec\xa2\x79\x32\x21\xfa\x22\xb6\x3e\xd2\xe6\xfb\x59\x61\xe9\xdd\xb6\x3a\x19\xac\xde\x4d\x2b\xbe\x43\xcf\xec\x4e\x50\xa2\xe7\x32\x2c\xa7\xf8\x8e\x14\x0f\x68\x6b\xae\x0e\x17\xfa\xfe\xa9\x09\xd4\xd0\x3e\x4b\x25\x1a\xd6\xba\x18\x01\x2d\x54\x45\xc7\xa3\x88\xbe\x7f\x56\x84\x4a\xb4\x2a\x3c\x3a\xcc\x54\x03\xc9\xb9\xf7\x71\x42\xd4\xf7\x1b\x45\x28\x75\xc3\xc2\x4d\x98\xa8\x07\x9f\x0f\x09\xf5\xbc\x08\x6a\x79\x9a\x73\x60\x94\x17\x45\x28\xab\x93\x9c\x4a\x89\xb1\x1a\xd2\xcb\x22\xa4\x5a\xd9\xdc\xb2\xf4\xa2\xc1\xdf\xab\x51\x4f\x8e\x6d\xa8\xa6\x9e\x69\x31\x12\xd5\x28\x1a\x4d\x43\x72\x64\xc4\x7d\xc2\x85
\x42\xfa\x75\xb9\xbb\x00\x3a\x0f\x03\xc3\x66\x2e\x75\x73\x2e\x45\x2a\xa6\xb8\x5b\xf9\x1f\x8f\x03\x21\x1f\x4a\x68\x61\x45\x8b\xcb\x97\x4b\x9c\xbe\x89\xd3\xd0\x2e\x13\xaa\x01\x02\x82\xee\x0a\xa6\x6e\x78\x06\xf0\x06\xcf\xb0\x8b\x94\xad\x5c\x16\x64\xd9\x03\xa3\xb4\x2b\x94\x4b\xb7\xac\xd4\x4e\xee\x1b\xaa\x5d\xb6\x4c\xb5\xb2\x62\x93\xb6\x6f\xa4\x76\xe9\x92\x1a\xa5\x60\x23\x7b\x3a\xdc\xef\x9f\xd8\xd5\xc8\x52\x22\x93\xad\xe7\x9f\x84\x82\x9b\x8d\x48\x62\xb2\xf0\xe7\x48\xfd\x13\xbb\xea\x38\x74\x47\x4e\x17\x79\x01\xf7\xe4\xaf\x38\xed\x0c\xc9\x2e\x2f\x96\xca\x98\x28\x8d\x31\x57\x18\xb1\x3f\x23\x9d\xda\x55\x64\xa3\x29\x0a\x14\x64\xad\x12\x7e\x28\xa6\x98\x1e\x96\xec\xf7\x4e\xed\x32\x50\x5d\x41\x84\xf2\xa5\x44\xec\xdd\x42\xf4\xf4\xa8\xae\x20\x9e\xda\xc5\xa0\x92\x7e\x08\xc9\xba\x80\xb7\xda\xd0\x4e\xfe\x95\xd4\xc3\x03\xa0\xb2\x93\xfd\x6e\xda\xa1\x0b\xe0\x7e\x7a\xe3\x53\x3b\xd9\x9b\xca\xe1\xca\xe7\xdd\x94\x9a\x9d\x5f\xda\x2f\x4a\x3b\xd1\xff\x3f\xd4\x0d\x4f\xcf\x81\xab\xf0\x6b\xd8\xd3\x8a\xa1\xe7\xfc\x9d\x92\xcd\xfb\xae\x03\xf8\x46\x4e\x5e\x7f\xd7\x09\x66\x89\xc5\x2e\x09\x45\xfa\x61\x59\x9a\xc9\xba\x92\x84\xff\x5c\x74\xd5\xbd\x94\x53\xbb\x4c\x28\x95\x90\xb5\x17\xa6\x09\x4b\xa9\x71\x25\x9b\xf7\x33\xbb\x54\x98\x1a\x61\xe9\xe6\x7d\xef\x1d\xdc\x99\x5d\x43\xde\x49\x31\xac\x06\xd5\x2e\x20\xef\xa3\x17\x56\x43\x6a\x17\x15\x53\x2d\x64\x20\x8d\x85\x2f\xf1\x3b\x1b\x56\x2f\xc9\x05\xf8\x3c\x03\x2d\xd0\xd9\xc5\x85\x69\x85\xd2\xad\xb9\x5e\x08\xb9\x59\x6a\x6a\x74\x06\x40\xc1\x28\x4c\x02\x96\xc1\xb4\x0b\xcb\xf7\x1d\xcf\x3c\x48\x3c\x50\x9a\xa1\xa4\x8a\x7a\xd1\x39\x65\x24\x66\x5d\x22\xfe\xbf\x00\x00\x00\xff\xff\x63\xd4\xf0\x9c\xb1\x6f\x00\x00") + +func fontsLeanFlfBytes() ([]byte, error) { + return bindataRead( + _fontsLeanFlf, + "fonts/lean.flf", + ) +} + +func fontsLeanFlf() (*asset, error) { + bytes, err := fontsLeanFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/lean.flf", size: 28593, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsLettersFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x97\x69\x5f\x22\x39\x10\xc6\xdf\xf7\xa7\x78\xc6\x89\x0a\xea\x88\xb2\x5e\x8c\x2c\xc6\xd1\x5d\x15\xc7\x5b\x01\x15\x45\x44\xf0\x42\x40\x10\xbc\xfd\xec\xfb\xab\x4a\x3a\x9d\x34\xed\xba\x2f\xb6\x6c\x52\xff\x3c\x55\x95\x4a\x1f\x1c\xd6\xea\xb5\x64\x59\x60\x06\xd3\x48\x4e\x60\x72\x1a\xb3\xde\xef\xea\xc3\x43\xb5\xdd\xc1\xf9\x33\xf6\xda\xd7\xed\xf2\x1d\xb2\xe3\x58\x69\xd6\xeb\xe5\x56\xb9\x5e\xbf\x1e\x03\xb2\xdd\xfa\x33\x26\x27\xc6\x30\x99\x4a\x4d\x79\x1e\xd0\x2c\x95\xe0\xdb\xe8\x8f\x48\x1b\x35\x09\x28\x95\x9a\x1e\x4a\xe3\x99\x44\xdc\xaf\x7a\x8b\xec\x84\x37\x53\x11\x2b\xa6\xc7\x4b\x5e\xac\x14\x47\x91\x86\xff\xd2\x28\x56\x8a\x27\xc0\xc9\x1e\x5c\x4b\x77\xb8\x99\xac\x94\x5b\xe5\x4a\xa5\xda\xe9\x8c\x37\xdb\x97\x19\xcf\x13\x32\x74\x48\xef\xdb\xb7\x6f\xc2\x1d\x01\xb8\x2c\xbd\x01\x32\x11\x78\x0c\x0c\x0c\x40\x85\x11\xe1\xa5\xd9\x0f\x6b\xdf\xbf\xd3\x41\xf8\x5d\x59\x08\xad\x04\x53\x26\x3d\x24\xde\x7e\x14\x85\xf4\x12\x74\x91\x84\xf4\x8a\x3f\xd4\x9c\xae\xe5\x9b\x9e\x27\xec\x9e\x83\x83\xc0\xe0\xa0\x60\x18\x1c\x54\xcb\x91\x46\xc0\x92\xb0\x72\xac\x3e\x18\x1a\x1a\xd2\x5b\x1d\x1a\xa2\x43\x23\x9b\x90\xde\x50\xa4\xea\x6c\x75\x98\x82\xc3\xc3\xc3\x3c\xc2\x5c\x43\x6b\x54\x05\x34\x8b\xc5\x62\x42\x7a\xb1\x58\x0c\x7d\x4e\xc5\x4c\x6a\x3c\x1e\x67\x35\x1e\x8f\xf7\x39\x15\x0b\xae\xfc\x08\x80\x11\x0a\x8f\x8c\x8c\xc0\xf2\x46\x0f\xdd\x19\x96\x46\x47\x47\x99\x46\x95\x39\x14\x44\xed\x13\x0d\x9f\x96\x87\x31\x1a\xc7\xc6\xc6\x78\xec\xeb\x60\xa8\x44\x16\xd2\xfa\x29\xaa\xc3\xf8\xf8\xb8\x19\xad\x73\x48\xe8\x7b\x9f\xd0\x90\x48\x24\xb8\x86\x3c\x01\x7b\xb8\x77\x7a\x82\x8c\xb4\x89\x09\x00\x13\x13\x9f\x50\x90\x67\x6f\x6c\x92\x84\xc9\xc9\x49\x92\xad\x51\x2b\x3a\x29\x99\x4c\x26\xb9\x6b\x92\xcd\x5f\x23\x99\x4c\x8a\xfe\x68\xd2\xed\xf0\x07\x9b\x2e\x21\x64\xb2\x34\x28\xcd\xc9\xb3\x6f\xeb\xd4\x94\x7f\xc2\x53\x3e\x92\x57\x38\xa5\x2d\x94\x60\x2d\x31\xcd\x46\xea\xf4\xb4\x7f\x67\x02\x0d\x00\x61\x58\x0b\xfa\xcf\xcc\xcc\xa8\x25\xc9\x33\xcd\xb0\x29\x02\x25\xa8\xa8\xd6\xac\xe2\x59\x65\x5a\x34\x34\x3b\x3b\xab\x12\x09\x98\x18\xc2\x9d\xe7\xc8\x48\x9c\x9b\x03\x30\x37\x27\xbe\xd2\x42\x97\x4d\xaf\x98\x22\x23\x4a\xa5\x00\xa4\x52\x46\x4b\xe9\x12\x15\x25\x9f\xea\x7f\x56\x7f\xfe\xfc\x19\xc5\x6e\xd2\xfc\xfc\x7c\x88\xe7\xe7\x9d\xcf\x86\x74\x3a\x2d\xa4\x97\x4e\xa7\x11\x38\x2d\x46\xbd\x7f\x7d\xfa\x53\x99\x43\x9f\xbd\xbb\x48\xc9\x64\x32\x1c\xc8\x64\x32\x81\xf3\x45\x3f\x75\x61\x61\x81\xe6\x0b\x0b\x58\x58\x50\xba\xf6\x4a\x0f\xbc\x59\x5b\x4a\x29\x59\x92\x90\x12\x52\x48\x4f\x42\x92\xcc\x04\x13\x35\x79\xf6\xc6\x16\x17\x17\xd5\x6e\x17\xc9\x88\x48\xc0\xe2\x22\x13\x9b\xa3\x59\xc5\xbf\xc8\xb8\x98\x1c\x7e\x29\x62\xd1\x68\xbf\x1c\xcd\xee\xbc\x44\x46\xe2\xd2\x12\x69\x4b\x86\x5c\x2d\xc8\xb3\x8a\x97\xc9\x38\x91\xdc\xf2\xb2\x4f\x58\x5e\x0e\x11\x27\xba\xc5\x7f\x29\x13\x44\x7e\x3f\x56\x34\x39\x1a\xe7\x59\xc5\x7f\x2b\x13\x44\x7e\x22\x09\x3e\x05\x5a\xe4\x63\xb0\xb2\xb2\xb2\xc2\xe2\xca\x0a\x4d\x84\xf4\x08\x54\x22\x13\x6b\x9c\xa6\x28\x28\x5e\x5d\x05\xb0\xba\x2a\x5c\x62\x13\xd1\x51\xab\x78\x8d\x8c\xa4\xb5\xb5\x35\x44\x78\x13\xb7\x9f\xd8\x6c\x36\x2b\xa2\x29\x9b\x35\x5a\x96\x2c\x74\x9e\xeb\xeb\xc0\xfa\xba\x60\x58\x5f\x07\x03\x69\xae\xa2\x73\x82\xb2\xdf\xbf\xfd\x4b\xf1\x15\xb1\xb9\x3d\x37\x36\x88\x36\x36\x04\xe1\x06\x91\x42\xf0\x21\x42\x09\x06\x61\x2d\xb1\xb9\x09\x60\x73\x53\x10\x6d\x1a\xc2\xa6\x21\x6c\x1a\x52\x9a\x7d\x6f\xb7\xc8\x68\x7b\x5b\x5b\x00\xb6\xb6\x3e\x21\xce\x0b\x7f\xc9\x6d\xb3\x91\xb8\xbd\x0d\x60\x7b\x5b\xf4\x69\x70\xc9\xee\xbc\x43\x46\xe2\xce\x0e\x80\x9d\x9d\x3e\x52\x51\xce\xc3\x8e\x5b\xbc\xcb\x46\xe1\xdd\x5d\x00\xbb\xbb\x22\xac\x85\xa
3\x76\xe7\x3d\x32\x0a\xef\xed\x99\x8d\x19\x8d\x95\xbd\xbd\x7e\x4d\x15\xef\x2b\x63\x71\x7f\x7f\x5f\x9f\xd5\xbf\x93\x29\x3e\x38\x00\x70\x70\x20\xbe\x22\x1c\x90\x85\x8a\x73\x39\xc6\x5c\x4e\xb8\x0c\x9e\xe4\x72\x2a\x3d\x97\xe3\xb9\x2a\xcd\x71\xc8\x7a\x5e\x78\xa1\x7c\x5e\x71\x3e\x2f\xa2\x26\x79\x3d\x41\x3e\x8f\x3c\xbf\x78\x01\x15\xcb\x3b\xab\xf1\x72\x85\x02\x51\xa1\x40\x01\xe2\x42\x41\xe5\x14\x0a\x34\x73\x55\x3b\xd7\x5a\xe2\xf0\x10\xc0\xe1\xa1\xb0\x09\x87\x64\x6a\x29\x82\x30\x99\xe2\x23\x32\x25\x69\x7f\x74\xc4\x29\x47\x47\x9c\x6a\xc5\x55\xc1\xf1\xf1\xf1\xb1\x20\xc7\xe1\xb0\xe3\x98\x9f\x5a\x2c\xfa\xcf\x07\x13\x47\x88\x54\x4a\xb1\xe8\x6f\xa5\x58\x74\x3e\x12\x4e\x4e\x4e\x4e\x58\x88\x70\x26\xa6\x1f\xc6\xd3\x53\x5a\xe4\xf4\xf4\xf4\x94\x1c\x70\x2a\x82\x2f\x78\xdb\x7d\xf1\x5b\xd8\xa2\x92\xb2\xd0\x43\x7f\x46\xa1\xb3\xb3\x33\x92\xd5\x18\xfe\xed\xec\xae\x56\x2e\xa3\x5c\x26\x62\x20\x2a\x97\xa1\x89\x9c\x8e\x06\x1d\xce\xcf\xfd\x62\x9b\xc8\x8c\x76\x7e\xee\x68\xf6\xf6\x82\xc6\x95\x4a\xa5\x22\xa4\x57\xa9\x68\xc5\x00\x47\x2a\xce\x85\x66\xb8\xb8\x10\x0e\x5d\xb0\x09\xe9\x5d\x5c\xf4\x6b\xd1\x3d\xab\xd5\x2a\x41\xb5\x0a\xa0\xca\xa0\x15\x30\xb9\x3d\x6b\xb5\x9a\x90\x5e\xad\xc6\xb5\xb5\xd0\xec\xb3\x3b\x76\xc9\x26\xa4\x77\x79\x09\x40\x93\xaf\x71\x96\x22\x96\xb8\xf8\xea\xca\x2f\xb6\x89\xcc\x68\x57\x57\x0e\x59\xe7\x76\x7d\x7d\x6d\x6e\xae\x62\x7b\x34\xdb\xbb\xb9\xb9\x09\xde\x19\xd6\xdc\xf5\x37\x37\x37\x37\x5c\x70\x7b\xab\x33\x09\x6e\x6f\x19\x6e\x6f\x6f\xb5\x62\x40\x85\x82\xcd\xd4\xeb\x75\xf1\xf9\xe8\x5e\x2b\xdd\xe0\xee\x0e\x7c\xdc\xdd\xa9\xd9\x1d\xa0\x5e\x5f\xcc\xe0\x7e\x36\x05\x0b\x36\x1a\x68\x34\x1a\x8a\x1a\x40\xa3\xa1\xb4\x3e\x8a\x7c\x3e\x9a\xcd\x66\x93\xa0\xd9\x04\x9a\x4d\x1b\x4c\x28\xaa\x67\xab\x85\x56\x4b\x53\x0b\x68\xb5\x14\xd1\x44\x45\x83\xbc\xa8\xf7\xf8\x3d\x99\x90\xde\xfd\x3d\x68\x42\xda\xbd\xaf\x71\x96\x4b\xd6\x86\xdb\x6d\xb4\xdb\x0a\xda\x40\x5b\x29\x26\xd4\xf7\x8f\xae\xaf\x74\x3a\x1d\xf2\x9d\xd0\x9c\xa6\x9d\x60\x2e\xbd\x87\x07\x95\x10\xf8\x87\x07\x7b\x0e\x12\xc2\x3f\xcb\x54\xa8\xdb\x05\xd0\xed\x7e\x42\xe8\x76\xbb\x5d\x74\xa3\x6e\x03\x15\xf7\x7a\x00\x7a\x3d\x0a\xf7\x7a\x74\x70\x62\x8f\x64\x55\xd2\x8b\xfc\x17\xc6\xd4\x3f\x3e\x2a\x7c\x7c\x0c\x4f\xf0\xf8\xa8\x0f\x2e\xa7\x18\xbd\xfa\xbe\xeb\xcc\x52\x4f\x4f\xc0\xd3\x13\xc7\x09\x6d\xb0\x42\xfd\xa7\xf0\xfc\x0c\xe0\xf9\xd9\x21\x3c\xb3\xf9\x15\x96\xe6\xdc\x9e\x17\x32\x4e\x7a\x79\xe1\x86\x2f\x2f\x21\x3d\xe8\xf6\xfa\xca\xc2\xeb\x2b\x25\xbc\xbe\xbe\xc2\xf6\xbe\xae\xf3\x08\x84\xf4\xde\xde\xa2\x07\xe9\xbd\xbf\xeb\xcb\x4a\xc0\x75\xef\xef\xef\xb6\xd7\xba\xce\xb3\xdf\x38\x1f\x1f\xf8\xf8\x10\xd2\xfb\x00\x3e\x9c\xa7\xae\x1f\xa4\x67\xfd\xfd\xdf\x93\x7f\x02\x00\x00\xff\xff\xd9\x6e\xbb\x3d\xd6\x15\x00\x00") + +func fontsLettersFlfBytes() ([]byte, error) { + return bindataRead( + _fontsLettersFlf, + "fonts/letters.flf", + ) +} + +func fontsLettersFlf() (*asset, error) { + bytes, err := fontsLettersFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/letters.flf", size: 5590, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsLinuxFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x56\xcd\x6e\xe3\x36\x10\xbe\xf3\x29\x3e\x38\x06\x94\xb4\x1a\xaa\x4e\x36\x87\x14\xcd\x82\x3d\xb4\x97\x16\x58\xa0\xbd\x0a\xb5\x68\x8b\xb2\x84\x95\xa8\x85\x44\x75\x13\x94\x7d\xf7\x82\xa4\xa4\xc8\x8e\x9d\xed\x03\x04\x96\x61\x8a\xdf\xfc\x7e\x33\x43\xba\xa8\x8b\x5b\xb9\xc6\x07\xdc\xe1\x01\xb4\xc1\xe6\x8e\xd5\x95\x1e\x9e\xb0\x7b\xc6\xef\xb2\xeb\x9e\xf1\x67\x53\x99\x12\xf7\xc9\xe6\x36\x79\xb8\x07\x61\x27\x7b\x95\xa3\xd5\xe0\x7d\x75\x40\x5b\xe0\x8f\x76\xa7\x3a\x53\xe2\x37\xd9\x35\x52\x73\xf6\x6b\x75\xa8\x95\x41\xa7\x6a\x25\x7b\x85\x5b\xfe\x03\x88\xf0\xf3\x70\x18\x7a\x83\xfb\x18\x9b\x87\x87\x3b\xc6\x7e\x79\xfa\x52\x4b\x2d\x4d\xd5\x6a\x67\xa4\xa8\xba\xde\xa0\xae\xb4\xfa\x91\xb9\x98\x40\x58\x35\xf2\x50\xed\xa1\x87\x66\xa7\xba\x15\x8a\xb6\x43\x51\xd5\x0a\x55\xae\xb4\xa9\x8a\x6a\xef\x95\x99\x04\x00\x42\x5f\xb6\x43\x9d\x43\xd6\x5f\xe5\x73\x8f\x9d\x42\x26\xa3\xd8\x2b\xe9\xf6\x2b\x43\x10\x32\xa5\xc2\xaa\x94\x5d\xbe\xab\xa5\xfe\xbc\x72\x71\x7d\xe9\x2a\x6d\x7a\xc8\x1e\x12\x7e\x37\xc6\x6e\x30\xd8\x4b\x1d\x19\x67\xa6\x6f\x86\xbe\x54\x39\xfb\x10\x2c\x94\xaa\x3a\x94\xc6\x45\x2c\xb1\x2f\x65\x27\xf7\x46\x75\xec\xee\x0d\x30\x86\x6e\x0d\x2a\xbd\xaf\x87\xbc\xd2\x07\xe4\xaa\xdf\x2b\x9d\xab\xae\x67\x0f\x41\xad\x91\x4f\x3e\x73\xd4\x4a\x1f\x4c\x89\x6b\xf5\x34\x09\xef\xdb\xa6\x51\x3a\x10\xd3\xdf\xe0\x7b\x48\x14\x43\x7e\x50\x28\xe4\xde\xb4\x1d\xa3\x8d\xb7\x90\xab\x42\x0e\xb5\x09\xc1\x36\x6d\xae\x7c\xe2\xa6\xac\x7a\x14\xad\x36\xb8\xae\xab\xcf\x0a\x2b\x6a\xb0\xb9\x5f\xb9\xda\x39\xbb\x52\xe7\xde\xee\x0d\xdb\xdc\x79\x2b\x81\x69\x17\xfe\x91\x5b\xc6\xd6\x62\x7e\x04\xe3\xc4\x05\xcb\xfe\x8e\x04\x43\x0b\xe1\x98\x15\x82\x59\xeb\x56\xf3\x57\x30\x58\x0b\xc1\x1e\x81\xc7\x69\x8d\x20\xc9\xc9\x3a\x03\x48\x6d\x0a\xc1\x32\xb2\x14\x05\xcc\x81\x2d\x62\x8a\x05\x43\x82\xc4\x83\x11\xda\x17\x50\x7a\xe3\xda\x9b\xca\x67\xc7\x82\x61\x7c\x04\x0b\xda\xd7\xb8\x76\xef\x19\x65\x0b\xa7\xdc\x6d\xdd\xe0\x46\xb0\x88\xa2\x97\x68\x52\x9b\x08\x46\xdf\x91\x60\x89\x4d\x27\x9b\xb8\x82\x60\x57\x57\x57\xe3\x6a\xdc\xf4\x4f\x32\x3a\xc2\x2c\x81\x13\x89\x4f\x93\x84\xe7\x69\xcc\xc4\xf9\xc4\x4b\x26\x9c\x88\x1c\x6a\x1d\x33\xd6\x25\x4a\x34\xd3\xe0\xb5\x13\xee\x69\x13\x0c\x44\x93\x7d\xa7\xb5\x24\x87\x96\xcc\xf1\x60\x11\x20\x6b\x5f\x81\x88\x3d\x18\x91\xf7\x06\x64\x4b\x30\x26\xef\x22\x45\xea\xf5\xa2\x99\x9b\x98\xb8\x0b\xda\x22\xf1\x15\x3f\xeb\x2f\xa3\x04\x89\x27\x3b\x3a\xce\xcf\x61\x49\xc2\x4f\x4c\x4e\x80\xd3\xc9\x96\x75\xc0\x5a\xb0\x76\xfc\x62\xbd\xd8\x48\xa6\x8d\x35\xb6\x5b\xd7\x81\x09\xdc\xd6\x3a\xa3\xcc\xfd\x00\x01\xdc\x06\x90\x88\xe6\xdd\x39\x9a\xf5\x76\xeb\x5e\xd7\x29\x52\xf7\x13\x51\xb4\xd0\x8c\xa7\x50\xe1\xfb\xf9\x25\x22\xe5\x96\x72\xd1\xd1\x7c\xae\x59\x28\xd9\x5f\x27\x74\x38\x8c\x7e\xc2\xeb\x8c\x27\xad\xb3\x45\xe3\x9e\xe1\x91\xfc\xe8\x0c\xc3\x16\x96\x2e\x96\x7b\x06\x5f\xb5\x57\x00\xa3\x6d\x30\x9b\x2c\x41\x1e\x5a\xd3\xc2\x3e\x8e\xa9\x44\x8b\x8e\x98\x84\xbc\x80\x07\xe7\xfe\x1e\x5b\x3a\x9e\x7b\xf6\x28\x20\x1e\x07\x25\x8e\x40\x42\x94\x1d\xb5\xb4\xf3\x1b\xda\xc9\x6e\xb7\x38\xd3\xf3\x9c\xfc\x67\x64\x78\xf4\xbd\xa0\x64\x12\x1a\x65\x78\x36\x8a\x9c\xba\xf9\xc6\x68\x2d\x98\x8b\x2e\x33\xf7\x09\xb1\x07\xd3\xf4\x04\x0c\x19\xd0\x85\x14\xc3\x0c\xa6\x48\x2f\x56\x2c\x73\x55\x39\x33\x30\x53\x5a\x17\xa3\x0e\x02\x48\xe1\xa7\x1f\xc8\x5e\x02\xbf\xc4\xde\x91\x91\x23\x2b\x1f\xe1\x8b\x14\xb9\x0c\xb2\x73\x6e\x3e\x8e\x6e\x96\xfc\x7c\xf3\x0c\x8a\x63\x72\x37\x81\x03\xb2\xc5\xa9\xe5\x4f\x91\x91\x94\x93\xb3\x87\xf8\x74\xce\x51\x34\xf7\xd9\xf8\x39\xb7\x78\x9f\xc1\xf7\x19\x7c\x9f\xc1\xb7\x66\xd0\xdd\xac\xff\xf8\x79\x9b\xaf\xad\xa9\x8d\xe6\x5e\x12\xc
c\x8f\xe2\xbf\x82\x25\xf3\xdf\x35\xaf\xee\xae\xfc\x6d\x3c\xbf\x4d\x36\xdf\x18\xc8\xff\xb3\xf8\x2f\x00\x00\xff\xff\xf6\x6c\xf8\xe3\x63\x0c\x00\x00") + +func fontsLinuxFlfBytes() ([]byte, error) { + return bindataRead( + _fontsLinuxFlf, + "fonts/linux.flf", + ) +} + +func fontsLinuxFlf() (*asset, error) { + bytes, err := fontsLinuxFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/linux.flf", size: 3171, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsLockergnomeFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x56\xdf\x6b\xe3\x38\x10\x7e\xf7\x5f\x31\x94\x82\x1b\x22\x59\xbd\xf6\xa0\x30\x38\x46\xdc\xc1\x1d\x2c\xfb\xb0\xb0\xaf\x82\x38\x49\xdd\x6d\x68\xda\x94\xb4\x7d\xe8\x22\xfc\xb7\x2f\x1a\x49\x96\x64\xcb\x61\xd9\x5d\x92\xa8\x1a\xcd\x37\xdf\x7c\x33\xfa\xf1\x70\x78\xb8\xd9\x5c\xc2\xdf\x70\x0b\x37\xd7\xc0\xff\x82\xdb\xe2\xeb\x71\xf7\xd4\x9d\xfe\x7f\x39\x3e\x77\xb0\xfd\x84\x7f\x1f\x4f\xfb\xb7\xf7\xe3\xeb\x63\x77\x82\x2f\xc7\xb7\xee\xf5\x11\xbe\xed\x4f\xfb\xc3\xe1\x08\xf5\x2b\x0d\x76\x37\x77\x77\xd7\x72\x77\xdc\x9e\x36\xd5\xc7\xcb\xbe\xea\xee\x3f\x9a\xe2\xbf\xfd\x8f\x43\xf7\xbe\xff\xd9\xdd\x1b\x90\xef\xfb\xe7\xe3\x0b\xfc\x73\xda\xdc\x1f\xba\x4f\xa8\xdf\x3e\xb7\xb7\x72\xb3\xed\x4e\xd5\x66\x57\x7d\x3c\x35\x45\x71\x79\x29\xe3\x8f\x2c\x10\x35\xc8\xa2\xc5\x12\x64\x01\x4b\x30\xdf\x00\xe0\x0c\xce\xd8\x02\x98\x8f\x35\x91\x39\x8c\x64\x01\xa8\x51\x9b\x39\xc4\xd5\x8a\x1c\x86\x99\xb0\xa8\x42\xd4\x88\xca\x44\x32\x83\x46\x16\xca\x0c\x84\x5d\xd4\xba\x90\x26\xa2\x28\xcd\x14\xa2\x30\x00\xcc\xfc\xa2\x8e\x91\x04\x22\x2a\xe2\xa0\x90\xa1\x58\x33\x13\x99\x31\xa6\xd6\x23\x5e\x96\xbc\xc5\x09\xa4\x0d\x61\x46\x71\xaf\xf0\x8a\x26\x5b\x54\xde\x51\x12\x28\xcd\x2e\x70\x01\x26\x58\x09\x89\xd1\x52\xe6\x1c\x39\x27\xb3\x0e\xce\x04\x4d\x92\x59\x31\xcc\x3f\x52\x63\x98\x8b\x45\x0b\x84\x62\xa2\x21\xd1\x22\xf2\x4f\x35\xcf\x7a\x07\x43\x4e\xc1\xd4\x9b\x19\x64\x26\x0b\x04\xce\x41\x53\x49\x10\xcb\x28\x0b\x74\xd5\xf3\x65\x0d\x34\xc8\xd8\xf7\xa6\x90\xe0\x64\x14\xc4\x33\xf2\x16\x58\xb6\x06\x1e\x40\x63\x6d\xcb\xc4\x52\x78\x0d\x36\xe3\xe0\x19\x25\x2a\x83\xa1\xa5\x52\x5b\xc2\x22\x5e\x40\x19\x28\xb3\x90\x73\xce\xb2\x19\x18\x17\x2f\x83\x49\x25\xf4\x41\xa2\x41\xc3\x39\xe7\x75\x06\xc1\x2f\x50\x66\x81\x76\x7f\x97\x49\xa9\xad\x38\x8b\xe1\x3b\x6b\x88\x4b\x6b\x05\xab\xb1\xce\xf4\xdd\xb4\xee\x93\x06\x30\x52\x56\xe4\xda\x60\x33\x6d\x4e\x81\x7d\xa8\x8c\xcb\x7e\xb9\x1c\x77\x8e\xdb\x3e\xb2\x68\xd5\x7a\xad\x61\x41\x23\x4c\xe4\x71\xbb\xda\x89\x8f\xa8\x19\x55\xc3\x1d\x08\x31\x9a\x6d\x72\x2a\x44\xc3\x11\x6b\x3f\x23\x92\x90\xa1\x5a\xda\x35\x6e\x39\x59\x14\x21\xb9\x28\x19\x24\xf4\x8d\x81\x88\x4d\xbc\xcb\xe6\x16\x99\x55\x71\xdc\x34\x3b\x47\x1c\x60\xb5\xf2\x33\xa3\x70\x09\x95\x73\x12\xe8\x61\x9b\x67\x39\x01\xed\x4b\x6d\xc3\x59\x1b\x26\x3d\x3d\x0d\x57\xc3\x4c\x38\x1d\xb6\xfd\xef\x48\x70\x31\x4f\x5c\x65\xb2\x53\x23\xe2\x51\x17\x9c\x21\x1e\x6a\xb7\xf6\x48\x7d\xdf\x4f\x15\x4f\xfa\x69\xa8\x41\x1e\x29\x6e\x3a\x35\xba\x4a\xc2\xf1\x40\x57\x09\x9b\xe3\x14\x97\x25\x1a\x64\x14\x77\xc4\xdb\x9c\x98\xd3\xcc\xd3\xed\x32\xaa\xdd\x45\xaa\x6a\x0e\xa9\x39\x5f\xe0\x69\xb8\x11\x71\xa8\x9d\x4e\xcc\x77\xed\xa0\x45\x8c\xb4\x02\x3a\x6c\xc1\x8f\x9d\xa1\xf5\x17\xa8\x41\x56\x0e\x19\x51\xb1\xd8\x7b\xb8\xc9\x61\x18\xfb\xed\x0c\x74\xf6\xa8\xc9\xb5\xea\x9d\xa3\x11\x8e\x0f\x31\x97\x22\x21\xd3\xa4\xfd\xf2\xa7\x98\x29\xae\x02\xfb\x4a\x88\xef\x1c\x7f\xa7\x96\x7a\x7a\x21\x05\x4f\x01\x2e\xb9\xd8\x68\xcf\xd2\x12\xe7\x3d\xb1\xef\x85\x35\xb2\xc8\xc8\xfc\x29\x6c\x05\x8c\x6e\xf0
\xe0\x29\xec\x1a\x65\x7f\x58\x65\x18\x44\x6c\x2d\x80\x4e\x62\x2e\x97\x16\xcd\x7d\x79\x38\x9a\x66\xe0\xe1\x44\x26\x79\xbd\x26\x96\xba\x57\xa9\x31\x07\x67\x3d\x94\x38\x93\xb4\xdb\x8d\x3a\x67\x64\x43\x15\xc4\xbc\xa7\x35\x5a\x76\x33\x8a\x58\xf5\x63\x4f\x3d\xab\x65\x6d\xb7\x44\x83\x74\xb7\x85\x63\x23\x79\x83\x44\x1b\x21\xce\xd3\xbd\x61\xb3\x79\x2a\xe7\x29\x72\x9e\xfe\x7e\x56\x19\x4f\x65\x5d\xc4\xb0\x45\xe6\x63\x56\xcc\xbc\x9f\x07\xa3\xcb\xc1\x65\x14\x1a\x8a\xc4\x6c\x56\xe4\xa7\xd2\x97\xb6\x2c\x80\x4f\x5e\x73\x8a\x1a\x72\x55\x83\xbb\xec\x07\x03\xe3\xb8\x66\xe1\x39\x33\xfe\x95\x85\xfb\xff\xa7\x83\x5f\x01\x00\x00\xff\xff\x0f\xc2\xf4\x69\xae\x0c\x00\x00") + +func fontsLockergnomeFlfBytes() ([]byte, error) { + return bindataRead( + _fontsLockergnomeFlf, + "fonts/lockergnome.flf", + ) +} + +func fontsLockergnomeFlf() (*asset, error) { + bytes, err := fontsLockergnomeFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/lockergnome.flf", size: 3246, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsMadridFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x55\x4f\x6b\x1b\x3f\x10\xbd\xef\xa7\x78\x98\x85\xdf\x2f\x87\xf5\xc4\x49\xa1\x34\x78\x82\xa1\x3d\xf5\x90\x5b\x4f\x55\x31\x5b\x67\xdd\x38\x24\x76\xea\xb5\x53\x4a\xd5\x7e\xf6\xa2\x91\xb4\x92\x56\xda\x4b\x0d\x6b\xb4\x7a\xf3\x7f\xde\xcc\x6e\x9f\xb6\x57\x6d\x8d\x37\x58\x60\x71\x89\x66\x81\xb7\xd5\x73\x7b\x7f\xdc\xdd\xcf\xb7\x4f\x5b\x7c\xfd\x89\x8f\xe7\x76\x8f\xf7\xed\x11\xff\x3f\x6e\x56\x8f\xe7\x6f\xe7\xee\xd4\xcd\xbf\x9f\x77\xcf\xf3\xf3\xe6\x79\xde\xf5\x17\xd5\x6b\x77\xec\x77\x87\x3d\x16\xf3\x4b\x34\x0d\xae\xae\xe9\xee\xf0\x4a\xef\xae\xab\xea\x53\xdf\xf5\xe8\x5f\xda\xfd\xae\x7f\xc0\xe6\xa1\x3d\xb6\x9b\x53\x77\x44\xdf\x9d\xf0\x63\x77\x7a\x40\xf3\x01\x87\x97\xd3\xee\xb0\xbf\xa9\xf0\x2f\xbf\x75\x38\x55\x9f\xc1\x68\xff\x03\xa0\xc0\xe8\xcc\x01\x5f\xc0\xd8\xc9\xe9\x17\x18\x07\x39\x69\x30\xce\x72\xfa\x0d\xc6\x5e\x94\xff\x80\x71\x57\x55\x75\xbd\x8a\x9f\x55\xa5\x6b\xfb\x70\xbd\xaa\x20\x17\xe6\x0d\x88\xff\xdc\x25\x1b\x99\x01\x5e\x55\xa4\x95\x39\x2b\x23\xa6\x34\xc9\xbd\x00\x0c\x79\x21\x03\x10\x38\x00\xd0\x17\xe6\x8a\x0d\xac\x48\x05\x80\xac\x77\xff\x18\x65\xe3\x4b\x5e\xd5\xe0\x50\x3c\x41\x8b\xd5\x70\x29\x9e\x1b\x6e\xea\x21\x22\xef\x0c\x03\x60\xcf\x1e\x70\x8e\x68\x70\x86\x48\x12\x05\x49\x0e\x92\xa2\xc4\x36\xb3\x48\x92\xd8\x78\xd6\x64\x82\x53\x1c\xd5\x42\x6e\x6c\x0d\xa2\xca\x89\xb4\x2b\x50\x22\x6d\x01\x5e\x8e\xcd\x40\xec\x2c\xd9\xda\xd0\x01\xd0\xa2\x31\x6b\x54\xe6\xd8\x46\xc4\x05\x40\xd4\x69\x56\xce\xe1\x36\x77\x4e\xce\x48\xe6\xdc\xd7\x26\xd4\xc7\xbd\x50\xd2\xc7\x25\xa7\x7d\x5c\xaf\x3d\x9b\x22\x8a\x49\x73\xf9\x36\x6d\xae\xaf\x07\x8d\x7b\xe8\xb2\x9b\xd1\x54\xda\xd2\x08\x24\x80\xe1\x9e\x5e\x0a\x5b\xe5\x9c\x6a\x00\x63\x53\x2c\x21\x69\x49\x98\x29\x77\xce\x99\x46\x0c\xe8\x02\x3d\xb0\xce\x34\x30\x11\xae\x9f\x49\xed\x2b\x69\x4d\xac\x91\x31\x4c\xdb\x59\x71\x4e\xe3\xa1\x02\xca\x89\x91\x9d\x3d\xcd\xce\xad\xf7\x1b\x47\xa4\x0a\x05\x54\x43\x39\x8a\x59\xd3\x54\xd6\x56\x43\x4d\x69\x64\x80\x5b\x29\x85\xc1\xb0\x14\x48\x88\x80\xa9\xa8\x80\x28\x49\x28\xdf\x40\x0f\xc2\xe1\x60\xcd\xf0\x22\x41\x0a\x9e\x95\xf1\xc0\x47\x91\xc6\xc0\x88\x99\x54\x9e\x6d\xcd\x7e\xa5\x69\x8e\x58\x8f\x60\x26\xb6\x6f\xa3\x16\x16\x64\x7b\x23\xd4\x78\x18\xc4\xf1\x55\xc5\x66\xbe\x56\x95\xca\x16\x2b\x6c\x60\x3a\x9a\x68\x47\x23\x0c\xfb\x42\x27\xab\xc7\x69\xdc\x88\xc6\x4d\x0c\xe8\xb2\x29\xaf\x61\xb7\x46\x52\x9b\x3a\xae\x03\x26\x22\x92\xff\x51\x44\xa5\xb5\xa3\xd3\xa4\xe0\x3e\x35\x89\x09\x99\x5d\x21\x
74\xf8\x98\xc1\x07\xa0\x38\x8e\xc2\x47\xed\x1d\x7a\x97\x79\x52\x69\x34\x49\xb6\x59\xe1\xf4\x78\x3a\xca\x3d\xd0\x16\xa8\x7d\x91\xf2\xfa\xc8\xa2\x4d\x08\xd5\x94\xf3\x28\xcf\x43\x02\xa8\x14\x70\x7b\x42\xdb\xcf\x72\xbc\x0d\x2c\xa6\xe6\xe6\x8e\x66\x6a\xc2\x5c\xdc\x35\x1f\x2e\x8d\xf9\x2f\x03\xe3\x86\x31\xf4\xa2\x0e\x4f\xf8\x0c\x08\x45\x92\xcf\x00\x4d\xf1\x9e\xa6\x28\x48\x65\x0a\xda\x28\x30\x2e\x31\x4d\xb5\x90\xc2\x7a\x8e\x81\xa6\x69\xca\x6c\x70\x57\xf9\xfa\xfc\x1b\x00\x00\xff\xff\x57\xd0\xd4\x0a\x85\x0a\x00\x00") + +func fontsMadridFlfBytes() ([]byte, error) { + return bindataRead( + _fontsMadridFlf, + "fonts/madrid.flf", + ) +} + +func fontsMadridFlf() (*asset, error) { + bytes, err := fontsMadridFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/madrid.flf", size: 2693, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsMarqueeFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\x4d\x8f\x1c\x35\x10\xbd\xcf\xaf\xa8\x43\xae\x98\x30\x42\x22\xf8\xd4\x1c\x92\x1b\x07\x56\xe2\xbe\x41\x9a\xa0\x95\x56\x20\xc1\x5e\xe0\xd7\xa3\xe9\xb6\xcb\xef\xbd\x2a\x77\xcf\x2c\xab\x6c\x36\xdd\x76\xb7\xbb\xbe\xeb\x55\xd9\xfb\xe5\xf9\xcb\xf9\xf3\x3b\xfb\x60\x3f\xd8\xf9\xbd\x7d\xf3\x9d\x9d\x4f\x3f\xff\xf4\xf0\xcb\xaf\x1f\x3f\xda\x6f\xff\xd8\xc3\x9f\x7f\xd8\xa7\xbf\x9e\x5e\xfe\xb5\x0f\xdf\xfe\xf8\xfd\xe9\xd3\xd3\xef\xcf\x97\x17\x7b\xb8\x3c\x5f\x3e\xff\x7d\xb1\x73\x79\x7f\x7a\xb7\x24\xbf\xb6\x2c\xa7\x52\x2b\x5f\xac\x5d\xcc\xac\x3d\xbb\x8e\xd6\x87\xa5\xfa\xcd\x6c\x7d\x7f\x7c\x6b\xf7\xfe\x73\x9d\x96\x5a\xad\x5d\x57\x0e\xeb\xcf\x7a\x0f\x6f\xef\x5c\x8c\x8c\xd6\x79\x7f\xb3\x4a\xbd\xa9\x56\x45\x8e\xed\xbb\xf1\x76\x1b\x45\xaa\xa0\xc9\x4a\x70\x5d\x89\x1f\x47\xa9\xdb\xba\x6e\xc9\xb1\x0e\x09\x96\xca\x04\x41\xc2\xab\x9a\xfe\x71\xce\x58\x24\xdc\xde\x34\x07\xcd\x2f\x4d\xc4\x46\xae\xd1\xd7\x9b\x8b\xeb\xec\x1a\x87\xec\x25\xdd\x88\x28\x1b\x4f\x34\xf1\x4b\x33\x5f\x77\x54\xf7\xb5\x75\x8f\xc7\xf7\xc9\xf7\x4c\x3f\x09\x07\x08\xc2\xe8\xfe\xce\xb4\x0c\x86\xe9\x1a\xa5\x83\x0c\x76\x2e\xee\x9a\xfe\x55\xbc\x23\xe7\xec\xfd\x5d\x9c\xd8\xe8\x6d\x38\x22\x70\x78\x67\xe8\xe5\xc1\x0a\x66\xec\x23\x54\x56\x92\x1a\xcd\x03\x84\x30\x9a\x28\xc3\x40\xa6\x36\xc5\x04\xca\xfc\xb7\x38\xa8\x48\xf8\x65\xb7\x02\x46\x0c\x49\x5b\x3b\xc0\xad\xcc\x38\x7d\x4b\xc8\x36\x8a\xbe\xa0\xfa\x48\x6a\xb1\x29\x07\x61\xad\xc5\xf5\xb9\x0a\xe0\x64\x8f\x6d\x2a\x01\x88\xb3\x9e\x08\x9c\x09\x90\x37\x31\x2b\x72\x9b\x92\x26\x95\x3d\x2e\xee\x20\x44\xec\x60\xa5\xb0\x26\x9a\x50\x70\x40\xbc\x51\xc4\x59\xf7\x89\xb8\x00\x3d\x13\x41\x5e\x72\x7a\xb3\xaa\xaa\x0d\x81\xbf\x93\xf7\xb9\x91\x72\x4d\xfa\x24\x0f\x72\x90\xd7\x24\xfc\xdb\x1b\x30\xc5\x0e\x23\x05\x3a\x37\xb8\x31\x39\xf6\x0a\x05\x48\x92\x49\x13\xec\x70\xc4\x10\xec\x38\x5c\x08\x70\x46\xd5\x90\xe1\x84\x30\x04\x43\x21\x89\xa8\xc4\x26\x30\x28\x92\x8f\xd9\x93\x30\xb8\x5f\x80\x03\x15\x88\xb0\x10\x1b\x8e\xa0\x67\x45\xbd\x9e\x3e\x93\xaa\xe5\x14\x30\x61\x8c\xdb\x1a\xa8\x80\x2d\x4b\x0a\x83\xeb\xe3\xe3\x63\xc0\xe8\xbc\x59\x1a\xd1\x0d\xf1\x64\x82\x00\x59\x2e\x53\x05\xe9\x49\x48\x48\x18\xe0\x3f\xb8\x9b\x7c\x35\x60\x00\x3f\x1d\x5f\x35\xd8\x81\x35\x9a\x1d\xd0\x3f\x64\x29\x2d\xfd\x19\x35\x5e\xe4\x6e\x19\xe3\x7a\xf3\x6c\x16\x06\x0d\x88\xe0\x63\x52\x1e\x99\x4d\xc6\x50\xc8\x02\x58\x2c\xa3\x2c\x55\x05\x6b\x18\x76\xc9\x76\x17\x68\xc2\xbd\x09\xe1\x10\x61\x0b\xda\xaa\x87\x6d\x12\x11\xe9\xa4\x57\x73\x37\x12\xc4\x59\x82\x6e\x04\xb7\xbb\x93\x51\x2c\x6e\xfc\x26\x30\x3a\xba\x40\x62\x51\x07\x76\x34\x2a\x15\xfb\xa3\x9a\xf4\xf8\xc6\x85\x07\xab\x26\x20\
x96\x67\x73\xb2\x2c\x6b\xcf\x66\xb6\xbc\xc5\xd9\xf7\x86\x59\x9b\x74\x7d\x87\xaf\x87\x16\x05\x7c\xc0\x20\xe7\x38\x94\x90\xa2\xa9\x59\xd0\xa8\xe6\x7c\xc1\x78\xcc\x95\x99\x72\xbe\x1a\xb1\x0c\x1c\x07\xd6\xc4\x8e\x47\xac\x1f\xe5\xcf\xe7\x03\xc2\x0b\x76\xf2\x59\xa9\xe7\x8c\xb6\x7d\xfc\x89\x59\xbd\x07\x82\x53\x0d\x45\xc1\x23\xf7\x04\x6f\x15\xef\x5c\xa1\x60\x69\x9e\xaf\x9b\x5e\x94\x5a\x1a\xc6\x64\x66\x1a\x9e\x5a\xd7\xd5\x1d\xb9\x2f\xbb\x34\x6c\x3e\xa9\xaa\x52\xd5\x23\xf4\x23\x9d\xe0\x2b\xe3\x7e\x86\x89\xbd\xcd\x8c\x18\x46\x67\xbc\x7a\x72\x03\x2a\x27\x36\x73\x67\x63\x60\xe7\xcd\x85\x27\x24\xab\x26\xc1\x9f\x55\xa0\x4c\x90\x9d\x7c\x2b\xdc\x13\x04\x90\x40\x60\x18\xc5\x2c\xcf\xd7\x99\x00\x68\x06\x57\xb9\x40\xef\x63\xd2\xc3\xd3\xbb\x14\xc2\xd3\xa8\x4d\x19\x92\xd1\x67\x0c\xef\x99\x31\xc3\xb0\x11\x03\xa7\x26\x1e\x35\x49\x1d\xe9\x1c\xb1\x8e\xd4\x5c\x43\x37\xfd\xe1\x2d\x1c\x65\x09\x3b\xdd\x20\x04\x59\x21\xa5\xa6\xfb\x16\x3c\x9e\xdc\xbf\x25\x67\x15\x85\xa1\xb4\x1c\x94\xe9\xd7\x4c\xfa\xec\xfe\x7b\x3b\xa1\xf2\x2e\x27\xee\x10\xf9\x42\x08\xa7\xe3\xa1\xa9\xa7\x59\xd6\x30\x4b\xac\xd6\x88\x9a\x20\xdd\x64\x48\x56\xe4\x46\x9b\x16\x44\x1b\xc9\x86\xb2\x60\x37\xea\x6c\xe0\xc9\x58\xc3\x84\xe6\x7d\x1f\x82\x49\xda\x01\xb2\xd3\x6f\xb7\xa9\x6e\x24\x68\x8b\x64\xa3\x8f\x9e\x16\xf5\xa5\x6f\x38\xaf\x9f\xf9\xc1\x3f\xa1\xd5\x64\x10\x24\xd5\x21\x23\x99\x7a\x04\x9e\x76\x43\x51\x66\x82\x35\x4e\x23\x73\x71\x44\xc7\x2f\xc1\xba\xd5\x32\x9b\xe2\xc9\xf9\x41\x3f\xef\x4a\xa4\xf3\xf4\xbe\xf5\x9e\xcb\x4c\x60\xd9\xc4\x37\x93\x57\x8a\xe0\x99\xe8\x6e\xb3\xf9\x75\xc4\x64\x7e\xc8\x60\x64\x50\xaf\x70\xe0\x21\x71\x4d\x3e\xdd\x94\x2c\x2a\xe4\x7e\x38\x14\x3d\x30\x08\x31\x40\xe6\x09\xc4\x5f\x8d\x32\x42\x3d\xed\xdf\x42\xbc\xa1\xdc\x03\x57\x06\x61\x45\x95\xd4\xe1\x39\x41\x03\x5c\x41\x82\xc3\x11\x85\x3a\x63\x1f\xd5\xc9\x61\x99\xc2\x55\x1c\x14\xc9\xff\x1c\xf7\x36\x98\x40\xb8\xb3\x76\x58\x6c\xa2\x2e\x13\x52\x60\xb8\x0d\x45\xa8\xb8\xce\x8d\xaf\x5b\xb5\x7c\x64\xcd\x6c\x13\x00\xd5\x32\xc9\x75\x36\x2d\x40\x2e\x64\xd2\xc7\xe4\x75\x36\xe1\xc4\xac\x9c\x57\x65\xd8\x1f\xb3\xed\x17\x52\xc0\x42\x2d\x3c\xc8\x30\x44\x74\x08\x76\x1b\xd5\x42\x8a\xee\x24\xc3\xa6\x84\x8b\xfc\xf1\x34\xfc\x59\x44\x51\xff\x80\x30\x05\x94\xa4\xe7\x22\x67\xb3\xd9\x29\x83\xc9\xa6\x99\xe2\x90\xb7\xab\x52\xbb\x24\x8e\xb7\xd3\xc4\xed\xff\xf6\x35\xcd\x6f\x25\xb7\x23\xc4\xe0\x54\xbd\xe2\x5a\x8a\xa3\xff\x6b\xbc\x9c\xe4\xdf\xd7\x7a\xf0\x5f\x00\x00\x00\xff\xff\x20\x3b\xa8\x10\xbe\x20\x00\x00") + +func fontsMarqueeFlfBytes() ([]byte, error) { + return bindataRead( + _fontsMarqueeFlf, + "fonts/marquee.flf", + ) +} + +func fontsMarqueeFlf() (*asset, error) { + bytes, err := fontsMarqueeFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/marquee.flf", size: 8382, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsMaxfourFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x55\x4d\x6f\xe3\x36\x13\xbe\xf3\x57\x3c\xaf\x21\xc0\x0e\x36\x34\xe3\x4d\xde\x43\x8a\xa0\x60\x0f\xed\xad\x97\x9e\x52\x80\x00\xcb\x48\x94\x2d\x44\x96\x16\xfa\x68\xb2\x00\xa1\xdf\x5e\x70\x48\x4a\xb2\x77\xb3\xbd\x14\xd0\x07\x35\x9c\x8f\x67\x9e\x19\x8e\xca\xba\xfc\x6c\x32\x3c\xe0\x1e\x87\x03\xee\x70\xb8\x67\xbf\x9b\xf7\xb2\x1d\x3b\xbc\x7c\xc5\x1f\xa6\x29\x4c\x5d\xfb\x77\xdf\x9e\x71\x10\x8f\x0f\x78\xea\x47\x63\x72\x99\xf7\x7f\xef\xdf\x4c\xf7\x56\xe5\xaf\x7b\x93\xef\xc7\xd7\x9f\xd9\x6f\xd5\xb1\xb6\x03\x3a\x5b\x5b\xd3\x5b\x7c\xde\xdf\x81\x73\xfc\x32\x1e\xc7\x7e\xc0\xff\x6f\x71\x78\x7c\xbc\x67\xec\xd7\xf7\x2f\xb5\x69\xcc\x50\xb5\x0d\xda\x12\x65\xd5\xf5\x03\xea\xaa\xb1\x3f\x31\x0f\x06\x1c\x9b\xb3\x39\x56\x39\x9a\xf1\xfc\x62\xbb\x0d\xca\xb6\x43\x59\xd5\x16\x55\x61\x9b\xa1\x2a\xab\x9c\x8c\x99\x01\x00\x8e\xfe\xd4\x8e\x75\x01\x53\xbf\x99\xaf\x3d\x5e\x2c\xfe\x32\xdb\x5b\x32\x6a\xda\x37\x96\x05\xa5\xe1\x64\xb1\x39\x99\xae\x78\xa9\x4d\xf3\xba\xf1\xb8\xbe\x74\x55\x33\xf4\x30\x3d\x0c\x48\x7a\x8b\x97\x71\x40\x6e\x9a\xed\xe0\xdd\xf4\xe7\xb1\x3f\xd9\x82\x3d\x04\x0f\x27\x5b\x1d\x4f\x83\x47\x6c\x90\x9f\x4c\x67\xf2\xc1\x76\xec\xfe\x07\x9b\xb7\x68\xda\x01\x55\x93\xd7\x63\x51\x35\x47\x14\xb6\xcf\x6d\x53\xd8\xae\x67\x87\x03\x99\x9d\xcd\x3b\x65\x8e\xda\x36\xc7\xe1\x84\x9d\x7d\x4f\xca\x79\x7b\x3e\xdb\x26\x10\xd3\xdf\xe0\x13\x0c\xca\xb1\x38\x5a\x94\x26\x1f\xda\x8e\xdd\x85\xc0\x85\x2d\xcd\x58\x0f\x01\xec\xb9\x2d\x2c\x25\x3e\x9c\xaa\x1e\x65\xdb\x0c\xd8\xd5\xd5\xab\xc5\x86\x9f\x71\xb7\x41\xdb\x90\x5b\xd3\x14\xe4\xf6\x86\x1d\xee\xc9\x49\x20\xda\xa3\xbf\x88\xca\x58\x96\xc9\xf5\x2d\x99\xa3\x6b\x2f\x19\x7d\xb8\xab\x5d\xec\xb1\xcf\x24\xe3\x8e\x3b\x3e\xbf\xb0\xc5\x36\x69\xef\x26\xc9\xf4\x0d\xad\x25\x6b\x21\x24\xcb\x44\x26\x99\x40\x2b\x59\x06\xaf\x95\x45\xad\x9d\xa6\xb5\x64\x5e\x27\x5e\x92\x79\x0b\x87\x70\x43\x49\xc9\x94\x5f\xb8\x70\x0b\x90\x40\x48\xc6\x9f\xb9\xff\x54\xb3\x53\x42\xf5\x89\x4b\x96\x6d\xb3\x59\x48\x57\xeb\x23\x04\x11\xe3\x9c\xc7\xd5\x95\x06\x85\xf6\x8e\x21\xe0\x1d\xfb\xa0\x00\xe1\x99\x26\x45\x78\x00\x82\xa1\xb4\x16\x61\x37\x28\x08\x42\xea\x1f\xda\xe9\x64\x25\x26\xe5\xb9\x12\x92\x09\x7d\x25\xc4\x93\x64\x4a\x8b\xc5\xbf\x37\x15\xd1\xd6\x45\xcf\x9e\xcc\x69\x92\x6c\xeb\x4d\xf4\xa2\x2d\xbc\xf0\x4f\x2f\x5c\xb9\x98\xa6\xef\xe0\xde\x4d\x37\x31\xa2\x5a\x9b\x2b\xc9\xb6\x3c\xa9\x07\x61\x20\x20\x72\x30\x7f\x88\x58\x0b\xaf\xa7\x52\x2d\x16\x0e\x67\x22\xe7\xdd\xa4\x9d\xf2\xa4\xa8\xff\x5b\xa2\x78\x1e\x33\xcf\xe3\xce\x69\xdf\x55\x4a\xeb\x50\x07\x00\x91\x7d\x45\xf0\x85\xd6\x2a\xe4\x02\x72\x8b\x85\x10\x25\x99\xe3\xfc\x49\x32\x97\x38\x49\x25\x0a\x05\x0a\xe5\x59\x53\x98\x4a\xe7\x82\x4d\x2a\x5c\xe2\xd7\xf9\x34\xdc\x52\xa2\x95\xf0\xaa\x05\x48\x42\x8a\x6a\x8e\x4d\x16\xc1\x37\xe7\x2e\xad\xe3\xc6\xe4\xa6\x6f\xfb\x62\x0a\x52\x12\x2b\xbd\xae\x36\xf5\xfd\x8e\xd0\xaa\x19\x0d\x90\x1e\x2b\x88\x0a\x10\x14\x4c\x09\xb8\x55\x63\xce\x88\x54\x80\x04\x15\xb7\x95\x5b\xe0\xfe\x6b\x3b\x47\x96\x29\xc7\x48\xe9\xc7\x96\xcf\x3f\xb0\x54\xb3\x65\xa0\x6f\xcb\xf9\xde\x27\xbd\x2a\x9c\x67\x23\xd2\x81\xd5\x6b\x4e\x3e\xa5\x17\xa2\x89\xab\xcd\x20\x46\xec\x64\x25\x70\x01\x86\xbc\xb9\xd5\xc2\x6b\xa8\x25\xd5\x79\x92\xe0\x19\x71\x92\x24\x61\x98\x03\x2a\x3a\xbe\x00\xb5\x3a\x6a\xeb\xa6\x59\x46\x96\xd3\xc1\x05\x39\x20\x5c\x73\xe9\xdd\x32\xc7\x34\x8d\x3d\x75\x35\x5b\xd7\xa3\x58\x93\x9f\x8b\xc1\x48\xc5\x10\x93\xf7\xa3\xb4\x76\xab\xd6\xa1\x0e\x59\xb1\x9f\x2a\x16\x0d\x48\x7f\x6e\x66\x22\xe3\x3b\x6e\xa2\xb6\x90\x4c\x09\xbd\x6a\x7d\x45\x63\x3e\xcc\x38\xac\xb5\xaf\xd0\xd0\x33\x56\x46\x32\x27\xd6\x47\x2f\xd1\xb7\x4d\xbf\x97\xe8\x62\x9
b\xf0\x84\xe7\xe2\xc2\x67\x22\x2e\xcf\xc2\x95\x69\x2c\x62\x08\xb4\x0a\xb6\x3e\x0c\x0b\xd4\x0f\x11\xc5\x34\x2e\xc7\x24\xbe\x61\xd4\x5d\xb8\xf2\x69\x23\xc0\x0d\x8c\x38\xb1\x58\x89\x38\x2b\xd6\x5c\x2d\xbf\xc5\x20\xf0\x66\xcb\x7c\xb8\x84\xb9\xf4\xfb\x25\x4c\x9a\xc3\xa1\x33\xc5\x65\x1d\xe2\x56\xda\x54\x17\xe7\x99\x36\x45\xe8\xb6\x0f\x5c\x69\x31\x6b\x4e\xf4\xc7\x9a\x61\x4e\xf1\x97\x12\x1a\x9a\x7a\xdb\x2d\x97\xcf\x61\xe9\x71\xe1\xdb\x7a\x09\xa0\xb9\xe6\x69\x9d\xa0\x2c\xd7\x7f\xfd\xc1\xfe\x09\x00\x00\xff\xff\x4a\xf6\xac\x28\xe4\x0a\x00\x00") + +func fontsMaxfourFlfBytes() ([]byte, error) { + return bindataRead( + _fontsMaxfourFlf, + "fonts/maxfour.flf", + ) +} + +func fontsMaxfourFlf() (*asset, error) { + bytes, err := fontsMaxfourFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/maxfour.flf", size: 2788, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsMikeFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x92\x5f\x6f\xd3\x30\x14\xc5\xdf\xfd\x29\x0e\x72\x85\xf8\xb7\xdc\xb5\x19\x8c\xc2\x18\x93\x90\xe0\xa9\x4f\x7b\x24\x60\x85\xd6\x49\xad\x19\xa7\xca\x9a\x01\x92\xb5\xcf\x8e\x12\x27\xd7\x6e\xe1\x91\x17\xa4\xa9\x8d\xf3\xbb\xf2\xcd\xb9\xd7\xe7\xba\xb2\xd5\xa2\x9c\x21\xc7\x02\xf9\x29\x4e\xe6\xc8\xc5\xca\xdc\x68\x7c\xfb\x85\x95\x59\x6f\x4b\x6d\x71\xdd\x59\x6b\xee\x4a\x87\xf9\x72\x79\x26\x3e\x9a\xda\xea\x3d\xee\x74\x7b\x6b\x1a\x87\xa6\xc2\x77\x73\xa3\x7f\xb4\x66\x6f\x5c\x8d\x5a\x3b\xdd\x96\x7b\xbd\xe9\x05\xb6\xa5\xdb\x64\x59\x26\x56\xc6\x35\x2d\x74\xdb\x36\xed\x2d\x2a\xf3\x33\xec\x7e\xb2\xda\x39\x7c\xd8\x96\xbb\x9d\xb6\x16\x17\x75\xbd\xbe\xea\x4c\xb7\xce\xf4\xa6\xbb\xc4\xe2\x1c\xd7\x7a\x17\x4a\xce\x66\x72\x7a\xa4\x78\x24\x05\xfa\xbf\x14\xde\x4b\x01\x0c\x4f\xbf\xd0\xfd\xbd\x22\x5e\x49\x0a\x10\x14\x81\x57\x82\x14\xd4\x23\x92\x75\x90\xe9\x35\x82\xd8\x10\x93\x14\xe4\x27\x59\x29\x1e\x87\x6a\xc4\x75\x21\xc5\x13\xa6\xa7\x4c\xcf\x98\x9e\x33\xbd\x60\x3a\x61\xca\x98\x88\xe9\x94\x69\xce\xb4\x60\xca\x99\xce\x98\x5e\x32\xbd\x62\x3a\x67\x7a\xcd\xb4\x64\x7a\xc3\xf4\x96\xe9\x82\xe9\x1d\xd3\x65\xa0\xf7\xe9\x79\xaf\x46\xc2\xe0\x37\x05\x7b\x82\x41\xc1\x3d\x9a\xdc\x0b\x19\x5e\xc5\xb8\xcf\x2d\x7c\x8c\xd5\x61\xbe\x2f\x26\x91\xe4\xfb\x50\xc0\x47\xfd\xe2\x70\x5f\xf9\xc3\x18\x31\x7f\xd4\x1e\x15\x63\x0b\x48\x3f\x09\x13\x2f\x92\x94\xbf\x55\xc1\x51\x95\xa9\x55\x96\x18\xab\x78\x8a\x07\x3b\x3e\x48\x62\x8c\x0a\x7b\x3e\xad\x1a\xcc\xf3\xc7\x8d\xd0\x1f\xbd\x52\x92\xd2\x1b\xca\xd7\x7e\x10\x0d\xb7\x9d\xbb\x40\xb4\x5f\x8a\xcf\x3c\xc5\x82\xe9\x0b\xd3\xd7\x69\xb2\x52\x28\x35\x8a\x16\x71\xf4\x0f\x03\xff\xcf\x06\x3e\xfc\xfe\xd5\xeb\x77\x00\x00\x00\xff\xff\x46\xb0\xae\x62\x1d\x06\x00\x00") + +func fontsMikeFlfBytes() ([]byte, error) { + return bindataRead( + _fontsMikeFlf, + "fonts/mike.flf", + ) +} + +func fontsMikeFlf() (*asset, error) { + bytes, err := fontsMikeFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/mike.flf", size: 1565, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsMiniFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x59\x5d\x6f\xa3\x3a\x13\xbe\xcf\xaf\x98\x0b\xa4\xed\x91\x96\x12\x9c\x34\x4d\xa4\xd5\x6a\x29\x71\x5b\xb4\x04\x72\x80\xec\xb6\x12\x12\x9b\xdd\xd2\x36\x3a\x29\x54\x09\x39\x47\x95\xf8\xf1\xaf\xc0\x06\x6c\x18\x47\x7a\x2f\x4a\x52\xec\x79\x3c\x1f\xcf\x8c\xc7\xce\xf3\xfe\x99\x6c\x35\x98\xc2\x04\xcc\x31\x8c\xf9\x63\x41\xc6\xa3\xd5\x2e\xdb\xc1\xef\x0f\xb8\xdb\xa7\x59\x06\xf6\xeb\xf6\xfd\x3d\xdd\xef\x61\x6a\x2c\x26\x23\x27\xfb\xb3\x3f\x3d\xa5\x47\x70\x42\x1f\xdc\x6d\xb1\xcb\x74\x73\xf4\xbc\x7b\xd9\xa7\x05\x1c\xd2\x7d\xba\x3d\xa6\x40\x2e\x4d\xd0\x75\x30\x09\x58\xa7\x17\x30\x17\x8b\xe9\x68\x9d\x1e\xde\x76\xc7\xe3\x2e\xcf\x60\x77\x84\xd7\xf4\x90\xfe\xfe\x80\x97\xdd\xbf\x69\x06\x45\x0e\x6f\xf9\xd3\xee\xf9\x03\x8a\xd7\xdd\x11\x9e\xf3\xac\xf8\x0c\xdb\x23\xec\xf3\xec\xa5\xfa\x2c\x5e\xd3\x51\x3d\x61\x97\x1e\x3e\x1d\x21\xdb\xbe\xa5\x15\xc6\xfb\x7e\xfb\x27\x7d\x82\x3c\x83\x2d\xfc\xc9\xdf\xde\xd2\xac\x80\xfd\x2e\x4b\x2f\x47\xa3\x15\x9b\xfd\x54\xd9\xb0\xde\x9e\xf6\x70\x73\x3a\x14\x79\x06\x5f\x8e\xf9\xfe\x54\xec\xf2\xec\x5b\xba\x3d\x14\xaf\xfb\x5d\xf6\xcf\x65\x96\x16\x5f\xc1\x24\xc6\x62\x56\x29\xb2\x63\xd6\x41\x96\xfe\x07\xef\xdb\xc3\xf6\x2d\x2d\xd2\xc3\xe8\x78\x7a\x7f\xcf\x0f\x05\x03\xbc\x75\xee\x2a\x5b\xb7\xd9\x53\xf5\xf5\xe7\x2e\xbb\x04\x58\x6d\x3f\x60\xbb\x3f\xe6\xf0\x3b\x85\xe3\x7e\xf7\xf2\x5a\xec\x3f\xe0\xad\xd1\xe2\x39\x3f\xc0\xef\xb4\x28\xd2\x03\x9c\x8e\xe9\x28\x7f\xae\xe1\x9f\x4f\xfb\xbd\xfe\xdf\xee\xa9\x78\x35\xfe\x49\x0f\x99\x71\x7c\x3b\x1d\x5f\x61\xbb\x2f\xd2\x43\xb6\x2d\x76\xff\xa6\xc7\xcf\xf0\xfb\x54\xc0\x53\xfa\xbc\x3d\xed\x0b\xc8\x4f\xc5\xfb\xa9\xa8\x2c\xf7\xfc\x08\xfe\xbc\x6e\xb3\x97\xf4\xe9\x72\x34\xd2\xb4\x6f\xe2\xdf\xb7\x11\x00\x7c\x1b\x41\xa9\x7d\x1b\x41\xae\xb1\x7f\xeb\x67\xfd\xb6\xd4\xda\xef\xdd\x00\xfb\x5f\x2f\xf5\x52\xd7\xc4\x2f\x7c\xa8\xfe\x96\x54\x2f\x2e\x4a\xad\x9a\x99\x94\x7f\x35\xc3\x1d\xb4\x6f\x54\xef\x0c\x5f\x93\xa1\x2b\xa9\xbf\x6a\xa9\x8b\xe4\x41\x96\xaa\xa6\x6b\xcd\x37\x11\x8a\xbd\x66\x6b\x41\xac\x75\x03\x31\x7b\x55\x1b\x61\x68\xe2\x1a\x71\xc9\x96\x2f\xe3\x9e\x66\x4c\xdf\x5a\xfb\x06\x51\x54\xa0\x7e\xe4\x0c\xaf\x5b\x27\x49\x10\x37\x49\xb3\x87\xea\x1a\x9a\x38\x3b\xa9\xff\x33\xa0\x56\x27\x4e\x8c\xa1\xbf\x8c\x52\x6b\x6d\xe1\x03\x4c\x08\x6a\xe7\x1a\xc9\x60\x20\xa9\x07\x92\xbf\x7a\x1e\xae\xe3\xda\x98\x28\xd8\x28\x2a\x52\x72\xe8\x44\x0e\x9c\x34\x58\xca\x83\xdc\x07\x4a\xdb\x2e\xd8\xf4\x0b\x14\xf2\x22\x29\x1b\x75\xfa\x1e\xcf\x07\xc4\x94\x5e\x1a\x9a\x4c\x8e\xb8\xef\x6d\x9d\x71\x54\xd7\x7a\xb1\x89\x35\x81\x4f\x3d\x67\xe6\x92\xfe\x95\x69\x2c\x6e\xc0\xa4\x4a\xb8\x60\xf4\x81\xb8\x1e\x12\x1c\x0b\x06\xe3\x9c\xa1\xeb\x2d\xb3\x64\xdf\xd5\x0b\x94\x03\x2f\xb4\x5e\x8b\xc5\x40\x36\x52\x7c\xdd\x1e\x2f\x12\xf6\xb2\x7b\x62\x03\x72\x28\xd8\x42\xec\x23\x4e\x4a\x84\xfc\x25\x7b\x5b\xca\xa1\x48\x12\x31\x25\x5a\xf6\x48\x92\x3c\x7a\x43\xd8\x0a\x8e\xa5\x68\x2c\x73\xb1\xd5\x4f\x56\xbf\x19\x8a\x0d\xae\x4a\xa7\x4b\x6f\x02\xf0\x09\x71\x89\x39\x5b\x91\x4e\x72\x24\x34\x80\xe1\x60\x2b\xf9\x70\x46\x12\xe4\xda\xc1\x3c\x74\xc1\x73\xaf\x9f\x1c\x52\x45\xe9\x17\x16\x66\x0d\x37\x66\x18\x95\x9a\x16\x3c\xb5\x20\x36\x7a\x19\x2b\x4e\xe9\x26\x89\xd3\x44\x7f\xc7\x2c\x41\xe3\x61\xd9\x6d\xdc\x24\x2b\x27\xa5\x35\x42\x32\x34\x7c\x5d\xe9\x8d\x87\xe5\xaa\x36\x2f\x11\xeb\x58\xa7\xcd\x70\xaf\xe9\x3d\x2b\x75\xa4\x0c\x96\x37\x03\x56\xb4\x2e\x35\xb1\xa4\xf4\xd8\x8d\xd5\xae\x46\x90\x07\x70\xe0\x1a\xae\x2e\x0e\xd9\x94\x30\xa3\x97\x13\x89\x76\x66\x2b\x91\x24\x39\x6c\xf5\x31\xd4\x54\x55\x12\xcb\xc1\xb6\x92\x6b\x92\x7b\xfb\x29\xf6\xa5\x5f\x01\xcb\x21\x0c\x1b\xb8\x4c\xda\xba\x23\x2c\x2f\xa9\x7e\xa
9\xd0\x4e\xb6\xab\xef\x64\x51\x52\x48\x3f\x75\xec\x44\x33\x2e\x91\x7a\x26\xc4\x2d\xf9\x3a\x8c\x5b\xeb\x7d\xbc\x5c\x89\xf5\x0e\xa3\x5c\x6c\x20\x55\xa9\x4d\x17\xa3\x1d\x1e\x4a\x7e\xfd\x32\x48\x08\x29\xfd\x34\x61\x80\x57\x1c\x89\x76\x9f\x75\x1e\x45\xb1\x5c\xfc\xd2\x6b\x6f\x74\x61\x6b\x1e\xd5\xfe\x76\xc9\xa7\x8a\x74\xd3\x3f\x69\x3c\xbf\xba\x02\x28\x7f\x54\x64\x6a\x88\xa3\xda\xb9\xf2\x84\x6d\xb4\x68\x31\xcd\x39\x19\xd1\xda\xc5\x07\xf1\x84\x6c\x07\x31\xae\x34\x83\x78\x88\x1a\xee\xf1\x32\x2c\x12\xc9\x9c\x8d\x01\x3c\x5f\xbf\x09\xa8\xf5\x1d\xc2\xb5\x65\xd3\x11\x54\xad\xae\xfc\xa8\x26\x9a\x00\x8e\xf7\x83\x06\x11\x5d\x02\x7d\xb0\x5d\x6b\x65\x45\x8e\xef\xc1\xca\x0a\xbe\xe3\x89\x66\xce\x08\x80\x4d\xbd\x08\x42\xe7\xce\xeb\x3c\x59\xf2\xc2\xd1\xf9\xbe\x9a\x3b\x01\x58\xfb\x1b\x6f\xd9\x4d\x4e\xf8\x5e\x93\x18\xc9\x2f\x4e\xe3\x32\x69\x7b\x47\x68\x16\x99\x02\xd8\x9b\x20\xa0\x9e\xfd\x28\x2f\xf4\x2b\xff\x54\xcd\xfd\x04\xbf\x04\x77\x98\xb3\x2b\x80\x47\xea\x09\x53\x19\xad\xe2\x84\x71\x4a\xd7\x4b\x5d\x6e\xcf\xcd\xd9\x0c\xe0\x26\xf0\xbf\x53\x0f\x6e\xac\x00\xa7\x94\x39\xbb\x06\x08\xa9\x5d\xfb\x84\x63\x33\x3b\x59\xc0\xda\xe6\xb2\x9a\x3a\x07\x58\x3a\x16\x0d\x68\xe8\x84\xfd\xe8\xc9\x7c\x33\x67\x0b\x00\xdb\x5f\x3f\x06\xce\xdd\x7d\xd4\x01\xd7\x11\xb5\xeb\xe5\x7f\xe9\x9f\x44\xfb\xae\xc7\x00\xb7\x74\xe5\x78\x8e\x47\xc1\x0f\x96\x8e\x67\xb9\xe0\x78\x4b\xc7\xb6\x22\x3f\xe8\xd3\x4b\xef\x6c\xad\x85\x4d\x00\x97\xde\x46\xfa\xda\x77\xbc\xc8\xf1\xee\x60\xe9\x6f\x6e\x5c\x0a\x96\x77\xe7\x52\xf8\x7b\xe3\x47\x72\xd4\xeb\x14\x60\x9d\xa4\xb0\x77\x99\xd7\x04\xea\x73\x94\x14\x8f\xa6\x2f\x94\x8a\xa0\x79\x3d\x01\x08\xfd\xdb\x08\xee\x1f\xd7\xf7\xd4\x1b\x35\x59\x2e\x6d\x56\xe6\xf5\x14\x20\xa0\x77\x4e\x18\xd1\x80\x2e\x7b\x7e\x08\x50\x3f\x5c\x01\xac\x2c\x3b\xf0\xbd\xe1\x81\xa3\xd3\x73\x06\xb0\xa4\x77\x01\xa5\x1c\xd2\x17\xd7\x6d\x17\xbf\x06\x58\xbb\x9b\x50\x5f\x39\xde\x26\xec\x19\x55\x4a\x1b\x57\x23\x31\x07\x08\x37\x6b\x1a\x84\x76\xe0\xac\x23\x88\x7e\xfa\x23\x20\x28\xf6\xa2\x37\xf3\x3e\xa0\x74\x04\x13\x6c\xee\x7c\x0c\x60\xd9\x9b\x88\x82\x65\x57\x79\xd5\x3b\xe0\xb5\xd3\x4c\x80\x95\x63\x07\x7e\x2f\xef\xa4\xea\xd0\x14\x80\x39\x01\x58\x3b\xae\x1d\xf8\x3f\x5b\xaf\xf2\x1a\xd3\xec\x56\xa5\xdc\x4f\x9a\xf3\x49\x85\xbf\x5c\xba\x14\x96\x7e\x24\xe6\xbe\xa8\x44\x95\x96\x74\xe9\xb8\xae\xd5\xd3\x70\x04\x61\x9d\x05\xf3\x2b\xd9\x72\xdf\xa3\x23\x30\x51\x83\x66\x55\x28\x43\x7b\xe3\x2a\x39\x2d\x56\xc5\x1e\xa7\xe7\xd7\x00\x75\xf6\xfc\x5f\xa4\x66\x74\x36\x84\x2d\xcd\x9c\xcf\x01\x7e\x6c\xdc\x3b\x2b\x80\xdb\xc0\x62\x99\xee\x7b\x15\x80\x15\x44\x34\x68\x24\x4d\xb6\x67\x4d\x45\xc9\x05\x2e\x79\x6f\xb9\xb7\x3d\x31\x22\x88\x2d\xc6\x43\xb1\x9a\x1f\xcd\x92\x61\x23\x3c\x19\xae\xb9\x10\x0b\xf6\xdf\x1b\x1a\x0e\x4c\x64\x31\x13\x1b\x38\x73\x41\x00\x5c\x2b\x72\x3c\xb0\xad\xb5\x13\x59\x2e\xb8\x34\x8a\x68\x00\x16\xfc\x74\xa2\x7b\xb8\x0b\xac\x1f\xb4\x9a\xcd\xfb\x55\xc5\x26\x68\x2e\x26\xe7\x71\x6a\x16\xd7\x7d\xf2\xd9\x63\xa0\xb9\x98\x9e\xc7\xb1\x9d\xc0\xde\xac\x6e\x5d\xfa\xd0\x81\xa8\xc1\xae\xce\x83\x45\x8e\xbb\xac\x95\xe2\xfb\xbf\x12\x67\x76\x1e\x47\x2a\xed\xe7\xbb\x05\x73\x71\x7d\x1e\x2b\xa8\xe8\x6a\xdd\xf8\xcc\xeb\x7e\xe3\x2e\x68\xe1\x04\x3c\x06\x38\x57\x01\x52\xbe\xa9\xd6\x00\xac\x5c\x19\x7a\x57\xb6\x98\xf4\x42\x21\x6d\x73\x7f\xb7\x19\x3d\x3c\x82\xd7\x59\x4d\xc6\x63\x05\x02\x95\x18\x14\x2b\x0e\xe3\x64\x6c\x9e\x97\x6f\x98\xc3\xfa\xaa\x52\xbe\x7f\x69\x30\x54\x2c\xa6\x38\x6b\xd4\x40\x2a\x1a\xd3\x61\xa4\x59\xdb\xa7\xc0\x51\xd1\xd8\xe9\x39\x45\x7d\x6b\x40\xc6\x2a\xf6\x3a\x92\x63\x12\xe9\x58\xda\xc7\x50\x31\xd7\x51\x3a\x86\x37\x5c\x7d\x20\x15\x6d\x1d\x95\x63\x14\x0a\xa9\xd8\x4a\xa3\xfb\x11\x
34\x75\x3d\x29\x93\x98\x1f\x4c\xa4\x33\x04\x19\xab\xe8\xea\xa1\x19\xad\xba\x00\x21\xa6\x8a\xb4\xbe\x5c\xf6\x62\xf5\x3d\x09\x31\x55\xc4\xf5\x07\x25\x4f\x8d\xa1\x22\xae\xaf\x8c\x8f\x02\x48\x45\x5c\x5f\x72\x0c\xf7\x8b\x02\x43\x45\x5a\x5f\x15\x63\x05\x4e\xd5\x88\x6d\xdc\xc8\x59\xbb\xd5\x7e\x2d\xf4\xc7\xed\x36\xff\xd0\x9e\x19\x88\xa9\xa2\x28\x5f\x35\x8c\xaa\x46\xbc\xeb\x52\x0c\xee\x87\xd8\xe8\x71\xc3\x54\x51\x74\x83\xc5\x14\x3d\x91\x11\x53\x45\xcf\x0d\x16\x53\x05\x86\x8a\xa3\x1b\x75\x31\x42\x81\x88\x8a\xa4\x1b\x64\xdb\x51\x1f\x33\x09\x51\x11\xf5\x11\x33\x0a\xbd\xe6\x22\x44\x45\xd4\xe8\xde\x0f\xbc\xfe\xbd\x91\x7c\x69\x48\x48\x4b\xce\x70\x65\xb9\xad\x68\x78\x6f\x05\x6b\x08\xcf\x1f\x58\x09\x99\xa2\xc2\x16\x16\x54\xf4\x24\x4d\xc8\xd5\x39\x04\xd9\x7a\x05\xc2\xec\x1c\x02\x12\x50\x05\xcc\xf5\x39\x18\x39\x45\x15\x08\xf3\x73\x08\xc8\x11\x13\x47\x59\x9c\x43\x91\x3a\x10\xff\x8c\x57\x26\x63\x1c\x86\x0a\xa7\xec\x7a\x3b\xe1\x97\x78\x46\x22\x75\x2e\x64\x62\xa2\xf2\x83\xce\x63\x78\xf9\xc8\x3a\x8f\x09\x41\xe5\x29\xce\x0c\xe4\x12\x92\x4c\x70\x62\x52\x9c\x19\x28\x02\xce\x4e\x75\xdf\xa1\x80\xc1\x29\x8a\x74\x1d\xf2\xad\x50\x0f\x05\xa7\x69\xaf\xe7\x68\x0f\x5c\xdd\xc5\x0d\x99\xe0\xcc\x94\x3b\x0d\x03\x95\xc4\x19\x89\xf4\x17\xe2\x2d\xb6\x78\x4b\x49\x26\x38\x1d\x91\xce\x42\xbe\x37\x91\x2b\xd4\x14\x67\x63\xdd\x57\x3c\xf0\x7a\x14\xcb\x3f\xa6\x71\x41\x9c\x86\x1e\x96\x95\xe8\xb5\x2e\x99\xe2\x44\x44\x7b\x09\xf4\x3e\x8f\x4c\x71\x22\xa2\x9d\x84\x02\x01\x27\xa2\xba\x8f\x50\xc0\xe0\x44\x44\xbb\x08\x05\x02\x4e\x42\xa4\x87\x38\x73\xc1\x49\xa6\xd7\x00\x4b\xe7\x87\x13\x0a\xdd\x03\xfb\xd5\xb1\x39\xec\xe7\x72\xf4\x71\x1a\xf6\x7b\x08\x90\x7f\x91\x90\x97\xc4\x69\x88\x76\x0f\x80\x5d\xbc\x92\x2b\x9c\x82\x68\xef\xa0\x40\xc0\xb9\xa8\xee\x1c\x14\x30\x38\x21\x95\x7d\x83\x02\x05\x27\xe5\xa0\x6b\x68\xb3\x5a\xbc\xbc\x27\x57\x38\x21\x79\xbf\xd0\xae\x88\xfc\xd8\x41\xae\x70\x12\x3e\x0e\xd5\xcf\xf1\xc5\xff\x17\x00\x00\xff\xff\x41\xfb\x7f\x3b\x8b\x23\x00\x00") + +func fontsMiniFlfBytes() ([]byte, error) { + return bindataRead( + _fontsMiniFlf, + "fonts/mini.flf", + ) +} + +func fontsMiniFlf() (*asset, error) { + bytes, err := fontsMiniFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/mini.flf", size: 9099, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsMirrorFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x1a\xeb\x8a\xe3\xbc\xf5\xbf\x9e\xe2\xb0\x0c\x9d\x84\xce\x46\xb1\x72\x2f\xcb\x90\xd2\x1b\x0b\x2d\x2d\x7c\x65\xfb\x47\xa0\xf5\x24\x4e\x62\xbe\xc4\x59\x62\xcf\x5e\xc0\x0f\x5f\x74\x74\x24\x4b\xb6\x92\x78\x66\x76\x49\x3c\x89\xce\xfd\x7e\x34\xbb\xe3\x4e\xa4\x0f\x30\x87\x19\x88\x31\x24\x33\x48\x96\x90\xb0\x7f\xe5\x97\xcb\xf9\x02\x2f\xbf\xe0\xaf\xe9\xf7\x7c\x0b\xff\x4b\x8f\xd5\xb9\x80\x4f\x3f\xf0\x7d\xbd\x29\x47\xaf\x9b\x6d\xfa\x3d\x2f\x47\xd9\xf6\xf5\x19\x40\x4c\xe1\xcf\xaf\x7b\x48\x56\xab\x29\xfb\xef\x21\x2f\x21\x2f\x21\x85\x93\xc1\x92\x9f\xd2\x7d\x06\xdf\xb3\x4b\x99\x9f\x0b\x38\xef\x46\xa3\x11\xfb\xad\x4a\x8b\x6d\x7a\xd9\x6a\x12\xff\x38\x66\x45\x01\x7f\x39\xa4\xdf\xbe\x65\xc7\x23\xfc\x01\x3e\xa7\xf8\x6b\x0e\x13\xbe\x9a\xc0\xc7\x8f\xf0\x92\x96\xd9\x16\xce\x05\xfc\xfd\x92\x16\xbf\x3f\x96\x30\x2a\xf3\x3d\xfb\x5c\x6c\x8e\xaf\xdb\xac\x84\xcf\xbf\xfd\x1b\xfe\x99\x56\x79\xf1\x31\x61\xbb\x7c\x7f\xcc\x2a\xb8\x64\xc7\x2c\x2d\x33\x10\xa3\x44\x23\x48\x44\xc3\xdf\x7f\xb2\xcb\x29\x2f\x91\x97\xbc\x84\x43\x76\xc9\x5e\x7e\xc1\x3e\xff\x9e\x15\x50\x9d\xe1\x74\xde\xe6\xbb\x5f\x50\x69\x21\x76\xe7\xa2\x7a\x82\xb4\x84\xe3\xb9\xd8\xeb\xf7\xea\x90\x31\x3c\x90\x67\x97\xc7\x12\x8a\xf4\x94\x69\x1c\xdf\x8e\xe9\xc6\xf0\x97\xc2\xe6\x7c\x3a\x65\x45\x05\xc7\xbc\xc8\x46\x8c\xfd\xed\xe7\xb7\x63\x5a\xa4\x95\x91\x1c\x76\xf9\xa5\x34\xdf\xfd\x89\x69\xcd\xc3\x47\xf8\x70\x4a\xf7\xf9\x06\x8a\xd7\xd3\x4b\x76\xf9\x00\xbb\xf3\x05\x76\xf9\x31\x83\x7c\x9b\x15\x55\xbe\xcb\x37\x08\xcc\x52\x00\x80\x8f\x50\x1e\xce\xaf\xc7\x2d\xa4\xc7\x1f\xe9\xaf\x12\x5e\x32\xf8\x9a\x3e\x3e\x21\x50\x71\xfe\xc1\x1e\xcc\xa1\xea\x90\xc1\x87\x43\x7a\xd9\xbe\x1c\xd3\xe2\xf7\x0f\x5a\x01\xdf\x2e\x79\x51\x95\x5a\x86\x14\xf0\xd3\x27\x78\x79\xad\x60\x93\x16\x8f\x95\x46\x53\x9e\x5e\xcb\x43\xb6\x65\x73\x83\xe1\x90\xe5\xfb\x43\xa5\x39\x4e\x61\x73\x48\x2f\xe9\xa6\xca\x2e\x6c\x76\xe3\xcb\x27\x28\xce\x15\xe4\x68\x92\xbc\xd8\xc3\x36\x2b\x37\x59\xb1\xcd\x2e\x25\x13\x63\x04\x3b\xa5\x3f\x51\x72\x38\x66\xc5\xbe\x3a\xc0\x20\xfb\x69\x0f\xfb\x4a\x2b\x87\xf0\x47\x48\x61\xf7\xba\xdd\x67\xb0\x4b\x37\xd5\xf9\xc2\x92\x19\x62\xd8\x66\xbb\xf4\xf5\x58\x19\x66\x4f\xe7\x6d\x86\x82\x3b\x53\xb1\x64\x89\xc7\x8c\x2a\x35\x7f\x01\x5e\xc6\x1e\xd6\xad\xff\x6b\x06\x0a\xd6\xac\x86\xda\xbe\xa8\x7a\xcd\x06\x6a\xb8\x66\x00\x60\xbe\xd6\x07\x06\x50\x83\xfe\xec\x0b\x7c\x01\xfd\xd5\x03\xf8\x6f\x74\x14\x40\xe1\x7f\xfd\x99\xaa\xa1\xae\xa1\x46\xe4\x0a\x60\x34\x02\x50\xb5\x79\xd6\x3f\xfa\x19\xa0\x56\x75\xad\x6a\x87\xa3\x41\xa4\xf0\xb3\x1a\x6a\x4d\x5a\x29\x90\x6b\xc6\x41\x29\xbe\x66\x12\x00\x19\xd1\x70\xeb\x35\x53\x48\x51\x7f\x2c\x0d\xd3\x12\xa4\xa1\xaf\x1f\x50\x12\xa9\xa4\xc5\x4f\x4c\x2a\x83\x1e\x06\xa0\x60\x08\x6b\xc6\x25\x07\x85\xc7\x9f\x01\x3e\xa9\xa1\xd6\x83\xe4\x52\x29\xc5\x3d\xd6\xac\xae\x06\x48\x5f\x6a\x01\x1e\xc0\xbe\x80\x65\x06\x59\x21\xe6\xc3\x57\xae\xb1\xa1\x70\x6a\xcd\x80\x03\x47\x8d\x43\xf8\x0a\x9a\x5b\xc7\x2e\x53\x4a\xb3\x61\xa4\xd6\x00\x5c\x8b\x8b\xe2\x48\xde\xa8\xcd\x01\x80\x35\x82\xb5\x80\x55\x7f\xa3\x6f\x4f\xd9\x06\x2c\x78\x09\xe4\x0b\xb1\xd2\x83\xd2\x3f\x88\x55\xff\x20\x56\xed\x05\xf7\xb1\x36\x2e\xa5\x94\xc1\x85\xa6\x02\xcf\x66\x40\xaa\x03\x68\xdb\xcc\x9a\xcc\xda\xa9\x46\xdf\x30\x3e\xab\x8c\x8e\x5b\xe6\x6a\x3c\x5b\x76\xdc\xdb\xc3\xaa\x08\x2b\x8a\x52\xc3\x80\xe8\x48\x45\x74\x3c\x29\x1d\x56\x0b\x26\x09\x4c\xc3\xa3\x5e\x11\x5e\x11\x7c\x1b\x8c\x82\x03\xcd\x80\xa1\xd1\x04\x89\xb5\x91\x67\xa4\x96\xdf\x39\x56\x35\x43\x50\xb7\x79\xbe\x41\x93\x5c\x9d\xc2\x41\x83\x7d\x05\x02\x1b\x5e\xd5\x9b\xb5\x31\x58\x09\x9d\xa9\x1a\x5b\x69\x1b\xb5\xac\x6e\xad\x64\xe3\xc
a\x33\xd7\x0d\x6a\x11\xe3\xd2\xe9\x1a\x46\x74\x5a\xd3\x8a\xf9\x58\xe3\x59\x2d\x1f\x8b\x7e\xdd\x38\x76\x10\xa9\x52\xbb\x88\x09\x48\xee\x98\x5b\xdf\x74\xfa\xdb\xde\xdf\x04\x38\x07\xee\xd1\xb1\x4e\x4d\x3a\x06\x93\xd4\x6a\x8a\x04\x9b\xd4\x18\x8a\xef\xa7\x55\xa4\x6e\x55\xa4\xcc\xd9\x1a\x1e\x15\xc5\x8b\x8e\x81\x21\x85\x83\x42\x8d\x71\xe7\x0f\xdc\x49\xa2\x5c\x24\x73\x67\x4c\xa3\x71\x17\x02\x88\x4d\xab\x20\x08\xc0\xb6\x0b\x0e\xa0\x71\x41\xb0\xbe\xd4\xc4\x60\x24\x5a\x80\x54\x67\xa3\x9b\xb2\xa2\x3e\x0b\x56\x99\x11\xa7\xb0\xb1\x49\x44\xa2\x11\x7f\x25\x36\x11\x25\x50\x38\x41\x97\xda\x0d\x30\x73\x1a\x80\x88\x38\x78\xc0\xc8\xbc\x23\x9b\x82\x16\x93\x31\xd9\x30\xd6\xa3\x49\xac\x06\x2b\x2d\x26\xef\x0e\x35\x93\xcd\x55\xed\x6a\x23\xbd\xd5\x9e\x44\x96\x02\x10\x85\xe6\x81\x68\x5e\xcd\x97\x5e\x2d\xa5\x23\xf0\x95\x0c\x3d\xa2\x02\xe6\xf1\x64\x03\x44\x05\x36\x8d\x1b\xb7\x23\x09\x26\x26\x94\x57\xd7\x31\xa3\x02\xc9\xad\x32\xac\x12\x75\x22\xf4\xb5\xd0\xd1\x1e\x27\x00\x8b\x03\x08\x85\xe2\x1d\xed\xbd\xb3\x84\x44\xfc\xd0\x73\xf6\xab\x9e\xd1\x83\x1a\x57\xfc\x2d\xd4\x9e\xe9\x13\x7e\x45\xb6\xa6\x3a\x48\x1b\xce\xfc\x4e\x75\x68\xbc\xbe\x69\x10\x80\x1a\x84\xe6\xa1\xd3\x32\xc4\xdc\xf7\xbe\x26\x4d\xd1\x07\xd3\xcc\x48\xcc\x42\x9c\x0a\x0b\xe5\x2b\x90\xf0\x05\xb8\x2b\x2e\x3c\x28\x82\x1e\x8a\x00\x4b\x88\x28\xc4\x15\xa2\xeb\x60\xf4\x9c\xde\xa0\xb3\x88\x0c\x0a\x0e\x36\x1d\x86\xcd\x88\x52\x1e\x03\x16\xe4\x0b\x91\xbd\xae\x36\xab\x6c\x69\xeb\x7c\xab\x51\xc5\x38\x09\x9a\x1e\x6b\x9b\x3a\xd6\x4c\xd6\x1a\x09\x25\x76\x45\x59\x9d\x9b\x94\x4e\xdc\xa3\xe8\x3c\x10\xba\xc1\x6a\x7a\xf1\x4e\xf7\x69\xb1\x72\xe4\x88\x53\x8f\x0b\xad\xd7\x6e\xc7\x79\xf5\xa1\x5d\x38\xfd\x4a\x5c\x73\xe8\xb6\xd0\x1e\x28\xf5\x2e\x54\xe8\x5c\x99\x73\x45\xae\xd5\x0d\xd8\x4c\x44\x79\x27\xe8\x74\x9c\x53\x3e\x75\x1a\x24\xc7\x27\x25\x72\xca\xe3\x96\x92\x4f\xa8\x95\x59\xdf\xca\x9e\x47\xc7\x66\x05\x9b\x42\x22\x74\x88\x1f\x62\xc7\x35\x89\xb6\x43\xf4\xdb\x83\x1e\x1a\xf3\x1a\x29\x65\x06\xa7\x9b\x1a\xab\x9b\x04\xdc\xa9\x42\xb6\xa5\x8a\xf7\xd4\xa6\x4f\xd1\x0d\x8c\xad\x3a\xde\x9b\xb4\x99\xc8\xb9\x2e\x8e\x38\x86\x98\xab\x3a\xcf\x00\xf1\x6a\xd3\x1a\x54\x3b\xed\x7c\x13\xd6\x94\x80\x95\x99\x5d\x31\x9b\x3e\x86\xc2\x05\x02\x86\x45\xa6\xe3\xde\x1e\x9a\xfb\xfa\x09\x39\xe8\xdf\x04\xdf\xa0\xe6\xfb\xaf\x5f\x70\xde\x68\x7b\x33\x57\xb4\xdc\x11\x28\x3a\xbf\x12\x62\x5b\xc5\x23\x85\xde\x6b\x41\x82\x81\xbc\xd5\x7b\x90\x57\xb9\xea\xef\x06\x1d\xfc\x8d\x43\xdc\x73\xaf\x76\x44\xd7\x82\x09\xee\x64\x62\x4c\xf7\x11\xb0\x06\xd2\x07\xf6\xe1\x7d\x14\x21\x96\x50\x7b\x9d\xca\xf1\x0c\xf0\x29\x56\x31\x7a\xc9\x79\x25\x42\x4d\xe1\xa0\xba\x61\xcb\x86\x49\x18\xb2\xd1\x24\x8d\x07\x6e\x8c\x30\xbd\x21\x3c\xc3\xb3\xfb\x8d\xdb\x51\x20\x16\x45\x36\x94\x8c\xf9\x14\xd5\x11\x07\xfb\x09\x3e\x05\x78\xcd\x8e\xa2\x96\xdc\x8c\x35\xa6\x4e\xb4\x56\x42\xe1\x66\xc8\x0a\x3e\x50\x43\x45\x53\x1a\x57\x32\x08\x0f\xcd\xa0\x3f\x75\xc4\xc1\xbc\x7e\xea\xd6\xdc\xef\xc0\xc0\xe6\xaa\x5e\xeb\x82\x16\xb5\x7e\xb9\xfd\x16\x93\xb7\x86\xec\xfb\x4c\x46\xa9\xb5\x2a\x08\x65\x4d\x4e\xad\xaf\x54\xb5\x1f\xc2\x6b\x96\xcc\xc7\xb1\xad\x5f\x32\x4f\xee\xe5\xf2\x64\x2e\x98\x5b\x26\x51\x0e\x1f\xb4\x0a\xe4\x00\xa8\x5b\x32\xfd\x8e\x06\x9a\xd8\xd4\x87\xc6\x55\x23\xb7\x2f\xc0\x64\xe9\x26\x03\xdb\x14\x8c\x68\x66\xb7\x02\x26\xf3\x29\x33\xab\x37\x69\x9c\x1e\x80\x7b\x9a\xa4\x39\x75\xcd\x24\x4e\xa9\xdc\xd7\x4d\x32\x9f\x99\x94\x46\xdb\x19\x13\xc7\x35\x7e\x52\x7b\x0f\x91\xee\x2c\x99\xcf\xfd\xc8\xc0\x53\xc1\xaf\xfa\xc8\xc2\x94\x65\x54\x03\xd5\x32\xdd\x66\x61\x2a\xe4\x18\x2f\x52\xb9\xfe\x53\xa9\x21\x02\x2d\x23\x96\xc6\x95\x01\x35\x3e\xed\x07\x8f\xa5\x95\x89\x46\xa7\x4c\x37\xa4\x1b\x
ca\xa6\x59\x31\x7e\x06\xc6\x20\x34\xb5\x99\x76\xc2\x31\xe2\xd6\x00\xc9\x62\x4c\x95\xc2\xfa\xb6\xf5\x31\x32\x86\x8d\x63\xbf\x15\x49\x16\x09\xb3\x83\xae\x84\x66\x65\x62\x97\x26\xb4\x36\x09\xfb\x97\x64\x21\x62\x5d\xa0\xdd\x27\xd9\x52\x14\x5b\x9f\x24\x8b\x49\x53\xa0\x7c\x0c\x70\x93\xc9\xe9\x6d\x65\xe9\x78\xa9\xad\xb2\x24\x16\x3a\x52\x16\x57\x9a\x9b\x98\xb2\x66\x77\xb6\x9d\xdd\x07\xc7\xcd\x9c\xb9\xd5\x1b\xcd\x11\x35\x0c\x5c\x2a\xe0\x10\x15\x61\xc1\x6e\x2d\x6f\x35\x9b\x6d\x56\x1c\xe8\xd2\xd6\xe6\x41\xbc\x4c\x04\xd9\x38\x59\xac\x9a\xaa\x82\xa8\x70\x81\xe9\x95\xf2\xf0\xf8\x72\xcc\xcc\x61\xc4\xf5\x10\xe9\xda\x93\x65\xd2\xbb\x9a\xb7\x5a\x98\x64\x29\xac\x9a\xc9\x93\xa1\x19\x98\x87\xb6\xdd\x72\x1d\x4c\x77\x37\x9a\x2c\xd1\x5f\x82\x6d\x5f\x87\xbd\x29\x83\xd6\x4e\xda\x83\x18\xe0\x91\x59\x67\x5f\xac\xea\x16\x96\xb9\xb7\xb6\xc3\x1c\xa4\x5c\xe8\xc4\xd4\xb6\xa0\xc9\xdc\x8b\x13\x68\x16\x82\x76\x25\x18\xb4\x0a\xc9\x72\x49\x33\x5d\xd3\x9d\xe3\x59\x64\x0b\xf5\x69\x33\xbe\xf6\x0c\x55\x4b\x90\x66\x1d\x84\x8a\x09\x56\xb2\x0e\x25\xe5\x11\xd7\x99\x29\x87\x70\x60\x16\xc3\x36\x6f\xe0\x3f\xbb\xde\x94\x1d\x6c\x1a\xd9\x6a\xec\xf8\xb3\x4b\x07\xe2\x90\x5c\xc9\xf2\x48\x2e\xd5\x70\xa9\x5a\x7c\xb6\x67\x71\x83\x3e\x71\x97\x3f\xb4\x04\xa5\x9e\x43\x49\x93\xac\x50\xe1\xcd\x71\xc1\xbc\x1d\x37\xb7\xcd\x5a\x8f\xd6\x22\x59\x4d\x28\x44\x6d\x47\xf3\x06\x50\xed\x4d\x5c\x52\xaf\xc5\x95\xac\xfb\x83\xce\x18\x60\xdf\x64\xa8\xf2\xb7\x80\xce\xdf\xdb\x46\x25\x2b\x3f\xb3\xc0\xe0\x3c\x7c\x83\xac\x4b\x8a\x4e\x5a\x1f\x9b\xc5\xaa\x5b\x20\xa3\x79\xed\x22\x43\xd7\xf5\xf0\xde\xa4\xb5\x46\x26\x94\xab\x37\x6e\x85\x07\x6a\x60\x40\xc5\x78\xec\xd9\x5b\xd7\x9c\x70\xd7\xab\xea\x66\x9c\x6e\xa5\x48\x31\x4e\x3c\x7b\xeb\xa8\x7b\x03\xa8\x88\xd8\xbb\x27\xe8\x24\xd6\x56\xf6\x03\x9d\x32\xda\x92\x02\xa7\xec\xe7\x6f\x7e\x5b\x2b\x5f\x31\x9e\xd9\xa5\x01\xc5\xed\x9d\xe3\x73\x86\x22\xad\x19\x0a\x74\xf7\xf8\x22\xe6\x7c\x08\x63\xfc\x80\xf6\x5e\xb5\x19\x5e\x02\x39\x96\xed\x1b\x0c\x70\x6b\x01\x57\xe5\xa0\x69\xce\xcc\xa2\xb2\xbd\xfb\x13\xe3\x15\x73\x81\x43\x71\x83\x8b\x5f\x2c\x10\x8f\x4f\xb4\xf6\x0d\xc7\x56\x91\x8c\x63\xf9\xa1\xcf\x0c\x21\x92\x24\x96\x1f\xfa\x81\xc6\xfc\xa5\x27\xe8\x24\x96\x1f\xfa\x81\x4e\xdf\x3b\x2f\x89\x64\xc6\xc0\xe5\x63\xbc\x0e\x97\xe6\x2a\x1c\xaf\xc1\x29\xe1\xe2\xc1\x79\x13\xba\x20\xa5\x45\x2d\xa5\x2d\xf3\x52\x5a\x1a\x52\xb6\x68\x2c\x62\xa1\xdb\x67\x2e\x13\xc9\x32\x16\xba\xfd\x40\x57\x81\x29\xc0\xfa\x4d\x0f\x50\x31\x7e\xef\x20\x29\x84\xe7\x3b\x0a\x19\xbe\xb6\x2e\x6e\xcf\x21\x42\x08\x6f\x33\x67\x53\x21\xde\xfa\x99\xe9\xee\xfa\x9d\x83\x10\x93\xb7\x4f\x87\x42\x4c\x5b\x66\xe9\xbd\xd3\x14\x62\xd6\x32\xcb\x1b\x40\xe7\xb1\x8c\xda\x0f\x74\xe1\x45\x88\x72\x99\xa0\x17\xe8\xf2\xbd\x33\xbe\x10\x2b\x4f\xd6\xc1\x60\x68\xd6\x9b\x5f\xa1\x07\xe8\x64\x1c\xac\x9e\x4c\xfc\x34\x66\xb2\xe4\xcd\x2a\xb8\x59\x3b\xab\xa7\xf0\xaf\x4f\x08\x59\x12\xcc\x44\x70\x6b\x67\x8d\x65\x13\x81\x84\x37\x8d\x18\x0b\xdf\x5d\x40\x8b\xc9\xc4\x4d\xbb\x36\xfb\xf5\x00\x9a\x32\x6b\x56\x6b\xd5\x1e\x40\xb3\x5b\x89\xab\xb9\x66\xab\xbb\x81\x36\xc1\x7c\x84\x9d\xcb\x95\x25\x86\x98\x2c\xbc\xe1\xe5\xca\x91\x65\xab\x16\x86\x97\xab\xf6\x56\xde\x11\x5d\x45\x1b\xb1\x1e\x57\x67\x62\x3a\xd6\x94\xc8\x79\x71\x5d\x88\xde\xc3\x25\xb7\x23\xb9\xb9\xbe\x33\x57\xf7\x21\x68\x72\xa5\x32\xdc\x5d\x49\x8b\x69\xb4\x4b\xee\xb3\xa4\x12\xd3\x68\x97\xdc\x0f\x34\xda\x25\xf7\x03\x8d\x76\xc9\xfd\x40\xa3\x5d\x72\x3f\xd0\x85\x37\xc3\x36\x83\xb8\xc6\x12\x8e\xdd\x74\xbb\xe1\x83\x2e\x99\x17\xe1\x4d\x89\xe4\x4d\x8d\x1c\x48\x39\x6c\x6a\x63\xeb\xef\xca\xc4\x74\xd5\xab\x3a\xc6\xf2\xcb\x6c\xdc\xab\x3a\x46\x41\x93\x5e\xd5\x31\x0a\x2a\xde\xbb\xc1\x14\
xb3\x49\x3f\x86\xbb\x1b\x71\x31\x9b\x7a\xd5\xf1\xce\x4d\x5f\xb8\x66\x10\xb3\x59\x3f\x86\xbb\x54\xff\x1f\x00\x00\xff\xff\xbe\x22\xec\x0d\x20\x2c\x00\x00") + +func fontsMirrorFlfBytes() ([]byte, error) { + return bindataRead( + _fontsMirrorFlf, + "fonts/mirror.flf", + ) +} + +func fontsMirrorFlf() (*asset, error) { + bytes, err := fontsMirrorFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/mirror.flf", size: 11296, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsMnemonicFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\xbd\x69\x77\xdb\x38\xb6\x35\xfc\x1d\xbf\x82\xe9\xf5\x36\xa2\x54\x95\x13\x6b\xb0\x25\xa5\xaa\x52\xa4\x44\x48\x62\x4c\x91\x0a\x07\x3b\xf6\x17\x2f\x8d\xb1\x13\xc7\x4e\x7b\x48\x2a\xf9\xf5\xef\x02\xc1\x73\x08\x8a\x07\xa4\x9f\xbb\xee\xea\xdc\x6e\x63\xef\x43\x0c\x7b\x1f\x00\x14\x09\xee\x6e\x76\x9d\xe5\xff\x67\xb5\xe5\xff\x75\xac\x83\xb6\x75\x64\x1d\x5a\x87\x56\xbb\x3f\xec\xb3\xaf\xb7\xdb\xaf\x77\xb7\xd7\x6b\xeb\xc0\x5a\x5a\x13\x6f\xba\xbb\xbb\x7d\xb4\x56\x3f\xad\xf7\x77\x57\xb7\xd6\xf8\xee\xc7\xf2\xd6\xfa\x6b\x2d\xff\x9f\x6d\xaf\xd7\xd7\x37\xaf\xef\xee\x3f\xbd\x63\xc9\xd5\xf5\x83\x95\x21\x9f\x1e\xb6\x0f\xd6\xe3\xd5\xd6\x82\x38\x0f\xd6\xdd\xce\x8a\x26\xe3\x76\xb7\x77\xc4\x5a\x57\x8f\x8f\xdf\xde\xbe\x79\xb3\x79\x78\x7d\x7d\xfb\xb8\xbd\xbf\xbd\x5e\xbf\xbe\xdd\x3e\xbe\xb9\xdf\xad\xe5\x7f\x24\xe6\xf5\xe3\xbf\x8f\xaf\xac\xc7\x3b\xeb\xfa\xeb\xb7\x9b\xed\xd7\xed\xed\xa3\xb5\xb4\xee\xb7\xcb\x9b\x9b\x9f\xd6\xcd\xf2\xfe\xd3\xd6\x5a\x5f\x2d\xef\x97\xeb\xc7\xed\x3d\x7b\xd8\x3e\x5a\xf7\xcb\xc7\xab\xed\xbd\xb5\xbe\x7f\xda\x6c\x6f\x7e\xfe\x61\xad\x9e\x1e\xad\xfb\xed\xf7\xed\xfd\xc3\xf5\xea\xe6\xe7\x6b\xcb\x4a\x7e\xdc\x1d\xdc\x6c\x1f\x1f\xb7\xf7\x45\x9d\xd8\xf2\x7e\x6b\x7d\xbb\xdf\xee\xae\xff\xdd\x6e\x64\xeb\xf8\x9f\x2a\xb8\x86\xb1\x2a\x98\x4b\x6b\x79\xbb\xb1\x76\x77\x37\x37\x77\x3f\xd4\x9f\x2e\x5f\x33\x1e\x2f\x6c\x9b\xbd\xb0\x6d\xf6\x1f\xdb\x66\x3c\x58\xc9\x7f\xdd\xd0\xb6\xd9\x7f\xe5\x7f\xb3\x6d\xf6\xd2\xb6\x59\xcb\xb6\xd9\x2b\xdb\x66\xbf\xd9\x36\xfb\xdd\xb6\xd9\x1f\xb6\xcd\x0e\x6c\x9b\xbd\xb6\x6d\xf6\xc6\xb6\xd9\xa1\x6d\xb3\xb6\x6d\xb3\x8e\x6d\xb3\xae\x6d\xb3\x9e\x6d\xb3\x23\xdb\x66\xc7\xb6\xcd\xfa\xb6\xcd\x06\xb6\xcd\x86\xb6\xcd\xde\xda\x36\xfb\xd3\xb6\xd9\x5f\xb6\xcd\xfe\xb6\x6d\xf6\xce\xb6\xd9\x3f\xf2\x3a\xce\xa3\x6d\x33\xc7\xb6\xd9\xc8\xb6\xd9\xd8\xb6\x99\x6b\xdb\x4c\xd8\x36\x9b\xd8\x36\x9b\xda\x36\x9b\xd9\x36\xf3\x6c\x9b\xbd\xb7\x6d\x76\x62\xdb\xcc\xb7\x6d\x36\xb7\x6d\x16\xd8\x36\x93\xb5\x95\xad\xf8\x60\xdb\x2c\xb2\x6d\x16\xdb\x36\x4b\x6c\x9b\xa5\xb6\xcd\x4e\x6d\x9b\x9d\xd9\x36\xfb\x68\xdb\xec\xdc\xb6\xd9\x85\xbc\xda\x5f\xb2\x3d\xfc\x8d\xac\x39\x7f\x25\xeb\xc0\x5f\xca\x7f\x2f\xb3\xff\x26\xfb\x62\x69\xdb\x4c\xf6\xc4\xda\xb6\xd9\xc6\xb6\xd9\xd6\xb6\xd9\xce\xb6\xd9\x27\xdb\x66\x57\xb6\xcd\xae\x6d\x9b\x7d\xb6\x6d\xf6\xc5\xb6\xd9\x8d\x6d\xb3\xaf\xb6\xcd\x6e\x6d\x9b\xdd\xd9\x36\xfb\x66\xdb\xec\x7f\xb6\xcd\xee\x6d\x9b\x3d\xd8\x36\x93\x2d\x7b\xb2\x6d\xf6\xdd\xb6\xd9\x0f\xdb\x66\xff\xda\x36\xfb\x69\xdb\xec\x97\xbc\x5a\x4b\x5e\x8d\xbf\x50\xff\xbe\xca\xae\xaf\xfa\x43\xf6\x14\x0f\xb3\x7f\xd3\xec\xdf\x65\xf6\xef\x5d\xf6\xef\x53\xf6\xef\x83\x8c\x7e\xf8\xef\xe1\xe1\x61\xdb\xb2\x2c\x2b\x4e\x9c\x28\xb1\xc2\x89\x35\x13\x8e\xeb\x05\x53\xab\x15\x87\xb3\x57\x8c\xc7\x33\x80\x75\x4a\xb0\x44\x7c\x4c\xac\x56\x9c\x7c\x94\x98\x8f\x80\xe9\x4a\x8c\x08\xdc\x02\x21\x32\x84\x40\x44\x4f\x47\x44\x4e\x10\xcf\xbd\x38\xf6\xc2\xc0\x6a\x89\x30\x91\xc8\x04\x90\x47\x0a\xf9\x21\xf5\xa2\x73\xab\x25\x82\x
0f\xb2\xf4\x03\x94\x1e\xcb\x52\x67\x7c\x12\x84\x67\xbe\x70\xa7\xc2\x6a\x39\xe3\x93\x57\x8c\x3b\x27\x80\xe8\x4b\xc4\x48\xf8\xbe\xd5\x1a\x09\xff\x15\xe3\x23\x1f\x8a\x06\x59\x91\x33\x3e\x89\x17\xce\x58\x58\xad\x51\x2c\x8b\x63\x28\x1e\xca\xe2\xf1\xcc\x89\x9c\x71\x22\x22\x2b\x71\x46\xa9\xef\x24\x59\x25\x67\xb2\x8e\x33\xac\xe3\x52\x22\x7d\x2f\x10\xd6\x44\x08\xd7\x6a\xf9\x93\x57\x8c\xfb\x13\x28\x5e\x61\xb1\x1e\xe3\x54\xc6\x38\xc5\x18\x6b\x09\x9a\x84\xd1\x3c\x8f\x31\x91\x31\x26\x18\x63\x93\x55\xc6\x89\x22\xcf\x99\x0a\x2b\x12\x49\x1a\x05\x56\x6b\x1c\xbd\x62\x7c\x1c\x01\x68\x9b\x8d\xcd\xcc\x9b\x24\x56\x98\xca\x71\x09\xe5\xb0\x84\x50\xbc\x2b\x8a\xbd\xc0\x6a\xc5\x9e\x2c\xf5\xf2\xd2\xf6\xa1\x2c\x75\x9d\xc4\xf1\xbd\xe0\xc4\x12\xf1\xd8\x59\x08\xab\xe5\xfa\xe2\x15\xe3\x2e\xf4\x59\x3b\x53\x89\x2b\x4e\xbd\xb1\xb0\xc6\x61\x90\x44\xa1\x6f\x85\x81\x04\x8e\xdb\x12\xd8\x06\x60\x87\x00\x26\x67\xa1\x04\x76\x24\xb0\x03\xc0\x2e\x05\x9c\x45\x22\x8b\xd9\x95\xd0\x2e\x40\x7b\x04\x74\x12\xa6\x91\x44\xf6\x24\xb2\x07\xc8\x4c\x35\x81\x98\x3a\x89\x77\x2a\xca\x02\x09\x1c\x29\x90\x00\x04\xd2\xce\x24\x14\x9f\x07\xe3\x28\x0c\xc2\x34\xb6\x3c\xd7\x17\x56\x2b\x3e\x0f\x64\xef\x9c\x03\xaa\x6f\x12\xec\xc8\x0f\xc7\x27\x52\xe0\x23\x29\xcc\x11\xe0\x07\x6a\xbc\x82\xb1\xf0\xad\xd6\xd8\x91\xc1\xc6\x01\x14\x0e\xb5\x60\x73\xe1\x7a\xe9\xdc\x6a\x89\xb9\xe4\xcf\x01\x92\x49\x2a\x4e\x47\x71\xe2\x25\x69\x22\x2b\x94\xca\x0b\xc4\x78\x81\x4c\x54\x30\x4a\x22\x1e\x4b\xf6\x18\x0a\x95\x98\x3c\x5f\x58\xb1\x58\x38\x91\x93\x84\x91\xd5\xf2\x62\xd9\x47\x13\xd0\x77\x3b\x93\xd4\x34\x0a\xd3\x45\x19\x25\xfb\x7c\x8a\xa8\x4c\x53\x91\x18\x87\x91\x5b\x86\xc9\x51\x8c\x10\x96\x69\x2b\x0d\xbc\xa4\x0c\x92\x9a\x48\x01\xd4\xdf\xa9\xf1\xf3\x85\x6c\x90\x9b\xb9\xd1\x05\x03\x0c\x32\xfd\x2d\x1c\x37\x4b\x3b\x85\xed\x5a\x0b\xc7\x7d\xc5\xf8\xc2\x01\x5c\xa6\xc0\x99\x37\x9d\x59\xe1\x38\x11\x89\xb5\x88\x44\x2c\x12\xab\x35\x0b\x17\xd2\x94\xa0\xf6\x41\x26\xc0\x51\x24\x9c\x13\x6b\x21\xa2\xb9\x97\x24\xc2\xb5\x66\x22\x92\x46\x5f\xc8\x9c\x36\x82\x9c\x36\xc8\x24\x18\x84\x39\x5a\x61\x82\x91\xc4\x04\x88\xc9\xb4\xe7\x05\xae\xf8\x68\xb5\xbc\x40\x56\xc9\x83\xf1\x1c\xe4\x6a\xfb\x98\x28\x97\xb7\x82\xac\x69\x01\x98\x66\x70\x5c\xca\x99\xb1\xf0\xc5\x58\x56\xc6\x89\x84\x63\xb5\xe2\xd8\x91\x03\x8b\xed\xd3\x95\xb6\x07\x15\x19\x54\x40\x7f\x0e\x06\xc6\x0c\xa5\x7a\x24\x91\x09\x6d\x86\x70\x73\x42\x3b\xf3\x92\x99\xf5\x3e\x8d\x13\x6f\xe2\x8d\x31\xc7\xbd\x97\xec\xf7\xc0\x5e\x52\x59\x2c\xbb\xce\x69\x76\x9d\x53\xbc\xce\x4a\x8d\x64\x94\x78\x8e\x9f\xa7\xc5\x30\x3a\x73\x22\xd7\x6a\x2d\xfc\x6c\x30\x5d\x80\xae\x2b\x50\x99\x8f\x01\x9b\x4a\x6c\x0a\xd8\x8d\x52\xe2\xa9\x88\x62\xa1\x67\xdb\x48\xe6\xb1\x08\xf2\xd8\x40\x25\x41\x2f\x98\xfa\xe2\x40\x25\xbb\x2c\xed\xc4\x99\x60\x63\x48\x3b\x83\x5d\x15\xa6\x92\x4e\x9c\x19\x20\x86\xa4\x33\x3c\x24\x92\x4e\x9c\x44\xd9\xe4\xe8\x8e\x65\xcb\x5d\xf0\xdd\x30\xd3\xe6\x22\xf2\x4e\x9d\x44\x58\x69\x2c\x54\x6a\x5c\xa4\xd2\x06\x0b\x48\x8d\xc3\xce\x3e\x2a\xab\xe0\x22\x95\x15\x5c\x40\x05\x87\x99\x28\x65\xff\xe6\xe9\x26\x91\x0a\xca\x32\x41\xd6\xdd\x09\x74\xf7\xb0\xa7\xa5\x1a\xcd\x37\xe3\xb1\x14\xf0\x18\xeb\x96\x89\x74\x2e\xe2\x58\x4e\x20\x67\x8e\x97\x64\x2d\x98\x9f\xbd\x62\x7c\x7e\x06\xa0\xb2\x52\xa7\xa9\x13\xb9\x85\x50\x17\x99\x50\xa7\x80\xd5\x85\x5a\x46\x8a\x0c\x29\x10\x39\x28\xeb\x3f\xef\xbc\x38\x94\xed\x88\xb1\x1d\xc3\x62\x44\xac\x69\xe4\x2c\x66\xde\x58\x6b\x8f\x27\x7b\xde\x4d\xc7\xb2\x69\xf1\x74\x2c\xc7\x7c\x8a\x6d\x5b\x6a\x54\x03\x25\x63\xc4\xc8\xc8\x34\x8a\xc3\x29\x3e\xa4\x22\x18\x8b\x12\x63\x9c\xcd\x8f\x63\xd0\xd5\x70\xad\x1a\x91\xd5\x3d\x91\x09\x25\x50\x49\x2e\x96\xf3\x78\x0c\x69\x6c\x98\
xa9\x34\x5c\x88\xc8\xc9\x3a\x38\x3e\x8f\x13\x31\xb7\xc6\xe1\x7c\xee\x04\xae\xd5\x0a\xb3\x4c\x1d\x62\x3d\xb6\xa8\x85\xf1\x39\x8e\x4e\x6b\x21\x27\x83\x05\x4c\x06\xc3\x4c\xab\xce\x62\xe1\x83\x39\x17\x51\x38\x8d\x1c\x2d\xac\xb3\x90\x61\x1d\x08\xbb\x3c\x54\x19\xed\x40\x65\xb4\x6c\x7d\xc3\x78\x00\x5d\xbd\x6c\xab\x64\x76\x2a\x22\x99\x5c\xc4\xc7\xb1\xef\xcc\x55\xe4\xb9\x13\x9d\x30\xfe\x02\x5a\xbd\xcc\xb4\x3a\x16\x41\x62\xc5\xde\x34\x60\x7c\xfc\x08\x25\x99\x3e\x17\x61\x1a\xb8\x79\xd1\x62\x03\x45\x4a\x91\x69\x14\x89\x60\x7c\x0e\xc4\x27\x28\xcd\x84\x78\x2e\x82\xbc\xe0\x7c\x0b\x05\xc7\x2a\x63\x87\x27\x22\xb0\x46\x4e\xc4\xf8\x08\x66\xbb\x65\x5f\x99\x61\xac\x92\x4e\xc6\x8b\x05\x14\x66\xf2\x72\x3d\x47\x44\x22\xf6\x62\xc6\x5f\xbe\x85\x12\x95\xf1\xc2\xc5\x79\xe4\x4d\x67\xd8\x84\x3b\x28\xce\x64\x33\x11\x73\x2f\x90\xd9\x24\x8c\x5c\x2f\x70\x7c\x99\xe4\x65\x3f\x87\x11\xe3\x07\x4b\x80\xaa\x35\x9c\x98\x24\x07\x8b\xd0\x0b\xb2\xa1\x75\xc3\x74\xe4\x0b\xcb\xc9\x54\xf7\x21\x0d\x13\xbd\x07\xff\xfa\x0b\x98\x6b\x35\x14\x70\xf5\x00\x26\xa8\x65\xa6\x94\x38\x9c\x24\xd6\xec\x7c\x31\x13\x01\xe3\x07\x07\x50\x96\xcf\xba\x53\x2f\x4e\x44\x24\xa0\x87\xa3\x4f\x50\x9e\x29\x62\xee\xc8\x35\x0b\xe3\x2f\xbf\xe6\x7f\x5e\xe5\xb9\x6a\x2a\x13\x99\xa2\xb8\x60\xc2\x95\xca\x4d\x7e\x1a\x1f\xcc\xbd\x20\x8d\xf3\xf2\xdf\xe1\x92\x2b\xb5\xb0\x4f\x17\x22\x8a\xc7\x91\xb7\xc8\xd2\x26\xe3\x1d\xd0\xcc\xaa\x5b\x29\x97\xf9\x92\xf1\x2e\x22\x7a\x6a\x31\x2e\x97\x2b\xce\x58\x6a\x86\xf1\x97\x2f\xa1\x50\xa5\x1f\x6f\x1c\x85\xf9\x95\xe7\x3f\xa1\x28\x1b\xf7\x85\xe7\x8f\xa3\xf0\x0c\xb4\x04\x02\x5c\xf5\x15\xcf\x95\xab\x32\x37\x4c\x18\x7f\x0d\xb6\x58\xa9\xe9\x4f\xb8\x9e\xef\x3b\x8c\xbf\xfc\x03\xfe\x3e\xdc\xaf\x69\x18\x08\xc6\xdb\x58\xcf\xa5\xea\xbc\x78\x9c\xfa\xa6\x71\x07\x89\xac\xb2\x71\xcf\xd4\xf3\xcc\x81\x7f\xf7\x0e\xa8\xd9\xc0\x9f\xa6\xfe\xd4\x89\xac\x89\x4c\x49\x12\x24\x27\x84\x0f\xa9\x13\x25\x22\x62\xbc\x0d\xeb\xd5\xd5\xc6\x84\x9d\x39\xfe\x84\xf1\x36\xcc\x0a\xab\x2d\x05\x54\x33\x57\x1e\x36\x66\xbc\x8b\x71\x77\x25\xa3\x7f\x48\x45\xac\x55\xf5\x1f\xe8\xe4\x75\xa6\x1b\x39\xa5\x07\xd6\xd8\x59\x78\x89\x9c\x8c\x45\x22\x73\xa8\xa3\xd6\x06\xd3\xc8\x39\x15\x8c\x3b\x2f\x80\xd1\x6e\x62\x64\x3a\x60\xdc\x01\x01\xac\x3b\x4d\x8c\xb1\x17\x8d\xd3\xf9\xc4\x17\x1f\x19\x77\xa0\x17\xd7\xdd\x26\x5a\xe2\xf9\xae\xbc\xd0\x3f\xc0\xe8\x35\x31\xb4\x54\xe1\x40\xaa\x58\x1f\x35\xb1\xb2\xc4\xef\x8c\xc2\xac\x1f\x60\xb5\xb6\x3e\x36\xd3\x24\x0e\x72\xd4\xba\x6f\xc4\x8d\xf3\xd6\x83\x8e\xc7\xa0\xe3\xf5\xc0\xc8\x11\xa5\x51\x11\x38\x2a\xc3\x26\x46\x3e\x2a\x02\x47\x65\xd9\xc4\xd0\x47\x45\xe0\xa8\xac\x9a\x68\x5a\x1f\x0b\xec\xe3\xb5\x91\xe5\x95\x1a\xe4\x61\x83\x36\x4d\x8c\xbc\x41\x1e\x36\x68\xdb\xc4\xd0\x1b\xe4\x61\x83\x76\x4d\x34\xad\x41\x1e\x34\x68\x63\xf6\x8d\x48\x66\x56\xcb\x5b\x6f\x6f\x96\xb7\x9b\xeb\xb5\x5c\x24\x42\xa2\xdd\x98\xad\x13\x94\x14\x1d\x80\xa2\x37\x66\xeb\x84\xa5\x7e\x0b\xa1\xdf\x36\x66\xd7\x84\xa5\x7e\x0b\xa1\xdf\x36\x66\xd7\x84\xd5\x7e\x0b\xa1\xdf\x36\x66\xdb\x84\xa5\xc6\x84\xd8\x18\xb3\x63\xc2\x4a\x4f\x87\xd8\xd3\x6a\x1a\x48\xfd\xc4\x2b\x56\x41\x6a\xaa\xf8\x0d\x6e\x2a\x6d\xcc\x7e\xc9\x23\xc7\x89\x5c\x5c\x30\x1e\xbe\x01\x8a\xd9\x30\x69\xa9\x67\x53\xec\x59\xb3\x61\xd2\x52\xcf\xa6\xd8\xb3\x66\xaf\xa4\xd5\x9e\x4d\xb1\x67\xcd\x66\x49\x2b\xfd\x94\x62\x3f\x99\x0d\x73\x5e\xaa\xde\x39\x56\xcf\x6c\x98\x64\x16\x46\x41\x59\xc5\x09\xec\x87\x37\x9a\x61\xe2\xb9\xe3\x23\x29\x9e\x39\xd1\xc2\x8a\xad\xd6\x74\x7b\xff\x75\x79\xfb\x4a\xbb\xc5\xb8\x3d\x34\x70\xca\x93\xcc\x12\xfa\x7a\xdb\xae\xc7\xe7\x4d\x59\x42\x53\xb6\x9d\x7a\xbc\xde\xcf\x4b\xe8\xe7\x6d\xb7\x9e\x94\xeb\x77\x09\xfa\xdd\xf6\xea\xf1\xda\xa8\x2c
\x61\x54\xb6\x47\xf5\x1c\x7d\x6a\x59\xc2\x92\x73\x7b\x6c\x22\x49\x14\x2c\x9a\xb7\x7d\x03\x6a\x7f\x5a\x59\xc3\xb4\xb2\x1d\x18\x18\xe5\x49\x65\x8b\xa3\x30\xac\xc7\xe7\xa3\xb0\xc5\x51\x58\xd6\xe3\xf5\x51\xd8\xe2\x28\xac\xea\x49\x5a\xaf\x6e\xb1\x57\xd7\x06\x4e\x79\x32\xb9\xc6\x86\x6c\xea\xf1\x79\x43\xae\xb1\x21\xdb\x7a\xbc\xde\x90\x6b\x6c\x88\xc9\x17\xd5\x69\xe4\x1a\x1a\xb2\x33\xf9\xa2\x32\x89\x6c\x60\x12\xd9\x99\xac\x51\x9e\x42\x6e\x41\xb5\x3b\x93\x35\xca\x13\xc8\x1d\xf4\xd5\xce\xe4\x8a\xf2\xf4\x71\x07\x7d\xb5\x33\xb9\x82\x98\x3c\xee\xa0\xaf\x76\x26\x5b\x94\xa7\x8e\x3b\x6c\x84\xc9\x11\xd5\x89\xe3\x0e\xfb\xb6\xaf\x36\x87\xa7\x5e\x5c\x4c\x19\x07\x58\x6a\xf2\xc2\xde\x84\x71\x07\x13\xc6\xce\x64\x86\xf2\x74\xf1\x84\xfd\x68\x32\x43\x79\xb2\x78\xc2\x7e\x34\xf9\x80\x98\x2a\x9e\xb0\x1f\x4d\x46\xa8\x4e\x14\x4f\xd8\x72\x93\x19\xca\xd3\xc4\x4f\xac\x98\xc9\x0c\xc4\x24\xf1\x78\x05\x24\x93\x19\xce\x2b\x15\xfb\x99\x57\xac\x7d\xd8\xb8\x17\x81\x6d\xaf\x73\x00\x94\x86\x79\x02\x08\x4b\x24\x34\xee\x45\x46\x91\xc8\xd6\xf9\x2d\x60\x34\xcc\x12\x39\x7e\x89\xf8\xc6\x4d\x48\x38\x0d\x03\x71\xc2\xb8\xf3\x27\x50\x1a\x26\x09\x20\x2c\x91\x60\x5e\x49\x8d\x4b\x83\x38\x7e\x09\x8c\x86\xc9\x22\xc7\xaf\x11\x6f\x5e\x51\x8d\xab\x7a\x1c\xbf\x03\x9a\xc9\x24\x04\x69\x8d\x24\xf3\xc2\x2a\xa7\xb9\x61\x02\x93\xe4\xf8\x35\xb0\x4c\x7e\xa9\x72\xd6\xc8\x31\xaf\xac\xa0\x82\x4e\xa6\x97\xf1\x5f\xc0\x30\x99\xa5\x8c\x5f\x23\xde\xbc\xa6\x72\x4b\x0c\x17\x19\x26\xa7\x94\xf1\x1b\xc0\xb7\xcd\x2e\x71\xcb\xa9\xcb\x7d\x03\x14\x93\x4b\xf6\x08\x1b\x24\x98\x5d\x22\xca\xc6\x12\x60\xac\xb6\xc9\x26\x7b\x84\x2d\x12\xcc\x3e\x11\x25\x67\x09\x70\x56\xdb\x64\x93\x32\x7e\x8b\x78\xb3\x4b\x44\x45\x23\x02\x34\xd2\x36\x39\xa5\xca\xd9\x22\xa7\x71\xbf\x0e\x16\x16\x60\xe1\x76\xc3\xda\x0a\x08\x5b\x24\x34\xef\xd7\x95\x52\x04\x2a\xa5\x61\x65\x95\xe3\xb7\x88\x37\xbb\x63\x5a\xb5\xef\x14\xec\xdb\x36\x59\x84\x20\x7d\x42\x92\xd9\x27\xd3\xd2\x68\x4e\x71\x34\x4d\x3e\x29\xe3\x3f\x01\xbe\x63\xf6\xc9\xb4\x32\x92\x53\x18\xc9\x8e\xc9\x2a\x55\xce\x27\xe4\x98\xdd\x32\xdd\x5b\x8a\x4f\xff\x00\x8e\xc9\x2e\xfb\x8c\x4f\xc8\x30\xfb\x65\x56\xed\xe8\x19\x74\x74\xc7\x64\x1a\x82\x74\x85\x24\xb3\x73\x66\xe5\x94\x31\x83\x94\xd1\x31\xd9\x66\x8f\x70\x85\x04\xb3\x67\xbc\xd2\x22\xd0\xfb\x07\x18\x26\xcb\x94\xf1\xd7\x88\x37\x3b\xc6\x2b\x27\x25\x0f\x92\x52\xc7\x64\x99\x3d\xc2\x35\x12\x1a\x6f\x6c\xe5\xba\xf4\x50\x97\x0d\x7b\x91\x1c\x7f\x8d\xf8\xc6\xdb\x5a\x90\x2a\x3c\x48\x15\x9d\x86\x9d\x08\x10\xae\x81\xd0\x35\x7b\xc5\xab\xe8\xde\x03\xdd\x77\x4d\x5e\xf1\x24\xdc\x17\xb1\xdc\xe9\x20\x98\x32\x89\x37\x75\x92\x34\x12\x96\xf7\x9e\x71\xef\x3d\x20\xab\xd6\xd0\x71\xd7\x9f\x01\x67\x36\xc4\xfb\xaa\xb6\xdf\x83\xb6\xbb\x26\x43\x10\xa4\xcf\x48\x32\x1b\xe2\x64\xcf\xb0\x27\x60\xd8\xae\xc9\x11\xfb\x8c\x2f\xc8\x30\xed\x49\x4e\x22\xc7\x6a\x4d\xef\xb7\xdb\x5b\x5c\x69\x7f\xf9\x02\x24\xf3\x8d\x2c\xbf\xb4\xb6\xf3\x61\x6d\xd7\x35\xed\x4c\xca\xf8\x1b\xc4\x9b\x6f\x63\xf9\x7b\x4d\xf1\xb1\x29\xa6\x8d\xc9\x3e\xe3\x06\x19\xe6\xfb\x57\x7e\x69\xbe\xf2\x61\xbe\xea\x9a\x36\x26\x65\xfc\x0d\xe2\xcd\x77\x7a\x73\x86\xfe\x63\x93\x0f\xca\xed\x99\xf6\xe8\x04\xe9\x06\x49\xe6\x5b\xbd\x7e\x39\x21\xfa\x90\x10\x7b\xa6\x8d\xfa\x1e\xe1\x06\x09\xe6\x5b\xbd\x41\x69\x24\x03\x18\xc9\x9e\x69\xaf\x5e\xc6\xdf\x22\xde\x7c\x8f\x37\xd8\x1b\xc7\x00\xc6\xb1\x67\xda\xaa\xef\x33\x6e\x91\x61\xfe\x85\x24\x28\x8d\x64\x00\x23\xd9\x33\x19\xa5\x8c\xbf\x45\xbc\x69\xe6\x08\xac\x45\x24\xc6\xc2\x15\xae\x35\x3a\xb7\x9c\x45\x28\x3b\x79\x31\x13\x8c\xbf\xbc\x05\x6e\xcd\xba\x2b\x98\x5a\x2d\x7f\xf9\xed\xdb\xf5\xc3\xd5\x2b\xc6\x83\x29\x50\x8c\x0b\xaf\x32\xe1\xf6\x13\x10\xcc\xb3\x48\x58\x9
e\x78\x42\x98\x78\x7a\xa6\x69\x64\x8f\x70\x87\x04\xf3\x3c\x12\x96\x66\x9e\x10\x66\x9e\x9e\x69\x1a\x29\xe3\xef\x00\x7f\x64\x9e\x45\xe0\x36\x4d\xfe\xf3\x69\xfe\x0b\xc4\x7f\x80\x68\x9a\x48\x48\xda\x1d\xd2\xea\xa6\x94\x50\x5e\x40\x00\xd2\x3c\xa5\x48\xdc\xdd\x16\x70\xe6\x29\x25\x2a\x39\x24\x02\x87\x1c\x99\x66\x93\x32\xfe\x1e\xf1\xe6\x89\x24\xda\xf3\x47\x04\xfe\x38\x32\x4d\x24\xfb\x8c\x7b\x64\x98\xd7\x56\x51\xc9\x21\x11\x38\xe4\xc8\xe4\x90\x32\xfe\x1e\xf1\x66\x57\xc4\xa5\x96\xc7\xd8\x72\x93\x29\xca\xf8\x07\xc4\x9b\x3d\x11\x57\x67\xea\x18\x66\xea\x23\x93\x2f\x08\xd2\x03\x92\xcc\xde\x88\xf7\xfa\x38\xc6\x3e\x36\xfe\x0c\xb2\xc7\x78\x00\xc6\xb1\xd9\x1f\x71\xa9\x97\x63\xe8\xe5\x63\x93\x31\xca\xf8\x07\xc4\x9b\x77\x22\xc9\x5e\xad\x12\xac\x95\x69\x27\xb2\xcf\x78\x44\x86\xd9\x25\x49\xa9\x5e\x09\xd6\xcb\xe4\x92\x32\xfe\x11\xf1\x66\x97\x24\xe5\xc9\x30\x81\xc9\xf0\xd8\x64\x92\x3d\xc2\x23\x12\xcc\x1e\x49\x4b\xfb\x89\x14\xf6\x13\xc7\x0d\x77\x80\x73\xfc\x13\xe2\x1b\x7f\x30\x84\x24\x9d\x42\x92\x3e\x6e\xb8\x09\x0c\x84\x27\x24\x34\xfe\x56\x98\x67\xe9\x14\xb2\xf4\xb1\xc9\x20\x65\xfc\x13\xe2\xcd\xde\x48\xab\xbf\x61\xa5\x87\x40\x33\xd9\x83\x20\x3d\x01\xa9\x6f\x76\x48\x4a\x4d\x05\x29\x4c\x05\x7d\x93\x51\x48\xda\x13\xd2\xcc\x7e\x49\xcb\x5b\xa5\x14\xb6\x4a\x7d\x93\x5d\xf6\x08\x4f\x48\x30\xbb\xe5\xac\x9a\x92\xce\x20\x25\xf5\x4d\x96\x21\x48\x3f\x90\x64\xf6\xcd\x79\x95\x76\x8e\x34\x93\x77\x08\xd2\x4f\x24\x99\xfd\x53\xbd\xc7\x7f\x0e\xf7\xf8\xfb\xe6\xdd\xca\x45\x69\x1e\xb8\x80\x79\xa0\x6f\xda\xad\x94\xf1\xbf\x10\x6f\xde\xad\x5c\x54\xf6\xb2\x17\xb0\x5e\xef\x9b\xf6\x2b\x55\xce\x2f\xe4\x98\x77\x2c\x17\xa5\xbc\x76\x01\x79\xad\x6f\xda\xb1\x94\xf1\xbf\x00\xbf\x6c\x5c\x51\xcd\xc2\x48\xae\x0a\x87\x40\x68\x58\x49\x29\xf8\x1d\xc2\x6b\x1e\x2f\xf1\x18\x0f\x3d\xc0\x19\x7f\x13\xf4\x18\xbf\xbb\x06\x94\xa6\x3e\x50\x42\xc4\xf8\xcf\x7b\x28\x37\xef\xbf\x52\xbd\x7a\x29\x54\x6f\x65\xda\x79\x95\xe0\x4f\x08\x37\xef\x56\x2e\xca\x93\xc0\x05\x4c\x02\x2b\xd3\x66\x65\x8f\xf0\x0b\x09\xe6\xbd\x8a\xb8\x98\x31\x2e\xf2\xe7\xdb\xdb\x35\xcf\x2f\x39\xa5\xc1\x76\x60\xb0\xd7\x26\x71\x94\xf1\x4b\xc4\x37\x3e\xb8\x94\x33\x3c\x60\x6c\x4c\xfd\x59\xc6\x5f\x23\xde\xbc\x89\x0d\x4b\x8c\x10\x19\x0d\x3f\x36\xe7\xf8\x3b\xc4\x9b\xb7\xb0\x69\x89\x91\x22\xc3\xb4\x85\x2d\xe3\x9f\x10\x6f\x16\xc5\xfe\x6f\xa4\x96\x13\xb8\x38\xc3\x5e\xa6\x6f\x0f\x2e\x21\x86\x49\x25\xf5\x11\x9e\xb4\x08\x66\xd9\x90\x31\xf2\xa4\x76\x99\xbe\x7d\x89\x21\x4c\x7b\xde\xda\x00\x4f\x5a\x80\xc6\x27\x9e\xca\x21\xf2\x9e\xbc\x4c\xdf\xfe\x85\x21\x1a\x7e\xd3\xa6\x03\x3c\x69\x01\x1a\x9f\x88\x2a\x87\xc8\x7f\x54\xbf\x4c\xdf\xbe\xc0\x10\xcf\xfd\xc9\xbb\x14\xe0\x49\x0b\x60\x5e\xd0\xec\x3f\xc8\x53\x1a\x4f\xa7\x0d\x01\x4c\x4b\x9b\x5a\xfa\x12\xe8\xdb\xc6\x9f\xb9\x71\xb2\x29\x5f\xbf\x0f\x01\x1a\x7e\xf4\xa6\xe9\x4b\xa4\xd7\xfc\x04\xbe\xf7\x63\x9d\xd3\x05\x8e\xf1\x47\xf0\x3d\xc6\x12\x19\xe6\x65\xcf\xb4\x9c\x5b\xa7\x90\x5b\x8d\xcf\x4a\xed\x11\x3e\x21\xc1\xbc\xdc\x99\x96\xb2\xc1\x14\xb2\x81\xf1\x99\xa9\x32\xfe\x13\xe2\xcd\x8b\x9c\x93\x12\xe3\x04\x19\xa6\x4d\x42\x19\xff\x05\xf1\xe6\x4d\x42\x58\x5e\x51\x86\xb0\xa2\x34\x3e\x31\xb5\x47\xb8\x43\x42\xe3\xed\x25\x45\x29\xe9\x25\x44\xbd\x36\xdc\x69\x22\xb8\x77\xc8\x35\x7b\x4d\x5c\xcc\x4a\x3d\x22\x2e\x80\x63\xb2\xd7\x3e\x63\xfb\x2b\x67\x18\x9f\xa2\x7a\x5f\xc2\x7f\x86\x3e\xdf\x35\x6a\x33\x4f\x9f\x53\x58\x54\x1a\x9f\x57\x2a\xe3\x3f\x21\xde\x3c\xaa\x95\x07\xff\x4a\xf9\xda\x71\x30\x5f\x1b\x1f\x08\xaa\x8f\xb0\x5c\x16\x11\xcc\xe3\xee\x94\x1f\xe1\xbb\x74\x44\xc1\x32\x8d\xf7\x3e\x67\xb9\x2d\x38\x8d\x37\x17\x95\x79\x4b\x35\x0d\xdf\x14\xfc\x86\x5b\x8d\x04\xfb\x0e\xd9\x1d\xf5\xae\xc3\x3c\x74\xbd\x
89\x27\x22\xbc\x6d\x2e\x26\x49\xf6\x12\x45\xd6\x53\x8c\xff\x99\xbf\x05\xd2\x51\x4f\xe4\xe7\xa2\x78\x99\x8b\xa2\xa3\xe6\xd7\x7c\xf7\xfb\xb2\x05\x7f\xcd\xec\xac\xad\xfe\x5f\xbe\x86\x92\x6c\x88\xf5\x6d\xec\xcb\x43\x28\xca\x46\x0e\x6c\xf8\xf2\x4f\xf8\xf3\x46\xc5\x2a\xb6\xa2\xc5\xcb\x32\xf9\x8e\xb4\xab\x5e\x18\x9d\x46\x42\x9c\x54\x86\xcc\x5f\xcc\xf6\xde\xb0\xf8\x2f\xb0\x06\x46\x96\x58\xc4\x9e\x1f\x96\xef\xef\x0b\xe4\x0d\xcd\xbc\xa4\x7c\xad\x73\xe4\x2c\x8d\x1c\x2f\xdc\x23\x79\x48\x5a\x1b\x49\x61\xf6\x66\x50\xb9\x82\x21\xf2\xb6\x46\x5e\x4a\x34\x2c\x45\xde\xae\xe6\x7a\x62\x5a\xae\xe5\x19\xb0\xd4\x0b\x9f\x8a\x55\x5e\x1f\x97\x1b\x96\xe9\x50\x7f\x9c\xb3\x0b\x01\xda\xf5\xa3\xc7\xb8\xf3\x1b\x40\x3b\x46\xe8\x48\x24\x0e\xe3\x23\x44\x76\x8d\xc8\xa9\x33\x9f\x3b\x8c\x4f\x11\xda\x33\x42\x5d\xe1\xcb\xa8\x2e\x42\x8f\x9a\x24\xc3\xb8\x40\xb0\x59\x95\x17\x59\x65\x2f\x10\xd9\xaf\x53\x14\xe3\xe7\x08\x34\x4b\x36\x99\x65\xd0\x19\x42\xcd\x2a\x95\x03\xc3\xb8\x87\x48\xb3\x36\x4f\x9c\xc5\xc2\x61\xfc\x04\xa1\x2b\x23\xd4\x77\xe6\xae\xc3\xb8\x8f\x50\xb3\x78\xe7\x29\xe3\x73\xc4\x6d\x8c\xb8\x20\x65\x3c\x40\x9c\x59\xd4\x1f\x3d\xc6\xc7\x88\xab\x13\xb1\x97\xcf\xd2\x00\x5e\x1e\x1a\xc1\x0b\x8f\xf1\x05\xe2\xcc\x12\x8d\x66\x21\xe3\x11\x02\xcd\xb2\x8b\xbd\xa9\x94\x5d\x8c\x50\xb3\xec\x12\x27\x65\x3c\x41\xa0\x59\x74\x29\x88\x2e\x45\xb0\x59\x74\x8b\x99\xc7\xf8\x04\x81\x66\xcd\x8d\x25\xf0\x23\x02\xcd\x9a\x5b\xc4\x1e\xe3\x1f\x10\x68\x56\x5c\x96\x3e\x18\x3f\x43\xe8\x73\xd2\xa1\x96\x2b\xde\x23\xd1\x2c\xc0\x52\x6a\xd3\xb8\xa7\xc8\x5d\x1b\x12\x55\x75\x8e\x58\x42\x72\x5b\x6e\x0c\x1c\x6a\x86\xd8\x22\x6b\x6b\x62\xed\xa5\xfa\x9f\xc8\xd8\x3d\x2f\x89\x32\x7e\x0d\x94\x95\x29\xef\x56\x73\xfc\x7e\xea\x7d\x82\xd4\xbb\x6a\xd7\x75\x09\xe3\x4b\xe8\xbb\x55\xc7\x00\x54\x69\x77\x85\xb8\xae\x01\x97\x27\xdd\x4f\x08\xec\x19\x80\x79\xca\xdd\x20\xf0\xa8\x7e\x04\x18\xdf\x22\xf4\xd8\x00\x55\xe9\xf6\x17\xe2\xfa\xe6\xe1\x61\xfc\x27\xc2\x06\x06\x58\x9e\x6a\xaf\x10\x38\xac\x19\x3c\xc6\xaf\x11\xb7\x34\xe0\xf2\x34\xfb\x05\x81\x2b\x03\x30\x4f\xb2\x37\x08\x34\x49\x5a\xa6\xd8\xaf\x88\x32\x89\x58\x26\xd8\x5b\x44\x99\x44\x2b\xd3\xeb\x1a\x51\x26\xa1\x62\x72\xbd\x03\xe8\xda\x24\x50\x99\x5a\xbf\x21\xca\x24\xc1\x2c\xb1\xde\x23\xcc\x24\xc0\x49\xf6\x2e\x72\x9e\x5c\x7f\x7b\x00\xb8\x49\x87\x39\xf0\x01\xe3\x9a\x74\x98\xe5\xe0\x47\x84\x99\x54\x88\x19\xf8\x09\xa1\x26\x15\x66\xf9\x77\x87\x30\x93\x08\xb3\xec\xfb\x2f\xc2\x4c\x22\xcc\x72\xef\xff\x10\x66\x92\x60\x9e\x79\x7f\x20\xd0\xa4\x41\x32\xef\x7e\x46\x9a\x49\x91\xa6\xac\xfb\x1d\x99\x26\x89\x52\x4b\xd8\x3b\x48\x6e\x6b\x93\x64\xa9\x05\xec\x13\xb2\x4c\x12\xae\x2e\x5f\x7f\x00\x67\xa3\x75\x70\x90\xce\x45\xa4\xd4\x24\x37\x39\x53\x80\x68\x9d\xeb\x87\x67\x99\x71\x74\xe0\x1f\x08\x34\x4f\x6b\x71\xa2\x84\x97\x40\xea\xdd\x98\x7a\x14\x90\x8f\x88\x34\xaf\xa4\x5c\x2f\xcf\xab\x73\x04\x9b\x3a\x0e\xa1\x5f\x11\x6a\x5e\x51\x9d\x84\x6a\xdd\x87\x50\x93\xeb\x73\xe0\x17\x00\x6e\xcd\x0b\xaa\xd8\x99\x67\x6b\x2a\x84\x9a\x8c\x9f\x03\xbf\x01\x70\x57\x39\x27\xa1\x3c\x9f\x59\xad\xe4\xee\xf6\xee\x21\x3b\x68\xcf\xbd\x5e\xde\xfc\x7c\xbc\xfe\xb2\x7c\xc5\xf8\x4b\x18\xe2\x9d\x66\xde\x4c\xe6\x23\xe1\x87\x67\x8c\x7f\xce\xaf\xd0\x53\x2f\xc4\x8c\xcf\x23\xcf\xf7\xbd\x71\x75\x45\xc2\xb8\x17\x02\xb4\x53\x07\x75\xdf\x0b\xab\x15\x6f\xef\x57\x77\xeb\xfb\xbb\xe5\xe3\x75\xf6\x3e\xa7\xfb\x5f\xe0\x76\xeb\xb8\x53\xc9\x9d\x2f\xd7\xdb\xcd\xdd\xad\x22\x4e\x91\xd8\xab\x23\xa6\x27\x91\xe3\x05\x9e\x13\x58\x9e\xdc\x3d\x0a\x20\x1d\xd5\xd6\xf4\x62\xff\x6a\x6e\x0c\xc4\xe3\x3a\xe2\xe8\x5c\xf8\x61\x94\xc6\xb1\xe7\x04\x07\xda\xa5\x19\xf7\x3c\x08\xd0\xaf\x0b\x70\xee\x59\xad\xf4\xcb\xfd\xf2\x3a\xbf\xee\
x39\xd2\x06\x75\xb4\xf7\x82\xf1\xf7\xd8\x21\xc3\x3a\xa8\x2f\xb1\xfe\x7b\xc0\x2e\xeb\xb0\x81\xc4\x06\x88\x5d\xd5\x61\x93\x78\x46\x0c\x6f\xf2\x00\xe4\x75\x1d\xf9\xa4\x32\xbc\x27\x78\xd5\x6d\x1d\x31\x9e\x85\x51\x62\xa5\x56\x6b\xf4\x73\x7b\x73\x77\xff\xf4\xf0\xa0\xe8\xa7\xd8\x19\xbb\xfa\x81\x9e\x09\xc6\xdd\xfc\x3e\x61\x4f\xbd\x08\x63\x02\xcb\xbd\xf6\xdf\x80\xac\x35\xc5\x48\x30\x3e\x42\x68\xad\x29\x4e\x05\xe3\xa7\x08\xad\xf7\x80\xac\xeb\x14\xb1\xb5\xb2\x77\x65\xb3\x10\x5a\x2b\x76\xe9\x0b\x81\xd0\x5a\x79\x67\xbd\x75\x01\x5d\xdb\xae\x55\xf2\x85\x84\x62\xd8\x5a\xf5\x4a\x7b\x20\xb2\x56\xbc\x6a\xb8\x3d\xc6\xdf\x23\xbe\x56\xc0\x27\x32\x4f\x23\xb4\x56\xbf\xc2\x67\xdc\x47\x68\xad\x5a\xc5\x9c\xf1\x39\x42\x37\xb5\xd0\x80\xf1\x00\xa1\xb5\x52\x0e\x19\x0f\x11\x59\xab\xda\x85\x60\x7c\x01\xd0\x4e\xad\x66\x45\xc4\x78\x84\xd0\x5a\xd1\x8a\x98\xf1\x18\xa1\xb5\xa2\x95\x6b\x84\x04\xa1\xb5\xa2\x4d\x19\x4f\x11\x59\x2b\x59\x31\x61\x7c\x82\xd0\x5a\xc9\xca\xdd\xd7\x0c\xa1\xb5\x92\x4d\x62\xc1\xf8\x18\xb1\xb5\x92\x1d\x4b\x79\x8f\x41\xde\x9d\x5a\xcd\xc6\xb2\x0e\x31\x62\x1b\x54\x3b\xce\xd0\x6b\x40\xd7\x6a\x76\xe6\x44\x70\x44\xd3\xdf\xff\x01\x46\xad\x74\xcf\x45\x94\x32\x7e\x8e\x8d\xac\x15\x6f\x76\x46\x94\x0a\xff\x5f\x0c\x5f\xaf\x61\xc6\xdf\xc3\xbc\xd9\xa9\x95\xf0\x79\xca\xf8\xfb\x14\xa0\xb5\x1a\x3e\x77\x18\x7f\x9f\x1f\x7c\xd3\xeb\x96\x35\x5c\xde\x6c\x33\xbe\x84\x96\x75\xdb\x66\x9c\xcc\xb9\x2b\x04\x76\xcc\x40\x99\x71\xbf\x23\xb0\x6b\x06\x66\xf9\xf6\x13\x22\x7b\x66\xa4\xcc\xb6\x1b\x04\x1e\x99\x81\x32\xd7\x6e\x11\x78\x6c\x06\x66\x99\xf6\x17\xc8\xab\xdb\xaf\x41\x4a\x20\x86\x1c\xd4\x5c\x9b\xf1\x6b\xc4\x0d\xcd\x38\xcc\xb1\x9f\x11\xbd\x34\xa3\x65\x86\xfd\x82\xc0\x95\x19\x28\xf3\xeb\x0d\x02\xd7\x35\xc0\x39\xe3\x5f\x11\xb8\xa9\x01\x06\x8c\xdf\x22\x70\x6b\x06\x86\x8c\xdf\x21\x6e\x67\xc6\xc9\xbc\xfa\x0d\x80\xbd\x1a\x4d\xca\xac\x7a\x8f\xc0\x1a\x51\xca\x9c\xfa\x80\xc0\x1a\x51\xca\x8c\xfa\x88\xc0\x1a\x51\xa6\x8c\x3f\x21\xae\x46\x92\x32\x9b\xee\x10\x58\x23\x49\x99\x99\xae\x10\x58\x23\xc9\x2c\x93\xae\x11\x59\x23\xc9\x2c\x8f\xae\x41\xbc\xbd\x1a\x4d\x66\x59\xf4\x01\x91\xb5\xaa\xcc\x72\xe8\x03\xe4\xd0\x5e\x8d\x26\xf5\x0c\xfa\x12\xf0\x35\xd2\x54\xf9\xf3\x27\x36\xad\x46\x9c\x7a\xf6\xc4\xd0\x75\x1a\x65\xfc\xf3\x16\x70\x35\x12\x95\x99\xf3\xf3\x13\x00\x6b\x34\x2a\xf3\xe6\xe7\xfc\x54\x97\xde\x51\x8d\xf4\xe4\xc6\xec\xfa\x0e\x80\x35\xd2\x23\xb7\x65\x1b\x18\x93\xa3\xba\x04\x59\x59\xb5\x7f\x42\x5a\x8d\x34\xcb\x5b\xb2\x6b\xe8\x9e\xa3\x1a\x91\x56\x37\x64\x1b\xd8\x58\x1c\xd5\x48\xd6\xbc\x1d\xbb\xbe\x06\x7a\x8d\x8e\xf7\x37\x63\x3f\x91\x54\x23\x69\xb9\x65\xfa\x8c\xdd\x50\xa3\xe8\x6c\x23\x76\xf3\x19\x90\x35\x7a\xce\xb6\x61\xb7\x88\xac\x51\x32\xbd\x09\x7b\xc4\xbe\xaa\x51\x76\x75\x0b\xf6\x05\xaf\x58\x23\x5c\xd3\x06\xec\x3b\x76\x41\x8d\x98\xd5\xf6\x6b\x93\x3f\x74\xd1\x3b\xae\x5d\x74\x9e\x3b\x09\xe3\xe7\x70\x57\xe2\xb8\x46\x97\x19\xf2\x27\x22\x6b\x57\x5b\x23\x6f\x6a\x9d\xa7\x31\xe3\x21\xe2\x6b\xfa\x17\xd1\x77\x80\xee\xd7\xd6\x79\xe2\x25\x0e\xe3\x13\x04\xd7\x54\x5a\x41\x77\x08\xad\x5d\x2b\x7b\x17\x33\x2f\x89\x1d\xc6\x4f\x11\x5f\xb7\xea\x00\xf4\x77\x40\x0f\x6a\x37\x0d\xf9\x7d\xab\x31\xa2\x6b\xd2\x4c\x8e\x5d\x03\x76\x58\x1b\x79\x3a\xcb\x1f\xf8\x48\x17\x49\x1a\x05\x8c\x4f\x91\x57\x73\x8d\x0a\xeb\x53\xce\x3a\x52\x8f\x9f\xce\xc4\x28\x12\x67\xb8\x60\xf4\xe5\xdc\xe7\xfc\x0e\x90\x76\x15\x32\x12\x09\xe3\x23\x44\x74\xaa\x88\xa9\x37\x97\x0b\x96\x29\x62\xba\x55\x8c\xeb\xf8\x32\x8e\x8b\x98\x5e\x15\x23\xf5\x3d\x43\xc0\x51\x15\x70\xea\x9c\x32\x7e\x86\x88\xe3\x2a\xe2\xc2\x39\xf7\x02\xc6\x2f\x10\xd3\xa7\x2e\x93\x30\xfe\x11\x11\x83\x2a\x22\x91\x88\xe4\x33\x20\x86\x55\xc4\x79\xe8\x32
\xfe\x1e\x63\x2c\xab\x08\xf5\x6b\xc2\x89\x33\x61\xfc\xe4\xbf\x80\x5b\x55\x71\x0a\x81\x91\xd6\x55\x84\xef\xcc\x85\xcb\xb8\x8f\x98\x8d\xe9\x6a\xf3\x6c\xb3\x8d\x57\xdb\x56\x71\x0a\x81\x91\x76\xa6\x48\x41\x2a\xf7\xe2\x10\x69\x4b\x28\x47\x21\x20\xd2\x96\x10\x4e\xec\xcc\xc5\xc9\x8c\xf1\x18\x41\x84\x76\xd4\x78\x09\x84\x10\xd2\x51\x35\xca\xf6\xf1\x58\x21\x42\x3d\x19\x00\xe3\x10\xea\x51\x71\x92\xd8\x71\x3d\xc6\x2f\x60\x74\xb7\x84\x8a\x00\xf3\x1e\x30\x84\x8a\x3e\x84\x13\xc6\x3f\xe0\xf5\x08\x15\x45\x22\x9e\x31\x1e\x21\x84\x90\x51\x3c\x93\xad\x8f\xaf\x00\x42\xe8\x28\x91\x9a\x4f\xf2\x20\xc7\xea\xce\xa0\x13\x39\x23\x99\x2f\xc2\xec\x36\xfc\x1f\x50\xa8\xee\xd9\xe4\x85\xb1\x98\x7b\xe3\x30\xfb\x41\xe9\x4f\x04\xec\x34\xc0\xfe\x91\xb4\x00\x52\x37\x3e\x72\x10\x2e\x12\xe7\x17\x0e\xe3\xb3\x97\x80\xe9\x54\x31\x32\x95\xc0\xc3\xa8\xae\xeb\xe0\xa9\x7a\x73\xe0\x74\xeb\x38\xd9\x15\x90\x33\x03\x4e\xaf\xca\x39\x73\xce\x08\xca\x0f\xa4\x1c\x35\x5f\x26\xbf\x6b\xbf\xbc\x02\xce\x71\x95\x73\x2e\x66\xc4\x65\x7e\xe2\x65\xfa\xf4\x65\x18\x5f\x62\x47\x0e\xaa\x90\x91\x98\x31\xbe\x42\xc4\xb0\x8a\x48\x84\xec\xc0\x68\x94\xca\x19\xee\xf1\x2b\x20\x97\x24\x92\xf1\x47\x8c\xb5\x22\x10\xb3\x0c\xf2\x05\x20\xeb\x2a\xe4\xbd\x90\x19\xe1\x13\x46\xd9\x50\x83\x3f\x63\xfc\x0a\x83\x6c\xab\x88\x93\x0c\xf2\x2f\x06\xd9\x55\x21\xae\xe3\x33\xbe\x01\x84\xba\x87\xb1\x5f\xd9\x0c\x02\xd7\xe9\x12\x32\x8c\x64\x73\xee\x31\x08\x21\xc2\x0b\x47\x1a\xea\x17\x42\x08\xcd\xc5\x42\xee\x85\x1f\x10\x42\x48\x2c\x9e\x29\xcc\x2d\x60\x08\x4d\xc5\x8e\xcb\xf8\x1a\xa3\x10\x0a\x72\x25\x62\xb3\x01\x04\x21\x98\x44\x76\xdb\xe3\x67\x40\x10\x7a\xb9\x90\x88\x5f\x20\xba\x2e\xa1\x97\xac\xc1\x5b\xac\x07\xa1\x93\xe9\x2c\xc3\x5c\x03\xa6\xa7\x77\x7e\xe2\x24\x67\x42\xce\xde\xbf\x63\x31\xd1\xf1\x13\xd9\xf1\x3b\x44\x10\x1d\xff\x41\x4e\x63\xff\x43\x04\xd1\xef\xd9\x44\xf7\x05\x11\x44\xb7\xfb\xce\x9c\xf1\x1b\x44\x10\x9d\x3e\xcf\xd4\xfa\x15\x21\x44\xaf\x07\x61\x76\xf4\x00\x42\x88\x6e\xcf\x5c\x71\x85\x08\xa2\xdb\xcf\x9c\x33\xc6\x7f\x20\x82\xea\x76\x99\x52\xe6\xce\x49\x9c\x46\x72\x93\x89\x50\xa2\xff\xcf\xe5\xe5\x7e\x22\x42\xf7\xe9\xc4\x49\x66\x4e\xe2\x04\x8c\xbf\xc5\x72\xdd\xa4\xae\x33\x9f\xab\xf2\xff\x60\xb9\xee\xd0\x13\x27\x8e\x54\xf9\xdf\x58\xbe\xdd\x8f\xcf\xf8\x1b\x2c\xdc\xed\x07\x67\xfc\x25\x14\x1e\x1d\xee\x47\x66\xbc\x8d\x85\xba\x2a\xe2\x99\xcc\xf0\x8c\x77\xb1\x54\x57\x44\x9c\x9e\xc8\xb5\xc1\x21\x14\xaa\xb7\x4a\xf5\x53\xe1\xc9\xb4\x19\x03\x9c\x48\x30\x0b\xd9\x85\xdf\x20\xe0\x92\xd0\xce\xa9\x44\x7c\x47\x04\x91\x81\xa6\x52\x7f\x9f\x76\x39\x42\x3d\xb3\x2e\x9c\x38\x11\x51\x90\x23\x0f\xb2\x73\xe8\x2d\xd7\x9b\x7a\x89\x75\x21\xa2\x90\xf1\xc3\x25\xe0\xdb\x0d\x78\x75\xd2\x3d\xc2\x3b\x0d\x70\x75\xc4\x3f\xc2\xbb\x4d\xf0\xfc\xc4\x7f\x24\xf4\x1a\x08\x93\x30\x8d\x18\xef\x21\xfe\xa8\x09\xef\xc9\xe9\xed\x08\xf1\xc7\x0d\xf8\xd8\xfb\xc8\xf8\x31\xc2\xfb\x4d\x70\x71\x2a\x33\x6a\x1f\x09\x83\x06\x82\xf0\xa6\xb3\x84\xf1\x01\x12\x86\x0d\x84\xc0\x93\x03\x30\x54\xf8\xf6\xb6\xf9\x30\xcb\xec\xd1\xf1\x7c\x25\x70\xe9\x1c\x1c\x5e\x02\xb3\xe1\xf5\x9e\x12\x6f\xa9\xf1\xcc\xef\xf5\x8c\x2a\x2f\xb1\x8e\x5e\x03\xcb\xf4\x66\x4f\x95\xb3\x42\x8e\xf9\xfd\x09\x8d\x05\x55\x1c\x1d\xbc\xc6\x2a\x9a\x5e\xa3\x20\x68\x2b\x8d\x66\x7e\xcf\x27\x27\xaa\xef\x07\x29\xe6\x08\x69\xa6\x97\x7d\x08\xd2\x0a\x49\xcd\x07\x5f\xaa\x03\x11\x4a\x2f\x23\x8c\xff\x78\x89\x01\x9a\x8e\xc0\x24\xe8\x6b\x8d\x6e\x7e\x73\xc4\xad\x8c\x88\x8b\x23\x62\x7a\x57\xa4\xca\xd9\x20\xc7\xfc\x76\x88\x5b\x1d\x0e\x57\x1b\x0e\xd3\x0b\x22\x04\x6d\xa3\xd1\x1a\x8f\xc6\xd4\x07\xc4\x45\x5a\xc3\xf9\x98\x3a\x69\x03\xa4\xe6\x43\x32\xf1\x58\x0b\xf7\x0f\xe0\x34\x9c\x92\x89\x8c\x0d\x32\xcc\x8e\x7
3\xf7\xdf\x9c\xd7\x3a\xf2\x1d\xd6\xd2\xe4\x3d\x33\x7b\xa3\xb1\x1b\x0f\xd0\x54\xaf\x63\x95\xde\x83\x14\x07\x2f\x90\xdf\x70\x9c\x26\xc1\xde\x6a\xec\xc6\xc3\x35\x35\x3e\xe8\x5c\x1c\xa0\xce\x9b\x8e\xd9\x24\xd8\x5b\x8d\xdd\x78\xe0\x26\xd1\x77\x42\xeb\xbb\x86\xd3\x37\x09\xf6\x56\x63\x37\x1e\xc5\x99\x1d\x05\xa2\x5d\xf8\x1f\xa4\x36\x9c\xc9\x59\x26\x6e\x35\xa2\xd9\xad\xa2\x9a\x58\xf2\xd7\x98\x2e\xc5\x1f\x2d\x0c\x60\x72\x6d\x0d\x7d\xab\xd1\xcd\xee\x9d\x54\x72\xcc\x04\x72\x8c\xf1\xd0\xce\x2a\x67\x07\x9c\xe6\x83\x3b\xe1\x35\xc3\xe9\x01\x50\x1a\x4e\xed\x04\xc2\x27\x24\x98\x9d\x3b\xab\xd4\x6c\x86\x35\x33\xf9\xb5\xca\xb9\x42\x4e\xe3\xb1\x9d\x7a\xba\x9c\x15\xe9\xb2\xe9\xe0\x4e\x9d\x76\xa5\xd1\x1a\x8f\xee\xd4\x1e\x6d\x9d\xbd\x05\x56\xc3\xe9\x9d\x1a\xe7\x0a\x39\x66\x0f\xce\xf6\x32\xe6\x0c\x32\xa6\xf1\x08\xcf\x7d\xc6\x15\x32\xcc\x5e\xcb\x39\x99\x58\xb5\x1e\x44\xc9\x1a\x0f\xf3\x24\x89\x57\x1a\xb1\xf1\x50\xcf\xb2\x4d\xbd\xc2\xa6\x4d\xa7\x7b\x96\x89\xd7\x1a\xb1\xf1\x98\x4f\xfa\x05\x7f\xef\x2d\x26\xc5\xa6\x63\x3f\xe9\x00\xd7\x45\x80\x9a\x63\x40\x4f\x4a\x8f\x1c\x9f\xbc\x04\x86\xc9\x78\x65\xfc\x17\xc4\x9b\x7d\x77\x52\x95\xf5\x49\x21\x6b\xe2\x50\x50\x23\xed\x8b\x46\x33\xbb\xef\xa4\xba\x7c\x38\x41\x9a\xc9\x7b\x04\xe9\x0b\x92\xcc\xce\xf3\xab\x95\xf4\xb5\x4a\x9a\xcc\x47\xd0\x6e\x34\x9a\xd9\x7f\xfb\xc4\xf2\xb9\x14\xfe\x81\x16\xc4\x64\xc8\xfa\x10\x37\x7a\x08\xb3\x43\xfd\x6a\x77\xf9\x48\x33\xb9\x93\x20\xdd\x20\xc9\xec\x4c\xdf\x38\x73\xfb\xc5\xcc\xdd\x35\xd9\xd3\xcc\xbe\xd1\xd8\x66\x8f\xce\x4b\x7a\x9f\xa3\xde\x4d\x96\x2c\xe3\xbf\x02\xbe\x67\x76\xe0\xbc\x32\xc7\xcc\x61\x8e\xe9\x99\x5c\x58\xe5\x7c\x45\x8e\xd9\x89\xf3\xaa\xec\xe6\xc5\x70\xf7\x4c\x4e\x24\x68\x5f\x35\x9a\xd9\x89\x41\xa5\x96\x01\xd6\xd2\x64\xc4\x2a\xe7\x16\x39\x66\x1f\x06\xd5\x2a\x06\x5a\x15\x4d\x3e\x24\x68\xb7\x1a\xcd\xec\xc3\xa0\x2a\xe5\x00\x69\xe6\x33\x49\x2b\xa4\x5b\x24\x99\xbd\x16\x18\x15\x1c\x14\x0a\x36\x9e\x4c\xfa\x3c\x76\xe3\x39\x12\x6a\x7a\x2b\xbd\xe4\xff\x0f\x4e\x2f\x4d\x47\x96\x56\xc9\x77\x1a\xb9\xf1\x84\x81\x82\xae\xad\x56\x2e\xc3\x7f\xde\x62\x88\x86\x43\x06\xe8\x00\x77\x45\x80\xe6\x03\x4e\x89\x3d\x53\x58\xec\x99\x9a\xce\x39\x25\xd8\x77\x1a\xbb\xf1\x5b\x70\xc4\x9e\x29\x2c\xf6\x4c\xc4\x21\xa8\x4d\xec\x3b\x8d\x6d\xf6\xef\xa2\x94\xcb\x16\x90\xcb\x8c\x47\xa3\x96\xf1\xdf\x10\x6f\xf6\xed\xa2\xe2\xf6\x05\xb8\xdd\x78\x38\x6a\x95\xf3\x0d\x39\x8d\xc7\xa3\x6a\xac\x08\x59\x0d\x47\xa4\x6a\x9c\x7b\xe4\x98\xfd\x1a\x55\x93\x4a\x54\x24\x15\xe3\x69\xa9\x04\xed\x5e\xa3\x99\x1d\xba\x4f\x2c\x4f\xe8\x91\x36\xa1\x1b\x8f\x50\xad\x0f\x71\xaf\x87\x30\xbb\x35\xaa\xa6\xb7\x08\x69\x26\x87\x12\xa4\x7b\x20\x35\x1f\xab\xaa\x8d\x4c\x0c\x23\xd3\x74\xb4\xaa\xc6\x79\x40\x8e\xd9\x81\x71\x75\x58\xe2\xa2\x3b\x8c\x67\xac\x12\xb4\x07\x8d\x66\xf6\x5c\x5c\x79\x27\xba\xa8\xef\x65\xfc\xb2\x08\x61\x32\x61\x6d\x80\x07\x2d\x80\xd9\x95\xfa\x21\xb4\xd5\x3a\xfc\x55\x84\x30\x59\xb4\x36\xc0\x83\x16\xc0\xec\xd7\xfd\x0e\xac\xd6\xe3\xb5\xd6\x9f\x26\x07\xff\xbf\x45\x31\x7b\x3a\xa9\x68\x27\x41\xed\x98\x0c\x5d\xe5\x3c\x22\xc7\xec\xe6\xa4\x2a\x9c\x44\xab\xa2\xc9\xc1\x04\xed\x51\xa3\xd5\x7c\x10\xb2\x6a\xc0\x04\x69\x26\xd7\x12\xa4\x47\x20\xd5\x1c\xf5\x9a\x18\xd7\x22\x49\xb1\x16\x31\x9e\xf8\x6a\x66\x3f\x6a\xec\xc6\x83\x5f\x8b\xcd\x32\xd0\xd3\x83\x03\x5c\x09\x34\x9d\x02\x5b\x65\x3f\xe9\x6c\xb3\xaf\x53\xea\x36\x41\x5a\xdc\x26\x30\x1e\x0b\x4b\x12\x9f\x34\xa2\xd9\xc7\x95\x0f\xcc\x69\x17\x2e\x7a\xcc\x64\x62\x33\xfb\x49\x63\x3f\xef\xb0\xe5\xf2\xd9\x87\xc5\xe2\xaf\xff\x9c\x93\x97\xcb\xe7\x1e\x6a\xe4\x67\x1e\xc3\x5c\x59\xf9\xa5\xda\x88\x3d\xeb\x5c\xe6\x4a\x84\x27\x2d\x82\xd9\xcb\xa7\x5a\x23\x18\x3f\xfd\x07\x18\x26\x13\x
97\xf1\xdf\x11\x6f\x76\xef\x69\xd5\xf6\xa7\x85\xed\xfb\x26\xff\x12\xb4\xef\x05\x6d\x60\x76\x70\xfe\x80\x54\xbe\x88\x3d\x7b\x01\x0c\x93\x67\xcb\xf8\x1f\x88\x37\xbb\xf4\xac\xb4\x86\x3c\x83\x35\xe4\xc0\xe4\xcb\x32\xfe\x07\xe2\x1b\x0f\x67\xd6\x86\xf3\x0c\xee\x7c\x0e\x1a\xce\x66\xd6\x38\x3f\x90\x63\xf6\xdf\x59\x65\x06\x38\x83\x19\x60\x60\x72\x5d\x95\xf3\x03\x39\x66\xaf\x9d\x55\x07\xf4\x4c\x1b\x50\x93\xcb\x08\xda\x0f\x8d\x66\xf6\xd7\xc7\x4a\x2d\x3f\x62\x2d\x4d\x8e\xaa\x72\xfe\x45\x8e\xd9\x43\x1f\x2b\x3d\xff\x11\x7b\xde\xe4\xa3\x2a\xe7\x5f\xe4\x98\xbd\x74\x5e\xa9\xdf\x39\xd6\xaf\xe9\xa3\x97\x05\xe7\x27\x70\x86\x66\x17\x5d\xec\x67\x56\xc6\x2f\xde\x01\xcd\x64\x25\x82\xf4\x0b\x49\x66\x3f\x5d\x54\x87\xf8\xa2\x18\xe2\xa1\xc9\x56\x04\xed\x97\x46\x33\xbb\xeb\xa2\xba\x34\xb8\x40\x9a\xc9\x5e\x04\xe9\x17\x92\x4c\x67\xf4\xce\xaa\xa4\x2b\x24\x35\x7c\x34\x40\x93\xc6\x23\x48\x63\x68\x3a\x87\x37\x77\x89\x7e\x22\xe0\x8f\x43\x20\x99\xac\x75\x5e\x25\xfd\x04\x52\xcd\xe9\xdf\x4e\xb5\xdf\x9d\xa2\xdf\x8d\xa7\x80\x13\xb4\xa5\x46\x6b\xfc\x36\xea\x2c\x0c\x4f\xa0\x9a\x4e\x07\x68\x0d\x1f\x48\xd5\x49\x4b\x24\x35\x7e\x25\x55\x5b\x50\x94\x0e\xc8\x7c\x87\x13\xfb\xb2\xe1\xb3\xa9\x86\x08\x4b\x2d\x82\x39\x29\xd3\x31\xe0\xb6\x8c\xf3\x0e\x6f\xcb\x2c\x4d\x1a\xaa\x8f\xb0\xd4\x22\x98\x13\x36\x1d\x43\xef\xd1\x4b\xe7\x5d\x07\x03\x99\x54\xf6\x8c\x30\x4b\x2d\x4c\xe3\xe1\xa5\x7b\x81\xf2\x95\xc8\xa5\xf3\x0e\x97\x9b\xcb\x86\xc3\x4b\x0d\x11\x96\x5a\x84\x9a\xc3\x4b\xc9\x18\x25\x33\xbc\xd3\x64\x6d\x3c\xcf\xb4\x31\xcc\x52\x0f\xd3\x78\x6c\xb4\xfa\x3d\xb1\x24\xd6\x56\x21\xb5\x86\x43\xa3\xab\xe4\x65\x41\x5e\x35\x66\x82\x82\x8e\x12\x6d\xa1\xc0\x56\x0d\xf9\xa0\x4a\x5e\x6a\xe4\xe7\x7d\x31\x99\x10\x66\x0b\x15\xb5\x7a\xce\x37\x94\x09\x4d\x6a\x11\x1a\xf3\x45\x11\x03\xe5\xd8\x42\x31\xad\x1a\x52\x45\x95\xbc\xd4\xc8\x8d\x59\xa2\xa0\x97\x44\xd8\x2a\xd4\xb3\x6a\xc8\x12\x74\x84\xa5\x1e\xa1\xf1\x91\x17\x9d\x29\x34\x62\xc3\xb3\x2e\x3a\x6d\xab\xd1\x1a\x1f\x72\xd1\xc7\x4a\x40\x62\x5f\x35\x3c\xe0\xa2\x93\xb6\x48\x6a\x7c\xb8\x25\x1f\x16\x01\x9b\x9d\x55\xc3\xd3\x2c\x39\x7e\x8b\xf8\x9a\x83\xa8\x6b\x67\x0b\x51\xcc\x16\x2b\xe3\xc1\xd4\xb5\x11\xb6\x45\x84\xb5\xd9\xc7\x74\x0c\x7c\x6c\xab\x98\x2d\xd6\x26\x33\xd7\x47\xd8\x6a\x11\x1a\xbf\xee\x5c\x37\x51\x88\x62\xa2\x58\x37\x7c\xf3\xb9\x2e\xcc\x56\x0b\xd3\xf8\x20\x9b\x61\xb6\x10\xc5\x6c\xb1\x6e\x78\x98\xcd\x10\x61\xab\x45\x68\x7c\xa0\xad\x66\xa2\x10\xda\x44\xb1\x6e\x78\xb2\xad\x26\xcc\x56\x0f\xd3\xf8\x7d\x5c\xbd\x3f\x3d\xb0\xd2\xba\xe1\x23\xb9\x3a\xe9\x1a\x49\x8d\x5f\xca\xd5\x6b\xe9\x69\x95\x6c\xf8\x5e\xae\x4e\xbb\xd6\x68\x8d\x3f\x24\xea\xc4\x50\x23\x36\xfc\x84\xa8\xd3\xee\x34\x5a\xe3\x8f\x87\x7a\xb7\x84\xd8\x2d\x0d\x3f\x18\xea\xa4\x3b\x20\x6d\x1a\x7f\x24\x34\xe4\x89\xb0\xc8\x13\x9b\x86\x1f\x0a\x0d\x11\xee\xb4\x08\x8d\x3f\x16\x1a\xf2\x44\x58\xe4\x89\x4d\xc3\x0f\x86\x86\x08\x77\x5a\x04\xb3\xb3\xe9\x18\xa5\x14\x11\x16\x29\x62\x63\xb2\xf7\x33\xc2\xdc\x69\x61\xcc\x1e\xa7\x03\x41\x9e\x08\x8b\x3c\xb1\x31\xd9\xbb\x3e\xc2\x9d\x16\xc1\xec\x6c\x3a\x46\xc9\x0c\x5a\x8a\xd8\x98\xbc\xde\x1c\xe6\x4e\x0f\xd3\xf8\x09\x8a\x59\x18\xed\xfd\xac\x3c\x2c\x94\xd6\xf0\x35\x8a\x0a\xf7\x4e\xe3\x36\xe6\x01\x64\xa3\x3e\x87\x85\xba\x1a\x92\x41\x85\x7b\xa7\x71\x9f\x91\x11\x72\x76\x59\x93\xc3\x42\x4c\x8d\xd9\x81\x0a\x70\x57\x04\xa8\xf9\x10\xcc\x7e\x08\xd4\xe1\x10\x55\x64\xfc\x0a\x8c\x89\x7b\xa7\x71\x1b\xb3\x03\xb2\x4b\xda\x1b\x16\xa2\x31\x7e\x11\xa6\x2e\xc0\x9d\x1e\xa0\xf1\x47\x10\x9d\x99\x6a\xc4\x86\x9f\x40\x74\xda\x93\x46\x6b\xfc\x01\x44\x1f\xa7\x14\xd2\xb9\xf1\x8b\x31\x04\xe9\x09\x49\x8d\x3f\x77\x54\x5c\x91\x16\xae\x30\x7e\x42\xc6\xc4\x7d\xd2\xb8\x8d\x3f\
x76\x54\x5c\x91\x16\xae\x30\x7e\x59\xc6\xc4\x7d\xd2\xb8\x8d\x5f\xa2\xa4\x0d\x91\x6a\x86\x68\xf8\x34\x25\x1d\xe0\x49\x0b\xd0\xf8\xad\xca\x8a\x2b\x52\xcd\x15\x0d\x9f\xac\xac\x70\x9f\x0a\xee\xae\xf1\xcb\x95\xa4\x21\x52\xcd\x10\xbb\x86\x6f\x58\x92\x01\x9e\xf4\x00\x66\x57\x9f\x97\x7e\x60\x39\x87\x1f\x58\x76\x26\x13\x97\xf1\x3f\x11\x6f\xf6\xec\x79\xd5\x7c\xe7\x5a\xd5\x4c\x9e\x25\x68\x3f\x35\x5a\xe3\xf7\x2c\x75\x25\x9c\x83\xfd\x76\x0d\xdf\xb3\xd4\x49\x3f\x91\xd4\xf8\x3d\xcb\x7c\xd8\xcf\x61\x4f\xb9\x6b\xb8\xa9\x9b\xe3\x7f\xe6\xf8\xdd\xa1\x76\xa2\xb0\xeb\xc4\x9e\xf6\x0e\x18\x7e\x08\xe6\xcf\x97\x00\xd6\xce\x14\x5e\xc4\x9e\xef\x11\xe0\x3f\x10\xdc\xa1\x22\x9f\x3a\x91\xe7\x30\xfe\xe7\x0b\x40\x75\xa9\x90\x39\xea\x0f\x44\xf5\xa8\x58\x0b\x11\x79\xf1\x22\x9c\x8b\xc0\x63\xfc\x9f\x3f\x01\x7b\x44\x45\x2c\x63\xff\x00\xac\x76\x98\x78\xf9\x49\xf4\xbc\x06\x2f\xde\x02\xb2\x6f\x42\x96\x23\x2b\x7c\xe7\x50\xb5\x5e\x04\x56\xbc\x70\xc6\x82\xf1\x76\x00\x05\xea\xad\xd6\x39\x16\xcc\xa1\x20\x6b\x63\xf6\x4e\xeb\xc1\x42\x44\x07\x05\xa4\x8b\x90\xac\x69\x93\x30\x8d\xf6\x10\x3d\x44\x64\x0d\x8a\xbd\x8f\x7b\x80\x63\x04\x0c\xd5\x55\xbc\xa2\x66\x09\x14\xa9\x83\x36\x1c\x2f\xc2\xa2\x59\x5e\xa4\x5e\x5d\x9b\x9d\x2f\x66\x22\x60\xfc\xa0\x0d\x7f\xee\xe6\x8d\x74\x9d\x78\xc6\xf8\x01\xb4\x51\xbd\x05\x26\xe6\xf0\x77\xb8\xb8\x7a\xbb\x6b\x16\x46\xde\x45\x18\x48\x2d\x8f\x9c\x88\xf1\x83\x2e\x14\x67\x95\xcf\xbf\x45\x74\x2a\xa2\xc4\x1b\x4b\xbd\x67\xef\xb0\xbe\xe8\x00\xa8\xaf\x81\xfc\xf0\x2c\x2f\xff\x1b\xcb\x95\x65\x44\x76\xca\x5a\x30\xf5\x85\xf5\x21\x0d\x13\x47\x3b\xd4\xe3\xe5\x31\x40\xb3\xbe\x88\xbc\xe9\xcc\x8c\x1d\x02\x76\xa9\x3a\x36\x43\xf9\xe1\xd9\xc1\xb0\x82\x7d\x8d\xd8\x95\x86\x9d\x79\xd3\xd9\x41\x24\x4e\x45\x14\x0b\x97\x60\x0d\x5f\x02\x6b\x8d\x15\xcf\x5b\xb7\x0f\xfd\x0f\x56\x7c\x53\x54\xdc\x84\xc5\xca\x6c\xcb\xfd\x45\x54\xe1\x2d\x62\x77\x1a\xb6\xb1\xe2\xff\xc9\x59\xea\xdd\x28\xd7\x99\x4e\x45\xc4\xf8\x9b\x03\xf8\x73\x5b\x0b\x86\xa5\x7f\x43\x69\x26\x85\xe4\x2c\xcc\xd2\xac\x2f\x1c\x57\x16\xbf\x7e\x9d\x17\xab\xb7\x3e\x16\x22\xb2\xe6\x9e\xef\x0b\x38\x30\xef\x10\x8a\x33\x77\x2d\x22\x6f\x2e\x55\x0a\x3d\xa8\xde\xc7\xc8\xaf\x98\x17\x76\xb0\x50\xd9\x2b\xf2\x16\x45\x61\x17\x0b\xb3\xda\x40\x63\x31\x30\xb4\x50\xbd\x45\x81\xc5\x7b\x57\x40\x54\xbf\x84\xda\xbb\x14\xa2\xd4\xc9\x6f\x4e\x24\x12\xc6\xc7\x4b\xf8\xeb\x50\x97\x97\x98\x24\x07\x8b\xd0\x0b\x92\xec\x17\x38\x52\x98\x7f\x81\x07\xbb\xba\x30\x33\x49\x34\x51\xdf\x21\x75\xa5\x2a\x3c\x11\x91\x08\xc6\x02\xd4\xf0\x11\x8a\xd7\x5a\x77\x8a\x8f\x63\xdf\x99\xeb\x61\x2e\x5f\xfc\x96\xaf\x71\x3a\x87\xea\x25\x84\xf0\x54\x44\xca\x8d\x2f\x41\x04\xea\x09\xfb\x49\xe4\x8c\x33\x66\xec\x67\x09\xe1\xcd\x2e\x2f\xae\x9e\x82\x50\x3a\x67\x20\x06\x58\x8f\x86\xe5\xef\xf3\x23\xec\xc8\x00\x53\xaf\xf1\x23\xec\x98\x86\xa9\xb7\xf7\x11\xd5\x37\xa0\xf2\x97\xf6\x11\x37\xa0\x71\xf0\xae\x3e\xe2\x86\x34\x2e\x7f\x45\x1f\x61\xcb\x7d\xd8\xc2\x4f\xe3\x5c\xff\xbf\x23\x6a\xb5\x8f\x9a\x7b\x41\x1a\x33\x7e\x80\x88\xf5\x3e\x42\x7c\x48\x1d\x1f\x22\xfd\x8d\xb8\xcd\x3e\x2e\xcb\x40\x0b\x27\x12\x41\x32\x53\x3f\x33\xb7\x10\xbc\xdd\x07\xab\x1c\x54\x42\xbf\x42\xf4\xae\x12\xba\xba\x2c\x09\x18\xbf\x05\xc2\x20\x57\xc3\x88\xd2\xc2\x03\x80\xda\x14\x48\x1d\x34\x81\x98\x0e\x85\x51\xa7\x4b\x20\xa6\x4b\x62\xf2\x23\x25\x10\xd5\xa3\x50\xb9\xee\x10\x74\x44\x82\x94\xea\x10\x74\x4c\x81\x94\xe6\x10\xd3\x27\x31\xb9\xe2\x10\x35\xa0\x50\xa0\x37\x44\x0d\x29\x54\xae\x36\x04\x2d\xcb\x20\x5d\x6b\x88\x59\x95\x31\xa0\x34\x2c\x5f\x97\xcb\xcb\x3a\x43\xd4\xa6\x8c\x22\x54\x86\xd0\x6d\x19\x4a\x69\x0c\xb0\xf9\x4f\xf6\x5e\xe4\xe4\x17\xf4\xaf\xa1\xa4\xaf\x66\x90\x58\x24\x50\xb6\x78\x84\xb2\xac
\x6f\xce\x64\x42\xca\x0a\xce\xf2\x59\xa9\xad\x16\x67\xae\x90\xeb\x3c\x4b\xae\x67\xbd\x69\xe4\xc8\xf5\xf2\xdd\x18\x10\x47\x90\xbe\xad\x70\xc2\xf8\xfa\x0e\xfe\x3e\xd4\x98\x13\x67\x16\x89\x60\x26\xbc\x84\xf1\xbb\x49\x8e\x50\xab\x9b\xec\xd3\x26\x61\x7e\xdd\xe0\x10\xca\xd4\xc0\x87\x69\xe0\x5a\x91\x18\x87\x91\x2b\xd3\xf7\x38\x5c\x9c\x47\x6a\x58\x17\x21\x20\xb7\x6a\xe6\x13\xaa\x73\x64\x52\x4d\x9c\x13\xc1\x78\xf4\x6f\x8e\x50\xd3\x71\x2c\xa2\x53\x0f\x73\x7a\x3c\x87\xc2\x8e\x9a\x05\x1d\x57\x15\xe5\x15\x49\xb0\x3c\xab\x64\x38\x9b\xe7\x05\xe1\x57\x28\x50\x27\xf4\x04\xd3\x38\x89\x42\x01\xc5\x0e\x54\x4b\xbd\xc7\x71\x9a\xfa\x53\x27\x2a\xd2\x7d\x18\x08\xb9\xd4\x8c\x5c\xc6\xdb\x5d\x40\xf6\x28\xa4\x5c\x08\x64\xc8\x98\xf1\x0e\x42\x8f\x4c\x41\x27\xde\x24\x99\x31\xde\x3e\x02\xe4\xb1\x29\x68\x86\x94\x41\x11\xda\x27\xa1\xb3\x6c\xdc\x72\x70\x17\xc1\x03\x0a\x2c\xed\x8f\xd8\x1e\x62\x87\xa6\xda\xc6\xde\xc7\xac\xb6\xc7\x80\x5c\x92\x51\xbd\xd3\x1c\x1a\x33\x7e\x84\xd8\x95\x29\x6a\x66\x78\x19\x76\x00\xd0\xb5\xb9\x65\x0a\x2c\x9b\x86\xe8\x8d\xb1\x12\x08\x3e\x42\xf0\x96\x02\x67\x89\xa9\x40\xf7\x01\xad\x5e\x7e\x88\xc2\xb9\x13\xe0\xa7\x7c\x54\x82\x8e\x00\xd1\xae\x22\x54\x7a\x46\x44\x87\x40\xe4\xc9\x19\x31\xdd\x2a\x26\x4f\xcd\x08\xe9\x11\x10\x95\x98\x11\x72\x54\x85\xa8\xb4\x8c\x88\x63\x02\x91\x27\x65\xc4\xf4\xab\x18\x48\xc9\x88\x19\x54\x31\x79\x42\x46\xc8\x90\x68\xb6\xbc\xd0\x12\x11\x4b\xe2\x42\xbe\xaa\xcd\x0a\x41\x2b\xaa\x7f\x85\x2f\x1b\xbe\x46\xd0\x9a\xea\x9b\x49\x72\xce\xf8\xe5\xd1\x61\xfe\xfe\x4b\xa7\xad\x9e\xdb\xaf\x8c\xa6\x35\x4b\x03\x37\x12\x2e\xe3\x97\xed\x43\x0d\xbd\xa5\x3b\x5c\x83\x1f\xe9\xf0\x1d\x1d\x3c\x99\x85\x69\xec\x04\x79\xf4\x02\x9f\x2f\x18\xb3\x15\x04\x25\xb0\x7b\xc0\xb5\x4d\x38\x25\x33\xc4\x75\x8c\xb8\x5c\x6c\x88\xec\x9a\x90\xb9\xe4\x10\xd8\x33\x02\x95\xf0\x10\x78\x64\x02\x2a\xf9\x21\xee\xd8\x88\xcb\x45\x88\xc8\xbe\x09\x09\x52\x44\xe4\xc0\x84\xcc\x05\x89\xc0\xa1\xb1\x83\x32\x59\x22\x6e\x69\xbc\x34\x88\x13\xa1\x2b\xf3\xd8\xe4\x12\x45\xe8\xda\xdc\x97\x20\xd4\x7b\xd4\xc6\xa6\x46\x1b\x65\xb9\x16\x9c\x6d\xdd\x60\x95\x45\x5b\x90\x76\x75\x17\xda\x93\x2e\xb2\x06\x74\x56\x44\xbc\x35\xb6\x50\xee\x63\x17\x59\x44\xa6\xcc\xaa\xa6\x5d\xe6\x48\x77\xc8\x80\x4a\x9c\x22\xd8\xaf\x56\x41\xc8\x1f\xf6\x15\x93\xe4\xcc\x89\xdc\xd8\x72\xa2\x28\x3c\x63\xfc\xaf\x03\x28\xcf\x6a\x90\x2e\x4a\xa5\x07\x2f\xa0\xb4\x83\xb7\x22\xca\x80\x77\x00\xc8\x37\xe5\x67\x41\xb9\xfc\x3b\x94\xf7\xf0\xb6\x87\x5a\xe8\xc1\xf5\x31\xc0\x91\xba\x7e\x16\x03\x4a\x53\x17\x4a\xd5\xaa\x2a\x8c\x92\x99\x75\x26\x62\xa4\x5f\xfe\xf5\xe2\x05\xb6\xb0\x5f\x60\x84\xa3\x61\xde\xbc\x79\x87\x98\x41\xbe\x02\xdb\xc7\xbc\x78\x51\x60\x86\x05\xa6\x7c\xad\x37\x6f\x00\xb3\xd9\xeb\x4d\xf8\xb2\x76\xde\x28\x58\x68\x6e\xf6\xbb\xad\x8c\xfb\x1b\x1a\xbf\xd9\xef\x9d\x3d\x5c\x1e\xaf\xa3\x6e\xe3\x4e\xc2\xc8\x72\x7c\x9f\xf1\x89\x03\x7f\x57\x37\x4a\x9c\x28\xf1\x1c\xdf\x72\xbd\x49\xb6\xd7\x97\xff\x83\xf1\xcd\x02\x40\xd9\x10\x25\x33\x11\xc9\x7d\xbe\x17\x27\x31\xe3\x89\x80\x42\x75\x48\xde\x7c\x91\x9c\x5b\xb1\x48\x18\x7f\x73\x08\x25\x59\xd7\x7b\xc1\x38\x12\xf3\xec\xd6\xaf\x8b\x1c\xd5\xe1\xce\xc8\x77\x18\x0f\x46\xf0\x57\x75\xbc\x9d\x9f\xa1\xb3\xd5\x73\xeb\x00\x8a\xd4\x89\xe9\x61\x90\x38\x5e\x10\x5b\x4e\x6c\xcd\xc5\x7c\x24\x22\xc6\x0f\x5e\x01\x24\x33\x5d\x70\xe0\x44\xe7\xd6\x22\x0a\xdd\x74\x9c\x30\xfe\x1b\x34\x41\x9d\x93\xa5\x4a\xe3\x74\xae\x6e\x54\x30\xfe\xfb\x05\x94\x67\xfd\x90\x6d\x5d\xf2\xa5\xeb\x41\x07\x8a\xba\x58\x74\x10\x46\x07\xda\x16\xe8\xe0\x77\x80\xa8\xf3\x32\xe3\x44\x44\x5e\x7c\x62\x85\x0b\x11\x39\x49\x18\x31\xfe\x1b\x34\x40\xdd\x75\xcc\x9e\x98\x2e\x4a\xc3\x15\x94\x66\xc2\x19\xa5\xbe\x2f\x1
2\xad\x3c\xc6\x72\x95\x3c\x3f\xa4\x72\x5b\x11\x85\x61\xc2\x78\x94\x40\xd9\x46\x2d\xf8\xc3\x45\x18\xc9\x46\x49\x3b\xcb\xfd\x70\x0b\xca\xb7\x6a\x14\x26\x5e\xe0\xc9\x8c\x78\x08\xc3\xa3\x6e\xe6\xe5\x9e\x0a\xa6\xbe\x60\xfc\xc0\xcf\xcb\xd4\x1e\x01\xfe\x7a\x0a\x7f\x3d\xca\xc5\xe2\xf8\xbe\x50\x97\x59\x40\x07\xe7\x87\xf1\x84\xd3\xec\x9e\x6c\x96\x47\x9c\x00\xca\x06\x7a\x59\xd6\xf2\x08\x8a\x86\xaa\x76\x89\x88\x62\x31\x56\x83\xd2\x4a\xa1\x30\x6b\x76\x1a\x64\x7f\x7d\x85\x7f\x5d\x01\x65\x1a\x49\x99\x7a\xb7\x50\xa0\xdf\x8f\x2a\xca\x5d\x0f\xca\xb7\x20\x22\xb9\x3e\xd7\x02\xe4\x7b\xb4\x4e\x7e\x0b\x50\xea\x7c\x12\x46\x82\xf1\xd7\x6f\xa1\x24\x6b\xf9\x48\x8c\x9d\x34\x16\x8c\xbf\x7d\x0d\x7f\x57\x2b\x3f\xa9\x26\xc6\xdf\x42\xa3\xd4\x8d\xbe\x62\x48\x18\x7f\x8b\x81\xb2\x3a\xaa\x57\xbf\x8a\x81\xfe\xa7\x0d\xc5\xf9\x60\x9d\x8a\x28\x11\xae\xe5\x3b\x17\xe7\x56\xcc\xf8\x78\x9a\x97\xe7\x47\xc4\xc6\xe7\xf3\x45\x12\x66\xb7\xbf\xfd\x73\xb5\xa1\xce\x46\xe3\x1f\x90\x5b\x7e\x0c\xec\x62\x11\x85\x1f\xbd\xb9\x93\x88\x32\x0c\xb2\x42\x7e\x8a\xab\x3f\x0f\xe3\x44\x2f\x07\xe9\xe7\xc7\xaa\xfa\xbe\x56\xf8\xf7\x3f\x79\xa1\xda\xd2\x79\x73\x67\x2a\xb7\xba\x96\x4c\x2d\x86\xeb\xcd\x60\x04\xd4\xc2\x3f\x08\xf5\x8b\xbd\x80\xca\xa8\x35\xbf\xe7\xca\xf4\x33\x86\x8b\x75\xa1\x30\xcf\x73\x71\x7c\x90\xcc\x9c\x40\x5e\x4d\xab\xd2\x5f\x80\x82\x9f\x76\x9c\x44\x44\x04\xf0\x1d\x5e\x2b\x53\xd6\x3c\x1d\xcf\x8a\x98\x8c\xff\xf5\x1b\x14\xaf\xb0\x58\x0f\xc6\xf8\x6f\xef\x00\xb1\x85\x96\x68\xfc\x17\x58\x8f\x1d\x94\x96\xe9\x2f\x80\x5e\xdc\x80\x12\x79\xa6\x1b\x43\x49\x7e\xdb\x69\x21\x0d\xa1\xca\x5e\x61\xd9\x71\x89\x55\x6e\x5b\x7e\x10\x54\xa7\x33\xe8\xef\x45\x28\xe3\x5e\x01\x4e\xcd\x56\x63\x2f\x1a\xfb\x42\xfd\x3e\x5b\x08\xf2\x10\x04\xae\x3e\x4e\x0f\xa0\xbd\xfc\x75\x08\x3a\x59\xc2\x14\x9c\x38\xe3\x13\xc6\x0f\x20\x37\xa9\x07\x1c\xcb\xa1\x5f\x43\xc6\x50\xbf\x7b\xe3\xcf\x38\xc2\xf7\xbd\x45\x76\xf3\xe6\x2d\x0c\xba\xfa\x6d\x7b\xee\xb9\xd9\x7b\x30\xda\xcf\x42\x05\xf6\x75\x8e\xed\xaa\x89\x4c\xae\x61\x04\xe3\xe2\x0a\xfe\x5a\xfc\xd8\x33\x16\x9e\xef\x05\x53\xc6\xff\xea\x43\xa1\xf6\xf3\x0e\x96\xbe\xc3\xd2\x25\x52\x27\x7e\x28\x6b\xde\xff\x0b\x8a\x56\x05\x11\xca\xf2\x91\xed\xaa\x5f\xc2\xf0\x26\xbf\xd4\x40\x7e\x2f\xc7\x03\x44\x7e\xec\xef\x98\xf1\x96\x03\x7f\x53\xbf\x76\x08\x5f\x2c\x66\x72\xcd\xa7\xee\xf1\xc8\xf9\x2d\xc9\x93\x4a\x57\xa5\xe4\x24\x5c\x58\x33\xc7\x9f\xe8\xe9\xeb\x09\x10\x99\x81\x46\x61\x92\x84\xf3\x0a\xe8\x06\x40\x43\x68\xd6\xfe\xcf\x00\xa3\xc8\x19\x9f\xc8\x09\xfc\xaf\x37\x80\x5d\x62\x3b\x8d\xe0\x37\x79\xc3\xf3\x8f\xf4\x85\x0b\x11\x58\xa3\xf0\x23\xe3\xa7\xf9\x2d\xb8\xfc\x33\x57\xe1\x38\xca\x7e\xc2\x66\xbc\x7d\x05\x05\x6d\x28\x18\xcf\x1c\x2f\x62\xbc\x8b\x25\x1d\x28\x99\x84\xd1\x09\xe3\x1d\x2c\xe8\x42\x01\xe6\x48\x85\xe8\x21\xe2\x18\x10\xa3\xc8\x09\xc6\x33\x6b\xe4\x04\x27\x79\x56\x99\x78\xe3\x7c\xde\x6f\x7f\x06\x78\x1f\xe0\xce\x3c\x4c\xb3\x65\x87\x35\x9e\x09\xa9\xe2\x0e\x62\x06\x80\x51\x3f\x57\x76\xb1\x60\x88\x0d\x48\xe3\x24\x9c\x8b\xc8\x72\xc6\xe3\x2c\x4c\x90\xaa\xe5\x49\x0f\xb0\x2a\xf5\xa1\xdb\x8a\x9b\xd2\x97\xed\x83\xbb\x4b\x00\xb5\xab\xa0\x6c\x3f\x7a\xd9\xd1\x40\x1d\x02\xa4\x36\xa3\x97\x5d\x0d\xd6\xad\xc2\xd4\x4e\xf4\xb2\xa7\xa1\x7a\x04\x2a\xdb\x86\x5e\x1e\x69\xa8\xa3\x2a\x2a\xdb\x83\x5e\x1e\x6b\xa0\x63\x02\xa4\xb6\x76\x97\x7d\x0d\xd6\xaf\xc2\xf2\xdd\xe7\xe5\x40\x83\x0d\xaa\x30\xb5\xf5\xbc\x1c\x6a\xa8\x52\x0e\x53\x9d\xae\x36\x9e\x97\xed\x43\x0d\xb6\x24\x60\xb0\xef\xbc\x6c\xeb\x23\xb0\xa2\x02\xe6\xdb\xce\xcb\xb6\x3e\x0c\x6b\x0a\x39\xf3\xa2\x44\xa8\xa8\xfa\x58\x6c\x08\xac\x1c\x0c\xc0\xea\x23\xb2\xa5\xb0\xde\x04\xa0\xfa\xb0\xec\x08\x68\xec\x7d\x04\xa8\x36\x38\xfd\x43\x0a\x
2a\x3b\x00\xc0\xda\x10\xf5\xdb\x54\x77\xc9\x31\xca\xb1\xda\x38\xf5\x3b\x04\x56\x0e\x14\x60\xb5\xd1\xea\x77\xe9\xce\x0d\xb2\x9d\x7a\x47\x1b\x30\x75\x8f\xa4\xb8\xa5\x7f\xb1\xe7\x99\x56\xfb\x15\x42\x8f\x4c\x50\xe5\x9c\x56\xa7\x80\x1e\x1b\xa1\xb9\x7f\x5a\xdd\x02\xdc\x37\x81\x73\x17\xb5\x7a\x05\x76\x60\xc4\x2a\x2f\xb5\x8e\x0a\xec\xd0\x84\x55\x8e\x6a\x1d\x17\xd0\xa5\x11\x9a\x4b\xb7\xd5\x2f\xc0\x2b\x13\x18\xdc\xd5\x1a\x14\xe0\xb5\x09\x9c\x7b\xac\x35\x2c\xb0\x9b\x2a\xb6\xe4\xb4\x56\xfb\xb0\x00\x6f\x8d\x60\xf4\x5b\xab\xad\x8d\xde\xce\x1c\x1c\x5c\xd7\x6a\x17\x43\xa8\x6e\x92\xd0\xf8\xc2\x7b\xad\x76\x31\x8e\xea\x06\x09\xc9\xd0\x1c\xd8\x6a\x17\xa3\x39\xe8\x98\x19\xe8\xc3\x56\xbb\x18\x52\xb5\x66\x23\x09\x85\x1b\x5b\xed\x62\x60\x07\x84\xbc\x09\x4f\xb6\xda\xc5\xf0\x0e\x08\x99\x57\x9d\xd9\x6a\x17\x63\x3c\x20\xd4\x5e\xf5\x67\xab\x5d\x8c\xf4\x80\x90\xfc\xbe\x4b\x5b\x9d\x62\xb0\xd5\x0f\x8d\xe8\x4b\x6b\x92\xfa\xbe\x15\x27\xe1\x82\xf1\xf6\x6b\xc0\x0c\x0b\x4c\xf6\x7b\x4b\x81\xe9\x20\x66\xa9\x61\xd4\x0f\x2d\x05\xaa\x8b\xa8\x55\x81\x52\xbf\xb0\x14\xa0\x1e\x82\xd6\x1a\xc8\x3b\x2d\x45\x3a\x42\xd0\xa6\x00\xc5\xde\x47\x1d\x73\x8c\x98\xad\x86\xc9\x7e\xc7\xd0\x50\x7d\x44\xed\x0a\x94\x50\xeb\xc1\x02\x35\x00\x94\xba\x83\x56\xf8\x4b\x07\x0d\x11\xa4\xee\x50\xa0\xb1\x74\xd0\x65\xfb\xf0\x35\x74\xba\xba\xa1\x56\xf2\x54\x19\xda\x2e\xa0\x5d\x3d\x64\x66\xa7\x32\xb4\x53\x40\x7b\x3a\x34\x77\x52\x19\xdc\x2d\xc0\x47\x1a\x18\x4c\x54\x06\xf7\x0a\xf0\xb1\x0e\x56\xfe\x29\x63\x8f\x0a\x6c\x5f\xc3\xe6\xd6\x29\x63\x8f\x0b\xec\x40\xc7\x82\x6b\xca\xe8\x7e\x81\x1e\xea\xbd\x96\x1b\xa6\x0c\x1e\x14\xe0\xa5\x06\x06\xaf\x94\xc1\xc3\x02\xbc\x2a\x77\x72\x90\x9c\x97\xa0\x1d\x6d\xe8\x88\xac\x4b\xbd\x5d\x29\x6d\xb6\x44\x97\x0d\x89\xfc\x4b\xb0\x46\x92\xb5\x2a\x58\x44\x22\x26\x58\x63\xc9\x5a\x17\x2c\x22\x1d\x13\x2c\x57\xb2\x36\xc8\x5a\x12\x49\x99\x7a\x95\x4c\xb2\xb6\x05\x8b\x48\xcc\x04\x6b\x22\x59\xbb\x82\x45\x24\x67\x82\x35\x95\xac\x4f\x05\x8b\xc8\xd0\xd4\xb1\x0b\x92\x75\x55\xb0\x88\x34\x4d\xbd\x36\x26\x59\xd7\x05\x8b\xc8\xd4\x04\xeb\xbd\x64\x7d\x2e\x58\x44\xb6\x26\x58\x27\x92\xf5\xa5\x60\x11\x19\x9b\x60\xf9\x92\x75\x53\xb0\x88\xe5\x0a\xc1\x9a\x4b\xd6\xd7\x82\x45\x2c\x5c\xe8\x07\x76\x2e\x5b\xb7\x05\x8b\x58\xc3\x50\x2f\x60\x48\xd6\x5d\xc1\x22\x16\x33\x04\x4b\xfa\xab\xf5\xad\x60\x3d\xcf\x5f\x1f\x24\xeb\x7f\x05\xeb\x79\xfe\xca\x96\x7c\xf7\x05\xeb\x79\xfe\x8a\x25\xeb\xa1\x60\x3d\xcf\x5f\xd9\x6a\xed\x11\x59\xab\xe7\xf9\x2b\x95\xac\xa7\x82\xf5\x3c\x7f\x9d\x4a\xd6\xf7\x82\xf5\x3c\x7f\x9d\x49\xd6\x8f\x82\xf5\x3c\x7f\x65\xeb\xdb\x7f\x0b\xd6\xf3\xfc\x95\x2d\x3b\x7e\x16\xac\xe7\xf9\xeb\x42\xb2\x7e\x15\xac\xd2\xee\x94\x7e\x2f\x3e\x3b\x90\x04\xf7\x20\xab\x7e\x23\x63\x94\x7d\x29\xa6\x60\x0c\x1a\x19\x32\xdf\x8e\x35\xc6\xb0\x91\xe1\x66\x9f\xdf\x28\x18\xcb\x46\x86\xfa\x68\x45\xc1\x58\x35\x32\x64\x8e\x9d\x68\x8c\x75\x23\x43\xe6\xd7\xa9\xc6\xd8\x34\x32\x66\xd9\x39\xf7\x05\x63\xdb\xc8\xf0\xb2\xd7\x76\x0b\xc6\xae\x91\x21\x73\xea\xfb\x82\xb1\x3e\x6c\x64\x9c\x64\x27\xb6\x17\x8c\x76\x23\xc3\xcf\x0e\xe7\x2e\x18\x9d\x46\xc6\x3c\x3b\x8b\xba\x60\x74\x1b\x19\x41\x76\x84\x71\xc1\xe8\x35\x32\xc2\xec\x95\xe3\x82\x71\xd4\xc8\x90\xb9\x73\xa1\x31\x9a\xfd\x21\xf3\xe6\x07\x8d\xd1\xec\x8f\x28\x3b\xa4\xb5\x60\x34\xfb\x23\xce\xce\x20\x2d\x18\xcd\xfe\x48\xb2\x23\x16\x0b\x46\xb3\x3f\xd2\xec\xd5\xbc\x82\xd1\xec\x8f\xd3\xec\xec\xb9\x82\xd1\xec\x0f\x75\x4a\x59\xc1\x68\xf6\x87\xcc\x8d\x1f\x35\x46\xb3\x3f\xce\xb3\xd7\x95\x0a\x46\xb3\x3f\x2e\xb2\x13\xb2\x90\xb1\x21\xfc\x51\x59\x89\x2e\x35\x3c\xe1\x8e\xca\x1a\x74\xa5\xe1\x09\x6f\x54\x56\x9f\x6b\x0d\x4f\x38\xa3\xb2\xee\xdc\x68\x78\xc2\x17\x95\x15\xe7\x56\
xc3\x13\xae\xa8\xac\x35\x77\x1a\x9e\xf0\x44\x65\x95\xf9\x49\xc3\x13\x8e\xa8\xac\x2f\xaf\x34\x3c\xe1\x87\xca\xca\xf2\x5a\xc3\x13\x6e\xa8\xac\x29\x3f\x6b\x78\xc2\x0b\x95\xd5\xe4\x17\x0d\x4f\x38\xa1\xb2\x8e\xbc\xd1\xf0\x84\x0f\x2a\x2b\xc8\xaf\x1a\x9e\x70\x41\x65\xed\x78\xab\xe1\x09\x0f\x54\x56\x8d\x77\x1a\x9e\x70\x40\x65\xbd\xf8\xad\xc0\x6f\x9b\xf4\x2f\x33\xde\xff\x34\x7c\x93\xfe\xa3\xec\x44\xe9\x02\xdf\xa4\xff\x38\x3b\x3a\xb9\xc0\x37\xe9\x3f\xc9\x0e\x84\x2d\xf0\x4d\xfa\x4f\xb3\xb7\x89\x0b\x7c\x93\xfe\x4f\xb3\xc3\x32\x0b\x7c\x93\xfe\xd5\xa1\x8a\x05\xbe\x49\xff\x32\xc7\xfd\xab\xe1\x9b\xf4\x7f\x9e\xbd\x59\x59\xe0\x9b\xf4\x7f\x91\x1d\xe5\x57\xe0\x4b\xfa\xd7\xdf\x16\xb8\x2c\xee\x35\x1f\xa9\x47\x63\x46\xe1\x47\xcb\x8d\x9c\x33\x2f\x98\xc6\x96\x9f\xdd\xca\x29\x7e\x70\x64\xfc\xea\x0a\xd0\xed\x0a\x7a\x26\x9c\xd3\xf3\x12\x7a\x36\x03\x74\xc7\x10\x1b\x7e\xf6\x64\xfc\xfb\x77\xc0\x76\x0d\x91\x0b\xec\xe9\x29\x60\x7b\x86\xb8\xf9\x4b\x45\xae\x13\xcf\x4a\x35\xea\x1e\x00\xf3\xc8\x70\x15\x23\x13\xfb\xe9\xf8\x19\xd7\x2c\xea\xda\x7d\x01\xbc\xfe\x33\xae\xa8\xf1\xde\x00\x6f\x60\xb8\xde\x87\xd4\x71\xa3\x94\xac\x6c\x0f\x9b\x39\x34\x5c\xb4\x8e\x8c\x2d\x5d\x3e\xef\xca\x45\xa5\x7b\xd8\xd8\xd5\xf3\xae\xab\x51\xb1\xbd\x6b\xc3\x55\xd5\x73\x6d\x81\x6b\xe5\xcf\xfa\x6f\xee\x81\xb1\xa9\x30\x32\xac\x9f\x3f\xd3\x93\x33\x54\x0d\x18\xdf\x44\xc0\xdb\xd2\x3c\x55\xd3\x82\xe7\xab\xeb\xb9\x78\xbd\x9d\xa1\x71\xfb\x35\x74\xe1\x4a\x6d\x93\xb7\x90\xe1\x8b\x89\x6c\xd2\x0d\x10\xaa\xf6\xda\x6b\x52\xf6\x8b\x3c\xb4\xc8\x07\x5a\xd5\x67\x7b\x2d\xca\x68\xd0\x20\xbc\x9a\xc9\x72\x7b\xd5\xf3\x5d\x20\x98\x7c\x97\x2e\xf4\xf6\x3f\x41\x8f\xb5\xab\x6e\x4b\x17\xa6\xf1\x79\xc2\x5e\xab\x3a\x2d\x5d\x98\x46\x27\xc5\x6b\x99\x7c\x56\xae\x5b\x8a\x57\x31\xf9\x2b\xc7\xab\xa6\x3f\x61\x5f\x55\x1d\x55\x6a\x8a\x3e\x2e\x4f\x38\x2e\x55\x27\x95\x5a\xa2\x8f\x4a\x8a\x57\x32\x79\xa8\x54\xb1\x14\xaf\x61\xf2\x0d\x3e\x5a\xa2\xb5\xfe\x3b\xf6\x56\xd5\x3b\xda\x1b\xc5\xe4\xf8\x7c\xc7\x9e\xab\xfa\x87\x18\x9f\x42\xb7\x72\xbf\xb1\x81\xe7\x7c\x8f\xda\x55\x17\x91\xee\x83\xde\x95\x73\xb8\x8b\xec\x4e\xd5\x51\x58\x6f\x83\x42\x4e\xa1\xcd\x9d\x46\x73\xe1\x95\xf3\x26\x5f\xa6\x9b\x08\xaf\x5c\xf5\x18\xa1\xe5\xa2\x29\x59\xbd\x0b\x76\xd3\xec\xa6\x8f\xd2\x29\xf4\x74\xc7\xe4\xb7\x12\x4b\x09\xe2\x3b\xe8\xa7\x53\x35\x1d\x31\xb4\xba\x5e\xbf\x83\x96\x3a\x0d\xce\xcb\xdf\xbe\x2e\x0d\xec\x0d\x36\xb1\x6a\x40\x2a\x09\x95\xc6\xb5\x20\x57\xdd\x48\x8c\xab\xee\x97\x53\x6c\x6f\xd5\x99\x54\xce\x2c\x8d\xaa\x8f\xd7\x25\x1d\xba\x47\xdd\x1b\xd4\x82\x6c\x72\x2a\x31\x3c\xa7\xd8\xc7\x8d\xf3\x9c\x3e\x29\x6f\x60\xe1\xd5\xa9\x1a\xb6\x18\x42\x93\xed\x36\x7e\x61\x9c\xaa\x69\x35\x77\x1b\x46\x77\x73\x53\x08\xd8\x60\xdb\xa2\xab\xb4\x07\xd4\x60\x7e\x82\x75\x60\xb7\xea\xda\x3d\x69\x68\x64\x98\xa5\xa0\xe5\xdd\xaa\x6d\x35\x7f\x1b\x46\xc9\x2d\x5a\xde\x25\x16\xa1\xa8\x23\x93\x75\xdd\xa2\xe5\xdd\xc6\x59\x52\x1f\x30\x17\xdb\xdc\x30\x57\xea\xa4\x27\x6c\x6b\xd5\xbb\xe4\x28\xeb\x26\xd2\x5a\x5a\xb5\x2f\x39\xc6\x3a\x5b\x6b\x67\xd5\xbf\x25\x2b\x54\xc7\xf7\x09\xdb\x5a\x75\x6f\x29\x6f\x54\x47\x37\xc5\x16\x57\xdd\x4b\x8e\xae\x6e\x5f\xad\xc5\xc4\x52\x95\x1a\x5b\x9d\xad\xb5\xb8\x61\xa6\xd5\x87\x28\xc5\xb6\x3e\x6b\xbe\xd5\xa9\xdf\xb1\xad\xcf\xf4\x70\x39\x5d\xcb\x4d\xa9\xd6\xe2\x67\xfa\xb8\x1a\x43\x6b\x77\xd5\xcb\xc4\x0c\x51\x1d\xef\xef\xd0\x07\xbd\xaa\x9f\x4b\xe3\xad\xac\x54\x19\xf4\xcb\x74\x73\x05\x75\xe8\x19\x26\xe3\x22\x88\x8c\x58\x0d\xf1\xe4\x16\x21\xaa\xc6\x26\xe6\x8c\x6a\x8c\x53\x18\x8f\x5e\xd5\xd9\x25\xad\x99\x57\x33\xc5\x78\xf4\xaa\x36\x2f\x4b\xce\x3c\x71\xe2\x78\xf4\x0c\xae\x6f\x5e\x17\x69\xf5\x30\x79\xbf\x79\x0e\x2e\xea\x61\x98\xc1\x0b\x4d
\x94\xc7\xa4\x98\x4f\x67\x18\x82\xcc\x04\x45\x80\x7d\x65\x14\xb3\x6a\x11\xe2\x99\x19\xa1\x3c\xd6\x8c\x5f\x9e\x6a\xbd\xf1\xcc\xbc\x50\x8d\xa1\xf5\xc6\xb3\x66\x77\xdd\xe8\xa7\xe0\x8f\x7e\x55\xdc\xf9\x1c\xef\x39\xd3\xec\x1d\x95\x74\xb1\x10\x51\x5e\x89\x24\xb4\xfc\xf0\x4c\x44\xf9\x4a\x61\x02\xbb\xad\xbe\xe9\xee\xc9\x5e\x94\xac\x65\x18\x24\x5f\x43\x8e\x20\x8a\x7a\x64\x4c\x21\xb3\xa7\xa2\x47\x7e\x38\x3e\x61\x3c\x19\x01\x40\xbd\x5b\x90\x91\x75\x80\x8f\x80\x6c\x4c\xb3\x87\x3a\xf2\xa2\x09\x16\x15\x87\xf2\xe8\xd4\x1b\x28\x57\x8f\xfe\xe4\x09\x4a\x03\x44\x08\x50\x07\xa8\xa9\x33\x86\x66\xd9\x59\x02\xaf\x63\x28\x53\x2f\x25\x09\xd7\x4b\xe7\x50\xf8\x16\x0b\xd5\x7b\x73\xd9\x3b\xfa\xaa\xe8\x1f\x28\x52\xcf\x63\x8c\x7c\x67\x7c\x92\xbf\x34\xc4\xf8\x0e\x0b\xb3\x0b\x9e\xcd\xbc\x44\x60\x61\x88\x85\x9d\xfd\xc2\xfc\x53\x0f\x61\x1a\xb8\xc2\xb5\xc6\x61\x14\x88\x28\x66\x3c\x0a\x81\xd1\xad\x30\xf2\xd7\xb4\xbc\x60\x0a\x75\xc8\xee\xd1\xc1\xc5\x22\xd8\x8e\xa8\xc7\x2b\xf4\xcb\x68\xbe\x98\x78\xbe\xcf\x78\x34\x01\xec\xd1\x3e\x16\x45\x98\x23\xcf\x01\x79\xbc\x8f\x0c\xa3\x64\x16\x2a\xbd\x8c\xa3\x30\x8e\x67\x4e\x32\x9e\x01\x0d\x14\xab\x9e\xa4\xd0\x69\x46\x6d\x01\xf5\x02\xa8\x03\x9a\x4a\x88\x1b\xa8\x27\x40\x1d\xee\x53\x51\xda\xd5\xaa\x7e\x04\xd2\x52\x1b\xde\x52\xd7\x3e\x80\xaa\xd4\x93\x10\x0a\x12\x89\x71\x92\xbf\xc9\x15\xc3\xbe\x4a\x3d\xf3\xa0\x46\x4d\x2b\x0f\x61\x68\xd4\x53\x00\x8a\x9f\x2e\x8a\x87\xff\x93\xc8\xcb\xa1\x69\x02\x50\x4d\x00\x34\xf4\x09\xa1\xc7\x5a\xad\xca\x6f\x15\x14\xe8\x05\xd4\x51\xfd\xfe\x9e\xd7\xd1\x84\x4e\xb0\xc6\x5a\x8b\x65\x8e\xa5\xc0\xee\x23\x80\xb5\xe6\x9b\xc0\x1b\xa8\xf5\x5a\x73\x53\xf9\xbd\x09\xad\xd2\xb0\xb9\x59\x6b\xee\x32\x81\x13\xd8\xb8\xad\xb5\xfe\x70\x3d\x67\x1e\x06\x2e\xe3\xee\x0a\x4a\xb5\xf6\x17\xa5\x3f\xa0\x54\xbd\x9e\x12\x5e\x88\x60\x2a\x18\xf7\x41\x8c\xea\xd7\x4c\xc5\x52\x77\xbe\x19\x3f\xfc\x0a\x85\x6a\xed\x94\xfa\x7e\x2c\xce\x65\xc1\x1d\x14\xec\x8a\x9a\x20\x2b\x3f\xf3\xe3\x48\xff\x99\x30\xff\x2e\x8d\x96\xef\xb2\x37\x7e\x0e\xa1\xfd\xfa\x4f\x84\xf0\x95\x18\x2d\xf7\x29\x30\x8c\xb0\xfa\xfd\x2b\x7b\x93\x23\x16\xf9\xeb\x8f\x8c\xc7\xb7\x50\x3c\xd4\x8b\xcb\x4d\xf2\xd6\x39\x68\xab\x29\x55\xf7\x68\xd1\xdd\x93\x0d\x40\xbb\xfb\x50\x65\x6e\x44\x8e\x72\xe4\x71\x7e\x93\x5c\x39\x2c\x71\x22\xc6\x7f\xeb\x40\xd1\xb1\x96\xef\x54\x51\x1b\x8a\xb6\x05\x0b\x5f\xe5\x61\xfc\x32\x11\xb0\x61\x3e\x56\x77\x50\x15\x5b\x87\x3c\x6e\xe1\x46\xc0\xb1\xba\x9d\x55\x28\xc8\x42\x05\x79\x81\x2b\x3e\x32\xfe\xd7\x0c\x90\xdb\x3d\x83\x54\xa0\xef\x00\xaa\xb6\x09\x79\xad\xe7\xd9\xab\x4e\xd6\x24\x3b\xc2\xef\xf0\x09\x20\x2b\x3d\xab\x94\x20\x29\x40\xb4\xaa\xc5\x69\xfe\xf5\xe5\xc8\x39\x8f\x19\x8f\x01\xa3\x56\xc6\x13\x31\x77\xf0\x54\xb6\xc9\x57\x28\x53\xf3\x59\x51\x32\xcf\x9d\x70\x7c\xac\xcf\x58\x0b\xc7\x95\xf1\xbd\x84\xf1\x75\x0c\x00\xcd\x57\x33\xe1\x44\x09\x00\xa0\x85\xc7\x9d\x8a\x5b\x00\xe2\x02\x44\x1b\xff\xb1\x9f\x8e\xa0\x7c\x0c\xe5\x3d\xad\x79\x5a\x25\x2e\xd7\xf1\x01\x8c\xce\xb1\xa6\x0c\xbd\x1e\x97\xeb\x59\x81\xa9\xda\x1a\x51\x6e\x81\xd2\xec\xad\xd5\xe6\x72\x3d\x2e\x20\x99\x01\x64\x6e\xcf\x7e\x1c\x0d\xb3\xaf\xb4\x83\x46\xd5\x3b\x2d\xea\x70\x15\x28\x1b\x40\x99\x1a\x4b\x27\x8a\x84\xab\x43\x62\xc6\xe7\x20\x64\xf5\xfe\x4a\x8e\xc9\x9f\xb5\x2d\x60\x97\xf3\xf6\x31\x56\x23\xcb\x96\xf3\x34\xf6\xc6\xd6\xc4\x77\xe0\x85\xb6\xf9\x0a\xca\xb7\x45\x79\xe0\x24\xa9\x3a\xa1\x22\x83\xfc\x0b\x90\x5d\x01\x89\x67\x4e\xb4\x00\x40\x3e\xa7\xf5\xd5\x9d\xf8\xec\xcd\xab\xfc\x48\xa2\xf0\x04\x8a\xd4\xa2\xdc\xf1\xfd\x30\xb1\x3e\x32\xfe\x11\x38\xea\x56\xe8\xdc\xf1\x13\x11\x0b\x35\x5f\x32\x7e\xa0\x4a\xbb\x87\xea\x67\x3d\xcf\x15\xe1\x34\x72\x16\x33\x79\x61\x75\x68\xa5\x17\x0
3\xa2\xbd\x8f\x18\x87\xf3\xb9\xc3\xf8\x1f\x97\x80\xe8\xec\x23\x16\x22\xf2\x42\x97\xf1\xd7\x08\x51\xcb\x30\x2f\x49\xc2\xbc\xe2\xbf\xff\x07\x8a\x7a\xfb\xec\x12\x0c\x23\x1c\xed\xc3\xbc\x44\x44\xfa\x49\x7a\xbf\x21\xf4\xb8\x52\x63\x3f\x8c\xa5\x53\x15\xf0\x4f\x04\xf6\xf7\x81\xf9\x03\xca\xf9\x61\x69\x88\x2b\x5e\x72\x84\x83\x03\xf6\xde\x05\xfc\x1d\x90\xda\x3b\x8f\x34\xf4\x1d\x42\x8b\x35\xb1\x5a\x2f\x6a\xe1\x5e\x02\x46\x3b\xa1\x72\x1f\xf4\x0e\x41\x5b\x0c\x94\xfb\x64\x3f\x1c\x76\xb5\xf6\x2e\x3b\x0d\x7d\x07\xd0\x36\x1e\xc0\x80\x73\x7a\x90\x78\xe3\xd4\x77\x34\x74\x0b\xd1\xed\x22\xb0\x19\xfe\x0a\xe1\xea\xe9\xc9\x30\x96\x4b\x58\x35\x26\x7f\x27\x50\xa6\x4e\xb2\x15\x89\x03\x25\x30\x0a\xed\xe2\x10\x87\x24\x8c\x92\xd0\x8b\x85\x15\xcf\x84\xdc\x6e\x60\x85\xa0\x4f\xd4\x2f\x4a\xb0\xb4\xa4\xc1\xaf\x10\x7c\xbc\xd7\x81\x64\x5b\x3d\x40\xf7\xf7\x3b\x91\x6c\x2b\xc2\xd5\x6c\xe0\x9c\x0a\x38\xc6\xf5\x9f\xbc\x44\x19\x53\xeb\x85\x7c\x22\xb9\xfc\x3b\x79\xfb\x0a\x1a\xad\xee\x7f\xcc\xbc\xc8\x99\x3a\x81\x83\x4f\x44\x64\xeb\x59\x87\x71\xe7\x08\x70\x1d\x0a\xe7\x30\xbe\x44\x44\xd7\x1c\xc9\x63\xdc\x43\x5c\x8f\xc2\x79\x8c\x5f\x23\xe2\xc8\x1c\x29\x65\x3c\x45\xdc\x31\x85\x4b\x19\x7f\x42\x44\xdf\x1c\x49\x30\x2e\x10\x37\xa0\x70\x82\xf1\x2d\x22\x86\xe6\x48\x21\xe3\x21\xe2\x96\x14\x2e\x64\xfc\x0e\x11\x2b\x0a\x71\xe2\x30\xfe\x65\x09\x90\x35\x05\x99\x3a\x8c\x7f\x42\xc8\x86\x8c\xe2\x31\xfe\xe5\x1a\x20\x5b\x32\x8a\xc7\xf8\x27\x84\xec\xc8\x28\x29\xe3\x5f\x9e\x72\xc8\xd1\x21\x19\x25\x65\xfc\x13\x42\x48\x0d\x9d\x08\xc6\xbf\x6c\x01\x42\xca\x47\x2e\x99\x3f\x21\x84\xd4\xcf\x49\xc8\xf8\x97\x3b\x80\x90\xd2\x99\x86\x8c\x7f\x42\x08\xad\x1d\x87\xf1\x07\xe8\xba\x23\x52\x36\x17\x0e\xe3\xbf\x10\x42\xeb\xc6\x63\xfc\x01\xba\xee\x88\x94\xcc\x85\xc7\xf8\x2f\x84\xd0\x9a\x49\x19\x7f\xc0\xae\x23\xe5\x72\x91\x32\xfe\x0b\x21\xa4\x5e\x62\xb9\xcf\xc4\xae\x23\xf5\x72\x21\x18\xff\x85\x10\x52\x2f\x71\xc8\xf8\x03\x76\x1d\xa9\x97\x8b\x90\xf1\x5f\x08\x21\xf5\x92\x38\x8c\x3f\x42\xd7\x1d\x93\x7a\x71\x1d\xc6\x37\x08\x21\xf5\x92\x78\x8c\x3f\x42\xd7\x1d\x93\x7a\x71\x3d\xc6\x37\x08\xa9\xc9\x37\x49\xca\xf8\x63\x0a\x40\x52\x35\x19\x04\xfa\xf8\x98\x54\x8d\x9b\x32\xbe\x41\x08\xa9\x1a\xb9\xde\x7b\x84\x3e\x3e\x26\x55\xe3\xca\x5d\x2c\x42\x48\xd5\x24\x21\xe3\x8f\xd0\xc7\xc7\xa4\x6a\xdc\x90\xf1\x0d\x42\x48\xd5\x04\x0e\xe3\xb7\xd8\xc7\xa4\x6a\x02\x8f\xf1\x5b\xec\x40\x52\x35\x41\xca\xf8\x2d\x36\x9a\x54\x8d\xdc\x2b\xdd\x62\x8b\x48\xd5\x04\x21\xe3\xb7\x58\x5d\x52\x35\x33\x87\xf1\x2b\xa8\x6e\x9f\x54\xcd\xc8\x61\x7c\x85\x10\x52\x35\x0b\x87\xf1\x6f\x08\x21\x55\x33\xf3\x18\xbf\x82\x46\xf7\x49\xd5\x8c\x3c\xc6\x57\x08\x21\xf5\xb2\xf0\x18\xff\x86\x10\x52\x2f\xb3\x94\xf1\x2b\xe8\xba\x3e\xa9\x97\x51\xca\xf8\x0a\x21\xa4\x5e\x16\x29\xe3\xdf\x10\x42\xea\x65\x26\x18\xbf\x82\x01\xe8\x93\x7a\x19\x09\xc6\x57\x08\x21\xf5\xb2\x10\x8c\x7f\x43\x08\xa9\x97\x59\xc8\xf8\x15\x0c\x63\x9f\xd4\xcb\x28\x64\x7c\x85\x10\x52\x2f\x8b\x90\xf1\x6f\x08\x21\xf5\x22\xd7\xfb\x5f\x71\x18\x49\xbd\xcc\x3d\xc6\xbf\xc2\x00\x0c\x48\xbd\xcc\x53\xc6\xbf\x42\xd7\x0d\x48\xbd\xcc\x05\xe3\x5f\xa1\xd1\x03\x52\x2f\xf3\x90\xf1\xaf\x50\xdd\x41\x4d\x96\x39\x77\x18\xff\xe9\x00\x90\x54\x4d\x06\x81\x76\x0d\x6a\xd6\x35\xe7\x29\xe3\x3f\x21\x63\x0d\x48\xed\x64\x10\x6c\x5d\xcd\xca\xe6\x3c\x64\xfc\x67\x08\x40\x52\x41\x19\x04\xdb\x48\x2a\x28\x72\x18\xbf\xc7\xaa\x93\x0a\x8a\x3c\xc6\xef\x71\x48\x48\x05\x45\x29\xe3\xf7\x58\x69\x52\x41\x91\x60\xfc\x1e\x87\x84\x54\x90\xdc\x2d\xdd\x63\x75\x49\x05\xa9\xa6\x9f\x39\x8c\xff\xc0\x21\x21\x75\x94\x41\xa0\x5d\x43\x52\x47\x67\x1e\xe3\x3f\xa0\x5d\x43\x52\x47\x67\x82\xf1\x1f\x50\xe9\x21\xa9\xa3\xb3\x90\xf1\x1f\x50\xe9\x
21\xa9\xa3\x80\xf1\x5b\x58\x17\x0e\x49\x01\x9d\xa6\x8c\x7f\x87\xfe\x53\x2f\xb9\x9e\x38\x89\x73\xe2\x04\xce\x01\x62\x4f\x43\x6f\x2c\xdc\xfc\x60\xe2\xfc\xc3\x02\x18\x76\x4d\x73\x62\x31\xf7\x0e\x08\xe2\x21\x12\xcb\x23\x51\xd9\x0d\x23\xae\x3c\x1c\x79\xc8\x7d\xf8\xef\x00\x57\x3f\x7c\x40\x7d\x2a\x1b\x8e\xe5\x31\xe0\x3a\x14\x4e\x6e\x49\x10\xd1\x35\x47\x92\xdb\x09\xc4\xf5\x28\x9c\xdc\x92\x20\xe2\xc8\x1c\x49\x6e\x27\x10\x77\x4c\xe1\xe4\x96\x04\x11\x7d\x73\x24\xb9\x9d\x40\xdc\x80\xc2\xc9\x2d\x09\x22\x86\xe6\x48\x72\x3b\x81\xb8\x25\x85\x93\x5b\x12\x44\xac\x28\x84\xdc\x70\x9c\x80\x0d\x96\x6b\x0a\x22\x37\x1c\x53\x84\x6c\xc8\x28\x1e\xe3\x27\xe0\x14\xf5\x32\x66\x25\x8a\xc7\xf8\x14\x21\x3b\x32\x4a\xca\xf8\x09\x88\x5c\xbd\x66\x59\x89\x92\x32\x3e\x45\x08\xa9\x21\xb9\xe1\x38\x01\x4b\xae\x48\xf9\xc8\x0d\xc7\x14\x21\xa4\x7e\xe4\x86\xe3\x04\x5c\xbb\x22\xa5\x23\x37\x1c\x53\x84\xd0\xda\x71\x18\x8f\xa1\xeb\x56\xa4\x6c\xe4\x86\xe3\x02\x21\xb4\x6e\x3c\xc6\x63\xe8\xba\x15\x29\x19\xb9\xe1\xb8\x40\x08\xad\x99\x94\xf1\x18\xbb\x8e\x94\x8b\xdc\x70\x5c\x20\x84\xd4\x8b\xdc\x70\xc4\xd8\x75\xa4\x5e\xe4\x86\xe3\x02\x21\xa4\x5e\xe4\x86\x23\xc6\xae\x23\xf5\x22\x37\x1c\x17\x08\x21\xf5\x22\x37\x1c\x09\x74\xdd\x9a\xd4\x8b\xdc\x70\xb8\x08\x21\xf5\x22\x37\x1c\x09\x74\xdd\x9a\xd4\x8b\xdc\x70\xb8\x08\xa9\xc9\x37\x72\x37\x91\xc0\xf4\xbd\x26\x55\x93\x41\xa0\x8f\xd7\xa4\x6a\xe4\x86\xc3\x45\x08\xa9\x1a\xb9\xe1\x48\xa0\x8f\xd7\xa4\x6a\xe4\x86\xc3\x45\x08\xa9\x1a\xb9\xe1\x48\xa0\x8f\xd7\xa4\x6a\xe4\x86\xc3\x45\x08\xa9\x1a\xb9\xe1\x08\xb0\x8f\x49\xd5\xc8\x0d\x47\x80\x1d\x48\xaa\x46\x6e\x38\x02\x6c\x34\xa9\x1a\xb9\xe1\x08\xb0\x45\xa4\x6a\xe4\x86\x23\xc0\xea\x92\xaa\x91\x1b\x8e\x19\x54\x77\x43\xaa\x46\x6e\x38\x46\x08\x21\x55\x23\x37\x1c\x0b\x84\x90\xaa\x91\x1b\x8e\x19\x34\x7a\x43\xaa\x46\x6e\x38\x46\x08\x21\xf5\x22\x37\x1c\x0b\x84\x90\x7a\x91\x1b\x8e\x19\x74\xdd\x86\xd4\x8b\xdc\x70\x8c\x10\x42\xea\x45\x6e\x38\x16\x08\x21\xf5\x22\x37\x1c\x33\x18\x80\x0d\xa9\x17\xb9\xe1\x18\x21\x84\xd4\x8b\xdc\x70\x2c\x10\x42\xea\x45\x6e\x38\x66\x30\x8c\x1b\x52\x2f\x72\xc3\x31\x42\x08\xa9\x17\xb9\xe1\x58\x20\x84\xd4\x8b\xdc\x70\xcc\x71\x18\x49\xbd\xc8\x0d\xc7\x1c\x06\x60\x4b\xea\x45\x6e\x38\xe6\xd0\x75\x5b\x52\x2f\x72\xc3\x31\x87\x46\x6f\x49\xbd\xc8\x0d\xc7\x1c\xaa\xbb\xad\xc9\x32\x72\x37\x71\x0e\xab\xdb\x2d\xa9\x9a\x0c\x02\xed\xda\xd6\xac\x6b\xe4\x6e\xe2\x1c\x32\xd6\x96\xd4\x4e\x06\xc1\xd6\xd5\xac\x6c\xe4\x6e\xe2\x1c\x36\x1c\x5b\x52\x41\x19\x04\xdb\x48\x2a\x48\x6e\x38\x22\xac\x3a\xa9\x20\xb9\xe1\x88\x70\x48\x48\x05\xc9\x0d\x47\x84\x95\x26\x15\x94\x3d\xfe\x82\x43\x42\x2a\x48\x6e\x38\x22\xac\x2e\xa9\xa0\x62\xc3\x71\x86\x43\x42\xea\x28\x83\x40\xbb\x76\xa4\x8e\xe4\x86\xe3\x0c\xda\xb5\x23\x75\x24\x37\x1c\x67\x50\xe9\x1d\xa9\x23\xb9\xe1\x38\x83\x4a\xef\x48\x1d\x05\x8c\x07\xb0\x2e\xdc\x91\x02\x92\x1b\x8e\x53\xe8\xbf\x5d\x8d\x80\xb2\x05\x24\xb4\x7c\x47\x0a\x28\x07\xca\x75\x99\x00\x20\x29\xa3\x53\x87\xf1\x53\xec\x22\x52\x40\xa7\x1e\xe3\xa7\xd8\x45\xa4\x80\x4e\x05\xe3\xa7\xd8\x45\xa4\x80\x4e\x43\xc6\x4f\xb1\x8b\xca\x02\x9a\x7b\xae\xeb\x0b\xcb\x0d\x13\xc6\x5f\x63\x27\x19\xb6\x4f\x8b\x28\xf4\xc3\x60\xba\xb7\x79\x3a\x40\x5a\x59\x55\x95\xcd\x13\xe2\xca\xd2\x32\x6d\x9e\x72\x78\x1b\x5e\x1b\x5c\x84\xf3\x70\x12\x62\x46\x64\x7c\xd5\x03\xc4\x31\x85\x58\x30\xfe\x0d\x11\x7d\x0a\x31\x67\xfc\x2b\x22\x06\x14\x62\xc2\xf8\x0e\x11\x43\x0a\xe1\x32\xbe\x41\xc4\x92\x42\x24\x8c\x3f\x22\x62\x45\x21\xe4\x8e\x18\x11\x6b\x0a\xe1\x33\x7e\x83\x88\x0d\x85\x98\x32\xfe\x09\x11\x5b\x0a\x71\xc2\xf8\x17\x44\xec\x28\xc4\x8c\xf1\x2b\x40\xc0\xeb\x73\x65\xc4\x7b\xc6\x3f\x23\xa2\x4d\x21\x3e\x30\xfe\x3f\x44\x74\
x28\xc4\x47\xc6\xff\x45\x44\x97\x42\x5c\xcc\x18\xff\x75\x05\x90\x1e\x05\x19\xcf\x18\x5f\x23\x84\x54\x48\x3c\x63\xfc\x01\x21\xa4\x44\x22\xc6\xef\xb1\x2a\xa4\x44\x2e\x18\xff\x85\x08\x52\x22\x63\xc6\xd7\x88\x20\x25\x12\x33\xfe\x80\x08\x52\x22\x0e\xe3\x4b\x44\x90\x12\x91\xbb\x5f\x44\x90\x12\x91\x3b\x6d\x44\x90\x12\x11\x33\xc6\x2f\xb7\x57\xf9\xbb\xa6\xdd\x76\x9b\x94\x89\xe3\x31\xbe\xbc\x06\x08\xa9\x13\xe1\x31\xbe\x05\x48\x87\x14\x8a\x93\x32\xbe\x7c\x02\x08\xa9\x94\x30\x65\xfc\x0e\x21\xa4\x54\x9c\x80\xf1\xe5\x2d\x40\x48\xad\x88\x80\xf1\x2d\x42\x48\xad\x38\xc1\x94\xf1\x65\x00\x18\x52\x2c\x42\x62\xb6\x88\x21\xd5\x22\x22\xc6\xb7\xf7\x00\x21\xe5\xe2\x31\x7e\x0d\x83\xd0\x21\xe5\x92\x32\xfe\x84\x08\x52\x2e\x5e\xca\xf8\x35\xf6\x0b\xa9\x97\x53\xc6\xbf\x63\x10\x3a\xa5\x4c\x19\xbf\x9d\x02\x84\x14\xcc\x34\x60\xfc\x53\xde\x73\x9d\x36\x71\x7e\xd5\xcc\x09\xa6\xa9\x6f\xbd\xcf\x0e\x77\x7a\x9f\xc2\xcf\xe7\xf9\x09\xfd\x65\x2c\x3e\xef\x91\x7f\xec\x66\x0d\x58\xe2\x24\xa8\x02\xab\x3e\x78\x83\x58\xe2\xfc\x27\x0d\x9b\x7f\xf4\x06\xd1\xc4\xb9\x4f\x05\x3a\xff\xf0\x0d\x82\x89\xe3\x9e\x34\xb0\xfa\xf8\x0d\x82\x89\x53\x9e\x0a\xb0\xfa\x00\x0e\x62\x89\xb3\xd3\x34\x6c\xfe\x11\x1c\x44\x13\x67\xa6\x15\x68\xf8\x10\x0e\xa2\x89\xb3\xd2\x0a\x74\xfe\x31\x1c\x04\x13\x47\xa4\x69\x9d\x97\x9f\x4b\xbc\x86\x31\x54\x3f\x37\x9c\x84\x91\x70\x02\x2b\x4e\x9c\xc0\x75\x22\xd7\x8a\xcf\xe7\xa3\xd0\x67\xfc\xf2\x24\x1e\x2b\xe4\x36\x7f\x54\xe9\x7d\xe8\x05\x56\x32\xf3\xe2\xec\x9b\xb6\xea\xf1\xbb\x40\x7c\x4c\xd4\xff\x6c\xcd\x6f\xb7\x5f\xef\x6e\xaf\xd7\xaf\x18\x7f\xb3\x06\x62\x36\x9c\xe9\xed\xf5\xa3\xf5\xf0\x6d\xb9\xde\x5a\x8e\xd5\xf2\xe2\xf0\xc0\x8b\x0e\x06\x07\x6d\xeb\xf0\xb8\xf7\x8a\xf1\xd4\x01\x74\x77\x0f\x3d\x2a\xa3\x87\xc7\x12\x3d\x02\xb4\x3a\x33\x32\x0c\x0e\xe2\x85\x33\xf6\x82\xa9\x95\xce\x7d\x27\x4d\x90\xd3\x1d\x58\x9d\xc3\xf6\x2b\xab\xb5\xbe\x5a\xde\x2f\xd7\x8f\xdb\x7b\xeb\xdb\xf2\xfe\xf1\x15\xe3\xff\xe9\x42\x8c\xa3\xfd\x18\xc5\xc7\x93\xb3\xf6\xa9\xcf\x46\x63\xc8\xfe\xa1\xd5\x1e\x76\xc8\x90\x6d\x08\x79\xbc\x1f\x32\xfb\x22\xf8\x7e\xa4\xf6\x61\xd7\x6a\x0f\xbb\x64\xa8\x17\x10\xaa\xbf\x1f\x4a\xff\x96\xf5\x5e\xa8\x1e\x19\xea\x25\x84\x1a\xec\x87\x1a\x7b\xd1\x38\x9d\x4f\x7c\xf1\x91\x8e\x77\x44\xc6\x7b\x07\xf1\x86\xfb\xf1\xd4\x47\x25\xca\x31\x8e\xc9\x18\xff\x40\x8c\xe5\x7e\x8c\xb9\x33\x8e\xc2\x60\x2f\x48\x9f\x0c\x72\x00\x41\x56\xfb\x41\x46\x91\x38\xdd\xaf\xc8\x80\x8c\xd1\x82\x18\xeb\x8a\x0a\xc2\x44\x7d\xec\x7c\x2f\xce\x90\x8c\xf3\x1a\xe2\x6c\xcc\x6a\xd2\xe3\x74\x0e\x0f\xc9\x38\x6f\x21\xce\x76\x3f\x4e\xf6\xe5\x83\x6a\x85\x3a\x87\xb4\x16\x0f\x21\xd0\xae\xda\x30\xf5\xec\xdc\x38\x13\x52\x39\x16\xad\xa0\xff\xe4\xb1\xda\x87\x15\x05\x39\xfb\x83\xd5\x39\xa4\x47\xfc\x2f\x88\xd1\xae\xc4\x10\xae\xe7\xfb\xce\x5e\x14\xda\x16\x7f\x40\x94\xce\x7e\x94\x70\x1a\x06\xe2\xe4\x59\x55\xf9\x13\x82\x74\xf7\x83\xc0\xc7\xba\x9f\xd5\x2b\x90\x19\xdb\x95\x24\xb4\xf7\xe9\xef\x52\x36\x6a\xd3\x5a\xfe\x1b\x82\x55\xb2\x91\xdc\x6a\x59\x71\xe8\x7b\x6e\xaa\x49\xa8\x63\x4e\x6c\x6f\x20\x94\xf6\xe5\x78\x3d\xa0\x17\x26\x8e\x35\x12\xb2\x76\x10\xee\xe8\xc8\xea\xd2\xba\xbe\x86\x60\x7d\x3a\x98\xfa\xd4\xfd\x22\x10\xe9\xdc\x49\x9c\x52\x40\xda\x70\x1b\x08\x38\xa0\x03\xaa\xef\xe1\x93\x01\xe9\x9e\xfb\x06\x01\x87\xd6\xde\x07\xf8\x2b\x41\xda\x03\x6b\xd8\x79\xc5\xf8\x9f\xa8\x80\xa5\xb5\xf7\x21\x7e\x8a\xd3\xee\xc8\x29\xea\x0f\xd4\xde\xaa\x20\x95\xcf\x9a\x52\x9b\xe8\x91\xd8\xbb\xe4\xf0\x15\xe3\x2b\x98\x6e\xda\xda\x89\x55\xc5\xd4\xd6\xb5\x0e\x3b\x43\x79\x95\x31\x76\x78\x96\x48\x26\x69\x90\x7f\xc8\xd2\x9b\x6a\x3e\xeb\x75\xad\x4e\xa7\xfd\x8a\xf1\x1d\xe4\x2f\xb5\x88\xa7\x0e\x20\xbb\x98\x15\xbc\xa3\x81\xd5\xee\xc9
\x1e\xd8\xe2\x30\x64\xe9\xc1\x09\x12\x61\xcd\x45\xe4\xb9\x9e\x33\xdf\xbf\xd6\xd0\x3a\xec\x1c\xf7\x5f\x31\xbe\xfc\x9a\x93\x8a\xa7\x2b\xeb\x49\x83\x57\x8c\x7f\x43\x52\x66\xfc\x44\xf8\xd6\x38\x9c\x2f\x9c\xc4\x1b\x79\xbe\x97\x9c\x93\x44\xd9\x63\x97\x09\xbc\x91\xb0\x3d\xec\xe4\x1f\x2c\x71\x46\xde\x18\x97\xf3\xbe\x98\x58\x13\x2f\xc8\x3e\x6f\x18\xcd\xf7\xa2\xb6\xbc\xd1\x7c\x70\x2c\x9b\x2b\x7b\xf5\x72\xf9\xfb\x5b\x8c\xa5\x1e\x97\x4e\x93\xf1\xcc\x9a\xa6\x72\xc6\x8a\xa0\x0e\xa3\x79\xaf\xdb\xb7\xda\x47\xf2\xf2\x93\x1b\xc0\x67\xf6\x9e\x3a\xf3\xb9\x43\x0d\x47\xfb\xf0\xb8\x77\x7c\xd0\x76\xbd\xd8\x3a\xec\x76\xde\xa8\xff\xf4\xdf\xb4\x3b\xb2\x9b\xa7\x13\x08\x52\x3c\x33\xab\x3e\xa6\x76\x2a\xc6\x49\x18\xe9\xf9\x9c\x0c\x34\x78\x73\xd8\x93\x2b\x9e\x77\xa7\x10\x48\x33\xf5\xa9\x13\x79\x8e\x99\xdc\xeb\xbc\x69\x67\x8b\xab\x17\xbf\x01\x59\x33\xf1\x42\x44\x5e\xbc\x08\xe7\x22\xf0\x1a\x42\x1c\xbd\x62\xfc\x1f\x0c\x31\x28\x74\xb6\x7f\xf4\xa5\x5a\x2e\xe5\x53\xc2\xcd\xdd\x8f\xed\xfd\x7a\xf9\xb0\x7d\x6b\x1d\x1e\x1e\xbe\x51\xff\x69\xbf\xe9\xf4\x0e\x5f\x31\xfe\x5e\xcd\x06\xbb\x95\x7a\x44\xbe\x24\x5b\x6f\xea\x24\x69\x24\xac\xc9\x84\xf1\xdd\x0e\x70\x6d\x33\xce\x63\x7c\x77\x0d\xb8\x8e\x19\xe7\x33\xbe\xbb\x01\x5c\xb7\xe6\xba\x1e\xe3\x97\xbb\xdd\xf5\x25\x60\x7b\x35\x58\x3f\xc3\xde\x20\xf6\xc8\x8c\x4d\x18\xdf\x3d\x02\xee\xd8\x88\x8b\x13\xc6\x1f\x72\xdc\x56\x3d\x54\x94\x0b\x3f\x9e\x39\xae\xeb\x64\x6f\x63\xe6\xa2\x67\xfc\xb2\xfb\x7b\xfe\xc8\xfd\x6e\x3b\x30\xb9\x24\x1b\x97\x79\x46\x56\x82\x2b\x6c\x23\xbd\x31\x7f\x8d\x11\x7a\x75\x11\x66\xce\xfc\x82\x8e\x30\x2b\x22\x6c\x0c\x11\xbc\x38\xf4\x9d\xfc\x6b\x39\x19\xe7\xf7\x03\xe4\x6c\x1b\xdd\x9d\x11\x8a\x8b\xec\xaa\x84\x91\x98\x55\xae\xb1\x2a\xae\xa1\x1e\x76\x21\x28\x81\x97\x68\xbd\xb9\xfa\xfd\x0f\x64\xb4\x69\x46\xb9\xfb\x57\x45\xf7\x0f\x89\xee\x97\x84\x52\x2b\x56\x45\x2b\xd4\x63\x31\x65\x7c\x22\x2f\xe0\x44\xa3\x34\x71\x2a\xad\x79\xfc\x5a\xb4\x86\x18\x27\x9d\x5a\xba\xe4\xe3\xd7\xe2\x92\x47\x34\xaf\x72\x29\xad\xe3\x8e\x0d\x94\x72\xc7\x3d\x6a\x1d\xd7\x37\x54\xae\xd4\x71\x8f\x5a\xc7\x0d\x68\x42\xb9\x15\x5a\xc7\x0d\x09\xfc\x8c\x6a\xc6\x97\xa2\x19\x4b\x13\x67\xaf\x1d\x5f\x8a\x76\xac\x0c\x94\xbd\x86\x7c\x29\x1a\xb2\x36\x30\xca\x2d\xf9\x52\xb4\x84\x70\xcb\x7b\x21\xe6\x95\x96\x7c\xd2\x06\x84\x70\x8b\xe2\x94\x5b\xf2\x49\x1b\x11\xc2\x2f\x19\xa5\xdc\x92\x4f\xc5\x90\x2c\x09\xbb\x64\x8c\x52\x4b\x3e\x15\x63\xb2\x24\xdc\x32\x73\xaa\x43\x72\x55\x0c\xc9\x92\xf0\x4b\x46\x29\xb7\xe3\xaa\x18\x91\x25\xe1\x18\xc9\x28\x37\xe3\xaa\x18\x90\x25\xe1\x13\x49\x28\xb5\xe2\xaa\x18\x8f\x25\xe1\x8f\x13\xaa\x19\xff\x16\xe3\xb1\x24\x0c\x72\x42\xb4\xe3\xdf\x62\x3c\x96\x84\x43\x4e\xaa\x0d\xf9\x57\x1b\x0f\xc2\x22\x27\x95\x96\xfc\xab\x8d\x07\xe1\x11\xd7\xf1\x2b\x0d\xd9\x68\x0d\x21\x2c\xe2\x66\x6f\xb7\x6b\x97\xd8\x68\x97\x20\xfd\x41\x5d\x43\x1b\x73\xd2\x21\x95\x8b\x68\x23\x42\x38\x24\x22\xac\x7e\xaf\xb5\x83\x30\x48\xb4\x6f\xc2\x7b\xad\x1d\x84\x3b\x2e\x1c\x2f\xa8\x5c\xe3\x57\x71\x8d\x15\xe1\x8f\x8c\x53\xba\xc8\xaf\xe2\x22\x2b\xc2\x1f\xb1\x10\xd5\x8b\x3c\x68\x17\x21\x0c\xa2\x38\x65\x65\x3d\x14\xca\x5a\x11\x0e\xc9\x28\x65\x65\x3d\x14\xca\x5a\x11\x16\xc9\x18\xa5\x96\x3c\x68\x2d\x21\x3c\x12\xcf\xc8\xa6\xdc\x16\x4d\x21\x4c\x92\x93\xf6\xda\x72\x5b\xb4\x85\x70\x89\xe2\xec\x35\xe6\xb6\x68\x0c\x61\x13\x45\x29\xb7\xe6\xb6\x68\x0d\xe1\x93\xd8\x71\x2b\x6d\x59\x6b\xc3\x42\xf8\x24\xa3\x94\x5b\xb2\xd6\x46\x85\x70\x8a\x64\x94\xdb\xb1\xd6\x06\x85\xb0\x89\x24\x94\x5a\xb1\xd6\xc6\x84\x70\x89\x4b\xb4\x62\xb3\x29\x5a\x41\xb8\xc4\xad\xb6\x62\xb3\x29\x5a\x41\xf8\xc4\xad\xb4\x62\xb3\xc1\x56\xac\x09\x93\xb8\xfb\xad\xd8\x6c\xb0\x15\x6b\xc2\x23\x09\x91\x7c\x1
f\x3f\x63\x2b\xd6\x84\x45\x92\x6a\xee\x7d\xfc\x8c\xad\x58\x53\xab\xae\x4a\xea\x7d\xfc\x5c\xb4\x82\x5a\x6b\xed\x67\xde\xc7\xcf\x45\x2b\x08\x7f\x5c\x10\xad\xf8\x35\x2b\x5a\x41\xb8\xe3\xa2\xda\x8a\x5f\xb3\xa2\x15\x84\x37\x2e\x2a\xad\xf8\x35\x2b\x5a\x41\x38\xe3\x62\xbf\x15\xbf\x8a\x75\xfc\x9a\xf0\x05\x95\x13\xb7\x85\x2f\xd6\x84\x2f\x32\x4a\xb9\x15\xdb\xc2\x17\x6b\xc2\x17\x92\x51\x6e\xc5\xb6\xf0\xc5\x9a\xf0\x45\x25\xeb\x6e\x0b\x5f\xac\x09\x5f\x4c\x67\x54\x3b\xae\xb5\x76\x10\xce\xc8\x49\xe5\x96\x5c\x6b\x2d\x21\xbc\xa1\x38\xe5\xb6\x5c\x17\x6d\xd9\x10\xee\x50\x94\x52\x6b\xae\x8b\xd6\x6c\x08\x7f\x4c\x88\xb9\x70\x57\xb4\x65\x43\xf8\x63\x52\x5d\xf5\xee\x8a\x96\x6c\x08\x7f\x4c\x2a\x8b\xde\x9d\xd6\x0e\xc2\x1f\x93\xfd\xe9\x76\xa7\xb5\x82\xf0\xc7\x07\xa7\xba\x41\xfc\x9f\xd6\x0a\xc2\x1f\x19\xa5\xdc\x8a\xff\x69\xad\x20\xfc\x21\x19\xe5\x56\xfc\x4f\x6b\x05\xe1\x0f\x49\x28\xb5\xe2\x7f\x5a\x2b\x08\x7f\x9c\x10\xad\xf8\xa2\xb5\x82\xf0\xc7\x49\xb5\x15\x5f\xb4\x56\x10\xfe\x38\xa9\xb4\xe2\x8b\xd6\x0a\xc2\x1f\x27\xfb\xad\xf8\xa2\xb5\x82\xf0\x87\xef\x54\xb7\x1f\x37\x5a\x2b\x08\x77\x64\x94\x72\x2b\x6e\xb4\x56\x10\xde\x90\x8c\x72\x2b\x6e\x8a\x56\x6c\x09\x67\x48\x42\xa9\x15\x37\x45\x2b\xb6\x84\x2f\xe6\xd4\x2e\xea\x6b\xd1\x8c\x2d\x61\x8c\x39\xb1\x8b\xfa\x5a\xb4\x63\x4b\x38\x63\x5e\xdd\x45\x7d\xd5\x1a\x42\x58\x63\x5e\xd9\x45\x7d\xd5\x5a\x42\x78\x23\x08\xc3\x6a\xba\xba\xd5\x5a\x42\x98\x43\x71\xca\x2d\xb9\xd5\x5a\x42\xb8\x23\xa3\x94\x5b\x72\xab\xb5\x84\xb0\x47\xc6\x28\xb5\xe4\x56\x6b\x09\xe1\x0f\x6a\x8b\x7e\xa5\x35\x84\xf0\x07\xb1\x43\xbf\xd2\xda\x41\xf8\xa3\xba\x41\xbf\xd2\x9a\x41\xf8\xa3\xb2\x3f\xbf\xd2\x5a\x41\xf8\xe3\xcc\x39\xab\xb4\xe2\x87\xd6\x0a\xc2\x1f\x92\x52\xba\xc4\x0f\xed\x12\x84\x3b\xb2\x9b\x5f\x73\xe7\x24\x4e\xa3\xea\x6d\xa0\xcf\xc5\xb5\x76\x84\x51\x4a\xdc\xd2\x45\x3f\x17\x17\xdd\x11\x8e\x39\x27\x46\xe7\xa7\x76\x2d\xc2\x30\xe7\xd5\xd1\xf9\x59\x8c\xce\x8e\xf0\xcb\x79\x65\x74\x7e\x16\xa3\xb3\x23\xec\x72\xbe\x3f\x3a\x3f\xb5\x56\x94\xdc\x02\xf7\x4f\x65\xa6\x50\xbf\xdb\x93\xb7\x3e\x2b\xa9\x6d\x5e\x34\xf1\xf8\xff\x10\xaf\x9c\x94\x8a\x5b\xa9\xbb\xfe\x33\x83\xe9\x77\x55\x2b\x95\x2b\xd6\x88\xbb\xc1\xff\x21\x5e\xb9\x72\xc5\xea\x6e\x37\xfc\x7f\x0a\xa6\x7e\x2c\xac\x54\xee\xaa\xa8\xdc\xf2\xff\x10\xaf\x5c\xb9\xab\xa2\x72\xab\xe6\x60\x95\xba\x2c\x8b\xba\xac\x9b\xe9\xe5\x4b\x2f\xb3\x4b\xff\xff\x01\x00\x00\xff\xff\x71\x9d\xca\x8f\xdf\x44\x01\x00") + +func fontsMnemonicFlfBytes() ([]byte, error) { + return bindataRead( + _fontsMnemonicFlf, + "fonts/mnemonic.flf", + ) +} + +func fontsMnemonicFlf() (*asset, error) { + bytes, err := fontsMnemonicFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/mnemonic.flf", size: 83167, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsMorseFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x54\x6d\x6f\xdb\x36\x10\xfe\xce\x5f\x71\x73\x03\x24\xd9\x4c\xc6\xb2\xd3\x36\x4e\x8b\xbe\x2c\x1b\x8a\x00\x1d\x56\xac\xed\xa7\x61\xc0\xce\xe2\xc9\x22\x4a\x91\x02\x49\x59\x4d\x87\xed\xb7\x0f\x7c\x91\xed\x24\xc0\xbe\xcf\x41\x40\xea\x78\xf7\xdc\x3d\xcf\x1d\xd9\xe8\x66\x89\x27\x50\x41\x05\xab\x05\xf0\x0a\x2e\x17\xec\x17\xeb\x3c\xc1\xe6\x0e\xde\x69\x32\x06\x6e\x5a\xec\x7b\xd2\x1a\x5e\x6e\xb7\xf5\x9b\x41\x0d\xb5\x20\x39\xbc\x82\x6a\x71\xb1\x7e\xca\x7e\x44\x4f\x12\xac\x01\x65\x1a\x0b\x8d\xb3\x1d\xcc\x32\xc0\x8d\x95\x04\x68\x24\x84\x96\xe0\x43\x6b\x0d\x05\x55\xc3\x5b\xdd\xb7\xb8\xa1\xe0\x67\xec\xf3\x6f\xef\xaf\xa1\x0d\xa1\xbf\xbe\xb8\x18\xc7\x51\x78\x1b\xac\x11\x58\x8b\xe1\xcb\xc5\x3f\xbe\xee\xd7\xab\xba\xbd\x70\xd4\x90\xbb\xc0\x12\x25\xda\xd0\x69\x76\x6b\x6a\x3d\x48\xf2\xe0\x6d\x47\x70\xfb\xf1\x57\x78\x8f\x41\x19\x5e\x41\xdd\xa2\xc3\x3a\x90\xf3\xec\x03\xb9\x4e\x79\xaf\x62\x69\x1e\x5a\x72\xb4\xb9\x83\xad\xda\x91\x81\x60\xa1\xb3\x52\x35\x77\x10\x5a\xe5\xa1\xb1\x26\xcc\x01\x3d\x68\x6b\xb6\x71\x0d\x2d\xb1\xe4\xa0\xc8\x9d\x7a\x30\xd8\x51\xc4\xe8\x35\xd6\x99\x2b\x42\x6d\xbb\x8e\x4c\x00\xad\x0c\x09\xc6\x3e\x7b\x02\xdb\x1c\xe0\x00\xb5\xb6\xa3\x87\x46\x6d\x35\x85\x98\xb0\xb6\x66\x47\x2e\xc0\xdb\x8f\x37\xb7\xb7\xd1\x70\x6b\x02\x39\x83\x41\x59\x83\x1a\x92\x64\x2c\x4a\x26\x18\xfb\xd4\x12\x34\x36\x22\x28\xb3\x05\x3f\x6c\x7c\x50\x61\x88\x9e\x1e\x5a\xdc\x11\x6c\x88\x0c\x74\x28\x69\x0e\x4a\x90\x98\x83\x6a\xa6\x54\x8e\x6a\x52\x3b\xf2\x80\x6c\x2f\x46\x2c\x39\x36\x41\x53\x13\x22\x3f\x65\xfa\x21\xcc\x41\x05\x18\x95\xd6\xd0\x3b\x65\x42\x72\x38\x6a\x5c\x63\x5d\xd2\xe1\x11\x88\x53\xdb\x36\x5c\xb3\xef\xe0\xfe\x8f\xbf\x02\xc1\x5e\x3c\x36\x5e\xb3\xdf\xff\xf8\xeb\xef\x07\xc6\xb3\x73\xf6\x8e\x5c\x87\x06\x3c\xff\xb6\xb7\x7a\xcf\xa6\x3e\xce\x7a\xa7\x3a\x9a\x45\xeb\x29\x7b\xf2\x18\xf5\xe5\x4f\xa4\x29\x10\x68\xf4\x01\x46\xeb\xe4\x2b\xf6\xfd\x63\xaf\x9b\x96\xb1\x9f\xbf\xd6\xd4\x27\xd6\xc6\x06\x92\x80\x1b\xbb\xa3\xf9\xd1\xa0\x44\x3b\xe0\x0e\x95\xc6\x8d\x26\x50\xe6\x58\x85\x83\x3e\x0c\x3d\xcc\x5e\xcf\x04\x63\xa9\x71\x3c\x26\x05\xdf\x63\x4d\xc7\x3d\x21\xa3\xd1\x6d\x49\xc6\x06\xab\xae\x77\x76\x47\xe0\x08\x25\x6e\x94\x56\xe1\x4e\xc0\xa7\x34\x09\x8d\x75\x1d\x0b\x36\xe9\xe9\x03\x1a\x89\x4e\xfa\x24\x39\x0e\xc1\x76\x18\x2f\x4a\x70\x68\xfc\x34\xc1\xb6\x39\xaa\x6a\x0e\x83\x27\xf0\xdd\xe0\xdb\xce\x4a\x62\xdf\xc8\x59\x38\x9b\x95\x01\xe0\x0d\x74\xc9\x95\x77\x8b\xd9\xf9\x0b\x18\x51\xe5\xe6\xca\xc1\xa5\x69\x8b\x60\x08\xd2\x86\x94\x90\xb0\x6e\x61\xa3\xd1\x7c\x61\x2a\x37\xd8\x0e\x21\x8d\x07\x7a\x18\xe3\xb5\x47\x0f\x1b\x0a\x63\xa4\x87\xe6\x0e\xc2\x68\x8b\xcb\xb1\x88\x67\xd2\x06\x3f\x67\x12\x7d\x4b\x1e\xac\xcb\x90\xfe\x5c\xc4\x0e\xf4\x1a\xcd\x3e\x75\xa3\x9c\xcf\xf7\xe6\x9a\xc5\xd7\x07\x38\xcc\x3a\xdc\xaa\x1a\xcc\xd0\x6d\xc8\xcd\x52\x59\x8d\x8a\xbd\x90\x64\x82\x6a\x54\x9d\x82\x19\xa6\xb6\x82\x6f\xed\xa0\x25\xa0\x1e\xf1\x2e\x96\x06\x7f\xe2\xe9\x3c\x05\x19\x3b\xb2\x93\xec\x14\x89\xcc\x5a\x74\x32\xd5\x31\x03\xce\x73\x1b\x7d\xa4\x83\xb9\xba\x39\x6c\x22\x09\x34\xa7\x21\xc2\x24\x41\x49\xb2\x2a\x23\xb4\x14\x07\x3d\x8b\xb5\x27\xfa\x9f\x87\xf3\x34\x4a\x2a\x3d\x4e\xf1\xe2\x4a\xf2\x35\x19\x19\x1f\xa3\xd5\x22\x85\x75\xf8\x35\x31\x07\x4d\x66\x1b\x5a\x38\xa3\xaf\x93\xf3\xf1\x83\xe2\xcf\xe1\x07\x40\x68\x06\xb9\x25\x68\xb0\x0e\xd6\x31\x5e\x25\x04\x49\x0d\x0e\x3a\x1c\xba\x5f\x2e\x6a\x79\x77\xd8\x65\x4e\x94\xa5\x8c\xf5\xdd\xc3\x65\x0c\xe0\xe4\x0d\x13\x3c\xfe\xe5\x8d\xe0\x22\x6e\xca\xef\xe4\x0d\x7b\x7d\xf2\x64\xff\x2f\x38\xe7\xe9\x9c\x0b\x9e\x23\x0e\x1b\xce\x79\xf1\xe6\x11\x25\x1f\x0a\x31\xe1\x4e\xee\x19\x9f\x17\x77\x3
1\xad\x62\x5a\xc5\xb4\x8a\x69\x15\x13\x52\x8a\x9b\x56\x3e\xad\x7c\xff\x7d\x6f\x73\xaf\x6c\x51\xdc\x33\x87\x82\x97\xab\x2f\xe8\x31\x57\xae\xa3\xa4\x29\xc9\xd3\x52\x0a\xcd\x01\x45\xa6\xe4\x95\x2c\x39\x6b\xf6\x29\xc1\xc5\x29\x63\x44\x53\x86\x2e\x04\xb3\x6b\x51\xa3\x68\x58\xb8\x1d\x2b\x9a\xa4\x3c\xfe\x38\x74\xe1\xff\xca\x40\xec\x67\xa5\x94\x3a\x0d\xc7\x63\xa3\x10\x27\x39\xf3\x22\x45\x56\xcf\x16\x79\x56\xab\xe7\xd5\x61\x4c\xab\xab\xc5\x61\x26\xab\xab\xe7\xc7\x27\x57\x65\xb8\xb2\x65\x1a\xa8\xea\x6a\xfd\xc0\x9e\x99\x57\xeb\x45\x19\xbe\x07\xfe\xeb\x15\x9b\x18\x54\xeb\x67\x53\xb1\xd5\xfa\xf9\xde\xbc\x5c\x54\x49\x75\x91\xf6\xeb\xa4\x5e\xb2\x57\x97\x13\xa9\xe5\x72\x31\x11\x5b\x2e\x57\x47\xe4\x96\xcb\xa7\x07\x9c\xe5\xd5\x04\xbf\x5c\xae\x0f\xe6\xd5\xea\x00\x7f\x59\x1d\xe0\x2f\x9f\xed\xe1\x9f\x2e\x27\xf8\x7f\x03\x00\x00\xff\xff\xa9\x58\xa7\xd0\xc0\x09\x00\x00") + +func fontsMorseFlfBytes() ([]byte, error) { + return bindataRead( + _fontsMorseFlf, + "fonts/morse.flf", + ) +} + +func fontsMorseFlf() (*asset, error) { + bytes, err := fontsMorseFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/morse.flf", size: 2496, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsMoscowFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x56\x4d\x8f\xd3\x30\x10\xbd\xe7\x57\x3c\x65\x38\xa4\x12\x94\x2d\x88\x95\x96\x03\x5a\x89\x13\x17\x2e\x20\x4e\x1c\x48\x53\xa7\x89\x36\x6d\x16\x27\xcb\xaa\x12\xe2\xb7\xa3\x6e\x63\x7b\x6c\x8f\xd3\xe5\x8a\x7a\x19\x7b\x9c\xf1\x9b\x0f\x3f\x8f\x53\x77\xf5\x9b\xf2\x05\xae\x71\x8d\x1b\xbc\x5a\x61\x75\x95\x7d\x3c\xe8\xb6\xeb\xda\x0a\x75\xbf\x1f\xb1\x3e\xe0\xab\x2e\xab\x03\xbe\x54\xcd\x43\xf3\xa8\xf4\x1d\x8a\xa7\x85\xa5\x5d\xb8\x1d\x36\xba\x5a\x56\xfd\x6e\xb1\xcc\x3e\xf7\xbf\xd4\x6e\xad\x34\x56\xef\xc6\xe6\x25\x56\x37\x37\x6f\x81\x6f\x4a\x0f\x6d\xbf\xc7\x6a\x79\x95\x7d\xc2\x50\x56\xba\xad\xdb\x4a\x6d\x30\x36\x0a\x75\xdf\x75\xfd\x63\xbb\xdf\x62\x38\xec\xd6\x7d\x37\xa0\xee\x35\x6c\x10\x9d\x1a\x47\xa5\x87\xf7\x19\x90\x7f\xcf\x51\x0c\x5d\x39\x34\x8b\xa3\xf6\x3a\x47\xb1\x2e\xab\x3b\xb8\xa5\xdf\x39\x8a\xfb\xf6\x5e\x2d\x70\xd4\xfe\xe4\x28\xc6\xb6\xdb\x4c\xea\x8f\xe3\xee\x76\xbf\xed\x14\x7e\x3e\xf4\xa3\x7a\xda\xf1\x21\x47\xb1\xd5\xaa\x1c\x95\xc6\xd8\x94\xfb\x45\x96\x01\xc0\xed\xbc\xb4\x0a\x45\xd2\xad\x1b\x85\x40\x56\x26\x91\x26\x23\x22\x69\xee\x60\x4f\x1f\x60\x3f\xb3\x15\x1f\x09\x27\x79\x32\x75\xeb\x7e\xe0\xc4\x51\x9d\x53\x17\x60\x88\x21\x95\x80\x19\xf9\x85\x20\x1e\x93\x5c\x2d\x8a\x8b\x69\xf2\x0a\xe6\x41\xe9\x22\xd3\x20\xa6\x48\x0a\xee\x98\x9c\x90\xa4\xec\x66\xf0\xfc\xec\x26\x49\xfc\x34\xb9\x11\xaf\x32\x52\x15\x27\x87\xe1\xd5\x89\x1b\x99\x60\x39\x49\x66\x8d\x10\x6e\x90\x98\xe9\x6d\x40\x84\x44\xb2\xeb\xb4\x91\x90\x5d\xe4\x62\xce\x9d\x78\x1d\xd2\x46\x42\x4c\xde\x15\x0d\xe7\xe7\x8c\x28\xc5\x71\x8f\xca\x88\xdc\x71\x26\xb1\xf9\x69\x30\x35\x9f\x6a\x65\x93\x30\x64\x31\x35\x3c\x73\x8e\x31\x01\x3d\x6e\x25\x5d\x1f\x51\xa6\xbd\x86\x55\xc4\x5c\xfa\xae\x8d\x81\x71\x36\x75\x0a\x7b\xe5\xc8\x01\xf0\xe4\x3c\x24\xb7\xc1\xe8\xbe\x07\x0b\x61\x31\xfd\x09\x91\x2d\x79\x1c\x98\xf1\xc3\x03\x25\xff\xbb\x94\xba\xbb\xa0\xc4\xf4\xe9\xbb\xec\xc1\xf3\xe4\x8f\x71\xea\xa6\xb8\x51\xb1\xa3\x1c\x1c\xf2\x79\x22\xcc\xc4\x1c\xe6\x24\x7b\x30\xa4\x00\xd7\x53\x65\x35\xe7\x1b\x8e\xe9\x1c\x28\xc8\x21\xc9\xa8\x80\x20\x67\x29\x08\x76\xb2\x82\xa1\xf9\x3e\x7f\x70\xff\x16\x52\x10\x9a\x48\x3e\x04\x86\xb3\xd4\x40\xc8\xd6\x60\x44\x74\x8d\xc8\x5d\x7b\xa4\xc7\x67\x91\x2f\xc1\xd6\x99\x1c\x98\x2e\x55\x89\xc4\x31\x38\x07\xbb\xc3\x9a\x88\
x13\xc4\x17\x35\x38\xbe\xa8\xe5\x24\x7b\x94\x4f\x08\xab\x7b\x99\xd8\x1e\x1a\xff\xae\xf8\xcf\xcb\xb3\x1e\xf4\x18\x03\xa1\x11\xd8\x6e\xa4\x7f\xa1\x64\xe9\x95\xc6\x76\x8a\x09\xcf\xbe\x77\xf6\x18\x58\x37\xb9\x74\xfd\x4b\xd7\xbf\x74\xfd\x4b\xd7\xbf\x74\xfd\xb8\xeb\x93\xdc\xf5\x65\x9e\x06\xe9\x13\x10\x3d\x13\x0c\x29\x7e\x01\xa4\x58\x03\x8a\x45\xb1\xce\xc9\xff\xdd\xe8\x6f\x00\x00\x00\xff\xff\x2b\x5c\x68\xd7\x8f\x12\x00\x00") + +func fontsMoscowFlfBytes() ([]byte, error) { + return bindataRead( + _fontsMoscowFlf, + "fonts/moscow.flf", + ) +} + +func fontsMoscowFlf() (*asset, error) { + bytes, err := fontsMoscowFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/moscow.flf", size: 4751, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsNancyjFancyFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\x4d\x8f\x9c\x38\x13\xbe\xf3\x2b\x4a\x74\x24\x2e\x79\x4b\x9a\xf7\xb0\x5b\xca\xae\x56\x68\xef\x48\x5c\x7d\xc3\xc4\x74\xa6\x77\x9a\x26\x9a\x66\x12\xcd\xbf\x5f\xb9\x5c\x36\x86\xe6\xab\x67\x26\x4b\x32\x60\x1b\xbb\xbe\xeb\x71\xbb\x38\x9e\x8f\xff\xd7\x9f\x80\xe0\x37\x78\xf8\x1d\xfe\xf7\x00\x0f\x0f\x49\x02\xee\xba\xe8\xcb\xd7\xd7\x7f\xf0\x78\x3e\xf2\xd0\x45\xb7\x8d\x01\x7d\xec\x9b\x67\xe8\x1f\x1b\x38\x77\xdf\x4e\x17\xe8\x8e\xa0\xe1\x67\xd7\xea\x0b\xfc\x7c\xec\x00\xf4\xf5\xa9\x31\xd0\x36\xd0\x77\xd0\xea\xa7\x06\x1e\x9b\x67\xd0\x76\xfd\xf5\xf4\x0d\xa1\x7f\x3c\x5d\xe1\x74\x65\x02\xc7\xee\xd2\x43\xff\xa8\x7b\xf8\xaa\xdb\x06\xba\x97\xde\x52\x3b\xf5\x08\x61\x5a\xfb\x0a\xc7\xd3\xf3\xb5\x67\x91\x74\xdf\x37\xed\xf7\x1e\x74\x0f\x1a\x8e\xa7\x6f\xe7\xa6\x67\x1a\x9f\xe1\xda\xc1\xb9\xd1\x3f\x1a\xcb\x57\x9f\xbb\x4b\x83\x41\x09\x00\xf8\xa1\xdb\xef\xaf\xcf\xb9\xfe\x7a\xc5\xfa\x05\x1b\xf3\x92\x24\x45\x67\x4e\xc7\x53\x63\x82\x94\xbd\xae\xaf\x50\x9f\xf5\xe5\xe9\x0a\xf5\x2b\x94\xfa\xe5\x0c\x7f\xbf\x3c\xf7\xdd\x05\xfe\xbc\x76\xe7\x97\xfe\xd4\x5d\xf2\x46\x3f\xf7\x8f\xe7\xd3\xe5\x09\x2f\x4d\xff\x57\x92\x7c\xfa\x04\xf9\x8e\x5b\x9e\x98\x12\xf2\x84\xc8\xdf\xb8\x0b\x00\x79\xd2\x75\xbe\x65\x6f\xfc\x86\x5f\xfa\xa7\x53\x60\xf7\x33\x0f\x5a\xe7\x09\x38\x1a\xcc\x93\x2f\x3b\x46\x64\xff\x8f\xc7\xc2\xbc\x68\x2d\x44\x04\x0f\xfc\x1f\xf2\x04\x8d\x5d\x52\x66\x90\x27\x8a\x0e\x5d\x77\xe8\x10\xfc\x04\x26\x55\x51\x98\x10\x2d\x83\x5b\xba\xac\x2a\x95\x00\xcc\xd9\x8a\x03\x04\x60\x28\xe3\x8e\xa2\x52\xda\x6e\x89\x6d\xdb\xd9\xb6\xc3\xe3\x76\xb6\x53\x06\x00\x14\x95\x31\x65\x98\x74\x84\x02\xd5\xe1\x15\x55\x19\x45\xb4\xeb\x60\x2f\x2a\xab\x30\xcd\xf2\xa9\xa8\x76\x76\xa9\x58\xab\x4a\x6d\x30\x32\xc1\xc1\x58\xc6\x6e\x9d\xdc\xf2\x04\x34\xb1\xd0\x5e\x61\x12\x79\xfc\x53\x11\xba\xd9\xca\xca\x3d\xe3\x65\x45\xd4\xb9\x81\xca\x4f\x70\xbe\x0c\x4f\x14\x06\xa4\x16\xc3\x84\x4d\xcf\x6c\x6b\xb4\xcb\x90\xe5\xb7\x14\x89\x58\xb2\x3c\xb1\x64\x88\x58\x9c\x3c\xb1\x8e\x25\x82\x4a\x09\xa7\x81\xc0\xba\xf1\xa3\x17\x6e\x85\x97\x13\xa2\x40\xa4\xf1\xe8\x78\xee\x5c\xf8\xcc\x59\x77\x48\xb3\xe0\x80\x85\xf5\x71\x73\x2c\xc1\xfa\xdc\xfd\x12\xc4\xee\x06\x09\x62\x31\x9b\xd8\x76\x08\x78\x88\xc2\x5d\x5a\x21\x1a\x56\x25\xd0\x56\x6e\x0d\x12\x4a\x88\x1c\x0c\xd6\x95\x25\x78\xb4\x31\x99\x6b\x2a\xca\x32\x89\x0a\x1b\x56\x36\x00\xd7\x72\x93\x41\x02\x56\x1e\x86\x86\xbc\xf3\x0f\x1e\x25\x1b\x4e\x6e\xa4\x72\xbe\xd0\x5a\x1b\x06\x05\x8a\x74\x43\x69\x29\x87\x19\x4b\xf0\x33\x47\x10\x2c\xc1\x60\xce\x2a\x78\x0e\x1d\xb8\x6e\x10\x14\xd4\x11\xfb\x8a\x99\xb4\xd6\x43\x00\xcc\xb5\xcc\x22\x41\x0a\xfc\x62\xf5\x6a\xad\x35\xde\x48\x08\x7b\x24\x14\x98\x5d\x20\x48\x54\x09\x41\x6b\x16\x97\xb3\x95\x1a\x60\x77
\xc9\x86\x81\x5f\xb0\x5c\x88\xc3\x10\x86\x21\x0a\x87\x20\x5c\x96\x90\x9d\xa2\x18\x26\x1d\xd4\xd4\xda\x48\xfc\x8d\x24\x94\xe8\xdb\x56\x39\x22\xe8\x36\x13\x4b\x90\x46\x36\x34\xe8\xbd\x5c\xdd\x84\xf0\xd4\xcb\xd3\x3c\x9c\xcf\xcd\xf5\x89\x63\x18\x71\xf8\xe8\x0d\xe8\xcd\x27\xe8\x08\xdc\xe0\x79\x8a\xd6\x76\xe6\x71\xd6\x69\xb9\xde\x0c\x49\xd6\xca\x3c\xc4\x7b\x97\xc3\x6f\x41\x6e\xab\x80\xb3\x11\x95\x51\x96\x26\xe3\x40\xab\xb5\x5b\x9d\x41\xb4\x9b\x80\x8d\xac\x10\x27\xa5\x87\x29\x48\xd3\x81\x46\xd7\x4d\x85\x19\xb7\x3d\x3e\x39\xc7\x1a\x61\xe0\x61\x89\x06\x88\x52\x9d\xdd\x72\xc2\xbe\x07\x61\xef\x9b\x46\xf5\x0d\x83\xa2\x28\x53\x77\x15\x05\xe4\x49\x91\x01\xb6\x6d\xdb\x02\xb8\x6e\x98\x5a\x49\xb7\xb0\xd7\xf0\x76\xad\x1b\x5d\xe3\x4d\xcd\x31\x3e\x08\xdf\xac\xb0\xbf\xf2\x0e\x07\x00\xcb\x18\x1d\xa7\x43\xe6\xa7\x22\xd3\x3a\x30\xed\x1a\x33\x15\x75\x8b\x0c\xc0\x77\xdd\xdc\x3f\x5c\xd7\x5f\x4c\xf9\x86\x71\x91\x31\x5b\x15\x2b\x8c\x63\x05\xbb\x2e\xee\x39\x05\x0a\x3b\xc7\xf2\x74\x12\x15\x28\x4e\x9e\xe8\x3a\xe1\x68\x19\x8a\x9e\xca\x5b\xa9\xbd\x65\x28\x8a\xcc\xf5\x3c\xc3\x60\x8f\x6d\x86\xc2\x31\x4d\x1d\x0f\x61\xd9\xb6\x2d\x77\x2b\x37\xd1\xab\x35\xd1\xf2\xb6\x1b\xfb\x61\xc3\xa7\xeb\x8c\xb3\x3b\x19\x8f\xba\x1b\x8c\x33\x61\xbb\xec\xd3\x22\x0a\xd2\x10\xd0\xf3\x3e\xdd\x61\x62\x9b\x2f\x45\x51\xf8\xb4\xb9\xc9\x84\x5f\x96\x36\x69\xea\x17\x6d\x3d\x22\x02\xf9\x40\x57\xd6\x7b\x0e\xb0\xd9\xfb\x00\x13\x6d\x05\xb3\x24\x37\xa8\x68\x66\x3d\x9b\x12\x7b\x18\x16\xc5\x92\xdb\x77\xf6\xe4\xda\x99\xd8\x95\x0d\xbd\x4a\x85\xdc\x46\xb9\xc5\xd1\x36\xc9\xe9\xfb\x06\x8a\xb5\x90\x88\x11\x26\x96\x62\x22\xc2\x2a\xc2\xcc\xf7\x56\x35\x97\x5d\x63\x0d\x43\x67\x89\x4e\x82\xc9\x9d\x1c\xcd\x1d\x90\x26\x2a\x7a\x64\x11\xb2\x93\xdd\xe2\xd7\x20\x8b\xa8\x63\x55\xfd\xbc\x92\xca\x56\xa4\xd0\xad\x1c\x80\x16\x51\xee\xa0\xb7\xd3\x16\xe3\x00\xa5\xb1\xc6\x9f\xd7\x34\xae\x11\xd2\x91\x8a\xde\x95\x4b\x9e\x9d\x65\x5c\x8e\xf8\x8a\xa1\x11\xc5\x7f\x0e\xd6\x54\x44\x08\x83\x6f\x31\xda\x93\xeb\x7b\xf7\x47\xc1\x09\x76\xaa\xdf\x31\x7c\x26\xdc\xd9\x99\xe3\xb4\x84\x48\xfb\x12\xa3\x8a\x35\x13\x8a\xbb\x34\xdb\x62\x58\x2e\xec\xf5\x45\x51\x66\x03\x3c\x3a\x43\xee\xc5\x5c\x7f\x5b\x84\x97\xd2\xdf\xfc\x40\xe6\x6e\x9e\x79\x95\x01\x42\x36\xe2\x8f\x66\x56\x84\x15\x44\x1a\x29\x5e\x0d\xe6\x93\x2c\x88\xd2\x69\x8c\x54\x73\x9d\x4d\x9f\x0a\x23\x74\x8c\xf0\x96\x11\x63\x8d\x79\x5f\xf0\xc4\x3f\x6d\x04\x7d\x86\x9f\x45\x25\xb3\x72\x3d\x69\x07\x70\x1c\x68\xde\xb1\xbb\x0c\x47\x58\x98\x2d\x71\x8d\xca\x3f\x73\xc7\x25\x55\xfb\xa1\x4a\x85\x12\x9d\x34\xed\x34\xd7\xe4\x05\xdc\xbc\x39\xb1\xcf\x9e\x82\x21\x4f\x6e\x6b\x65\xe3\xe7\xa2\x44\x36\x69\xa2\xf2\xa0\x9c\xb7\x2a\x3e\x11\xa5\xd9\x7d\xd5\xa3\x69\x21\x09\xe6\x5e\xbd\xbf\xdb\x45\xd7\x0c\x58\x0e\x07\x5d\x85\x30\x5f\xd0\x5a\x3b\xbb\xe2\x50\x9c\xe1\x4a\x85\x3f\xdc\x8d\x0f\xe9\x54\x2e\x1e\x63\xa5\x1a\x03\xd3\x72\x17\x91\x59\x27\x4c\xc4\xa7\xc3\x45\xa3\xef\x96\x38\x4d\x03\x61\xc4\x69\x05\x7b\x85\x70\x54\x0e\x72\x85\x02\x16\x58\x64\x7b\x9b\x29\x76\x48\x6c\xdd\x68\xe8\x5e\x89\x91\x29\x78\x0b\xa7\xbe\xde\x15\xd9\x3c\x6a\x4c\x6b\xba\x43\xe3\x7d\x51\x50\x0e\x39\x85\xf2\xd9\xc1\x57\x70\xee\x8d\x82\x60\x74\xb7\x6c\x54\x99\x9b\xa8\x3e\x7c\x5e\x59\xfc\x02\xb3\x31\x71\xbc\x64\x59\x54\xbb\xbf\x0c\x1f\x5b\xd2\x30\x5a\x49\xf5\xa3\x64\x9c\x5a\x11\x75\x95\xef\x6c\x2e\x4e\xf2\xdd\x19\xac\x46\x13\x4c\x56\xb9\x3f\x91\x44\xfe\x9c\x2c\xf2\xb7\x42\x6d\xc9\xdd\x1f\xe0\x95\xf7\xc4\xd1\x8e\xdc\xdc\x2b\xf1\x04\x4d\xa4\x42\x1d\x96\x0d\x9e\xfe\xb0\xc8\x8f\x77\x27\x17\x4e\xf7\xdb\x78\x22\xdb\x9b\x6d\xac\x18\x4d\x3a\x1c\xcb\xb6\xc7\xc6\xc6\x17\x13\x59\x20\xd9\x58\xcb\x78\x24\x6a\x98\xf2\x3e\x34\x8
9\xa2\x27\x8a\xa9\x0f\x88\x02\x26\x8c\x03\x61\x57\xde\x24\xb2\x64\x33\x9f\xb8\x43\xa1\x73\x95\xf0\x4d\xe6\x8d\x52\x6a\x94\x6c\x44\x68\x0d\x4e\xc2\xcd\x1a\x4a\x4d\x3e\xbd\xec\xcd\x3c\x53\xa2\xd7\xc0\xc2\x4a\xf8\x44\x67\xd8\xa3\x3c\xc1\xc6\xca\xfd\x99\xb7\xcf\xe6\xab\x08\xbe\x40\xd8\x55\xb1\xa5\xa6\x8c\x46\xdc\x86\x6c\x81\x68\xc2\xaa\xc4\x18\xbe\x34\xd1\x17\xf7\x1e\xbb\xf0\x81\x14\x4c\xf8\x60\xe8\xdf\x5a\x9c\x8d\x3f\x9b\x4e\x7f\x06\x06\x9c\xe5\xdf\x6c\x93\x6e\x84\xb3\x62\x56\xd7\xff\xe2\xe3\x19\x49\xf9\x9c\xa1\x7a\xf2\x56\xdd\x04\xd0\x88\x33\xa0\x4d\x37\x7b\x16\xb1\x6c\x53\x48\xcd\x34\x10\xde\xd5\xce\x93\xc9\xbf\xff\x6a\xe0\xdf\x00\x00\x00\xff\xff\x46\xd8\x61\x0e\x43\x22\x00\x00") + +func fontsNancyjFancyFlfBytes() ([]byte, error) { + return bindataRead( + _fontsNancyjFancyFlf, + "fonts/nancyj-fancy.flf", + ) +} + +func fontsNancyjFancyFlf() (*asset, error) { + bytes, err := fontsNancyjFancyFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/nancyj-fancy.flf", size: 8771, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsNancyjUnderlinedFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\xcd\x8e\xdc\xa8\x13\x3f\x8f\x9f\xa2\xd4\x89\xe4\xcb\xff\x8f\x94\x95\x76\x55\x9b\x53\x3f\x82\xaf\xdc\x8c\x83\x3b\xd3\x9b\xe9\x21\x9a\x76\x12\xe5\xd2\xcf\xbe\xa2\x28\xa0\x8c\x3f\x7b\x66\xb4\x4e\xc6\x06\x1b\xea\xf3\x57\x45\x41\x9f\x9e\x4e\x7f\x98\x8f\x80\xf0\x17\x7c\xfa\x13\xfe\xff\x09\xfe\xae\xaa\x87\x87\x87\x07\x00\x78\x36\xcf\x5f\x7e\xff\xa3\x4e\x4f\xa7\xaa\x7a\xf0\xdd\x4b\x6f\xc1\x9c\x86\xfe\x05\x86\xc7\x1e\x9e\xdc\xd7\xf3\x33\xb8\x13\x18\xf8\xe5\x2e\xe6\x19\x7e\x3d\x3a\x00\x73\xfd\xd6\x5b\xb8\xf4\x30\x38\xb8\x98\x6f\x3d\x3c\xf6\x2f\x60\xfc\xfc\xeb\xf9\xab\x82\xe1\xf1\x7c\x85\xf3\x95\x08\x9c\xdc\xf3\x00\xc3\xa3\x19\xe0\x8b\xb9\xf4\xe0\x7e\x0c\x9e\xda\x79\x50\x90\x86\x5d\x7e\xc3\xe9\xfc\x72\x1d\x2a\x2f\x90\x19\x86\xfe\xf2\x7d\x00\x33\x80\x81\xd3\xf9\xeb\x53\x3f\x10\x8d\xff\xc1\xd5\xc1\x53\x6f\x7e\xf6\x9e\xaf\x79\x72\xcf\xbd\x22\x25\x20\x5c\x3f\xcd\xe5\xfb\xef\x97\xa3\xf9\x72\x55\xdd\x0f\xd5\xdb\x1f\x55\xf5\xf1\x23\x1c\x97\x6f\xce\xb9\xd0\x3a\x56\xb6\x81\x63\x85\x18\x6f\xd4\x05\xa0\x31\x71\xa0\xef\xd2\x17\xfa\x18\x9f\x81\xf5\xf2\xd3\xd1\x95\xfa\xb1\x41\x43\x02\x0d\xe2\x49\x97\x7f\x87\xe8\xff\x8f\xdf\xa5\x71\xce\x8d\xc9\x31\xc1\x0f\xf4\x1f\x8e\x95\xb2\x7e\x4a\x53\xc3\xb1\xd2\xf8\xc1\xb9\x0f\x4e\x41\x1c\x40\xa4\x5a\x4c\x03\xc4\x34\x37\xa5\x4b\xaa\x62\x03\x40\x9c\xbd\x38\x80\x00\x16\x6b\xea\x68\x6c\xb8\x1d\x34\xf5\x6d\x3f\xda\x77\xe8\xbd\x1f\x1d\x94\x01\x00\x4d\x5f\x9c\x9b\x63\x13\x4d\x62\x11\xbb\x44\x0e\xdb\x1a\x05\xed\x2e\xd9\x0b\x9b\x36\x0d\xf3\x7c\x5a\xec\xc8\x2e\xd0\x92\x56\xad\xde\x60\x64\x93\x83\x55\x72\x70\xbe\x09\x2f\x83\x41\x12\x3a\x2a\x8c\x2c\x4f\x7c\x6a\x54\x61\x9e\xf6\x72\xcf\x78\x59\x23\xba\x40\xb8\xa5\x01\x5e\x2b\x1c\x3d\x15\x33\x40\xbd\x08\x13\x32\x3d\xb1\xed\x94\x9f\xa6\x48\x7e\x4f\x11\x91\x24\x3b\x56\x9e\x0c\x22\x89\x73\xac\xbc\x63\x11\xa1\xd5\xcc\x29\x13\x58\x37\xbe\xc0\x6c\x98\x11\xe5\x04\x01\x44\x1c\xbf\x15\x63\x17\xe0\x93\xe3\xa0\xb8\x87\xb8\x21\x82\x37\xd5\xdc\xf8\x7d\x29\xc9\x4c\x73\x2c\xc9\xcc\x80\x65\x49\x26\xb7\x1c\xf0\xd2\xed\xc0\x60\x66\xf3\xb1\x8d\x33\xf0\x41\xc0\x9e\x5b\x09\x15\xab\x12\x18\x2f\xb7\x01\x86\x94\x52\x04\x0a\xef\xd2\x06\x62\xd6\xb1\x75\x68\x6a\xac\x6b\x46\x87\x87\x97\x07\xe2\x5a\x8c\x52\xb2\x80\x95\x87\xc5\x1c\x7f\x3c\x39\x4c\x44\x0f\xab\xf0\xa6\x0d\x41\x61\x8c\xb1\x94\x1c\x50\xe8\xa6\x20\x62\x3e\x08\xb3\x90\x86\xe6\x08\x82\x27\x98\xcc\xd9\
x26\xcf\xa9\x90\x64\x37\x08\x72\xf6\x61\xfb\xb2\x99\x8c\x31\x19\x00\x73\x2d\xbb\x48\x10\x13\x3f\xa9\x5e\x67\x8c\x51\x13\x09\x61\x8f\x84\x9c\x6e\x17\x08\x22\xb6\x4c\xd0\x9b\x25\xc4\x6e\xab\x63\xfa\x5d\xb6\x21\x36\x02\x7e\x19\x7d\x19\x7c\x19\x7b\x31\x41\x2d\xae\x0d\x24\x21\x39\x45\x53\xba\x0c\x29\xa7\x33\x96\xf1\x37\x92\x90\xd1\xb7\xad\xb2\x20\x18\x16\x15\x4f\x10\x47\x36\xb4\x2a\x7a\xb9\x9d\x40\xb8\xf4\x32\xc7\xe1\x56\x6c\x8a\xb4\x01\x3b\xd3\x49\xc8\x97\xd1\x90\xd1\x8c\x9c\x2d\x81\x1a\x34\x4e\xe3\xda\x4a\x3d\xce\x31\x86\xaf\x3d\xa9\x69\x21\x68\x69\x0d\x23\xbb\xf1\xe2\xd5\x71\x26\xf7\x31\x18\x6c\x85\x4d\x26\x90\x83\x36\x00\xae\x33\x61\x76\x0d\x62\x75\x01\x8f\xb0\x84\x97\x26\xa6\x2b\x38\x1c\xb2\x0a\xce\x95\x72\x8d\x56\x83\x94\xa7\x82\x83\x2d\x33\x88\xe9\x09\x73\xaa\xd2\xce\x2f\x41\x69\x1d\x84\xb4\x16\x96\xe8\x9e\x32\x08\x2a\xc4\xcc\x54\x73\xa8\xc5\xd0\xf6\xc1\x6d\x72\x46\x8d\x5f\x96\x3a\x6b\xcb\x5a\x60\x13\x6c\xc5\xab\x56\x30\x96\x09\x9c\xb8\x0a\xa2\x2f\x2d\x27\xae\x30\x4c\xc5\x4a\x0c\x27\xb1\x30\xc7\x68\xd9\x64\x02\x1d\xb9\x1d\x4c\xc6\x3c\xf6\x98\x4c\x2a\x22\xf5\xc8\x96\x98\x6b\x2b\xce\x4b\x38\x5d\x41\x26\x3e\x89\x9a\x76\xd9\x06\x8c\xf5\x60\x2a\xf6\xaf\xfc\x52\x76\xf6\x99\xea\xcd\x8c\xb8\xa0\xd9\xac\x69\x14\xb1\x51\xb0\xe5\x13\xdd\x08\x18\xb3\x4f\x42\x8d\x5c\xaf\x9a\x8c\xe5\x88\xd5\x3b\xdc\x0d\xe3\x11\x81\x15\x4d\x8a\x7d\x49\xb1\x43\x29\x4b\x17\x90\x7b\x12\x90\xd9\x68\xd2\x26\xeb\x28\x2e\xda\x69\x59\xda\xad\x32\xab\xc2\xf9\x26\x2c\xc9\x4d\x9d\x54\xe4\x60\x92\x90\x94\x73\xb7\x18\x4c\x02\x66\x5f\x7b\x0e\x80\x33\x81\xd4\x19\x25\x63\x89\x52\xb0\x08\x27\x59\x69\xac\x75\x6d\xda\x19\x15\x3a\x95\x2e\x7c\x43\x04\xdf\x61\xba\x39\xbc\xaf\x31\x88\x78\x6f\x12\xde\x9b\x75\xe7\xdf\x95\x4d\xd9\x27\x6f\x8c\xdc\x91\x2a\x73\xb1\x64\x3b\xee\x90\x36\x9a\xb3\x1c\xab\xa3\xc5\xb6\xec\xbd\xd7\x07\x48\xbb\xf2\x19\xcf\x4c\x18\xf1\x82\xd7\x89\x62\xfa\x50\xc7\x52\x50\x54\xcc\x91\x7d\x50\x5a\xd6\xff\x1b\xe9\xdb\xa2\xac\x1a\xc5\xfe\x6c\xb3\xb9\xb1\x81\x9b\x44\xfc\x06\x54\x25\xaa\xda\x3d\x2b\xdb\x3a\x83\xb8\x82\x41\x2a\x85\x7d\x9b\x52\x56\x8a\xf8\x7a\xbd\x9c\x29\xc2\x74\x26\xa6\xa9\xdc\xca\x9c\xc8\xf8\x91\x99\x67\x85\x92\x5f\xed\xfd\x51\xb2\x9c\x8d\xf8\xa0\x19\x31\x25\xab\x24\x77\x7a\x5c\x25\x68\xcb\x20\x15\xc1\x3e\x46\xd4\x9c\x4f\x56\x08\xbf\xda\xd9\x38\x82\x91\x2c\x27\x55\xda\x76\xa8\xbc\xdd\x88\xad\xbc\x33\xdc\x91\x7d\x9b\x99\xa3\x94\xe9\x13\x71\xb6\x1c\xd7\x5d\xe4\xd7\xea\x74\x24\xc4\x4d\x3a\x68\xd1\x5d\xd4\x95\x9a\x72\x23\x37\x95\x29\xaa\x3c\x7b\x36\x33\x7e\x2e\x4a\x04\x94\x84\xd2\x8e\x8c\x13\x55\x7b\x48\x31\x9e\xf5\xbf\xfb\xc0\x02\xe6\x26\xbc\x43\xf7\x67\xbe\x7c\xf7\x90\xaf\x71\xb9\xa1\xd5\xf4\xcc\x64\x5a\x6e\x14\xea\xa8\x7c\x08\x80\x01\xe0\xb9\xd4\x90\xe7\x8f\xb8\x99\x74\x60\x7c\xac\x82\x68\xd7\x09\x23\x72\x09\xb3\x6d\xd3\x75\x89\xbd\x1d\x98\xb0\x52\xe3\x13\xd3\x55\xc2\xa2\xf0\x0a\x67\x29\x24\x30\xcb\xf6\x3a\x53\xec\x90\xd8\x4f\xb2\x78\xaf\xc4\x2a\xd6\xe0\x64\xe1\x43\x2c\xe2\x84\xcd\x45\xa3\x3c\x43\x4c\xa4\xde\x88\x82\x50\x79\xbb\xdb\xed\x76\x53\x88\x37\x0a\x21\x5e\xeb\xee\x45\x01\xec\xcf\x9f\x74\x9c\x5f\x1c\x32\xcc\xd6\xd3\x8b\x03\xf9\x76\x0b\x1f\x96\x45\xf5\x79\x33\xaf\x54\x07\x10\x25\xa7\x8a\xa2\x16\x07\xd6\x93\x48\xb8\xab\xf4\x87\x32\xde\x83\xc1\x3a\x65\x93\xc9\xda\xf0\xc7\x92\xf0\x5f\x90\x85\xff\xee\x38\x2a\x7e\x47\xaf\xbc\x05\x47\x3b\x62\xb3\xb4\x0c\x55\x72\x49\x68\x10\xc4\x41\x50\x87\x90\x53\x88\xfc\x0d\xf1\x96\xe8\x67\x8f\xbf\x4b\x04\xf8\xeb\x16\x22\x00\xa2\xa1\x5e\x63\x6b\x19\xad\x7b\xd6\x96\x25\x89\x35\x65\x15\xa7\xc6\xf9\x6c\x8f\xad\xf9\xa7\x22\x2e\x3b\x78\x81\x6d\xe4\x1b\xd1\xb0\xcd\x7d\x59\x45\xa0\x48\x60
\xeb\x75\x68\x98\x12\x56\x99\x70\xaa\x35\x15\x6f\x89\xe3\xfe\x68\xd7\x7a\x5d\x74\x46\xa1\x35\x0a\x3a\x44\xbf\x13\x55\xc8\xdc\xbc\xa1\x74\x59\xe9\xef\x8c\x40\xdb\xa8\xa8\x81\x4f\x2f\xe9\xa7\x21\x4b\x1e\xa5\x01\x1e\x2b\xf7\x47\xe0\x3e\x9b\xaf\x66\xf2\x05\xc2\xe2\x1c\x28\xd4\xf4\x54\x2e\x29\xb2\x80\x18\xb0\x2a\xb1\xb2\x09\x5d\x9f\x19\xc5\x2e\xfd\x30\x07\x36\x95\xbe\xf1\x2b\xd0\x39\x47\xb7\x7d\x12\x4d\xb5\x5b\xd1\x95\x3f\x01\x07\xb3\x06\xd9\x3f\x47\x3c\x2b\xd4\x31\x66\xb0\x2b\xbe\xea\x09\x80\x46\x9c\x41\xf9\x70\x03\x65\x89\xed\x01\x0e\x36\xd5\xf2\x33\xa8\x5a\x6e\xcf\xe3\xe6\x78\xac\x8a\x7f\xff\xd5\x8b\x7f\x03\x00\x00\xff\xff\x8d\x44\x2c\x65\x74\x20\x00\x00") + +func fontsNancyjUnderlinedFlfBytes() ([]byte, error) { + return bindataRead( + _fontsNancyjUnderlinedFlf, + "fonts/nancyj-underlined.flf", + ) +} + +func fontsNancyjUnderlinedFlf() (*asset, error) { + bytes, err := fontsNancyjUnderlinedFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/nancyj-underlined.flf", size: 8308, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsNancyjFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\xcd\x8e\xdb\xb8\x0f\x3f\x8f\x9f\x82\x48\x0b\xf8\xf2\xff\x0b\xe8\x02\xbb\xe0\xf6\x94\x47\xf0\x55\x37\xcb\x95\xd3\xc9\x36\x89\x8b\x89\xdb\xa2\x6f\xbf\x20\x45\xc9\xb4\xfc\x99\x99\xc1\xa6\x9d\x98\xb6\x25\x7e\xfe\x48\x51\xca\xe9\x72\xfa\xc3\x7d\x04\x84\xbf\xe0\xd3\x9f\xf0\xff\x4f\xf0\x77\x51\x3c\x3d\x3d\x3d\x01\xc0\xcd\xdd\xbe\xfc\xfe\xc7\x9c\x2e\xa7\xa2\x78\xa2\xdb\x6b\xeb\xc1\x9d\xfa\xf6\x05\xfa\xe7\x16\x2e\xdd\xd7\xf3\x0d\xba\x13\x38\xf8\xd5\x5d\xdd\x0d\x7e\x3d\x77\x00\xee\xfe\xad\xf5\x70\x6d\xa1\xef\xe0\xea\xbe\xb5\xf0\xdc\xbe\x80\xa3\xf9\xf7\xf3\x57\x03\xfd\xf3\xf9\x0e\xe7\x3b\x33\x38\x75\xb7\x1e\xfa\x67\xd7\xc3\x17\x77\x6d\xa1\xfb\xd1\x13\xb7\x73\x6f\x20\x0d\xbb\xfe\x86\xd3\xf9\xe5\xde\x17\xa4\x90\xeb\xfb\xf6\xfa\xbd\x07\xd7\x83\x83\xd3\xf9\xeb\xa5\xed\x99\xc7\xff\xe0\xde\xc1\xa5\x75\x3f\x5b\x92\xeb\x2e\xdd\xad\x35\x6c\x04\x84\xcf\x4f\x77\xfd\xfe\xfb\xe5\xe8\xbe\xdc\x4d\xf3\xc3\xb4\xfe\x47\x51\x7c\xfc\x08\xc7\x1d\x5f\xc7\xc2\x57\x70\x2c\x10\xe3\x17\xdf\x02\xc0\xb1\xe8\xba\x48\xd1\x17\xbf\xe1\x97\xf1\x1a\x44\xef\xbe\x46\x82\x1f\x05\x1e\x2c\x93\x3f\xf4\x0c\x91\xfe\x8f\x9f\xa5\x71\x6a\x2e\x28\x86\x1f\xf8\x3f\x1c\x0b\xe3\x69\x4a\x55\xc2\xb1\xb0\xf8\xa1\xeb\x3e\x74\x06\xe2\x00\x66\x55\x63\x1a\xa0\xa6\xc1\x94\x2f\x9b\x8a\x15\x00\x4b\x26\x75\x00\x01\x3c\x96\x7c\x63\xb1\x12\x3a\x4c\x21\x9a\x46\xd3\x0d\x3f\xa7\xd1\xc1\x18\x00\xb0\x58\x69\xce\x90\xdd\x08\x07\x6c\xd2\x2b\xac\x4b\x54\xbc\x9b\xe4\x2f\xac\xea\x34\x8c\xe4\xd4\xd8\x04\xbf\xd4\x6c\x55\x6d\x37\x04\xf9\x14\x60\x53\xe9\xb0\x66\x5f\xc7\x02\x1c\xb2\xd2\xd1\x60\x14\x7d\xe2\xd5\xa2\x09\xa3\x2d\xe9\x3d\x13\x65\x8b\xd8\x85\x07\x75\x1c\x10\x62\x99\xae\x46\x04\xa0\x5d\x84\x09\xbb\x9e\xc5\x36\x86\xa6\x19\xd6\x9f\x38\x22\xb2\x66\xc7\x82\xd8\x20\xb2\x3a\xc7\x82\x02\x8b\x08\xb5\x15\x49\x03\x83\x75\xe7\xab\x17\x61\x46\xd4\x13\x14\x10\x71\xfc\x74\x3c\x76\x0e\x3e\x73\xde\x1d\xd2\x2c\x05\x60\x61\xbe\x26\xc7\x1a\xac\x8f\xdd\xaf\x81\x0e\x37\x08\x88\xc5\x6d\xe2\xdb\x01\xf0\xa0\xe0\x2e\x54\x42\xc3\xaa\x06\x8e\xf4\x76\x20\x50\x32\x86\xc1\x40\xa1\xac\x20\x56\x1b\x5f\x06\xd2\x62\x59\x0a\x2a\x08\x56\x04\xc0\xb5\xdc\xe4\x22\x01\x2b\x17\x8f\x43\xde\xc5\x0b\x3f\x45\x82\x53\x78\x52\x87\x58\x38\xe7\x3c\x17\x05\x54\xb6\x19\xa1\x6c\xa8\x19\x4b\xe5\x67\x8e\x21\x10\xc3\xe4\xce\x3a\x45\xce\x84\xe2\xba\xc1\x50\xaa\x8e\xf8\x57\xdc\xe4\x9c\x1b\x00\x30\x47\xf9\x45\x86\x98\x
e4\x69\xf3\x1a\xe7\x9c\x99\x68\x08\x7b\x34\x94\x32\xbb\xc0\x10\xb1\x16\x86\xe4\x96\x90\xb3\xb5\x1d\xca\xee\x92\x0f\x93\xbc\xe4\xb9\x84\xc3\x04\xc3\x84\xc2\x01\x84\xcb\x1a\x72\x50\x2c\x97\xc9\x50\x6a\x1a\xe7\x05\x7f\x23\x0d\x05\x7d\xdb\x26\x2b\x86\x61\x31\x21\x86\x38\xf2\xa1\x37\x31\xca\xf5\x04\xc2\x79\x94\xf3\x3c\x9c\xcf\xcd\xf5\x81\xe3\x32\x12\xea\x63\x74\x60\x74\x9f\x54\x47\x60\x82\xc7\x59\x5c\x5b\x99\xc7\x59\xe7\xe4\xf3\xea\x92\x44\x5e\xe6\x47\xbc\x76\x85\xfa\x2d\x95\x9b\x0c\x08\x3e\xc2\x4a\x65\x69\x31\x06\x5a\xe3\xc2\xec\x12\xd4\x6a\x02\x84\xac\x84\x93\x2a\x96\x29\x38\x1c\x06\x1e\x5d\x97\x2b\x33\xa6\x63\x7d\x0a\x81\xf5\x22\x20\x96\x25\x1c\x4a\x94\xed\x68\xc9\x49\xeb\x1e\xa4\xb5\x2f\x47\xf5\x54\x40\x30\x21\x56\x24\x06\x6d\xec\x6f\xd8\xad\x54\x1d\x53\x25\x8d\x6f\x96\x6e\x60\x5e\x0e\x0b\x0a\x62\x82\xaf\x64\x95\x0a\xce\x72\x41\x92\x28\xca\x6f\x6a\x29\x58\x61\x98\x89\x9d\x17\x4e\x72\x60\x4e\xd0\xb2\xcb\x14\x3a\x06\x3a\xb8\x4c\x64\xec\x71\x99\x36\x44\xdb\x31\x78\x62\x8e\x36\x52\x8f\x70\xba\x72\x4c\x62\x12\x2d\x6d\x06\x1f\x84\x61\xe2\x2a\x99\xa3\xdf\xe4\x37\xfb\x5c\xf5\x66\x41\xd2\xc0\x6c\xf6\x30\x86\xc5\x18\xd8\x8a\x89\xad\x14\x8c\x25\x26\xa1\x27\x2e\x57\x5d\x26\x7a\xc4\x6e\x3d\x3a\x7f\x3f\x8c\x47\x0c\x56\x2c\xc9\xf6\x21\x73\x3b\x92\x51\xb1\xd2\xab\x9e\xae\x46\x13\x9a\xbd\x63\xa4\x49\xe7\xe5\x68\xb7\xc9\x62\x8a\xd4\x9b\xb0\x14\x57\x65\x32\x51\x92\x49\x43\x52\xcf\xdd\x12\x30\x49\x98\x7d\xf4\x1c\x00\x67\x12\xa9\x71\x46\xe7\x12\x97\x60\x95\x4e\xba\xc3\x58\xbb\xf5\x69\x27\x94\xd9\x94\x87\xf0\x0d\x19\xfc\x80\xeb\xe6\xf0\xbe\x26\x20\xe2\xbd\x4a\x78\xaf\xd6\x83\xff\x50\x35\x8d\x0c\xde\x96\xb9\x23\x53\xe6\x72\xc9\x37\x72\xc3\xd6\xd8\x58\xe5\x82\x39\x56\x6d\xc3\xe6\x05\xbd\x7e\x7d\x80\xb4\x0b\x9f\x89\xcc\x44\x90\x2c\x78\x8d\x6a\xa2\x0f\x65\x6c\x01\x55\xa7\x1c\xc5\x07\xa3\x75\xdf\xbf\x51\xbe\xfd\x08\xfa\x6a\x3f\xb6\x49\x6e\x6c\xd8\x26\x19\xbf\x01\x55\x8d\xaa\x7a\xcf\xca\xb6\x2e\x20\xae\x60\x90\x5a\x60\xa2\xb9\x64\xa5\x8c\x2f\xd7\xdb\x99\x2c\x4d\x67\x72\x9a\xdb\xad\x41\x12\x3b\x3f\x0a\x23\x51\xa8\xe5\x95\x14\x8f\x5c\xe4\x6c\xc6\x07\xcb\x58\x28\x7b\x25\x85\x93\x70\x95\xa0\xad\x93\x54\x25\xfb\x18\x51\x73\x31\x59\x61\xfc\xea\x60\xe7\x15\x74\x68\x27\x4d\xda\x6e\x98\x61\x9b\x11\x29\x48\x7b\x85\x1d\xd5\xb7\x9a\x39\x3a\x99\x5e\x75\x1b\xad\xda\x71\xdb\xc4\x47\xb5\x4d\x47\x40\x42\xd2\xb0\x40\xf2\x04\x26\x27\x3b\xc2\xc9\x96\x63\x74\x8a\xb1\x7c\x5d\xd4\x08\xb8\x08\x49\x7d\xc3\x52\x0a\x55\xcd\x1d\xf7\xa1\xdc\xb1\x15\x58\x0a\x86\x46\xd3\x16\xd8\x1e\xbb\xed\xd4\x67\x1e\xb7\xd2\x5f\x58\xa3\xfb\x8b\xec\x6b\xc9\x06\x33\x6c\xfe\x31\x00\x7c\x68\x35\xf4\x79\xe3\xe2\x36\x49\x75\x01\xa3\xe3\x14\x44\xbf\xce\x18\x51\x5a\x98\x6d\x9f\xae\x6b\x7c\x38\x24\xc6\xc6\xe4\x27\xa4\x2b\x8c\x55\xd2\x86\x8d\x28\x2b\x2c\xba\xbd\xce\x15\x3b\x34\xa6\x30\x7a\x7c\x54\x63\x13\x7b\x70\xf6\xf0\x21\x36\x71\xca\xe7\x8a\xc8\xcf\x0c\x07\xe2\x6d\x28\xa8\x86\x9c\x92\xc6\xdb\xc7\xb5\xee\x51\x14\xc0\xfe\xfa\x39\x1c\xdf\x2f\x9e\xf0\x6f\x0c\x1c\x4f\x59\x56\x95\xea\xe6\xb0\x52\x1d\x40\xb5\x9c\x26\xaa\x9a\x1d\x50\x4f\x32\xe1\xa1\xd6\x1f\xa6\xbd\x15\x39\xac\x31\x3e\xb9\xac\x0e\x7f\xa2\x89\xfc\x05\x5d\xe4\x6f\xbd\x67\x9a\x0d\xf7\x3b\x44\xe5\x2d\x38\xda\x91\x9b\x7b\x35\xce\xaa\x49\x55\x8e\x63\x3a\x44\xfa\xdd\x90\xaf\x57\xa7\x00\xa7\xc7\x7d\x9c\xe9\xf6\x6a\x1f\x5b\xae\x26\x9d\x19\xeb\xb6\xc7\xc7\x3e\x1e\x56\xb1\x42\xb2\xb0\x56\xfa\x89\x22\x7c\xf5\x58\x35\x51\xe8\x51\x98\x7a\x07\x14\x30\x63\x33\x30\x4e\x3d\xa6\x91\xad\x70\xdc\x17\xed\x5a\xa7\xb3\x9b\x51\x4a\x8d\x92\x0d\x91\x76\xa0\x06\x45\x1a\x39\xca\xe6\x1d\xfe\xce\xcc\xf3\x95\x89\x16\x50\x59\x49\x3f\x01\x79\x8e\
x28\x0f\x20\xac\x3c\x9e\x79\xfb\x7c\xbe\x5a\xc1\x17\x18\xab\xf3\x9f\xd0\xcb\x73\xd8\x0c\x7b\x40\x0d\x58\xd5\xd8\xa4\x5f\x32\xf0\x73\x78\x6f\xba\xf4\x03\x1c\xf8\xd4\xf2\xc6\xb7\xc0\xe7\x1b\xcd\x62\x1b\x98\xea\x2c\xf7\x6c\xd9\xad\xaa\xb3\xe2\xd6\x70\xff\x39\xe2\xd9\xa0\x8d\x39\x83\x4d\xf6\xd6\x4e\x00\x34\x92\x0c\x86\xd2\x0d\x8c\x67\xb1\x07\x38\xf8\xd5\xad\xde\xa3\xf4\xb1\xc8\xfe\xfd\x57\x0f\xfe\x0d\x00\x00\xff\xff\x77\xef\xd1\x81\x5c\x20\x00\x00")
+
+func fontsNancyjFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsNancyjFlf,
+		"fonts/nancyj.flf",
+	)
+}
+
+func fontsNancyjFlf() (*asset, error) {
+	bytes, err := fontsNancyjFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/nancyj.flf", size: 8284, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsNipplesFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...") // gzip-compressed fonts/nipples.flf (payload elided)
+
+func fontsNipplesFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsNipplesFlf,
+		"fonts/nipples.flf",
+	)
+}
+
+func fontsNipplesFlf() (*asset, error) {
+	bytes, err := fontsNipplesFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/nipples.flf", size: 8422, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
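Every accessor above delegates to bindataRead, which this hunk references but does not define (it sits near the top of the generated file). The \x1f\x8b\x08 prefix on each byte slice is the gzip magic number: the fonts are stored compressed and inflated on demand. A minimal sketch of the helper, assuming the stock go-bindata codegen that this file's layout matches (the package name is an assumption taken from the vendored go-figure module):

    package figure // package name assumed

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io"
    )

    // bindataRead inflates one gzip-compressed asset; name only labels errors.
    func bindataRead(data []byte, name string) ([]byte, error) {
        gz, err := gzip.NewReader(bytes.NewBuffer(data))
        if err != nil {
            return nil, fmt.Errorf("Read %q: %v", name, err)
        }
        var buf bytes.Buffer
        _, err = io.Copy(&buf, gz)
        clErr := gz.Close()
        if err != nil {
            return nil, fmt.Errorf("Read %q: %v", name, err)
        }
        if clErr != nil {
            return nil, clErr
        }
        return buf.Bytes(), nil
    }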
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x59\xfb\x8f\xdb\xb8\x11\xfe\x9d\x7f\xc5\x87\xc5\xa2\x91\x50\xed\x2a\x9b\x34\x38\x24\x58\x2f\x9c\x7b\xa5\x77\x6d\xef\x7a\xbd\x6b\xd0\x16\x42\x18\x5a\xa2\x6d\xdd\xca\x92\x2b\xc9\xd9\xdd\x83\xfe\xf8\x82\x4f\x51\x2f\xaf\xbc\xe9\x19\xb6\x44\x51\xe4\xcc\x70\xf8\xcd\xc7\x21\xbd\xce\xd6\x2f\xd8\x39\x5e\xe3\x0b\x5c\xbd\xc4\xd5\x2b\xbc\x7c\x4e\x7e\xf8\xe5\x5d\xc9\xf9\x2d\x56\x0f\xf8\xb2\x3c\xc4\x1c\xdf\xb3\x5b\x7e\xc7\x1e\x2e\x2e\x56\xac\xe2\x09\x8a\x1c\x3f\xd7\x2c\x4f\x58\x99\x88\x36\xef\x32\x9e\xe7\xf8\x6a\xcb\xf6\x7b\x9e\x65\xf8\x03\xbe\x63\xf2\x31\x25\x3f\x1d\x78\x55\xa7\x45\x5e\x81\xe5\x09\xe2\x62\xb7\xe3\x79\x5d\xa1\xe4\x1b\x56\x26\x69\xbe\x41\x5e\x6f\x84\xa6\xcb\x75\xb6\x46\x5d\x60\xbf\xfa\x55\x69\x5a\xe6\xfc\xb0\x63\x79\x7e\x79\xb8\x63\x35\x2f\xb3\xa2\xb8\x8c\x19\xf9\xb6\x28\xb1\x4e\x37\x19\xaf\x51\xf2\x8c\xb3\x8a\xe3\xc5\xe5\x73\xf2\x35\xab\xf9\x1b\xe0\xea\xf5\xeb\x3f\xe1\xed\xbe\xc4\x0b\x42\xbe\xb9\xdf\x67\x2c\x67\x42\x37\x8a\x35\xd6\x69\x59\xd5\xc8\xd2\x9c\xbf\x21\x62\xc0\xb8\xc0\xd9\x8e\x6d\xd2\x18\xf9\x61\xb7\xe2\xe5\x19\xd6\x52\x74\xc6\x91\x26\x3c\xaf\xd3\x75\x1a\xcb\xce\x84\x01\xc0\x05\xaa\x6d\x71\xc8\x12\xb0\xec\x8e\x3d\x54\x58\x71\x7c\x64\xcf\x02\xd9\x29\x2f\xee\xc8\xb9\x6a\x54\x6f\x39\xce\xb6\xac\x4c\x56\x19\xcb\x6f\xcf\x70\x71\x81\x7d\x99\x8a\x01\xb3\x0a\x0c\xb2\x36\xc0\xea\x50\x23\x66\xf9\xb3\x5a\x88\xa9\x76\x87\x6a\xcb\x13\xf2\x5a\x49\xd8\xf2\x74\xb3\xad\x85\xc5\x0c\xf1\x96\x95\x2c\xae\x79\x49\xbe\x38\xf2\x32\x40\x5e\xd4\x48\xf3\x38\x3b\x48\x87\x26\xbc\x8a\x79\x9e\xf0\xb2\x22\x57\x2f\x65\xb7\x1d\xbb\x97\x23\x47\xc6\xf3\x4d\xbd\x85\xc7\xef\x4d\x63\x3d\x21\xf2\x75\xe5\xe3\x8f\x60\x58\x1f\x92\x0d\xc7\x9a\xc5\x75\x51\x92\xab\x57\x52\x42\xc2\xd7\xec\x90\xd5\xca\xd8\x5d\x91\x70\x39\xf0\x7a\x9b\x56\x58\x17\x79\x0d\x2f\x4b\x6f\x39\xce\x2e\x76\xb8\x7a\x75\x26\xc0\x21\xe4\x8a\x09\x17\x72\x7d\xf2\xf2\xb9\x94\xa2\x3c\x2d\xcc\xef\xa8\x25\x44\x61\xed\x96\x3f\xac\x0a\x01\xa8\x1d\xdb\x57\x6f\x08\x79\x9b\x65\xa2\xae\x12\xcf\x02\x1a\xf5\xb6\xa8\x38\xd2\x1c\x95\x41\x1e\xbf\x8f\xf9\xbe\x7e\x43\x08\xcb\xf6\x5b\x31\x4f\x0b\xbc\x0d\xc4\x3d\x63\xbb\x55\xc2\xc4\xf3\x5f\x83\x0c\xc0\x7e\x9b\xc2\xf9\x2c\xf0\x6d\xb0\x06\x50\x16\x87\xcd\x16\xab\x92\xb3\x7a\x2b\xbc\x81\x05\x3c\xb2\xe2\x35\x53\x8d\xbe\x0c\x56\x00\x76\x07\xd3\xe9\x6f\xc1\x0e\x40\xdc\x97\xf5\x55\x10\x03\xa8\x76\x45\x51\xbb\xc2\x16\xf0\xc9\x86\xed\x76\xca\xae\x77\xc1\x06\x40\x6e\x65\xfd\x10\xe4\xc2\xae\xaa\x27\xeb\xdf\xc1\x03\x00\x16\x1f\x6a\x0e\x16\xc7\xc2\x47\xea\xc5\x33\x92\xf0\xac\x56\xb2\xbe\x0e\x12\x00\xf7\xa9\xe9\xf4\xaf\xe0\x1e\x40\xb1\xe3\x1b\xe6\xca\xba\x0b\xee\x00\x6c\x4a\xf6\xa9\x27\xeb\x23\xe1\xfb\x2a\xcd\x8a\x1c\x0b\x7c\x13\x70\xd9\x37\x8d\x4b\xf9\xfc\x63\x50\x88\xc1\xc4\x65\xba\xaf\x05\x9c\xb5\xca\xef\x65\xd7\x24\x65\x25\xaf\xd2\xca\x51\xf2\x81\xfc\x66\xfd\xf5\x9f\xe0\x37\x31\x26\x6b\xd7\xdf\x83\x7d\x2b\xcb\x4c\xc1\x02\xbf\x12\xdd\x03\x0b\xfc\x39\xd8\x8a\x79\xd8\x16\xfa\xf9\x1f\x41\x09\x60\x9d\xe6\x2c\x43\x95\x6e\x76\x5a\xf2\x7b\xd2\x9a\xf2\x53\xf0\x5f\x21\x55\xbf\x5c\xe0\xe7\xa0\xb2\xa3\x5f\xb1\xd2\x28\xff\x44\xd2\xc2\x18\xf6\x5d\x20\x74\xd7\xec\xa0\xdf\xfd\x12\x08\x57\x94\x2c\x15\x54\xb6\xe7\x65\x5a\x24\xa2\xfa\x0d\xb9\x65\xfb\xbd\x92\xfa\x97\xe0\x16\xc0\xc1\xfa\xe9\x9f\x81\xe8\x2c\x45\x56\x87\x95\x1e\xd3\x02\x94\x90\xf3\xe5\xd4\x77\x49\x00\x2c\x09\x28\x96\xa4\x41\x63\x2e\xb4\xd1\x75\x1e\xf5\x4d\x13\x40\xb5\x36\x1d\xd4\x7b\xf8\x1e\x44\x93\xf7\xc0\x7b\x38\xef\xdb\xfb\x39\x86\xf5\xa6\xd0\xd6\x52\xfd\x13\xa2\x1b\x34\xf2\x27\xf5\x9b\x77\xfe\xd1\x37\xba\xbe\x69\x75\x74\x14\xba\x5a\xa
9\x79\xa1\x24\x89\x62\x28\xeb\x7d\x39\x1e\xcf\xd6\x46\xb2\x65\x64\xdb\xfa\x72\xa0\x4a\x6f\xa8\x44\x4b\x8d\xae\x96\x76\x64\xca\x47\xa0\x54\x3a\x11\x21\x42\xf9\x2a\x84\xea\x2a\xee\x4a\x75\x28\x1d\x19\xd2\x10\xc6\xd9\x1d\xcb\x1d\x5f\x49\x43\xa8\xf6\x52\x28\x6d\x93\x76\xe2\x47\x2b\x8c\x22\x0a\x23\x39\x0c\x7a\x03\x5c\xcb\x51\x50\x1a\x46\x61\x57\x8a\x2b\xda\x5c\xc2\x70\x49\xc2\x10\x9d\xba\xce\x45\x5c\xdb\x9f\xe7\x75\x9f\xf5\xaf\x5b\xe1\xfb\x93\x8d\xba\x38\x11\x36\x0a\x57\x45\xe2\x51\x18\x22\xf0\x15\x89\xf7\x51\x38\x82\x9f\x01\x8e\xba\x05\x17\x48\x16\x2a\x16\x26\x98\xe8\x35\x74\xc9\x79\xd7\x07\x1a\xf0\x4b\xd2\x84\xbd\x80\x18\x17\x49\xc5\x47\xea\x17\x9f\xde\xdc\x9e\x3f\x51\xff\x68\x40\x1a\x01\xc2\x85\x50\x28\x9b\x44\x1b\x34\xda\x8e\xe8\xef\x80\xcd\xe0\x4a\x31\x04\x0c\x4b\xd8\x02\x95\x05\x85\xb3\x63\x23\x92\x28\x17\x4d\x2f\x5a\x9e\x69\xc9\x66\x6c\x44\x54\xeb\x17\xfe\x33\x71\x48\x55\x14\x86\x50\xda\x84\x7e\xe9\xe3\x66\xcc\xc7\xc3\x88\x6c\x67\x04\x3d\xd7\x78\x46\x07\xd5\x3a\xe4\xac\x4d\x8f\xc8\xa9\xd3\x70\x13\x82\x1a\xed\xaf\x42\x21\x2f\xa4\x2d\xf4\xa4\xcb\x1c\xda\x98\x8e\xc8\xde\xf0\x1b\x35\x6c\x3d\x58\xe8\xc1\x9e\x6c\xac\x8a\x0c\x8a\x3e\x20\xb4\x8e\x67\x66\x8a\x3d\xed\xe2\xe3\x13\x3a\x02\x71\xc8\x6e\x82\x1b\x65\x37\x4f\x53\xea\xd3\x41\xa7\xc9\x57\xf1\x9b\x03\x43\xef\x34\x1b\x07\x30\x56\xfd\x35\x68\x03\x34\xdd\x50\xa1\xe1\xcc\xc0\xec\x85\xe3\x79\x9f\x2b\xa7\x82\x77\x92\x46\xf4\x0c\xe9\xf9\xd1\x9e\x5b\x92\x6b\x5c\x6b\x9e\x57\x31\x10\x29\x62\x6c\xad\x1b\xa7\xa1\x21\xfb\x1c\xe5\xa3\x51\xff\x09\x62\x96\xb7\xc8\xae\x35\xca\x86\x1b\xdc\x58\xfb\x5c\x8f\xf5\x2c\x52\x4a\x75\xc0\x45\xed\xc0\x44\x00\xc8\x99\xd1\xa1\xe7\x4f\xf4\x77\xdd\x42\x5b\xec\x5a\xf8\x2b\x68\x49\x1a\xf1\xe0\xc1\xf3\xa1\xc9\x48\x22\x83\x86\x54\xf9\x4b\x7e\xc2\x81\xcc\x59\x60\xb1\x0c\xd7\x48\x73\x3b\xe4\xa7\x72\x8e\x19\x21\x62\xc2\xd8\x4a\x94\xf0\x95\x35\x5e\xa7\x66\x4e\x18\x53\x93\x57\x88\xc9\x50\xfe\x8c\xf0\x49\xc3\xf7\x46\x83\x25\xc4\x07\xa9\x4c\x4c\x4e\x0b\x98\x71\x16\xeb\xb9\x43\xad\x20\x91\x2d\x87\x32\xd1\x30\xb4\x61\x90\x20\x98\x83\x46\x5a\x87\xfe\x44\xb3\x5c\x6c\x60\x28\x79\x8d\xfa\x6a\xf8\x30\x2e\xf2\x71\x22\xad\xbb\x2c\xac\x66\xb0\x9b\xd7\x99\x90\xb7\x19\x5c\x74\x3c\x83\x23\xf3\xad\x3e\xd7\x56\xb7\x05\xd5\x58\x09\x7d\x94\x3b\x01\x93\x77\xf7\x16\xd3\x27\x40\xad\x8d\x34\x28\x1e\x16\x1d\xfb\x37\xcf\xf1\xe5\x58\xa4\x19\x4f\x52\xea\x4c\x3e\xb5\xb3\x2f\x78\x53\xa7\x54\xb2\xd6\xac\x6d\x6a\xb6\xcc\xf2\xa6\xc2\x2d\xec\x21\xa0\xf7\x30\xf0\x03\x55\x83\x95\x70\x6e\xd0\x28\x66\x11\x5e\xbc\xd6\x9e\x55\x50\x13\x7e\xf8\xbd\xe0\x0c\x44\x36\x64\x80\x8e\x9a\x49\x38\x6b\xc7\xab\x95\x55\x1a\x1c\xc9\x34\x40\xd9\xfe\x09\x66\x16\x23\x1a\xda\xc4\x09\xed\x94\xa2\x33\xa9\x47\xc0\x67\xa1\x12\x19\xa8\x20\x32\x85\x56\x09\x8c\xdc\xe8\x51\x56\xfa\x3f\xe6\x76\x8e\x1f\xec\x0a\xa3\xe3\xd1\x00\xd1\x41\xe1\x48\x51\x6f\xdd\x1e\xf3\x83\x31\xdb\xda\xdd\xd2\x69\x1b\x36\x36\x6e\xa8\x63\xbb\x6b\xfc\x9c\x6c\x6b\x48\xd3\x26\xd9\xc4\xfc\xf8\xb6\xce\x88\x0c\x63\x48\x7c\xc1\xac\x9f\x2d\x8b\x5a\x0a\x9d\x9f\xbe\xb6\x1b\x1a\x93\x54\x8e\x14\x26\xf6\x3a\x53\xe0\xf5\xa8\x04\xaf\x92\xab\x56\x14\xcd\x91\xb0\x1c\xe9\x16\xe7\x32\xe7\x48\x41\x8f\x43\x2d\xe5\xbe\xca\xc8\xcc\xa6\xdb\xa4\xfa\xd0\x39\x9e\x78\x15\x4e\x64\x04\x68\x33\x02\x1d\xc7\xd2\x40\x07\x63\xa6\x2c\x96\x2a\x99\xec\x2a\x16\x6c\x9a\x8e\xbb\xe7\x2c\x57\x4e\xd6\x64\x30\x28\xf9\x74\xa2\xcd\x71\xa7\xc3\x1c\xa0\x34\x68\xd4\x57\x06\xb7\x64\xbe\x66\xb0\x46\x3d\xc1\xe9\x0a\x7d\x26\x31\x9f\xdc\x0d\x3e\x0e\xbe\x76\xd5\xf3\x75\x04\x4c\xdf\xcc\x8a\xed\x0f\x16\x17\x9d\xb5\xd8\x54\xd2\x26\x93\x4a\x9b\x61\x61\xd8\x04\x13\xd3\x34\xdf\xc6\x40\x
a3\xf1\x3e\x75\x53\x14\x20\xe7\x7b\xc4\x22\xcb\xaa\x22\xff\x1c\x3f\x7b\x99\x53\x58\x8e\x9d\x70\x74\x7e\x4d\xd3\xd9\x0f\x44\x6a\x57\x13\xb9\x75\xdd\xcb\x28\x59\xf5\x68\x50\xef\x14\x42\x79\x42\x12\xca\x10\xf2\xc5\x6a\xa9\x62\x28\xec\xac\x5e\x13\x94\x37\x9a\xeb\xb6\x99\xe9\xf5\x3c\x12\x3c\xea\xa4\x27\xe5\xaa\x5d\x41\x4e\xda\xef\x77\xc0\x72\xfa\x76\x70\x70\x53\x50\x12\xf4\xe1\x2f\xc9\x8d\x52\x10\x1d\x4f\x92\x96\xdd\x64\xd3\x09\xce\xd3\xf2\xce\x41\x4c\x1f\x07\x9b\xc5\xaa\xf4\xa3\xdf\xf1\xe3\x14\xf5\x3f\x26\x91\xf6\xb7\xfe\x6e\xb2\xa9\xb7\xc5\x4e\x81\x6a\x18\xa3\x77\xa5\x86\x04\xcc\x15\x91\x71\xa0\xbe\x4e\xf0\x55\x9f\x12\xad\x41\x8d\x4a\xc6\x5a\x76\x3c\xc1\x7d\x23\x77\x6a\xf3\x4b\x9d\x5e\xaa\x38\x69\x68\xe3\x44\x89\xbd\xcf\xe5\xac\x1b\x0b\xc3\xd9\x3b\xad\xa3\x33\x3b\xb6\x0d\xb8\xa4\x01\x6d\x8e\xc4\xdc\x9c\xc1\xea\x54\x9a\xd2\xb1\x83\xd4\xc7\xd7\xe9\x27\x86\xda\x34\x77\xb9\x49\xa2\x39\x34\x52\xf3\x0c\x95\x0c\x76\x73\xc1\x53\xb8\x6b\xb0\x79\x9a\x9d\xbf\xce\x1b\xfe\xe7\xb2\xa1\x9b\xf8\xe0\x52\x27\x3e\x3e\x7c\x43\xda\x93\xeb\xcb\x34\x77\xf5\xb6\x7b\x6d\xe8\x0d\xb9\xeb\x44\xf4\xcd\xca\xfa\x3b\x51\x3c\x7c\x30\x07\x03\x76\xba\xcd\x31\xa1\xd0\x22\xc2\xa5\xd5\xf4\xa1\x97\xa6\x0f\xa4\xcd\xd7\x6a\x73\x5a\xd8\x3f\x58\x22\x3b\x7f\x27\x6b\x75\x49\x40\xcb\xbc\xd1\x07\xb2\x9e\xfa\x37\x48\xd5\xc8\x83\x8b\xd1\x44\x16\x3a\x91\x85\xd7\x4b\x64\x8f\x91\xe1\x67\xe5\x87\x1d\x0e\xd3\x28\x96\xc6\xfb\x7a\x29\xed\xfe\x1d\x60\x0f\x96\x1f\xb1\xda\x3d\x9b\xec\x1c\x2c\x98\x23\xca\xee\xa1\x83\x7b\x52\x39\xfc\x8f\x71\xec\x62\xfe\x0a\x18\x9c\x3b\xf6\x04\xab\xe3\xc7\x4e\x65\xe7\x14\xb2\x0d\x81\x30\x0a\x85\xe0\x30\x0a\x87\x11\x34\x7d\xeb\x26\x87\x54\x25\x87\x9f\x7b\x22\x78\x54\xe2\x93\xf6\xde\x83\x04\x76\x54\xd0\xe9\x71\x3d\x9a\x78\xb8\x56\x7f\xd4\x27\xae\xad\xc4\xe0\xf1\x13\x87\xa3\x12\x9f\x74\xb0\x3f\x22\x71\xe8\x87\x93\x6c\x1c\x3b\x15\xd1\x8c\xdf\x5c\x9b\x8c\xd8\xd6\x34\x6a\x0d\xe8\x9f\x06\x2c\xc9\xff\x02\x00\x00\xff\xff\x3f\xc5\x21\x35\xb0\x24\x00\x00") + +func fontsNtgreekFlfBytes() ([]byte, error) { + return bindataRead( + _fontsNtgreekFlf, + "fonts/ntgreek.flf", + ) +} + +func fontsNtgreekFlf() (*asset, error) { + bytes, err := fontsNtgreekFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/ntgreek.flf", size: 9392, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsO8Flf = 
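Each fontsXFlf constructor wraps the inflated bytes together with a synthetic os.FileInfo; asset and bindataFileInfo are likewise declared once earlier in the generated file, so only their uses appear in this hunk. A sketch of the standard go-bindata declarations, under that assumption. Note that size records the uncompressed length of the embedded file, and os.FileMode(436) is simply decimal notation for octal 0664:

    package figure // package name assumed

    import (
        "os"
        "time"
    )

    // asset pairs an inflated payload with its recorded file metadata.
    type asset struct {
        bytes []byte
        info  os.FileInfo
    }

    // bindataFileInfo replays the metadata of the file that was embedded;
    // it implements os.FileInfo without touching the filesystem.
    type bindataFileInfo struct {
        name    string
        size    int64 // uncompressed size, e.g. 9392 for fonts/ntgreek.flf
        mode    os.FileMode
        modTime time.Time
    }

    func (fi bindataFileInfo) Name() string       { return fi.name }
    func (fi bindataFileInfo) Size() int64        { return fi.size }
    func (fi bindataFileInfo) Mode() os.FileMode  { return fi.mode }
    func (fi bindataFileInfo) ModTime() time.Time { return fi.modTime }
    func (fi bindataFileInfo) IsDir() bool        { return false }
    func (fi bindataFileInfo) Sys() interface{}   { return nil }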
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x59\x4b\x8f\xdb\x36\x17\xdd\xfb\x57\x9c\x85\x16\xdf\xb7\xa8\xd1\xa4\x68\xc0\x00\x69\xe0\xa2\x8b\xa0\x40\x91\x6e\x82\xee\x35\x36\xfd\x40\x3c\xba\x03\x3f\xd0\x4e\x7f\x7d\x21\x3e\xee\x43\x22\x65\x79\x66\x84\x44\x22\x87\xd2\xe5\xe1\x7d\x1e\xd2\xdb\xe3\xf6\x7d\xdb\xe0\x03\x7e\xc6\xbb\x0f\xf8\x11\x1f\x17\x7f\x9e\x0e\xbb\x43\xd7\x1e\xb1\xa5\xee\x82\x87\x67\x7c\xa1\xd3\x86\x3a\xfc\xe1\x3d\x3e\x7d\xfc\x69\xfd\xb8\x3b\xae\x7c\xb7\x5b\xae\xdb\xc7\x65\xbb\x5e\x5e\xbf\x7f\x5e\xfc\x72\xf7\xa5\x66\x39\xec\x8e\xfe\x72\xf8\xb7\xbd\x1c\xa8\xeb\xa7\xfb\x46\xdd\x33\xbe\x5e\x77\xbe\xbb\xe0\x7f\xdf\x96\xb1\xb5\x3a\xaf\x2f\xcb\xdd\x75\xe9\x37\xd7\x65\x7b\xfd\xff\xe2\x2f\x7f\x3a\xf7\xef\xbf\x03\x7e\xc0\x57\xef\x37\x67\x9c\xfc\xf6\xd0\x1d\xba\x1d\xfe\x3e\x5c\xf6\xd8\xb7\xa7\xcd\xc3\xb1\xed\xbe\x9f\xd1\x76\x1b\xac\x8f\xbe\x0d\x83\xd7\x27\xd0\x16\x67\x7a\xf4\xa0\xed\xe2\xb2\xf7\x58\xef\xdb\x53\xbb\xbe\xf8\xd3\x79\x09\x7c\xdb\x7b\x1c\xdb\xf3\x05\x1f\xc2\xdf\xcf\xe8\xbc\xdf\xe0\x42\x78\xf0\xd8\x50\xe7\xf1\x74\xa2\x27\x7f\x3a\x3e\x2f\x5f\xb2\xe8\x8c\xfa\x7d\xbf\xce\xdf\xdb\x0e\xbf\xed\xdb\x03\x3e\x9d\x9f\xfc\xfa\x72\xf2\xab\xeb\xe1\xba\xee\x57\xf8\x19\xbf\x6e\x36\xfd\xb4\x7b\x2f\xeb\xc0\xf6\xf0\x4f\xbf\x94\x80\xf9\x8b\x3f\x3d\xb6\x9d\x86\xfe\x12\x38\x40\xb3\xaa\xde\x56\x0b\x10\x35\x58\x2d\x9c\x73\xae\xe1\x07\x9c\xeb\xff\x98\xc6\xd0\x5f\xab\x30\x08\x7e\x2d\xb5\x90\xae\x1b\xad\x20\x8b\x80\xfe\xde\x8b\x24\xe7\x1c\x51\xb8\xc7\xe9\x1c\x10\x24\xde\x18\x83\xba\x56\xa1\x4b\x14\x50\x22\xc2\x75\x2e\xfc\x8b\x2b\x4a\xbd\xbc\x86\x80\x19\x01\x35\x39\xfd\x66\x1e\x6b\x18\x69\x90\xdf\x43\x5d\x2d\x1c\xe0\x00\x0a\xa3\x01\x4d\xdf\x66\xc5\xf4\xed\x7e\x3c\x0a\x75\xe1\xbb\x32\x52\x0a\x57\x13\xf5\x12\xd6\xe4\xd2\x8c\xbd\xd0\xf0\x2d\xb1\x6e\x11\xa1\xc5\xb1\x30\x98\x94\x31\x10\xca\xd6\x4a\xf3\x8b\xb5\xec\x23\x41\xcd\xef\x46\x04\x0d\xd4\x53\x14\x1c\x66\x0a\xea\x1b\x0d\x44\x83\x3b\x23\x28\x69\x86\xd5\x96\x35\xd3\x2b\x3e\xc9\xee\x4d\x19\xae\xb8\x04\x4a\x46\x09\x16\x4b\x8b\x1d\x68\x2c\x9a\x16\xa2\x30\x80\xf5\xd5\x0b\x4c\xee\x11\x2d\x64\xc7\xc6\xbd\x81\xd0\xb1\x8a\x92\x18\xad\x45\xfd\xa5\x6d\x53\xbe\x9a\xfa\x3b\xa3\x39\xed\x3d\x39\x16\xc7\x8f\x9a\x8d\x74\x50\x69\x83\x12\x3b\x0b\xb7\xb3\x02\x1b\xc1\x26\x9d\x64\x73\xf6\xba\x68\x85\xf0\xad\x23\xf6\x32\x48\x3c\x53\x1a\x6c\xb2\xf1\xd8\xe5\x0b\x2a\xe4\x98\x22\x15\x97\xc5\x26\x87\xf1\x14\x2e\xe4\x6f\xf2\x7b\x64\x05\xf4\x8f\x26\xf9\x51\xb4\x7d\x21\x16\x4a\x42\xd5\xe4\xc9\x01\x63\x62\xe0\x39\xe6\x2c\x16\x7a\x30\x60\x48\x3d\x4a\x59\x10\x9c\xb4\xc2\xad\x11\x93\x8d\x1c\xbb\x17\x4a\xe2\x42\x1c\x80\xd9\x82\x4e\x42\x25\x3a\xca\x7c\xa4\x83\xe5\x57\x85\xde\xb1\x7c\x22\xed\xed\x8e\x75\x2a\x8b\x72\xfa\x43\xa7\x7c\x74\x2a\x06\xc7\x5e\x09\x95\xec\xe3\x95\x54\x43\x73\x97\xaf\x65\x32\xd0\x2c\x92\xc8\x69\xef\xb2\xce\xd5\x0c\x41\xae\x4a\x71\x5a\x8a\x59\x93\x37\x4a\x59\x64\xa1\xe5\x65\xaf\x55\x59\x91\x15\xc4\xc9\x56\x60\x0f\x9d\x30\xd9\x12\xe5\xfc\x33\x3d\x62\xd6\x16\xe7\xca\x03\x7a\x66\x99\x9a\xb5\x4c\x63\x2b\xae\x24\xa1\x1b\x75\xeb\x00\x26\xed\x17\xd6\x13\x6c\x4e\x9f\x16\x4a\xa4\xf9\x06\x34\xfb\x88\x35\xb3\xc9\x15\x32\x64\x85\x32\x41\xe8\x73\x87\x52\x84\x41\xe3\x00\x71\x80\x9c\x5a\x92\x71\xa8\x90\xbc\x4a\x01\x0c\x15\x17\x82\x46\xb0\xc9\x98\x10\x9b\x22\xd2\x2c\x34\xbf\x9a\x94\xda\x14\x72\x44\x26\x28\xe3\x09\x2b\x48\xb9\x94\xc3\x04\x86\xc5\x5d\x40\x7a\x3b\x2b\xc8\x87\xca\x18\x42\x6c\x1c\xb8\x5c\xc8\xf2\x0b\xe9\xfb\x3e\xa1\xa6\xe4\x65\x7f\x9e\xd0\x29\xf3\x4a\x11\xca\xd0\x72\x60\xd3\x78\xc2\xba\x4e\x31\x40\xea\x8a\x1f\x8e\x69\xed\xa4\x4b\x29\x56\x35\x7c\xda\xcf\x64\x69\x4d\x0
a\x62\xe1\xb1\xd5\x96\xcb\x41\x19\x67\x93\xb5\x23\x13\x03\x76\x12\x15\x23\x8e\x09\xda\xbc\x25\xb0\x25\x8c\x95\xc6\xbd\xd8\x26\xf1\xb5\x72\x55\x4f\x48\x0d\xd6\x60\x29\xd1\x4f\x64\xc8\x62\x39\x64\xcc\x0d\xc7\x71\x5c\xde\x10\x73\x45\x15\xe1\x03\x2b\x5c\x44\xc3\x39\x2d\xb8\x46\xf6\x27\x4a\x9c\xcd\x96\x77\x94\xb8\xfb\xb2\xce\xbd\x11\x32\x13\xa9\x9b\x42\x1a\x36\x2d\x2f\xc8\x8f\xd6\xbd\x92\x98\x5a\x85\x27\x37\x66\x4d\x5c\x02\x0c\xdd\xe2\x54\x46\xd3\xb1\xac\x08\x8e\xf8\xce\xcd\x6d\x05\xab\xb5\x92\x20\x50\x4d\x65\xd5\x5e\xa6\x2a\x55\xeb\xd7\x84\x8a\xc7\x73\xc4\xcf\xa2\x62\x85\xe8\x8a\x7b\x61\x25\x18\x2e\xef\x8d\xb3\xe8\x01\xcf\xeb\xff\x17\xc4\x97\x50\x0f\x71\x0e\x8a\xb1\x61\xd3\x37\xca\xef\x5c\xa1\x77\x19\xcd\x78\x82\x25\xb7\xce\xb0\xb5\x5c\xd5\x24\x7b\x15\x8b\x5a\x4a\xa5\xec\xae\xb7\x1a\xe1\x65\xc5\xd0\xf8\x74\x40\xe5\x4e\xc0\x12\x38\xee\xeb\x30\xb4\x61\x90\xf6\xf3\x52\x78\x6f\x36\x04\x4b\x74\x0f\xcd\xdf\xd4\x6c\xe4\x84\x3e\xa6\x03\x95\x26\xb1\x46\xb5\xdf\xd7\x1a\x19\x78\xc8\xeb\xfb\x64\x0c\x57\x28\xa0\x86\x4f\x8f\x9e\x03\x93\x0d\xf2\x61\xea\x39\xc3\x18\x64\x82\x30\x84\x9a\x8b\xda\x7a\x57\x62\x61\x77\xf2\xae\x29\xa4\xa3\x5c\xad\x1d\x08\x93\xb4\x33\x5c\x1c\x4c\xf5\x05\x0f\xb7\x7f\xe6\x90\x6e\x12\x29\x49\xea\x8e\x75\xa4\x9a\xc7\x31\x11\x4c\x92\x0f\xd2\x36\x5b\x55\x3a\xdd\xd2\x4e\x79\x4b\x77\xe4\x1a\xb3\x5d\x84\xda\x2f\x52\x33\xde\xdb\x68\x7a\x37\xcb\xca\xf7\x33\x41\xb5\xf7\x22\x39\x4b\x73\xb6\x31\xf8\x56\x9f\xef\xe9\xb4\x58\x7a\x52\x15\x3c\x0c\xa9\x25\xa3\x5a\x37\x93\x03\x56\xe8\xeb\x04\x8d\xb5\x96\x89\xcc\x1a\x56\x8d\x5c\x73\xc6\x7d\x4a\x48\x4b\x78\x0a\x86\xcf\xc4\xfd\xb5\x36\x2a\x7b\x53\x39\x12\xb5\xff\xdc\x3e\x6b\x12\xa4\x34\x46\x5a\xcc\x19\x30\x39\x83\x34\xf7\xab\xfb\xfd\xf8\x3c\xa7\x16\xde\x8d\x2e\x2c\xb1\x94\xcc\xd0\xa9\xc2\x36\x9b\x89\x1a\xa4\x43\xb6\x47\x35\x7e\xc7\xe9\xa4\x42\xc4\xf5\xf1\x9c\x20\xd2\x0d\x14\x82\x69\x68\x0a\x60\x40\x67\x8b\x4e\x03\xe4\xd3\xfa\x9b\x4e\x43\x85\x2d\x98\x11\x53\x65\x1f\x55\xa1\x0a\xab\x15\xac\xe3\x07\xea\x84\x8d\x69\x97\x3e\x05\xb9\x07\xb5\xca\x13\x2b\x3e\x0f\x25\xc5\xe0\x4a\xdb\xae\xd7\xab\x22\x63\x25\x4e\x52\x13\xf1\x23\xc7\x61\xce\x58\x9c\x8f\xd0\x49\x79\x57\xa5\xe8\x68\x57\x96\x10\x8b\x7e\x39\x2c\x3f\x51\x78\xa6\x5c\xcd\x8d\x7b\xa4\x5a\xa4\xd8\xb5\x6c\xda\xe5\x2c\xce\x99\x6d\x3e\x1f\x41\xe7\x42\x90\x5f\x49\xaa\x2a\x15\xfa\xf9\xbd\xb4\x0b\x20\xbd\xb5\xb5\x34\xfd\x45\x67\x66\x90\x4d\xfe\x1b\x6e\x89\xfb\x31\x71\xb2\xb7\xd9\x69\x61\xf0\x33\xde\x9b\x10\xc2\x29\xa1\x2f\xaf\x18\xfc\xea\xdb\xa6\x29\x9d\x6b\x69\xee\xe1\x66\xe8\xb1\x6a\xf2\x8f\x3a\x49\xe8\x7f\x01\x00\x00\xff\xff\x89\xcf\x74\xf8\x80\x20\x00\x00") + +func fontsO8FlfBytes() ([]byte, error) { + return bindataRead( + _fontsO8Flf, + "fonts/o8.flf", + ) +} + +func fontsO8Flf() (*asset, error) { + bytes, err := fontsO8FlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/o8.flf", size: 8320, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsOgreFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x58\xdf\x6b\xdb\x48\x10\x7e\xd7\x5f\xf1\x11\xca\xc5\xe6\x9c\x6c\x93\x90\x42\x8f\x10\x74\x1c\xd7\xe3\x9e\x0b\x79\x5a\x98\x2a\xb6\x6c\x8b\xca\x72\xb0\x65\xda\xc2\xfe\xf1\xc7\xec\xcc\x4a\xbb\x92\xe2\xa6\x67\x52\x5b\x95\x35\x33\xdf\xcc\x7c\xf3\x63\xbd\xae\xd7\xb7\xc5\x3b\x7c\xc0\x3d\x6e\xdf\xe3\xe6\x1e\x37\x77\xd9\xe7\xb6\x68\x56\xc5\x61\x85\xe7\x1f\xf8\xa7\x2e\x9b\x06\x7f\x6d\x8b\x97\x97\xb2\xae\xf1\x1b\xfe\x2d\xfc\x7f\x2b\xdc\x99\x8f\x77\xb8\xba\xc2\x73\x71\x2c\x57\xd8\x37\xb8\x3e\x56\x1b\xec\xd7\xf8\x74\x28\x9a\xaf\xf8\xbc\x2d\xcb\x43\xd1\x64\x9f\xaa\x4d\x5d\xb6\x38\x94\x75\x59\x1c\x4b\xdc\x5e\xbf\x67\xa1\x3f\x4f\x9b\xd3\xb1\xc5\xfd\x02\x37\x1f\x3f\xde\x65\xd9\xdf\xdf\x5f\xea\xa2\x29\xda\x6a\xdf\xb0\x8a\x75\x75\x38\xb6\xa8\xab\xa6\xfc\x23\x63\x84\xb8\xc2\xc5\xae\xd8\x54\x4b\x34\xa7\xdd\x73\x79\xb8\xc0\x7a\x7f\xc0\xba\xaa\x4b\x54\xab\xb2\x69\xab\x75\xb5\xf4\xc2\x59\x01\x00\x57\x38\x6e\xf7\xa7\x7a\x85\xa2\xfe\x56\xfc\x38\xe2\xb9\xc4\x97\xe2\x72\xe1\x85\x9a\xfd\xb7\xec\x9d\x3c\xd4\x6e\x4b\x5c\x6c\x8b\xc3\xea\xb9\x2e\x9a\xaf\x17\x8c\xeb\xe5\x50\x35\xed\x11\xc5\x11\x05\xfc\xdd\x05\x9e\x4f\x2d\x96\x45\x73\xd9\xb2\x9a\xe3\xee\x74\xdc\x96\xab\xec\x83\x68\xd8\x96\xd5\x66\xdb\x32\xe2\x02\xcb\x6d\x71\x28\x96\x6d\x79\xc8\xee\xcf\x7c\xb9\x40\xb3\x6f\x51\x35\xcb\xfa\xb4\xaa\x9a\x0d\x56\xe5\x71\x59\x36\xab\xf2\x70\xcc\x6e\xdf\x7b\xb1\x5d\xf1\xdd\x7b\x8e\xba\x6c\x36\xed\x16\xb3\xf2\x7b\x78\x78\xb9\xdf\xed\xca\x46\x02\x73\x9c\xe3\x77\x14\x58\x9f\x56\x9b\x12\xeb\x62\xd9\xee\x0f\xd9\xcd\xbd\xd7\xb0\x2a\xd7\xc5\xa9\x6e\x05\xec\x6e\xbf\x2a\xbd\xe3\xed\xb6\x3a\x62\xbd\x6f\x5a\xcc\xea\xea\x6b\x89\x8b\xab\x1d\x6e\xee\x2f\x38\x73\xac\xb7\x68\x56\x5e\xef\x3c\xbb\xb9\xf3\x5a\x24\xd2\x0c\x3f\x31\x9b\x65\xef\xf2\xc1\x5f\x9e\x01\x20\xf0\x87\x81\xcd\x33\x18\xc0\xe4\x99\xb1\x64\x90\x67\xd6\x00\xfe\x3b\xfe\xc8\x33\x90\x7f\x74\x06\x87\x79\x9e\xe1\x09\x4f\xdd\x97\xe9\x87\x6a\xe5\x3f\xbe\x47\x0e\xce\xc1\xb1\xac\x23\xe0\xfa\x1a\x20\x27\xd7\xfc\xe2\x6b\xc0\x91\x73\xe4\x3a\x1d\xbd\x22\xf2\xf7\x1c\x1c\xf2\xcc\x80\x68\x9e\x67\x96\x88\xc1\xce\xe0\xc1\xb2\xa8\xc2\x03\x51\x9e\xcd\x68\x6e\xfc\x7d\x18\xb0\x17\xfc\xc1\xb6\x0d\x99\x19\x4b\xc7\xba\x49\x21\xce\x40\x98\x43\x1e\x26\x58\x63\xf3\xcc\x61\x46\x8f\xc0\x43\x9e\xc1\x12\x91\xb1\xa6\x87\x26\xd6\x18\xc1\x3c\xcf\x9c\x11\xd0\xd1\x9b\xe8\x16\xd3\xac\x89\xb1\xc7\xef\xb0\x64\xf3\x3c\x23\xef\x9b\x85\x55\x0f\xd3\x77\xc3\x39\xe8\xe0\x66\x0c\x81\x75\x5a\x88\xdb\x86\xdd\xe5\x94\x81\xd3\x94\xb8\xd5\x27\x23\xca\x40\x08\x7f\x1f\xef\x28\xd8\xf9\xd0\x83\x81\x7f\xa9\x56\xbd\x20\x7e\x79\xad\xfc\x72\x13\xcf\xbc\xa2\x55\xb3\xa0\x5a\x7d\xa0\x80\x51\xce\x20\x39\xc3\xc0\x39\x92\xa8\x49\xa2\x34\xa2\x3e\x5e\xce\xfb\xe4\x42\xbe\x52\x18\x9e\x00\xfa\x54\x78\x53\xc8\xb9\xba\x02\x75\x45\xd2\xc1\x4c\xf3\xca\x98\x73\x66\xca\xcb\x41\x04\x04\x37\xc7\xd9\x6a\x70\xe6\x62\x65\x0a\x8c\x26\x46\x0a\xa3\xbf\x52\x5d\x5d\x92\x06\x59\x4a\xa1\xf2\x95\x4b\x30\x9f\xb3\x89\x40\x75\x5f\x0d\x5e\xfe\x32\x04\x70\xa6\xae\x4e\xc5\x2d\xf6\x10\x2e\x64\x0a\x21\x55\x5e\x63\x92\xa4\x34\x4b\x52\x58\x49\xba\xce\x58\x9b\x48\x6e\xf4\xf4\x22\xd8\x1f\x89\xa5\xcc\x1a\x73\x6c\xfc\x75\x4c\xec\xbe\x52\xc5\x33\x2d\x48\x2b\xd5\x35\xa8\xa9\x11\xe9\xcf\xb1\x3f\x29\x70\xab\x0d\xd6\x68\x69\x67\x51\x8c\xa5\xb7\xf1\x03\xd6\x68\x21\x58\x79\x64\x36\x1f\xb4\xd5\x10\x22\x4f\x4c\xd1\xcc\xb0\xe9\x8b\x12\x1b\xb3\x50\x05\x3e\x66\x82\xcc\x2a\x23\xe2\xa6\x00\x43\x56\x82\x6d\xc8\x5a\x8f\x80\xbf\x62\x0c\x64\x60\xc9\x0c\x62\x2c\x30\xc5\xae\xf7\x84\xc8\x1a\x1f\x34\x6e\x40\x5e\xcc\x5b\xf9\x99\x18\x84\x2d\xcc\x43\xff\x95\x9
d\x62\x6b\x2f\xe7\x1f\xb7\xca\x37\xeb\x63\xc3\xa2\xc6\xf8\xf6\x40\xb4\xb8\x1c\x56\x88\xe7\xba\x38\xa8\x40\xbd\x9f\xc6\x68\x3a\xd8\xda\xa0\x61\x8e\x41\xaa\x8c\xb7\x1e\x46\xe1\xa8\xb5\x45\x62\x21\xb9\x64\x85\x47\x12\xd2\x57\x7c\x0b\xaa\x8c\xe5\x7f\xc1\x23\x61\x20\x83\xd4\xe1\x6b\x62\xa0\x7d\x85\xa8\x51\x4b\x21\x30\x3e\x32\x7e\x6c\x87\xe2\x56\xbb\x53\x81\x91\x7b\xca\x49\xb9\x62\x00\x56\xec\x4f\x56\x65\x8c\x37\xc0\x35\x14\xd0\x5a\x0d\x8f\x1d\x89\xf5\x4c\x35\xc0\x2f\xa4\xbe\x37\x67\x94\xa1\x02\xd4\x4b\xf2\x3d\x1b\x12\x32\x1c\xca\x88\x92\x6f\xbb\x8a\x63\x68\x92\x15\xab\xb1\x25\x9b\x4c\xcd\x51\x3e\x49\x69\xc0\xeb\x90\x50\x5c\x08\x67\x07\x61\x7d\x9d\x06\x73\x0d\x4f\xbf\x48\x4d\x89\xf5\xe6\xd0\xd9\x43\x30\x88\x60\x71\x41\xf6\xd5\x4c\x32\xc7\x25\x44\x56\x4a\xc2\xd7\xb0\xd8\xb4\xe3\x6e\x0e\x4d\x00\xd3\x5b\x5b\x93\x6c\x17\x72\x15\xaa\x63\x7a\x08\xf8\x02\xea\x6b\xd1\x44\x29\x85\x35\x13\x1e\xea\x2d\x4f\x1b\xad\x26\x49\x1e\x7e\x4a\xb7\x20\x08\x11\xf5\x84\x0d\x83\xc7\xe2\x09\x21\x0b\x36\x85\x1b\xfb\xa9\x6d\xd5\x74\x54\x90\xa4\x5b\xb5\x8c\x8e\x0e\x08\xf7\x87\xdb\x27\xe9\x4a\x69\x63\x21\x25\xa4\x95\xf5\xa4\x9b\x12\x3d\x78\xd9\x9f\x7d\x43\x04\x49\x90\x74\xa0\x98\x64\x9b\xe6\x97\x26\xcc\x68\xe3\xd7\x2a\x61\x61\x93\x34\x53\xf5\xca\x8f\x7d\x37\xb5\x52\x3a\x9e\x41\x82\xb8\x4f\xac\xc6\x4d\x83\x66\x43\xc4\xec\x58\x2b\x0d\xb7\x4f\xdd\xa9\x44\x6b\x9f\x0e\x9e\x9a\xd6\x75\x77\xfa\xf7\xc9\x0d\x71\x7c\x31\x1c\x9f\xf1\x3c\x86\x2a\xce\xd3\x45\xba\xd3\x41\x72\xfe\x88\x86\xdd\xac\x5f\xf8\x16\xc3\xd5\x4c\xc5\x78\xab\x1a\xee\x3b\x2e\xac\x49\xd7\xd3\xe4\x0b\x9b\x88\x6e\x7f\x6a\x8a\xa0\x7c\x75\xc3\x84\x93\x4a\xb8\xb0\x30\xfe\x1c\xde\x84\x1d\xf8\x43\x07\x24\xe9\x63\x3b\xdd\x73\xba\xea\x42\x9e\x96\x45\x76\x70\xf6\x7a\x5b\xc4\x24\xcd\xdd\x62\x70\x36\x62\x81\x0e\x7e\xc9\xce\x87\x9b\xb5\x5f\xac\xa6\x37\xeb\x10\x1d\x5d\xc8\xc4\xba\x7e\x90\x51\x8e\x45\x00\xc4\x1a\x17\x86\x83\xd3\xe3\x13\xf0\xe0\x29\xe3\x86\xd4\x45\x62\x75\x6c\x3a\x6a\x25\xe4\x83\x11\x39\xf6\x25\x3d\x3f\x24\x0e\x26\x4e\x8e\xc2\x4a\xf8\x95\xf8\xa4\xa9\x7e\xfb\x2a\x7c\xce\x9a\xf2\x17\xd7\xe1\x68\xe2\xfe\x57\xee\xe5\x80\x11\xd3\x91\x42\x93\xb9\xa4\xd0\x66\xd0\x19\x18\xf7\xb9\x7e\x75\xe5\xa7\xf5\x58\xee\xa2\xbe\x15\xd2\x1a\x51\x96\x22\x02\x33\x14\x37\xc5\x5c\x52\xde\x4c\x1f\xee\x26\x8b\x09\xda\xfc\xb4\x5f\x23\x34\xec\xa7\xd0\xe2\x27\x06\x77\x2a\x19\x0b\xc7\xf2\xb1\x8a\x54\x4b\x5a\xcc\xa3\x69\xf1\x28\xc4\x9d\x9e\x12\x6f\xf1\x73\x50\xa1\x7d\xd0\x49\xce\xf2\x61\x02\xcb\xb4\x48\x23\x29\xc7\x6a\x01\x22\x23\xe2\x01\x0f\xd1\x6f\x29\xd0\x1f\x21\x26\xab\x28\x94\x92\x46\x46\xe7\x49\x24\xfb\x88\xc7\xfe\x57\x19\x1f\x93\x1e\x9f\xb1\xc6\xf9\x39\xd1\x8f\xbb\xf4\x23\x72\x7c\x46\x73\x92\xd6\xd0\x57\x05\xe4\xa4\x39\xdd\x6c\x5e\x17\x3b\x7b\xfa\xef\xc4\x10\x7a\xd5\x9b\x7e\x34\x98\xb0\xf6\xb6\xd1\xf3\x1a\xc8\x73\x47\xed\xb7\x81\x1c\x59\xa3\xb0\x17\xe8\x04\x09\x5d\xd3\x59\xa9\x46\xe7\x92\x35\xe2\xbf\x00\x00\x00\xff\xff\x49\xee\xa5\xb3\x37\x16\x00\x00") + +func fontsOgreFlfBytes() ([]byte, error) { + return bindataRead( + _fontsOgreFlf, + "fonts/ogre.flf", + ) +} + +func fontsOgreFlf() (*asset, error) { + bytes, err := fontsOgreFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/ogre.flf", size: 5687, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsPawpFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\xcf\x6e\x23\x37\x0f\xbf\xcf\x53\xf0\xe0\x83\x74\xf8\x02\xe4\xc3\xb6\x40\x6e\x06\xf6\xd4\xbe\x40\x8f\x84\xb3\x6b\x17\x06\x12\x27\x88\xed\x2e\xf2\xf6\xc5\x48\x94\x48\x4a\x94\x46\x63\x07\x35\x60\x9b\x33\x23\x91\x14\xff\xfc\x48\x69\x0e\x2f\x87\xff\xef\x36\xf0\x04\xbf\xc3\xe3\x6f\xf0\x08\xdf\xa6\xf7\xdd\xaf\xf7\x87\xc3\xcb\x01\x9e\x3f\xe1\xfb\xf5\xe3\x72\x3c\xc3\x5f\xbb\xd3\x69\xff\x01\xee\xc7\xaf\x40\x6c\x77\x3f\xce\x0f\xcf\xd7\x87\xfd\xcf\xab\x9f\xdc\xeb\x27\x1c\x8e\x1f\xe7\x0b\x1c\x8e\x7f\xbf\xec\x2f\x70\x78\x3b\x5d\x1e\x00\xfe\xb8\xc0\xf1\x0c\xcf\xbb\xf3\xfe\x27\xbc\x9d\xe0\xfd\x7a\x38\x7c\x26\xb6\x7f\x5e\x77\x27\xf8\xbe\xfb\xf0\xd3\xcb\xee\x7c\xd9\x9f\x2f\xf0\xb1\xff\xe7\x78\x3e\xbe\x9d\xe0\x7f\x70\x7e\x7b\xdd\x5f\x8e\xaf\x7b\x38\x9e\xe0\xf1\xe9\xe9\xdb\x34\x6d\x36\x9b\xed\xe8\xcf\x76\x02\x84\xed\xe4\xd0\x57\x3f\xfc\x00\x00\xe2\xcf\x76\xda\x40\xfc\xcc\xd3\x01\x01\x00\x03\xe9\xd0\x83\x43\x5f\x90\x72\xec\x30\x19\x24\xf1\x27\x5e\x22\x7d\xe7\x4b\x44\x87\x3e\x7c\xa3\x82\xfc\xf1\x8b\x4f\x01\x48\xbd\xcc\xb9\x10\xc4\x97\x2c\x78\xa6\xd0\x81\x0f\x94\x43\x3f\x4f\x0f\x96\x41\x20\x43\xcd\x77\x82\xb8\x79\x90\x03\x2f\xc7\x91\x4c\xc1\x5f\x2f\x31\x18\x3a\x7e\xc8\xe0\x81\x4c\x86\x8f\x24\x4d\x47\x7a\x9a\x64\xea\x39\xc0\x73\x8c\x25\x29\xa9\xc1\x50\x88\x64\xd2\x68\x25\x4f\xb4\xcf\xcb\x81\xf0\xc0\xc7\x90\x80\xd9\xa4\xb4\x24\x71\xdf\x96\x16\x22\x65\x76\x2b\x6e\x82\x55\xb6\x93\x8b\x11\x61\xff\xd0\xd4\x60\xe8\x24\x38\x84\x61\xb2\x34\x69\xe9\xf9\x99\x14\x29\xe6\xcf\x4b\xaa\x87\x46\x4d\x15\x53\x6b\xbe\x5c\x09\xfb\x84\xbc\x8e\xc9\xfe\xcb\x41\x86\x14\x64\x79\x6e\xf2\xce\x58\xcc\x95\x8f\x38\x12\x59\xb6\x94\xcb\x81\x6d\x4c\xab\xc8\xb6\x17\x2c\x6f\x89\xa8\xb5\xa9\xa8\x02\x6b\xe3\x17\x67\x0c\x2b\x32\xf3\x2a\x43\xa4\x5a\x0f\xf2\xc5\x70\xce\x80\xc8\x99\x2a\x4d\x1a\x39\x13\x63\x13\x93\x4b\x51\x84\x66\x82\x4c\xa6\x10\x73\xc4\xa1\xf2\xba\x01\x02\xc4\x38\xb1\x8d\x0c\x1c\x3b\xd5\xfc\x17\xac\xaa\xa0\xc9\x5a\xa2\x52\x33\xc5\x6e\xb2\x02\x45\x75\x4e\x98\x52\xd5\x22\x68\x96\xed\x80\xa9\x6a\x70\xda\xae\xb1\x03\x08\x2c\x44\x76\x51\x4e\xfe\xc8\x4c\xe5\x5b\xb2\x85\xc4\xd7\x02\x6b\x2b\x58\x92\xa2\x50\xad\x3e\xeb\x5c\xdc\x0b\xe3\xc8\xb3\x63\xeb\x48\x36\x12\x46\x52\xdc\x99\x3d\xe6\xbb\xc2\x50\x23\x6e\xe0\xd4\xab\x91\xa0\xe1\x6a\x9a\x26\x72\x60\x40\x46\x67\x1d\xa5\xc6\x37\xaf\x63\x85\x0c\xed\xe1\x2e\xe4\xf5\x40\xc5\x82\x97\x85\xd1\x36\x2a\x26\xb8\x61\xa8\xc9\x30\x43\x1d\x42\x1c\x43\x91\x0b\x45\x91\x66\xa2\x89\xdb\xb6\x93\x9b\xae\x1f\xf2\x6c\x6e\x39\x1a\x3a\xd6\xeb\xd0\xca\x6a\xad\x29\xa2\x03\xdc\x78\x54\xa0\x08\xb2\xec\x89\xcc\x4c\x22\xab\xce\xa8\x09\x0b\x49\x4e\x0a\x6b\x59\x5c\x31\x37\x64\x11\x41\x19\xda\x30\x8b\x29\xa3\xb1\x05\x0b\x23\xf1\x28\x4c\x1e\x4d\xe3\x0a\xb2\x6f\xfd\x1e\xf4\x14\x30\x63\xdd\xeb\xdb\x2a\x55\x00\x76\x89\xf0\x89\x54\x14\x4a\xe8\x21\xdb\x0d\x55\x00\x5b\xfd\x4e\x25\x1c\x54\xdf\xee\x27\x62\xbf\x93\xc5\x71\x3d\x58\xee\x3b\x6e\x64\x4d\x33\x98\x5a\x36\x7a\x23\x66\x52\x6d\xcf\xc1\x79\x0f\xde\x03\xa8\x86\xdf\x15\xdc\xee\x8d\xcb\x5e\x4d\x11\x50\x9b\xc9\x56\x27\x3a\x26\xc3\x6a\x3b\x52\x7b\x9d\x49\x07\xe5\xa6\x63\xb0\x6e\xa5\x36\x42\x74\x11\xbc\xb7\x11\xee\xce\xae\xf1\xc5\xd8\x61\x7f\x14\x35\xb5\x49\xb6\xd6\xdc\x91\x91\x22\x8b\xc0\x8f\xb6\x30\x28\xb6\x45\xf4\x35\x2e\xa0\x48\xf7\xf1\xcd\x61\x3b\xd2\xf2\x8e\x37\x76\xa8\x32\xdb\x57\x46\x9a\x4c\x19\x99\x33\x52\x62\x4d\xa3\x8e\x85\xc1\x4e\x4f\x00\x96\xdc\xeb\x32\x3c\x55\x77\x0d\x07\xfe\x17\x0b\x72\x2a\x2a\xca\xdd\xf5\x4d\xf5\x03\x3c\xcc\xdf\xb2\x26\xc3\x3d\xad\x2b\x55\x0a\xcc\x63\xef\x6d\x5d\x01\xa1\x07\x33\x2d\x72\x51\x46\x20\xc
c\xe2\x67\x90\x5f\x0b\xc9\x16\xf8\x26\xf4\x59\x53\x6b\x35\xd0\x64\x39\x08\x8d\xac\x97\x17\xe9\x8c\x82\x60\x03\x5c\xd9\x02\x8d\x42\x00\x84\x35\x40\x7b\x0d\x49\xf2\xea\x62\x63\x59\x4f\xc9\xf8\x82\x28\x68\x75\xca\xcd\xfd\xf0\x8a\x82\x16\xf9\xd3\xf8\xe4\x6e\xf9\x27\x9e\x49\x1e\xc5\x81\x93\x70\xb1\x6c\xca\xf3\x0c\xbe\xce\x2d\x35\x4a\x8d\xf2\x51\x13\xe4\x6b\xa8\x24\x18\xae\x2e\xd5\xcf\x9e\xe3\xfe\xbf\xa5\xfe\x26\x04\x61\xd8\x0e\xcd\xd2\xe2\xc1\x2f\x86\x63\xdf\x1c\x83\xf1\x4c\x77\x33\x44\x6c\x27\x20\x12\xbe\x84\xb4\xdc\x5e\x9d\x72\x6d\x9c\x5f\x3c\x93\x34\xf7\x3d\xe9\x0c\x24\x77\x25\xb1\x53\x49\x39\x82\xbe\xbf\xc7\xd3\xfb\x2f\xe5\x73\xb2\x37\x72\xb7\x92\x7b\x95\x05\x8e\x16\x41\x3a\xa2\xea\x3c\xa5\xd6\x8b\x1c\x8b\x5d\xab\x3e\xd0\x11\x3a\x8e\x73\x6c\x98\x4f\x2c\xd6\xaf\xd1\xd1\x62\x84\xa2\xc3\xd3\x26\x6e\xd8\xea\x16\x5f\x77\xec\xe0\x07\x7c\x5d\xeb\x28\x41\xbc\xa5\xa3\x7a\x2b\x63\xbc\xb8\x49\x33\xda\x2b\xaa\x8f\x21\xf4\x9d\x5c\x5a\x60\x70\x45\xa9\xc9\xe6\x46\xca\x17\x77\xba\x2b\x22\x14\x02\x75\x6a\x5f\xff\x2d\x82\x68\xff\x62\x7d\x0f\xdd\xe4\x66\x5a\x56\x1e\xa7\xdc\xe6\xd9\x6e\xf4\xe5\x66\xcc\x83\x11\x6b\x6b\x39\xaa\xde\xd1\x9b\x48\x53\xe5\xcc\xdd\x19\xa2\x62\xce\x05\x34\xd6\x7c\x92\x15\x93\x11\x93\x36\x49\x07\xfe\x2f\xe6\x59\x8c\xb0\x64\xc0\x40\xe8\xf9\xbe\xcd\x88\x14\x29\xeb\xb7\x2e\xea\x9d\xb7\x50\x2d\xab\x8b\x56\xc7\x99\x98\xb1\xda\x8f\x6d\x8e\xc5\xc1\x9a\xc1\x11\x96\x72\x06\xf3\x6b\x57\x92\x00\x5f\xd1\x5a\x36\x28\x14\xbb\x75\xd1\x60\xea\x0d\xba\x39\xf7\x2e\xa3\xd7\x45\xcd\xb7\x12\x5c\x75\x13\xfc\x96\x2f\x75\x8d\xfa\x51\xdf\x8d\x48\x88\x4b\x21\x24\x8e\x24\x3c\x37\x7c\xa0\xc6\x18\x1c\x2d\xf4\x6f\x54\x04\xbd\x10\x80\x5a\xda\xa2\x46\xed\x30\x62\x78\x0d\xc6\x00\xc6\x92\x3a\x2e\x56\x91\xda\x7d\x6a\x6b\xdb\xb8\x33\x00\xb4\x5d\x8e\xc5\xfc\x7a\xcc\x02\x47\x2e\xa1\x55\xd0\xad\xe1\xd8\x8e\xde\x4e\xdf\x34\xdf\x59\x2e\x2f\x36\xc7\x7b\xca\xcb\xd8\xf2\xef\x2f\x58\x2a\xc3\x64\x73\xe1\xaa\x3b\xc4\xf1\xdf\x00\x00\x00\xff\xff\x15\x86\x8b\x9b\xab\x23\x00\x00") + +func fontsPawpFlfBytes() ([]byte, error) { + return bindataRead( + _fontsPawpFlf, + "fonts/pawp.flf", + ) +} + +func fontsPawpFlf() (*asset, error) { + bytes, err := fontsPawpFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/pawp.flf", size: 9131, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsPeaksFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\x4d\x6f\x24\x35\x10\xbd\xcf\xaf\xa8\xc3\x5e\xc1\xcb\x08\x89\xe5\xd6\x1c\xc8\x85\x0b\x0a\x77\x2b\x41\x9a\xa0\x48\x11\x48\x90\x0b\xfc\x7a\x34\xd3\xb6\xeb\xbd\xaa\xe7\xee\x99\x21\x22\x9b\xed\xb6\xbb\xed\xfa\xae\x57\xe5\xce\xcb\xdb\xcb\xf1\xf9\x93\x7d\xb1\xef\xec\xf8\xd9\xbe\xfa\xc6\x8e\x87\x9f\x7f\xfc\xe1\xa7\x5f\xec\xd7\xbf\xed\xf1\x8f\xdf\xed\xe1\xcf\xd7\xf7\x7f\xec\x4b\xf9\xfe\xdb\xc3\xc3\xeb\x6f\x6f\xa7\x77\x7b\x3c\xbd\x9d\x9e\xff\x3a\xd9\xf1\xeb\xcf\x87\x4f\x8b\xf8\xb5\x65\x39\x94\x5a\xf9\x62\xed\x62\x66\xed\xd9\x79\x74\x79\x58\xea\xb8\x99\x5d\xde\xef\xdf\xda\xbd\xff\x9c\xa7\xa5\x56\x6b\xd7\x0b\x87\xf3\x8f\x5d\xee\xe9\xed\x8d\x8b\x91\xd1\x65\xde\xdf\x5c\xa4\x5e\x55\xab\x41\x8e\xb6\xaf\x9b\xa0\xad\xcb\x54\x51\x93\x46\x67\x2c\x85\xb5\x4e\xb4\xb1\xba\x70\xaf\x7a\x1b\x49\xea\x6b\x87\x2c\xbe\xbf\xfa\xf6\xb0\x0e\xf6\x3a\xc1\xf5\x4d\x73\xd4\xfc\xd2\xc4\x6c\xe4\x1a\xfd\x78\x1b\xf2\x76\x76\x9d\x83\x7a\x49\x37\x22\xca\x46\x0c\x9a\x8c\x4b\xd7\xa7\x39\xac\xfb\xdc\x9a\xe7\xc5\x7b\xb1\x9f\xe9\x8b\xb0\xc0\x68\xb4\x1c\x08\xdd\xd6\x9d\xed\xd6\xb2\x4c\x0d\x19\x6d\x5c\x86\x8b\xfa\xb6\x7c\x27\xee\xe2\xfd\x4d\x9c\xd8\xf8\xa0\xc5\x1d\x11\x1c\xb4\x0d\x49\x0e\x04\x91\x10\x46\xd5\x60\x3d\xc8\x79\xc4\xa0\x58\x2d\xf2\xa3\x1f\x97\x01\x32\x21\x0c\xd5\xad\x80\x11\x49\x52\xc7\x85\x95\x92\xb0\x0d\x5a\x04\xed\xe0\x58\xc4\xb9\x2c\x50\x42\x18\x96\x83\x93\x70\xa6\xa7\x7e\x8d\xe1\x25\x6d\xcc\x61\x58\x70\x06\x9a\x81\xf9\x21\x9f\x72\xb6\x68\x1b\x93\x26\x95\x23\x20\xb8\x47\x6b\xa2\xf1\x4e\x06\x0b\x5a\x1b\x23\xd0\xfd\x93\x83\xa8\x2f\xcb\x45\x20\x38\xbb\x8c\xda\x81\x6a\x27\x67\xb3\x49\x27\xe3\x2d\x4d\xfa\x44\x07\xbd\xb9\xbc\xa6\x35\x01\x53\x6c\x39\x3f\x39\xd8\x6d\xce\xda\x95\x2a\xa1\x8b\x8d\x2d\x9d\x3f\x47\x92\x80\x29\xbb\x0b\x01\xe6\x86\xfd\x43\x89\x8f\xd8\x82\x52\x8a\xc8\x12\xb6\x81\x41\xa9\x04\x9d\xf2\x49\x1a\xdc\x2e\xc0\x8e\x0a\x44\x38\x10\x5b\x0e\x99\x94\x7b\xd2\x77\xcb\x67\xa1\xaa\x0d\x0a\x98\x38\xc6\xed\x0f\x54\xc8\x96\x2d\x85\x41\xf7\xe9\xe9\x29\x61\xb7\x6e\xaa\x3c\xca\x21\xa4\x2c\x20\x81\x0a\x33\xaa\x2c\x3d\x19\x19\x57\x63\x59\x48\xee\x26\x5f\x31\x76\xf7\xad\xbe\xab\xc1\x4f\xc2\x77\x4f\x11\xe8\x2f\x54\x6a\x87\x0c\xa3\xc6\x8c\xdc\x1d\xc6\xb8\xde\x46\x56\x07\x06\x03\xe6\xc7\x66\x52\x1e\x99\x4d\xc6\x50\xe0\x12\x68\x2c\x50\xa2\x22\x68\xc3\xb0\x4b\xb6\xb9\x20\x26\xdc\x87\x10\x4e\x11\xb6\xa0\xad\x7a\xd8\x8a\x88\x90\x93\x5e\xe5\x87\x91\x20\xce\x04\x9c\x06\xc4\xdc\x98\x78\xd1\xb8\x72\x4f\x62\xb4\x77\x81\xc4\xa2\xee\x63\x6f\x54\x2a\xf6\x4d\x35\x9e\x56\x20\xe7\xe0\xe0\x41\x6d\x49\xcb\x8b\xc2\x6e\xc1\x65\xb3\x8a\xab\x6c\x79\x8d\xb3\x6f\x0d\xb3\x36\xe9\xfa\xba\xaf\x5d\x8b\x02\x3e\x60\x90\x1b\x38\x24\x48\xd1\xd4\x2c\x69\x54\x35\x5f\x30\x1e\x73\x65\xa6\x9c\xaf\x46\x2c\x13\x47\xc7\x9a\xdc\xf9\x04\xeb\x67\xf9\xf5\xdc\xb0\x25\x30\x45\x5f\x68\x1c\x32\x56\x61\x4e\xce\xea\x2d\x10\x9c\x6a\x18\x14\xdc\x73\x4f\xf2\x56\x19\x1d\x2c\x14\xac\x98\xe7\x67\xdf\x93\x66\xac\x9b\x9a\x59\x0c\xcf\x58\xd7\xa3\x3b\xb4\x2f\xbb\x34\x6c\xbe\x50\x55\x43\x55\xcf\xd0\x8f\x74\x92\xaf\x8c\xfb\x19\x26\xf6\x31\x33\x62\x98\x9d\x71\xf7\xe4\x0a\x54\x16\x36\x1b\xce\xc6\xc0\xd6\xcd\xc5\x48\x48\x56\x2d\x04\xbf\xaa\x40\x4a\x90\x8d\x7c\x2b\xdc\x13\x24\x90\x40\x60\xf0\x62\xa6\xf3\x75\x26\x00\x9a\x61\xa8\x5c\xa0\xf7\x41\x5d\xd2\x3b\x09\xe1\x32\x6a\x25\x43\x32\xfa\x8c\xe1\x2d\x33\x66\x98\x0e\x64\xe0\x54\xe1\x51\x33\x71\x68\x29\xa1\x15\xa8\xa2\x94\x00\xc3\xa6\xe8\xee\x2d\x7d\xea\x0a\xec\xe2\x01\x21\xc9\x0a\x29\x35\x3d\xb7\xe0\x67\xcc\xed\x9b\xf8\x86\x51\x18\x4a\xcb\x4e\x99\xbe\x67\xd2\x67\xb7\xdf\xdb\x97\xab\xd1\xe5\xe4\x1
3\x22\x5f\x08\xe1\xe2\xd8\x35\x1d\x69\xa6\x1a\xe6\x10\xab\x35\xa3\x26\x48\x37\x19\x92\x15\xb9\xd1\xa6\x05\xd9\x46\xe1\x40\x59\xb0\x1b\x1d\x6c\xe0\x89\xaf\x61\x42\xf3\xbe\x0f\xc1\x44\x76\x80\xec\xf4\xeb\x6d\x1a\x0f\x12\x74\x44\x32\xef\xa3\xa7\x45\x7d\xe9\x07\xce\xf3\xb6\xf1\x07\x02\x42\xab\xc9\x20\x49\x1a\x87\x8c\x64\xd1\x23\xf0\xb4\x1b\x8a\x32\x13\xac\x71\xf0\xcc\xc5\x11\x7d\x86\x49\xd6\xad\xa6\x6c\x8a\x5f\xd6\x77\xfa\xf9\xa1\x84\x9c\xcb\xfb\xda\x7b\x2e\x33\x81\xc3\x21\xbe\x99\xbc\x52\x04\xcf\x44\x1f\x36\x9b\x5f\x3d\x26\xf5\x47\x06\x23\x83\x8e\x0a\x07\x1e\x0a\xae\xd1\xd3\x55\xc9\x12\x85\xdc\x0e\x87\x12\x3f\x18\xa4\x18\x20\xf3\x24\xe2\x77\xa3\x4c\xa0\x2e\xfb\xb7\x14\x6f\x28\xb7\xe3\x8a\x13\x8e\xa8\x22\x1d\xae\x09\x1a\xe0\x0a\x12\x74\x47\x14\xea\x8c\xc7\xa8\x4e\x3e\x96\x45\xb8\xca\x83\x12\xf2\x5f\xe3\x5e\xa9\xf4\x89\xb8\x23\x87\x01\x52\x44\x9c\x5a\x14\x30\x5c\x87\x22\x54\x5c\xe7\xc6\x8f\x47\x35\x3d\xb2\x66\xb6\x09\x80\xc6\x32\xc9\x75\x56\x16\xa0\x21\xa4\xe8\x63\x74\x9d\x15\x9c\x98\xd5\xe0\x55\x19\xf6\x7d\xb6\xfe\x42\x0a\x58\xaa\x85\x3b\x19\x86\x88\x0e\xc1\x6e\x5e\x2d\x42\xd1\x9d\x64\xd8\x94\x70\x09\x7f\x64\xad\xb9\x6f\x0a\xe1\xb7\x4d\x98\x02\x2a\xa4\xe7\x12\xbe\xcd\xaa\xaf\x0c\x16\x0e\xcd\x14\x87\x7c\x5c\x0d\xb5\x2b\xc4\xf1\xfa\x35\x71\xfd\xbf\xee\xa6\xf9\xb5\xe4\x36\x84\x70\x4e\xd5\xe0\xaf\xbe\x02\x47\xff\xd3\x78\x39\x84\x7f\xff\xd7\x83\x7f\x03\x00\x00\xff\xff\x75\x29\x6c\x29\xe4\x20\x00\x00") + +func fontsPeaksFlfBytes() ([]byte, error) { + return bindataRead( + _fontsPeaksFlf, + "fonts/peaks.flf", + ) +} + +func fontsPeaksFlf() (*asset, error) { + bytes, err := fontsPeaksFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/peaks.flf", size: 8420, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsPebblesFlf = 
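Since every payload is a gzip stream wrapping a FIGlet font, a round-trip check makes the invariants concrete: the inflated length should equal the size recorded in bindataFileInfo, and the plaintext should begin with the FIGfont signature flf2a. A hypothetical test sketch (the test itself is not part of this change), using the peaks accessor defined above:

    package figure // package name assumed

    import (
        "bytes"
        "testing"
    )

    // TestPeaksFontRoundTrip is a hypothetical sanity check: inflating the
    // embedded fonts/peaks.flf should reproduce the original FIGlet font.
    func TestPeaksFontRoundTrip(t *testing.T) {
        data, err := fontsPeaksFlfBytes()
        if err != nil {
            t.Fatal(err)
        }
        if len(data) != 8420 { // size recorded in bindataFileInfo for fonts/peaks.flf
            t.Fatalf("inflated to %d bytes, want 8420", len(data))
        }
        if !bytes.HasPrefix(data, []byte("flf2a")) { // every FIGfont header starts with flf2a
            t.Fatalf("missing FIGfont signature: %q", data[:5])
        }
    }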
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x59\xcd\x6f\xdc\xb6\x12\xbf\xef\x5f\x31\x07\x01\x4a\x00\x3f\xbe\x97\x87\x87\x97\xb4\x28\x0a\x5f\x1a\x34\x40\x81\x29\x7a\x29\x7a\x93\xbc\x4b\x7b\x05\xaf\x77\x8c\x95\x36\x4e\x7a\xe8\xdf\x5e\xf0\x63\xc8\x21\x45\x6a\xe5\xd8\x4d\xdd\x15\x49\xf1\x63\x3e\x7f\x33\x1c\xdd\x1e\x6e\xff\xdb\x37\xf0\xee\x3f\xf0\x01\xde\xfd\x1f\xfe\xf5\x0e\xde\x6f\x7e\xd5\x37\x37\x07\x3d\x5e\xc1\xed\x70\x77\xd0\xd3\xf0\xa7\xde\xc1\xcd\x57\xf8\xe9\xe1\xb1\x9f\xf6\xf0\xc3\xbe\xdf\xe9\xf1\xfa\xac\x9e\xfa\x71\x3f\x1c\xef\x26\x3a\x2a\xbd\x3b\xff\x08\xef\xff\xfd\xdd\xff\x36\x5b\x7a\x78\x3c\xe8\xc9\xad\xf8\xed\x6b\x7f\x84\x3f\xe8\xbc\xbd\x87\x37\x5f\xcd\xe3\x7a\x3b\xaa\xf3\x49\xdf\x0d\xc7\x5e\x6d\xfb\xb7\xf0\xc1\x2c\xf9\xeb\xe3\x89\x1e\xbe\x87\xdb\x83\xd6\xd7\xdb\x51\xab\xc7\xf1\x6c\x36\x84\x37\x1f\xf5\x61\xf8\x02\xbf\x68\xfd\x76\xf3\xb3\x3e\xe9\x76\x84\xfe\x08\xfd\xb8\x1d\x06\xb8\xa5\xe3\x04\x9f\xda\xcf\x1a\x6e\xb4\x3e\xc2\x13\x9d\xee\x87\xe3\x1d\xd0\x51\x01\x7c\x82\x71\xea\x4f\x86\x84\xa7\x61\xda\xc3\xb4\xd7\x30\xec\x74\x0f\x74\xbb\x39\x8f\x66\xd6\xcd\xd9\xf2\x07\x6f\x14\x10\xe0\x5b\x98\x08\x6e\xce\xc3\x61\x07\xbd\xdd\xf7\x0a\xdc\x34\xb3\xf0\x81\xa6\xe1\x16\x14\x21\x29\x98\x68\xa3\xbf\x3c\x9e\xf4\x38\x42\x0f\xdb\xf3\xe9\xb3\x56\x00\xbf\xef\xf5\xd1\x9c\x78\xaf\xa7\xed\x5e\xef\x80\xce\x93\x5d\x78\xa0\x27\x7d\xda\xf6\xa3\x86\x83\x9e\x26\x7d\x1a\xaf\x60\x98\x40\x1f\x77\x7a\xb7\x39\x3f\xc2\x81\xc8\x12\xfc\x40\x27\x0d\x87\xe1\x5e\xc3\xa3\x13\x3a\x4c\xfb\xfe\xc8\x14\x5e\xc1\x48\xf0\x09\x4e\xfa\xd8\x3f\xe8\x1d\x0c\x93\xda\x34\x00\x00\xcd\xf5\x6b\x3d\xaf\x37\x84\xcd\xf5\x06\xcd\x8f\x6b\x11\xb7\xec\xb4\xac\xeb\x96\x20\xd8\x51\x70\xc3\xf0\x9c\x5f\xfb\x00\x08\x23\xb1\x49\xf6\xcf\xec\x4b\x68\xff\xec\x28\xda\xbf\xea\x68\x6d\x33\x6e\xda\x36\xf9\x61\x45\x88\x46\x8f\x96\x76\xb3\x01\x59\x0e\x55\x98\x00\x1d\x22\xb9\x09\x60\x27\x74\x9e\x43\x33\x81\xdc\x5c\x04\x50\x76\xb4\x23\x22\x6c\xe3\x5c\x77\x9c\xb3\x14\xfb\x0f\xdd\x39\xee\x1d\xa0\x99\xd9\xa1\x59\x02\x40\x09\xf5\x28\xc9\x26\x94\x5c\x60\x22\x21\x73\x36\x79\x0e\xdc\x2b\x04\xe6\xc2\x4e\x73\xfb\x0b\x09\x04\x29\x30\xef\x71\xa9\x97\xb6\x13\x44\x1b\x8e\xc4\x78\x3e\xb5\x4e\x32\x61\x7f\x24\x77\x58\xe7\x86\xfd\x51\x1d\x1a\x49\x00\x52\x72\x6e\xd6\x49\x2c\xc9\x4a\xa6\x95\x36\x55\xfa\x71\x54\x1b\xbd\x61\x20\xc1\x8b\xb4\xd6\xeb\x88\xf5\xc8\xc4\xb8\x93\xbd\x4a\x49\x79\x75\x7a\x56\xeb\x3d\x6c\x2d\xb1\x41\x96\xc2\x70\x9b\xa4\x45\xac\x5a\xf3\x60\x4b\x35\x76\x4a\xc9\x98\x35\x26\xca\xd6\xce\x5b\x42\x66\xcd\xbc\x4d\xb9\x61\xb8\xc3\xec\x69\x94\x8d\xcf\xec\x6a\xb9\x7d\x41\x15\x25\xe5\xad\xda\x37\x69\x13\xff\x5b\x39\xff\x1b\x69\x8d\x30\x26\x4d\x29\xfe\xc3\xc4\x34\x51\xee\x9e\xfb\xa5\x01\xc9\xe8\x97\x16\x32\x83\x68\x6d\x0f\xd8\x2f\x5d\x0f\xbc\x73\xf9\x1e\x78\xc3\x84\xd0\x15\x64\x08\xb2\x3c\x36\x59\xf3\xb4\x4a\x23\x6f\xd9\x01\x43\x1c\xfa\x24\x23\xe8\xf0\x84\xb0\x6d\xc4\x3e\xd9\xd6\xd6\x44\x31\x3a\x3a\x23\xed\x42\xdf\x6c\x19\x44\x14\x9f\x25\x5a\x53\x71\x06\x13\x6c\x03\x26\x9a\x86\x62\x4f\x45\xf3\xdf\x02\xad\x17\xb7\x56\x14\x95\x45\x72\xce\x45\x31\x40\x80\x62\x2f\x46\xe2\x11\x0a\x2c\x47\x0f\x0a\xee\x96\x8f\xd4\xb6\x0e\xeb\x53\xb8\xf4\x61\xab\xc6\xd0\x2a\xaa\x53\xbb\x80\xe4\x8c\xe4\x15\x4a\xdb\x59\xb5\xb5\x8d\xa7\x33\xed\x05\x1a\x11\x12\x35\x82\x47\xda\x0c\x55\xd6\x50\x9d\x58\x73\x67\x5e\xb5\xe5\x57\xcf\x17\xc8\x7c\xfd\x8c\x8f\x15\x02\x49\x91\x03\x0b\xc8\x37\x43\x93\x02\x30\x96\x00\x28\x03\x4b\x8a\xc2\x8d\x21\x5d\x84\xe4\xa0\xe7\x74\xcc\x46\x5e\x09\xeb\x81\x47\x5a\x1f\x3d\xa2\x45\x96\x51\xf8\x05\xe8\x6c\xb8\x15\xb4\xc6\x09\x48\x82\xd6\xb8\x08\xf3\x31\x09\x51\x5e\x84\xd
9\x31\x36\x53\x25\x0c\xa1\x1c\x62\x72\x26\x37\x54\x96\x7a\x33\xd1\x85\x6e\x24\xc6\x70\x92\x68\x5e\x1d\xe3\x4c\x89\x0f\x52\x9c\x80\x79\x93\x33\xe9\x97\xc9\xe0\x7c\xfe\xeb\x83\xb2\xf3\x3d\x9f\xeb\xb9\xd4\xc1\xa5\x62\x3e\x83\x40\x90\x82\xf3\xd9\x93\xc9\xb9\x9a\x90\x3e\xb5\x79\xca\xc4\xfa\x8c\x9a\x27\x10\xc9\x04\x43\x98\x73\xe2\x88\x3d\x11\x07\x84\x73\xf8\x51\x95\xca\x6a\x9e\x2b\x93\xc8\x90\x7d\xea\x47\xc2\x12\xd1\x87\x13\x23\x5e\x39\xc7\x29\x42\x1c\xea\xc7\x7d\x9e\x8c\x51\x1d\x35\xf3\xe1\xf4\xb4\x81\x20\x72\x32\x6d\xe2\x39\x0b\x6d\x14\x6d\x2f\x5a\x67\x05\x2c\xd8\xa5\x83\x49\xe5\x69\xb1\xcf\xd2\x84\xdc\x0a\x9c\x55\xc6\x95\x83\x18\xcf\xf0\xc5\x83\xf9\x3a\x23\x5c\x9e\x44\x93\x30\x05\x87\x7c\x42\x1c\xf5\x77\xa3\x05\xcd\x62\x98\x10\x37\x10\xeb\xcd\xdb\x7c\xac\xd0\xb2\x56\x9a\x7a\xe5\xa6\xa0\x45\xe1\x38\x24\x36\x80\xc4\x15\x7c\x5b\x71\xe4\x54\x52\xb0\xb6\xe3\xf9\x71\xd0\xbd\x2c\xcc\xb8\xb6\x62\xfa\x94\x85\xe8\xca\xdc\x6c\x42\x45\x98\x56\x94\x0c\xbd\x18\xde\x47\x88\x5e\x1e\x43\x91\x19\xa4\x00\x9e\x42\xb9\xb8\xef\x42\x8a\x9f\xe2\x22\xb1\x30\x2a\x34\x50\xf0\xc1\x8c\x27\xe7\x39\x18\x6c\x2c\x22\x0d\x86\x34\x32\xdc\x3f\x28\xcd\x06\x50\x98\x66\x82\x17\xf5\xe3\xb2\x8b\x65\xdd\xe2\x0a\x2d\xef\xe3\x36\x54\x5b\xbd\xd6\x44\x88\xac\x55\xa7\x6c\x8f\x9a\x5e\xc7\x0e\x4d\xd9\x0c\x90\x52\x9b\x48\x4c\x41\x24\xdd\x50\x9a\x06\x50\xe0\x92\xdd\x3c\x2e\xa1\xb0\xc0\xd3\xe2\x37\x22\x91\xde\x93\x4c\xf0\xc5\xb1\x48\xf1\x8e\x4f\x0b\x62\x2d\x78\xa0\x12\xe4\x4a\xa6\x70\x61\x3c\x5c\xf0\xd7\xe2\x28\xc6\xd8\xcc\x95\x88\x92\xaf\x29\x62\xb4\x71\xfb\x11\xe4\xfa\xb5\xa3\x6d\x81\xb9\x32\x9f\x09\xa3\x72\x47\x94\x7b\x52\xf9\xcd\x8b\xca\x19\x9d\x60\x99\xe3\xa0\x44\x73\xcc\xe3\x20\x0a\x1f\xf4\xae\x32\x8b\xad\x32\xa2\xd4\x44\x9d\x56\xb1\x20\x56\xb1\xc2\xec\xa4\x8a\x95\x26\x4a\x5c\xc5\x82\x62\x15\xab\x24\x6a\x87\x75\xf1\x7a\x24\x8b\x10\xa5\x82\xc4\xd2\x78\x32\x27\xcf\x78\x66\x26\x25\x28\xbe\x64\xb3\xb5\x39\xd2\x96\x65\x4e\x80\x2b\xa2\x49\xbb\xe4\x20\xf2\x60\x56\x78\x1b\x0e\xe3\x2a\x14\x09\xd8\xef\x9c\x55\xaf\x38\x38\xa8\x4c\xa4\x8d\x29\x1c\xe5\x20\x94\x10\x45\x19\x61\x98\x4a\xc2\xe7\xa9\xec\xd9\xd8\x96\x8b\x86\x85\x7e\x1a\x1a\x63\x0e\x2a\x33\x53\x71\xa3\xc9\x52\x57\x69\xed\xd2\x39\x17\xd0\xac\xa6\xf5\x04\x9d\x38\x09\x77\x6b\x3b\x99\x48\xaf\x31\xc3\xa2\x22\xac\x91\xc8\x7c\x2a\x86\x56\x4c\x76\x8f\x01\x3d\xf0\x39\x4b\xce\x5c\xea\xb5\x94\x9c\x79\xef\x5a\x51\xdf\x4c\x7b\x22\x7d\xf0\x0e\x13\x2a\x4d\xae\x10\x95\x74\xf9\x2e\x04\xa2\x88\xd5\xc8\x6e\x98\x10\xa8\x4b\x0a\x60\x3c\x41\xda\x05\x66\x66\x82\x99\xd5\x70\xb6\xe4\xe5\xe4\x2d\x64\x5d\x4f\x94\xa1\x38\x1f\x0a\x79\x46\x34\xa0\x68\x3e\xdf\xda\xa8\xda\xc1\xab\xb5\x2b\x37\x6b\x59\x28\x70\x5f\x07\x92\x2a\x43\xa5\x24\xbe\x82\x33\x77\x49\xcd\x2b\x5d\xf1\x03\x44\x51\x66\x3e\x2a\xb1\x5c\xc5\x93\xdd\xc8\x98\x1b\x27\x13\x0c\x47\xc8\x7b\xf2\x4e\xf1\x39\x1b\x98\x3d\x0d\x9d\xa5\x83\x96\x37\xac\xd7\xe9\x28\x30\x0f\xc5\x7a\xdf\x22\xf3\x2b\x68\x55\xbe\x68\xd6\x0a\x9a\xab\xb4\x2a\x2a\x39\x75\x24\x6d\xfe\x2e\xde\x18\x13\xb3\xbf\x4c\x17\x0a\x65\x60\x2c\x78\xc5\x8a\xa6\x05\x24\x4b\xe7\x5c\xcb\x94\x69\x9b\xb5\x4c\x99\x96\x51\x3a\x5b\xa2\x94\xc6\xcc\x32\x3f\x10\x7e\x30\x8c\xb9\x56\x3b\x33\x62\x07\xdd\x4d\x6c\xdb\x5f\xaf\x1d\x37\x9e\xb7\x3d\x03\x39\xc1\x34\x67\x84\x58\xd6\xc0\x9f\xfa\x16\x19\x08\x0e\xe8\xad\xb1\xd0\xaa\x7c\x4d\xa8\x61\xba\x6c\x76\x2e\x7f\x52\x3e\x2a\x50\x2c\x18\x60\x0c\xa5\xb4\x18\x08\xd7\x38\x7d\xcb\x8a\x0b\x15\x29\x40\x59\x9c\x58\xaa\xbc\xaf\xb1\xfb\xd7\x74\xfa\xd2\x86\x12\xb3\x80\x2f\x01\xaf\x84\x78\xf3\x4a\xbe\x81\xda\x75\x5b\x77\x89\x5c\xa1\x91\x15\x70\xaa\xad\x5a\x8f\x7d\x26\x21\x55\xc2\x4f\x9f\x21\x
57\xbe\x76\x63\xcc\xc2\xe4\x78\xe8\x77\xb4\xfc\x39\x67\xa1\x31\x17\xe7\x7a\x30\xbd\x24\xd7\xa2\x99\x3a\x05\x75\xd4\x2e\xca\x75\xa5\xe7\xb5\x3e\x4f\xac\x7b\x5e\x87\xd4\xca\xef\x02\x65\xcf\x5b\x50\x23\x09\x78\xf5\x26\x11\x42\x5a\x5d\xea\x0b\x1b\xe6\x6e\x21\xf1\x9c\x37\xa4\x04\xcf\x61\x1e\x36\xf8\x11\x52\xa8\x50\x95\xf1\x11\xac\x14\x67\xdc\xb7\x76\x50\xa2\xc6\xdc\x88\x70\x91\xf5\xc3\xfb\x2e\x98\x19\x92\x64\xd4\x82\x6a\x33\x7f\xf2\xff\xe0\xfe\x77\x5f\xe7\xbd\xc1\xf2\x9d\x95\xfd\x35\x7e\x1e\xc8\xfa\xde\xd0\x5d\x0d\x41\x96\x5a\x1d\x9e\xab\x70\xff\x91\x17\x9b\x17\x37\x92\xeb\xcb\xcb\x4b\xf3\x97\xeb\xf1\x49\x6d\xe0\x05\xa5\x9d\x67\x97\x73\x92\x83\xc5\x24\x79\xaf\x2e\x5c\x81\xab\x07\xaf\xbe\x7b\xcf\x85\x4f\x69\x65\xf3\x05\xe9\x2d\xcc\x5d\x97\x5e\x1c\xe9\x2e\xd0\xfa\x8d\x00\xfa\x8f\x7c\x88\x41\x95\x15\xa0\x3c\xd1\x28\xe4\xff\x77\x00\x00\x00\xff\xff\x36\x15\xb6\xce\x0e\x28\x00\x00") + +func fontsPebblesFlfBytes() ([]byte, error) { + return bindataRead( + _fontsPebblesFlf, + "fonts/pebbles.flf", + ) +} + +func fontsPebblesFlf() (*asset, error) { + bytes, err := fontsPebblesFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/pebbles.flf", size: 10254, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsPepperFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x55\x51\x8b\xe3\x36\x10\x7e\xd7\xaf\xf8\x1e\x02\x4e\x20\xd6\x6c\xae\x0b\xed\xb6\x4d\x11\xec\xd1\xc2\x3d\x94\xbe\x14\x0a\x75\x51\x7c\x8e\x92\x38\x97\xd8\x6e\x6c\xf7\x28\x75\xf7\xb7\x17\x8d\x9c\xc4\x92\x1d\x28\xb7\xec\x7a\xe5\x19\xcd\x37\x9f\x3e\xcd\x8c\x77\xa7\xdd\xbb\x74\x86\x67\x7c\x85\xaf\xf1\x84\x17\x51\x99\xaa\x32\x17\xb9\x3b\xed\xf0\xf1\x6f\xbc\xbf\x48\xfc\xc2\x16\xcc\xcf\x59\x9d\xd5\xab\x63\xf6\xf9\xa8\xb6\x59\x23\xd3\x4c\xb6\x9f\x16\xe2\xb5\x3c\x57\x27\xd3\x98\x2d\xd2\x62\x8b\xaa\xbc\xd8\x65\x53\x62\x97\xef\x4f\xa6\xb1\x18\x1f\xda\xb4\xc0\x6b\x7a\xc1\xfc\x98\xa9\x63\xbb\x6f\x4d\x63\xe4\x9f\x6d\x7e\x96\x6d\x76\x96\xa6\x5e\x88\xbf\xcc\xa5\xce\xcb\x02\x2b\xf9\x84\x38\xc6\xbb\x27\xfa\x90\x16\xf4\xf2\x3c\x70\xac\xac\x63\xf5\x0d\xfd\x68\x3e\xd2\xcb\xb3\x7d\xa9\xcf\x6d\x7d\x38\x97\x5b\x83\xa7\x25\x9a\x43\x5a\x7c\xaa\x6d\xde\x9f\x4e\xa6\x28\xf0\x7a\x48\xab\xca\x9c\x4e\x98\xef\xf7\x99\x6a\xf3\x36\x93\x66\xdb\x2e\x84\xf8\xb5\x36\x35\xea\x2a\x2d\xf2\xfa\x80\xec\x90\x5e\xd2\xac\x31\x17\xd4\xa6\xc1\xe7\xbc\x39\x20\x7e\x8f\xb2\x6a\xf2\xb2\xf8\x56\xe0\x4b\x7e\xf4\x7d\x25\x7e\xc7\x1a\x69\x04\x20\xc1\x1a\xc6\x2e\xf0\x07\xd6\xc8\x79\xf5\x0f\xd6\x28\x79\xd5\x61\x8d\x96\x57\xff\x62\x8d\x82\x83\xdf\xb0\xc6\xcf\x42\xcc\xd4\xed\x57\x09\x40\x09\x90\x12\x12\xbc\xb6\x0f\xb6\x90\x12\x33\x5e\x5d\x8d\xfc\xa2\x49\x93\x12\x31\xc5\x84\xab\xd1\xfe\xd7\x4b\x1b\xa2\x37\x4a\x48\xcd\x9e\xa8\x77\x28\x21\x49\x09\x92\x37\x6c\xad\x04\xe6\xa4\xc4\x5c\xff\x76\xc3\x56\x82\xf8\x89\x01\x9d\xee\x4e\x47\x09\x74\x4a\x90\xc7\x4f\x76\x16\xb6\xdb\x0c\xf9\x29\xa1\x41\x5a\x09\x8a\x7a\xde\x3d\xf8\x15\xdf\x61\x69\xdd\x63\xde\xbd\xf2\xea\x65\x33\x31\xc2\x1d\x58\x33\x23\x36\x6b\x1a\x00\xc3\xea\xe0\xf1\xb2\xc8\x11\xef\x0b\xc3\x61\x55\x93\x7e\x38\x18\x90\xb1\x07\xbb\x35\x0b\x89\xd1\x6e\xc7\xc2\x2a\xec\xb3\x70\xc7\x99\xe6\xac\x47\x9c\xf5\x64\x52\x25\x20\xbd\x02\x70\x06\x1a\xa8\x44\x4a\x24\x43\x95\x6c\xda\x38\xc6\xe0\x0a\x93\xbb\xca\x36\x4b\xc4\xc9\x31\xe2\x24\x17\x96\xc1\xdb\xc4\xd1\x78\xff\xd4\x99\x39\x62\x31\x76\x80\xc5\x58\x4e\x38\x6c\xb9\x68\x19\x3d\x90\x6f\x22\x82\x1d\x98\x3d\xca\xf1\x9d\x77\x6f\x8f\xe8\xba\xc2\x1d\xd6\x83\xf3\x72\xb5\xd3\x03\x88\x4d\x12\xd4\x04\x63\x12\x3
+
+var _fontsPepperFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...") // gzip-compressed fonts/pepper.flf (payload elided)
+
+func fontsPepperFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsPepperFlf,
+		"fonts/pepper.flf",
+	)
+}
+
+func fontsPepperFlf() (*asset, error) {
+	bytes, err := fontsPepperFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/pepper.flf", size: 2592, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
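The per-font constructors are only useful once something maps asset paths to them. In stock go-bindata output that glue, a _bindata map plus an Asset lookup function, is emitted after the last font and is not visible in this hunk, so the following is a sketch under that assumption, abbreviated to fonts already shown above:

    package figure // package name assumed

    import "fmt"

    // _bindata maps embedded paths to their constructors (abbreviated).
    var _bindata = map[string]func() (*asset, error){
        "fonts/nancyj.flf":  fontsNancyjFlf,
        "fonts/nipples.flf": fontsNipplesFlf,
        "fonts/ntgreek.flf": fontsNtgreekFlf,
        "fonts/pepper.flf":  fontsPepperFlf,
        // ... one entry per embedded font ...
    }

    // Asset returns the inflated contents of an embedded file by path.
    func Asset(name string) ([]byte, error) {
        if f, ok := _bindata[name]; ok {
            a, err := f()
            if err != nil {
                return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
            }
            return a.bytes, nil
        }
        return nil, fmt.Errorf("Asset %s not found", name)
    }

A caller would then fetch a font at runtime with something like data, err := Asset("fonts/peaks.flf"), paying the gzip inflation cost only for fonts it actually uses.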
+
+var _fontsPoisonFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...") // gzip-compressed fonts/poison.flf (payload elided)
+
+func fontsPoisonFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsPoisonFlf,
+		"fonts/poison.flf",
+	)
+}
+
+func fontsPoisonFlf() (*asset, error) {
+	bytes, err := fontsPoisonFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/poison.flf", size: 14765, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsPuffyFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x59\x5f\x8b\xdc\xb6\x17\x7d\x9f\x4f\x71\x1e\x06\x6c\x81\x19\xb1\xbf\x5f\x5a\x1a\x58\x96\x81\xbe\x15\xfa\xda\x27\xc1\xf5\x36\xec\x96\x84\x24\x6d\x37\x5d\x68\xc0\x1f\xbe\xdc\x3f\x92\x25\x4b\x1e\x7b\x26\x19\xc2\x5a\xe3\xb1\x8f\xae\xee\x9f\xa3\x73\x95\xe7\x8f\xcf\xff\x7b\x3c\xe2\x27\xfc\x88\xbb\x37\xb8\xc3\xff\x0f\x7f\xbd\x3e\x3f\x7f\x3d\x3d\x7f\x7c\xc6\xef\x5f\xf1\xcb\xeb\xe3\x67\xfc\xfc\xf8\x82\xfe\xc3\xbb\xf3\x87\xd7\x3f\x5e\x9f\xfe\x79\x3a\xfd\xfd\xfa\xfe\xd3\xe9\xf5\xdd\xa7\xd3\xd3\x17\x77\xf8\xed\xe9\xe5\xcb\xfb\x3f\x3f\xe3\xee\x74\x87\x1f\xfc\xaf\x8f\x2f\xfe\xee\xed\xdb\x37\x87\xc3\xf1\x78\x3c\xef\xf8\x73\x3e\x80\x70\x3e\xf4\x70\xe7\xc3\x84\x29\xff\xd3\x93\xb3\x3f\x00\xce\xe7\xc3\x11\x04\x10\xf8\xc5\x1e\xae\x87\x93\x11\xb9\x9e\x64\x04\xf9\xec\x1f\x09\xac\x7d\x64\x2c\xe8\x32\x26\xc5\x17\xc3\x08\x38\x9d\x00\xb5\x45\x9f\x56\x93\x74\x66\x7b\x37\xc7\x89\x63\x79\x08\xfa\x84\xef\x08\x41\xd6\xd9\x93\x2e\x29\x50\xbc\xe3\xc8\xf1\xea\xc7\x40\x20\xdf\x19\x74\x86\xa5\x3e\xb2\xa9\xc5\x24\xc7\x43\xdf\xc1\xd9\x33\xbe\xe3\x7f\xf2\x45\x87\x36\xa7\x0e\xf5\x1d\x19\x22\xba\xb3\x65\xb0\x4e\xc4\x1f\xf9\xa1\x17\x77\x88\x21\x63\x00\x06\x06\xd0\x95\x8c\xc1\xb3\xe5\x13\x7a\x7a\xc0\x70\x3f\x8a\xed\x44\x3e\xa8\xf5\x35\x34\x07\x8f\x8e\x31\xca\x5e\xa2\xd0\xfe\x63\x81\x50\xeb\x25\x25\x06\x06\x9d\x30\x41\x66\x1c\x03\x74\xb6\xb8\x8c\x78\x31\x1f\xf1\x1c\xf6\x0c\x38\x87\x00\x49\x25\x46\x93\x8c\xa2\x64\x62\xf6\x62\x11\x77\xcd\xc6\x11\xea\xdd\x07\x00\xf7\xe2\xbf\x01\x43\x8a\x7b\x1e\x9c\x3c\x4a\x86\xa4\x37\x25\xf4\x82\x4a\x13\x26\xd2\x28\xf0\xcf\xd1\x78\x59\x94\x3d\x4b\x6e\xe1\xb1\xd2\x79\x2d\x6f\xd5\x0e\x9d\x5f\x2a\x46\xa4\x31\x95\xe9\xf9\xe3\x56\x9e\xab\x47\x97\x66\x66\x98\x32\x6c\x9a\xa0\x34\x03\x78\xec\xcc\x50\x64\x19\x5a\x25\xe5\x32\x43\x61\xf9\xe9\x3b\xd0\x68\x79\x08\x67\xac\x01\xa3\x0f\x76\xe8\x14\xf3\xb2\x5b\x8b\x56\x96\x68\x1c\xe0\x2c\x5f\xe2\xa5\x6f\x25\x5a\x32\x81\x8b\x41\x6d\x90\x00\x3a\x5b\x2e\x2f\x2b\x2e\xaf\x57\x70\x2a\xed\x28\x9d\x2c\x80\x14\x69\x42\x5f\x70\xba\x00\x50\x4f\x9a\x80\x70\xd9\x92\xb2\x18\x2e\x69\x22\x56\x81\x33\xea\x99\x30\xa5\xea\xe1\x51\xcc\xc3\x21\x82\x2c\x48\xac\xb4\x2c\xa5\x8e\x98\xe8\xd4\xb5\x24\x68\xfc\xc3\x18\x96\x96\xad\x39\xfb\x02\x10\xdb\xcc\x40\xfb\xa2\x46\x8b\x74\xce\x73\xcc\xde\xe1\x81\x85\x47\x73\x4a\xb2\xab\x4a\xa7\xb2\xc6\x66\x16\x10\x86\x33\xae\x76\xc6\x02\x84\xfb\x2e\xbb\xb3\x95\x57\x94\x48\x5f\x3d\x64\x0b\x63\xd2\xb7\x34\x4b\x19\x56\x72\x72\x0e\xd4\xac\xb8\x56\xed\xad\x3f\xd8\xa2\x07\x4a\x65\xe8\x52\x09\x9e\x0f\xf7\xcc\x73\x32\x7f\x80\x32\x28\x5f\x56\x13\xad\x4e\x96\x26\xcf\xec\x64\x9c\xb4\xc1\x19\x7d\xcb\x8f\x63\x88\x76\x30\x0b\x3f\x64\xb6\x1a\x55\xac\x57\x75\x19\x46\x0e\x99\xf9\x3a\xa6\xc8\x44\x93\xbe\x76\x99\xcc\x8b\xbb\x33\xb3\x2b\x72\x4c\x30\x1a\xe1\x34\x5b\xd0\x33\xd3\xab\x0f\xd9\x77\xb6\x61\x68\xc5\x96\x1b\xef\xb2\x24\x00\x97\x15\x80\x94\x44\xc1\x68\xfc\xc3\x5a\x9e\x20\xdf\xb1\x23\x23\x6a\x9e\x0a\xd0\x7d\x97\xdd\xe9\xe9\x72\x6d\x55\x40\x3a\xad\x50\x08\x19\x74\x7f\x0b\xd0\x94\x73\xb4\x22\xea\x62\xaf\xb6\x88\xcc\x22\x90\xfb\x26\x8b\x72\x20\xe3\x47\xcb\x41\x15\x58\x37\xf8\x48\x03\xca\xd0\xc3\xb6\x45\x51\x9a\xc1\x21\xca\x5f\x9a\x6e\x09\x3f\xac\xe6\x5b\xfa\x39\x3e\x5c\x24\x5c\x4e\x9b\x4a\x43\x64\x03\x26\xf3\x69\x9b\x83\x1b\xa6\x6b\x69\x4d\x18\x98\x47\xe4\x4e\x48\xe5\x77\xc1\xf4\x58\xf5\xe6\xf1\x14\x83\x2c\xe1\xa6\x1d\xe1\x8d\xb7\x7c\x17\xc8\xeb\x66\x22\x8b\x2b\x8a\x6a\x9f\x33\xab\xa5\x8d\xc1\x7c\x3a\x60\x34\xa0\x31\xec\x29\xca\xaa\xba\xd7\x64\x4a\x4f\x97\xf6\xf4\x46\x2d\xc5\xea\x1e\xd4\x23\xbb\x33\x77\x87\x45\x7d\x17\xcc\xa
2\xb0\x65\xd1\xd4\xe0\x9b\x01\xa2\x7d\x38\xfc\xd8\x0e\xff\x6a\x51\x8e\x65\x7b\xb4\xa9\x7b\x52\x66\xa7\xf6\xcc\x44\x4f\x6b\x70\x81\xf4\x5b\x99\x1d\x5d\x73\x5d\xd4\xf6\x00\x05\xf2\xbb\x6a\x0d\x19\x18\xdf\x48\x80\x12\xbf\x0c\x74\xb6\xd0\x23\x64\x7e\xfb\x77\x21\x3f\x5b\xba\x9a\x8a\x59\x7a\x95\x39\x63\x88\x5b\xe6\x03\xb4\xb0\x79\xcf\x9b\x2b\xbb\x6c\x28\x5b\x5b\x7a\x42\x9c\x21\x29\x62\x8e\x01\xa6\xd4\xae\xe9\x86\xae\x96\x80\x85\x2a\x29\xf6\xe1\x1a\xd8\xb2\xd1\x65\x3d\x67\xbc\x44\x90\x95\xd6\x33\x22\x9a\x82\x41\xae\x62\xd2\xe3\xf1\xab\xbd\x9b\x69\x1c\xfd\xba\xa3\x3d\xb7\x34\x77\x55\xb3\x42\x53\xcc\xca\xda\xc2\x23\x6b\x52\xd6\x81\x1e\x08\xa2\x11\x7d\x50\x15\x99\x9d\x8a\xac\x5f\xcf\x07\xe8\x08\x37\x8d\x6a\x65\x98\x0b\xd4\x23\x13\xce\xc5\x03\x01\x54\x9c\x46\xb1\x77\x63\xf5\xa5\x0a\x3b\x6d\x5b\xc3\x35\xbb\x4d\x64\xb2\x6e\xc1\xad\x3d\x0d\xdb\xbb\x4d\x61\x51\xea\xe0\x54\xf9\xa2\xd7\x3b\x17\x79\x0b\xb3\x1e\xd7\xf2\xe0\xef\xf1\xc4\x82\x46\xdd\x91\xf7\x2c\xad\x6d\x51\xd6\xa3\xc6\xce\xcb\x6f\x5a\xb4\x58\x48\xd6\xa7\x71\xcb\xb8\x5b\x27\x5d\xb6\xa8\xec\x8b\x84\xed\xa3\xfc\x48\x9d\x12\xbb\x7f\x2d\x6a\xcb\xce\x71\xcf\xd6\xae\x4d\x4a\x2d\x96\x2a\x9d\x54\xbc\x9b\xc2\x43\x85\x60\xda\x56\x4e\x2b\xa6\xfb\x4e\x7d\x78\xb5\x4e\x92\xb2\xe7\x77\x61\x4d\x46\x71\x59\x21\xa6\xf9\x53\x7d\xe1\x38\x93\x3a\xd2\x77\xec\xc8\xb9\x0d\xd7\x3d\x2b\x73\xe8\xf6\xb9\x61\x23\xdc\x05\x78\xc8\xe4\xc6\x46\x94\x1a\x79\x03\x34\x1b\xf3\x8d\x36\x7c\x09\x44\xe9\x70\xa4\xab\x75\xcb\x8a\x92\xda\xcf\x3f\x29\x93\xdb\x1d\x7e\x7c\x3f\xbb\x12\x6c\xbf\xe9\x28\x6d\x38\x69\xea\xea\xf9\x06\x80\xd5\xaa\xef\xf4\x30\x47\xe6\x0e\x51\x29\xb7\x00\xd2\xe1\x10\x59\x41\xbb\xe5\x06\xb7\x72\xb6\xda\x72\x66\x25\x70\xf6\x1d\xba\xed\x00\xda\xa3\x8b\xd6\x72\x31\x02\x16\xa0\x39\x70\x0e\xbe\x47\x17\x55\x4e\xb7\x04\x61\x69\x64\x47\xc4\x7a\x42\xec\xb3\xdd\xbb\x8a\xda\x15\xbe\x1b\xd6\xe8\xa4\xca\x9f\x59\x0d\x0c\x7a\x96\xe3\xd3\x26\xbb\x66\x89\xd2\x99\x1d\xfe\xd8\xe9\xfa\x7d\x67\xca\xd8\xce\xd7\xcb\x83\x9f\x12\x60\xeb\xff\x6b\xf2\xad\x36\x6a\x21\x3b\x8d\x37\x11\x3e\xda\x51\x8e\xb1\x56\x3c\x16\xae\x66\xa2\xec\x80\x36\xc4\x43\x5a\x1f\x8a\x37\xd6\x65\xe2\x9a\xc6\xeb\xc9\x69\x87\xf1\xad\xa7\x2e\x4d\xa0\x5b\x1a\xbc\x34\xc9\xf7\xeb\x39\x32\xb3\x6f\x16\x4a\x35\xd0\x0d\x1c\x5c\x03\x7d\x3f\xda\xa8\xbb\xd9\xc9\xc3\x38\x3c\x44\x31\x37\x09\xe2\x4c\xe6\xff\x05\x00\x00\xff\xff\xc6\x63\x30\xa8\xef\x1c\x00\x00") + +func fontsPuffyFlfBytes() ([]byte, error) { + return bindataRead( + _fontsPuffyFlf, + "fonts/puffy.flf", + ) +} + +func fontsPuffyFlf() (*asset, error) { + bytes, err := fontsPuffyFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/puffy.flf", size: 7407, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsPyramidFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x97\x57\x74\xdc\xd4\x16\x86\xdf\xf5\x7c\xef\xfb\xbe\xbe\x01\x27\xc4\x93\x58\x1a\x7b\xec\x09\x21\x38\x84\x00\x01\x02\xa1\xb7\x10\xe7\xcc\xe8\x68\x46\x44\x23\x19\x49\x93\xd8\x74\xd3\x7b\xef\xbd\x2f\x7a\x67\xd1\x7b\xef\xbd\xf7\xde\x7b\xef\xac\x19\x2f\x98\xff\x0f\x7b\x78\x49\xd6\xfa\xbe\xa3\x23\xe9\x93\x6c\x6f\x05\x51\xe0\x99\x49\x29\x4a\x51\x86\xa4\xe0\x8a\x5b\x72\x96\x4c\xa4\xa6\x11\xfa\x52\x99\x90\x05\x91\x69\xfa\x56\x16\x9b\x34\x0f\xe3\x4c\xfa\xbd\xd9\xe5\x41\x67\x51\x5c\x8d\x9a\xbe\xcd\xa4\x5a\x37\xa9\xa9\xe6\x36\xcd\xc4\xf5\x86\x0b\xde\xe0\xa0\x13\x84\xb5\xc8\xe6\x92\xda\xc8\x9a\xcc\x8a\x37\xcb\x95\x42\x41\x5c\x4f\xe6\x37\x6b\xe2\x96\xcb\x03\xce\x12\x9b\x36\xc2\x2c\x0b\x93\x58\xc2\x4c\xea\x36\xb5\x95\x09\xa9\x85\x2b\x6d\x2c\x79\x22\x8d\xc4\x0f\x83\x09\xc9\xeb\x61\x26\x41\x12\xe7\x7d\x62\x32\x89\x92\xb8\xd6\xfa\x3f\xaf\x5b\xa7\xbd\x20\xb4\x69\x6f\x26\xb1\x69\xd8\xd6\x1e\x63\x91\xa9\x5a\x5f\x92\x58\x8c\x54\x93\x46\xc3\xc6\xb9\x44\x61\x6c\x67\x39\xce\xc2\xf1\xb1\xc8\xc4\x26\x6f\x9d\x2d\x09\x24\x08\xd3\x6c\xca\xcd\x71\x5a\x37\x2e\x05\xe9\x69\x98\x5a\x58\x95\xb8\xd9\xa8\xd8\xb4\x47\x82\x24\x95\x20\x8c\xac\x84\xbe\x8d\xf3\x30\x08\xab\xed\x83\x1d\x23\x22\x52\x90\xac\x9e\x34\x23\x5f\x4c\xb4\xca\x4c\x64\x52\xb1\xb2\xdc\xf4\xf6\xb5\x0f\x8a\x93\x55\xce\xb4\xa9\x45\x79\xdd\x4a\x4f\xdd\xa4\x7e\x25\x32\xf1\x8a\x9e\x56\x80\xb1\x34\x8c\xf3\xac\x75\x0f\x46\xda\xb4\x4f\x2a\xcd\x5c\xaa\x26\xee\xcd\x5b\xdb\x64\x8d\x66\x56\xb7\xbe\x53\x9c\xda\xa1\x6e\xc3\x5a\x3d\x6f\x5d\xb1\xe9\x44\xfe\x57\xd9\x27\x71\x92\x4b\xd8\x7e\x32\x61\x5c\x13\xdf\x66\x55\x1b\xfb\x36\xcd\x9c\xa1\xf6\x51\x0d\x33\xde\xbe\x71\x89\x6c\x5c\xcb\xeb\x32\xdd\x8e\xff\xb5\x16\x9b\x65\x33\x64\xa6\x18\x09\x9a\x7e\xcd\x4a\x60\xaa\x79\x92\x3a\x05\xb7\xbd\x83\x6f\x03\xd3\x8c\xf2\xa9\x6b\x6d\x24\xbe\x6d\xdf\xf7\xdf\x4f\xca\x71\x4b\xed\x65\x53\x25\x5b\x97\x47\xfb\x3a\xce\xe4\xe4\xe4\x48\xe7\x9f\x11\x47\x64\x99\xc8\x88\x23\xb3\xff\xb7\x54\x46\x9c\xb9\xa3\xa3\xa3\xf3\x90\xf6\xa8\xf4\xff\x2a\x9d\xa6\xd2\x35\x54\xba\xa6\x4a\x7b\x55\x3a\x5d\xa5\x33\x54\xba\x96\x4a\x67\xaa\xb4\x4f\xa5\x05\x95\xce\x52\xe9\x6c\x95\xf6\xab\xd4\x55\xa9\xa7\xd2\xa2\x4a\x07\x54\x3a\xa8\xd2\x92\x4a\x87\x54\x3a\xac\xd2\xb2\x4a\xe7\xa8\x74\x6d\x95\xce\x55\xe9\x3a\x2a\x9d\xa7\xd2\x75\x55\x3a\xa2\xd2\xf9\x2a\x5d\x4f\xa5\x0b\x54\xba\xbe\x4a\x17\xaa\x74\x03\x95\x6e\xa8\xd2\x8d\x54\xba\x48\xa5\x1b\xab\x74\x13\x95\x6e\xaa\xd2\xc5\x2a\xdd\x4c\xa5\x9b\xab\x74\x89\x4a\xb7\x50\xe9\x96\x2a\xdd\x4a\xa5\x5b\xab\x74\x1b\x95\x6e\xab\xd2\xed\x54\xba\xbd\x4a\x77\x50\xe9\x8e\x2a\xdd\x49\xa5\x4b\x55\xba\xb3\x4a\x97\xa9\x74\x54\xa5\xcb\x55\x6a\x54\x5a\x51\x69\x55\xa5\xbe\x4a\xad\x4a\x03\x95\xd6\x54\x5a\x57\x69\xa8\xd2\x5d\x54\xba\x42\xa5\x91\x4a\x1b\x2a\x8d\x55\x9a\xa8\x74\x4c\xa5\xbb\xaa\x34\x55\x69\xa6\xd2\x5c\xa5\x4d\x95\xae\x54\xe9\x2a\x95\x8e\xab\x74\x42\xa5\xbb\xa9\x74\x77\x95\xee\xa1\xd2\x3d\x55\xba\x97\x4a\x7f\x53\xe9\x4b\x2a\x3d\x4d\xa5\xfb\xab\xf4\x07\x95\x1e\xab\xd2\x33\x91\xba\xde\x70\xc7\x3c\xc0\xa6\xdc\x31\xbf\x92\x29\xf6\x77\xcc\x47\x6c\xdc\x8e\x79\x9b\x8d\xd7\x31\xef\xb2\x29\x76\xcc\x9b\x6c\x06\x3a\xe6\x3d\x36\x83\x1d\xf3\x01\x9b\x52\xc7\x7c\xcc\x66\xa8\x63\x3e\x61\x03\x0d\x3e\x64\x03\x0d\xbe\x20\x33\x00\x0d\x3e\x67\x03\x0d\x3e\x65\x03\x0d\xee\x65\x03\x0d\xee\x63\x03\x0d\x1e\x62\x03\x0d\xde\x67\x03\x0d\xee\x67\x03\x0d\xbe\x65\x03\x0d\xbe\x67\x03\x0d\xbe\x26\x33\x08\x0d\x7e\x61\x03\x0d\x7e\x62\x03\x0d\xfe\x60\x03\x0d\x9e\x67\x03\x0d\x5e\x65\x03\x0d\xce\x65\x03\x0d\xce\x63\x03\x0d\x2e\x60\x03\x0d\xce\x67\x03\x0d\x4e\x24\x53\x82\x06\x6f\xb
1\x81\x06\x9f\xb1\x81\x06\xdf\xb0\x81\x06\x3f\xb3\x81\x06\x5f\xb1\x81\x06\x4f\xb1\x81\x06\x97\xb0\x81\x06\x37\xb3\x81\x06\x77\xb0\x81\x06\x17\x91\x19\x82\x06\x97\xa1\x71\xdc\x21\x88\x70\x1b\x1f\x04\x11\x6e\x65\x03\x11\xce\x61\x03\x11\x2e\x65\x03\x11\xae\x64\xd3\xfa\x7e\x93\x8e\x3d\x89\x2d\x84\x38\x99\x0d\x84\x38\x85\x0d\x84\xf8\x9d\xcc\x30\x84\xb8\x96\x0d\x74\xb8\x8e\x0d\x74\xb8\x9e\x0d\x74\xb8\x91\x0d\x74\xb8\x89\x0d\x74\x38\x9b\x0d\xbc\x0c\x17\xb2\x81\x06\x17\xb3\x81\x06\x97\xb3\x81\x06\xb7\x90\x29\x43\x83\xdb\xd9\x40\x83\x3b\xd9\x40\x83\xbb\xd8\x40\x83\xbb\xd9\x40\x83\x7b\xd8\x40\x83\x07\xd9\x94\xba\xfc\x29\x76\xcb\xd0\xe0\x61\x36\xd0\xe0\x11\x36\xd0\xe0\x51\x34\x5e\x3f\x34\xb8\x82\x0d\x34\x78\x92\x0d\x34\x78\x81\x0d\x34\x78\x8d\x0d\x34\x78\x9d\x0d\x34\x78\x8c\x0d\x34\xf8\x92\x0d\x34\x78\x9c\x0d\x34\x78\x82\x0d\x34\x78\x9a\x8c\xdb\xcf\x3f\x77\xcf\xb0\x85\x0e\xcf\xb2\x81\x0e\xcf\xb1\x81\x0e\x2f\xb2\x19\xe8\x32\x4c\x79\x2e\x74\x78\x99\x4d\xfb\xf7\x02\x5d\xe2\x2b\xbc\x00\x72\xbc\xc3\x06\x72\x7c\xc7\x06\x72\x9c\x4a\xc6\xeb\xef\x32\xd9\x79\x1e\xa4\x38\x9d\x0d\xa4\x38\x83\x4d\xb1\xcb\x3c\xe7\x79\x90\x62\x1f\x36\x90\xe2\x0d\x36\xf0\x4a\xec\xcb\x06\x1a\xec\xc7\x66\xb8\xcb\x4c\xea\xe1\xdc\x78\x00\x19\x9c\x1b\x0f\x64\x03\x0d\x0e\x5a\x2a\x23\x02\x0a\x22\x1c\xcc\x07\x41\x84\x43\xd8\x40\x84\x43\xd9\x0c\xfe\xe3\xa9\x1f\xc6\x0b\xa0\xc5\xe1\x6c\xa0\xc5\x11\x6c\xa0\xc5\x91\x6c\xca\xab\x9f\xed\x28\xf2\x38\x45\xfe\x87\x0d\x14\xb9\x9a\x0d\x04\xf9\x91\x4d\x71\xf5\xb3\xed\xcd\x1e\xb2\x9c\xc5\x06\xde\x8d\xff\xb2\x29\x75\xf9\xca\xf0\x70\x96\x3c\x81\x0d\xf4\xb8\x8a\x0d\xbc\x1b\x37\x90\xc1\x59\xf2\x78\x36\x50\xe2\x38\x36\x5e\x97\x6f\x1d\x0f\x67\xc9\x6b\xd8\x40\x83\x63\xd8\x40\x83\xa3\xd1\xfc\x19\x00\x00\xff\xff\xc2\x6e\xde\xca\x4c\x18\x00\x00") + +func fontsPyramidFlfBytes() ([]byte, error) { + return bindataRead( + _fontsPyramidFlf, + "fonts/pyramid.flf", + ) +} + +func fontsPyramidFlf() (*asset, error) { + bytes, err := fontsPyramidFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/pyramid.flf", size: 6220, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsRectanglesFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x58\xcd\x6a\xdb\x40\x10\xbe\xfb\x29\xbe\x83\x20\x97\xc8\xc2\xa1\x29\x14\x8c\xf1\xa1\xcf\xd0\x93\x60\x70\x5d\x39\x0d\xd8\x0e\x34\xa6\x50\xd0\xc3\x17\xcd\xee\xcc\xae\xf6\x4f\xeb\xa4\x50\x5b\x58\xf3\xcd\xef\xb7\x33\xb3\x39\x9d\x4f\x4f\x87\x06\x5f\xf1\x8c\xcd\x33\x36\xd8\xac\xfe\x0c\xc7\xdb\xe1\xfa\x72\x1e\xde\xd7\xa7\xf3\x09\x3f\xff\xe1\xfb\xe1\xef\xeb\x2f\xfc\x78\x3d\x9f\x87\x97\xc3\x3b\xb6\x97\xeb\x70\x19\xae\xb7\xdf\xfb\xeb\x70\x3b\xbe\x5d\xd6\xc7\xb7\xcb\x0e\x9b\xa7\xee\xdb\x97\x55\xd3\xec\x53\xff\xf7\x2b\x10\x61\xbf\x1a\x81\x51\x3f\x89\xf4\x13\x00\x58\x08\x46\x6a\x34\x02\x23\xbf\x6b\x9a\x06\xc1\x17\x23\x26\x61\x4c\x3f\x11\x03\x18\x39\xfd\x02\x18\xc5\xfa\x08\xb0\x2a\x96\x95\x7f\xa2\x42\x15\x58\xf7\x20\x5e\xc1\x3a\xe9\xeb\x10\x18\x2c\x8c\x6c\x54\x44\x1a\x98\xe2\x5d\x60\xcb\xd6\x1c\x2c\xb6\x66\x13\xc2\xe9\x98\xc2\x87\xf7\x61\xb5\xb2\x4e\xa3\x88\x83\x94\x2f\x62\x33\xac\xcb\x68\x9a\xff\x8a\xd1\x7d\x59\x7c\x22\x44\x22\x2d\x09\x7b\xd1\x02\x68\xa5\x3a\x51\x88\x16\x16\xc7\x1a\x46\xe6\xc3\x66\x31\x05\x11\x47\xf5\x27\x49\x79\xc4\x8d\x8c\x26\x49\x9b\x35\xe9\x72\x86\x8e\x43\xef\xd0\xb1\x58\xa7\x5e\x79\xa2\x12\xbd\x2d\xaf\x75\xcb\x95\x36\x4c\x96\x4d\x31\x4c\x05\xbc\xf0\x89\x62\x3e\xf8\x16\x48\x08\x44\x19\x0b\x91\xa8\x7c\x95\x44\xc5\x61\x40\x52\x5f\x08\x91\xaa\xb5\x1a\x51\xac\x97\x45\xd5\x72\x85\x03\x46\x5d\x8d\x56\x91\xc9\xf8\x3a\xab\xbc\x7b\xca\xbf\xf6\xc8\x36\x9d\x44\xc3\x8d\x4e\xb9\xb1\xc5\x96\x51\x3d\x7a\xa6\x48\x4f\xbd\xe5\xa5\xb2\x4f\xb9\x49\xb3\x72\xbb\xba\x5b\x99\xfd\xca\x72\x64\xd2\xe5\xeb\xdc\x61\xa7\xf6\x3a\xe6\x62\xa0\x4f\x13\x19\x1e\xa3\xf8\x3c\x25\x4e\xef\xd4\xa8\x0c\x83\x43\xc7\x16\x60\xda\xd7\xb4\x67\x57\xc0\x48\x60\x44\xb6\x57\xd4\x59\x83\x5a\x6b\xab\x60\x10\x58\x6f\x67\x8b\x12\x82\xba\xe5\x94\x44\xcd\xb7\xd2\xc9\x08\x36\xa2\xa2\x00\x0a\xf3\x9c\xac\xb0\x36\x7e\xa8\x00\x22\xed\x9a\x75\xc1\x1a\xfb\x66\xf0\x6e\x3e\x7f\xd4\xc9\x76\xd9\x49\xf3\x60\x60\xf6\x81\x4a\x4d\x32\xc7\x12\x7f\x4d\xa8\x81\x8d\x11\xec\x2e\x4e\xde\x97\x12\x8a\x17\x82\x2a\x96\x44\xd6\xe4\xbc\x83\x53\xba\x70\xde\xea\x0a\x10\x72\xd2\x2d\x3b\x35\xb1\x79\x93\x1c\x77\xb5\xa0\xcf\x92\xcb\x3c\xa0\x2f\x9d\x6e\x7f\x83\x0c\xca\x5d\xb4\x86\xc0\x5a\x5b\x49\xe5\x00\x56\xda\x72\x12\x99\x24\x44\x2c\x89\x9c\x0c\x66\xee\x6c\xc1\x73\xcb\xc5\xe8\x0f\x49\x7b\xac\xfa\x60\x68\xf9\x43\x32\x33\x9e\xed\x2e\xe8\xc6\x6a\x92\xd4\xbc\xd5\x8c\xc9\x09\xa8\x63\x2e\xff\xae\x30\x2f\x13\x8e\x15\x6f\x00\x6e\x63\xf3\x56\x83\x07\xe3\xfc\x63\x6a\x7d\x80\x9f\xb6\xe2\xa6\x81\xe4\x72\x94\x11\x25\xb8\xb4\xdd\xa3\xb5\x5d\x5e\xf6\xa4\xee\x70\xfb\xf9\x92\xd6\x78\x39\xca\x66\x00\xfe\x3d\x0b\xc9\x0d\xc9\xbb\x7b\xa4\xa2\x96\x95\x2e\x49\x9d\xb4\xd1\x07\x13\xca\x63\xc6\x28\x72\x46\xfd\xf3\x94\x6a\x9a\x99\x39\x90\x48\x69\x3a\xea\x42\x2e\x6b\x8a\xb9\x46\x58\xa8\xaa\x0a\xe9\xf5\xac\x48\xbc\x8a\xba\xbb\x55\x6b\x99\xf9\x35\x5a\xe7\x57\xf1\x62\x06\x7c\x51\xf4\xae\x3b\x67\x2a\x17\x35\xe9\xfc\xed\x68\x6e\x81\x32\xd4\x29\xf8\x9d\x3c\x05\xf3\xc4\xb5\x99\xc3\x6d\x5e\xbb\x95\x9b\xf4\x5e\x0d\x1d\x7c\xd0\x2b\xb6\xf4\xaf\x39\x85\x67\x64\x4e\x5e\x11\x3d\x1d\x91\x1d\xd2\x3f\x02\x44\x6b\x0c\x8d\xf3\x8b\x45\xb2\x07\xfb\x7f\xa0\x90\xf5\xb3\x62\xa3\x4f\xc1\x2a\xd6\xa0\x14\xac\x62\xe6\x4b\x6d\xed\x7d\xac\xd4\xc6\x33\xa2\x29\x86\x06\xa2\x4b\x64\x5e\xba\xc0\x30\x1b\xe4\xcc\x30\xec\x7f\x00\x00\x00\xff\xff\xbd\x5d\x2e\x99\x39\x13\x00\x00") + +func fontsRectanglesFlfBytes() ([]byte, error) { + return bindataRead( + _fontsRectanglesFlf, + "fonts/rectangles.flf", + ) +} + +func fontsRectanglesFlf() (*asset, error) { + bytes, err := 
fontsRectanglesFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/rectangles.flf", size: 4921, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsReliefFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x59\xb9\x6e\x1b\x31\x10\xed\xf5\x15\x0b\x28\x6d\x44\xc8\x08\x62\xb8\x4b\x1d\xc0\x2e\x52\x13\x20\x14\x47\x42\x04\x3b\x2e\x1c\xa8\x48\xc3\x6f\x0f\xac\x5d\x72\xe7\xcd\x0c\xaf\xb5\xe4\x8a\x2a\xc4\x6b\x39\x7c\x7c\x73\xae\x74\x78\x3e\xdc\xec\x3e\x0d\xb7\xc3\xed\xb0\xfd\x3a\x7c\xde\x0e\x37\xab\x1f\xfb\xe7\xe3\xfe\x30\xfc\xfc\x37\x3c\x1c\x1f\x9f\x86\xfb\xe3\xcb\xfe\xf5\xef\xf0\xb0\xb9\xdf\x8c\xdd\x6f\xbf\x4e\xaf\xbf\x77\x7f\x36\xbb\xc7\xcd\xe9\x69\xf5\xfd\xf4\xb2\x1f\xb6\x77\x77\x5f\x56\xee\xed\xb3\xae\x6f\xe2\xd0\x78\x6f\x45\x83\x6b\xb0\x63\x5a\xb0\xce\x4c\x1b\x42\x6f\x5e\xcd\xf6\xe6\x6e\x58\x08\x22\xa6\x13\xa7\x8f\x6d\x5a\x03\x99\x74\x78\xee\x1b\xba\xe9\xbc\x23\x8a\xb3\x11\x04\xce\x7b\xb8\x13\x0a\x0d\xb7\x0e\x8f\x8c\x7b\x89\x94\xf5\x0a\xe5\x71\x8a\x04\xc4\x88\x65\x7a\x1c\xe7\xbd\xb7\xe3\x7c\x3c\x88\x3c\x2f\x20\x8e\xe8\xd6\x2b\x13\x97\x95\xef\xb0\x2f\x20\x0d\xe7\xca\xd6\x51\x23\xa0\x1b\xc5\x03\xac\x35\x00\x50\x55\x4a\xd0\xab\x1d\x2f\x67\xe7\x1b\x5b\x3e\x4f\xb9\xd5\x69\x04\xa9\x7c\x60\xd0\x74\x52\x8f\x29\x56\x94\x6f\xe2\x5d\xad\x76\x45\xb5\x9f\xb0\xaf\x54\xbf\x05\x86\x20\x65\xec\x36\x59\x2a\x03\xa0\xfa\x2b\x71\x9c\xc9\xba\xc1\x76\xc1\x10\xe0\x48\x2f\x75\x28\x01\x03\x34\xee\xa0\xd0\x35\x0a\x93\xba\x6b\xcd\x16\xc9\xc9\xa0\x14\x54\x79\x7f\x51\x28\xe1\x82\x30\x51\x10\x8a\x0c\x4d\x98\x8c\x90\xa5\x98\x32\xd5\x6a\x82\x53\x0e\xd7\x61\x00\x04\x2b\xd1\x42\x4b\xc6\x79\xc1\x7b\x78\x18\xc3\xeb\xeb\xe1\x4a\x42\x7c\xbf\x9d\xd6\x41\x69\x83\x58\xbf\x59\xe7\x46\x08\x15\x8d\xf1\x53\xa4\x13\xa3\xa6\x1d\xe7\xe8\x1f\x0f\x8c\x28\x22\x83\x91\x3f\x32\xe3\x30\xca\xe7\x42\x59\x2a\x7c\x55\x84\x35\x0a\x4a\x39\x39\x0d\x53\x33\x98\x90\xd4\x33\x31\x8e\x62\x90\xb3\xdc\xfb\xa2\x05\xc6\xa3\x49\x22\x33\xc2\x7f\xc4\x1e\x97\xb0\x9b\x92\x6c\xee\x9b\x46\x19\x64\x64\xcf\xbc\xa7\x64\x17\x56\x96\xe2\xb6\x14\x82\x75\x32\xd6\xbd\x03\x77\x7a\xd0\x24\x5b\x85\x3a\x27\x21\x6d\xa5\x9a\xef\x46\xd9\xc4\x68\xae\xc3\x37\xc1\x3d\xae\x48\xdc\x05\x56\x97\xd8\xe0\xb4\x1b\xfc\x56\x76\x0c\xe3\x55\xc9\xf8\x9a\xfb\x2a\x5d\xe3\x65\x09\x51\xbe\x29\xa6\x4e\xac\xcb\x15\xb2\xf3\xda\x4f\xa8\x35\x3b\xa8\xb5\x2c\x54\x8a\x35\xfe\x82\x51\x82\x29\xdc\xa6\xb4\x6f\xd4\x83\x32\xb2\x4b\x56\xab\x42\xbd\x44\x94\xe0\x2b\x1a\xf9\x17\xc2\x4d\x4d\xba\x25\x4a\x34\x46\xe5\xa5\x7c\x6b\x2b\xe3\xe4\x42\xbe\xcb\xef\x49\xe9\x41\x46\x76\x65\x90\x5f\x64\x27\xb5\xe2\x2c\x94\x1e\x22\x9a\x88\x57\x9e\xd9\x2d\x2c\x16\x04\x75\x43\x47\x28\x75\x5c\x72\xee\x16\x19\xac\x86\xbf\x1b\xb4\x46\x80\x2c\x0f\x8b\x34\xcb\xbc\x42\x84\x6e\x07\xef\x59\x55\x51\x91\xe4\x0d\xe3\x31\xad\x62\x27\x93\x5b\xe2\x89\xf0\x02\x81\x3f\x26\x10\xac\x82\x47\x3d\xd3\xc9\x9a\x15\x43\x6b\xa2\x90\x0e\x67\xcc\x2f\x2f\x86\x17\x0c\x8a\x1e\xd3\x83\xda\xe7\x32\x83\x82\x16\xd6\xd3\x6f\x49\x6f\x98\xc3\x8c\xf8\xee\xc5\x6e\x2f\x76\x7b\xb1\xdb\x8b\xdd\x5e\xec\xf6\x62\xb7\x17\xbb\xa0\x97\x9a\x20\xdf\x8b\xdd\x5e\xec\x3a\x99\x1b\x8c\xc7\x02\x57\xd4\xad\x3c\xcf\x14\xff\xd4\xc5\x46\x39\xbf\xe2\x24\x1d\x55\xc2\x96\xe2\x66\x25\x81\x73\x85\x4a\xcb\xac\x19\x7f\x60\xe9\x79\xe5\xc0\x7f\xb5\x60\xd1\x39\x69\xc6\xad\x46\x1b\xcc\x8c\x20\xfb\x7f\x00\x00\x00\xff\xff\x8a\x69\x12\xdc\xc7\x21\x00\x00") + +func fontsReliefFlfBytes() ([]byte, error) { + return bindataRead( + _fontsReliefFlf, + "fonts/relief.flf", + ) +} + +func 
fontsReliefFlf() (*asset, error) { + bytes, err := fontsReliefFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/relief.flf", size: 8647, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsRelief2Flf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x3b\x6f\xdb\x30\x10\xde\xf5\x2b\x0e\x70\xd7\x86\xb0\x51\x34\xc8\xd6\x4e\x9d\xba\x74\xe6\x92\x20\x72\x1b\x40\x69\x01\xa7\x46\xe1\x7f\x5f\xd4\x12\xa9\x7b\x7c\x7c\x48\xb6\x3b\x31\x83\xf9\xd2\x1d\x3f\x7e\xf7\xe0\x31\xfb\x61\xbf\x7b\x7c\x47\xf7\x74\x4f\xdb\x8f\xf4\x7e\x4b\xbb\xee\x5b\x3f\xbc\xf4\xfb\x1d\x3d\x9d\xe8\x6b\x7f\x18\x5e\x7e\xd2\x97\x43\x7f\xfa\xf3\x6b\xd8\xd3\xeb\x79\xfc\xe9\xe9\xf0\xf8\xe3\xf5\xed\xee\xf8\xdc\x0f\x77\xfd\xf3\xb1\xfb\x7c\xfc\x7e\x7c\xfb\x4d\xdb\x87\x87\x0f\x9d\xff\xf7\xb7\xa9\x6f\xe2\xd0\x39\x47\xa6\x91\x6b\x42\x62\x5a\x20\x1f\x04\x42\x6f\x5e\xcd\xf6\xe6\x6e\x58\x08\x2a\xa6\x1d\xa7\x3f\x5a\xb4\x26\x74\xf2\xe1\xb9\x2f\x84\xce\x12\x51\x1d\x45\x10\x72\x5e\x9e\x49\x2a\x0d\xa7\x0e\x9f\x8c\xb2\x4c\xcb\xa6\x93\xfa\x34\x45\x06\x62\xc4\x32\x7d\x2e\xe7\x9d\xa3\x71\x3e\x6e\xc4\xbe\x37\x10\x47\x74\x9b\x6e\x5e\x06\xbf\x41\x2e\x20\x0d\xfb\xda\xd6\x0b\x2a\x98\xa0\xf9\x40\xb5\x12\x20\x34\x4a\xb0\x2b\x8d\x87\xa3\xf9\xc4\xa4\xe7\x39\xb7\x98\x46\xa1\x55\x0f\x94\xeb\xa4\x3e\x03\x5e\x94\x6f\x66\xa8\xe8\x88\xb0\x9f\xf0\xaf\x54\x7f\x09\x0c\x43\xca\xd8\x5d\xe4\xa9\x0a\x00\x8c\x57\xa7\xa4\x95\xef\x0a\x47\x10\x5b\x02\x1b\x5a\xc0\x42\xb9\x0e\x50\xd1\x45\x4c\xe2\xd0\x9a\x3d\xd2\xf8\x00\xa3\xa0\x2a\xfa\x8b\x4a\x19\x17\x8c\x89\x82\x52\xc9\xd0\x84\xc9\xea\x02\xae\xcc\xad\x9a\xe0\x54\xc3\xf5\x32\x01\x0a\x2f\x41\xa9\x25\x13\xbc\x22\x7a\x74\x1a\x93\xc7\xc7\xe9\xca\x42\xbc\xdc\x4f\xeb\xa0\x2c\x83\x58\x2f\x8c\xb9\x31\x4a\x4d\xe3\x42\xa6\x33\xa3\x45\x12\x67\x4c\x71\xc3\x88\x22\x32\x18\xf9\x63\x33\x5e\x66\xf9\x5c\x2a\x4b\xa5\xaf\x8a\xb4\xc6\x41\x81\x9d\xd3\x30\x91\xc3\x78\xe8\xbd\x28\x1a\x12\xb3\x3a\xfa\x9c\xb9\x84\x59\xc8\xd8\xf8\x31\x32\x3e\xe1\x37\x25\xdd\x3a\x36\xd1\x20\xa3\xdb\x15\x75\x17\x56\xd6\xe2\x26\x0e\x81\xbc\xcd\x75\x17\xe0\x4e\x0f\x16\xe9\x86\x50\x1d\xbf\x3e\xcc\x4a\x35\xdf\x0b\x75\x33\xa7\xb9\x0d\xdf\x0c\xb7\x93\x29\x00\x08\x5d\xcd\x07\x27\x69\x11\xb7\xb6\xa3\x79\x05\x37\x3e\x0a\x5f\xd0\x65\x0c\xe7\x2c\xa4\x8e\x20\xa4\x55\x5d\x0e\xc8\xce\x5b\x3f\x61\xd6\xec\xa0\xd6\xb3\xa4\x51\x88\x1b\x65\xaa\x06\xd6\x67\x09\x65\x70\x4a\x59\x1f\xbb\x42\x46\x77\xc9\x6b\xa1\xba\x6b\x64\x09\xbd\x82\xc8\xbf\x12\x6e\xee\xd2\x91\xba\x6b\xe1\xbe\x9c\x6f\xb4\x32\x4e\xae\xe4\xbb\xfc\x4e\x4a\x0f\x32\xba\x2b\x93\xfc\x2a\x3f\xa9\x55\x37\xbf\xa2\x3d\xca\x26\xe6\xc9\x33\x87\x05\xc9\x28\xa9\x1b\x7a\xe7\x12\x67\x28\x9c\x22\x83\xd5\xe9\xb7\xc1\xd2\x0c\x90\xe5\x61\x95\x65\x55\x86\x33\xa9\xdb\x8b\x77\x56\x55\x56\x64\xf7\x46\x84\x07\x3b\x99\xbb\x25\xee\x28\x1e\x10\xf2\x9f\x09\xdc\x34\x1a\x0f\xbe\xe9\x6c\xcd\x2a\x53\x6b\xa2\x90\x0e\x7b\xcc\x8f\x17\x46\x7c\xd2\x8e\xe9\x41\xed\x77\x99\x41\xc1\x0a\x9b\x2e\xf8\x4b\x58\x07\xbf\xad\xd8\x6d\xc5\x6e\x2b\x76\x6f\x50\xec\x7a\x67\xaf\xa7\x56\xec\xae\xd0\xdd\x8a\xdd\x56\xec\xb6\x62\x17\x64\x93\x56\xec\xae\xb2\xac\xca\x70\x26\x75\xaf\x28\x76\xcd\xdd\x10\x41\x0a\xbc\xb2\xeb\x70\xb1\x3a\x9d\x3f\xdf\x80\xfd\x2b\x76\xc2\xa8\x12\xbe\x14\x85\xc1\x05\xae\x0d\x6a\x3d\xb3\x66\xfc\x1f\x4b\xcf\x1b\x27\xfe\x9b\x25\x8b\xc6\xc9\x62\xdc\xe5\x15\xa9\xfb\x6f\x00\x00\x00\xff\xff\xa9\x47\x91\x3a\xcd\x21\x00\x00") + +func fontsRelief2FlfBytes() ([]byte, error) { + return bindataRead( + _fontsRelief2Flf, + "fonts/relief2.flf", + ) +} + 
+func fontsRelief2Flf() (*asset, error) { + bytes, err := fontsRelief2FlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/relief2.flf", size: 8653, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsRevFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x59\xbd\x72\x1b\x39\x0c\xee\xf5\x14\x28\xae\xb8\x14\xd1\x58\x4e\x8d\x19\x35\x29\xd3\xe5\x05\x9c\x58\x9a\x14\xb6\x33\x23\xcb\x99\xc9\xdb\x67\x2c\x89\x24\x3e\x00\x24\x41\xee\xda\x77\xd9\xc5\xae\x96\xe0\x87\x1f\x7e\x00\x77\x8f\x4f\xc7\xfb\x87\xff\x68\xb7\xa3\xdd\x1d\x7d\xb9\xa3\xcf\x3b\xba\xdf\x9c\x0e\x7f\x0e\xa7\xd7\xc3\xf6\xf8\x74\xa4\x1f\x7f\xe9\xdb\xc3\xf9\x4c\x5f\xb7\xf4\xfd\xd7\xdb\xe9\xf5\xfc\xfb\xe5\xff\xf3\x45\x78\x3e\xec\x7f\x3e\x3e\x6c\x9f\x4f\xaf\xdb\xb7\xe7\x97\xed\xe1\xf1\xed\xd3\x66\xc3\xef\x7f\xfb\x75\x4e\xd7\xf3\x7e\xc3\x44\xfd\xa3\x7d\xb2\xa8\x28\x3f\xa6\x9f\x88\xdf\xff\xc1\xaf\x6b\x48\x45\xbc\xde\xbe\xcd\xa8\x65\xba\xfd\x2d\x7d\x86\x2d\x04\x07\xc4\xf5\xf1\x34\xf8\xa2\xf3\xe6\x8d\xeb\xff\xe9\xbe\x7c\x9e\x8a\xd2\xea\xf3\x05\x84\x1c\x5b\x05\x21\xc2\x40\x9c\x02\x41\x0c\x16\xb3\x98\x58\x28\xbd\xfc\x20\x3c\xc1\x29\xca\x9c\xef\xc7\x3c\x71\xd5\x99\x32\x80\xdc\xfb\xc5\x13\xc9\xd3\x62\xb2\x12\x82\x9c\x64\x97\x01\x5d\x10\x25\x2b\xe5\x91\x63\xc7\xac\x2f\x7b\x26\xa3\xce\x70\x12\x46\x5f\x10\x8e\x85\xb0\x15\xcd\x72\x8e\xe0\x30\x23\x54\x01\xf5\x93\x02\x64\x48\x71\xa2\xa2\x4e\x2c\x09\x71\xbf\xbf\x24\x82\x99\xa9\x64\x5c\x12\xec\x81\xf0\x9f\x89\x82\x08\x1e\xfd\xc4\x89\xdb\xe1\xca\xca\x8e\x49\x3d\x43\x76\xf4\x28\x1a\x95\x13\x95\x0b\x09\x53\x06\x1a\x79\x8a\x55\x24\x60\x6d\x97\xc4\x64\x99\x9c\xda\x22\x8f\xc3\x05\x65\x52\x66\x9c\xeb\xf0\x59\xb9\xe8\x51\x54\x5a\x01\xa1\xe8\xb0\x92\x74\xf3\x72\x17\x04\xcb\xca\x90\x86\x30\x38\x06\xb8\x58\x6a\x23\x60\x75\x63\x47\xd6\x56\x22\xe3\xc1\x59\x0d\x0f\x41\x79\xab\x8c\x91\xda\xb4\xcb\x1b\x78\x74\x4a\x82\xe1\x04\xf5\x4c\x71\x17\xcb\x22\x23\xbd\x40\xae\x6a\xbc\xf0\xf1\x80\x06\x48\x77\x7d\x01\x1c\xcb\xa2\x22\xa7\xa9\xf4\x52\xcc\xd9\x5c\xb8\xb8\x8f\x87\x09\xdb\x0f\xad\x0e\x61\xf7\xf0\x20\x38\x0c\x78\x0c\x8f\xe7\x9f\x25\x4c\x13\xbd\xa8\xfa\x07\x70\xd7\x03\x01\xbd\xc8\xe2\x31\x83\x78\xac\xb3\x5a\x81\x48\x4d\x9d\x9f\x3f\x0c\x14\x58\x59\xef\x9d\xf2\xd1\xbe\x3f\xaf\xc2\xdf\x42\xb0\x6a\x7b\x44\x89\x29\xf5\xa5\xd8\x07\xf7\x20\x89\x80\x1e\x85\xdd\x33\x65\xbd\x56\xca\xe7\x4b\x3c\xd8\x3c\x65\x4b\xcc\x37\x15\x9b\x0d\x4d\xd8\x12\xca\x76\x62\xaf\xc2\x49\xdb\x3a\xad\x5a\xbc\xee\x4b\xce\xc8\xe9\xdd\x94\x25\x99\x2d\xaa\xfb\x4e\xc7\xdb\x68\x32\x14\xb9\xb5\xe5\x2a\x08\xa4\x63\x4d\x30\x44\xf6\xfe\xfc\xf3\x0d\x4f\x08\x77\x11\xd9\xca\x31\x21\x4b\x3d\xa0\xbf\xed\x89\x9e\xdb\x97\xc8\x71\x10\x03\xd6\xab\x70\x84\x9e\x0f\xe5\xc4\x47\x82\x90\xa1\xaf\x80\x88\xb7\xe6\x95\x09\xdc\xe7\x07\x97\xe8\x64\x8c\xa7\xf2\xc3\x82\xb8\x29\xcc\xd9\x3c\x71\x86\x58\x7b\xe5\xc7\xf4\xa0\x13\xb2\xe1\x81\x02\xa2\xbf\xf6\xeb\xc4\x51\x78\x3f\xb5\xd4\xd9\xc5\x62\x6c\x88\x8c\xfa\x2c\xf8\xd1\x72\x6f\xd9\xb1\x6d\xbc\x30\x89\x49\x16\x1d\x26\xb7\xd7\x9a\xb9\xa8\xe1\x41\x1b\x24\x26\xc8\x76\xa5\x56\x93\x37\x91\x89\x83\xc2\x80\x2b\x59\xb3\x8e\xc6\x58\xab\x1d\x23\x95\xb3\x2f\x4b\x3d\xac\xb7\x94\x1e\x88\xf1\xda\x61\xf6\x4a\x6b\x33\xe6\xbc\xf5\xaa\xc1\x91\x1c\xb4\x62\x15\x25\xe9\x89\x39\xb6\x5d\xe4\x89\xec\x53\xaf\x67\xec\x8f\x1d\x2b\xe5\x8d\x06\x75\x2d\xb9\x06\x62\x51\x1e\xd4\x92\xf7\xc3\xab\x28\xe3\x37\x01\xef\xeb\x08\xe1\x0b\x98\xa6\x27\x2c\x14\x3b\xf1\xe0\x9d\xb4\x30\x14\xb8\xf6\x9d\x7c\x69\xa0\x5b\x03\xaa\x59\xb5\xc0\x37\x3e\x55\xd6\x68\x33\x18\xd
0\x45\xc1\x5a\x92\xda\xaa\xa6\x92\xbb\x61\xe4\x9a\x27\xd8\x93\x05\x29\xb4\x6b\xb6\xe8\xad\x80\xbe\x86\xce\xb5\xde\x0c\x39\x1f\x91\x21\x66\xb4\x0c\x6d\x46\x3a\x13\xce\x24\x2c\x67\x6a\xf9\x9a\x92\xeb\xf6\xa3\x3c\x77\xae\xda\x7c\x33\x43\xd8\x27\x12\x95\xb0\xb7\x34\xc3\xc6\xc5\x15\x54\xf4\xc5\x94\x41\x64\x67\xbe\x5a\xf3\x7e\x4c\x63\x06\x8e\x59\x5d\x47\x10\x25\x50\x96\xa1\x44\xdb\xf8\x13\xa6\xba\xff\xc5\xb0\x29\xe8\xc9\x3c\x01\x52\x6d\xde\x0e\x17\x47\xb1\xa3\x31\x87\xf6\x83\x15\x8c\x67\xac\xd0\xf2\xd5\xa0\x1d\x64\x0d\xea\xd8\x01\x84\x2a\x78\x56\x70\x84\x68\xcd\xf1\xee\x90\xb8\xd8\x3a\xc6\xee\x27\x1a\xa5\xf5\xb2\x8d\x9d\x28\xb9\xc7\xfc\x5c\x59\x96\xe6\x38\xe2\x0a\x64\x29\x37\xcf\x56\x70\x85\x5a\x54\x04\x75\x25\xe6\x8a\x9a\xb1\xed\xa3\x53\x8c\x23\xf2\x6d\x5f\x61\x8a\x6b\xf8\x45\xab\x2e\x4d\xd1\x78\xe0\xac\x53\x69\x33\x92\xfe\x31\x02\x99\x20\xc2\x66\xd6\x43\xff\x26\x85\x71\x3b\x02\xb4\x67\x85\x89\xaa\x8a\xdf\x74\xd5\x9b\x81\x90\x38\x9a\x09\x2e\x49\x15\x4a\x8d\x51\xb0\x77\x26\xf5\xba\x8d\xba\xaf\xe7\xbc\x36\xa8\x9f\xc6\xf1\xec\x9d\xab\x51\xad\x5c\x2d\xee\x28\x2e\xf3\x53\x4c\x45\xa7\x73\xa1\x5f\x6e\x11\xce\x0d\xf3\x23\x06\xc0\xa1\xdf\xd1\xac\x6a\xf3\x52\x7a\x88\xac\xa6\x40\xed\xef\x4a\xb8\x65\x91\x13\x94\x1c\x2f\x69\x6f\x47\x98\x3c\x6f\xe7\x33\xa8\xf3\xce\x6c\xf3\xbc\x53\x58\x62\x65\x67\x64\xee\x1e\x76\x93\xbb\x39\x04\x6e\x1d\x62\xfb\xbe\x63\x2d\xd9\x82\x30\xbb\x33\x11\xd0\x6c\x25\x39\x7b\x7a\xb5\x45\xef\x7e\x95\xf0\x41\x64\x3f\x92\x5d\x5d\x69\x6e\x62\x67\x79\xa6\xad\x5a\xf7\x02\x96\xb4\xf3\xa6\x35\xec\x94\x9a\x61\x31\x39\xfc\x46\xcb\x5d\xd4\x12\xdc\x3a\x5b\xae\xa1\x39\x96\x76\x1a\x76\x8e\x85\xa5\xc6\xfd\x6f\xbf\xf9\x17\x00\x00\xff\xff\xef\xe0\x0c\x2c\x8f\x2e\x00\x00") + +func fontsRevFlfBytes() ([]byte, error) { + return bindataRead( + _fontsRevFlf, + "fonts/rev.flf", + ) +} + +func fontsRevFlf() (*asset, error) { + bytes, err := fontsRevFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/rev.flf", size: 11919, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsRomanFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5a\x5f\xab\xe4\xb6\x15\x7f\x9f\x4f\x71\xf0\x0c\x38\x7d\xa8\xc8\xa6\x85\x9e\x40\x28\xa5\xf4\xa9\x90\x60\xfa\x70\xc1\x2c\x0b\x5a\x47\x09\x0d\x69\x56\xb0\x65\x1f\xfa\xed\x8b\xce\x1f\xe9\x48\x96\x3d\xf6\xdc\xb9\xa5\xc3\xbd\x33\x92\x2c\x5b\xbf\xf3\xff\xe8\xc8\x3f\xff\xeb\xe7\x6f\x3e\xde\xe0\xdd\xd7\xe9\xef\x0f\x5f\xc3\xef\xdf\xc1\x37\x97\x7f\xc4\xdf\x3e\x7e\x82\xe5\x3f\xf0\xc3\x2f\x3f\xfe\x0a\xdf\xff\xf2\xe9\xa7\xcf\xff\x86\x1f\xdc\xf7\x8e\x9b\x7f\x09\x5f\x3e\xff\xf3\xe3\x6f\xee\xe3\x8f\xee\xcb\xaf\x97\xbf\x7f\xf9\xf4\x13\xbc\xfb\xf6\xdb\x3f\x5e\x6e\xb7\xdb\xf5\xd4\xd7\xf5\xe2\xa2\xbb\x5d\x2f\x88\x98\xbf\x67\x9c\x6e\xd7\x8b\xc7\xf1\xa6\x57\x79\x04\x00\xea\xef\xeb\x25\x62\x84\x88\x31\x4d\x7f\x19\xc1\xbf\x8c\x7c\x09\xf2\xa4\xd7\xb7\x72\xd3\x45\xf9\xca\x57\x1d\x8e\xf2\x45\x43\x0e\xcd\x47\x91\x94\x39\x34\xab\x9e\x93\x6f\x2c\x73\x78\xa8\x1e\x31\x78\xf2\xe7\xe8\x10\x8d\x45\x59\x3c\xd0\xb2\x89\xd1\xef\x01\x79\xcc\xcf\x88\xf8\x57\xc7\xf7\x22\x7c\x10\x31\x20\x4e\xa3\x8e\x1d\xe1\x51\x12\x04\x00\x22\x3d\x72\x10\xf0\x96\x05\xdc\x66\x92\x0a\xd1\x96\x5a\x92\x74\x7a\x04\x40\x96\xf7\x9a\xb0\xba\x4d\x0f\x8a\xd1\x15\xee\x8e\xe0\xd1\x51\x07\xd3\xaf\xae\xed\x11\x1d\x4e\xfa\x80\xc4\x87\xf7\x82\x90\x6f\x91\x7b\xfc\x12\xc3\x34\xa6\x6e\xc5\xcc\xbd\x0e\xd1\x4e\x8c\x24\xcc\x23\xf4\x34\x75\xeb\x9b\xf1\x0b\x0f\x88\x00\xc1\x04\xe6\x27\x11\x94\xa6\x78\x41\xe5\x07\x81\x90\x7f\xae\x97\xe8\xba\x53\x49\xd8\xfa\x53\x2d\x31\x8c\xe6\xfe\xcb\x5a\x57\x3c\x3a\xcc\x37\xa0\x3e\x0f\x86\xe7\xd9\xd3\x06\x5f\x49\x01\x56\x9d\x62\x34\xbb\xd3\x8e\x8a\x6c\x5f\x34\x95\xd3\x69\x05\x7a\x9c\xe4\x1a\xf0\x79\x26\x1d\x45\xd8\x53\x2a\xc8\xb2\x07\x91\x3b\x18\x9b\xcc\x16\x99\xed\xb1\x58\x23\xf3\xf2\x10\x42\x17\x63\x14\xc5\x83\x80\xd3\xe8\x67\x5c\x84\x70\x06\x80\xbb\x1d\x8f\xb8\x40\xb2\x45\x82\x97\x6e\x4e\x0f\x39\x27\x46\x17\xd3\x68\x44\xf6\x3c\x70\xe7\x37\xcd\x8b\xfa\x94\xf5\x6f\x43\x93\x0b\xd3\x30\xcc\xc8\x34\xf1\xa2\x1f\xb2\x5b\x04\x57\xc0\xba\x30\x65\xff\x1e\xc9\xc9\xb0\x51\xaf\x94\xf6\x28\x4d\x07\x21\x7c\x87\xb8\xb8\x7c\x25\xb1\x33\x69\x05\x79\x43\x87\x38\xb1\x19\x2f\x21\xf9\xf2\xb3\xd6\x51\x87\xba\x14\x38\xd4\x6d\x87\x11\xa5\xe3\xc2\x48\xa2\x14\xb5\x89\x91\xd8\x9b\xf9\x8b\xf9\x1e\x00\xba\x72\x0e\x42\x94\x0f\x29\xd7\x34\xf0\xe7\x76\xbd\x70\x0c\x5b\x5c\xbe\xc9\xb7\xfc\x61\x51\x3f\x85\x0b\x49\x14\x25\xba\xe7\x80\x15\xa4\x0d\x82\x67\x1a\xe6\x45\x2c\x12\xdf\x33\x06\x09\xad\xc4\x87\x89\x5d\x32\x2e\x31\xe0\x39\xfd\x8e\x86\x0b\x41\x78\x80\x25\xc9\x31\x41\xd6\x46\x59\x1b\x66\x6d\x9c\xcd\x19\xc5\x19\x08\xa4\x8d\xa2\x8e\x42\x37\x47\x83\x19\xd1\xb9\xec\x5d\xbc\x8a\x45\x9d\x89\x4f\x04\x4b\x48\x62\x49\xd0\xb4\x25\xc6\xd3\x5c\xb0\x10\x28\x6d\x4a\x9c\x75\xad\x3b\x01\xff\xb2\xa4\x67\x1b\x6b\xab\xf2\xb0\x62\xae\x71\x1a\xcf\x72\x61\xc3\x0f\x4b\xe8\x1f\xc6\xa6\xdd\x7a\xe3\xa3\x37\x7f\x68\x42\x8e\xe2\x10\x51\x06\xba\xec\xc2\xc4\xd2\xd0\x7c\x66\x5e\x74\x8e\x4f\x4d\x9e\xec\xe7\xe5\x56\xdf\x5e\x37\x1e\x89\x5e\xa7\x23\x9a\x59\x77\x1f\xe4\x16\x69\xbb\xf8\x59\x2f\xa2\xe3\x3b\x46\xf6\x05\x0b\x3b\x23\xf1\xd0\x92\x8e\x1a\x67\x9d\x34\xd5\x58\x47\xce\x1d\x39\xed\x3c\xad\x17\x16\x03\x04\x36\x2f\x4f\xee\x28\xa4\x65\x87\x25\x00\x50\xe4\x03\x40\x97\x92\x69\x17\x12\x33\x1c\xc0\x1c\xa7\x61\x61\x50\x64\x22\xa4\xa4\xb4\xc2\xcc\xcf\x9c\xea\xf5\xee\xf4\x8a\xdb\x76\xf5\x84\xb4\xe5\x70\x76\xc4\xe1\xa0\x43\xc5\x45\x78\x19\x52\x5f\xc7\xde\x5c\xd2\xe2\xec\x38\x78\x16\x47\xd2\x48\x23\x34\x2b\xd6\x60\xd6\xf0\xb6\x46\xae\x97\xec\xe1\x98\x85\x1e\xc5\xc5\xb0\x20\x41\x2c\x5c\x4d\x9c\x02\x0c\x21\x1b\xcd\x55\xf1\x34\xda\x15\x5f\x93\x66\x91\x43\x68\x7
3\x8a\x15\x94\x55\xb7\x12\x6c\x4e\x6f\x18\x95\x49\x70\xf4\xe6\xfd\x2e\xa7\x39\x14\xd0\x40\x13\x9d\x47\x61\x59\x6e\xb5\xec\x02\xcb\x2f\xc3\xb0\x43\x7d\xc9\xc2\x2c\xcf\x56\x02\xbb\xdf\xb7\x00\x29\x66\x29\xbe\x24\xa3\x6a\x79\x00\x2b\xce\xd2\x05\xb3\xc7\x28\x93\xa3\xe6\x6e\xf4\x09\xf8\x0a\xbe\x3d\x13\x16\x28\xac\x83\x38\x76\xb5\xac\x55\xb3\x56\x93\xf6\xfa\x86\x2e\x32\x6b\xca\x55\x1a\x5d\x7b\x5c\x9c\xf5\x02\x92\x01\xd4\x16\x68\x13\xed\x28\x16\x0a\x1b\xd7\x6d\x5f\xb9\x17\x7b\x4e\xe4\x30\x40\x05\x76\x7b\xca\x0e\x00\x34\xeb\x64\x32\xe5\xe2\x91\x56\x72\xbd\xec\x7b\xa8\xaa\x32\x6d\x06\x45\xe1\x2a\x18\xf0\x24\x35\x71\x09\xd4\x0d\x39\x4a\x51\xcc\x7d\x9f\x1f\x83\x88\x9e\x13\xe0\x32\xd9\x6b\xea\x25\x0c\xed\xf1\xf3\xa0\x95\x94\xcb\xc5\x4a\x7a\x8a\x7f\xb8\xfb\x0c\xe3\xd5\x6e\xab\xe8\x12\xde\x54\xf2\xe4\x67\x81\x77\x28\x34\x04\x29\x51\x04\x37\x19\x05\x14\xe5\x85\x6a\x08\x60\x06\xb0\x6a\x59\x56\xec\x87\xb7\x4e\x34\xeb\x0f\x55\x5c\x2d\xe2\x5e\x18\xbe\x17\xe8\xa0\x12\x85\x02\x49\x86\x0c\x46\x1e\xb2\x03\x4c\x4c\x03\xd9\xe3\x1a\xca\x91\x81\xc3\x51\xaf\xda\xcb\x6f\x74\x35\xea\xd9\xed\xfd\xab\xa3\x5e\x09\x7a\xb2\xef\x73\x45\xd5\x5c\x28\x7a\x40\x0e\xc8\x1a\x53\x79\xf4\x5b\x79\xef\xb5\xff\x86\x96\x67\xc7\x47\x0c\xf3\x96\x9c\x34\x2b\xff\xe6\x25\x8c\x4f\xc8\xb7\x1e\xe4\xe5\x1b\xb9\x9e\xcc\x4a\x2a\x9c\x30\x1b\x09\x16\x6f\x34\x17\xc3\x63\x3f\xcc\x94\x73\x9a\xca\x07\x95\x48\x12\x04\x96\xa5\x24\xfe\x38\x0c\x52\xc9\x3e\x99\x48\xc7\x26\x5d\xc8\xde\x42\xf3\x85\x22\xac\x72\xfb\xc9\x7e\xd6\xbc\xd7\x84\x65\xeb\x51\x34\x9f\x19\x6b\x2d\x47\x38\xd2\x2f\x89\x43\xde\x25\xcd\x54\x0a\x7f\x0d\xc0\x8c\x30\xca\x12\xe2\xb3\xf3\x22\x3a\x64\x4a\x17\x3c\x64\xeb\x17\x3c\x84\xd5\x8e\xbd\xd0\x9b\x17\xf7\xf6\xf8\x04\x76\x01\xee\x9b\x86\x46\x99\x0d\xf4\xbb\x64\xf4\xe9\xa1\x81\x3d\xc2\xf6\x28\xdc\x23\x75\x87\xe6\x1d\x52\x8f\x5d\xca\x56\x50\x82\x2e\xd7\x78\x00\x38\x37\xb9\xc9\x96\x19\xd1\xf1\xc9\x8a\xa1\xca\xd2\xe2\x70\x9a\xed\xde\x32\x28\xc5\x0b\x34\x5e\xa3\x17\x68\x8f\xc5\xad\x4a\x72\x56\x5c\x8d\x8c\x5a\x45\x6b\xa5\x51\x89\xe0\x99\x06\x0b\x8d\x47\x09\x23\xd7\xf2\x82\xa9\x50\xb1\x02\x4d\xe5\x01\x01\xab\x4a\x05\x5f\x2c\x6c\x14\xb8\x6e\xaa\x8f\x15\xdb\xaa\xc1\x71\x83\xe5\x2a\x05\xdc\xfb\xc9\xd9\x31\xb4\x3f\xf6\xf0\xc0\x97\x02\x83\x2f\xc2\xd7\xb3\x39\x2e\x22\x16\xd6\xbb\xc2\xf4\x7e\xce\xdc\x66\xcf\x3c\x82\x77\x7e\x52\xec\xda\xc4\x0a\x2e\x0e\x1c\x44\x06\xda\xd2\x0c\xf7\x56\x3e\xdf\x3a\x11\x75\x4e\xf4\x6a\x55\x3a\x7c\x9f\x16\x19\x67\x3e\x3f\xf0\x32\xe1\xd8\xb7\x79\xd6\xad\x69\x9b\xe3\x0a\x3f\x01\xfc\x4e\xab\xf1\x2e\x4e\x83\x24\xa7\x01\xbf\xca\xb9\x35\x6d\x8d\x86\xa1\xb6\xf5\xed\x36\x3d\x08\xf3\x95\x21\x47\xae\x92\x9e\x68\xf1\x4d\x4e\x72\x4d\x29\xa8\x2a\x72\x68\x95\x78\xa6\x3a\xfc\x54\xe7\x50\x7b\x9d\x7b\xb4\x4b\x01\x92\x96\x1f\xe6\x2a\x17\xce\x6d\x17\xe5\x44\x60\xbd\xf6\x2e\xed\xf4\x49\x0c\xd0\x2b\xca\x52\x5e\x3a\xf3\x57\x2a\x78\x65\xc1\x5e\x47\x96\xc7\x69\x78\x13\xda\x51\x6a\xaf\x52\x3b\x2c\x7c\x70\xf1\x21\xda\x5d\x4c\x59\x9e\x0a\x90\xec\x93\xf7\x91\x26\x85\xd9\x68\x89\xb3\x3e\x63\x9f\x2b\x2e\x38\x9b\x01\x2a\x81\x6b\x9e\xa2\x90\xc5\x9d\x68\x98\x32\xd0\xa3\xe6\xbf\x91\xde\xce\xdd\x54\xd4\x16\x03\xd4\x6f\xe6\xd4\x4c\x29\x72\x79\x3f\xc6\x47\x4d\x58\xd5\xfa\xea\xca\x4a\x55\x57\xe9\xa4\xe2\xf7\x5c\x04\x44\x39\xa7\xf3\xc3\xa8\xbe\x26\xc3\xba\x3d\x56\x4f\x91\x37\x74\x80\x9c\xac\x88\xff\x99\x35\x96\x03\x2c\x04\xa5\x83\x19\x9a\xeb\x61\x1c\x32\xed\x7e\xa6\xd9\xce\x3c\xc0\xc2\x7b\x2c\x7b\x88\x85\xd5\xa7\x0d\xe0\x90\xe3\x82\x13\x65\x71\x76\x7f\xc7\x2a\xd3\x55\x9c\x9e\xfa\xf4\x94\x68\xb3\x0c\xb2\x85\xa5\x33\x76\x37\xa8\xb9\x1e\xec\xb7\xd3\xf4\xde\xa5\x7d\xbf\xd6\x
d4\x3a\x10\x5f\xe1\xd3\x57\xa0\x88\x07\x66\x6d\xc0\x63\xf1\x0c\xd1\xac\x6f\x03\x4f\xa9\x6d\x6c\xfb\xf4\x2d\xaf\x07\x9c\xa5\x1e\x0c\x2b\x2f\x1c\x56\x4c\xd9\x09\xed\x1b\x54\x95\xd7\x1b\x1a\x08\xb7\xaa\x49\x6b\x07\x39\x37\xa6\x34\x61\x32\x64\xd5\xcd\x80\xa5\x3a\xd2\x7d\x58\x8f\xe2\x66\x06\xe7\x2e\x42\xeb\x57\x29\xba\xd2\x81\xe1\x9c\x9d\x80\xe3\x84\x26\x57\x13\x2a\x31\xef\x2f\xa7\x99\x2e\xa5\x2f\xe4\xb9\xca\xeb\x11\x98\xdf\x8d\xa8\x5b\xcc\xb4\xb4\xd0\xdd\x6c\xf4\x7e\x8e\x58\xbc\x9e\xd7\x4a\x4d\xc7\x80\x9a\x9e\x7f\x41\x7c\x19\x5e\xda\xfc\xe8\xb8\x39\x6d\x82\xd1\x62\x85\x14\x1b\xec\xd6\xcc\x35\x1b\xb3\x6a\x5b\xb6\x3e\xab\x3f\x03\xa6\x33\xa3\x82\xd5\xc3\xd6\x03\xf8\xa1\x83\xb2\x07\xb5\x8b\x77\x07\xc4\x7a\xe8\x2c\x2f\x17\xe7\x8a\x85\xcd\x06\x92\x8b\x38\x8c\x66\x9f\x2d\x55\xac\x47\xfd\xe4\x93\x05\x6b\xde\x89\x8d\xce\x4d\xb9\xc6\xe1\x67\x29\xfe\xed\x83\x59\x39\xae\xfa\x85\xa1\x71\x18\xfe\x14\xf2\x5e\xd8\x95\x13\x30\x69\xd2\xae\x39\xf4\xb7\xcc\x7b\x9d\xeb\xa5\x94\xb4\x83\x9e\xb2\xa9\x0d\x7f\x57\x27\xa2\xa9\x31\xa3\x6e\x73\x67\xeb\x12\x7b\x8d\xeb\x85\x42\x8b\xfc\xc3\x6d\xdd\x97\x7f\xad\x0f\xab\xfc\x8b\xfb\xe0\xa7\x21\xfe\xb9\x1a\xd1\xb7\xa0\x5c\x55\x5d\xd8\xc0\x50\x12\x88\xdb\xf5\xc2\xc5\xec\x53\xef\x4d\xbd\xba\x23\x39\x28\xbf\xc7\x60\xb3\x51\x7e\x8f\x41\xf2\x52\xf8\xbf\x7b\x8f\xc1\x1e\x19\xc9\x7b\x3d\x5a\xd3\xd6\xac\xb7\x39\xdd\x85\xaa\xf0\xdf\x1d\xb0\xa7\x28\x62\x54\x3b\x27\xba\x87\x06\x94\xbf\x05\x29\xf1\x94\xd9\xc2\xef\x22\x9d\xad\x2e\x3f\xbd\x7a\x2c\x67\xc2\x51\x5f\x26\xd7\x7d\xc3\x5b\x97\x1a\xf6\x17\x7e\xcb\x9c\x30\xaf\x0c\x45\x20\x76\x0b\xf6\xbf\x0d\xe5\x36\x0d\x55\x7a\xcb\xf1\x90\x2b\xef\x8a\x34\x3e\x70\x6b\x5a\x84\xf9\xdc\x2b\x7f\xff\x0d\x00\x00\xff\xff\xb1\xed\x38\xfe\x94\x32\x00\x00") + +func fontsRomanFlfBytes() ([]byte, error) { + return bindataRead( + _fontsRomanFlf, + "fonts/roman.flf", + ) +} + +func fontsRomanFlf() (*asset, error) { + bytes, err := fontsRomanFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/roman.flf", size: 12948, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsRot13Flf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x1c\xd3\xd5\x77\x5d\x45\x18\x86\xf1\xfb\xe7\xaf\xd8\x50\xa4\x05\x92\xec\x99\xed\xe8\xdb\x16\x28\x45\x0b\x94\xe2\x72\x9a\x9c\xd3\x06\x4e\x93\x70\x92\x14\x8a\x15\x77\x77\x77\x77\x77\x77\x77\x77\x77\x77\x87\xf5\x76\xcd\xc5\x6f\xae\x66\xcd\x5a\xdf\xf3\x75\xba\x9d\xd8\x5a\x9a\x04\x9f\x34\xe9\x0b\x49\x0d\xbd\xd1\x89\x90\xf5\x77\xba\x9d\x64\xea\xcc\x69\x49\x68\x9a\x3c\x99\xbf\x24\x99\x37\x3c\x38\x31\xda\x4b\xe6\xb4\x7a\xad\xa1\x56\x32\x75\xf1\xd8\xb2\x8b\x86\x47\x3a\xfd\x93\x13\x9d\xf1\x45\xfd\x83\xdd\x69\x49\x93\x0f\x84\x38\x10\xeb\x7e\x98\xd1\x1a\x6f\x0f\x25\xa3\x23\xc9\xdc\x76\x6f\xd1\xf0\x48\xab\xeb\x47\x66\x75\xdb\x23\x23\xc9\xcc\x85\xad\xb1\xb1\x76\xb7\x9b\xe4\x03\x4d\x06\xb3\x47\x06\xbb\x93\x43\xed\xf1\x64\x70\x61\xab\xd7\x1a\x9c\x68\xf7\xc6\x93\x10\xeb\xbe\x58\x14\x74\x86\x17\x74\xdb\x13\x49\xaf\xdd\x6d\xb7\xc6\xdb\x49\xec\x0f\x49\x5f\x5f\x12\x62\x32\x7d\x72\xc1\xb2\xaf\xc1\x52\xb1\x9c\x58\x5e\x4c\x11\x2b\x88\x15\xc5\x4a\x62\x65\x31\x55\x4c\x13\xab\x88\x55\xc5\x6a\xa2\x4f\xf4\x8b\x01\x91\x8a\x20\xa2\xc8\x44\x2e\x0a\x51\x8a\x4a\xd4\xa2\x11\xab\x8b\x35\xc4\x9a\x62\x2d\xb1\xb6\x58\x47\x68\x0a\x9b\x8a\xcd\xc4\x1c\xb1\xb9\xd8\x42\x6c\x29\xe6\x8a\xad\xc4\x3c\xb1\xb5\xd8\x46\x6c\x2b\xb6\x13\xd3\xc5\x0c\x31\x53\xac\x2b\xd6\x13\xeb\x8b\x59\x62\x03\x31\x5b\x6c\x28\x36\x12\x1b\x8b\x4d\xc4\xf6\x62\x07\xb1\xa3\xd8\x49\xec\x2c\x76\x11\x23\x62\x54\x8c\x89\xdd\x45\x4f\x8c\x8b\x09\x31\x29\x16\x8b\x3d\xc4\x9e\x62\x89\xd8\x4b\xb4\xc4\x7c\x31\x28\x86\x44\x5b\x74\xc4\x02\xb1\x50\x0c\x8b\x5d\xc5\x6e\xa2\x2b\x16\x89\xbd\xc5\x3e\x62\x5f\xb1\x9f\x78\x58\xbc\x2a\xde\x16\x1f\x8b\x5f\xc5\xdf\xe2\x3d\x11\x62\xcd\xfe\xa6\xe1\x00\x11\xb2\x94\x03\x4d\xe0\x20\x13\x39\xd8\x64\x1c\x62\x72\x0e\x35\x05\x87\x99\x92\xc3\x4d\xc5\x11\xa6\xe6\x48\xd3\x70\x94\x08\x79\xca\xd1\x26\x70\x8c\x89\x1c\x6b\x32\x8e\x33\x39\xc7\x9b\x82\x13\x4c\xc9\x89\xa6\xe2\x24\x53\x73\xb2\x69\x38\x45\x84\x22\xe5\x54\x13\x38\xcd\x44\x4e\x37\x19\x67\x98\x9c\x33\x4d\xc1\x59\xa6\xe4\x6c\x53\x71\x8e\xa9\x39\xd7\x34\x9c\x27\x42\x99\x72\xbe\x09\x5c\x60\x22\x17\x9a\x8c\x8b\x4c\xce\xc5\xa6\xe0\x12\x53\x72\xa9\xa9\xb8\xcc\xd4\x5c\x6e\x1a\xae\x10\xa1\x4a\xb9\xd2\x04\xae\x32\x91\xab\x4d\xc6\x35\x26\xe7\x5a\x53\x70\x9d\x29\xb9\xde\x54\xdc\x60\x6a\x6e\x34\x0d\x37\x89\x50\xa7\xdc\x6c\x02\xb7\x98\xc8\xad\x26\xe3\x36\x93\x73\xbb\x29\xb8\xc3\x94\xdc\x69\x2a\xee\x32\x35\x77\x9b\x86\x7b\x44\x68\x52\xee\x35\x81\xfb\x4c\xe4\x7e\x93\xf1\x80\xc9\x79\xd0\x14\x3c\x64\x4a\x0f\x3d\x34\x15\x8f\x98\x9a\x47\x4d\xc3\x63\x22\xa6\x29\x8f\x9b\xc0\x13\x26\xf2\xa4\xc9\x78\xca\xe4\x3c\x6d\x0a\x9e\x31\x25\xcf\x9a\x8a\xe7\x4c\xcd\xf3\xa6\xe1\x05\x11\x43\xca\x8b\x26\xf0\x92\x89\xbc\x6c\x32\x5e\x31\xb9\x7b\x8b\xa1\xe0\x35\x53\xf2\xba\xa9\x78\xc3\xd4\xbc\x69\x1a\xde\x12\x31\xa6\xce\x32\xc6\xc0\x3b\x26\xf2\xae\xc9\xdc\x67\x8c\x39\xef\x9b\x82\x0f\x4c\xc9\x87\xa6\xe2\x23\x53\xbb\xe5\x18\x1b\x3e\x11\x31\x4b\xf9\xd4\x04\x3e\x33\x91\xcf\x4d\xc6\x17\x26\xe7\x4b\x53\xf0\x95\x29\xf9\xda\x54\x7c\x63\x6a\xbe\x35\x0d\xdf\x89\x98\xa7\x7c\x6f\x02\x3f\x98\xc8\x8f\x26\xe3\x27\x93\xf3\xb3\x29\xf8\xc5\x94\x5e\xa3\x98\x57\xfc\x66\x6a\x7e\x37\x0d\x7f\x88\x58\xa4\xfc\x69\x02\x7f\x99\xe8\x6d\x8b\x45\xc6\x3f\x26\xe7\x5f\x53\xf0\x9f\xf8\x3f\x00\x00\xff\xff\x89\x5e\x4d\x5c\x75\x05\x00\x00") + +func fontsRot13FlfBytes() ([]byte, error) { + return bindataRead( + _fontsRot13Flf, + "fonts/rot13.flf", + ) +} + +func fontsRot13Flf() (*asset, error) { + bytes, err := fontsRot13FlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/rot13.flf", 
size: 1397, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsRoundedFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x58\xcb\x6a\x5e\x37\x10\xde\x9f\xa7\xf8\xc0\x59\xe8\x6c\xac\xc6\xf4\x42\x76\x7d\x01\xa7\x50\x68\xe9\xe2\xc0\xe0\xc6\x09\x0d\x69\x5d\x48\xf1\x22\xa0\x87\x2f\x9a\x9b\xa4\x23\x9d\x9b\xd3\x82\xfd\xff\x42\xbf\x34\xf7\xf9\x66\x34\x1f\xfe\xfc\x70\xf7\xf0\x0a\x3f\xe0\x7b\xdc\x7d\x83\xd7\xdf\xe1\x6e\xfa\xf9\xef\xe7\xa7\xc7\xf7\x8f\xf8\xfd\x0b\xde\x7e\x7c\xf7\x09\xf7\x1f\x9f\xde\x7f\xfe\x07\x6f\x6f\xef\x6f\x65\xf9\xe3\xe3\xf3\xe7\x3f\x1e\xfe\xba\x7d\x78\x77\xfb\xfc\x69\xba\x7f\xf8\x82\xd7\x6f\xde\x7c\x3b\xbd\x7a\x75\xb3\xf5\x7f\x33\x81\x70\x33\x25\x24\xfb\xa0\x64\x7b\xbc\x02\xe4\x0c\x6f\x05\xcc\x01\xf3\xcd\x94\x22\xb0\xc8\xaf\xf9\xc0\xd6\x37\x2f\x28\x5f\xce\x7b\x94\xf0\x0b\x12\x93\x21\xfe\x9d\x66\xd9\xfd\xa9\xdf\x45\xa2\x27\x4a\x85\x54\x45\xcd\x68\x25\x91\x1b\x44\xf9\x46\x22\x22\x64\xf9\x99\x8a\x48\x9e\x1a\x0a\xaa\x86\xe8\x41\x33\x58\x5b\x00\x11\x91\x0f\xe5\xef\xbc\xc8\xdf\xaa\x3e\x82\x08\x53\xf8\x33\x17\x39\x45\x58\xc0\x26\xc9\xc4\xe6\xbc\x37\x83\x10\x74\x2f\x1b\x28\xef\x2d\x44\x71\xa9\xa8\xc0\x2c\x6e\x76\x14\x13\xaf\x3e\x98\x95\xb0\x99\xd9\x2f\x68\x3f\xe1\x24\x55\xab\x4c\x8f\xf9\x89\x56\xd5\x67\x20\xe5\xa1\x06\x64\x1b\xb0\x09\xf3\xef\x2a\xca\x92\x90\x98\x15\x66\x16\x32\xb0\x8d\x62\x42\x32\x46\x6c\xcb\xde\x1d\xee\xee\x95\x5f\xc4\x0b\xf3\xc0\x0b\x8d\x39\x07\xda\x37\xb6\x19\x5c\x9b\xb2\x0f\x48\x78\x90\xf8\x7e\x70\xe6\x04\x0f\xbd\x69\x3c\x48\xaf\xed\x85\x05\x34\x2c\x3a\x1e\x22\x52\xde\x54\xa1\xa0\x39\x43\x96\x5d\x11\x51\x73\x0c\x88\x24\x4c\x16\x3e\x1b\x3b\xab\x7a\x90\x05\x26\x85\xe2\x37\x71\x05\xd4\xa0\x66\x6c\xac\x2d\x21\x51\xe1\x42\xc9\xcf\x12\x1f\x10\xf1\x66\x09\x61\x65\x9f\x94\x02\x24\x8f\x56\xc4\x76\xa8\x91\x53\xd3\xdd\x30\xda\xed\x55\x24\x37\x39\x2b\x23\x79\x96\xb5\x69\xa4\x00\x1c\x62\x52\xb5\xa4\x34\x94\xad\x84\x84\x48\x7f\x4e\xe2\x81\x6c\x07\xd4\x04\x72\x98\x9a\x48\xcc\xd4\xd2\x25\x6a\xaa\xbe\x1d\x8e\x82\x1f\x28\xd1\x96\x4d\x12\x07\x09\x37\x0c\xb4\x6a\x37\xba\x6c\x41\xa3\x67\xde\x0e\xb4\x3d\x4d\xcb\xee\x59\x2f\x8c\xd2\xab\x4f\xb4\x8d\x33\x6d\xc2\xb7\xf0\x17\x24\xaa\x0a\xe4\x4d\x1d\xfe\xf4\x90\x70\x0c\x12\x4e\xac\x86\x4f\x8d\x0c\x87\xcc\xa9\x4d\x4a\x07\xfe\x7c\x28\xc8\x69\xf5\x55\x8d\x82\x5c\x12\x36\x21\x22\xfb\x08\x16\x3f\x8c\x2a\x8a\xf1\x8b\x65\x63\xf2\xb3\x4b\xe5\xe5\x0b\x9e\xd3\x18\xd5\xa2\x6e\x9e\x13\xe8\xda\xca\x1f\xcf\x14\x94\x4c\x99\x55\x36\x96\x38\x98\x6c\xba\x7b\x2d\x7f\xdc\xfe\x22\x90\x2d\xed\xac\x6a\x7a\x88\x3b\x0e\xad\x0d\x7e\x58\xfb\x92\xc5\x89\x8e\x1f\xb1\xcb\x9f\x1d\xbb\xa1\xe4\xf6\xbc\x92\x6d\x1f\x13\xcf\x52\x73\xa5\x29\xad\x42\xf1\xc8\x6e\xe6\xd3\x0c\xd3\x70\x4d\x73\x0b\xb3\x93\xdb\x6e\x21\x8e\x45\xed\x64\x5e\x1c\x21\x6d\x8b\xb8\x6a\x16\x8f\x90\x8e\x3c\xcf\xb4\x74\x25\x6e\x0a\x92\x47\xf8\x00\xe9\xd6\xf2\x0b\xf4\x30\x59\xab\xa3\xd6\x62\x30\xa5\x45\x12\x8a\xe5\x5f\x7a\x4f\x79\xf4\x09\x35\x17\x68\x37\x26\x2f\xfa\x1d\xd6\x25\x27\xa4\x44\x29\x5d\xca\xbe\x71\xbe\xf4\x11\xfe\xdf\x52\x33\x6b\xee\x57\x88\xed\x1a\x2a\x51\x64\xa8\xf5\xf2\x08\x07\xb9\x17\xbc\xcd\x65\xd1\x2e\x20\xc3\x50\x36\xc0\x65\xab\x23\x64\x50\xbd\x5c\x36\xed\x8b\x68\xb6\x1a\x5a\x21\xf1\xd7\xf5\x10\x55\xf3\xad\xa9\xd0\x2f\x47\x6d\xf6\x46\x2e\xbf\xd4\xa7\xe7\xa9\xb1\xcd\xac\x11\x5e\xc6\x88\xea\x51\x1f\x68\x96\xbf\x2e\x17\xe0\xb9\x60\xcb\x6b\xb2\x55\x1d\xb1\xca\x66\x0d\xf9\xbe\x4f\x8f\x2c\x54\x77\x99\x05\x59\xcc\x63\xe7\x32\x4b\x82\x0c\xe5\xa9\x60\xb2\x6d\x60\x61\x45\x4d\x51\xd8\x10\xac\xfe\x32\xf4\xb1\x5b\x45\x1b\x4
f\x34\xf5\x8e\x70\x5e\xe4\x2d\x0a\x8b\x72\xac\xec\x51\x38\x06\xeb\xe7\x52\xfd\x55\x34\x4f\x15\x47\xd1\x4d\x34\x13\xb2\xa1\x7d\xcc\x1e\x3e\xbd\x7a\x18\xd8\x5a\x6e\x57\x0f\xac\x1f\xcd\x36\x79\x58\x7f\x1c\x3f\x16\xb3\x92\xd2\x21\xa7\x9b\x69\x59\x79\x79\x68\x63\x8e\x16\xab\xe5\xde\xf3\x97\x8e\x3f\x8e\x34\x6e\x16\x0d\xaa\x38\xa6\x38\xa2\xcc\xdd\xfd\xd5\x43\xd4\xb2\x39\x42\xe7\x1b\x08\x75\x7e\xa7\x03\xfe\x5e\xcb\x4c\x6d\x5e\x90\x47\x7f\xc7\x5f\x85\x03\x0b\x3c\xdb\x93\xbe\xbc\xe9\xf5\x51\xef\x90\xb5\x46\xac\x63\x33\x0c\xd4\x28\x79\x97\xce\xbb\xa1\x14\xc2\x2e\xd4\x9b\xc7\xc5\xb8\x5d\x31\x43\x3b\xc4\x60\x3b\x2b\xe2\x66\x70\x38\xbc\x50\xb4\x9a\x23\x7d\x32\xa5\xa6\x0d\xf1\x31\x4d\x3f\xcb\x19\x4d\x74\xc6\x7e\x54\x03\xa0\x33\x00\xad\x0d\xb0\x7b\xff\x84\x01\xc7\x0e\x6c\xde\x3f\xda\xc3\x79\x07\xf7\x02\xfe\xa5\x93\x93\x0e\x42\x27\x2b\xe7\x02\xa8\xe1\xef\x6f\xd4\x24\x01\x74\x22\x0f\x8b\x17\x47\x03\x9d\x7a\xc7\xb4\xcf\x97\xab\x61\xa3\x86\x6c\xd1\xfc\xfc\x44\x4c\xc7\x36\x5c\x53\x7b\x00\x58\x19\xae\x2a\x61\xa9\x51\xfc\x1c\x00\x75\xf7\xb1\xe0\x57\x2f\xe9\x71\x08\xd9\xed\x7d\xea\xf8\xe3\x8a\xe3\xad\xa0\x73\x2b\x21\x23\xc6\xdf\x6c\xc0\xd8\xe5\xc8\x15\xfd\xdd\xf1\x41\x0d\x71\x58\x00\xe0\x73\xaf\x38\x1a\x0d\x58\xc9\x93\x8a\xc7\xbf\x68\x49\x0e\x08\x42\xcd\x5d\x57\x7b\xae\x7d\x21\x95\xf9\x79\xff\x42\xd2\x27\x88\x55\x6b\x4d\x7d\xe8\x20\x41\x1b\xde\xd0\xf4\x59\xda\x9c\xfa\x40\x04\x4b\xd4\x19\xc5\xd2\x36\x50\x07\xab\x55\x8d\x9d\x89\xe8\x2b\xdf\x84\x63\x6a\xfc\xfb\xff\xda\x95\x9e\xa4\x66\xb4\x42\x3f\x0f\x3a\xd5\x07\xb4\xf7\xaf\xe1\xdf\x80\xff\xb5\x34\xee\xb9\xe9\xb4\x29\x21\x05\x9b\xc4\xe8\x0e\xa5\xb0\xba\xff\x6f\x00\x00\x00\xff\xff\xed\x11\xce\x52\x3b\x1a\x00\x00") + +func fontsRoundedFlfBytes() ([]byte, error) { + return bindataRead( + _fontsRoundedFlf, + "fonts/rounded.flf", + ) +} + +func fontsRoundedFlf() (*asset, error) { + bytes, err := fontsRoundedFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/rounded.flf", size: 6715, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsRowancapFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x56\xef\x4f\xeb\x36\x14\xfd\xee\xbf\xe2\x2a\xe2\xc3\x26\x15\xbf\x86\x3d\x7e\x55\xe8\x2d\x08\xd6\x27\x44\x33\x45\x80\x2a\xed\xa3\xe3\x38\x6d\x46\x62\x47\xb6\x03\x63\x7f\xfd\xe4\xc4\x89\x93\xfe\x00\xca\xa6\x09\xe9\xb5\x08\x1a\x17\x7c\x7c\xce\xb9\xe7\x5e\x9c\xe6\xe9\x11\x39\x80\x13\x38\x06\xff\x0c\xbe\xc2\x91\x8f\xd0\x34\x5b\xe4\x4c\x03\x15\xfc\x89\x49\x95\x09\x0e\xf1\x0b\xdc\x32\xae\xe1\x77\xa2\x14\xe3\x23\x78\xe4\xf5\x43\x50\x15\x19\x5d\x62\x96\x54\x23\xf0\xbf\xf8\x5f\xce\x8f\x11\x92\xe2\x99\x70\x4a\x4a\x9c\xe6\x29\x42\x53\x29\x0a\x50\x25\xa3\x5a\xb2\xa0\xca\x2a\x6a\xfe\x18\x5d\x13\xcd\x26\x70\x4f\xf4\x08\x7c\x1f\xae\x19\x05\xff\xfc\xfc\x17\xf0\x8f\x26\xc7\xe7\x13\xff\x0c\x0e\xc7\x27\xe3\x71\xbd\x77\x02\x37\x84\xc3\xd5\x92\x64\x70\xb1\x0a\xf3\x0d\xdd\xb1\x32\x7f\x01\x2d\x26\xe0\x5d\x67\x8a\x56\xca\x90\x55\x40\x62\x51\x69\xd0\x4b\x06\x45\x95\xeb\xec\x30\x15\x5c\x43\x4e\xe4\x82\x1d\xe6\x4c\x6b\x26\xa1\x94\x62\x21\x49\x31\x82\xe9\xcd\xf7\xd9\x6f\x0f\xd8\x43\x60\x5e\x17\xcd\xf2\x70\x16\x3c\x15\x09\xa6\x4a\x60\x77\xd6\x83\x98\x40\x68\xe0\xca\x9c\x81\x64\x34\x2b\x33\xc6\xb5\x02\x91\x42\x9e\x29\x0d\xed\xd6\x57\x41\xee\xab\xf8\x4f\x46\xf5\x04\xee\x45\xc1\xf4\x32\xe3\x0b\x48\x85\x04\x25\x0a\x26\x38\xab\xdf\x9f\x97\x4c\x32\xc0\x80\x31\x42\xdf\x1a\x0b\x66\x82\x3e\x32\x59\x92\x5c\x88\xbf\x09\x5c\x44\x37\x77\x37\xb3\xd9\xd5\xd1\xe9\xe9\x38\xa0\x22\x96\x04\x57\x3c\x6b\xf0\xd1\x54\x54\x3c\xa9\xa5\x53\x52\xaa\x1a\x9c\x40\x29\x94\xca\xe2\x9c\x41\x6d\x84\xf9\xac\xa9\xf0\x08\xb2\x14\x08\x7f\x31\x47\x3f\x8b\x2a\x4f\x80\x12\xc9\x40\x0b\x60\x7f\x95\x84\x27\xbf\x22\x5b\x03\xe5\xfb\xa7\xbe\x7f\x36\x0e\x16\x19\x61\x31\xa6\x14\x17\x82\x13\x55\x57\x1e\x93\x0a\x7e\xba\x33\x45\x87\x2b\x49\x9e\x53\x21\x93\x9f\x3b\x9d\x97\x30\x15\x5c\x63\x5b\x70\x7f\xdc\xab\xf5\xc9\xe4\xf8\xab\xa9\xf5\xf7\xf0\x01\xa1\x03\x80\x83\xe0\xf5\x9f\x01\xea\x7d\x7d\xee\x45\x9d\x25\x58\x7f\x3f\x00\x52\x48\xa3\x27\x09\x23\xa3\xcb\x7e\xfe\x19\x28\xff\x9f\xd6\x60\x12\x86\x61\xdc\xe8\x4f\xc2\xc8\x4b\xc2\xa8\x5e\x24\xa1\x79\x45\xb5\x63\xc6\x21\xf3\x6d\x16\xee\xb9\xf3\xb2\x35\x0e\xc0\x6e\xdb\x06\x77\xeb\xb5\x70\x98\x84\x53\x0b\x67\x4e\xf1\xb6\xc0\xf5\xc8\x19\xb0\x79\x03\x66\x8f\xb7\x6c\x30\xb1\xcc\xe6\x1b\x91\x36\xf3\x82\x79\xc7\xcb\xca\xe9\x78\x45\xef\xe0\xd5\xba\xe3\xf0\xba\x70\x25\x8d\x6b\xd0\x19\x07\x8e\x6b\xd8\xff\xdd\x7f\x05\x1a\x39\x98\x0d\xa0\xb5\x83\x4e\xab\xd7\x07\x69\x04\x36\x26\x76\x25\x75\x2e\x6e\x11\x5e\xfb\xe5\x38\xb6\xe6\xb9\xc0\x0c\x13\x33\x8c\xcc\x36\xe1\x16\x90\x14\xb2\x05\x8b\x06\x62\x9d\x50\x07\x10\x58\x0e\x3d\xd3\x5a\xdf\xec\xa2\xcb\xec\x2d\x7c\x58\x20\x1e\x0a\x34\x11\xb6\xbc\xbc\xcb\x3a\xc3\x3b\x09\xec\x81\xad\x0b\x1c\x0a\x59\x89\xc5\x5a\x5b\xf5\x5a\x2b\x5a\x8d\xf1\xc6\x8e\xdd\xd4\xb5\xab\xc7\xc4\x83\xc2\x6e\xea\x90\x77\x0e\x82\x95\xce\x4d\x5c\xe7\x5a\xac\x8f\x76\x2e\x1e\x4c\x94\xc8\x5b\x69\x89\x41\x47\xec\xc2\x0b\xf7\x78\x85\x8e\x57\xe4\xb5\x85\xda\x89\x97\x9b\x74\x36\x25\x6f\xfa\x95\x0c\x78\xc1\xbc\x01\x33\xce\xc4\xcd\xf6\xa8\xcd\xf0\x6b\x7e\xd9\xac\x0c\x67\x88\xcb\xdb\xd0\xac\xbe\x57\xeb\x02\xdf\x99\x01\x5b\xc4\xb7\x1a\xab\x87\x36\x0c\x41\x87\xf5\x47\xf8\x74\xd9\x89\x9a\x5b\xa0\xed\xbc\xd6\xb8\xbd\x12\x7f\xbc\x3e\xe1\xe6\x9b\xb9\xc2\xd6\x41\x70\xeb\x06\x41\x5d\xab\x0f\x0d\x82\x15\xde\x0e\x72\xee\x66\x67\x72\xb9\xcb\xb0\x1a\xcc\x0c\x9c\xb4\x0d\xd1\x3d\xd7\xab\xee\xf9\xcd\x7f\x43\xfb\xfb\xc5\xfe\x7e\xb1\xbf\x5f\xf4\x37\xff\xd0\xf7\x0b\xd8\xdf\x2f\xf6\xf7\x8b\x3e\xd0\xfe\x7e\xb1\xbf\x5f\x7c\x9e\xfb\xc5\xbf\x5e\xfc\x13\x00\x00\xff\xff\xdf\x17\x66\x40\x70\x15\x00\x00") + +func 
fontsRowancapFlfBytes() ([]byte, error) { + return bindataRead( + _fontsRowancapFlf, + "fonts/rowancap.flf", + ) +} + +func fontsRowancapFlf() (*asset, error) { + bytes, err := fontsRowancapFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/rowancap.flf", size: 5488, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsRozzoFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\xdf\x6b\x1c\xb7\x13\x7f\xbf\xbf\x62\xd0\x2d\x1c\x04\x7d\x7d\xdf\x18\x42\xa7\x6f\xa5\x67\xf2\x12\x0a\xa2\xe4\x65\x29\x85\xdc\x56\x72\x9b\xc6\xf5\x85\x38\x6e\x49\xfe\xfa\x22\xcd\x8c\x34\xda\xd5\xfe\x48\xea\xf6\xc0\xbb\x92\x56\x3b\x33\x9a\x9f\x9f\x59\xdf\xde\xdd\x5e\x9f\x3b\xf8\x06\x5e\xc0\xf5\xff\xe1\x7f\xcf\xe1\x7a\xf7\xe1\xf2\xf9\xf3\xe5\xea\xf6\xee\x16\x6e\x2f\xf7\x1f\x61\xf8\x04\x3f\xbc\x7d\x17\xe0\xc7\xcb\xc3\xe3\x5d\x78\x07\x3f\x3d\xde\x9f\x3f\x5c\x7f\xf7\x70\x7b\xbe\xba\xbc\x0f\x57\xc1\x5f\xfd\x7a\xf9\xf3\x67\x78\x7e\x7c\x7e\x7d\xfc\xf6\xc5\xee\xf5\x6f\x6f\x1f\xe0\xaf\xf3\x03\x0c\xe7\x87\xe0\xe1\x72\x0f\x2f\x1f\x7f\x7f\xfb\xe9\xfc\xc7\xf9\xfb\xbb\xf3\x2f\xef\xe0\xf5\x87\xc7\xf0\xfa\xd3\xfb\x00\x2f\x2f\xf7\x1f\xaf\x76\x5d\xb7\x9f\xfb\xdb\xef\x10\xb1\x93\xab\x41\xd3\xed\x77\x10\xa0\x8c\x01\xe4\xba\xdf\x21\xa4\xad\xe9\xaa\x1e\x4c\xaf\xe9\x06\x1e\x1d\xf8\x44\x05\xcb\x2f\x6d\xa1\x27\x2e\xbd\x50\x3f\x0b\xf2\x44\x88\xc9\x6f\x3c\x4b\x53\xe4\x07\x1e\x01\x93\xd0\x27\x44\xd9\xdc\x23\xe0\x20\xaf\x21\xde\xc4\x91\x41\x40\xc7\x6b\xc8\x64\x2c\x5a\x38\x02\x9f\xf8\x28\x9c\x8e\x42\xe6\x18\x9f\xc7\xd1\x11\x0c\x1a\x2d\x87\x1e\xf1\xd0\xe3\x60\xd3\xb2\xf5\x03\xf4\xee\x40\xca\xec\x71\x00\x88\xeb\x1e\x87\x38\x31\xf1\xd5\x1e\x69\x52\x1d\xac\x1e\xa7\x17\xd2\x56\xd7\xed\x77\x36\x09\xee\xe6\x15\x1e\x48\xb9\x5e\x14\x99\x0d\xcb\xa6\x65\xe5\xf7\x62\x04\x13\xef\x71\x43\x10\x3b\x0c\x62\x8f\xa0\x5e\x8c\x37\xb6\xa1\xa3\xe7\x86\x38\xf6\x03\x20\xf8\x28\x1a\xbc\xb9\xc1\x57\x87\xf8\xd0\x3b\x40\xe8\x87\xb6\x92\x16\x14\x97\x96\xb0\xf8\x83\x9a\x57\xaf\x94\x7b\xcb\xf7\x66\xd4\x55\x6f\x12\xea\xf3\x8e\xbb\x40\xb5\xe5\xe3\xb4\xe0\xc5\xb1\xc4\xaf\xd9\x8d\x13\x01\xb7\xac\x80\x80\x08\x62\x86\x64\x3f\x10\x63\x9c\x90\x67\xe4\xbf\x3d\xcf\x98\x95\x49\x33\xb3\x25\x54\x7c\xe5\x1c\xde\x64\x1d\x8f\xee\x23\x1d\x5b\xb4\x06\x29\xb4\xc0\xa4\x5d\x37\x39\xa2\x24\x90\xec\x33\x96\x40\x47\xf8\x44\x92\x42\x8d\x42\xc4\x80\x45\xa4\x08\x01\x38\xe1\x2b\xda\x1c\xe0\x0d\x6f\x30\x68\xd4\x86\x16\x35\x00\x1f\x59\xe6\x04\x40\x43\x4f\x1a\x8a\x1e\x6c\x8c\x01\x76\x5e\x92\x18\x61\x9e\x5a\x11\x9e\x52\x08\x10\x81\x18\xa6\x24\x18\xe4\x14\x19\xe5\x5a\x34\xa6\x89\x39\x23\xdb\x12\x48\x3d\x62\xca\x00\xda\x92\x37\x95\x21\x2b\xcb\x8d\x26\x22\x22\x2f\xdb\x40\x89\x26\x24\x33\xd8\x90\xb3\xa9\xd3\xaf\xaa\xc1\x7e\x07\x36\xaa\x69\x20\xd1\x4c\xcc\x8f\x8e\x58\xaa\xf5\x93\x96\x4b\xef\x69\x49\x95\x88\x6a\xe7\x3d\x69\xdf\xe5\x53\xc9\x19\x49\x83\x8e\x9f\x44\x25\x8e\x1d\x77\x72\xe0\x69\xe4\xcd\xc6\xe2\xf2\xd6\x51\x32\xe0\xa0\xe5\x90\xcd\xce\x97\xb3\x71\x8f\xc3\x4c\xc2\x01\xd0\x35\xab\x5b\x5b\x6b\xba\x48\x2a\x07\x15\xbf\x1b\x52\x11\xcb\x93\x13\x48\x23\x18\x0f\x59\xb7\x73\xc1\x68\x0e\x20\xc9\xc1\x8c\xbd\xbc\x0e\x1f\x1b\x8c\x31\x26\x24\xb3\x07\x03\x71\x16\x4c\xca\xfc\x70\x4a\x44\x53\x9d\x37\x01\xcc\xb3\x10\xcc\xb3\x14\x46\x6f\x4c\x88\xbf\x35\x4f\x8d\x77\xae\x7b\xf2\x94\x6b\x9f\xcc\x3d\x4a\xfd\xcb\x81\x42\xbf\x81\x83\x98\x27\xca\x14\x4d\x6e\x25\x34\x80\x13\x07\x0d\x53\xe2\x48\xc3\x57\x62\x9d\xc6\x86\x39\xd5\x44\xa7\x3e\xf4\x25\x65\x02\x1c\xfa\x1c\xc2\x19\x5f\xc4\xb1\xf5\x12\xc0\xd6\x57\xa9\x6f
\x12\x28\xa8\xe2\x44\x87\x89\x4e\xf0\x3a\xbd\x63\x2b\xb9\x37\x88\xb2\x9c\x71\xbb\x4d\x52\x22\xe2\x29\x97\x52\x30\x49\x42\x1c\xcb\x57\xbb\xe4\x06\x32\x79\xb4\x56\xc9\x22\x21\xd0\x8a\x2b\xc9\x2f\xba\x4e\x55\xc6\xbe\x2c\xf9\x41\x06\x24\xa0\xb0\xcd\x74\x0d\x66\xc3\xb0\x46\xbb\xd3\xeb\xa4\xbc\x17\x4a\x93\x51\xca\x67\x8e\x2b\x82\x3b\xcc\x2a\x85\xdd\x4d\x44\xcb\xb6\x55\x7e\xa9\xfc\x60\x06\x3a\x69\xd5\xcf\x8e\xb6\x58\x9a\x22\x93\xfe\x54\x64\xfa\x12\x99\x14\xb7\x2a\x32\x87\xbc\xc0\x91\x99\x17\xb6\x44\x26\xe3\x5c\x81\x9e\x71\xdc\xed\x77\x03\x8f\xa3\xc0\x3c\x4e\xb2\xd3\x78\x25\x32\xff\x5d\xac\xa4\xaa\xb4\xaa\x85\x58\x9c\x14\x0b\x40\x50\xc3\xff\x40\xe0\xa1\xa2\x1f\x0b\xf6\x76\x81\x69\xb3\x4e\x83\x73\x02\x7b\x67\x30\x09\x47\xe6\x11\xc3\x47\xcb\x24\xba\xc1\xbb\x66\x75\x8a\x29\xb5\xe4\x11\x77\x20\xc0\x45\x89\x93\xc3\x46\xbd\xb2\x34\x5b\x32\x0d\xe9\xab\xdb\x36\x4e\xa9\x68\x94\xea\x27\x29\x94\x7c\x19\x25\x1d\xc9\xd4\x65\x28\x00\x7d\x29\xc8\x69\x5a\x88\xf4\xdb\x3a\xd5\x3e\x87\x4c\xc5\x84\x28\x2b\x36\x89\xb6\x62\x54\x82\x92\x58\xf5\xed\x20\x6b\x05\x5d\xa2\x9d\x59\x15\x26\xd0\xe5\x96\xb4\x34\xa4\xf3\x48\x45\xd3\xd1\x84\x84\x12\x9b\x6e\x32\x6c\x7a\x16\xb5\xf6\x71\xd5\xd5\xed\x92\x17\xf4\xe3\x91\xf1\xcf\x72\xad\x12\x94\xbe\x7a\xa3\x0e\x17\x9e\xa6\x4f\x2b\x7d\xe9\xda\x2d\x33\xf6\xac\x66\xa7\x7a\xe2\xf5\x7b\x55\x87\x1a\x57\x55\xab\x92\x8d\xc8\x3c\x64\x99\xf9\xd7\xaa\x53\x59\x69\x6c\x22\x4a\x24\x7a\x36\x84\xdc\xe2\xac\x95\x50\x59\x9d\xa6\x9e\x41\xa1\x98\x3a\x0b\xcd\x79\x85\x5a\x8e\xe8\x81\x1b\x54\xca\x1d\x48\x1f\x4a\x22\x3f\x6a\x1a\x11\x6d\xc0\x45\xe4\xa6\x0a\x74\x3e\x86\x2f\xa0\xa0\x57\xf8\x60\x72\xce\x19\xd9\x92\xc2\x02\x10\x52\xf6\xb9\x4c\x6b\xb9\xfa\x10\x9c\x99\xad\xb9\xde\xd9\x90\xf6\xe1\x00\x92\x91\x99\x71\x8e\x99\x3a\x78\xd4\x60\xa2\xa2\xed\x87\xb2\x02\x51\x52\xc3\x95\x1a\xb9\x31\x68\x08\xa1\xc6\x1e\x9b\x81\x14\x9f\xc8\xc0\x3a\x90\x5a\xde\xca\x8e\xa2\x85\xcb\xb2\xf1\x20\x63\xa6\x02\x94\x86\xb6\xaa\xbe\x10\xde\x95\x93\xf1\x69\xa1\x72\xe6\x5a\x27\xb5\x5e\x46\x9e\xd3\xcc\xfb\x23\xf2\x5f\xab\x6a\x98\x3a\x40\x00\xe5\x01\x83\x72\x01\x57\x7c\x60\xa5\x6f\x1f\x9f\x7d\x5b\x14\x37\xc1\xcf\x3f\x70\xd1\x0a\x59\x83\x4a\xd8\xc2\xc2\xa2\xcd\x2d\x48\x37\x72\x90\xc5\x80\xe9\x08\xcb\x70\xdf\x46\x5d\x73\x4f\xd8\xc8\xdb\x19\x0c\x13\xab\x42\x06\x6a\x00\xb9\x13\xfd\xd2\x48\x85\x05\x50\xf2\xd5\x96\x82\xae\x80\x15\x85\x22\x14\x82\x30\x05\x3d\xb8\x09\xa1\x19\xa2\x15\xe1\x2d\x10\xa5\x30\x71\x2d\x46\x63\x66\x2d\x0f\x21\x52\x7d\x21\xab\x50\x89\x82\x25\x1b\xaa\xc6\xba\x4e\xfa\x0a\x9d\x2c\x8c\xb5\xdb\x09\x50\x29\x38\x45\x60\x4a\x46\x29\x4d\xeb\xdb\x02\x11\x24\x16\x08\x64\xa4\x6f\x4c\x40\x16\x17\xe8\x94\xdd\xc8\xa0\x7c\x89\xef\x56\x2e\x14\x11\xe3\xf7\xfb\xec\x8f\xfc\x21\x29\x7b\x31\x14\x24\xc4\x1f\xed\xad\xc7\xc1\xa6\x03\xa0\x33\x3d\xce\x15\xad\xe5\x11\x75\xbf\x6c\x2f\xf9\x67\x91\xfc\x73\x83\xbf\xb7\x3e\xf1\x07\x9e\xc4\x50\x3a\xa9\xc2\x92\xd3\x95\x30\x55\x1d\x55\x09\x05\xe9\xaa\x32\xe1\xdc\x59\x4d\x59\xb5\x57\xf2\xf7\x5f\xa2\x5d\x04\x50\x6b\x46\x7f\x67\x96\x7d\x99\x9c\x96\x22\xb3\x50\x72\x34\xd8\xce\xad\x91\x30\xf4\x13\x39\x08\xce\x89\x08\x2c\x98\x50\x60\x60\x07\xc5\xdb\x60\x94\x3c\x9b\xec\x26\x59\x82\x3d\x7a\x8b\x1d\x9e\x50\xeb\x30\x3e\x6b\x56\x6f\x66\xa6\x15\xfe\xa4\xec\xe1\x2b\xcb\xc8\xdf\x01\x00\x00\xff\xff\x09\x1d\x1a\x8f\xf0\x1d\x00\x00") + +func fontsRozzoFlfBytes() ([]byte, error) { + return bindataRead( + _fontsRozzoFlf, + "fonts/rozzo.flf", + ) +} + +func fontsRozzoFlf() (*asset, error) { + bytes, err := fontsRozzoFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/rozzo.flf", size: 7664, 
mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsRunicFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x54\x5d\x6b\x2b\x37\x10\x7d\xd7\xaf\x38\x84\x40\x12\x1a\x5b\x38\xf4\x06\xd2\xc2\xc5\x29\xf4\xe1\x3e\xf4\xed\x3e\x2e\x6c\xe5\xdd\x59\xaf\xc8\xae\x64\x24\x6d\x1d\x83\xc8\x6f\x2f\xfa\xda\xd8\x89\x7b\x09\xa5\xf4\x03\xea\x10\xcf\xcc\x4a\x73\xe6\xcc\x99\xf1\x76\x43\x77\x27\x2e\x71\x8f\x7b\xac\x3e\x61\xb1\xc2\xea\x81\x7d\xed\x09\x34\xb4\x64\xd0\x4d\xae\x17\xe6\x09\x58\x2c\xb0\x39\xe0\x27\x73\x10\x0a\x8f\x03\x3d\x0b\xd5\x92\x61\xd7\x22\xbb\xeb\x51\xb8\xbe\xb1\x4b\xd3\xeb\x96\xec\x92\xda\xe9\x86\x75\x72\x3b\x90\x83\xa1\x81\x84\x25\xdc\x2d\x57\x01\x65\x75\x87\xc7\x69\x8b\xd5\xc3\xc3\xf7\xec\x6b\x2f\x2d\x3a\xad\x1c\x1a\xad\xac\xb4\xce\x42\x77\x70\x3d\xc1\x4c\x8a\x2c\xa4\x8a\xc1\x09\x95\x25\xf0\xc5\x41\x5a\x88\xa6\x99\x8c\x70\x04\x61\x59\x27\x0c\x84\xc5\x17\x3c\x29\xbd\x5f\x02\x8f\xea\x80\x46\x1b\x43\x8d\x93\x5a\xd9\x5b\x68\x03\xd9\x92\xb0\xd0\x0a\x72\xdc\x19\xfd\x9b\x54\xdb\x08\x1e\xca\x5f\x59\x88\xdd\x8e\x84\x11\xaa\x21\x26\x0c\x61\x4f\x43\xa3\x47\x5a\x02\xbf\xe8\x56\x76\x87\x74\xbb\xb0\x0d\x56\x2a\xc2\x5e\xba\x1e\x23\xfd\x88\x5d\x6a\x91\x16\xa3\x90\x03\x46\x0a\xc8\xcc\x90\x9d\x06\xb7\x64\x8c\xfd\xfc\xbc\x1b\x84\x12\x81\x4b\x68\xb0\x93\xc6\x3a\x0c\x52\xd1\x0f\x2c\xc8\x8f\x05\x2e\x46\xb1\x95\x0d\xd4\x34\x6e\xc8\x5c\xa0\xd3\x06\x9d\x1c\x28\x90\x56\x4e\x76\xb2\x89\xc9\x4c\x00\xc0\x02\xb6\xd7\xd3\xd0\x42\x0c\x7b\x71\xb0\xd8\x10\x7e\x15\x57\xb7\x31\x49\xe9\x3d\xbb\x4c\x97\x42\x73\x17\xbd\x30\xed\x66\x10\xea\xe9\x22\x68\xbf\x33\x52\x39\x1b\x94\x12\x88\x4f\x6f\xb1\x99\x1c\x1a\xa1\xae\x5c\x80\xb1\xe3\x64\x7b\x6a\xd9\x7d\x42\xe8\x49\x6e\x7b\x17\x18\x0b\x34\xbd\x30\xa2\x71\x64\xbe\x79\x78\x0b\xa5\x1d\xa4\x6a\x86\xa9\x0d\x92\xb5\x64\x1b\x0a\x9b\x62\xd9\xea\x53\x4c\x1b\xc5\x73\xec\x1c\x03\xa9\xad\xeb\x71\x4d\xcf\xe5\x72\xa3\xc7\x91\x54\x12\xc6\xde\xe0\x3b\x08\x74\x53\xbb\x25\x74\xa2\x71\xda\xb0\xc5\x2a\x22\xb4\xd4\x89\x69\x70\x89\xec\xa8\x5b\x8a\x8d\xcf\xc3\x61\xab\x87\x78\x2d\x49\x19\xf8\x9d\xe0\xb2\x28\xcf\xe5\xfa\xd5\xd4\xd1\xf8\xda\x1f\x3f\x0c\x66\xcd\x8e\xfe\xfe\xbe\x00\xf1\xb3\x66\x75\xcd\xab\xba\x5e\xb3\x2a\x84\x7c\xcd\x78\x0d\xd4\x55\x38\xaf\x78\x38\xcf\xf7\x3e\x8e\x5b\x63\xcd\x7c\xed\xcf\x7a\xff\x50\xaf\xdf\x54\x21\xd3\xbb\xae\x6f\x72\x38\x7b\x7f\x75\x31\x5f\x05\x78\x8f\x0a\x6f\x7c\xa0\x7c\xaf\x99\x7f\xc9\x27\xf8\x1c\x45\x8b\x43\x38\xf7\x30\x65\xe0\x9d\xa9\x50\x90\xd3\xc3\x0a\xb9\x34\xc0\x7d\x39\xe1\xf0\xf9\x94\xa3\xb8\xbc\x2a\x2e\x0f\x59\xd1\xe5\x21\xad\xf2\x1f\x85\x08\x9f\x3f\x74\x13\x32\x4f\xb0\xa9\xad\x60\xe6\x32\xe5\xee\x6c\xcb\x52\x86\x11\xc5\x82\x28\x6b\x19\x1d\x9e\x3a\x4d\x6c\xb1\x66\x89\xeb\x2c\x8c\x2f\x62\xcc\x8c\x0b\xdf\x99\x57\x35\x13\xcc\xf4\xce\xfe\xa7\x12\x98\x4b\x04\xa7\x42\x2e\x8a\xe4\xe5\xdf\x09\xcf\x4e\x9a\x4f\x8e\x79\xc9\x4c\x69\x29\x27\x76\x32\xcb\xfa\x66\x5e\x67\x27\x9b\xae\x16\xfd\x8b\xfa\xfc\x75\x50\x27\xdd\x1c\xb5\x15\x4c\x2a\xee\x73\xf9\x6c\xe1\x33\x11\x9f\x3b\xc8\x2b\x08\xfe\x32\x0b\x5a\x65\xd6\x79\x08\x3c\x2b\x7e\x7c\x9e\x89\x15\x5e\xef\xe7\x98\x68\x16\x96\xef\x7e\x57\xaf\xcb\x9d\x5b\xe7\x47\x8b\x7f\xba\xc6\xc8\x0b\x83\xbc\x30\xbc\xae\xeb\x7a\xd6\x3c\xda\xbc\x25\x1c\x65\x04\x7e\x5e\x13\x3f\xd3\xf6\xf3\xcc\xb2\xc4\x67\xbc\xb2\xf1\x99\x44\x61\x91\xdb\xf2\xef\xed\x9f\x4c\x08\xe1\xe7\x2c\xce\xc9\xbb\xa0\xbc\x11\xde\x88\xf5\xb2\xa8\xeb\x57\x71\x23\x5c\x50\xa1\xce\xc0\x7e\x2e\xf1\xd6\xfb\x37\xbc\x6f\xff\x0f\xfe\xab\xc1\xef\x01\x00\x00\xff\xff\x5b\x92\x7b\xc9\xc1\x0b\x00\x00") + +func 
fontsRunicFlfBytes() ([]byte, error) { + return bindataRead( + _fontsRunicFlf, + "fonts/runic.flf", + ) +} + +func fontsRunicFlf() (*asset, error) { + bytes, err := fontsRunicFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/runic.flf", size: 3009, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsRunycFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x56\xdf\x8b\x1b\x37\x10\x7e\xd7\x5f\xf1\x71\x04\x92\xd0\xb3\x15\xbb\x49\xca\xb5\x47\x70\x0a\x7d\x48\xa1\xd0\x87\x3c\xe4\x61\x61\xa3\xdb\x9d\xf5\x8a\xec\x6a\x8d\xa4\xcd\xdd\x81\xb8\xbf\xbd\xe8\xd7\xda\x3e\xaf\x8f\xd0\x1f\xe9\x05\x6b\x46\x3b\x9a\x99\x6f\xbe\x19\x89\x34\x5d\xb3\x16\xcf\xf0\x16\x6f\xb1\x7a\x83\xc5\x0a\xeb\x57\xec\x63\x4b\xa0\xae\x26\x8d\x66\xb4\xad\xd0\x5f\x80\xc5\x02\x37\xf7\xf8\x55\xdf\x0b\x85\xf7\x1d\xdd\x09\xe5\xcd\xd7\x22\xa9\x9b\x5e\xd8\xb6\x32\x4b\xdd\x0e\x35\x99\x25\xd5\xe3\x3b\xd6\xc8\x6d\x47\x16\x9a\x3a\x12\x86\xb0\x5e\xae\x7c\x94\xd5\x1a\xef\xc7\x2d\x56\x57\x57\xaf\xd9\xc7\x56\x1a\x34\x83\xb2\xa8\x06\x65\xa4\xb1\x06\x43\x03\xdb\x12\xf4\xa8\xc8\x40\xaa\xb0\x39\x82\xb2\x04\x3e\x58\x48\x03\x51\x55\xa3\x16\x96\x20\x0c\x6b\x84\x86\x30\xf8\x80\x2f\x6a\xb8\x5d\x02\xef\xd5\x3d\xaa\x41\x6b\xaa\xac\x1c\x94\xb9\xc4\xa0\x21\x6b\x12\x06\x83\x82\xec\x77\x7a\xf8\x2a\xd5\x36\x04\xf7\xe9\x9f\x1b\x88\xdd\x8e\x84\x16\xaa\x22\x26\x34\xe1\x96\xba\x6a\xe8\x69\x09\xfc\x31\xd4\xb2\xb9\x8f\xa7\x33\x5a\x2f\xa5\x22\xdc\x4a\xdb\xa2\xa7\x5f\xb0\x8b\x25\xd2\xa2\x17\xb2\x43\x4f\x3e\x32\xd3\x64\xc6\xce\x2e\x19\xfb\x11\xbb\x41\x2a\x8b\xaf\xa4\x8d\x1c\xd4\x71\x8d\xa3\xa1\xda\x63\x1f\x77\x3b\xd2\x95\x8f\x22\xea\x3a\x7e\xea\x86\xdb\xf4\xa9\x23\x6b\x49\x1b\x76\xc5\xd7\x6b\x7e\xf5\x13\x7e\x27\xad\x45\x8d\x3f\x25\xe9\x8a\x70\x7d\x43\xdd\xf6\x75\x2f\xed\xa6\x97\x36\x72\xcf\x7e\xbb\xdb\x75\x42\x09\x9b\xf2\x35\x52\x1b\x8b\x4e\x2a\xfa\x99\xf9\x8e\x63\x81\x8b\x5e\x6c\x65\x05\x35\xf6\x37\xa4\x2f\xd0\x0c\x1a\x8d\xec\xc8\xf3\xa4\xac\x6c\x64\x15\x9c\x99\x00\x80\x05\x4c\x3b\x8c\x5d\x0d\xd1\xdd\x8a\x7b\x83\x1b\xc2\x67\xf1\xfc\x32\x38\xa9\xe1\x96\x3d\x8b\x87\x7c\x55\x17\xad\xd0\xf5\x4d\x27\xd4\x97\x0b\xdf\xee\x9d\x96\xca\x1a\x5f\x8d\x40\xf8\x7a\x89\x9b\xd1\xa2\x12\xea\xb9\xf5\x61\x4c\x3f\x9a\x96\x6a\xf6\x36\x46\x68\x49\x6e\x5b\xeb\x11\x0b\x54\xad\xd0\xa2\xb2\xa4\x9f\x34\x5e\x42\x0d\x16\x52\x55\xdd\x58\xfb\x2e\xd5\x64\x2a\xf2\xc3\x69\xd8\xea\x4d\x70\xeb\xc5\x5d\xa8\x1c\x1d\xa9\xad\x6d\xf1\x82\xee\xf2\xe1\x6a\xe8\x7b\x52\x91\x18\xf3\x12\x3f\x40\xa0\x19\xeb\x2d\xa1\x11\x95\x1d\x34\x5b\xac\x42\x84\x9a\x1a\x31\x76\x36\x82\xed\x87\x9a\x42\xe1\xd3\x3c\xb0\xf5\xab\x70\x2c\x52\xe9\xf1\x1d\xc5\x65\x81\x9e\x67\x9b\xbd\x28\x83\x70\xa5\x3b\xfc\xe8\xc5\x86\x1d\xfc\xfb\x7e\x1b\x84\xbf\x0d\x2b\x4b\x5e\x94\xe5\x86\x15\x7e\xcb\x37\x8c\x97\x40\x59\x78\x7b\xc1\xbd\x3d\x9d\xfb\xf6\xb8\x25\x36\xcc\x95\x6e\x56\xfb\x9f\x6a\x7d\x92\x85\x04\xef\x45\xf9\x32\x6d\x27\xed\xdf\x4e\xe6\x0a\x1f\xde\xa1\xc0\x23\x1d\xc8\xeb\x86\xb9\x87\x64\xc1\xbb\x40\x5a\x68\xc2\xdc\xc7\xe8\x81\x13\x51\x20\x47\x8e\x1f\x0b\xa4\xd4\x00\x77\xd9\xc2\xe1\x92\x95\x23\xab\xbc\xc8\x2a\xf7\x5e\x41\xe5\xde\xad\x70\xdf\x1a\xc2\xff\x9d\x55\x63\x64\x1e\xc3\xc6\xb2\xbc\x98\xd2\xe4\xb3\x93\xcc\x43\xe9\x5b\x14\x12\x22\x8f\x65\x50\x78\xac\x34\xa2\xc5\x86\x45\xac\x13\x31\x2e\x93\x31\x21\xce\x78\x27\x5c\xc5\x04\x30\xc1\x9b\xfd\xc5\x14\x98\x52\x78\xa5\x40\x4a\x8a\xa8\xa5\x7b\xc2\x93\x12\xfb\x93\xf6\x3c\x7b\x46\xb7\xe8\x13\x2a\x99\x68\x7d\xd4\xaf\xd9\xce\xc6\xa3\x99\xff\xcc\x3e\xdf\x37\xea\xa8\x9a\x83\xb2\xbc\x
88\xc9\x5d\x4a\x9f\x24\x5c\x02\xe2\x52\x05\x69\x04\xc1\x1f\x26\x42\x8b\x84\x3a\x35\x81\x27\xc6\x0f\xed\x09\x58\xc6\x75\xda\xc7\x08\x33\xa3\x3c\xb9\x57\xfb\xe1\x4e\xa5\xf3\x83\xc1\x3f\x1e\x63\xa4\x81\x41\x1a\x18\x5e\x96\x65\x39\x71\x1e\x64\x9a\x12\x8e\xdc\x02\x37\x8d\x89\x9b\x60\xbb\xa9\x67\x89\xe2\x19\x2d\x4f\x7c\x02\x91\x51\xa4\xb2\xdc\xa9\xfc\x9b\x0e\x7e\xfb\x2e\x91\x73\xf4\x16\xe4\x17\xe1\x11\x59\x0f\x8b\xb2\xdc\x93\x1b\xc2\x79\x16\xca\x14\xd8\x4d\x29\x1e\x6b\xdf\xe1\x21\x9d\x16\x57\x4c\x4b\xea\xc3\x8c\xf9\x3a\x56\x3d\x63\xde\x2f\xa9\xed\x53\x6f\x8e\xa4\x2b\xf2\x7d\xf8\x94\x6f\x41\xe1\x26\x87\xc7\xc2\x15\x7c\xe2\x23\x8b\x68\x3c\x5e\x1d\xe7\xc7\xed\x98\x3b\x54\xc4\x79\xfb\x14\x07\xaa\x38\x13\x29\x3d\x42\x2e\xeb\xde\xb0\xff\x1d\xbf\x31\x27\xde\xf9\xc9\x28\xd2\x40\x9f\xf2\x14\x30\x5c\xa7\x27\xe5\x1c\xcb\x38\x6c\xc2\x79\x56\x78\xf1\x34\x2b\x08\x81\x3e\xbb\xa8\x7f\x9e\x49\xc7\xc3\x5b\xc1\x93\x36\xcb\x47\xc1\xf7\xc3\xcd\x4f\x5f\x82\x59\xfc\xfc\x70\x0c\x8e\xf2\xf9\x65\xb1\x48\x3c\xcc\x32\x18\x22\xc0\x4d\xf8\xe7\x41\x1d\xb4\xea\xec\x4c\xcc\x1e\x3a\x03\xf6\xf4\xd6\xce\x04\x7c\x28\xe2\xff\x89\xfc\xea\xfe\xeb\xdb\xf9\x8f\x37\x7f\x05\x00\x00\xff\xff\x92\xe7\x29\xb9\x32\x0e\x00\x00") + +func fontsRunycFlfBytes() ([]byte, error) { + return bindataRead( + _fontsRunycFlf, + "fonts/runyc.flf", + ) +} + +func fontsRunycFlf() (*asset, error) { + bytes, err := fontsRunycFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/runyc.flf", size: 3634, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSbloodFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x5b\x6f\xdb\x36\x1b\xbe\xd7\xaf\x78\x58\xfb\xa2\x05\x3e\x13\x39\x34\x69\xfb\xe2\xc3\xc0\x0e\x41\xb1\x62\x5d\x5a\x20\x1b\xb0\x5b\xda\xa6\x65\xb5\x36\x19\x48\x4c\x8c\xec\xd7\x0f\x3c\x49\x94\x25\xc7\x1e\xb0\xab\x21\x6e\x2b\xb3\x16\xf9\x1e\x9e\xe7\x3d\x49\xab\xcd\xea\x42\x4e\x71\x8d\x2b\x9c\xbf\xc7\xec\x1c\x17\xe7\x45\xf1\xa9\x2a\x37\xca\x62\x61\xf4\xa3\xaa\x9b\xca\x68\xcc\x9f\xf0\xab\xd2\x16\xb7\xb2\x69\x94\xc6\xeb\x1f\x4a\x5b\x2d\x16\x4f\x73\x55\x37\xf7\x72\xa1\xb8\xa9\xcb\x37\x98\xcb\x46\x2d\x61\x34\xec\x5a\x15\x2b\xa3\x2d\x96\xaa\xa9\x4a\xad\x96\x98\x3f\x51\x51\x7c\xac\x6d\xb5\xd8\x28\x5c\x5c\xbf\x7f\x07\xb3\x42\xad\x16\x5c\xd6\xb6\xe1\xb2\x59\x54\x15\x15\xdf\xa4\x5d\x13\xb4\xda\x35\xbc\xb2\x4b\xfe\xb0\xad\x16\x6b\xae\x96\x0f\x6c\x21\x95\x66\x5b\x69\xd7\xdc\xac\x2b\x33\x6b\xac\xb4\xca\xdf\x58\x9b\xdd\x46\xea\x25\xaf\x55\x63\x8d\xe6\x52\x37\x5c\x2b\xcb\xbc\x88\xe6\xbe\xae\xb4\xdd\x54\xfa\x87\xff\xad\x91\xcd\xba\xda\x56\x7c\xb7\x93\x7c\x61\xb6\x4c\x1b\x3b\x5b\x99\x7a\xb6\x95\xd5\xa6\xf8\x54\x9b\x2d\x61\x59\x35\x8d\xda\x08\xfd\xa0\xab\x05\xd7\x0f\x4e\x03\x5e\xdf\xc8\xc7\x6a\x89\x3b\x8e\xcf\xee\xe6\x9b\xe2\x56\xed\x9a\xb2\x36\x0f\xf7\x0d\xed\x39\xf0\x3f\xb9\xb1\x61\x35\x93\xb5\x2d\xee\x1e\xe6\xdf\xd5\xc2\x12\xbe\x54\x5a\x11\xee\xb6\x72\xb3\x51\x35\x7e\xfe\xf2\xf5\xeb\x0d\x3c\x3a\xaf\xaf\xb0\xa9\xb4\x7a\x53\xdc\x48\xab\x08\xd7\xb8\x35\x8f\x38\xff\xf0\xe1\x2d\xce\xaf\xe8\xec\x9a\x2e\xcf\x30\x3b\xbb\x3e\x3b\x2b\xbe\xd6\xa5\xd4\xd5\x5f\xd2\x56\x46\x13\x6e\xfd\xb7\xdc\xe0\x0f\x5d\x79\x82\xec\x53\xe1\x74\x34\x84\x77\x97\xc5\x9d\xd2\x4b\x55\x13\xe6\x66\x2e\x45\x74\xb6\xf8\x78\x7f\x5f\x9b\x47\xb5\xdc\xfb\xf9\x37\xd5\x34\xb2\x54\xb3\xcf\x37\x84\xff\x5f\x7e\xf8\x5e\x96\xdb\x69\xa5\x2f\x44\x29\xcb\xad\x4a\x48\xfd\x54\xdc\xde\xfe\xfe\x6d\xf6\xcd\x34\xb6\xd2\xe5\xec\x17\xd3\x58\x42\x6f\x47\xf1\x67\xad\x56\x63\xc4\xed\x33\xec\x89\xef\xa1\x44\xe7\x57
\xef\x2e\xdf\x16\x45\xf1\xc9\xd4\xb0\x6b\xd3\x28\xec\xd6\xc6\xad\x1e\xca\xb5\x85\x5d\x4b\x1b\x11\xdb\xc9\x06\xd6\x18\xcc\xab\x92\x73\x8e\xcf\x30\xab\x95\xaa\x71\xe7\xef\x52\x31\xcb\x3e\xc5\x14\xc0\x74\x72\xc2\xd7\xa4\x98\x0a\x21\xda\x2b\x13\x2c\xdc\x99\x14\x53\xe2\x14\xd7\xfe\x36\x84\xf0\xa7\x04\x43\xda\xd4\xca\x19\x5f\xf8\xdd\xfe\xa0\x40\xd0\xc0\x84\x08\x7a\x20\x98\x00\x13\xcc\xfd\xce\x18\x23\xe6\x2e\xee\x77\x22\x80\x40\x48\x52\x10\xf5\xc3\x9f\xf4\xdb\x85\x60\xcc\x1b\x88\xb0\x0c\x5b\x9d\x10\x2f\x80\x13\xcf\x8f\xc7\xc3\x40\xd0\x0b\xb7\x08\xe6\x0b\x11\xc4\x30\x46\x48\x1e\xc3\x19\x30\x38\xec\x34\x23\xb8\xc0\xbc\x37\x51\xbb\x73\xa0\x15\xc1\x92\x55\x4e\x3f\x21\x81\xd7\x13\xe4\xcd\x0f\x08\x8d\x5d\xa3\xb6\x80\x55\xf0\x35\x21\x14\x24\x23\x09\x9d\x74\x7b\xe2\x09\x44\xea\x18\x73\x5f\x19\x84\x2d\x0d\x89\x07\xef\x39\x8b\x5e\x30\xb7\x62\x22\x9c\x02\x18\x11\xe5\x7e\x80\x53\x9f\x8b\x20\xae\x03\xa1\x5d\x79\x34\x22\xb2\x2d\x14\x9e\xca\x7d\x3c\x87\xae\x07\xe5\xe4\x9c\x73\xdc\x75\xfb\xa7\xfd\xa5\xc8\x95\x8c\x6d\x68\x97\x6e\x3d\x1d\x5e\xbc\x8a\x78\xdb\x47\x42\x8c\x89\x16\x19\xc6\x12\xb9\x2c\x52\xeb\x3d\x18\xf8\x20\xda\x70\xf4\x51\x14\x49\x73\x87\x83\x79\x3e\xa6\x22\xa8\x4e\x42\x1f\xc5\x8c\xe8\x51\xf6\x3a\xd0\xa6\x03\x6d\x99\xd9\x9c\xc5\xf8\x40\x1e\xc2\xe4\xb4\xd1\x9e\xb6\x4e\x42\x96\x01\x8e\xf5\xd6\xae\x90\x7f\x44\x34\xcc\x80\x14\x3c\x03\x6f\x7b\x64\xa4\x10\x02\xb2\x38\xed\xeb\x0f\xf1\x2f\x90\x92\x26\x4b\x5f\x84\xf4\x25\xda\xcb\xfe\x36\xdc\x5a\xb4\xd3\xf1\xa8\x1f\x27\xa1\x2d\x3a\x03\x90\xd7\x00\x20\x99\xc0\x03\x86\x1d\xe3\x63\x84\x0d\x39\xc7\x01\x2b\x3c\x0d\x27\x8a\x10\x21\x05\x3b\x20\x29\x3a\x42\x34\xb4\x22\x1c\x4e\xab\x88\x79\xb8\xb4\x95\xbb\xad\x75\xcf\x26\xd7\x5e\x12\x64\x89\xdc\xde\xed\x12\x19\x3e\xa8\xf6\xd2\x31\x16\xf4\xfe\xaf\xa9\x9c\x3f\x93\x99\xad\x7e\xd1\x06\x43\x5e\x52\x52\x12\x75\xfa\x3d\x12\xcf\x62\xe8\xb2\x41\xa4\x60\x7a\xf5\xaa\x75\x8f\xe8\x20\x93\xa9\x9b\x78\x7d\xb1\xf9\xc1\xb3\xe9\x34\xfa\xfc\x8f\x67\x43\x52\x1c\x6a\x4a\x23\x65\x20\x25\x46\x3f\x30\x9f\x49\x8c\x71\x11\x7b\x51\x45\x87\x2b\x49\x3f\xb9\x32\x1a\x91\xf9\x92\xb9\x72\xaa\x15\x23\xf5\xcc\x5b\x31\x8c\xed\x2c\xc3\x44\xa7\xda\x97\x98\x5e\x9b\x8d\x19\x76\xb8\x48\x9d\x26\x22\xe6\xe8\x71\x6a\x3b\x34\xfa\xd4\x76\xf3\xc6\x28\xb5\x47\x6b\xde\x49\xd4\x86\xd3\xa1\xd6\x85\x33\xde\xfb\xa8\x6d\xd0\x86\xb2\xb2\x14\x0e\x70\x5f\xe4\x43\x06\x70\x1a\xb4\xd2\x7d\x23\x59\x32\x52\xb4\x83\x03\x81\xda\xe1\x01\xe3\x98\x77\xc9\x98\x63\xde\x22\xd7\xa7\x8d\x1f\x0c\x9e\x2c\x7e\xe2\xbf\x28\xc8\xf5\x54\x91\xfc\x07\xf2\x0a\xe7\x3f\xfd\x22\x37\xea\x9a\xaf\x33\xc9\xb5\xb1\x88\xc4\x48\x44\xfe\xd3\x26\xcd\x0f\x06\xf5\x50\x44\x0e\xf0\x49\x11\x89\x5c\x06\x32\x3b\xa2\x14\xf7\x97\xd0\x59\x42\x83\xf1\xf1\x70\xa5\x38\x44\xf6\x80\x29\x8c\x74\x61\x8c\x76\x61\x3e\xec\xc2\x79\x07\x4d\xa3\x73\x68\x9f\xa9\x57\xb4\x44\xd2\x60\x60\x3a\x98\x4e\xa3\x5c\x8c\x36\xcf\xa3\x22\xc0\xdc\xd4\xcd\x9e\x29\xfc\xed\x14\xdc\x97\xc3\x90\x45\x6b\x18\x01\x43\xbc\x22\x18\xe5\x2f\x71\x9e\xf5\x43\xf1\x7e\x63\x3e\x92\x90\xe8\xe2\x05\x1d\x47\x5e\xc8\x78\x42\x76\x12\x58\x36\x64\xec\xc3\xcc\x47\x61\x1e\x1f\x73\xba\x47\xa6\xde\xa4\x78\x28\x9b\x93\x2b\xa1\x72\xa5\x07\x85\x54\x04\xba\x47\x9b\x49\xbf\x87\xb7\xcf\x49\xd9\x20\x1d\x9a\x78\x9a\x21\x72\x75\x6d\x44\xa6\xf6\x9f\x02\x2a\xb5\xfe\x60\x24\x7a\xda\xc2\x2c\x88\x2c\x83\xc4\x81\x41\xe3\xd8\xf8\x71\xc2\xe6\x62\xca\xc3\x67\xda\x36\x2d\x50\xf0\x39\x3e\x0a\x4f\x8e\x3c\xca\xbd\x4c\x07\x2f\xd3\xc1\xcb\x74\xf0\x32\x1d\xbc\x4c\x07\x2f\xd3\xc1\x7f\x61\x3a\xe8\xbf\xb5\x62\xa1\x09\x65\x59\x12\xb7\x8f\xbc\xe0\x8a\x7d\xbd\x23\xd1\x7d\xf1\xd0\x4e\x91\xbd\xca\x44\x3e\x05\x6
4\x2f\x3e\xd1\x9b\x02\x02\x0e\x93\xf8\x5e\x5a\x74\x5d\xfb\xd8\x6a\x52\x64\x7f\xfe\xed\xff\xfc\x1d\x00\x00\xff\xff\x06\x60\xe4\xa7\x4c\x1a\x00\x00") + +func fontsSbloodFlfBytes() ([]byte, error) { + return bindataRead( + _fontsSbloodFlf, + "fonts/sblood.flf", + ) +} + +func fontsSbloodFlf() (*asset, error) { + bytes, err := fontsSbloodFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/sblood.flf", size: 6732, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsScriptFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x5b\x7d\x6f\xe2\xb8\xf3\xff\x9f\x57\x31\x7f\x44\x3a\xfa\xfb\xb5\x6b\x92\x3e\x50\xa4\xd3\xa9\x59\xc8\xb6\xd1\x52\xc2\x05\xd8\xbd\x95\x22\x59\xec\x36\xdd\xa2\xa3\xa4\xe2\xe1\x4e\x2b\xe5\xc5\x7f\xe5\xc7\xd8\x8e\x9d\x02\xed\xad\x0a\x71\x82\x3d\x33\x1e\xcf\x7c\x3c\x33\xce\x3e\x2e\x1f\x83\xb9\x07\x5d\xb8\x04\xff\x0a\x3a\xe0\x77\xa0\x03\xe7\xbd\xce\x45\x6b\xf2\x63\xbd\x78\xd9\xc2\xf7\x5f\x70\xbb\xcc\x57\x2b\xe8\x3f\xcd\x5f\x5e\xf2\xe5\x12\x2e\x50\xef\xbc\x15\xaf\x7e\x2c\x77\x0f\xf9\x06\xe2\x49\x02\xc3\xf9\x76\xb1\x3a\xf3\x5b\x8f\x8b\x9f\xcb\x7c\x0b\xeb\x7c\x99\xcf\x37\x39\x04\x1f\x7c\x38\x3b\x03\x3f\x80\x70\xf7\x13\xfc\x5e\xef\xa2\x35\xce\xd7\xcf\x8b\xcd\x66\x51\xac\x60\xb1\x81\xa7\x7c\x9d\x7f\xff\x05\x3f\x17\xff\xe4\x2b\xd8\x16\xf0\x5c\x3c\x2c\x1e\x7f\xc1\xf6\x69\xb1\x81\xc7\x62\xb5\x3d\x85\xf9\x06\x96\xc5\xea\x27\xb9\x6e\x9f\xf2\x16\xed\xb0\xc8\xd7\xbf\x6d\x60\x35\x7f\xce\x09\x8d\x97\xe5\xfc\x47\xfe\x00\xc5\x0a\xe6\xf0\xa3\x78\x7e\xce\x57\x5b\x58\x2e\x56\xf9\x87\x56\xeb\x9e\xf5\x7e\x20\x73\x18\xcf\x77\x4b\xf8\xb8\x5b\x6f\x8b\x15\xfc\xbe\x29\x96\xbb\xed\xa2\x58\xdd\xe4\xf3\xf5\xf6\x69\xb9\x58\xfd\xfd\x61\x95\x6f\xff\x00\x3f\x40\xbd\x2b\x22\xc8\x82\xcd\x0e\x56\xf9\xbf\xf0\x32\x5f\xcf\x9f\xf3\x6d\xbe\x6e\x6d\x76\x2f\x2f\xc5\x7a\xcb\x08\x7e\x8a\x6f\xc9\x5c\xe7\xab\x07\xd2\xfc\xba\x58\x7d\x00\xb8\x9f\xff\x82\xf9\x72\x53\xc0\xf7\x1c\x36\xcb\xc5\xcf\xa7\xed\xf2\x17\x3c\x0b\x29\x1e\x8b\x35\x7c\xcf\xb7\xdb\x7c\x0d\xbb\x4d\xde\x2a\x1e\x29\xf9\xc7\xdd\x72\x79\xf6\xef\xe2\x61\xfb\x84\xfe\xce\xd7\x2b\xb4\x79\xde\x6d\x9e\x60\xbe\xdc\xe6\xeb\xd5\x7c\xbb\xf8\x27\xdf\x9c\xc2\xf7\xdd\x16\x1e\xf2\xc7\xf9\x6e\xb9\x85\x62\xb7\x7d\xd9\x6d\xc9\xcc\x47\xc9\x14\x7e\x3c\xcd\x57\x3f\xf3\x87\x0f\xad\x96\xe7\xdd\xb8\x3e\x37\x2d\x80\x9b\x16\x94\xca\xa7\xe0\xcf\x80\xfc\x58\x90\xbb\x92\x3c\x26\x03\x94\x2f\xd6\x85\xf6\x11\xff\x68\xb3\xa4\x7f\x37\x2d\x38\x3b\xfb\x7f\xfa\xa7\x37\x95\x0e\xda\x30\xde\x14\x6d\x72\x2d\x71\x89\x6f\x5a\xd0\x2e\x71\x49\xee\x71\x89\xcb\x13\xfa\x1c\x4a\x50\xfb\xa9\x03\x49\xff\x13\x40\xf4\x0e\xd1\x5f\x11\x7d\x88\xa0\x7d\x52\x75\xa9\xb1\x6a\x9f\xb0\x2b\xca\x78\x6f\xc8\x08\x8d\x0c\x63\x94\xd9\x58\x11\xa5\x90\x0e\x9e\xf2\x91\x2a\xe3\xa4\x88\xde\x40\xfb\x82\x4c\x51\x19\x21\xcf\xa6\xa9\x7d\x21\x43\xad\xea\x35\x2b\x11\x53\xec\xff\x31\x55\xa2\x32\x73\x2a\x42\xff\x41\x59\x11\xe5\xbe\x3e\xd0\xf8\x88\x69\x3a\xa8\x02\xa7\x7a\xc6\xa9\x7a\xfb\x53\x05\x95\x2a\xbb\xd8\x16\xad\x5a\x2d\x75\xd1\x30\x06\xd1\x2b\x63\xca\x05\xa6\x3c\xd9\xa0\x2b\x57\x19\x56\xd5\x20\xad\x53\x42\xba\xac\xab\x5e\xb5\x68\x8c\xb9\x00\xd4\x6a\x3c\x4d\x2a\x8c\xb1\x45\x2a\x2c\x87\xb0\x65\xf6\x30\xa6\x33\xf2\xd8\x7d\x86\xf9\xbd\x6b\xb1\xa8\x5b\x90\x2b\x66\x66\x0f\x20\xcd\xdc\x61\xee\x98\x09\x52\x0a\x02\x5c\x82\x3d\x38\xf2\x8e\xc8\xe3\x9c\xd9\x7d\x89\xb8\xe8\x99\x63\x20\xe6\x1c\x09\x07\x3e\x37\xa6\x18\x0f\xc9\x05\x71\x89\x2a\xdc\x8a\x7b\x55\xed\xae\xae\x4e\x3e\xa2\xa4\x7d\xd8\x7a\x89\x75\x12\x17\x6d\x04\xb7\x2c\xe1\x8
e\xba\x95\x59\x7e\x44\x75\x7f\x45\x95\x57\x66\x16\x88\xb3\x18\x7d\x65\xfc\xae\xa9\x1b\x24\x91\xe2\xe3\x75\x6b\xe3\x96\x43\x45\xe3\x3e\x5a\xd8\x7d\x40\xe1\x44\x56\x45\xe8\x1f\xe3\x53\xee\x12\x88\x02\x25\x6d\x12\xf5\x31\x9e\xc4\x26\x54\xc7\xa8\x0c\x02\x9f\x56\x4b\x58\x42\xe5\x55\x66\x93\x61\x62\x65\x1f\x16\xfc\x3e\x15\xf3\x91\x06\x45\x2c\x4c\xcc\x89\x3f\x69\x2b\x36\x66\xf8\x27\x91\xc6\x63\xb2\xb4\xf1\x09\x67\xef\x81\xda\xd0\x8d\xdb\x18\xef\x55\x0a\x69\x53\x86\x12\x03\x99\xe1\x78\x58\x36\xdb\x28\xab\x29\xc4\x98\x0d\x17\x46\xc8\x92\x61\xec\xf1\xc5\xa2\x57\xa7\x97\x51\x29\xa8\xbb\xb4\xf1\x09\x94\x1e\xff\x11\x97\x6c\x6e\x80\xf8\xf2\x40\x1b\x23\x8d\xad\xd2\xd2\x27\xd8\x3e\xe1\x76\x8f\x32\xe6\x0e\x88\xcf\x02\xb5\x9b\xd0\x4e\x3c\x43\xa5\x84\x12\x02\x15\xbc\x45\x9e\x65\xb2\x85\x1a\xc4\xc0\x42\x8d\xa5\x72\xc5\xc0\x2c\x2b\xc3\x28\x6b\x84\x37\xb1\xb5\x4a\x98\x13\xb2\x64\x98\x73\x45\x1c\xe6\xb2\xd2\x26\x38\x12\x82\x73\xa8\x29\x21\xf3\x44\x0b\x34\x7b\x34\x05\xf7\x30\xeb\x98\xe1\x12\x9f\x88\xfd\xd0\x63\x1d\x3d\x2c\x5a\xdc\x12\x9c\x64\x4e\x31\xa6\xfb\x8e\x10\xa8\x8a\x62\x5e\xbd\xd1\x9c\x45\x33\xb0\xca\x61\xe4\x54\x11\x64\x00\xd5\xd2\xd8\x9b\x8d\xee\x57\x6d\x8f\x19\xce\xd0\x11\xdb\x63\xdd\x7d\xb1\xe2\xbe\x9e\xd6\xb0\x8c\x67\x6a\xaa\x36\xe8\x6a\x8f\xe1\x60\x82\xe8\x1a\x1f\x02\x26\x95\x38\xd2\x7c\xd1\x81\x56\xa0\x38\x11\xf7\x21\x29\xa1\xf0\xa0\x3d\xbc\x98\xff\xe8\x89\x96\x27\x26\xf5\x8a\x17\x7b\x58\x79\xde\xc6\x2a\x9e\x82\xfb\xc6\xa2\x1e\x8b\xf9\x48\xda\x92\xb4\xb0\x8f\xd2\x20\xac\xd0\x45\x06\x25\xad\xad\x09\xac\x11\xae\x13\x37\x19\xe8\x4c\xea\x8c\x6a\x53\x30\x18\x52\x5e\x99\x88\xb5\x00\xbc\x4c\x52\xf0\x90\x70\x0d\x42\xb5\xd1\x6c\x34\x6a\x15\xee\x81\xbd\xc9\x36\x48\x41\x02\x55\xe1\x16\x87\x22\x8f\x9b\x60\x1b\x8b\xbd\x84\x07\x84\x42\x4a\x24\xac\xc7\xc4\x30\x5c\x4f\x02\x78\x90\x77\xa3\x46\x16\x34\x44\xe0\x1e\xc1\xe7\x22\x22\xfb\xec\xa6\xb6\xf9\x33\x20\xb6\x04\xb0\xb8\x94\x54\x59\xda\xe2\x59\xbe\xe0\xc6\x15\xd3\xd4\xaf\x9e\x76\x65\x9e\xc0\xa3\xa7\xcc\x9d\xfd\x68\xeb\x41\xb5\xc0\x83\x0b\x24\xac\x83\xe8\xdb\xb5\x7d\x0b\xb8\x2e\x65\xec\x21\x61\x23\xc3\x0c\x33\x50\x5d\x5a\x9b\xf8\x02\x38\x38\xc2\x37\x47\xe0\xfc\x49\x29\x86\x96\xfb\x4b\xac\x2e\x0f\x19\x2b\x51\x8e\x05\xe5\x96\xe8\xd6\xc3\x9e\x98\x9a\x72\xd1\x46\x94\x59\x45\xc6\x3e\xb9\x53\x29\x20\x97\xcf\x6a\x82\x54\x9d\x95\x22\xcd\x16\xaa\x00\xda\xd8\x57\x6a\x00\x4a\x16\x5e\x3c\x66\x03\xa8\xb4\x48\x3e\xd1\xba\x42\x51\x75\x15\x51\x07\x62\x7b\x27\x11\xb7\x7c\x65\xad\xf1\x09\x88\xd0\xd5\x0e\xcb\xaf\x2a\x10\xec\x11\x73\x0d\x7e\xf8\x92\xb1\x3f\xa6\xd2\x92\xfd\xb1\x08\xc1\xb1\x8b\x9b\x54\x1c\x58\xd4\xaa\x53\x56\xc8\x36\xef\xe3\x0e\x73\x06\xe6\x07\xda\x16\xee\x32\x67\xd9\xe0\x21\x81\x74\xa2\x92\x8f\x46\xc2\x12\xb2\xb2\x61\x7c\xa3\x03\xcb\x22\x04\x89\x04\xed\xe3\x4f\xa5\x02\xe4\xf4\x61\x5f\x77\x12\x9c\xa9\xdc\x64\x37\xb0\xaf\x6d\x75\xc1\x25\x07\x48\x3e\xe5\x06\x6b\x70\xb4\xaa\x6d\xcc\x33\xfc\xbe\xee\x15\x56\xe4\xa4\x6b\xcb\xc5\x6d\x58\xa3\x9a\xa9\x68\x22\xa8\x44\x54\x3a\x7b\x98\x0a\xbf\x22\x3e\xcc\x43\x0e\x37\xb2\x2c\x95\xae\x03\xa1\x02\x3e\x5a\x6e\x8d\x62\x7f\x33\x19\x4b\x1b\x45\x74\xeb\xf3\x90\xb0\x51\x73\x63\xe4\xc3\x90\x44\x87\xdf\x55\xa4\x10\xbb\xde\x8d\x51\x99\xd4\x3f\x92\x4e\xa6\x8d\xfd\x43\xb6\x91\x0a\x4b\x3c\x3f\x61\x79\x58\xfd\x5b\x43\xb0\x02\xaa\xbc\xfb\x95\xd4\x78\x8f\x10\xb6\x90\x60\xe8\x8c\xcc\x1b\x03\x72\x4e\x81\x2d\xe5\xbe\x81\xe4\x7e\xb1\x63\xa1\xd6\x17\xf6\xf0\x75\x53\xb6\x6a\x6a\x07\x82\x94\xaa\x61\x43\xb4\xc3\x1c\x90\x57\xb2\xa8\xc5\x94\xa2\xea\xca\xef\xb0\xd0\x71\xe5\xf9\xfe\x55\x07\x60\x94\x9c\x7d\x4c\xa3\xf0\x33\x4c\xc6\x61\x3f\xaa\x55\xb6\xeb\x5f\x64\x
9c\x0f\x10\x8f\xbe\x44\xe9\x34\x1a\x40\xf4\x57\x7f\x18\xde\x87\xd3\x38\x19\xc1\x7d\x98\x7e\xae\x0a\x4c\xaa\x85\x52\xb6\x74\x6c\x00\xd0\x8f\x46\x53\x98\xc4\xb7\x23\x9b\xd7\x94\xbc\xfe\x23\x14\xae\xc4\xd8\x8a\xd2\xfc\xab\x73\x80\x71\x32\x1b\x0d\x14\x42\x58\x74\x41\x2c\x64\xc4\xb8\x14\x56\x86\x4b\x1e\xb8\xb5\x91\x56\xd2\x33\x94\xe8\x5f\x5d\x00\xf4\x67\x69\x1a\x8d\xfa\xdf\x0c\x11\x33\xc0\xcc\x47\x91\xa8\x61\xf1\x6d\x43\x89\x4c\x75\x11\x2f\x01\xbe\x45\xa3\x1a\x19\xb6\x32\x38\xc3\x08\x0b\x21\x6b\x33\x54\xc9\x5c\x01\x7c\x4c\x93\xcf\xd1\x08\x3e\x86\x69\x5d\xad\x2a\x08\xf8\x57\x5d\x80\x49\xd4\xa7\xab\xc1\xf9\xd2\x84\x0d\x44\x1c\x4c\x7d\x0d\xb3\xfa\x3f\xfb\x22\xa3\xae\x01\x06\x71\x18\xa5\xd1\x24\x9e\x30\x4b\x66\x1b\x93\xe7\xba\x80\x61\x49\x3d\x80\x7e\x32\xfe\x96\xc6\xb7\x77\xca\xd2\xb2\xe2\x13\x87\x37\xe2\x0f\x99\x40\x0f\x44\x92\x4f\x5e\x9d\x2b\xd9\x5c\x99\xef\x33\x37\xe1\x8e\xc2\x4b\x74\x35\x5f\xf5\xbb\x1d\x80\x4f\xd1\x7d\x3c\x8a\x47\x11\x24\xe9\x20\x1e\x85\x43\x88\x47\x83\xb8\x1f\x4e\x93\xd4\x1e\x12\x62\x5e\xa2\x64\xd4\xec\xf5\x79\xbf\xeb\x03\x0c\xa3\x4f\xd3\xb3\x71\x12\x8f\xa6\xf1\xe8\x16\x06\xc9\xec\xe3\x30\x82\x70\x74\x3b\x8c\xe0\xcf\x59\x32\xd5\x4c\x9d\xcf\x87\x46\x73\xac\xf8\xc3\x6d\x23\x33\x80\xd4\xef\x06\x40\x4f\xa2\x2a\x6b\x60\x69\x04\xef\x52\x72\xa9\xd4\x8b\xa6\xe2\xee\x39\xc0\x24\xf9\x34\x85\xbb\x6f\xe3\xbb\x68\x54\x8b\x0d\xd8\x45\x39\x7f\xb0\x90\xb8\x00\x48\xa3\xdb\x78\x32\x8d\xd2\x68\xe0\x58\xa6\x53\xa8\x96\x89\x95\x60\x32\xb9\x8d\x63\x24\x97\x89\x86\xa3\xaf\x2e\xd3\x25\xc0\x7d\xd8\x4f\x93\x91\x52\x30\x37\x32\xa9\xfa\xd5\x5c\x92\x2b\x80\x41\x74\x9b\x46\x91\xb4\x68\xb6\xb4\xac\x62\xae\xc4\xdc\xb5\xdd\xcb\xef\x76\x01\xc6\xc3\xd9\xe4\xec\x3e\x1e\xcd\x26\x2e\xcc\x31\x8e\x84\x2a\x8f\x34\x25\xb9\x06\x98\xcc\xc6\x51\x3a\xe9\xa7\xf1\x78\x0a\xd3\xaf\x89\xc8\x40\x4f\xe8\x61\x88\xf4\x2a\x3d\xbb\xf4\xbb\x3d\x63\xe0\x5d\x1a\x45\x54\x25\x14\x9b\xa8\x4b\x72\x9f\x74\x4c\xe4\xba\x03\x10\xf6\x67\xd3\x08\xc2\x3e\x81\xcf\xfa\x91\x9b\x9a\x78\xfa\xd7\x3e\xc0\x7d\xdc\x4f\x13\x75\xca\x7b\x6c\x31\xa5\xdc\x62\x4a\xed\x57\x21\x45\x00\x30\x8e\x87\xfd\x34\xf9\x2a\xd7\x42\x9e\xf5\x94\xe2\x7c\xa2\x04\x59\x16\x35\x1b\x60\x6c\x9a\xfe\xf5\x39\x11\x74\x30\x18\x46\x30\x48\xa6\xea\xa4\x5b\xe0\x25\x5e\x43\x64\xe2\x5f\x13\xa8\x8e\x06\xf1\x70\x18\x56\xca\x56\xbf\xe8\x10\x01\x75\x64\xc0\xa5\xbe\x0a\xc9\x28\xaa\x9f\x80\xd9\x96\xef\xfa\x8a\x98\xf1\xa4\x3f\x1b\xba\xe1\xc6\xba\xd5\xbf\x0a\x37\xd7\x5d\x00\x8a\x99\x07\xe1\x8d\x86\x31\x0a\xf6\xe8\xea\xb9\x06\xf8\x32\x1b\xde\x86\x29\x7c\x4a\x43\xb6\x23\x24\x23\x42\x32\x4c\xa7\x51\x2a\x8a\xca\x2c\xef\x11\x85\x64\x51\x5f\x42\xfc\xbc\x19\x69\x27\x6e\xc6\xd2\xf5\xec\x0c\xee\xc2\xe1\x27\x4e\x5d\x10\x67\xb4\x45\x78\xc2\x9c\x05\x84\xbf\x18\x2a\xe9\x75\xea\x64\xa9\xbf\x08\xc9\x27\x0c\x36\x45\xbc\x80\xb8\xf0\x18\x9f\xc8\xea\x98\x10\xdf\x94\xdf\xd8\xec\x7b\x6a\x1c\xf3\xe7\x2c\x9a\xe8\x9a\x66\xbd\x0b\x11\x3f\xf0\xf2\x89\xeb\x98\xce\xef\x05\x00\xc3\x70\x1a\x8f\xa0\x1f\x8e\xe3\x69\x38\x84\x61\x34\x9d\x46\x29\x84\xf0\x35\x9e\xde\xc1\x6d\x1a\x7e\x89\x58\x38\x2a\x47\xbe\x35\xa0\xf6\x7b\xe7\xcd\x4c\x29\x66\xf0\x98\xe8\xfd\x98\x5e\x34\x33\xed\xc7\x69\x7f\x76\xff\x69\x18\xfd\xc5\x38\x67\xef\xc7\xf9\xb2\x99\xf3\x34\x1e\x0e\xe8\x74\x51\x55\x2a\x7d\x3b\xd3\xab\x66\xa6\x4a\xe4\xf4\xae\xd9\x92\xdf\xeb\x36\x33\x4e\x09\x5e\x84\x1f\x13\x6e\x56\xd2\x27\xa0\x8d\x4f\xde\xc8\xf9\xda\xc5\x39\x12\x33\xab\x0e\x26\x09\xd9\xea\x70\x12\x44\xcc\x2d\x18\xaa\x2c\xf1\x2b\x49\x98\xdf\xeb\x39\x18\xf7\xb9\x69\x55\x80\x7f\xd0\xc9\xa8\x3c\xed\xe2\x8c\x82\x4e\xc7\xc1\x28\x32\xbc\x55\xd6\xd9\xe5\x61\x67\x9b\x15\xe4\xfe\x38\x3b\x93\
x70\xe0\x3a\x80\x0d\x3a\x7e\x33\x1b\xe9\x9f\xe8\x6d\x6c\x5c\xd8\x13\x59\x3c\x12\xbd\x71\x4a\x2e\xc8\x89\x6a\xee\x50\x54\xf8\x79\x1c\x2b\x17\xd0\xc4\x16\x48\x65\xfb\x2c\xf6\x44\x48\xc7\x5e\x94\x12\x95\xd0\x4c\xb7\x78\x9d\x8d\x0b\x55\x62\x0b\x88\xbe\x81\x8d\x0b\x47\x62\x1b\x6c\xa2\xb7\x4d\xc9\x05\x1d\xb1\x15\xb3\x8a\xb7\xb0\x72\x61\x45\x34\xbd\x73\xbd\x81\xc0\x33\xf3\x43\xdf\x40\x08\x3a\x2e\x78\x18\x39\xf1\x7f\xbf\x03\xe5\x06\x30\x0c\x7c\x17\x54\x24\x0e\xa8\x38\xaa\xb6\x15\xf8\x2e\xa8\x48\x5c\x56\x78\x1c\x1b\x17\x54\x24\x8d\x56\x78\x1c\x2f\x17\x54\x24\x96\xe5\x7a\x03\x1b\x17\x4c\x24\x4d\xc6\x7e\x1c\x2b\x92\xd1\xce\x86\xd3\x78\x3c\x24\xc1\xbf\x52\x66\xd1\xd2\x70\x2f\x43\x1e\x2b\x36\x3b\x6b\x26\x81\xef\x82\x03\x2e\xf5\x64\x9a\x26\x9f\xf9\x76\xcb\x0b\x4f\x24\xe5\xe6\xb5\x3d\xfe\x12\x83\x48\xc8\x11\xae\x8a\x51\xa6\xc8\x2e\x28\x98\x99\x20\xca\x02\x53\x78\xf7\xd2\x6a\xe0\xbb\x20\x62\x66\x9a\x76\x15\xa7\xbe\xb7\x08\x2e\xe4\x98\x59\x63\x56\xb6\x49\xfe\x07\x72\x04\x2e\x30\x99\xd5\x6d\xf5\xbf\x2b\x76\x07\x81\x0b\x6c\xbe\xd5\x56\x04\xe9\x02\x34\xbc\x46\xf0\xca\xbb\x03\x41\xe0\x82\x9e\xe9\x5d\x92\x8e\x94\x8c\x54\xbe\x86\xd9\xf4\xde\x8d\x3e\x31\x4a\x5f\xc2\xcd\xe4\x3e\x1c\x4a\xea\x93\xbb\x30\x1d\x03\x55\xe8\x81\x15\xf2\x20\xb8\xb0\x52\x34\x33\xba\x4c\x93\xe4\xc0\x83\x83\x20\xb8\x6c\x62\x52\x8b\x10\x8f\x64\x72\xd5\xc4\xc4\x1e\x1f\x1e\xc9\xa9\xdb\xc4\x49\x40\x3e\xd2\x8e\xf1\x0e\x67\x72\xdd\xc4\xc4\xa8\x65\xbf\x89\x51\xaf\x89\x91\x91\x81\xf1\xc1\xed\x93\x63\x38\x9d\x77\xec\x9c\x24\x69\xc5\x8b\x39\x71\x59\x75\xaa\xc2\xb4\xd2\x7d\xb8\x11\x9c\xfb\x56\x0e\xf5\xd4\x4a\x2d\xc9\x68\xd5\x2d\x19\xb1\x53\x1e\x6a\x52\x45\xe9\x07\x56\xfa\x46\x46\x95\xa9\x1b\xe5\x1e\x6f\x88\x04\xe7\x76\xb7\x36\x32\x28\x74\x30\x59\xbb\x6f\x5b\x32\x26\x74\xb8\xc8\x76\x97\xb6\x65\x48\xc5\xa1\xa4\xed\x8e\xac\x67\x44\x52\x60\xf9\xed\x7a\x45\x25\x38\xb7\xbb\xab\x9e\xfa\xa0\x03\xe8\xd9\x3d\xd3\x92\xe3\xa0\x6c\x7f\xa2\x76\x2f\xac\x27\x33\x86\x3e\x25\x59\xb7\x3e\x2f\xec\x6e\xc7\x92\x17\xd1\x95\x9d\x97\xe3\xdf\x4a\xed\x3d\x7c\x93\x13\xa1\x66\x77\x31\x77\x7a\xa2\x3a\xf4\x51\x2f\xc7\x04\x17\x76\xaf\x4b\x6a\x5e\xe7\xf0\xe9\xc6\xc3\xe9\xe0\xc2\xee\x7c\x66\x4e\x82\x8e\xa4\x6e\xf7\x41\x5b\x2a\xa2\x38\xe1\x61\x2c\xec\xae\x98\xd4\xb7\xa3\xa3\xa8\xdb\xbd\xb1\x9e\x78\x1c\xfb\x76\x40\x70\xd1\x05\x18\xc4\x5f\xe2\x89\x9e\x6b\xe8\x7d\x13\x7a\xad\xfe\x03\x46\x02\x76\x5a\x76\xdf\xac\x25\x1c\x35\x5b\xe1\xb9\x05\x62\xa2\x22\x25\xd7\xd0\xc8\xdb\xbd\xd4\xcc\x33\x32\xa8\xed\x65\xa0\xba\xea\x3e\x6f\x3b\x04\x97\x76\xb7\x35\x13\x0a\xf4\x2e\xbc\xec\x4e\x6d\xcd\x1c\xf4\xe0\xe9\x58\x86\x76\x97\xae\xa7\x08\xef\xf3\xea\x48\x70\x69\x77\x72\x33\x17\xa8\x45\xa0\x3a\xa3\xe6\x17\xa4\x82\x4b\xbb\xaf\x8b\xd8\x5f\x60\x9f\x7c\xf1\x51\x69\xbc\xfe\xa6\x5e\x70\x69\x77\xf3\x6f\x0d\x35\xc9\xa3\xa6\xf1\xbf\x00\x00\x00\xff\xff\xc7\x1c\x89\xcb\x08\x3c\x00\x00") + +func fontsScriptFlfBytes() ([]byte, error) { + return bindataRead( + _fontsScriptFlf, + "fonts/script.flf", + ) +} + +func fontsScriptFlf() (*asset, error) { + bytes, err := fontsScriptFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/script.flf", size: 15368, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSerifcapFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x55\x4b\x6a\x1c\x41\x0c\xdd\xf7\x29\xde\x62\x16\x12\x34\x51\x32\xf8\x00\x8d\xf7\xb9\x41\x81\x70\x9c\x19\x30\x84\x10\x26\x64\x91\xdb\x07\x7d\xaa\xaa\xab\xc6\x33\x04\x42\x76\x93\xd8\xed\x6e\x95\x3e\x4f\x7a\x7a\xdd\xe7\x6f\xe7\xe3\xcb\x01\x4f\x78\xc2\xf1\x23\x3e\xe1\xb8\xfc\x3c\x5d\xde\xce\xaf\x2f\x3f\xf0\xe5\x37\x9e\x2f\xbf\x5e\x4f\xf8\xfc\x01\xcf\x6f\xdf\xbf\x9e\x2e\xcb\x72\x38\x6c\xfb\xdf\x6d\x81\x62\x5b\x04\x65\x5b\x8a\xca\xb6\x90\x72\x18\xcd\x4c\x6a\x8f\x0b\xec\x5f\xfd\xb3\x2d\xf9\x7f\x77\x03\x85\xfd\x78\xb4\x80\xdd\x57\x20\x96\x02\xa4\x52\x73\x6a\xf8\x40\x95\xbd\xa6\x32\xd8\xca\xaa\x0a\x1a\x92\x2c\x18\x17\xbb\x7a\x16\x31\x4f\x02\xd9\x7d\xa9\x10\x3d\x59\x31\x93\x27\xa2\x48\xd3\x51\x39\x28\x3b\x56\x02\x47\x3f\x66\xf3\x02\xa4\x9c\xf9\x51\x2f\xb5\x7c\x6f\xd8\x10\x5b\x94\xf6\x29\x5c\x45\x50\x8d\xd0\x6c\x9b\x1d\xae\x20\x00\x65\x0f\x0e\x16\x02\x87\x4b\x20\x07\x8c\xd2\x3a\xf7\x4c\x70\x23\x53\xd4\x6c\x33\xf3\x27\x23\x48\xa0\x46\x51\xd1\xab\x43\xf6\x41\x9b\x8f\x81\x95\xb1\xe6\xca\xd9\x7a\xef\xbc\x47\x5b\xce\x20\x61\x8a\xce\xe0\x60\x6a\x8d\xfd\xc8\xc3\xb1\xee\xd4\x6b\x1e\x62\xb5\xc3\xf7\x23\xab\x15\x80\xc4\x86\x34\xfa\x63\xdf\x76\x73\x1d\x8d\x95\x9e\x9b\x3b\x31\x10\xb6\x7f\xba\xb3\x30\x57\x73\x94\xd8\x5b\x9e\xb6\x29\x07\x6a\x34\x79\x79\xd5\x12\xe0\x68\x28\x0e\xac\x4e\x23\xd6\x99\x0e\xc7\xac\x01\x5a\x03\xf5\xa4\x0c\x34\x74\x81\x4f\x47\x84\x49\x21\x47\x92\x69\x0d\xa6\x43\xee\xab\xb7\xaf\x2b\x25\xf7\xae\xc9\xd6\xcc\x4c\xb1\x7a\xaa\x34\xb5\x74\x63\x2f\x6b\x5e\xe4\xa1\x6a\x3d\x6e\x64\x66\x4b\x1c\xd3\xe2\x5c\x7d\xe5\x46\x15\xb2\x67\xf6\xb1\x32\x35\xb6\xba\x43\xad\x5f\x24\x31\x98\x06\xa3\x90\x14\xe9\x2f\x82\x70\x2b\x54\x9d\x68\x28\xf5\x17\xf2\xab\xcc\x95\x98\xa0\x4c\x13\xbc\x11\xad\x65\x64\x6f\xbd\xd5\xea\x1d\xa5\x69\xdd\xbe\xaa\x4f\xa6\x58\x33\xad\x3a\x9d\x49\x22\xce\x95\x6f\xa3\x1e\x1d\x4a\x11\x99\x33\xb4\x11\x49\x89\x37\x94\x89\xcf\x85\x8b\xb0\x8d\x99\xd2\x89\xfb\xb0\xe7\x51\xb7\x2c\x21\xfe\x49\xfd\x8a\xe1\xd5\x10\x8c\x0e\x9f\x8b\x94\xa2\x37\x12\x4b\x9f\x1a\xbe\x16\x5c\x1d\x7c\xa0\x88\xf7\x30\xb6\xe9\xb3\xf4\x8e\xda\xed\xa1\xcc\x1f\x93\x87\x86\x1f\x1a\x7e\x68\xf8\xdf\x35\xfc\xff\x6e\xfe\x04\x00\x00\xff\xff\x48\xe3\x7f\x45\xd1\x0a\x00\x00") + +func fontsSerifcapFlfBytes() ([]byte, error) { + return bindataRead( + _fontsSerifcapFlf, + "fonts/serifcap.flf", + ) +} + +func fontsSerifcapFlf() (*asset, error) { + bytes, err := fontsSerifcapFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/serifcap.flf", size: 2769, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsShadowFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x5a\x5f\x6f\x9b\xca\xd3\xbe\xf7\xa7\x78\x2e\xd0\xdb\x46\x4a\x82\x21\x89\x13\x4b\xd5\x91\xa9\x4d\x12\x54\x07\x7c\x30\x6e\x4f\x25\xa4\xad\xdb\x90\xc6\x3a\x8e\x1d\xf9\xcf\xa9\x2a\xf9\xc3\xbf\x5a\x96\x5d\x76\x61\x17\x93\xfe\x72\x81\x89\xcd\x3e\x33\x3b\x33\xcf\xcc\xec\x2e\x4f\xcb\x27\x77\x6e\xe1\x0a\x97\x70\x7a\xe8\xc2\xe9\xa2\x8b\xcb\x7e\xdf\xed\x4c\x9f\xe7\x8f\xeb\x5f\xf8\xfe\x1b\x77\xcb\x6c\xb5\xc2\xf0\x79\xfe\xfa\x9a\x2d\x97\xe8\xd9\xfd\x0b\x9c\x9d\xe1\xfb\x7c\x9b\x3d\x62\xbd\xc2\x74\x37\x5f\x3d\xce\x37\x8f\xf8\x3f\x4c\x5f\xd8\xb0\x4e\xb0\xfa\xb1\xdc\x3f\x66\x5b\x04\xd3\x08\xe3\xf9\x6e\xb1\x3a\x73\x3a\x4f\x8b\x9f\xcb\x6c\x87\x4d\xb6\xcc\xe6\xdb\x0c\xee\xb9\x43\x71\x1c\x17\xde\xfe\x27\x9c\x7e\xff\xb2\x33\xc9\x36\x2f\x8b\xed\x76\xb1\x5e\x61\xb1\xc5\x73\xb6\xc9\xbe\xff\xc6\xcf\xc5\x7f\xd9\x0a\xbb\x35\x5e\xd6\x8f\x8b\xa7\xdf\xd8\x3d\x2f\xb6\x78\x5a\xaf\x76\xa7\x98\x6f\xb1\x5c\xaf\x7e\xd2\xcf\xdd\x73\xd6\xc9\x1f\x58\x64\x9b\x77\x5b\xac\xe6\x2f\x19\xc5\x78\x5d\xce\x7f\x30\x35\xe7\xf8\xb1\x7e\x79\xc9\x56\x3b\x2c\x17\xab\xec\xbc\xd3\x79\x60\x4f\x3f\xd2\x39\x4e\xe6\xfb\x25\x3e\xee\x37\xbb\xf5\x0a\x1f\xb6\xeb\xe5\x7e\xb7\x58\xaf\x06\xd9\x7c\xb3\x7b\x5e\x2e\x56\xff\x9e\xaf\xb2\xdd\x5f\x70\x5c\xbb\xdf\xa3\x8a\x2c\xd8\xec\xb0\xca\x7e\xe1\x75\xbe\x99\xbf\x64\xbb\x6c\xd3\xd9\xee\x5f\x5f\xd7\x9b\x1d\x03\xbc\x0d\xee\xe8\x5c\xe7\xab\x47\x7a\xfb\x65\xb1\x3a\x07\x1e\xe6\xbf\x31\x5f\x6e\xd7\xf8\x9e\x61\xbb\x5c\xfc\x7c\xde\x2d\x7f\xe3\x85\x6b\xf1\xb4\xde\xe0\x7b\xb6\xdb\x65\x1b\xec\xb7\x59\x67\xfd\x94\xc3\x3f\xed\x97\xcb\xb3\x5f\x8b\xc7\xdd\xb3\xfd\x6f\xb6\x59\xd9\xdb\x97\xfd\xf6\x19\xf3\xe5\x2e\xdb\xac\xe6\xbb\xc5\x7f\xd9\xf6\x14\xdf\xf7\x3b\x3c\x66\x4f\xf3\xfd\x72\x87\xf5\x7e\xf7\xba\xdf\xd1\x99\x87\x51\x82\x1f\xcf\xf3\xd5\xcf\xec\xf1\xbc\xd3\x81\x65\x0d\x74\x17\x7a\x3d\x58\xe2\x4a\xd8\xf5\x84\x5e\x01\xb0\x9f\x91\xff\xfb\x19\x9f\x2d\xd0\xaf\x2d\x0b\xca\x27\x7f\x10\xf4\x51\x1c\xd8\xb7\x04\x38\x10\x1c\x0a\x3c\xf9\x1f\x80\x1c\xa8\x9c\x72\xb0\x82\x90\xcb\xb0\x40\x98\x0e\x29\x21\x48\xe9\xcd\x7b\x00\x36\x1f\x6e\xe5\x4f\x93\x13\xf1\x95\x65\x17\xaa\xd9\x85\x74\xbb\x9c\x03\x9f\x06\x08\x4e\xb8\xe6\x04\x29\x83\xb5\xde\xe3\x1b\xf0\xa1\x10\x45\xec\xd4\xb6\x24\xad\xf2\x71\x39\x0e\xc3\xb7\xa4\x2b\x57\xd9\xb2\x0b\xeb\xa1\xf6\x91\x92\x34\x37\x30\x95\x05\x3e\x3d\xdd\x27\xb1\x2d\x3e\xff\x94\xa9\x98\x82\xcf\xd7\x22\x00\x49\xd9\xe3\xa9\xad\x58\x6d\x20\xdd\x72\xe3\x15\xb6\x47\x69\x6b\xdd\x90\xca\xb5\x9c\x63\x05\x91\xdf\x11\xfa\xc7\x75\xb6\x70\x1c\x51\x89\x20\x70\x2b\xe9\x1c\xa5\xa0\x0d\xb8\x77\x0a\x33\x82\xc6\x93\x72\x97\x7b\xc9\xaa\xc8\x27\xec\x37\x6e\x4e\x6e\xd5\x03\xf7\xe5\x80\x4d\x41\xb8\x01\x27\x7c\x00\x07\x93\x27\xa8\x0c\xe1\xb2\xea\x83\x89\x4e\x13\x89\x01\x32\x19\x08\x41\x09\x5f\x8d\xfd\x7c\x5c\x2e\x1e\xec\xae\x9d\x28\x66\xc3\x82\x2d\xdc\x62\xef\x9b\xed\x44\x48\xf1\xb3\xe4\x0a\xe1\x8b\xaa\x33\x64\xda\xa8\x7e\x69\x96\x62\x78\x50\x48\xd6\x0d\x91\x82\x86\xd1\xab\x1a\x40\x95\x9f\x2b\x11\x5b\x84\x57\x31\x25\xc1\xb7\x94\x13\xa7\x46\x96\xd2\xdf\x75\xcf\x2b\x9a\x95\x58\x48\x2d\x59\x96\x3c\x09\x66\xdb\xea\x03\xcc\x9f\x54\xed\x6a\x9a\x2c\x5d\x6c\xd9\x20\xdf\x44\x8c\x97\xe6\xa2\x06\x3b\x2d\xd4\x4a\xb9\xfb\x0b\xbf\xa5\xc2\x4b\xcc\xd0\x85\x73\x04\x28\xb1\xf3\x28\x4b\xe5\x4c\xc6\x83\xac\xf0\x65\xa9\x41\xc9\x2c\x43\x90\x71\xeb\xe4\xc9\x05\x50\xee\x52\x1d\x6b\x74\xd8\x47\xa4\x58\x02\xc7\x22\x44\xd0\xc7\xd2\x24\x1f\xdd\x90\x52\x31\x31\x98\x88\xef\x8e\xce\x45\x0e\xe5\xba\x14\xcd\x0c\x72\x4b\x17\xc5\x52\x4e\x32\x22\x17\x11\x91\x85\x20\x7d\x2a\xe8\x3c\x18\x2d\xf1\x55\x51\x84\xd3\x46\x66\x51\x2
5\x58\x51\x78\xc7\x2b\xd2\x39\x77\xfa\x41\x0e\xf6\xe2\xe9\xba\xc7\x8e\x59\x95\x16\x9e\x62\xbe\xa9\x5d\xf3\x9d\x6e\xbe\xd2\x10\x4a\x12\x3e\xb8\x1c\x92\xd6\x86\xfc\x41\x86\x97\x52\x9c\xe2\x88\x22\x7b\x1b\xdc\xdd\x2c\x45\x31\x58\x93\x14\x7c\x28\xa4\xa0\x3e\x44\xca\xdc\xa9\x52\x62\xb8\x4b\x49\x2d\xe2\x79\xba\x28\x2b\xaa\x28\x08\xe5\x9d\xa6\x6e\x6b\x83\xb1\xd9\x6c\x29\x6b\x24\x8a\x36\x89\xfd\xc7\x1f\xa2\xff\x95\xfd\x44\x5a\x4d\xfe\x15\x04\x15\xa4\x8a\x53\x85\xd2\xa1\xc9\x80\x1c\x4a\x80\x58\xdc\x70\x74\x6c\xd5\xc6\x5c\xef\x62\x8c\x18\x24\x99\x4b\x5b\x54\x69\xcd\x31\xf5\x1d\x6a\xf0\xb3\x8c\x72\x30\x76\x72\xf9\xc3\xbc\x1a\x40\xd4\x03\x08\x0b\x58\x62\xee\x55\xe5\xf5\xbd\x09\x8f\x0d\x06\x6b\xa7\x6c\x94\x25\x1a\x6c\xe9\x43\xd3\xe8\xe9\xee\x78\x5b\xc6\x79\x2d\x8a\x64\x7a\xb0\x0c\xad\x6b\x39\xda\x12\x45\x48\xae\xd8\xa7\x9a\x7c\x28\xe5\xda\x7a\x05\x39\xd7\x97\x76\x21\xa4\xb0\xf0\x7b\xab\x2c\x1f\x15\x27\x94\xbc\x69\xad\x93\x2c\xa0\x68\xea\xf3\x16\xc3\x20\x40\x72\xb3\xa8\x19\x3c\x85\xe8\x7b\x05\x93\x26\xec\xee\x50\x16\x67\x83\x75\x0e\x22\x15\x1e\xea\xd5\xe2\xc4\xd2\xae\xc0\x4a\x25\x44\x23\x24\x6a\x45\xe5\xa6\x2a\x5f\xa9\x13\x10\xb9\x4b\x57\x1e\x8e\x88\x96\x6c\x40\x27\xf4\xad\xe2\x72\xb5\x2c\x54\x4a\x43\xdd\x8c\xda\x88\xd1\x55\x94\xaa\x47\xdb\xb4\x9c\x8d\x52\xac\xf3\x6a\xa1\x78\x83\x7f\x51\x2c\xa6\xe4\x48\xb3\xa0\x34\x12\x02\xb8\x16\xca\x6a\xe0\x8b\xc5\x2c\x2b\x09\xf5\x62\x2d\xc5\x23\xff\x3f\xd5\xb5\x0d\xd5\xde\x45\xa9\x02\x26\x86\x14\xeb\xca\x54\xc9\xe4\x22\x89\x13\xbb\x9e\x3f\xd5\x10\x90\x12\xbf\x54\x07\x24\x80\x1a\x46\x4d\x63\x29\xf7\x7f\xe3\x91\xa9\xcd\xf8\xc7\x27\xa9\x76\x93\xc2\x40\xf9\xe2\x57\x59\x0c\xe8\xfa\x2e\xb1\x1a\x65\xd9\xfd\x03\x55\x05\xf2\x37\x10\x6b\xf7\x3a\x4b\x4a\xae\x88\x82\xa0\x0e\xfd\x86\xbf\x14\x78\xb6\xae\xca\xf3\x3c\x0a\xb7\x57\xb7\x50\x6a\x5b\x29\xe4\x04\xa9\xa0\xfe\x1b\xdb\x7c\x72\x22\xb2\x86\xa6\x05\xd2\x2f\x0c\xc5\x90\xf6\x6b\x6e\x59\x4a\xbb\x4c\xad\x55\xec\xc8\x8a\xf5\x98\x62\x9a\x68\x17\x65\x40\x64\xc1\x03\xdf\xed\x39\x14\xbc\x2b\xe8\x3a\xe8\x38\xbd\x2e\x10\x46\x67\x1f\x63\xdf\xfb\x84\xe9\xc4\x1b\xfa\x94\xde\x2c\x77\xea\xae\x74\x88\x03\x04\xe1\x67\x3f\x4e\xfc\x11\xfc\x7f\x86\x63\xef\xc1\x4b\x82\x28\xc4\x83\x17\x7f\x3a\x96\xd2\x9d\x9e\x0b\x0c\xfd\x30\xc1\x34\xb8\x0b\xe5\x28\xb1\x8a\x2d\xaf\xb2\x3c\x02\x28\x22\xa0\xd8\xf2\x72\x7a\x17\xc0\x24\x9a\x85\x23\x31\x1a\xd6\x29\x29\xe2\x01\x38\x90\x72\x69\x58\xb6\xc4\xe4\xb4\xda\xea\x17\x9a\x5c\x02\xc3\x59\x1c\xfb\xe1\xf0\x6b\x81\x97\x42\xe2\x8f\xf0\x0c\x8f\x3a\xaa\x53\x1e\x74\xd5\x4d\x32\xa7\x77\x05\x7c\xf5\x43\x81\xc2\xf3\x42\xd9\xe5\xca\xfd\x6e\xbd\xb7\x75\x7a\x3d\xe0\x63\x1c\x7d\xf2\x43\x7c\xf4\xe2\xba\xe1\x54\xda\x39\xbd\x6b\x60\xea\x0f\x73\xab\x97\x96\xe0\xb1\xc5\xfb\xb1\x14\x29\x5f\xb5\xa7\x79\x86\x1b\x74\xf0\x9e\x70\x36\x3a\xbd\x1b\x60\x14\x78\x7e\xec\x4f\x83\xa9\x12\x6a\x39\x1a\x0c\x77\xb2\xd6\x7d\x60\x18\x4d\xbe\xc6\xc1\xdd\xbd\xe4\x50\xfa\xb3\x20\x2c\xe8\xbc\x79\xb0\x23\x5f\xe2\xcb\x8b\x7c\x72\xe0\x29\x34\xe5\x2b\x85\x1c\xfa\xba\x0b\xdc\xfa\x0f\x41\x18\x84\x3e\xa2\x78\x14\x84\xde\x18\x41\x38\x0a\x86\x5e\x12\xc5\x32\xe9\x4a\x12\x34\xef\xd2\x39\xd7\x0e\x30\xf6\x6f\x93\xb3\x49\x14\x84\x49\x10\xde\x61\x14\xcd\x3e\x8e\x7d\x78\xe1\xdd\xd8\xc7\xdf\xb3\x28\x91\xe3\x38\xdf\x9c\xe0\xa9\x14\x62\x67\x45\xda\x5b\xa9\xa6\x6d\xe7\xda\x45\xbe\xed\x2c\x5c\x22\x92\x32\x11\x8b\xe6\xfc\x4f\x38\xb6\xa2\x26\x43\xb9\x00\xa6\xd1\x6d\x82\xfb\xaf\x93\x7b\x3f\xd4\x37\xbc\xc7\xe6\x7a\x09\xc4\xfe\x5d\x30\x4d\xfc\xd8\x1f\x99\x5d\x23\xe6\x93\xff\x02\x5b\xb8\x06\xe4\x40\x52\x83\x6b\xae\x80\x07\x6f\x18\x47\xa1\x49\x0d\xdd\x5d\xa9\x5a\x0f\x18\xf9\x77\xb1\xef\x
73\xb5\x2c\x29\x17\x72\x97\x8a\x85\x91\xa5\xb6\x15\xce\xf5\x35\x30\x19\xcf\xa6\x67\x0f\x41\x38\x9b\x2a\x59\x04\x86\xbd\x5f\xdd\x52\xdf\xb9\xbe\x01\xa6\xb3\x89\x1f\x4f\x87\x71\x30\x49\x90\x7c\x89\xf2\xd1\x27\xda\xe2\x59\x29\x50\xce\x75\xbf\x32\xfa\x3e\xf6\xfd\x9c\xe5\xc5\xe6\x97\x68\x74\x6c\xed\xf8\x9b\x2e\xe0\x0d\x67\x89\x0f\x6f\x48\xb3\x61\xa7\xa8\x8c\x96\x61\x9f\xdd\xb9\x71\x80\x87\x60\x18\x47\x6a\x70\x99\x96\xc5\xe7\x82\x13\xa2\xe3\x73\x6e\x5c\x60\x12\x8c\x87\x71\xf4\xa5\xb4\xbc\x18\x41\x6d\x5f\xee\x55\x1e\x44\xb0\x12\xb5\x69\x67\x48\x17\x54\x99\xd1\x68\xec\x63\x14\x25\x3c\x43\x29\xfb\x93\x8a\xee\x34\xd1\xfa\xa3\x60\x3c\xf6\x3a\x65\x20\x97\x99\x4d\x6c\x78\xd2\x67\xaf\x54\xc3\x46\xa1\x5f\x5b\x43\x96\x3e\x19\x74\x4a\x9d\x7a\x34\x2a\xa7\xc3\xd9\xd8\x9c\x31\x8a\x1c\xce\xbd\x52\x89\x5d\x25\xcc\x6e\xae\x81\x3c\xad\xb5\xcc\x16\xa9\xb2\x53\x5a\x6e\x85\xf2\x78\xb0\x6b\xb5\xdd\xb9\xb9\x01\x3e\xcf\xc6\x77\x5e\x8c\xdb\xd8\x63\xc9\x3c\x0a\x29\xb4\x17\x27\x7e\x9c\x4f\x9b\xad\xf6\x79\x1f\x4d\x0b\x7a\xee\x1a\x56\xd7\x0e\xb4\xa1\x22\x65\xb8\xdb\x40\xb9\xed\x2a\xfd\xe5\xc2\xfa\x7a\x61\xf7\xde\xf8\x56\x91\x24\x04\x09\x2e\xe4\x72\xca\x5d\x07\x2a\xa6\x56\x4f\x99\x90\x7e\xb7\x2e\x24\x67\x06\x9f\xd3\x94\x51\x44\x99\x15\xc9\x9b\x62\x31\x2d\xea\x1c\x75\x5e\xe6\x89\x31\xa1\x72\x3b\xf2\xf7\xcc\x9f\x2a\x39\x5c\xec\x3e\x8b\x36\xc3\xb6\x0c\x8b\x6f\xa7\xef\x02\x63\x2f\x09\x42\x0c\xbd\x49\x90\x78\x63\x8c\xfd\x24\xf1\x63\x78\xf8\x12\x24\xf7\xb8\x8b\xbd\xcf\x7e\xb1\xad\xce\xf3\x5b\x6a\x55\x56\x6a\xb9\x79\xaa\x85\xa1\x7f\xd1\x8c\x9c\xa7\x02\x66\xdc\xb7\x22\x5f\x36\x23\x0f\x83\x78\x38\x7b\xb8\x1d\xfb\xff\xd0\x61\x76\x2a\x4e\xe2\x5a\xc2\x5f\x35\xc3\x27\xc1\x78\x44\x15\xb7\xcb\xf3\x80\xb6\xc8\xbd\x66\x64\xa9\x33\xf9\x5f\x56\x03\x4e\xff\xba\x59\x4e\x4c\xa9\xed\x7d\x8c\x72\xcf\xd2\xfe\xe4\xc4\xfa\xa3\xd3\x05\xa7\x7f\x63\x12\xc4\x90\x2d\x25\xdf\xd8\x10\x9b\xf5\xf9\x9a\xbd\xdc\x97\x66\xf4\x32\x10\xac\x6f\x90\x31\x2c\xbc\x2d\xb2\x6c\xfb\x73\x8b\x9c\x21\x83\x8e\xdb\xed\x1a\xb0\x7d\x43\xf4\x4b\x47\x10\x8d\xa5\xd6\xed\x3a\xcd\xc8\xb5\xe8\x6f\x8f\x6c\x62\xac\xdf\x14\xfd\x2a\x3c\x31\x1e\x80\xba\x5d\x13\x6d\xfd\x5a\x8c\x4a\xdd\x73\x6b\xe5\x4d\xd4\x0d\x14\x83\xa7\x62\x7d\xc3\x10\xa4\x44\xa9\x2c\xec\xdd\xae\x89\xab\x81\x6a\x66\xbe\x17\x54\x39\xa8\xa9\xe3\x99\x18\x1a\xd4\x8d\xcb\x4d\x7b\x1c\xd4\x44\xc7\xa0\xc1\xa4\x0c\x56\x5a\xd8\xe5\xdf\x11\xb5\x22\xe4\xe8\x26\x0e\xfa\xc9\x3d\x0b\x76\xe9\x84\x42\x3a\x93\xcb\xd7\x28\xbc\x8c\xd6\xce\x2b\x18\xb4\x89\x7a\xa1\x26\x13\x16\xe7\x42\x45\x2f\x26\x36\xfb\xd2\x4a\xc5\x71\x1d\x13\xe7\x22\x53\xc5\x69\xb9\xa3\xe1\x3a\x26\xce\x45\xa6\x8a\xd3\x1a\xd9\xc4\xb9\xa8\xb1\xe2\xb4\x86\x37\x71\x2e\x32\x55\x9c\xd6\xc8\x26\xbe\x45\x4d\xa1\xd7\x1a\x9d\xae\x8a\x66\xe3\x24\x98\x8c\x69\xc7\xa9\xac\xcc\xcb\x13\x93\x41\x07\xa7\xc0\x3b\xb6\x95\x20\xbd\x6c\x93\x23\x98\xf8\x56\xe8\x37\x4d\xe2\xe8\x93\xcf\x54\xb2\xb9\x4a\xfc\x90\x52\x2c\xdd\x88\xad\x53\xce\xc4\xba\x99\x29\xb7\xb7\xdc\x06\x73\x1d\x13\xe3\x66\xa6\xdc\xde\x1a\xd9\x44\xb8\x59\x63\x6e\x6f\x0b\xef\x9a\xa8\x37\x6b\xca\xed\xad\xd1\x4d\xf4\xfb\x5a\x31\x0b\x8f\x62\xfd\xb1\x60\x7d\xa7\xc8\x75\x4d\xf4\x4b\xee\xa3\x38\x54\xea\xbd\x78\x27\xc3\x12\x0b\x51\xdd\x49\xb1\xeb\x0a\xca\x4d\x1f\xbc\xb1\x00\x9c\xde\x7b\xf1\x04\xd3\x37\xee\x28\xba\xee\xa5\x16\xcd\xd8\x43\xb7\xdc\x3d\x75\xdd\xab\x26\x5c\x29\xce\x88\xf5\x36\xdc\x5e\x13\xae\x31\x9b\xb5\x04\xbf\x6e\x02\xd7\xe5\xb2\x96\xb8\x37\x4d\xb8\xa6\x4c\xd6\x12\xbb\xdf\x84\xad\xf6\xcb\xef\xa5\x97\xa3\xde\xb5\x01\xbf\xe8\xea\xc1\x7d\xb9\xd7\xe5\xda\xf2\x6d\x94\x1c\x53\x3a\xd4\xd4\x6c\xe8\x32\x70\x47\x0b\x5e\x6b\x8e\x25\x21\xe6\x23\
x59\xde\x17\x5f\xb8\x5a\x50\x5d\x57\x3c\x68\x77\x08\xeb\x5e\xe8\x09\xa7\x6b\x87\x5b\x43\xea\x59\xa7\xeb\x83\xd5\x92\x7c\x0c\x57\xcf\xba\xa6\x06\x58\x2a\x99\xe2\x5d\xb7\x54\xd7\xad\x5d\xe8\x99\x57\x6f\x7f\x07\xcd\xef\x36\xba\x17\x7a\x96\xa9\x7d\x2f\xe1\x2f\xaa\x6a\xcf\x25\xdc\x0b\x3d\xa3\x0c\xbd\x6e\xfd\x0d\x89\x83\xbc\x1b\xe5\x5e\xe8\x39\xd4\xd8\xe4\xb6\x7c\x0f\xc6\xbd\xd4\x53\xa8\x68\x71\xcb\x83\x45\xda\x61\x94\x07\x18\xdf\xa4\xd7\xbc\x6a\xaf\xe3\xb8\x97\x7a\xe6\xe8\x7a\xdb\x7c\xd2\xef\xda\x9d\x65\xbb\x97\x7a\xf2\xb4\x68\x6f\x1b\xcf\xc5\xdc\x4b\x3d\x83\x5a\x34\xb7\x47\x70\xf5\x34\x6a\xdb\xda\x1e\x01\xd7\x73\xa9\x45\x63\x7b\x04\x57\xcf\xa3\x96\x6d\xed\x11\xec\x6b\x60\x14\x7c\x0e\xa6\x95\x86\x96\x87\xe7\x49\x7d\x81\x4b\x4e\xea\x21\xab\x27\x57\xb5\xb1\x55\x32\x33\x6c\xa1\xa0\x38\x96\xa8\x35\xb7\x0c\x5d\x4f\xb6\x37\xf6\xb6\xf5\x5a\x75\xa5\x27\xda\x1b\x3b\x5b\x0d\xae\x9e\x6c\x7f\xd2\xd7\x6a\xc0\xf5\x8c\xfb\x83\xae\x56\x83\xad\x67\x5d\xb5\xa7\x3d\x66\x90\xea\xbb\x0b\xee\x95\x9e\x75\xf5\x8e\xb6\xe5\xcb\x2d\xee\x95\x9e\x69\x5f\xff\xc0\x08\x55\x65\xff\x3f\x00\x00\xff\xff\xf9\x5f\xcd\xb2\x32\x34\x00\x00") + +func fontsShadowFlfBytes() ([]byte, error) { + return bindataRead( + _fontsShadowFlf, + "fonts/shadow.flf", + ) +} + +func fontsShadowFlf() (*asset, error) { + bytes, err := fontsShadowFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/shadow.flf", size: 13362, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsShortFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x53\xc1\x8a\xdb\x40\x0c\xbd\xeb\x2b\x74\x08\xac\x0d\xc9\x0c\xdd\xf6\x50\x68\x58\x04\x7b\x6a\xc9\x6d\x2f\x4b\x33\x30\x71\x12\x27\xcd\x92\xd8\x69\x12\xb7\x2c\x08\x7f\x7b\xd1\x8c\x67\xac\xcd\xad\x90\xc0\xbc\x91\xe6\xe9\x49\x4f\xde\x1d\x77\x8f\xd5\x04\x3f\xe3\x23\x7e\xc5\xd9\x27\xfc\x02\x2f\xbf\xda\xcb\x0d\xab\x66\x8b\xd7\xbf\x75\x7d\xc3\xf5\x3b\xbe\x74\xeb\xd9\xcf\xfa\xd2\x62\xb1\x6e\xb7\xed\x89\xba\xfd\xf1\xdd\x74\x9b\xab\x39\x34\xdb\x43\xd5\x54\xa6\xde\x76\x25\x3c\xb7\xa7\xf3\xb1\xbe\xd5\xdb\xf0\xf8\xdc\x5e\xe4\x78\x6b\x71\x77\xd8\x1f\x23\xcf\x8f\xae\x6a\xf0\xb9\xba\x60\xf1\xb6\xa1\xb7\x6e\xdf\xd5\xb7\xda\xfc\xee\x0e\x27\xd3\x6d\x4e\xa6\xbe\x96\xf0\xfd\x74\xbe\xb4\x7f\x84\x62\x5f\x1d\x1a\x5d\x1b\x60\x42\xf1\x47\x80\x4c\x80\x86\x00\x91\x08\x1e\x1e\xc2\x21\x02\xe6\xf8\x0f\xa0\xe0\x15\x81\xe7\x52\xa0\xe0\xd6\x12\xd8\x36\x05\x4b\x82\xe2\x75\x00\x53\x02\x61\x20\x02\x4b\xe0\xe2\xc9\x05\x10\x4e\x2c\x0f\xd9\x25\x1e\xcf\x5e\x34\x60\xc2\x38\x10\x10\x78\xaf\xb4\x20\x81\x89\xd7\x28\xef\xd3\xb5\x75\x04\xce\x26\xf1\xc2\x12\xf9\x22\x2e\x09\xac\x4f\xc1\x92\xc0\x94\xe9\x99\x64\xf6\xdc\xa7\x4c\x39\xf9\x14\x44\x61\x2f\x7c\xee\xb4\xd7\x05\x8b\xbe\xfc\x10\x0c\x18\xed\x28\xdf\xa8\x69\x86\x43\x52\x37\x0e\xc3\x13\xcc\xee\xc7\x22\x34\x9c\x9b\xf5\xa2\xc0\x09\x73\xdf\x07\x45\x4c\x60\x67\x9c\xe5\x96\xf1\x1f\x79\x57\x04\x6e\x9a\x2c\x73\x04\x9c\x2a\x2e\x7b\x82\xa5\x57\x20\x17\xb0\x3d\x12\x38\x3f\x12\xca\x91\x71\xc4\xe1\x87\xc3\x76\xf8\xb4\x03\xc2\x1c\x9d\xa3\x48\xc6\x3e\xd7\xb5\x81\x61\xa0\x88\x57\x18\xae\xdc\x78\xf5\xc1\x2d\xd6\x1d\x87\xc8\xab\x8e\xa4\x32\xc5\x4a\x59\x13\x2d\x1b\x97\x25\x48\x66\xd5\x87\x4c\x9b\xad\x8e\x07\x45\x8e\x93\x8a\x30\x75\x1b\x0b\x46\x10\x0c\x1a\x29\x83\xdb\xb9\xad\x5e\xf5\xe8\x24\xc7\x65\x25\x6a\x2e\x03\xd9\x60\x1e\xe6\xd5\x1d\x1c\x87\x74\x5d\xb0\x9e\x5d\xde\x37\x2f\x1b\x95\x00\xab\xb4\xb8\x07\x36\x2f\xf4\x72\x45\xc0\x13\x55\xa7\x88\x2a\xa2\xc8\xfc\xa9\x9a\xd1\x3d\x93\x1d\x1c\x8a\xce\x93\x82\x9c\x33\x95\x22\xcc\
x79\x86\x53\x4d\x15\x8a\x94\x0a\x0c\xae\xa9\x7e\x38\x45\x56\xaa\x1f\x9f\xb7\x64\xa6\x26\x28\x69\x0b\x4d\x9d\x97\x01\x43\x68\x91\x45\x08\x7c\x9a\xdf\x65\xda\xf0\xf1\xe0\x9d\x3f\xf3\xb8\x87\xba\x25\x31\x07\x9f\x86\xb6\x9b\x4e\x99\x63\x8c\x1a\x6f\x00\xa5\x02\x0b\xfe\xbf\x34\x19\xc6\xb7\x14\xf9\x17\x00\x00\xff\xff\x6e\x3d\x1c\xff\xfd\x05\x00\x00") + +func fontsShortFlfBytes() ([]byte, error) { + return bindataRead( + _fontsShortFlf, + "fonts/short.flf", + ) +} + +func fontsShortFlf() (*asset, error) { + bytes, err := fontsShortFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/short.flf", size: 1533, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSlantFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x5b\x6d\x4f\xdb\xc8\x16\xfe\x9e\x5f\xf1\x7c\x40\xba\x20\x51\x06\x1b\x08\x44\x62\x2b\xd2\xc4\x05\xab\x21\xce\x3a\x49\xbb\x95\x22\x4d\xd3\x62\x4a\xb4\x21\x41\x49\xd8\x55\x25\xff\xf8\xab\x79\x9f\xb1\xc7\x4e\xd2\xf6\xde\xee\xaa\x36\xac\xe7\x9c\x99\x73\xce\x73\x5e\x67\x1f\xe7\x8f\xe1\xf4\x00\x4d\x5c\x20\x68\x22\xb8\x40\x70\x8a\x53\x04\x57\x67\x41\xab\x31\x9c\x4f\x17\x1b\x7c\xfd\x81\xdb\x79\xb6\x58\xa0\xf3\x34\x7d\x79\xc9\xe6\x73\x9c\x91\xd6\x19\xde\xbc\xc1\xd7\xe9\x3a\x7b\xc0\x72\x81\xe1\x66\xba\x78\x98\xae\x1e\x1a\xf1\xe2\xdb\xfc\xf5\x21\x5b\x23\x1e\x26\xe8\x4d\x37\xb3\xc5\x9b\xa0\xf1\x38\xfb\x3e\xcf\x36\x58\x65\xf3\x6c\xba\xce\x10\x9e\x04\x6c\x71\x10\xa2\xfd\xfa\x1d\x41\xab\x75\xde\x18\x64\xab\xe7\xd9\x7a\x3d\x5b\x2e\x30\x5b\xe3\x29\x5b\x65\x5f\x7f\xe0\xfb\xec\x9f\x6c\x81\xcd\x12\xcf\xcb\x87\xd9\xe3\x0f\x6c\x9e\x66\x6b\x3c\x2e\x17\x9b\x63\x4c\xd7\x98\x2f\x17\xdf\xd9\x73\xf3\x94\x35\xf8\x07\xb3\x6c\xf5\x9f\x35\x16\xd3\xe7\x8c\xd1\x78\x99\x4f\xbf\x89\xbd\x4d\xf1\x6d\xf9\xfc\x9c\x2d\x36\x98\xcf\x16\xd9\x49\xa3\x71\x2f\xbe\x7e\x60\x07\x1b\x4c\x5f\xe7\x78\xf7\xba\xda\x2c\x17\xb8\x5e\x2f\xe7\xaf\x9b\xd9\x72\x71\x93\x4d\x57\x9b\xa7\xf9\x6c\xf1\xf7\xc9\x22\xdb\xbc\x45\x10\x92\x56\x93\x6d\x64\x26\x4e\x87\x45\xf6\x2f\x5e\xa6\xab\xe9\x73\xb6\xc9\x56\x8d\xf5\xeb\xcb\xcb\x72\xb5\x11\x04\xdf\xc7\xb7\xec\xac\xd3\xc5\x03\x7b\xfd\x34\x5b\x9c\x00\xf7\xd3\x1f\x98\xce\xd7\x4b\x7c\xcd\xb0\x9e\xcf\xbe\x3f\x6d\xe6\x3f\xf0\xac\x76\xf1\xb8\x5c\xe1\x6b\xb6\xd9\x64\x2b\xbc\xae\xb3\xc6\xf2\x91\x93\x7f\x7c\x9d\xcf\xdf\xfc\x3b\x7b\xd8\x3c\x91\xbf\xb3\xd5\x82\xac\x9f\x5f\xd7\x4f\x98\xce\x37\xd9\x6a\x31\xdd\xcc\xfe\xc9\xd6\xc7\xf8\xfa\xba\xc1\x43\xf6\x38\x7d\x9d\x6f\xb0\x7c\xdd\xbc\xbc\x6e\xd8\xc9\xfb\xc9\x08\xdf\x9e\xa6\x8b\xef\xd9\xc3\x49\xa3\x01\xf6\xe7\xe0\xe0\xa6\x21\x9e\xe0\x2f\x07\x07\xe0\x2f\xec\xc9\x5e\xf8\x13\x37\x0d\xf1\xc4\x8d\xf8\x9a\x52\xfe\x24\x20\xec\x49\x40\xd8\xa7\x84\x12\xf6\xe5\x21\x3d\x12\x4b\xf5\x02\x0a\xca\x7e\x8f\x1c\x47\x37\x8d\x9c\xe4\xfc\x6b\x41\x5f\x52\xb7\x69\x83\x52\xf6\x2f\xfb\x81\x52\x02\x42\x40\x28\xa7\x0e\x50\xfe\xab\x9b\x86\x79\x97\x7c\x39\x6b\xc3\xb4\x40\x8e\xbf\x50\xbd\x59\x4e\x02\x87\x14\x38\xc2\x4d\x83\xf0\xff\xc4\x5e\x24\x0d\xf1\xb5\x5e\x78\x48\x8f\x28\xe1\x2b\xd8\x87\xe2\x53\xfe\x42\xa1\x56\xf1\x13\x5b\xbc\x25\x09\x4a\xe5\x96\x0e\x41\x21\xbe\x60\xcc\x31\x21\x39\x63\xcb\x69\x5d\xe3\xa6\x31\xa1\x94\x52\x32\x21\x25\x12\x5c\x08\x87\x4c\x68\xd0\x32\x13\x22\x13\x5f\x96\x4e\x48\xb5\x3a\x04\x33\x22\xb6\x08\x29\x9c\x9c\xe6\xce\x32\x49\x26\x47\x2e\x5e\xa4\x84\x94\x3e\xd5\x69\x1d\xc1\x28\x31\x53\x4a\x72\xc6\x96\x53\x14\x2a\xa1\x34\x87\xd8\xaa\xad\x0a\x77\x19\xa4\x6a\xb9\x66\xa9\xd4\x24\xdf\xb6\xe0\x52\x58\xe6\xfc\x25\x8c\x88\x9b\x50\x89\xaa\xfc\x82\xff\x61\x54\xb9\x48\xb9
\x1d\x7b\x36\x53\xa6\x4a\x8f\xc4\x8f\xfa\x43\x29\x52\x23\x54\x57\xf7\x80\x25\x18\xbf\xee\x85\x74\x85\xc2\xa5\x32\xb8\x7c\x85\xe2\x89\xd6\xbb\xbd\x5a\x2f\x66\xcf\x6b\xb8\xe8\x82\xa5\x0c\x1b\x5d\xc2\xd2\xd8\x4b\x2e\x79\x09\xe9\x4a\x73\x23\x90\xf2\x20\x45\x31\x48\x61\x89\x75\x4a\xf5\x94\x9b\x24\xf8\xf7\xd6\x52\xcf\x26\xa5\x8c\x04\x42\xc5\x06\x95\x4e\xb5\x52\x8b\x5a\x35\xfe\x43\xf1\xe6\x7b\xd4\x1f\xb3\x03\x68\x4d\xea\x13\xcb\x2d\x38\xf0\x2e\x1c\x81\x53\x21\x1a\x61\xd8\x2e\x67\x63\x2a\xfb\xc8\x99\x2a\x40\x53\x0e\x4b\xce\x0d\xf9\x8e\x5a\x2d\x9b\x84\xd4\xd3\x84\xd2\xe3\x5a\x69\xcb\xdf\x48\xee\xdc\x5a\x85\x73\xb1\x5d\xcf\xb6\x4f\x85\x0f\xe2\xf0\x14\xee\x85\x8a\x4d\x08\x0f\x71\xd3\x98\x70\xb9\x61\x42\x27\xae\x7b\x51\x80\x17\xa2\x26\x52\x5d\x96\x4d\x95\x30\xc6\x84\x62\xe8\xa9\xc3\x0a\xa1\x5a\xb4\xa5\x44\x88\x32\x5b\x02\xe3\x09\x7c\x71\x44\xd9\x8d\x16\xa3\x85\x2d\x4a\xf1\x45\x9e\x44\x6b\x82\xcb\x55\x8a\x73\xa2\x8d\x48\x5b\x20\xa4\xfd\x41\xf8\x3f\x02\x22\x3c\x21\xa7\xcc\x95\xca\x16\xe4\x34\xf7\x9a\x2f\xb4\xf5\x72\x4b\x30\xa6\x60\xed\x40\x9b\xae\xd7\x41\x58\x96\x2b\x4d\x97\x48\x69\x13\x01\x05\x58\xe6\x54\x26\x51\xdc\xc5\x44\x19\xb1\x86\xe3\xd6\x5d\x54\xe1\x10\x9a\xa5\xb5\x93\x3a\x1c\x56\x12\xe2\x51\x12\xda\x15\xb9\x6e\xb3\x0a\xd0\x1e\xb9\x08\x8b\x52\x67\xda\x26\x17\x3b\x41\xb1\x92\x14\xaa\x21\xce\x41\xaf\xf6\x52\xf4\x51\xce\xa1\x94\x8d\x50\x13\x1a\x45\x10\x50\xc1\xb5\xb4\x0d\x4f\x10\x91\x7b\x10\x5c\xc9\x3e\x07\x31\xe7\xd0\xc1\xfd\xf8\x5a\xab\x26\x67\xe1\x57\x1c\x82\xc7\x76\x2f\x09\x3b\xa4\xdb\xb9\x41\x31\x52\x96\x45\xa8\x0f\xcf\x7c\x86\x12\x62\x4e\x6d\x47\x69\x7b\xcb\xb2\x1c\x35\x39\x3b\x65\xcc\x8d\x46\x18\x61\xa2\xcf\x62\x48\xe5\x35\xee\xfe\xa7\xc3\x6a\x15\x5e\xac\x58\xa9\xa2\xe5\x2f\x87\xf5\x09\x9d\xfc\x04\xff\x63\x68\xfe\x7e\x75\x96\xc3\x1d\xf3\x6b\xc2\xcd\x6e\x8b\xd8\xd4\x38\x70\x13\x9d\x61\x60\xee\x62\xd3\x13\xee\x4d\xbc\x37\x1e\x66\x77\x73\xa6\x0a\x10\xdc\x66\x39\x81\x5c\x98\x02\xf8\x9b\x5c\x9e\x7b\xe1\x44\x2d\x40\xe5\x22\xe5\xb4\x48\x38\x54\x2c\x42\x24\x2f\xbb\x2b\x93\xde\xcb\xd4\x87\x2f\x10\x6f\x50\xb6\x08\x0d\xab\x82\x1a\x64\x68\x63\x4b\x27\x98\xc8\xd8\x8d\x2d\x79\x43\x4d\xa2\xa1\x11\x48\xdc\x30\x67\xc0\xa7\x62\xa2\x41\xae\x57\x69\x3a\x43\xa7\xd2\xdf\xb2\xd8\x0b\x19\x7d\xc5\x32\x19\x8d\x61\xe2\xbb\x97\x9b\xcc\x03\xb5\xc3\xf0\x7a\x3c\x53\x0f\x10\x56\xcb\x20\x27\x79\x2e\x13\x81\xda\xca\xce\xcd\xd6\xcb\x2f\xc5\xfc\xfd\xc6\x2d\x82\xf0\xb1\xa6\x08\xb2\xd2\x14\x50\xe5\xe4\xbf\x14\x91\x79\x5c\xe1\x19\xa0\x0f\x5b\x95\xa4\x11\x7a\xe2\x26\xd1\x55\xf9\x91\x84\x26\x57\xaf\x42\x04\xf1\x16\x44\x42\xea\x32\xd5\xb5\xe2\x52\x11\x50\xc7\x15\xf1\x41\x33\x96\x41\x8d\x6d\x98\x48\x70\x4f\x9c\xcd\x96\x03\x9a\x13\xa1\x4d\x7c\xae\xf6\x00\x96\xb6\xb4\x8c\x8d\x90\xed\xf0\x2a\xb3\x59\xcb\x15\xed\x26\x67\x93\x23\x12\x9f\x9c\xa5\xef\x94\xb5\xda\xf6\x6c\xdd\x5a\x63\x96\xd9\x96\x6d\x4c\xbb\x14\xcd\xbd\x1b\xd6\xe5\x8e\xcc\x4a\x64\x20\xf6\xba\x09\x54\x36\x49\xea\x37\x5c\x14\xb2\x1d\x6f\xbe\xb8\xa2\x72\xc5\xe5\x8a\xcc\x03\x8c\xda\xe0\x55\x25\x73\x1b\x54\xba\xb8\xd2\xe6\xa9\xbd\xbd\xc7\xb6\xd5\xe1\xb6\x05\xbd\x93\x52\xd0\xdd\x0f\xcf\xda\x2b\xda\x76\xe6\x9c\xb9\x58\x1f\x6e\x89\x74\x75\x8b\x0f\xa9\x6a\x1a\x55\x57\xc5\xea\x80\x0a\x53\x62\xc3\x12\xc9\xc4\xab\x73\x71\x4e\x53\x90\x79\x32\x0a\xbf\xdf\x92\x5e\xd3\x8a\x8c\x2a\x2a\xca\x88\x58\xe9\x7a\xcc\x4a\x7b\xb1\xbd\xde\x90\x90\x71\xf4\xa6\x9c\x20\x1b\x51\xd9\xf1\x94\x8b\x8a\xbe\xe5\x1d\x8c\x1a\x78\xd8\x82\xf6\x26\x17\xdb\xfd\x89\x8a\x19\xa2\x8f\xa4\x96\x49\x18\x17\x62\x8f\xdb\x24\x53\xdd\x9c\x6b\xb6\x49\xd8\x4d\x32\x96\xba\x95\x97\x15\x7b\x63\xe5\x26\x9b\xdb\x24\xab\xec\xad\xd1\xb7\xf
4\x6d\x45\x6f\x8d\x4c\x08\x3f\x82\x6a\x05\xaa\x0e\x2c\x4c\x3c\x2d\x39\x74\xdb\x2d\x6a\xcf\x48\x45\xfe\x22\x20\xb3\xa5\x42\xa0\x76\xaf\x80\x0a\x12\xb2\x87\xb2\x6b\x9d\xe2\x90\x80\x22\xb1\x47\x6e\x58\xb3\x8b\x2f\x3b\x47\xc3\xff\xf3\x41\x2a\x63\xb2\x71\x78\x96\xc7\x2b\x16\xa2\x0c\x6d\xb2\x65\x56\x08\xbb\x9c\x50\xd0\x3c\x05\xfa\xc9\x9b\x77\x69\xd4\xfe\x80\xe1\xa0\xdd\x89\x04\xf1\xfd\xbb\xf5\x41\x33\x00\xe2\xfe\xc7\x28\x1d\x45\x5d\x44\x7f\x75\x7a\xed\xfb\xf6\x28\x4e\xfa\xb8\x6f\xa7\x1f\xf6\x0f\xac\x41\x33\x04\x3a\x51\x7f\x84\x61\x7c\xdb\xb7\x31\x62\x67\x31\x6e\x02\x54\xee\xb0\x07\xcd\x33\x60\x90\x8c\xfb\x5d\x87\x8c\x25\xb9\x63\xca\xd3\x54\xd1\x27\x86\xce\x40\xa9\x20\x79\x48\x8f\xcb\x3d\x0d\x41\xf8\x1c\xe8\x8c\xd3\x34\xea\x77\x3e\x1b\xda\x84\x7b\x42\x01\xc5\x5c\x67\x59\xa6\x29\x00\xd9\xd8\x12\x65\x80\x6c\x92\xe5\xde\x99\x42\xd0\xbc\x00\x3e\x47\x7d\x43\x5c\x85\x09\xaa\x2a\x5b\xe2\xcc\x29\xa8\x19\x8e\x14\xbc\x60\xd0\x6c\x02\xef\xd2\xe4\x43\xd4\xc7\xbb\x76\x5a\xe1\x70\x64\x7c\x55\xd3\x84\xb2\xc3\x09\x9a\x97\xc0\x30\xea\x70\xad\xda\xe2\x94\xde\x95\x40\x69\x56\x9a\x5d\x8e\xdc\xbc\xa9\x66\x9b\x45\xee\x0a\xe8\xc6\xed\x28\x8d\x86\xf1\xb0\x61\xc1\xc2\xa0\x82\x3b\xa5\x03\xe5\x9d\x74\x46\x7e\x50\x3e\x61\x0b\xe8\x24\x83\xcf\x69\x7c\x7b\x37\x72\x45\x66\xe5\x56\xfc\xa7\x89\xf6\xab\x5c\xb3\xb9\x02\x9f\xd2\x56\xce\x8b\x5d\xad\xae\x09\xa5\x56\x34\x08\x2e\x4f\x81\xf7\xd1\x7d\xdc\x8f\xfb\x11\x92\xb4\x1b\xf7\xdb\x3d\xc4\xfd\x6e\xdc\x69\x8f\x92\x54\x86\x73\x9d\xb4\x8a\x9c\x95\x4e\x38\x8c\x9d\x70\x7e\x00\x94\xf5\x74\x19\x00\xbd\xe8\xfd\xe8\xcd\x20\x89\xfb\xa3\xb8\x7f\x8b\x6e\x32\x7e\xd7\x8b\xd0\xee\xdf\xf6\x22\xfc\x39\x4e\x46\x0e\xa6\x74\x29\xa0\xbb\xab\xaa\xbf\xaa\x3a\xac\x4e\x0d\x16\x5c\x86\xe0\xb3\x39\xa3\xbc\x52\x39\x04\xd7\x1c\xdc\x68\x20\x88\x9c\x01\xc3\xe4\xfd\x08\x77\x9f\x07\x77\x51\xbf\xe1\x1e\xc4\x29\x44\xad\xd9\x88\xb3\x8d\x73\x20\x8d\x6e\xe3\xe1\x28\x4a\xa3\x6e\x9d\xb6\x00\x4b\x5b\xcc\xc3\x2a\x6d\x1d\x83\xaa\x62\x5c\x06\x7e\xbf\xb6\x2e\x80\xfb\x76\x27\x4d\xfa\xb5\x43\x1b\x75\xca\xa2\x52\x38\x89\x26\xd0\x8d\x6e\xd3\x28\x52\x1b\x2d\x16\x42\x44\xa7\xa8\xc4\x8a\xa3\x16\x85\x4b\x60\xd0\x1b\x0f\xdf\xdc\xc7\xfd\xf1\xd0\x16\xbe\x86\x8e\x9c\x52\xc1\x6a\x9a\x50\x39\xfb\xab\xec\xab\x06\x97\x57\xc0\x70\x3c\x88\xd2\x61\x27\x8d\x07\x23\x8c\x3e\x25\x0d\x67\x4a\x03\xdd\x66\x76\x87\x33\x3e\xa5\xb6\x0a\xb4\xee\xd2\x28\x6a\x58\x4e\x27\x97\x96\x41\xe5\xe4\xa6\x96\xda\xd5\x29\xd0\xee\x8c\x47\x11\xda\x1d\xe6\xc3\xf5\x10\x40\x89\xdd\x5b\x5e\x07\x57\x01\x70\x1f\x77\xd2\xc4\x31\x50\x2b\xc5\xdf\xda\x6f\x3d\x51\x09\xac\x1b\xe6\xae\x42\x60\x10\xf7\x3a\x69\xf2\xc9\x52\xa2\x29\xa4\x21\x67\x8b\x38\xd4\x18\xa2\x56\xea\x45\x4b\x1d\x47\x4e\xf4\x8c\xed\xb6\xdb\xed\x45\xe8\x26\xec\x88\x70\xa6\x7c\x07\xe2\x78\x7c\x11\xff\x9c\x05\x8c\xa8\x1b\xf7\x7a\xed\x86\x22\xe6\xfc\x45\xf9\xb6\x8f\xf8\xb7\x17\xae\x36\x92\x7e\xd4\x50\xdd\x93\x6b\x9d\x80\xaa\x63\x96\x7a\x20\xc1\x55\x93\x99\xfd\xb0\x33\xee\xd5\x7a\x29\xe5\xa4\x26\xc2\x47\x15\xc6\x2d\x3e\xbd\x5e\x02\xdc\xc3\xee\xe6\xa0\x14\x9a\x8d\x43\x32\x43\x1a\x59\x0e\xba\xbd\x83\xe0\xea\x0a\xf8\x38\xee\xdd\xb6\x53\xbc\x4f\xdb\x22\xd2\x24\x7d\x46\xb8\x9d\x8e\xa2\xb4\xa1\xa6\xdd\x62\xf3\xd7\xa2\x6d\x45\x75\x9b\x8b\xfd\x23\x20\xce\x8a\x67\x9d\xe2\xcb\xd8\x6e\xba\x5f\x42\x6c\x8c\x5f\xcb\xcf\xef\xae\xdd\x7b\x6f\x33\x83\xc3\x4e\xe1\x9f\x33\x94\x30\x93\x97\x03\x88\x13\x87\x8b\xf3\x50\x73\xd0\xd6\x69\x99\x31\xc7\x9c\x3a\xea\x50\x97\xd2\xc6\x4f\xe4\x54\x8e\x08\xa8\x9a\x9b\xe3\x1a\xd6\xa1\x19\x33\xeb\xdc\xf2\xa0\xee\xd1\xed\xd3\xb7\xec\x94\xed\xcf\x71\x34\xac\xc9\xd7\x54\xd3\x97\x73\xf5\xb4\x7d\x82\x56\x08\xf4\xda\x
a3\xb8\x8f\x4e\x7b\x10\x8f\xda\x3d\xf4\xa2\xd1\x28\x4a\xd1\xc6\xa7\x78\x74\x87\xdb\xb4\xfd\x31\x6a\x38\x5e\x4f\x46\x27\x5e\x48\xe8\x32\x42\x17\x11\x8e\xe5\xb5\xce\xea\xa9\x73\x6f\xe3\x2b\xc0\x76\xa3\x7e\x5e\x4f\xbd\x13\xa7\x9d\xf1\xfd\xfb\x5e\xf4\x97\x58\x45\x64\x8a\x47\x55\x43\x52\xd4\x42\xa6\x14\x32\x95\x90\x1b\xe0\x5b\x17\xf5\x8c\x46\x71\xaf\x2b\x8f\x21\xab\x34\x76\x10\x55\xa8\xed\x51\x71\x05\xad\x66\x3d\x27\x3b\xf1\xfa\xe5\xfa\x2e\x68\x5d\xd6\x73\x4b\x99\xab\x68\xbf\x4b\xa4\x05\x1c\x1e\x1e\x1d\x49\x3f\xbe\xdf\x50\x34\x68\x5d\x55\x31\x8a\xac\x14\xc2\x1e\xa8\xc9\x5f\xea\xb1\x12\xec\xa1\x23\xcf\x77\xcc\xe0\x51\x24\xfe\x9e\x6e\xbe\xe2\xde\xaa\xe0\xde\x91\x76\x62\xfc\xfb\x4f\x0d\x5e\x09\x1f\x45\x73\x5e\xe1\xe9\x69\x05\xaf\x68\x07\x3c\xa9\x2b\x0a\xc4\x6a\x36\xda\x16\x1f\x9e\x06\xf5\xd4\x6b\xf1\xb4\x9d\x7a\x95\x2f\x88\x76\xc7\x93\xf2\xea\xb4\x66\xfe\x19\x9e\x56\xb9\x85\x68\x0f\x2b\xa7\xaa\xc3\x62\x06\xc7\xb4\x74\xcf\x8b\x73\xab\x72\x13\xf1\x76\x95\x40\xb6\x83\x50\x2d\xb4\x2a\xdf\x10\x6f\x57\xc9\x0e\xd4\xab\xfc\x41\xbc\xb3\x4a\xa0\xc2\x4e\xdd\x48\x3a\x3c\xad\x72\x05\xf1\xee\x2a\x01\x75\x66\x41\x75\x2a\xa9\xf2\x07\xd1\xe8\xae\xba\x4f\x42\xa9\x35\x6e\x91\xe9\xb7\x2a\xcd\x7d\xd5\x7e\x78\x5a\x05\xfc\xfe\x6e\x7e\xdb\xcc\x9f\x61\xc6\xcf\xa5\xe9\x33\xe3\x14\x54\xc1\x3e\xa9\xb0\x31\x2a\x6d\x6c\x97\x9e\x79\x18\x54\xc1\x3e\xa9\xb0\x31\x4a\x89\x75\xdd\x69\x1b\xf5\x2a\xd8\x27\x5e\x1b\x93\x26\xa6\x2c\x6c\x37\x16\x55\x80\x4f\x8a\x8a\x50\x7a\xd0\x6a\xd8\xf5\xc6\x56\x18\x54\xc1\x3c\xf1\x58\xf0\xaf\xb6\x03\xc3\x80\x95\xa8\xe3\xde\x28\x1e\xf4\x58\x76\xee\xf6\x56\x6e\xf4\x83\xe4\x5c\x5a\x6f\x79\xf1\xa5\xee\xb6\x6a\x1a\x55\xe0\x96\x3b\x1e\x8e\xd2\xe4\x83\x2e\xe7\x74\xe2\x4f\x88\xca\xc6\x09\x51\x1d\x04\xfe\xc6\xb7\xab\x5b\x86\xee\x76\xab\xe0\x3d\xf6\xd8\xa7\x31\xd0\xea\x89\x83\x4f\x01\x55\xa0\x1e\x7b\xac\x14\xda\x4c\xf7\xe3\x51\x85\xe8\x71\x95\xad\x42\x1b\xeb\x5e\x8c\xc2\x2a\x40\x8f\xb7\x59\xd3\x4f\xb4\xb8\xc3\xb0\x0a\xe0\x9f\x0b\xa2\xe3\x82\x53\x62\x2b\x5f\x32\x28\x4c\x90\xc2\xb0\x0a\xda\xa3\xbb\x24\xed\x3b\xfa\xf6\x0e\x5d\xa9\xaa\x2e\x3d\xc3\xb0\x30\xd4\xa0\x1e\xde\xb7\x7b\x9a\xf4\xf0\xae\x9d\x0e\x30\xac\x76\xe3\xfb\xb6\xbb\xc3\xf0\xdc\xcb\xc8\x57\xa1\x14\x4d\x77\xb7\x21\x7f\x18\x5e\xd4\x71\xa8\x35\xdc\x5d\x39\x34\xeb\x38\x6c\x37\xdb\x5d\xd9\x5c\xd6\xb1\xa9\x70\xb3\xfb\x71\xb8\xaa\xe3\xb0\x9b\x93\xdd\x71\x6c\x13\x86\xad\x3a\x5e\x85\xea\xc4\xe8\x46\xd6\x29\x3b\x1f\xe9\xec\xd4\xcf\x26\xb2\x6b\x89\x9b\x86\x3b\x74\x17\xd4\x61\xc5\x3e\x73\xb5\xe2\x98\x7a\x3a\x08\x9c\x53\xe0\xe5\x54\xae\x43\x6c\x8e\xf5\xd7\x46\x64\xf1\xc1\xa9\x87\x5e\xea\xc5\xca\x43\xc9\x49\x27\x20\xb5\x17\x43\xc2\x33\x3f\xcc\x4b\x15\x87\xcc\x3c\x74\xe2\xb1\x85\xaa\x1f\xd3\xfe\x4a\xa3\x9c\x72\xc8\x8c\x40\xd6\x1b\x9e\x3b\x34\xe1\x99\x1f\xd2\xbe\x0a\x83\x5a\x66\xaa\xad\x54\x65\x02\xaa\xd0\x98\x78\xd3\xe6\x33\x3f\xac\x8b\x95\x85\x72\x82\xba\xae\xa8\x1d\xa2\x85\x67\x7e\x14\x97\x2a\x0a\x55\x2e\xd3\x5d\xae\x90\x84\x67\x7e\xe4\xfa\x2b\x09\x55\x48\x98\x3a\xc2\x3f\xd0\x76\x44\xee\x87\xab\xa7\x82\xb0\x67\x46\xd4\x9a\x2c\x5a\x2c\xaa\xee\x60\x84\xe7\x7e\xb4\xea\xca\x21\x57\xdd\x9b\x3f\xf2\xfc\x0f\x31\xd7\x10\xbf\x63\x18\xfa\xc2\x43\x8d\x3f\xe4\x9f\xfb\xc1\xb9\x63\xad\xa0\x2e\x5a\x5a\x71\x5f\x5d\x1e\x2e\x78\xb5\x73\x3f\x4c\x7f\x4b\xa5\x70\xee\x07\xeb\x6f\xa9\x13\xce\xfd\x90\xfd\x7d\x55\xc2\xb9\x1f\xb2\xbf\xb1\x46\x38\xf7\xc3\xf5\x7f\x52\x21\x9c\x5f\x02\xdd\xf8\x63\x3c\x74\x6b\x03\x3d\x16\x91\x43\x37\x46\xdd\x9e\x6e\xb9\xff\x5f\x83\xa0\xe4\x07\x6e\xa9\x4a\x70\xa3\x86\x6a\x8e\x10\x35\xee\x52\x03\x2f\x91\x65\x15\x04\xe3\x87\xee\xcf\x56\x07\x9e\x20\x7b\xe1\x87\xed\xcf\
xd6\x06\x3e\x0e\x7e\xfc\xfe\x52\x65\xe0\x63\xe3\x87\xef\xaf\xd6\x05\xde\x04\xe8\xc2\x0f\xe7\x62\x55\xe0\xce\x24\x29\xd9\xf7\xb2\x54\x78\xe1\x87\xb6\xa9\x12\x4c\x99\x60\xea\x84\x3d\xae\xec\x85\x17\x7e\x68\x7f\xae\x68\x2e\x59\xdd\x25\xfb\xe2\x89\x89\x70\xf6\x9c\xff\xd8\x6a\x6c\xe9\x6b\x0a\xff\x0d\x00\x00\xff\xff\x0d\x26\x17\x04\x9d\x3c\x00\x00") + +func fontsSlantFlfBytes() ([]byte, error) { + return bindataRead( + _fontsSlantFlf, + "fonts/slant.flf", + ) +} + +func fontsSlantFlf() (*asset, error) { + bytes, err := fontsSlantFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/slant.flf", size: 15517, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSlideFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x58\x4d\x8f\xdc\x36\x0c\xbd\xfb\x57\x3c\x58\x0b\x6c\x02\x34\xcc\x4e\x91\x14\xd9\x5b\x3f\x80\xc2\x87\xa0\xed\xa1\xed\xdd\x99\xf1\x74\x8d\xcc\xda\x03\x7b\xb6\x40\x00\x23\xbf\x3d\x18\x5b\x94\x28\x89\xf2\x38\x7b\xb0\x65\x49\xa4\xc8\xc7\x47\x52\xb3\xc7\xd3\xf1\xc7\x9a\xf0\x13\xde\x63\xf7\x1e\x0f\xd8\x3d\x14\xc5\x78\x6a\x0f\x0d\x1d\x4f\x47\xbc\xfa\xed\x35\x76\x8f\x8f\xef\xf0\xe9\x0b\xfe\x6d\xf7\x97\x7e\xc0\x5f\xf5\x50\x1f\x6a\xbc\xfa\xff\x3c\x0f\x7e\x6e\xbb\x23\xbd\x5c\x8e\xe3\x33\xed\x4f\xaf\xf1\xf8\xee\xed\xc3\x87\xb7\xbb\x0f\x54\x14\x1f\xfb\xfe\xf3\x88\xae\xdd\x37\x68\x3b\x8c\xfd\x73\x83\xcf\x6d\x77\x40\x7f\xc4\xb8\x1f\x9a\xa6\x1b\x51\x77\x07\x9c\xeb\x73\x33\x50\xf1\x7b\xd3\x9c\x70\x1c\x9a\x06\x97\x1e\x43\x73\x3e\xd5\xfb\x06\xa5\x29\x7f\x40\x59\x95\xf3\xc6\x72\x2a\xaf\x6b\xb3\xa2\xfe\xf2\xd4\x0c\xd8\x3f\xd5\xc3\x48\x45\xf1\xf7\xf0\x05\xfd\xf9\xd2\xf6\x1d\xca\x37\xcf\x78\xb3\x9b\x37\x1e\xda\xb1\xfe\x74\x6a\x30\x3e\xbf\x8c\x4f\xb3\x86\xff\x9a\x0b\xf6\xa7\xa6\x1e\x70\xec\xbb\xcb\x55\xf2\x63\x7d\x69\xbb\xe5\x0b\xf5\x3d\x9a\x7b\xb4\xf7\xe8\xef\xf1\x72\x8f\xee\xeb\x2c\xf3\xc7\x57\xb6\x66\xc4\x2f\x25\xfe\x2c\xf1\x4f\x89\xba\x44\x5f\xe2\x65\xb1\xeb\x57\x2a\x0a\x02\xee\x0a\xa2\xf0\x01\xfb\xb8\x2b\xcc\x04\xf9\x98\x97\xe7\x11\xec\xb2\x5d\x5b\x5e\x30\xd7\x75\xd0\xac\x00\xd7\x3f\x7e\xdd\x15\x30\x15\xaa\xe9\x3a\x63\x4c\x55\x55\xd3\xac\x64\x75\x0e\xf6\xef\x2a\x0c\x33\x2d\xda\x8c\xdd\x66\x4c\x35\x2d\xc7\x90\x71\x33\x2c\xb7\x6c\xbe\x4e\x01\x76\x8d\x75\x92\xb1\x62\xa6\x72\x4b\xcb\x20\x3c\x8d\xe0\x54\xd9\xfd\x91\x40\x30\x27\x4c\x15\x88\x39\x2c\x19\x88\x19\x06\xab\xc4\xba\x4d\xf0\x5f\x76\xcd\x69\x0a\x67\xc9\xc2\x23\xe5\x3d\xb8\x86\x6d\x82\xa9\x16\xa8\x04\xa4\x6e\x4e\xee\x13\xee\x92\xf5\x96\xc1\x31\x8c\xb2\x87\xcb\xed\x11\x62\xce\x33\x42\xec\xb7\x0f\xff\x42\x06\xc3\xfa\x54\x6a\xe4\x34\xb9\x65\xaa\xec\xa7\x09\x22\x5c\x2c\xef\x59\x3f\x21\xb6\x8f\xf9\x10\xb2\xc0\x52\x05\x4c\xb6\x80\x6b\x4b\x80\x2c\xbe\x3c\x0e\x9e\x1c\x1a\x96\x23\x44\x9a\xae\xa7\x45\x20\x7a\x6e\xe4\xc5\xdc\x8c\x51\x4c\x9a\x83\xbe\xec\x9e\xf8\x10\x1e\x54\x93\x3d\x84\xa6\x48\xcc\xf8\x5c\x91\x26\xdd\x3c\x4d\x78\x42\x08\xc4\x7c\xae\xa4\x62\x82\x33\xf9\x28\x99\x0d\x51\x5a\x99\xb9\x29\x36\xe9\xd8\x2a\xc9\x49\x1a\xd3\xb2\xcb\x96\xd2\x96\x86\x41\xee\xb2\xb3\x8e\xa2\x8b\x7d\x1a\xed\xb5\x24\x48\xf5\x4c\xca\x19\x79\xfe\x2c\xa9\x1d\x24\x28\xa3\xae\x82\x55\xc5\x99\x00\xae\x6f\x2a\xed\x00\x35\x22\x01\xb5\x72\x24\x0f\x77\xeb\x33\x91\x91\x11\x5b\x1d\xff\xa4\x91\xb7\x4f\x4b\x07\x79\xb6\x1a\xc4\xb9\x71\x33\x81\xf5\x94\x92\xd6\x26\xa5\x52\xf3\x2d\x06\x50\x3b\x4d\xf5\x44\x77\x52\x88\x55\x92\xa6\xe1\x8b\xd7\x7c\xe9\x67\x69\x1e\x50\xa2\x58\x43\x2f\xee\xab\xc6\x77\x1d\xc4\x89\x89\x80\xe9\xd0\xde\x12\x4b\x96\xb0\xe7\xf0\x41\x1e\x30\x81\x81\x68\xc9\x6
9\x23\x16\xa0\xa5\xe8\x65\xe9\xab\xb6\x8d\x2d\x90\xac\xb1\x9e\xb6\x97\x3f\xe5\xce\x32\x37\xf2\xef\x38\x2d\x1b\x80\x94\x87\x9b\xfa\x8f\x76\x1d\x70\x45\x26\x1d\xac\xd1\xf7\x3b\xc9\x95\xdf\xad\x9e\x86\x84\x17\xde\x61\x19\xf2\x3c\x57\x94\x13\xd2\x4e\xb3\x62\xaa\x66\xa1\x62\xea\x4a\xab\xac\xd6\x8a\xcf\x24\xef\x8c\xe1\x2b\xc8\x6b\x7f\x1d\xe2\xd6\x2b\xcd\x70\x77\x47\x4a\x69\x35\x89\x5e\x16\xbc\xa2\x13\x5c\x83\xe2\x2a\x3a\xc5\xf7\x3a\x79\xbd\x0b\x69\x2f\x9b\x95\xbb\x7a\x12\x05\x7e\x07\xcd\x98\x3b\x74\x70\x91\xb6\xba\x0c\xfb\x45\xb2\x12\xa9\xc5\x24\xae\x3a\x32\x61\xfc\x37\x44\x71\x14\x39\x22\xcb\x14\x82\x79\x29\x60\x27\xf8\xa2\xe0\x04\x83\x6f\x84\xc8\x2c\xf7\x3a\x69\xf3\x92\x9e\x9a\x40\x44\x81\x89\x12\x26\x28\xba\x75\x23\x28\x70\x7b\x0b\x3e\x88\x00\x9d\x97\x89\xaf\x4a\xfe\xe9\xb6\x89\x3b\x13\x2d\x0e\xe9\xcc\x62\x03\x44\x6f\x0c\x23\x19\x18\x12\x19\x60\x41\xc8\x1a\x00\xcf\x14\xff\x6b\xd3\xc0\xa3\xe2\xdc\x83\xd4\x1f\x91\x4c\xc7\x22\x31\x29\x8d\x28\x62\xf4\x63\x92\xe5\x4e\xa8\x26\xa5\x57\x6e\x88\x6b\x48\x42\x79\xc2\xaa\x66\xd5\x87\x30\x31\x28\x97\x28\xf2\x67\x23\xfc\xb7\x28\x6a\x39\xa7\x33\x70\xe6\x12\x25\x0f\xa7\x20\x7f\x1c\x75\x8e\xab\x8c\xb5\x6b\xa9\xbe\xcb\x6a\x51\x8f\x4e\xc9\x11\x71\xc5\x3c\x3d\xd7\x44\xd4\x2b\x6e\xac\x80\x82\xa4\x48\xfc\x00\x5a\x17\x34\xff\xef\x8c\x24\x37\xf2\x0f\x19\x53\x6e\x06\x14\x18\xa2\xc4\xd8\x61\x57\x49\xe2\x71\x6e\x47\xf5\x5d\x20\xc3\xbf\xa7\x89\xb6\x54\x69\xcc\x16\x2c\x58\xd3\xb6\x92\x38\x6d\xa8\x42\x89\xd6\xdb\x69\x69\x34\x66\x66\x19\xea\x23\x49\xb4\xad\x7a\xfa\x4b\x00\xb9\x82\x9b\xfc\x9f\x22\xb9\x72\x7c\x0b\x00\x00\xff\xff\x77\x0b\x22\xaa\xf6\x14\x00\x00") + +func fontsSlideFlfBytes() ([]byte, error) { + return bindataRead( + _fontsSlideFlf, + "fonts/slide.flf", + ) +} + +func fontsSlideFlf() (*asset, error) { + bytes, err := fontsSlideFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/slide.flf", size: 5366, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSlscriptFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\x5d\x6f\xa3\x56\x13\xbe\xe7\x57\xcc\x45\x24\x40\x32\x4c\x12\x65\x57\x1b\xc9\xb2\xc8\x7b\xf1\xaa\x5d\xa9\x52\x2f\x5a\xed\x5e\x20\x9d\x10\x38\x89\x69\x6d\xb0\x00\xaf\xeb\xca\x3f\xbe\x9a\x99\x73\xf8\x26\x71\xeb\xc4\x98\x8f\xf9\x3a\xcf\x3c\x33\x73\x78\xdd\xbd\xde\x27\x37\xf0\x19\x1e\xe0\xfe\x16\x6e\xe1\xee\xce\x71\xfe\x97\xd4\x3a\x83\xb2\x80\xf4\x58\xd5\xf9\x0f\xed\x7d\xf6\xe1\xe5\x0c\x5f\x93\x02\xbe\x95\xbb\x46\x57\x40\x9f\xf5\x1f\x49\x91\x46\x69\x95\xef\x43\xad\xd3\x3a\x3c\xee\xf3\x74\x1b\xea\xec\xb8\x21\xd5\xfb\x07\xf8\x7a\xdc\xc1\xdd\xe3\x97\x4f\xce\xff\xf3\xb7\x9d\x6e\x82\xfc\x6f\x9d\x41\x52\x64\xac\xfd\x72\x86\x6f\xba\xc8\xf4\x6e\x07\x3f\xe5\xe9\x9f\xba\x80\xf5\x69\xcb\x27\xd1\x21\xa9\x92\xba\x7c\x6d\xc2\xb4\xdc\x6f\x58\xb8\x2c\x00\x3e\xc1\x2f\x49\x05\x77\x8f\x8f\x0f\x0e\xdd\x7a\xca\x32\x9d\xc1\x3e\xaf\xeb\xbc\x78\x83\xc3\xb1\x48\x9b\x63\xd2\xe4\x65\xc1\x1e\x8a\xe3\xfe\x45\x57\xb5\xf3\xb4\xdf\x93\x13\x71\x69\x3e\xb4\x12\x5d\x55\x49\x06\xbf\xe6\xba\x4a\x35\xad\xe4\xc0\x67\x51\x7a\x48\x8a\xb0\xac\xde\x36\xad\x30\x79\xfe\x02\x4f\xc7\x37\xb8\xbf\xbd\xbd\x67\xcf\xbf\x17\x99\xae\xea\xb4\xac\x34\xbb\xca\x92\x7a\x0b\xdb\xe4\x87\x86\x17\xad\x0b\x38\x1e\xb2\xa4\xd1\x19\x34\x25\xec\x49\x24\xdd\x95\xb5\xde\x9d\x61\x9f\x34\xe9\xb6\x07\xa8\xe3\x18\x0f\xbf\x6d\x35\x80\xab\x5c\x48\xb7\x49\x95\xa4\x84\x6e\x5e\x43\x53\x69\x36\x93\x17\x90\x40\x7d\xd0\x69\x9e\xec\xe0\x94\x9c\x43\x80\x9f\x1b\xd8\x27\x67\x78\xd1\xd6\x42\x5e\xd4\xba\x32\xc2\xcd\x56\x43\xa3\xff\x6a\x20\x29\xce\xa7\xad\xae\x34\x9c\xcb\x23\x9c\xf2\x7a\x4b\x11\xed\x74\xf1\xd6\x6c\x35\xd9\x84\xb4\x2c\x02\x6b\xa1\xd0\x69\x43\x30\xee\xf2\x82\x96\xd1\x9c\x68\x25\xcd\x89\x14\x9a\x46\x57\x75\xe8\x38\x37\x37\xd1\xdc\x37\x62\x13\x7c\x44\x3a\x22\x9d\xbb\x74\xa7\xb4\xf7\x41\x84\xe8\x4e\x19\x39\x2e\xb8\xe6\xb2\x3d\x88\x36\xeb\x07\x18\x60\x10\x39\xfc\x03\x91\x83\x80\xad\x95\xde\x2f\x9d\xac\x58\x10\x3c\x85\xf2\x00\xc1\x07\x56\x74\x67\x15\x3c\xbf\x17\x1f\xab\x20\x78\x7e\x2f\x7a\xb6\xea\xf9\x7c\x85\x31\xfd\x78\x00\xdf\xe9\x2a\x46\x88\x8d\x25\xfb\x13\x39\xb4\x9c\xc8\x71\xf9\x84\xff\xe9\x91\xc2\xce\x78\x77\x84\x11\x10\xd8\x1e\x15\x2e\x0a\xf1\x55\xcc\xe1\x06\xc1\xf7\x20\x60\xc3\x31\x4c\xe3\xe8\xe0\x67\x51\x34\xa2\x30\x23\x6a\xff\x6d\xe0\x94\xc0\x1b\x73\x08\x48\xaf\xbb\x94\xcc\xf6\x14\xda\x35\x4e\x61\x9c\x0b\x5f\x29\x93\x14\xf0\x45\x90\x14\x3c\x65\x93\x35\xc9\xa6\x62\x69\x16\x65\x93\x63\x76\x18\x7b\x20\xf6\xc2\x20\x70\xc5\xde\x12\x3b\x46\x0a\x00\x01\x19\x56\xcb\x01\xc8\x35\x72\x10\x6e\xc0\xd2\x00\x38\x87\x22\x28\xa5\x5a\x88\xdd\x20\x08\xc5\xb0\x3f\x9f\x9b\x0e\x2c\x25\x11\x8f\x39\x27\xd6\xc8\x2d\x13\x7a\xba\x76\x59\x89\x67\x56\x8e\x66\xe1\x73\xee\x54\xdf\x45\x97\xa1\x71\x6e\xda\x43\xd9\xd6\x65\x1f\xea\xf9\xc7\xae\x3c\x46\x8a\x30\x72\x98\x88\x5c\x14\xfc\xed\xeb\x31\x91\xf8\xd0\x5b\x45\x2b\x6f\xf4\x7b\x7a\x26\x51\x12\x35\x5a\x87\xb3\xc0\x9b\x4b\x25\xa9\x2a\x63\x59\xad\xe0\x37\x16\x1d\x11\x50\x32\x8a\x00\x9e\x7a\x9f\x30\x9d\xc2\x9a\x14\x88\x31\xea\x2a\x05\x11\x78\x87\xe2\xf3\x35\x81\xe4\xe0\x43\x85\x67\x09\xc9\x78\x50\xab\x45\x85\x16\x0d\x04\xee\xb3\xd4\x25\x57\xa2\x86\x03\x85\xc1\x19\x9d\x52\xeb\x5b\xb1\xbb\xe7\xc0\xbd\x98\x15\x99\x08\x83\x75\x17\x62\xef\x84\x28\x47\x27\x2b\xa2\x87\x6f\x29\x47\x50\xb7\x58\x2f\xa8\xd1\xc7\x9a\xbc\x10\x1c\xab\x20\x08\x2e\xac\x1f\x2b\x84\x58\x2d\xa8\xa9\xb6\x18\x60\x23\xd7\x28\xb5\xc8\x2e\xd7\x0a\x47\x90\xa8\x2e\x3a\x13\xdc\xda\xf6\x2c\x7f\x09\x43\xee\xcb\x5d\x23\x6f\x11\xed\x4a\x48\xf1\x9f\x98\xf5\x4d\x32\xd1\x74\x2e\xb7\xcf\xb0\x31\x56\xca\x6a\x0d\x18\x70\x0d\x29\xdd\xab\xda\xe8\xc
4\x83\xf4\xc9\x61\xe6\xaf\x20\xa5\xa7\x62\xeb\xe1\xf9\x5a\x0f\xf1\xc2\x1a\xcc\x48\x35\x33\x95\x1d\x59\xda\xcf\x17\x96\x1a\xb2\x98\x1e\x32\xa5\xae\x61\x31\xd3\x91\x94\x0d\x1f\x6d\x4f\xef\xd5\xe6\x47\xca\x7d\x2e\x7b\xed\x08\x88\x11\x96\xb8\xac\xa0\xaf\x67\xfb\xae\xa1\xa4\xa7\xbc\x01\x29\xe7\x7c\x32\x41\x65\xd4\x03\x6d\x39\x4c\x42\xe2\x59\x78\x66\x0a\x0e\xac\x2b\xea\x24\x7c\xc7\xa2\x85\x56\xad\x1b\x32\xb6\xa4\xa3\xf1\x80\x1d\xce\x0c\x54\x93\xc9\x11\xdb\x3d\x89\x00\x22\x0f\xe2\xe9\x78\x11\xd3\xed\x2e\x41\x4d\x67\x10\x76\x93\xa3\xff\xed\x6d\x38\x54\xb7\xe1\xe0\xdd\xc7\xf3\x70\x9f\x35\x18\x0f\x4a\x85\xc2\x8c\xcb\xec\x20\xe8\x4f\x60\x45\x0b\xf3\xdf\x19\x89\x2a\x64\x1c\xc7\xe0\x58\x61\xde\xb3\x29\xa9\x90\xae\x6b\x0f\xdc\xb5\x96\xa8\x1f\xe1\x8c\x25\x6b\x4a\xf6\x01\x68\x36\x1a\x62\x0d\x37\xfc\x7c\x8d\xa3\xf0\xc5\xe2\x8a\xfd\xfa\xdc\xf0\x2e\x10\x39\x17\x5c\xd8\x65\x20\xe0\xa4\x67\x59\x98\x4b\x6a\x93\xd1\x60\x70\x8f\x66\xad\x8c\x7d\x34\x84\xa0\x89\xee\x2e\x6d\x67\x10\xd6\x53\x47\x86\x4a\x28\x44\x64\x14\x66\x10\x9f\x96\xb1\xd4\xbd\xd9\xf4\x77\x96\xa7\x35\x33\xac\x09\x51\x34\x6a\xeb\x85\x59\xdd\x66\x45\x31\x84\x8b\x59\x31\xbf\x86\xfc\x68\xc0\x96\xfb\xee\x98\x54\xd3\xac\x6c\xa6\x59\x31\x6e\xe5\x7d\x63\x06\xac\x9e\x90\xd4\x9d\xbf\x20\x64\x27\x1d\xcf\xb9\x59\x44\xdb\x63\x08\xa1\x30\x74\x91\xe8\x2b\x4a\x52\x8c\xf0\x3e\x10\x2b\xfa\x33\x3d\x6c\x01\xd8\x41\x21\x86\x2b\x49\x43\x3c\x5f\x88\xa3\xa4\x71\x1b\x23\x50\x8c\x6d\xd3\x27\xdc\x79\xdb\x5c\xe4\x08\x20\x45\xee\x5d\x06\xb6\xe9\xbd\x4b\x5d\xf7\xde\xf5\xd1\x8b\x8b\xc1\x04\x65\x1b\x60\xe5\x66\x4a\x5d\x19\x4f\xb1\xc2\xb9\x2a\x1a\xc4\x57\x2a\x55\x46\xff\x66\x37\xda\x29\x5c\x39\xf8\x4b\xd6\xf9\x6f\x73\xcf\xdc\x29\xa1\xbc\xa2\x9d\x9a\xb7\xf8\xc5\x4a\x12\x43\xef\x93\xb0\x7d\x39\x33\x78\xd8\xdd\xb6\xb2\xdb\x6d\x1c\x85\x19\x45\xce\x3f\x01\x00\x00\xff\xff\xbf\xcb\xeb\x13\xa7\x12\x00\x00") + +func fontsSlscriptFlfBytes() ([]byte, error) { + return bindataRead( + _fontsSlscriptFlf, + "fonts/slscript.flf", + ) +} + +func fontsSlscriptFlf() (*asset, error) { + bytes, err := fontsSlscriptFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/slscript.flf", size: 4775, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSmallFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x5a\x6d\x6f\xe2\x38\x17\xfd\xce\xaf\x38\x1f\x46\x9a\x22\x95\x9a\x04\x0a\xad\x54\x8d\x9a\x81\xb4\x8d\x96\x12\x9e\x10\x3a\x3b\x52\x24\x2f\x33\x4d\xa7\x68\x29\x54\xbc\xec\x6a\xa4\xfc\xf8\x47\x8e\xed\xc4\x71\xec\x94\x5d\x69\x27\x88\xda\xc7\xf7\x5e\xdf\x73\x7d\x7c\xc3\xcb\xfa\xc5\x5d\x7e\xc2\x25\xfa\x70\x7a\x70\x2e\xe1\x74\xd1\x85\xeb\xf6\x9d\xcb\xd6\xfc\x6d\xb9\x5e\xe3\xc7\x6f\xdc\xaf\xd3\xcd\x06\xa3\xd7\xe5\xfb\x7b\xba\x5e\xa3\x4f\xae\x7b\xe8\x74\xf0\x63\xb9\x4f\x9f\xb1\xdd\x60\x7e\x58\x6e\x9e\x97\xbb\xe7\x56\xb0\xf9\xb9\x3e\x3e\xa7\x7b\x04\xf3\x10\x93\xe5\x61\xb5\xe9\x38\xad\x97\xd5\xaf\x75\x7a\xc0\x2e\x5d\xa7\xcb\x7d\x0a\xf7\xc2\x61\x93\x1d\x17\xde\xf1\x17\x9c\xeb\xeb\x7e\x6b\x96\xee\xde\x56\xfb\xfd\x6a\xbb\xc1\x6a\x8f\xd7\x74\x97\xfe\xf8\x8d\x5f\xab\x7f\xd2\x0d\x0e\x5b\xbc\x6d\x9f\x57\x2f\xbf\x71\x78\x5d\xed\xf1\xb2\xdd\x1c\xce\xb1\xdc\x63\xbd\xdd\xfc\x62\xcf\xc3\x6b\xda\xca\x07\xac\xd2\xdd\xe7\x3d\x36\xcb\xb7\x94\x61\xbc\xaf\x97\x3f\xb9\x6d\x4b\xfc\xdc\xbe\xbd\xa5\x9b\x03\xd6\xab\x4d\x7a\xd1\x6a\x3d\xf2\xd1\xcf\xcc\xb1\xd9\xf2\xb8\xc6\xd7\xe3\xee\xb0\xdd\xe0\x66\xbf\x5d\x1f\x0f\xab\xed\xe6\x36\x5d\xee\x0e\xaf\xeb\xd5\xe6\xef\x8b\x4d\x7a\xf8\x02\xc7\x25\xd7\x03\x66\xc8\x8a\x7b\x87\x4d\xfa\x2f\xde\x97\xbb\xe5\x5b\x7a\x48\x77\xad\xfd\xf1\xfd\x7d\xbb\x3b\x70\xc0\xbb\xe0\x9e\xf9\xba\xdc\x3c\xb3\x8f\xdf\x56\x9b\x0b\xe0\x71\xf9\x1b\xcb\xf5\x7e\x8b\x1f\x29\xf6\xeb\xd5\xaf\xd7\xc3\xfa\x37\xde\xa4\x15\x2f\xdb\x1d\x7e\xa4\x87\x43\xba\xc3\x71\x9f\xb6\xb6\x2f\x39\xfc\xcb\x71\xbd\xee\xfc\xbb\x7a\x3e\xbc\x92\xbf\xd3\xdd\x86\xec\xdf\x8e\xfb\x57\x2c\xd7\x87\x74\xb7\x59\x1e\x56\xff\xa4\xfb\x73\xfc\x38\x1e\xf0\x9c\xbe\x2c\x8f\xeb\x03\xb6\xc7\xc3\xfb\xf1\xc0\x3c\x9f\x86\x31\x7e\xbe\x2e\x37\xbf\xd2\xe7\x8b\x56\x0b\x9f\x6e\xf5\xff\x6f\x5b\x00\xc5\x6d\x0b\x19\x32\xf6\x2f\x65\xff\x9e\xd1\x36\xfb\x1e\x10\x7f\xce\x07\x9c\x21\x43\xfe\xf5\x13\x9e\x90\xff\xf9\x13\xf8\x53\x0e\xe4\x43\xf9\x97\x34\x63\x88\x1c\x99\x02\x17\xec\x1b\xf1\x19\xe2\x33\xd8\x6a\x34\x2b\x26\xe7\xf3\xb2\x8c\xe6\x06\x74\x6e\x6e\x5b\x20\xa0\x84\x7f\x29\x2d\x01\xe5\xb6\xd0\x36\x01\xe1\xab\x13\x90\xfc\x3b\x10\x4a\x0a\xc3\x4b\xe3\xa9\x84\x26\xa0\x19\xe5\xd3\xbf\xb0\x8f\xdc\x88\x84\xb2\xff\x32\x94\xd3\x50\xc6\xe4\x2c\x77\x38\xe3\x0b\x7d\x82\x12\x13\x50\x9a\x2f\x09\xc2\x43\x87\xf2\x81\x84\x26\x6c\x0c\x5b\x99\x2d\x80\x84\x3b\x96\xc7\x57\x3c\x08\x25\xaa\xd7\x94\x24\x34\x37\x0b\x37\x39\x00\x91\xe6\x14\x61\x2d\x83\x5a\x84\x54\x09\x22\xa0\x79\x2d\xbf\x30\x78\x52\xec\x58\x1e\x1c\x81\x46\x05\x56\x7d\x47\x75\x9c\x4a\x66\xc8\x30\x88\x38\x40\xec\x09\xf3\x4e\xc3\x11\xc1\x00\x01\x0f\x47\x86\xb3\x36\x0f\x48\x42\x29\x29\xcd\x2f\x63\x4f\x78\x3e\x2a\x59\x59\x6e\x69\x11\x83\x76\x65\x55\xe9\x85\x32\x90\x72\xf7\xb8\x79\x19\x45\x22\xdc\x25\xd5\x81\xe0\xe6\x65\xc5\x56\xd2\x4a\x78\x35\xf3\xc4\xfa\xa0\x22\xa5\x6d\xb0\x22\x59\x0b\x03\xc1\x07\x26\xf5\xf5\xcb\x8d\x00\x8f\x8a\x9c\x53\x8d\x65\x65\xfd\x33\xd0\xdc\xff\x46\xd8\xea\xba\xe7\x62\x9b\x94\x88\xdf\xea\x5b\x6b\xd8\xe6\xfa\x9f\xab\xd9\xa4\x72\xe1\x06\x37\x05\x09\x2a\x39\x6c\xcc\xb8\x4c\xdf\x33\x8d\x34\x5f\xf0\x45\xb2\xa5\x55\xdf\x00\x11\xf8\xdc\x21\xf6\x3c\xa3\xed\x5a\xda\x95\x89\xc7\x46\xf3\x70\x10\xd0\xbf\xf2\x30\x27\x2c\x68\xe7\x54\xe6\x21\x2d\x59\x29\x19\x07\x42\x93\x02\x40\xcc\xa7\x44\xf1\xae\x9e\x14\x3c\x36\x68\x48\x0a\x5a\x58\xc4\x53\xfc\x4c\x7c\x93\xa8\xd1\x28\x61\x39\x6e\x41\x9c\xac\x0d\x19\xba\x1a\x71\xb4\xcc\x04\xcd\x60\x8a\xb2\x7d\x60\x56\x27\x6e\xdd\xda\x82\xb8\x35\x6b\xcb\xb3\x44\x10\x97\x45\x5d\x30\x38\x33\xf9\xc6\xb9\x46\x65\x69\x34\x5b\x0b\x01\x0b\x2a\xeb\x8
1\xc4\x37\x57\x0f\x9e\x90\x19\x4b\x50\x5e\x9e\x3f\xf3\xa4\xcc\x68\xa6\x6e\x9c\x1c\x5d\x50\x5f\x7e\x28\x73\xb4\x6e\xb2\x24\x75\x96\x97\x69\x69\x4d\x42\x8a\x42\x05\xa8\x8e\xd6\x02\x93\x48\x0f\x2e\xfe\x92\x33\x12\x7d\x15\xa8\x59\x5b\xd4\x4b\x5a\x16\x4c\xd5\xe7\x5a\xfa\x25\xdc\xba\x3c\xed\x2c\x1b\xda\x84\xdd\x98\xda\x02\x1b\x02\xbb\x1c\xaa\xd5\x1b\x79\xb4\xda\xea\x2d\x55\x6a\x78\x51\x68\x21\x6d\x35\x9d\x68\x14\x65\x6e\x95\x59\x40\x33\x7b\x4c\xf2\x93\x3f\xcf\x04\x56\x4d\xc4\x09\x95\xe0\x49\xea\x86\xa4\x52\x59\xcb\x29\xea\xac\xf2\x68\x4b\x90\x90\x84\x28\x73\xb5\xe9\x0a\x82\x9c\x2d\x66\xb2\x53\x5d\xd4\x0d\x3d\xfb\x3e\x32\xd1\x18\x08\x79\xa8\x41\x3b\x73\xeb\x1c\x17\xbb\x96\xd5\x24\x4a\xc6\xc6\xca\xf5\xa5\xab\xc2\x4b\xe9\xa0\xbe\xb5\x62\xbb\x14\x11\x53\x1c\xce\x1c\x0c\x84\x67\x13\x49\xa4\x96\x50\x1f\xda\x39\xa0\x3f\x2b\xe7\x82\x76\xce\x80\x03\x56\xf5\x57\x39\x51\xca\x05\x59\xd3\x8b\x8a\x6e\x24\xb9\x8c\xca\x67\x99\x9b\x17\x4a\x72\x56\x94\x68\x91\xcc\x02\x35\xd3\xfc\x10\x25\x49\xa4\x60\xe3\xfa\x9a\x97\x04\x9d\xfc\x30\x4d\xea\x95\xae\xac\xb6\x99\xa0\x71\x66\xa6\x71\xa3\xff\xea\x01\xa1\xfb\x2f\x3e\x7c\x16\xdc\x37\xd5\xe5\xf2\xb8\x37\xc9\x2f\xe9\x79\x21\x12\x44\x36\x50\x22\x97\x55\x56\x95\x41\xcf\x9f\xa2\x1c\xd3\x5a\x72\xa1\x5c\xca\xb8\x60\xe1\x2c\x68\x69\x7f\xe1\x00\xad\x14\x5c\x2d\x3c\x38\xc1\xe1\xda\xf6\xd8\x04\x55\xd5\x90\x4a\x1e\x41\xe4\x51\xbe\x55\x27\xef\x11\xe7\x78\xd5\x08\x14\xc8\xd6\xad\x17\xc6\x2a\x17\xa5\xd2\x52\x3d\xf4\x50\xf2\x48\x4d\xe3\x9a\x47\xda\xd1\xcd\xe4\xa2\x25\x50\xcc\x1f\x5e\xb4\x9e\x44\xc9\xd2\x05\x25\xaa\x63\xcb\xe1\xca\x8c\xa4\x52\xb1\x2d\x0b\x24\xf9\x70\x52\x4b\x99\x66\xbb\x45\x79\xa2\xd5\x5b\x16\x8a\xca\xc9\x31\x75\x46\x57\xee\x32\x54\xde\xe6\xe4\x53\x5e\xeb\x0c\xd9\x5a\xe4\xac\xa9\xa4\xb2\x3d\x50\x70\xb8\x9e\xe7\xe5\x92\x64\xbc\x5e\x12\x59\x29\xab\x4f\x5d\x3c\x9c\xd1\xb6\xa0\x1c\xe9\x74\x12\xcb\x91\x52\x1f\xcd\x95\xaf\xbc\xea\x92\x7a\x65\x94\xc3\x4b\xce\x37\x1d\xab\xc6\x05\x9a\x4b\xaf\x1c\x4e\xa5\xf9\x52\x79\x9c\xb8\x40\x73\x4e\x2a\x0a\x55\x50\x31\xbb\xc9\xef\xd1\x6c\x5a\x95\x93\xce\xa0\x0b\x4c\xc3\xce\xd7\xc8\xf7\xfe\xc0\x7c\xe6\x8d\x7c\x73\x4f\xc4\x19\x38\x40\x30\x7d\xf2\xa3\xd8\x1f\xc3\xff\x73\x34\xf1\x1e\xbd\x38\x08\xa7\x78\xf4\xa2\x3f\x3e\xaa\x91\xce\xc0\x05\x46\xfe\x34\xc6\x3c\xb8\x9f\x96\xf9\x97\x65\xa2\x0a\xe4\xc5\x9f\x3f\x44\x6f\xc3\x19\xf4\x80\x59\xb8\x98\x8e\xcb\x39\x52\xa8\x51\x02\x9a\x14\x4a\x59\xac\x7c\x5e\x11\xa7\x1c\xa2\x0f\x8c\x16\x51\xe4\x4f\x47\xdf\x05\x0a\x23\x59\xbe\xf5\xb8\xe0\x49\x2f\x4b\x1b\x41\xa2\x96\x36\x67\x70\x09\x7c\xf7\xa7\x72\x71\x2a\x0b\x5c\xa9\x46\xb2\xfc\xbb\xac\xf2\x49\x2a\x14\x06\x30\x00\xbe\x46\xe1\x1f\xfe\x14\x5f\xbd\xc8\xd4\x54\x52\x69\xe2\x0c\x86\xc0\xdc\x1f\xe5\x21\x55\x1d\x16\x0c\x14\x79\x22\xe5\x48\x22\x5a\x22\x67\x54\xb0\xc7\x19\x5c\x01\xe3\xc0\xf3\x23\x7f\x1e\xcc\x8d\x1c\xf9\x24\x15\xc3\x27\x45\x83\x88\xc9\xd7\xc0\x28\x9c\x7d\x8f\x82\xfb\x87\x72\x8f\xac\x77\xc5\x8c\x87\x2c\x2f\x9e\xb2\x7a\xc9\xb3\xd5\x19\x76\x81\x3b\xff\x31\x98\x06\x53\x1f\x61\x34\x0e\xa6\xde\x04\xc1\x74\x1c\x8c\xbc\x38\x8c\x1a\x05\x4a\xfd\x82\xe1\x0c\x1d\x60\xe2\xdf\xc5\x9d\x59\x18\x4c\xe3\x60\x7a\x8f\x71\xb8\xf8\x3a\xf1\xe1\x4d\xef\x27\x3e\xfe\xb7\x08\xe3\x4a\x1a\x4a\x35\x98\x1b\x2a\xaf\xdf\xc5\x05\xbc\x5a\x1a\x9c\xa1\x8b\xbc\x1f\x58\xec\x71\xad\xe9\x50\x64\xf1\x27\x54\x9b\x0e\xce\xb0\x07\xcc\xc3\xbb\x18\x0f\xdf\x67\x0f\xfe\x54\x57\x49\x59\xd9\x40\x2a\x6b\xaa\x33\xec\x03\x91\x7f\x1f\xcc\x63\x3f\xf2\xc7\x1f\x04\x3a\x43\xa7\x2d\x02\x9d\x65\x49\x52\x0f\xf4\x25\xf0\xe8\x8d\xa2\x70\xda\xd4\xb8\xaa\xd6\x4e\x67\x38\x00\xc6\xfe\x7d\xe4\xfb\x85\xd7\xf9\x6a\x17\xbc\x1e\x
92\xdb\xaa\xae\x74\x86\x43\x60\x36\x59\xcc\x3b\x8f\xc1\x74\x31\x57\x32\xb3\xa1\x01\x97\x0b\x10\x69\x0d\x97\xaf\xce\xf0\x0a\x98\x2f\x66\x7e\x34\x1f\x45\xc1\x2c\x46\xfc\x2d\x54\x45\x74\x5b\x3d\x7f\xaa\x21\xbb\xd6\x26\x3e\x44\xbe\xaf\x1f\x5d\x19\x15\xca\xab\x32\xf5\xaa\x0b\x78\xa3\x45\xec\xc3\x1b\xb1\xe2\xd3\x12\x27\x1a\x51\xdd\x54\x9d\xbd\x72\x80\xc7\x60\x14\x85\x4a\x99\xb2\x1c\xaa\x19\x2e\xce\x4b\x39\xc2\x67\xbb\xc0\x2c\x98\x8c\xa2\xf0\x9b\x9e\x51\x24\x4f\xa5\x3c\xc2\xc5\x55\x81\x6a\xaa\xcb\xb9\xea\xb1\xd5\xc7\xe3\x89\x8f\x71\x18\xb7\x60\xe8\x32\x56\x8c\x65\xc5\xcd\x1f\x07\x93\x89\x57\x8c\xad\x75\x27\xdb\xb4\x9d\x8f\xbd\xac\x06\x31\x9c\xfa\x5a\x57\x91\xea\x37\x0a\xe7\x6a\xc0\xd2\x6b\x3e\x5a\x4c\xec\x44\x36\xaa\x43\xbd\x5d\xe1\x5c\x0d\x81\xbc\xb2\x9c\x48\xe2\x82\x0e\x89\xda\xf6\x92\x8d\x2f\xad\xbd\xe1\x5c\x5d\x01\x4f\x8b\xc9\xbd\x17\xe1\x2e\xf2\x78\x01\x0d\xa7\x0c\xd5\x8b\x62\x3f\x2a\x9a\xe4\xa2\xe5\x9d\x11\x96\x68\x3c\xed\xd8\xc7\xcf\x7c\x3b\x72\x15\x52\xed\x53\x70\xf4\x6b\x33\xfa\x83\x37\xb9\x2b\xa1\x4b\xe4\x12\x58\xb4\x62\x29\x21\x54\x6f\x7f\x38\xd7\xdd\x3a\x6a\x9e\xd8\xd2\xea\xb9\x08\xaf\xb4\x9b\xe5\xb9\x62\x38\x6d\x97\x96\x1b\x4d\xe7\xab\xa8\x47\xf6\xff\x16\xfe\xbc\x5a\x28\x79\x9a\x88\x16\xa1\xb4\x5d\xbf\x81\x39\xd7\x2e\x30\xf1\xe2\x60\x8a\x91\x37\x0b\x62\x6f\x82\x89\x1f\xc7\x7e\x04\x0f\xdf\x82\xf8\x01\xf7\x91\xf7\xe4\xb7\xca\x08\xb3\x32\x2b\x6a\x99\x55\x93\x39\xd7\xbd\x66\xd0\x9c\xb5\xd5\x13\x50\x6c\x7b\x13\x68\xbf\x19\x74\x14\x44\xa3\xc5\xe3\xdd\xc4\xff\x33\x07\x94\x6a\x94\x24\xd9\x87\xc8\x97\xcd\xc8\x71\x30\x19\xfb\x1c\x94\x64\x02\xf4\x63\x73\x07\xcd\xa0\xcd\x87\x79\x13\xf0\xb0\x19\x38\x62\x04\xf4\xbe\x86\xf9\xb6\x15\x9a\xea\xec\xac\xdd\xfe\xd0\xe4\x2b\x1b\xb2\xdc\xac\x62\xbb\xce\xa5\x20\x62\xd5\x81\x4a\xa1\x9f\x51\x03\x17\xae\x2d\xa0\x23\xb1\x6d\x65\x8d\x3b\xa5\x51\xdc\x66\xf9\x7c\xdb\x72\xbb\x5d\x0b\xac\x5f\xcb\xdb\xdb\xa2\x3b\x9f\x21\xbf\x3c\x66\x94\xde\x94\xe7\x88\xdb\x75\x9a\xa1\x8a\x6c\x15\xca\x83\x57\x41\x33\x94\x8d\x4d\xbe\x21\x47\x2b\xed\x23\x0b\x9e\x8d\x48\xfe\x29\x49\xc4\x30\x21\x2e\x86\x37\x95\xaa\xea\x76\x6d\x64\x0a\xea\xb4\x97\xac\x57\x5a\xd7\xfa\x19\xe0\x76\x6d\x14\x0a\x6a\x31\xc4\x6d\xf9\x46\xc6\x8e\x67\x63\x4f\x60\x0a\xa4\x8c\x24\xe5\xa1\xb4\x82\xda\x98\x13\x18\xa3\x59\xbb\xc6\x65\x32\xd5\x39\x74\xf5\x1a\xe7\x76\x6d\xec\xf1\xe3\x87\xaa\x0a\xcc\x0a\x15\x98\x23\xb6\x8b\xdb\x3b\xad\x75\x08\xdc\xae\x8d\x3e\xd3\x0f\x6a\x53\xd9\x79\xaf\x35\xdc\x5d\xc7\x46\x9e\xd0\x5e\xf4\x8b\x4e\xb7\xe5\x6a\xed\x3a\x36\x1a\x85\x96\xa2\x2f\xcf\xfa\x46\x50\x1b\xa1\xc2\x53\x8a\x7e\x23\xb2\x8d\x5a\xe1\x07\x81\x6d\x04\xb5\xd1\x2a\x3c\x81\xaf\x8d\xc0\xec\x32\xb0\x98\xc4\xc1\x6c\xc2\xf4\x59\xe5\x16\x99\x1b\x95\xe4\xd7\x5e\xf1\x42\x3d\x21\x89\xd2\x1d\x73\x1d\x1b\x99\x84\x55\xf3\x98\xdd\x63\x6b\x37\x15\x49\x2b\x9c\x11\x22\x5f\x97\x30\xb5\x53\xcb\x51\xc7\xc6\xab\x45\x35\x9d\xca\x7c\xa2\x32\xa1\x9a\x7b\x2f\xae\x63\xe3\xd4\xc2\x90\x53\x45\x52\x9d\x84\x6c\xe3\xd5\xc2\x98\x58\x65\x66\x21\xc9\x4e\x80\x77\x6d\x0c\x5b\x34\x97\x9a\x93\x5a\x52\xae\x6b\xa3\xda\x77\x63\xb5\xcd\x63\xa2\xb6\x2f\xd5\x57\xfc\x39\x9e\x8d\x65\xf1\x43\x18\x4d\x4d\xed\x55\xde\xcf\xd7\x7b\xb5\xae\x5b\x90\x6a\xfe\xe8\x4d\x0a\x98\xf9\x83\x17\xcd\x30\xff\x8f\xcd\x2b\xd7\xed\x1b\xd1\x1a\x94\xe9\x47\x6f\x46\x5c\xf7\xb2\x09\xb2\xa9\x44\xd9\x21\x07\x4d\x90\x1f\x16\x28\x3b\xee\xb0\x09\xb7\xa9\x3c\xd9\x21\xaf\x9a\x20\x3f\x2a\x4e\x76\xd8\xeb\x26\xd8\x66\x3d\xda\x84\xdb\xeb\x9a\x71\x7d\xa5\xfc\xc8\xde\x5d\xb9\xf3\xc5\xab\xa6\xf3\xba\x1a\x75\x7b\x8e\x11\xb2\xae\x45\x1b\x5e\x89\xf1\x4b\xb7\xdb\x73\x8d\x50\x75\xfd\xa9\x0a\x28\xdb\x9b\x30\xb7\x67\x66\
x4e\x5d\x82\xaa\xf2\xc9\x8e\x66\x66\x8e\x51\x85\x56\xc5\x93\x1d\xd2\xcc\x1c\xb3\x10\x35\x74\xc0\x3b\xe2\xb7\x2a\x89\x41\x3a\xf5\xcc\x14\xaa\x6a\x51\x2a\x2e\xb0\x89\xb1\x05\xed\xf6\xcc\x74\xd1\xf4\xa7\xd2\x20\x32\x61\x98\xf9\x71\x82\xe6\x2c\xde\x9a\xe8\xa5\xb5\x67\xe6\xc6\x89\x8a\xb3\xf1\x37\x02\x6e\xdf\xcc\x0f\xae\x37\x93\x84\x88\x5e\x32\x49\x12\x9d\x69\x5a\x93\xc5\xed\x9b\x69\xa1\x69\x4c\x56\x6a\x20\xdf\xe0\xa0\xf9\x25\xa3\xdb\x37\xd3\xc3\xa0\x30\x2b\xf4\x30\xbf\x89\x74\xfb\x66\x7a\xe8\xd2\x52\xa7\x87\x0d\xcd\x4c\x0f\x93\xa6\xac\xd1\xc3\x06\x69\xa6\x47\x58\x8f\xa0\xf2\x0a\xcc\x8e\x66\x26\x84\x59\x45\xfe\xb7\xd7\x4d\x6e\x7f\x08\x8c\x83\xa7\x60\xae\x8a\xc8\x6a\xb7\xa8\xb8\x39\x69\x3f\x30\x73\xfb\x66\x82\xd4\x74\xa4\xfe\x7e\x99\xf0\x97\x32\x84\x68\x6e\x9a\xc9\xb1\xf8\xe0\x80\x6f\x78\x3d\xe6\x5e\x9a\x39\x61\xd2\x8b\xea\x01\xdf\x08\x69\x26\x87\x59\x28\x6a\x07\x7c\x23\xae\x99\x21\x66\x85\x78\xf2\x0b\x42\xf7\xd2\x4c\x15\x5d\x1a\x36\x46\xa0\xfa\x3e\xd9\xbd\x34\xf3\x45\x55\x87\x30\xff\xda\xa4\xf6\x2b\x01\xf7\xd2\x4c\x94\xef\xff\xd5\xeb\xaa\x89\xff\x0f\x00\x00\xff\xff\xde\x17\x3d\x84\xc8\x2f\x00\x00") + +func fontsSmallFlfBytes() ([]byte, error) { + return bindataRead( + _fontsSmallFlf, + "fonts/small.flf", + ) +} + +func fontsSmallFlf() (*asset, error) { + bytes, err := fontsSmallFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/small.flf", size: 12232, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSmisome1Flf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x98\x5f\x4f\xdb\x3c\x14\xc6\xef\xfd\x29\x1e\x21\xa4\x17\xa4\x26\x6e\x51\x79\x01\xe7\x26\x17\x88\x8d\x2d\x70\xc3\x2e\x23\x59\x69\xeb\x74\x11\xad\x8d\x92\x14\xd6\xc9\x1f\x7e\x72\xfe\xba\x14\x41\x29\x99\x34\xa1\xa4\x17\xb5\x1b\xe7\xe7\xe7\x39\xc7\x76\x4f\x1b\x2f\xe2\x93\xe8\x10\x67\x38\xc3\xe8\x1c\xce\x08\xa3\x73\x92\x2d\x93\x4c\x2d\xc5\xc8\x8d\x17\x31\xb9\x54\x4f\x32\x4b\x7e\x8b\x19\x1e\x45\x9a\x25\x4a\x42\xc5\x28\xee\xe7\x69\x32\x2d\xc6\xc0\xc1\x8d\x8b\x20\x91\xb3\x4c\xac\x71\xb4\x9c\x2f\x8a\xa6\x2f\xd7\xbf\xdc\x69\xe6\xce\x56\xae\x98\xad\x8e\xc9\x95\x98\xa4\xab\x28\x5d\xe3\x64\x3c\xc0\xe8\xe2\xe2\x7f\xe2\x38\x0e\xd9\x44\x11\x72\x95\xcc\x17\x22\xc7\x54\xc9\x7a\xbe\xc9\x1a\xdf\x85\xcc\x71\x1b\x65\x99\x90\x38\xba\x17\x32\x97\xfe\x74\x3d\x11\x69\xf6\x10\x4d\x85\xab\xd2\xf9\xf1\x00\xe7\xce\x68\xe8\x5c\x8c\x07\x98\x44\x99\x98\x11\x25\x91\xff\x14\x88\x95\xcc\x33\x3c\xa8\x2c\x17\x33\x43\x0a\x84\x94\x22\xcd\x71\x97\xab\xe9\x3d\x23\xe4\x2a\x55\x4b\x86\xcc\xf4\xfc\xf8\x29\x71\x57\x8f\x91\x2b\x17\x38\xda\x18\x77\x4c\x2e\xa3\x5c\x30\x8c\x4e\xf1\x6d\xb5\x30\xda\xc7\x18\x0e\xd9\x70\xcc\x4e\x4e\xf1\xe5\xe6\x07\x21\x5f\x45\x2a\x10\xa5\x02\xc6\x4d\x39\xa9\x8b\x5b\x25\x9d\xb8\xb4\x73\xfd\xdf\x12\x51\x9c\x46\xc9\x6c\x80\x24\xc6\x5a\xad\xf0\x14\x49\x19\x55\x3e\x73\xa3\x75\x39\xc0\x44\x90\xe5\x1a\xf3\x95\xc8\x72\x17\xd7\xb5\x6c\xe3\xa3\x09\x53\xe5\x68\x22\x62\x95\x0a\x97\x98\x18\x76\x78\x11\x3c\xbf\x9a\x5b\x08\x20\x20\x21\x21\x90\x22\x07\x70\x87\x1c\x0a\x53\xdc\x6f\x3d\x03\x6c\x73\xb6\xae\xad\x98\x37\x13\x35\x90\x43\x00\x87\xfe\xee\x6f\x3e\xd9\x78\xf5\xdd\x4f\xd7\x35\xcb\x82\x73\x0e\xa0\x68\xd3\x10\x08\xcb\x36\x65\xac\xe8\xf8\xa4\x68\xb2\x90\xf3\xd0\x27\x08\x69\xc8\x18\x05\x68\x39\xbe\x68\x96\xcf\x86\x94\x73\x0a\xec\x05\x65\x16\xb4\x6a\xee\x09\xa5\x36\xb4\x7c\xbc\x84\x9a\x4f\xd1\x05\x94\x76\xa4\x74\xd3\x7e\xd8\x42\xc3\x6e\xa0\x46\x74\x6d\xbf\x26\xf9\xd6\x39\xb2\xa7\x7d\x66\xc5\x74\x37\xfb\x4d\xe0\x29\x33\x83\x78\xad\x94\x76\xb2\xa4\x78
\xd8\x2a\x0d\x0d\xb6\x56\xca\xe8\xfe\xd9\xdf\x09\x5a\x72\xde\x8c\xa9\x6d\x1f\xad\x7d\xe7\xa0\x86\x7a\xcc\x73\x0e\x06\xce\x81\x19\xa3\x99\x06\x74\xad\x54\x73\xae\x77\x80\x82\xa2\x4a\x54\xa3\xa8\xd2\x5f\x8e\xd9\xd5\xbe\x05\x65\x01\x0f\x2a\xa5\x34\x68\x97\x14\xa7\xef\x4e\x94\x05\xd5\xe0\xba\xb6\xaf\xdb\xec\x6b\x2b\xfb\xba\xb3\xbd\xff\x17\xb6\xe9\xc6\x3a\xfd\xc0\x8e\x2a\xb7\x51\x09\x6d\xe4\x75\x7a\x9e\x7a\xac\x85\x6a\x66\x29\x7d\x65\x49\xbd\x08\x0d\x9f\x9f\x52\xef\xdc\xfb\x0d\x14\xed\x8e\x2a\x67\xa8\xec\xd3\xce\x76\x14\xe5\xf4\x23\xd9\x7f\x09\xaa\x19\x0b\x6a\xa8\x66\x56\x4c\x03\xcf\xdb\x29\xfb\x36\xb4\xe8\x94\x4a\x19\xb5\x0e\x94\xfd\x95\x1a\x79\x9c\x07\x55\xf6\x99\x9d\x7d\xaf\x8e\xa9\x66\xac\x48\xf9\x3e\xd0\xc2\x73\x9b\x28\xcf\x7b\x6f\xa2\x5e\x3c\x4f\x6d\xa5\xcc\x86\x7e\xe8\x2b\x3a\xb8\x6b\xa0\xe6\xe7\xc4\x1b\x31\xed\x2b\xa8\xbe\x82\xea\x2b\xa8\xbe\x82\xf2\xfb\x0a\xaa\xaf\xa0\xfa\x0a\xaa\xaf\xa0\x3e\x73\x05\xf5\x8f\xfc\xd5\xf4\x7a\xf7\x4f\x00\x00\x00\xff\xff\xf4\x63\xd4\xfe\x38\x17\x00\x00") + +func fontsSmisome1FlfBytes() ([]byte, error) { + return bindataRead( + _fontsSmisome1Flf, + "fonts/smisome1.flf", + ) +} + +func fontsSmisome1Flf() (*asset, error) { + bytes, err := fontsSmisome1FlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/smisome1.flf", size: 5944, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSmkeyboardFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\xd5\x7f\x7f\xdb\x34\x13\x00\xf0\xff\xf5\x2a\xae\xcf\x53\x20\xa6\xb1\x9d\x64\x59\xbb\x9a\xad\xa4\xa5\xeb\x16\xba\x66\xa3\xc9\x80\x81\xc1\x28\xf6\xc5\xf1\x62\x5b\xc6\x52\x1a\x32\x0c\xaf\x9d\x8f\x1d\x39\x4d\xba\x6e\x17\x86\xfa\xc9\xc9\x96\xbe\x27\x4b\xf2\x8f\x4e\xe2\x49\x87\xef\x43\x17\xba\xd0\x3e\x84\x36\x3c\x3c\x62\x4c\x26\x33\x5c\x8e\x05\xcf\x03\x6b\x12\x4f\x18\x3b\x0d\x78\xa6\x30\x80\x89\xc8\x61\x12\x85\x31\x2a\x18\x2f\xe1\x12\x53\x05\x03\x2e\x25\xa6\x4d\x98\x61\xaa\xd2\x9e\xbf\x1c\x63\x2e\x33\xee\xa3\x25\xf2\xb0\x09\xed\xb6\xdd\xe9\xd8\xc7\xdd\x26\x8c\xb9\xc4\x80\x89\x14\xd4\x14\x21\x0a\x90\x3b\x8c\x5d\xe4\x22\x71\xca\x9e\x5e\xc0\x13\x9c\x59\x33\x35\xb5\x24\x42\xe3\x8c\x4b\xb8\xc2\xe8\x2d\xe6\x06\x1b\xe0\x42\x86\xb9\x98\x67\xd2\x81\x1c\x7d\x8b\xe7\x4a\x5a\x5c\xfa\x51\xc4\x86\xf3\xf1\x5b\xf4\x95\x03\x23\x1e\xcf\xcc\x38\x4a\xd1\x81\xad\x79\x9f\x73\x85\x0e\x74\x3a\x30\x10\x37\xd0\x3e\x3e\xee\x42\xeb\xc8\xe9\x1c\x3a\xed\x16\x98\xad\xc3\x56\x8b\xbd\xcc\x43\x9e\x46\xef\xb8\x8a\x44\xea\xc0\xb5\x58\xf2\x18\xfa\xa9\x54\x91\x9a\x2b\x04\x31\x81\x11\xfa\xd3\x54\xc4\x22\x5c\xb2\x17\x51\x8a\xd2\x81\x6e\x9b\x0d\x31\x0d\x30\x77\x60\x2c\xc6\xbc\x17\xf2\x30\x41\x6b\xb1\xe0\x96\x2f\x12\x76\x9a\x65\xb9\xb8\xc1\x40\x77\xd6\xcd\x57\x28\x25\x0f\xd1\xec\x9f\x3b\xf0\xf8\x01\x97\xf9\x34\xda\xf7\x45\xba\x9d\x7c\xc2\xae\x71\x82\x39\xa6\x7e\x79\x9d\x92\xc5\x2a\xdb\xe7\x22\x7c\x9f\x65\xf1\xd2\x1c\x89\xf7\xb7\x8e\x0d\x06\xa3\x57\xe6\x2b\x21\x55\x94\x86\xe6\x73\x21\x95\x03\xdb\x13\x64\xac\x9f\x02\xcf\x55\xe4\xc7\x08\xef\x8d\xdd\x04\x29\x27\x51\xa4\x7a\xbe\xb4\xe6\xc9\xd8\xc2\x60\x0e\x8d\xe1\x3c\xc3\x7c\xa8\x72\x44\x75\x11\x85\x53\x85\x79\x67\x34\xcf\xc7\xc2\x80\x45\x1e\x29\x94\x0e\x3b\x01\x76\x02\x7d\x98\xf2\x1b\x04\x5f\x24\x99\x90\x18\x00\x87\x14\x17\xf5\xd3\x32\x11\xa9\xda\x83\x27\x06\x3b\x81\x48\x41\x24\xc1\xe7\x71\x8c\xc1\xd6\xed\xb2\xa0\xaf\x20\x47\x89\xc9\x38\xc6\xea\x31\xe1\xbe\x9a\xf3\x78\x8d\x9a\xb0\xc0\x38\x6e\xc2\x2c\x4a\x03\xae\x47\xe3\xe9\x72\xc1\x97\x7b\x30\xc5\x1c\xcb\x71\x39\x48\x9e\x64\x31\xea\x6e\xf0\x56\x65\xd7
\x9a\x9d\x40\xe1\x42\x59\xec\x9d\xeb\x32\x07\x0e\x4c\xd3\x3c\xd8\xbd\xae\x72\x0a\x80\x2a\xee\x58\xaf\x72\xa4\x6e\xe3\xba\x4e\x74\x9d\xe9\x3a\xd6\x35\xde\xe6\x7c\xc2\xdc\xec\x6a\x37\xdc\x9d\x6b\xc6\x60\x73\x1b\xf5\xd1\x9d\x40\x74\xb0\xa2\x38\x85\xa2\x28\x60\x55\x8a\xa2\x38\x2b\x43\xbf\x0c\xa3\xed\x8e\xea\xf4\xe5\x3a\xac\x3b\x58\x51\x78\x5e\x51\x86\xaa\x14\xeb\xd3\x62\xe7\x0e\x56\xae\xa9\x5e\x98\x3e\xba\x13\xa8\x8e\xea\x41\xda\x9c\xfd\xb3\xed\xd9\x57\x6d\x17\xdb\x6d\x6f\xd6\x8b\x79\xfd\x81\x15\x7d\x7c\x09\xbb\x2f\xeb\xe3\xeb\xa0\xd6\x56\xdf\xa4\x61\x19\x2e\xb7\xd7\x70\x55\x86\xa7\x65\xd8\xdb\xee\x70\xca\x60\x94\xe1\x1a\xd5\x3c\x4f\x77\x5f\xd6\x07\x3a\x6e\x77\xe7\xdf\xdf\x9d\x8d\xb6\xb8\x1e\x06\xfe\x4b\x89\x8b\x7a\x8f\xcf\xb8\xb4\x3e\x79\x98\x8d\x77\x89\x61\xc2\xa3\xf8\x9e\xff\x8c\x65\xa9\xde\x15\x1d\xcb\x72\x39\x7a\xde\x84\x73\xcc\x94\x55\xfe\xc3\xba\xe2\xfe\xd4\x82\x73\x94\x51\x98\xb2\xf5\xd7\x0d\xd6\x39\x75\x96\x5d\x06\x77\x15\x6f\xdf\xc1\x7b\x0a\x6b\xe8\xf9\x55\x78\x95\x51\xe6\xd4\x63\xd4\xc5\xd5\xb5\xb1\xf1\x5b\x57\x2e\xd3\x1f\x31\xdd\x50\x62\xd7\xf3\x3c\xe3\xce\x20\x55\x8f\xeb\x79\xb6\x0b\x60\xbb\xb7\xa7\x75\xee\x6a\x18\x00\x68\xac\x1a\x5c\x4f\xe7\x34\xea\xd6\xfa\x8a\x06\x80\xe1\x95\x0d\x46\x79\x91\x92\x18\x5e\x9d\xbb\x39\x99\x6a\x00\xc3\xd5\x29\xab\x45\xe8\x2b\x56\x73\xb3\x57\x6b\xd6\xdf\x7a\xd0\x87\xd5\x8f\x35\xf4\x16\xd9\x9e\x5d\xe5\xac\x4e\x6e\x37\x43\x6f\x56\xfd\xe8\x19\xfa\x67\xaf\x4f\xed\x6a\x0f\x3e\xf6\xf4\xb9\xd5\x90\xab\xc8\x14\xc6\x96\x03\x07\xdd\x43\xb3\xd1\x32\x1e\x99\x47\xc7\x2d\x38\x7c\xd0\x7a\x74\x6f\xe2\xd0\x6c\xb7\x5a\xd0\xed\xc2\x50\x09\x7f\x36\x15\x71\xd2\x84\xe1\x02\x03\x4c\x19\xdb\xf8\xe6\xf6\xd8\xc6\x7b\xda\x63\x1b\x6f\x5b\x8f\x6d\xbc\x33\xbd\x9e\xfe\xbe\x97\x64\x6f\x4d\x6b\x75\x07\xfc\x8f\x02\xff\xa7\xc0\x3e\x05\x3e\xa3\xc0\xe7\x14\xf8\x82\x02\x0d\x0a\x18\x14\xf8\x92\x02\x07\x14\x68\x52\xc0\xa4\x80\x45\x01\x9b\x02\x2d\x0a\xb4\x29\xd0\xa1\xc0\x03\x0a\x74\x29\xf0\x90\x02\x87\x14\x38\xa2\xc0\x23\x0a\x1c\x53\xc0\xa1\xc0\x57\x14\x78\x4c\x81\x27\x14\x38\xa1\xc0\xd7\x14\xe8\x51\xe0\x94\x02\x67\x14\xf8\x86\x02\xe7\x14\x78\x4a\x81\x0b\x0a\x3c\xa3\xc0\x73\x0a\xf4\x29\xf0\x2d\x05\x2e\x29\xf0\x82\x02\x57\x14\x18\x50\xe0\x25\x05\x5e\x51\xe0\x3b\x0a\x5c\x53\x60\x48\x81\x11\x05\x5e\x53\xe0\x7b\x0a\xfc\x40\x81\x1f\x29\xf0\x86\x02\x3f\x51\xe0\x67\x0a\xb8\x14\xf8\x85\x02\xbf\x52\xc0\xa3\xc0\x6f\x14\xe0\x14\x18\x53\xc0\xa7\x40\x40\x01\xa4\xc0\x84\x02\x21\x05\xa6\x14\x88\x28\xf0\x96\x02\x33\x0a\xc4\x14\x48\x28\x90\x52\x40\x50\x20\xa3\xc0\xef\x14\xc8\x29\x20\x29\xa0\x28\x30\xa7\xc0\x0d\x05\x16\x14\xf8\x83\x02\x4b\x0a\xbc\xa3\xc0\x9f\x14\x28\x28\xf0\x17\x05\xfe\xbe\x1f\xe8\xbf\x4f\x3d\xf8\x27\x00\x00\xff\xff\xd7\x48\x84\xb7\x84\x16\x00\x00") + +func fontsSmkeyboardFlfBytes() ([]byte, error) { + return bindataRead( + _fontsSmkeyboardFlf, + "fonts/smkeyboard.flf", + ) +} + +func fontsSmkeyboardFlf() (*asset, error) { + bytes, err := fontsSmkeyboardFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/smkeyboard.flf", size: 5764, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSmscriptFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x5a\xff\x6f\xe2\xb8\x12\xff\x9d\xbf\x62\x7e\x40\x3a\x78\x0f\xd6\x24\xa5\x5f\x90\x56\x2b\xb2\x90\xb6\xd1\xd1\x84\x0b\x61\xf7\x56\x8a\xe4\x47\xb7\x69\x8b\x8e\x42\xc5\x97\x3b\xad\xe4\x3f\xfe\x29\xf6\xd8\x8e\x13\x07\xe8\xdd\x89\x90\x92\x99\x8f\xc7\xe3\xcf\x8c\xc7\x93\x7d\x5e\x3d\xbb\x8b\x26\x5c\x42\x1f\x9c\x0b\xe8\x81\xd3\x83\x1e\x5c\x0c\x7a\xfd\xc6\xec\x6d\xf6\x73\xbb\x7c\xdf\xc3\xe3\x2f\xb8\x5b\x65\xeb\x35\x8c\x5e\x17\xef\xef\xd9\x6a\x05\x7d\x32\xb8\x80\x6e\x17\x1e\x17\xbb\xec\x09\x36\x6b\x10\x92\x8d\x60\xfd\x73\x75\x78\xca\x76\x10\xcc\x22\x98\x2c\xf6\xcb\x75\xd7\x69\x3c\x2f\x5f\x56\xd9\x1e\xb6\xd9\x2a\x5b\xec\x32\x70\x3f\x39\xb9\xaa\xe3\x82\x77\x78\x01\x67\x30\xe8\x37\xa6\xd9\xf6\x6d\xb9\xdb\x2d\x37\x6b\x58\xee\xe0\x35\xdb\x66\x8f\xbf\xe0\x65\xf9\x77\xb6\x86\xfd\x06\xde\x36\x4f\xcb\xe7\x5f\xb0\x7f\x5d\xee\xe0\x79\xb3\xde\x77\x60\xb1\x83\xd5\x66\xfd\x92\x7f\xef\x5f\xb3\x06\x17\x58\x66\xdb\xdf\x76\xb0\x5e\xbc\x65\x39\xc6\xfb\x6a\xf1\x53\x58\xb6\x80\x9f\x9b\xb7\xb7\x6c\xbd\x87\xd5\x72\x9d\x7d\x6a\x34\x1e\x84\xf4\x53\x3e\xad\xe9\xe2\xb0\x82\xaf\x87\xed\x7e\xb3\x86\xcf\xbb\xcd\xea\xb0\x5f\x6e\xd6\xc3\x6c\xb1\xdd\xbf\xae\x96\xeb\xbf\x3e\xad\xb3\xfd\x17\x70\x5c\x32\xb8\xca\x0d\x59\x8a\xd9\xc1\x3a\xfb\x07\xde\x17\xdb\xc5\x5b\xb6\xcf\xb6\x8d\xdd\xe1\xfd\x7d\xb3\xdd\x0b\xc0\xdb\xe0\x2e\x9f\xeb\x62\xfd\x94\xdf\x7e\x5f\xae\x3f\x01\x3c\x2c\x7e\xc1\x62\xb5\xdb\xc0\x63\x06\xbb\xd5\xf2\xe5\x75\xbf\xfa\x05\x6f\xd2\x8a\xe7\xcd\x16\x1e\xb3\xfd\x3e\xdb\xc2\x61\x97\x35\x36\xcf\x1c\xfe\xf9\xb0\x5a\x75\xff\x59\x3e\xed\x5f\xc9\x5f\xd9\x76\x4d\x76\x6f\x87\xdd\x2b\x2c\x56\xfb\x6c\xbb\x5e\xec\x97\x7f\x67\xbb\x0e\x3c\x1e\xf6\xf0\x94\x3d\x2f\x0e\xab\x3d\x6c\x0e\xfb\xf7\xc3\x3e\x9f\x79\x18\x25\xf0\xf3\x75\xb1\x7e\xc9\x9e\x3e\x35\x1a\x4d\x68\x0e\x6d\x97\x61\x03\x60\xd8\x00\x86\x9f\x0d\xff\x3b\xbf\xc9\xef\x58\xfe\x53\x2e\x89\x17\xfe\x84\xff\x37\x6c\x00\x65\x94\xd1\xc2\x37\x30\x60\xa0\x9e\x17\x04\x01\x05\x5a\x8c\x32\xa9\xd8\x56\x0a\x05\xc1\x56\x1b\x88\x40\x20\x42\x91\x40\xab\x5d\x42\x14\x62\xc3\x06\x90\x94\x0c\x1b\x90\x92\x14\x7f\x47\xe3\xf3\x1f\x9b\xf8\x41\x1d\x8e\x94\xcf\x06\xe4\x05\x52\xf5\x24\x15\x16\xca\x0b\x31\x2d\x4f\x19\x37\xa8\xdb\xfd\x4f\xb7\xcb\x61\x58\x6a\x9f\x22\x30\x10\x82\xff\x15\x82\xf8\xb7\x16\x2c\x7c\x84\x99\xf8\x54\x7f\x75\xbb\xa8\xda\x54\x3f\x5a\x14\x0b\x7e\x10\xb3\x12\x2e\x23\x12\x49\x3c\xa6\xd2\x7f\xa9\x98\x33\xce\x30\xa5\xc4\xb4\xaa\x93\xab\x32\xc3\x03\xf8\x84\x0a\xd4\xb6\x1e\x81\x52\x3d\x02\xa5\xe2\x2f\x4a\xb9\x09\x4d\xc8\x57\x21\xc5\xbf\x4a\xbe\x61\x80\x9e\xa7\xc8\x13\x28\xf3\x44\x80\x31\xca\xc7\xac\x03\x13\x0f\x09\xff\x62\x84\xcb\xe4\xf3\x29\xc8\x48\xa3\xa0\x49\x90\x44\xc2\x78\xc3\x9d\x62\x90\x16\xa5\x6d\xe1\x35\xdb\x58\x14\x27\xcf\xfd\x91\x52\xe1\x20\xe9\x1d\xbd\x2c\x1b\xa4\x99\x5e\x96\xd2\x8f\xc4\xe4\xdf\x67\xa4\x5e\x31\x90\xb4\xd9\xda\x7a\xc3\x5e\xcd\xd1\x2f\x82\x9e\x0d\x35\x5b\x71\x8f\x91\x94\x5f\x37\x26\x07\x28\x55\x34\xa0\xb4\x83\x4c\x20\x3c\xec\xf8\x6d\x3e\x33\x01\x98\xe6\xb2\x44\xe9\x75\xa4\x1a\xb2\x98\x69\x3e\xa7\x94\x68\xa7\x2b\x16\x21\x5b\x18\xa1\x68\x8d\x98\x25\x6b\x69\x51\x89\xdd\x54\x81\x8d\xc8\x4d\x50\x26\x68\xdc\x5c\xba\x29\xed\x6f\xe5\x78\x80\xc0\xdc\xf6\x56\x4a\x0b\x4c\x96\x8b\xd6\x14\xab\x26\x90\xbf\x74\x05\xb0\x81\x2b\x61\xb9\xa7\x5b\x6d\x60\x54\xa4\x35\x22\x5c\x02\x2d\xa2\x23\xb2\x14\xe0\xb9\x34\xb7\x3c\xe5\x74\x20\xad\x4a\x28\xa1\x24\x91\x7c\x07\xd6\xed\x32\x69\x36\x6b\xdb\x61\x41\x24\x14\x0a\x62\x25\x72\xf7\x92\x72\x82\x11\x89\x47\xb8\x8b\xc9\xb4\xc4\x6d\x62\x6a\xe8\x8e\x18\x9a\x22\xdb\x59\x2a\x53\x2f\xa4\x25\xb7\x72\x8f\x8
b\xb1\x58\x5b\x27\x2f\x31\xc9\xb4\xbc\x08\xd0\xa1\x79\x32\xc1\x89\x31\x49\x83\xf2\x6d\x91\x12\x05\x77\x20\x2b\xb4\x31\x5a\xc9\xd4\x91\xec\xc0\xc9\xa6\x34\x25\xc8\x3b\x99\xb8\x2a\xeb\xad\x49\x87\x64\x43\xd3\x99\xdc\xab\x0c\x58\x04\xa6\x14\x6d\x61\x79\xe8\x2b\x4e\x97\x48\xad\x07\x50\x23\x80\x1c\xa2\xd6\xab\x8a\x26\x62\x06\x20\x48\xd2\x2e\xc4\xb2\xc1\x3c\xf4\x7c\x13\x6f\x6a\x98\x07\x2a\x00\x0a\x9e\x53\xca\xe5\x60\x2c\xea\xa1\x1a\x53\x4e\x51\x4a\xc6\x48\xc6\x50\x5a\xab\xa4\x58\xd0\x2d\xa9\x57\x07\x4d\x75\xca\xfd\xf2\x19\x33\x1c\x01\xa8\x66\x0d\x73\x40\x35\x18\x0e\xc4\x50\x5c\x72\x1c\xd7\x02\xda\x85\xa4\x4e\x70\xc9\x95\x8c\xb1\xc9\xe3\xa6\xa3\xf7\xcb\x14\x97\x1b\x87\xd0\xb9\x92\x9a\x35\x00\xbf\x50\x36\xe4\x45\xc6\xd0\x28\x80\x9a\xe5\xe4\x5d\xfe\x6a\xaa\x8c\x8e\x7b\x40\x5a\xad\x47\x50\x16\xd3\x2c\x41\xef\xa6\x84\x95\xb3\xa5\x60\x6a\x1b\x09\x9b\xf2\xed\x21\x25\xe6\xf6\x80\x58\x08\x25\x53\x9e\x6d\x1f\x06\xca\x4e\x0e\x38\x94\x3b\xbf\x48\x47\x4a\x40\x3f\x16\xf9\xaa\xf8\x98\xb5\x4d\x53\x3a\x72\x10\x31\x86\xb1\x42\x85\x39\xe1\xa4\x54\x89\x50\x32\x25\x77\x9f\x5e\x45\x52\xf4\xfb\x50\x6d\x75\x48\x1b\x22\x58\x64\x73\x1a\xee\x32\x69\xc9\x21\xd6\x89\x54\x03\xaf\x70\x4b\x58\xfe\x3f\xbf\x6d\xd6\xa4\x3c\x53\x0b\x75\x94\x46\x4d\xc2\x90\x2b\xc7\x57\xb7\x50\x9f\x99\xbb\x84\x4c\xa0\x48\x02\x26\xe4\x78\x5e\x38\x8f\x53\x85\x45\x02\x4c\xeb\xc8\x18\xee\xc3\x66\x75\x01\x50\xae\x68\x5e\xd3\x4a\x3e\xca\xa8\x8c\x5e\xb4\xad\x3c\x05\xc3\x29\x2a\x9d\x34\x0d\x02\x5a\x27\xcc\xf3\x10\x0e\x0c\x65\xf3\xaa\x6b\x24\x33\x17\x6a\x14\x95\xac\x51\x4b\xc4\x7c\x9a\xa4\x4c\x8f\x8a\x11\xd2\x5e\x21\x57\x62\xb3\xdc\xb7\x48\x5e\x68\x71\x3c\xb9\x8c\x5a\x90\x5f\x09\xf7\x56\x81\xb9\xe2\x1c\xc2\x2a\x9f\x72\xca\x92\xe7\x2b\x2c\xf4\xd5\xe9\x07\x6b\xc5\xe2\x15\x4f\x42\x79\x84\xa0\x6d\x1d\xbd\xd2\x80\x25\x46\xc9\xed\x1b\x19\x4f\x72\x22\xb8\xff\x1a\xbb\xae\x30\x8a\x94\xf3\x7f\x69\x7b\xb2\xef\x49\x1b\x65\xcd\x89\x9c\xb7\x91\x86\x1c\x89\x0a\x3d\xb7\xb3\x28\x45\x31\x03\xf0\xeb\x67\x9d\xb5\xf8\x63\xe7\xaa\x07\x10\x46\xdd\xaf\xb1\xef\xfd\x0e\xb3\xa9\x37\xf2\x75\xba\xd6\x9f\x5c\xd0\x01\x08\xc2\x6f\x7e\x9c\xf8\x63\xf0\xff\x1c\x4d\xbc\x07\x2f\x09\xa2\x10\x1e\xbc\xf8\x77\x5d\xfa\xcb\x35\x14\xe0\x2e\xc0\xc8\x0f\x13\x98\x05\x77\xa1\x91\x88\xa9\xe1\x05\xaa\xa2\x14\xf5\x2e\x00\xa6\xd1\x3c\x1c\x2b\x45\x2a\x23\xed\x7f\x32\xd4\x40\x96\xc1\x05\xf7\x39\x57\x7d\x80\xd1\x3c\x8e\xfd\x70\xf4\x03\x75\x53\xa0\xf2\x98\x28\x36\x3e\xf4\x26\xd1\x5b\x20\xea\x5e\x02\xfc\xf0\x43\xa5\x86\x74\x4d\x29\x11\xa7\x13\x86\xc7\x13\xf3\x60\xeb\x5c\x5d\x01\x7c\x8d\xa3\xdf\xfd\x10\xbe\x7a\x71\xd1\x01\x9a\xcc\xce\xd5\x35\xc0\xcc\x1f\x71\x7f\xe1\x94\x78\x1d\xc4\x27\x91\x97\x48\x58\x27\x71\xd9\x1b\x80\x71\xe0\xf9\xb1\x3f\x0b\x66\x9c\x11\x3c\x3f\x35\x4b\x57\x29\x3d\x00\x18\x45\xd3\x1f\x71\x70\x77\xaf\xfd\x6c\x1e\x7d\x40\x1d\x7d\xb0\x88\x67\x82\xd9\xb2\xb2\xc4\xa3\x8f\x73\xdd\x03\xb8\xf5\x1f\x82\x30\x08\x7d\x88\xe2\x71\x10\x7a\x13\x08\xc2\x71\x30\xf2\x92\x28\x56\x5b\x5b\x4b\x1c\x63\xbb\xdd\x2e\x86\x5d\x81\x9c\xce\xb5\x03\x30\xf1\x6f\x93\xee\x34\x0a\xc2\x24\x08\xef\x60\x1c\xcd\xbf\x4e\x7c\xf0\xc2\xbb\x89\x0f\x7f\xcc\xa3\xc4\xa0\x0d\x5a\xc9\x8f\x87\x78\x3e\xd4\x85\x89\x73\xed\x02\x6f\xe3\x28\xfe\x14\xce\x7d\xac\x14\xf3\xce\xf5\x05\xc0\x2c\xba\x4d\xe0\xfe\xc7\xf4\xde\x0f\x75\xda\x01\x61\x6d\x45\xbe\x0f\x10\xfb\x77\xc1\x2c\xf1\x63\x7f\x6c\xf5\x5d\x47\xfb\x8e\x51\xe9\x3b\x60\xa9\xc5\x77\x97\x00\x0f\xde\x28\x8e\xc2\xea\x49\xd6\xfc\xc2\xc1\xaf\x00\xc6\xfe\x5d\xec\xfb\x38\xb0\x68\xf4\x58\x2a\x2d\xe7\xfa\x1a\x60\x3a\x99\xcf\xba\x0f\x41\x38\x9f\x95\x63\xa9\xd4\x78\xd1\x3c\x95\xda\x37\x00\xb3\xf9\xd4\x8f\x67\x
a3\x38\x98\x26\x90\x7c\x8f\x64\xc1\xc7\x3b\x00\xb4\x34\xd8\xa0\x24\x7e\x1f\xfb\x7e\x03\x64\xbb\x83\x87\x84\x20\x6b\xd1\x95\x37\x3d\x00\x6f\x34\x4f\x7c\xf0\x46\x79\xc0\x9b\x6d\x28\x59\xfa\x39\x37\x0e\xc0\x43\x30\x8a\xa3\xe2\x24\xec\x99\x8c\xc9\x4c\xc6\xd4\x4c\x6e\x5c\x80\x69\x30\x19\xc5\xd1\x77\x15\x44\x54\x25\x12\xd1\xaa\xc0\x73\x2c\x30\xd5\xac\x10\xaa\x17\xf9\xc0\xe3\xf1\xc4\x87\x71\x94\x98\xcc\x68\x46\xcd\xca\x74\xf2\x34\xe2\x8f\x83\xc9\xc4\x6b\x48\x51\x79\x11\x05\x6e\x9b\x8b\x5d\x9a\xbe\x8a\x42\xbf\xda\x52\xd2\xae\xbd\xb9\xca\x29\x32\x1b\xcd\x27\xb5\xf1\xc5\x93\x02\x77\xaf\x85\xaf\x37\xd7\x00\x3c\xcc\x3f\x14\x5b\x29\x36\x4f\x78\xf7\x84\x14\xe1\x6e\x00\xbe\xcd\x27\x77\x5e\x0c\xb7\xb1\x27\x72\x53\x14\xe6\x30\x5e\x9c\xf8\xb1\x79\x84\x95\xa5\x35\x30\x22\xbb\x9d\xc4\xe8\x63\x21\xe6\xc0\x8e\x79\xef\x4d\x6e\xcd\x16\x01\xc1\x9a\x89\x80\x6c\xb1\x21\x15\x15\xd6\xa0\x57\xc5\xe2\x6c\x94\x16\xce\x38\x2d\xf5\xa6\x42\xa4\x91\x94\xb6\x95\x95\x15\x33\x05\x76\x71\x33\xfb\x63\xee\xcf\xaa\x29\xa9\x50\x60\xb7\x0a\x9d\x3f\x67\xe0\x02\x4c\xbc\x24\x08\x61\xe4\x4d\x83\xc4\x9b\xc0\xc4\x4f\x12\x3f\x06\x0f\xbe\x07\xc9\x3d\xdc\xc5\xde\x37\x9f\x9f\xb2\xe0\xec\x02\xc4\x19\x5c\x1c\x07\xe5\xb1\x25\x67\x73\x36\x68\xff\x38\xe8\x28\x88\x47\xf3\x87\xdb\x89\xff\xa7\x38\xb0\x7f\x00\xf9\xf2\x38\x72\x12\x4c\xc6\x7e\x03\x8b\xa5\xb3\x41\xaf\x8e\x83\x1a\x7b\xe2\x07\xaa\x3b\x67\x70\x7d\x1c\x38\xce\x23\xc9\xfb\x1a\xf1\x65\x93\xcd\x9e\x3c\x0c\x4f\x23\xdf\xd4\x21\x0b\x28\xda\xd1\xfd\x3e\x60\xba\xe5\x07\xac\xab\xba\x7e\xc4\x6c\xfc\x49\xe4\x41\x0d\xf2\x08\xd7\x4e\x67\xa7\x73\x5a\x8a\x7c\x2e\xc3\x86\xdb\xeb\xd5\xc0\xfa\x55\xf2\x0e\x75\x4f\xf1\x33\x15\xd0\x9f\x4b\xbd\x44\xb7\xe7\x1c\xc7\x2b\xf2\xf6\x2c\xbc\xba\xe0\xf2\x6b\x28\x7b\x16\x68\x5d\x70\xf9\x76\x62\x9d\x85\x59\x17\x5b\x81\xe9\x48\x79\x80\x39\xd1\xe6\x74\x7b\x75\x11\x15\x94\x1c\x29\x3b\xe0\xa7\xf0\xea\x82\x29\xb0\x38\x12\xbb\x75\xa7\x41\xeb\x02\x29\xa8\x38\x52\x1d\x64\x4e\x62\xd6\x85\x90\x9f\xdc\xe3\x06\x0f\xc5\x16\x78\x97\x75\xeb\x5a\xe0\x6e\xaf\x2e\x68\x42\x33\x2d\x75\x64\x5e\x3a\xb7\x35\xeb\x3a\x75\x71\x13\xd5\xc5\xcd\xd1\x73\xa4\xeb\xd4\xc5\x4d\x54\x17\x37\x27\xf0\xea\xe2\x26\x3a\x1a\x37\x27\x40\xeb\xe2\x26\xaa\x66\xf9\xb3\xf0\xea\x62\x26\x3a\x16\x87\x27\x30\xf3\xb2\x7b\x3e\x49\x82\xe9\x24\xaf\xa2\x0a\xe7\x2b\x90\x07\xe2\xa6\xe8\x70\x88\x93\x93\x6a\x8d\x73\xe5\xba\x20\x41\x83\x66\x49\x7e\xae\x13\x39\x16\xdf\xa5\xa5\x04\x1b\xf3\xf8\x72\x8c\xb7\x60\xe5\x71\x12\x51\xeb\xa2\x64\x5e\x4a\x0d\xba\x44\xd0\xf5\xef\xc9\x56\x82\xeb\xd4\x05\xcc\xbc\x9a\x29\xfe\x0d\x7c\x5d\x0c\xcd\x6d\x89\x43\x97\x0d\x1f\x19\xc3\xad\x8b\xa6\x79\x35\x8f\xfc\xbb\x86\x8b\xeb\xd6\x05\xd8\x0f\x8b\x97\x8a\x07\x90\x56\x7d\x0f\xde\x75\xeb\xa2\x2c\xb9\x8f\xe2\x10\xeb\x5b\xf1\x1a\x0a\xab\x5b\xaa\xde\x07\x16\x5f\xc9\xb8\xae\x8a\xac\xd9\x83\x37\x51\x30\xb3\x7b\x2f\x9e\xc2\xec\x54\xcf\xc6\x75\xfb\x56\x75\x4b\x01\x3a\x3c\xdd\x72\x72\xdd\xcb\x63\x68\xd2\x51\xe4\x5c\xb4\xab\x63\x68\xf6\x3c\x74\x0a\xf2\xfa\x18\x24\x66\x21\x5d\x6a\x9e\x42\xbb\x39\x86\x66\x36\x5e\xce\x44\x1c\x1c\x43\x34\xaa\xcb\x56\xfb\x3c\xc8\x8b\x9e\x1d\xd2\x2f\x36\xd1\x3a\xf2\xec\x4b\xec\x18\x8e\x15\xa3\x5a\x40\x22\x9c\xac\x76\xa1\xf0\x06\x85\xb6\x05\x94\x6b\x85\xaa\x14\x8d\xc3\xfa\xb7\x27\xee\x85\x9d\xf3\x7e\x99\x66\x47\x31\xec\xc4\xb7\x14\x87\xe4\x84\x31\x76\xce\xdb\x0a\xc2\x62\x3f\x96\x09\x8e\x31\xe3\xfd\x92\x7b\x61\xa7\xbc\x59\x09\x62\x85\x55\x7d\xa1\xe3\x5e\xd8\xe9\x6d\x16\x7e\xa4\x5e\xdd\xce\x67\x4b\x9d\x87\xff\x52\xc8\x86\x61\x67\x70\xb5\xac\x53\x55\x9d\x6c\xd6\x58\xdc\xd1\xb7\x73\x97\xd7\x73\x29\x6f\x20\xfe\x96\x22\x71\x39\x6d\x41\x2f\x4b\xdf\x4e\
xd9\xb0\xe6\x54\x29\xed\x38\xf2\x9a\xc9\xed\xdb\xa9\x5b\xad\xdb\x86\xc7\xbb\xee\x6e\xdf\xce\xdf\xa8\x92\x26\x4f\x02\xd9\x49\x5c\x53\xa9\x9d\x44\xb3\x33\x39\xaa\x24\xc7\x93\x40\x76\x16\xdb\x6a\xb3\x93\xaf\x28\xdc\xfe\x35\xc0\x38\xf8\x16\xcc\x8c\x82\x2c\x17\x88\x40\x37\xb7\xa2\xe2\xd2\xdb\x69\x5c\xa9\xc4\x54\x81\x8d\x55\x97\x78\x21\x98\x92\x42\x15\xc6\xe1\xec\x8c\x9e\xd7\xf4\x68\x0a\x8c\xae\x7b\x91\xe2\x5e\xda\x79\x5d\x2e\xbb\xc8\x47\x20\xed\x7c\xb7\x95\x5a\x66\xa1\x75\x0a\xd7\x4e\xfa\x6a\x79\xf5\xa1\x57\x49\xee\xa5\x3d\x04\xca\x25\x55\xf1\x55\x65\xfd\xdb\x43\xf7\xd2\x1e\x07\xb2\x96\x92\x4b\x23\xff\x8d\x4e\xcd\xab\x5f\xf7\xd2\x1e\x00\x3f\xea\xcf\x14\x27\x0c\xfb\x7f\x00\x00\x00\xff\xff\x9f\x52\x6a\x3c\x07\x2c\x00\x00") + +func fontsSmscriptFlfBytes() ([]byte, error) { + return bindataRead( + _fontsSmscriptFlf, + "fonts/smscript.flf", + ) +} + +func fontsSmscriptFlf() (*asset, error) { + bytes, err := fontsSmscriptFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/smscript.flf", size: 11271, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSmshadowFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x5a\x6d\x6f\xe2\x38\x17\xfd\xce\xaf\x38\x1f\x90\x66\x90\x4a\x43\x52\xfa\x82\x54\x8d\x9a\x82\xdb\x46\x4b\x13\x9e\x24\x74\xb6\x52\x24\x2f\x9d\xa6\x2d\x5a\x0a\x15\x84\x1d\x8d\xc4\x8f\x7f\x94\xf8\x25\x4e\x62\xa7\x95\x56\x90\x2d\xbe\xc7\xf7\x5e\x9f\x7b\x7c\xed\xcc\xcb\xea\xc5\x59\x74\x31\xc4\x09\xec\x21\x06\xb0\x07\xf9\xc7\xc8\x19\x74\xa2\xf7\xe8\x6d\xf1\xbc\xf9\x8d\xa7\x3f\xb8\x5d\xa5\xeb\x35\xc6\x6f\x8b\x8f\x8f\x74\xb5\xc2\xd0\x1a\x9d\xa0\xdf\xc7\xd3\x62\x97\x3e\x63\xb3\x46\xf4\xbe\x58\xad\x3a\xde\xfa\xd7\x6a\xff\x9c\xee\xe0\x45\x01\xa6\x8b\x6c\xb9\xee\xdb\x9d\x97\xe5\xeb\x2a\xcd\xb0\x4d\x57\xe9\x62\x97\xc2\x39\xb6\x73\x4b\xdb\x81\xbb\x7f\x85\x3d\x1a\x0d\x3b\xb3\x74\xfb\xbe\xdc\xed\x96\x9b\x35\x96\x3b\xbc\xa5\xdb\xf4\xe9\x0f\x5e\x97\xff\xa5\x6b\x64\x1b\xbc\x6f\x9e\x97\x2f\x7f\x90\xbd\x2d\x77\x78\xd9\xac\xb3\x23\x2c\x76\x58\x6d\xd6\xaf\xf9\x77\xf6\x96\x76\x8a\x01\xcb\x74\xfb\x6d\x87\xf5\xe2\x3d\xcd\x31\x3e\x56\x8b\x5f\xcc\xb1\x05\x7e\x6d\xde\xdf\xd3\x75\x86\xd5\x72\x9d\x1e\x77\x3a\xf7\x6c\xf4\x73\x1e\xd5\x6c\xb1\x5f\xe1\x7a\xbf\xcd\x36\x6b\x5c\xee\x36\xab\x7d\xb6\xdc\xac\xaf\xd2\xc5\x36\x7b\x5b\x2d\xd7\xff\x1e\xaf\xd3\xec\x07\x6c\xc7\x1a\x9d\xe5\x8e\x2c\x59\x74\x58\xa7\xbf\xf1\xb1\xd8\x2e\xde\xd3\x2c\xdd\x76\x76\xfb\x8f\x8f\xcd\x36\x63\x80\x37\xde\x6d\x1e\xeb\x62\xfd\x9c\x3f\xfe\x5c\xae\x8f\x81\xfb\xc5\x1f\x2c\x56\xbb\x0d\x9e\x52\xec\x56\xcb\xd7\xb7\x6c\xf5\x07\xef\xc2\x8b\x97\xcd\x16\x4f\x69\x96\xa5\x5b\xec\x77\x69\x67\xf3\x52\xc0\xbf\xec\x57\xab\xfe\xef\xe5\x73\xf6\x66\xfd\x9b\x6e\xd7\xd6\xee\x7d\xbf\x7b\xc3\x62\x95\xa5\xdb\xf5\x22\x5b\xfe\x97\xee\x8e\xf0\xb4\xcf\xf0\x9c\xbe\x2c\xf6\xab\x0c\x9b\x7d\xf6\xb1\xcf\xf2\xc8\xfd\x20\xc6\xaf\xb7\xc5\xfa\x35\x7d\x3e\xee\x74\xd0\xed\x5e\xd5\x3e\xae\x3a\xc0\x21\x7f\xa6\xec\xb3\x97\x7f\x02\x60\x3f\xa0\xf8\xdf\x07\x3c\x74\xc1\xfe\x8c\xf2\xbb\x78\x38\xe4\xd6\xc5\x1f\x29\x0e\xf9\x7f\x0c\x45\x79\x06\x3d\xe4\xd8\xd2\xa0\x40\xfa\x4e\xfb\x97\x6c\x20\xb5\xf8\xa0\x6e\x31\x80\xf6\x60\xf1\xb9\x2c\x8e\x6b\x95\x4e\xc9\x69\x0b\x40\xf0\x47\xee\x7a\x42\x29\xa5\x87\xd2\x51\x31\xb8\x30\x16\xa0\x50\xa2\x03\x9b\x9a\x9b\xb0\xaf\x84\x26\x45\x4e\x12\x24\xc2\xe2\xd0\x55\xbf\xa9\xc5\x1c\x4d\x90\x70\x50\xca\xfc\xe1\xae\x26\xaa\x03\x57\xc2\x92\x45\x42\x41\x79\x4e\x78\x46\xea\x03\xcb\x4f\xe9\x74\x19\x78\x0e\x20\xed\xd5\x95\x68\x58\x57\x17\x51\xc4\x29\xdc\xb5\xba\x
f5\x45\x14\xb1\x7e\xe7\x41\x26\x94\x5a\xdd\x2a\x3c\x15\x69\x10\x6b\x2a\xdc\x60\x3f\xa2\x57\x99\x42\x75\xf3\x8a\x45\xce\x97\x19\x49\x97\x0d\xb0\x2a\x03\x70\xe0\xf9\xcf\x73\xa4\xa4\xa8\x1a\x22\x87\xa5\x46\x14\xc8\xd8\xf8\x90\xa4\x32\x46\xf8\x22\x12\x22\x58\x51\x49\x89\x60\x98\x08\xc9\x8c\xa4\xfc\x26\x21\x9b\x89\x93\x6b\x51\x5b\x17\xf5\x87\xfa\x72\x17\x60\x97\xb8\x64\x58\x05\x27\xab\x0b\x56\xe3\x43\x9d\x18\x2a\x81\x7f\xe0\x47\x49\xdc\xea\x9a\x24\xd2\xe7\x62\x40\xaf\x5b\xe3\x45\x31\xa4\xf8\x9b\x05\xfa\x0f\x5f\xfa\x84\xd2\x23\xa5\xe0\x4a\xa7\x13\x35\xf7\xbc\x72\xa1\xb8\x2e\x16\x91\x27\xd6\xbc\x88\x22\x96\xef\xbc\xc6\x93\x66\x74\x72\x0e\x26\x40\x02\x47\xcf\x17\xc1\xa4\x06\x2d\x1b\x03\x0e\x8d\xd2\x90\xbe\x50\x59\x1a\x0d\x5f\x0e\xb2\x2a\xa8\x90\x52\xd4\x07\x95\xb4\x36\xf9\x22\x6b\x2b\x31\x57\x61\x3e\x15\xcb\xd6\x31\x27\x07\x3d\xa8\xec\xe0\x83\x84\x36\x8a\x07\x0d\x3d\xca\x69\x70\x48\x2c\xe9\x76\xc5\x6f\x39\xf0\xc0\xa7\x14\xc3\x92\x26\x1a\x55\x34\xa4\xcc\x94\xd5\x10\x43\x41\x3a\xbe\xea\xba\x8c\xeb\x90\x12\x1d\x8f\x92\xae\x52\x2d\xf4\xd0\xa8\x12\x2a\x69\x2a\xa9\xa6\x11\x83\x32\x35\xd2\x17\x9d\x38\xcb\x45\x3e\x98\x97\x27\xdf\x13\xc4\x02\x95\xfb\x43\x5e\x32\x15\x81\x51\x07\x57\xc6\x27\x50\x4d\x1a\x56\xaa\x21\x33\xfa\x01\xa1\x11\xf9\xf0\x7a\x8a\x94\x81\x0a\xb2\x26\xb8\xa6\x24\xea\x18\xc3\xff\x50\xd9\x2e\x8b\x51\x42\x6d\xc0\xe3\x90\x21\x24\xf5\x1a\xa8\xec\x21\xa2\x6e\x19\x02\x37\xb3\x92\x72\x67\xe9\x54\x8a\x43\x55\xe8\xa6\x06\x4a\x11\x95\xf6\x57\x9d\xaa\x35\x8f\xfe\x1f\xb9\x7a\x47\x4d\xae\x2b\x5d\x05\xa7\xcc\xb1\x46\xf5\xcb\xce\x83\x23\x35\x2b\x59\x84\xd8\x3e\x5d\x09\xd5\x67\xbb\x81\x4e\x5d\xd4\xfd\x50\x5f\x31\xfa\xe8\xa4\x2c\x4a\x89\x2e\xa3\x13\x02\xad\xd1\xa9\x1e\x5f\x64\xd9\x11\x96\x7e\xf4\x2a\x8d\x90\x5c\x3f\x15\x9e\x15\x08\x2b\xc8\xc6\xb6\x75\x30\x42\x97\x11\xfc\x23\x5d\xa3\x87\xb6\x84\x99\x23\x50\xd3\xd1\xb2\x75\xd7\x07\xe1\x58\x95\xa4\x2f\x64\x16\x7c\x49\x34\x94\xd0\xaf\x91\xda\xf6\x36\xfb\x9f\xee\x17\x48\x85\xaa\xfe\x1c\x19\x43\x67\xe2\x53\x54\x55\x73\x0f\x41\x75\x9c\x3a\xb4\x3a\xda\x8c\x4a\x9b\x9b\x8e\xce\xbf\xca\xce\xac\xa4\x41\xc8\x8d\x6e\x13\x64\x3f\x08\x95\x11\xdf\xa2\x2d\x57\x39\x54\x32\xa9\x22\x40\xc0\x81\x96\x82\x7e\x25\xdb\x3b\x26\x4e\x85\x8d\x95\x58\xfa\xf3\x0c\xed\x21\xa1\x4a\xdf\xc7\x7b\x9f\x66\x17\x43\x7b\x65\x3d\x50\x85\x66\x2a\xcf\xc4\x40\x51\x52\x2d\xcd\xb5\x8a\xd6\xa6\x18\x25\x5a\x5b\x83\xab\x99\x52\x43\x15\xc1\xfb\x43\xde\x67\x16\x0f\xf5\x02\xb0\xcf\x06\x80\x1f\xf4\xaf\x43\xe2\xfe\x85\x68\xe6\x8e\x89\xfe\x04\x69\x9f\xd9\x80\xe7\x3f\x90\x30\x26\x13\x90\xbf\xc7\x53\xf7\xde\x8d\xbd\xc0\xc7\xbd\x1b\xfe\x65\x16\x15\xfb\xcc\x01\xc6\xc4\x8f\x11\x79\xb7\xbe\xb2\xdc\xdc\x22\x91\xd1\xb2\xd3\xa1\x7d\x76\x02\xcc\x82\xb9\x3f\x91\x06\xe2\xc0\x45\x65\xd1\xd1\xa3\xaa\x88\x32\xc3\x21\x30\x9e\x87\x21\xf1\xc7\x8f\xdc\x36\xc1\xb1\x6c\xda\x79\x2e\xad\x72\x1b\x14\x76\xa7\xc0\x23\xf1\xa5\x89\x3c\xf3\x51\xe5\x30\x52\x3b\xd4\xe5\x66\x67\xc0\x75\x18\xfc\x45\x7c\x5c\xbb\x61\x2d\x74\x85\xb6\xf6\xd9\x39\x10\x91\x71\x91\xab\x32\xa4\x5e\x57\xdd\x49\x13\x24\x62\x52\x4b\xe0\x5f\x00\x13\xcf\x25\x21\x89\xbc\xa8\x42\x0b\xe8\xf7\xc9\xdc\x66\x04\x8c\x83\xd9\x63\xe8\xdd\xde\x95\x09\x47\x79\xfa\x63\x67\x69\x96\x89\x42\x80\xa4\x2e\xc8\x0a\xb6\xcf\x07\xc0\x0d\xb9\xf7\x7c\xcf\x27\x08\xc2\x89\xe7\xbb\x53\x78\xfe\xc4\x1b\xbb\x71\x10\xea\xf9\x4b\xa9\x66\x41\xce\x6d\x60\x4a\x6e\xe2\xfe\x2c\xf0\xfc\xd8\xf3\x6f\x31\x09\xe6\xd7\x53\x02\xd7\xbf\x9d\x12\xfc\x6f\x1e\xc4\x2a\x83\x72\x61\x90\xa7\x22\xe5\x5c\x54\x2f\x4c\xfb\xdc\x41\x71\xe7\xc1\xe2\xcb\x67\xae\xaa\x75\x55\xd5\x4b\xb3\x13\x20\x0a\x6e\x62\xdc\x3d\xce\xee\x88\xaf\x48\x55\
x45\xa2\x3a\x8a\xc5\x10\x08\xc9\xad\x17\xc5\x24\x24\x13\x43\x3e\xfb\x3d\x91\x4f\x1c\x92\x44\x97\xcf\x53\xe0\xde\x1d\x87\x81\x6f\x9a\x4a\x9d\xf2\x0c\x98\x90\xdb\x90\x10\x31\xdd\x31\xd7\x00\xab\xd9\x33\xd9\xe7\xe7\xc0\x6c\x3a\x8f\xfa\xf7\x9e\x3f\x8f\x4a\x07\xe5\x75\x4d\xa5\xb3\x90\xbd\x14\x27\xe6\xf9\x05\x10\xcd\x67\x24\x8c\xc6\xa1\x37\x8b\x11\xff\x0c\x3a\xe2\xe8\xa6\xf8\xa9\xce\x37\xaa\x59\xdc\x85\x84\xe4\x36\x5c\xea\x7b\x1a\x9b\x8b\x01\xe0\x8e\xe7\x31\x81\x3b\xce\x75\xa0\x03\x19\xca\x55\xb5\x89\xb3\x2f\x6c\xe0\xde\x1b\x87\x41\x25\xd5\xaa\xca\xe1\xf8\x48\xd9\x7e\x99\x91\x03\xcc\xbc\xe9\x38\x0c\x7e\xaa\x66\x9c\xa0\xc5\xa5\x03\x4f\x40\x83\xa0\x17\x27\xf9\x7c\x93\xc9\x94\x60\x12\xc4\xc2\x99\xca\xa1\x9d\x0f\xcc\xa5\x85\x4c\xbc\xe9\xd4\x6d\x3a\xce\x2c\xf2\x51\xa7\xd5\xec\x04\x3e\x29\x1b\x62\x6d\x3e\x2f\xce\x72\x6a\x44\xe3\xf9\xd4\x5c\x6b\xb5\x3d\xa0\xd9\xb5\xdb\x17\xe7\x40\x51\xf1\x5f\x2c\xb4\xa4\x72\x71\x20\xaf\x0e\x1a\x27\x39\xfb\xe2\x02\x78\x98\x4f\x6f\xdd\x10\x37\xa1\xcb\x14\x2c\xf0\x73\x40\x37\x8c\x49\x58\x04\xc7\xee\x63\xd8\x1e\x7c\x80\x45\xbf\xc9\x7c\x5b\xf5\x72\x14\xa8\x23\x3d\xea\x9d\x3b\xbd\x51\x20\x25\xa2\xbc\xa4\xb1\x40\x6b\x78\x39\xdc\x68\xd0\x84\x2b\x78\x29\xdc\x8c\x0a\x82\x96\x7e\x52\xda\xfb\x92\x9f\x23\x75\xc3\xfb\xdf\x9c\x44\x15\xad\x92\x37\x29\xdc\xd7\x66\x5b\x6f\x8f\x1c\x60\xea\xc6\x9e\x8f\xb1\x3b\xf3\x62\x77\x8a\x29\x89\x63\x12\xc2\xc5\x4f\x2f\xbe\xc3\x6d\xe8\x3e\x10\x76\x1d\xc9\xc2\xed\xf7\xc5\xa5\x4a\xbd\xf7\xb2\x47\x27\xed\x58\x45\x85\x75\x94\xcb\xa9\x36\xac\x61\x3b\xd6\xd8\x0b\xc7\xf3\xfb\x9b\x29\xf9\x3b\xef\x8c\x92\xe4\x73\xc0\xd3\x76\xc0\xd8\x9b\x4e\x48\x07\xd6\x97\x9c\x3b\x6b\xc7\xaa\x6e\x89\x5f\xea\xe4\xec\xd1\x79\x3b\x66\x98\x97\x8c\x7b\x1d\x3c\x14\x19\xfc\x0e\xb1\xb4\x6d\x97\x5c\xf6\xe8\xc2\x04\x5a\xa0\x1c\x29\x2d\x83\xbc\x7b\xb2\x50\xdb\x26\x19\xd2\xc8\x80\x34\xe6\xeb\xa1\x28\x0f\x8c\xd7\x66\x05\x21\xaf\x3a\xce\x60\x60\x40\x23\x0d\xd6\x15\x4b\xc1\x0f\x2b\x97\xdd\x52\x97\x9c\x81\xdd\x8e\x21\xd8\xc6\x95\x5c\x8f\x61\x62\x3f\x69\xb2\x2c\x27\x99\x19\xc8\x44\x7d\xa2\x63\x84\x20\x44\x5f\x5c\x9a\xd0\xcb\x6a\x27\xee\x0c\x4c\xf4\xf7\x9a\x65\xa9\xdc\xea\xd5\x0f\x32\xce\xc0\xc4\x7a\xaf\x91\xa4\x76\x1c\x13\xe3\x3d\x7d\xa2\xda\xc1\x4c\x54\xf7\x74\xc9\x12\xd9\x92\x0c\x45\xfd\xf5\x4a\x81\x69\x62\x3a\x89\xef\x2a\x65\x92\xeb\xb5\xb8\x25\x6d\xec\x26\xce\xc0\x44\x73\xbf\xa2\x12\x4c\x26\x0a\xd3\xc4\x74\x19\xe9\xd8\x26\x92\x07\x95\x35\x54\xb4\xb5\xf5\x08\xe7\xd8\x26\xc2\x07\x35\x79\x2d\xaf\xd8\xdb\xf1\x4c\xe4\x0f\x34\x12\x5b\x6a\xec\x27\xa0\xa6\x42\x08\x34\x09\xfc\x0a\x9e\xa9\x10\x82\x36\xae\x7c\x82\x99\x37\xbb\xf3\x69\xec\xcd\xa6\x79\x0f\xa3\x9c\x79\x12\x66\x75\x04\x7c\x63\x07\xb1\xa4\x72\x3d\xe2\xd8\xa6\x32\xe0\xde\x44\x71\x7e\xd8\x62\x7c\xb3\xe4\xf5\xb1\x25\x3b\x3d\xab\x46\x39\x06\x6a\x2a\x87\xb9\x61\x0b\x36\x9f\xe0\x1d\xdb\x54\x06\x73\xc3\x16\xdc\x86\x65\xaa\x85\x79\xdb\x16\xdc\x02\xe8\x98\x0a\x62\xde\x22\x92\x6d\x78\xa6\x82\x78\xd4\x07\x5b\xde\x1b\x35\xde\xe9\x39\x8e\xa9\x18\xe2\xbb\x20\xf4\x95\x8b\x31\x7e\x43\x5a\xbf\x5a\x73\x1c\x49\xfc\xe8\xde\x9d\x4a\xf3\xe8\xce\x0d\x67\x88\xbe\x76\xd1\xe1\x38\x43\x2d\x88\x6b\x16\x8c\x96\x5b\x1a\xc7\x39\x6d\x43\xd3\xc9\x45\x2b\xda\x59\x1b\x9a\x51\x2c\x5a\x21\xcf\xdb\x20\x75\x52\xd1\x8a\x76\xd1\x86\x66\x12\x8a\x56\xc4\x51\x1b\xa2\xb1\x23\x6b\x83\x3c\x19\xe8\x21\x89\x7a\x62\xe0\x28\xe5\x65\x7c\xfd\x2a\x89\x63\xd9\x5a\x2c\x5d\x47\xa6\xbf\xd3\x65\x67\x42\xe7\xc4\xd1\xe2\x10\x0d\xeb\x58\xdb\x62\x7a\x49\xe0\x9c\xe8\x8b\x80\x18\xc4\xa7\x0d\x49\x5f\x09\x9a\xbe\x4c\x21\x5b\x0b\x9c\xbe\x14\xb4\xdd\x59\xd9\x9e\xf1\xf3\x53\xa2\x6b\x38\x4e\xf4\xe5\xd0\xec\xcf
\x0a\xb3\xc6\xf9\x3a\x47\xd0\xb3\xbf\xda\x99\x51\xab\x7a\x57\xd7\x91\xd6\x7a\xb6\x1b\xfa\xb1\xab\xf2\x4d\x59\x19\x49\x01\xa3\xa7\x78\x6b\x27\xd6\xfa\x86\xd2\x19\xea\x19\xce\xfb\xb0\x23\x24\xdf\x1a\x95\xd7\xec\xc3\x86\x7a\x6a\xfb\x86\xb3\xda\x37\xe3\x6b\x18\x67\xa8\x27\xb7\xae\x07\xbb\xd2\xdd\x65\x28\x48\x7a\x72\xd7\xbb\xaf\x52\xac\xcc\x48\x7a\x72\xeb\xfa\x2e\x45\x49\xcd\x70\x7a\x72\xeb\x3a\xae\xcf\x90\xf4\xa4\xd6\xf6\x5a\x9f\xbe\x00\x70\x86\xe7\xc0\xc4\x7b\xf0\x22\xa5\xc3\xe2\x25\x25\x2b\xb4\xf6\x8f\x3d\x9c\xa1\x9e\xd6\x8d\xfe\x4a\x6a\x5a\xde\x67\xf1\xde\xaa\x3e\xbd\x9e\xda\x9f\x77\x55\xf5\x97\x14\xce\xa9\x9e\xd2\x9f\xf7\x54\x4d\x24\x3d\xaf\xbf\xd8\x51\x35\xe1\xf4\xe4\xfe\x5a\x3f\xd5\x44\xd3\x13\xdc\xd0\x4d\xb5\xbc\x60\x73\x4e\xf5\x04\x57\x7b\x29\x56\xb7\xc6\x97\x9e\xce\xa9\x9e\xd4\x8f\x5f\x0b\xad\xe6\xd0\xff\x03\x00\x00\xff\xff\x37\x7a\xd8\x72\x4d\x2a\x00\x00") + +func fontsSmshadowFlfBytes() ([]byte, error) { + return bindataRead( + _fontsSmshadowFlf, + "fonts/smshadow.flf", + ) +} + +func fontsSmshadowFlf() (*asset, error) { + bytes, err := fontsSmshadowFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/smshadow.flf", size: 10829, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSmslantFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x5a\x6d\x4f\xdb\xc8\xf7\x7d\xef\x4f\x71\x5e\xa0\x7f\x89\x44\x98\xd8\x84\x40\x24\x84\xe2\x26\x06\xac\x0d\x71\xd6\x49\xda\xad\x64\x69\x9a\x16\x53\xa2\x0d\x09\xca\xc3\xae\x2a\xf9\xc3\xff\x35\x8f\x7e\x9a\x31\xec\x4f\xaa\x1a\x04\x33\x67\xee\xdc\xb9\xe7\xce\xb9\x77\xf2\xbc\x7e\xf6\x96\x27\xb8\x44\x17\x6e\x17\xee\x25\xdc\x0e\x3a\xf0\xbc\xae\x7b\xe9\xcc\x5e\x67\xeb\xe5\xe6\x80\x1f\xbf\x71\xbf\x4e\x37\x1b\x0c\x5f\x96\x6f\x6f\xe9\x7a\x8d\x1e\xe9\x5f\xa0\x8d\x1f\xcb\x7d\xfa\x84\xed\x06\xb3\xd7\xe5\x7a\x8d\xff\x03\x1f\xef\x84\x9b\x9f\xeb\xe3\x53\xba\x47\x38\x8b\x30\x5e\x1e\x56\x9b\xb6\xeb\x3c\xaf\x7e\xad\xd3\x03\x76\xe9\x3a\x5d\xee\x53\x78\xe7\x2e\xda\x6d\xb8\x1e\xfc\xe3\x2f\xb8\xfd\x7e\xd7\x99\xa6\xbb\xd7\xd5\x7e\xbf\xda\x6e\xb0\xda\xe3\x25\xdd\xa5\x3f\x7e\xe3\xd7\xea\x9f\x74\x83\xc3\x16\xaf\xdb\xa7\xd5\xf3\x6f\x1c\x5e\x56\x7b\x3c\x6f\x37\x87\x33\x2c\xf7\x58\x6f\x37\xbf\xd8\xe7\xe1\x25\x75\xf8\x80\x55\xba\xfb\xb4\xc7\x66\xf9\x9a\x32\x8c\xb7\xf5\xf2\xa7\x30\x70\x89\x9f\xdb\xd7\xd7\x74\x73\xc0\x7a\xb5\x49\xcf\x1d\xe7\x51\x8c\x7e\x62\x9b\x9b\x2e\x8f\x6b\x7c\x3e\xee\x0e\xdb\x0d\x6e\xf6\xdb\xf5\xf1\xb0\xda\x6e\x06\xe9\x72\x77\x78\x59\xaf\x36\x7f\x9f\x6f\xd2\xc3\x2d\x5c\x8f\xf4\x7b\xcc\x90\x95\xd8\x1d\x36\xe9\xbf\x78\x5b\xee\x96\xaf\xe9\x21\xdd\x39\xfb\xe3\xdb\xdb\x76\x77\x10\x80\x77\xe1\x3d\xdb\xeb\x72\xf3\xc4\x7e\xfc\xba\xda\x9c\x03\x8f\xcb\xdf\x58\xae\xf7\x5b\xfc\x48\xb1\x5f\xaf\x7e\xbd\x1c\xd6\xbf\xf1\xaa\xac\x78\xde\xee\xf0\x23\x3d\x1c\xd2\x1d\x8e\xfb\xd4\xd9\x3e\x73\xf8\xe7\xe3\x7a\xdd\xfe\x77\xf5\x74\x78\x21\x7f\xa7\xbb\x0d\xd9\xbf\x1e\xf7\x2f\x58\xae\x0f\xe9\x6e\xb3\x3c\xac\xfe\x49\xf7\x67\xf8\x71\x3c\xe0\x29\x7d\x5e\x1e\xd7\x07\x6c\x8f\x87\xb7\xe3\x81\xed\x7c\x12\xcd\xf1\xf3\x65\xb9\xf9\x95\x3e\x9d\x3b\x0e\x00\x9c\x0c\xd8\xc7\x09\xd8\xc7\x09\xd8\xc7\x09\xd8\x07\xfb\x1f\x03\xfe\x47\x4a\xd9\x07\x01\x19\x38\x20\x94\x60\xe0\x9c\xd2\x16\x1f\xaa\xc6\x50\x50\xf6\x5b\x64\x68\x0d\x9c\x8c\x64\x44\x03\xe4\x63\xc0\x91\x28\xe5\xbf\xa3\x94\x30\x40\xca\x11\x81\x73\xf6\x8b\x81\xc3\x7e\x14\x7f\x13\x2b\x51\x52\x9c\xcc\xe7\x11\x66\xc4\x29\x6d\xdf\x0c\x1c\x22\x27\x11\x6d\x84\x30\xf5\x94\xb6\xd8\x54\xf6\x1b\x86\x01\x86\x4b\x70\x4a\x5b\x12\x47\x22\x72\x5b\x
+
+func fontsSmslantFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsSmslantFlf,
+		"fonts/smslant.flf",
+	)
+}
+
+func fontsSmslantFlf() (*asset, error) {
+	bytes, err := fontsSmslantFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/smslant.flf", size: 12223, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsSmtengwarFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff…\x7d\x58\x6d\xad\x2c\x08\x00\x00") // gzip-compressed font data; body elided, header and trailer kept
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x55\x51\x6b\xdb\x30\x10\x7e\xd7\xaf\xb8\x07\x43\x1c\x70\xe4\xa6\x63\x63\x2d\xa5\x53\xc7\xd8\x5b\x1f\x06\x1d\xec\xc1\xa0\xc8\xb1\xd3\x78\x75\xed\x62\xa7\x0b\x85\xc3\xbf\x7d\xe8\xce\x92\xe5\xf4\x65\xdd\x5e\x07\x8d\x6a\xdd\x9d\xee\x3e\x7d\xdf\x97\x78\x57\xef\xce\x4d\x04\xef\xe0\x1c\xd6\x6b\x38\x83\xf7\xe2\xae\x6a\x5e\xe0\xae\x6c\xee\x8f\xa6\x83\xb5\x3c\x83\xfc\x05\x3e\x97\x75\xd5\x14\x06\x6e\xfa\xbc\xac\x6b\xb8\x7a\xcc\x8d\xda\x6e\x0b\xb9\x37\x5d\x57\xf5\x72\xdb\x3e\x5e\x8b\x9b\xc2\x3c\x1d\xca\x02\x76\x5d\xfb\x08\xb7\xd5\x43\x09\xdf\xbb\xdc\x34\x8b\x1e\xae\x9e\xed\x83\xea\xab\xa2\xec\xf7\xed\x51\xfe\x7c\xaa\x65\x63\x7a\x23\xef\xdb\x5f\xd7\x7e\xd4\xae\xed\x40\xdc\x96\x07\xb3\x6b\x9b\x03\xc4\xc7\x7d\xb5\xdd\x43\xd5\x27\x50\x35\x70\x78\xee\x9a\x04\x72\xd3\x97\x05\xb4\x0d\xdc\xb5\xf5\x43\x55\x36\x70\xb9\x14\x5f\xcc\xa1\xbc\x84\xf3\x8f\xf0\xb5\xcc\x61\x7d\x71\xf1\x41\x88\x48\xf1\x9f\x12\x69\xa4\x44\x16\xd1\x7f\x25\x52\xb0\x5b\x00\x70\xab\x12\x1a\x51\x47\x4a\xac\x10\x57\x1c\xa4\x68\x8c\x9b\x48\x09\x89\xcb\xa9\xb0\xb5\x47\xd3\x96\x02\x76\x0f\xdf\x6c\x97\x58\xff\x98\x4a\xa8\x39\x7f\x94\xa0\x51\x48\xdb\xcc\xee\x33\x7a\x44\xdb\x84\xf2\x19\x52\x43\xcc\x42\x2c\x9a\x6a\x02\x78\x63\x3b\x86\x4f\xb8\x57\x2b\x0f\x41\xca\x68\x5c\x60\x1a\x99\x82\xdf\x6f\x06\xdb\x3c\xd1\x69\x30\x42\x6b\x42\x0d\xb3\x10\xc7\x62\x1d\x30\xa0\x75\x10\xf7\x19\xce\x0d\x8c\x10\x99\x80\x84\x43\x9a\xb1\xc7\x7c\x26\x01\x17\xf5\x71\x9f\x19\xc1\x0e\x48\x4d\x91\xa2\x38\x86\xf4\xa8\x0d\xea\x25\x97\xe3\x54\xae\x21\xcc\x06\xf9\x89\x2a\xe9\xd8\x97\xe1\x26\x59\x44\x4a\x6c\x26\x9e\x08\x52\xc0\x23\x4b\x73\xed\xa5\x21\xd9\x79\x61\x5d\x89\x47\x54\x01\x8f\x72\x91\xbc\x92\xca\xe2\xb3\x75\x16\x1c\x9d\x75\x09\x00\x77\x47\xbe\x6f\x50\x0b\x4b\x98\xd5\x12\x54\x40\x0f\x0d\x5d\xcb\xb9\x5e\x4e\x18\x1c\x75\x41\x7f\x8d\x34\xf3\x67\x6d\x2b\xf4\x76\x04\x07\xe3\xe4\x94\x6b\x16\xa2\x1b\x8d\xb6\x9a\xf4\x3d\xbd\xdc\xc4\xfa\xc9\x4d\x7c\x62\x3b\x9b\xce\xca\x8d\xaa\xa2\xbf\x09\x06\xa3\xc1\x5b\x3c\xb3\x44\xc7\x6e\xae\x7d\xe6\xd6\x29\x4c\x21\x74\xbc\xd9\xd0\xa7\x70\x96\xf7\x89\x77\x09\x84\x68\xfd\xd9\x61\x18\x1c\x31\x78\x42\x4c\x50\xbb\x08\xa9\x9f\xa3\x59\xd2\xb4\xc1\x7d\xcd\xed\x97\xd7\x79\x69\x12\x81\x5d\x6e\x17\xf6\xb8\xd7\x47\x84\xe6\xe0\x95\xa4\x78\xdd\xe2\xbf\xd7\xde\xe6\x35\x16\x7f\xa0\xbb\xb2\xc6\x7f\x62\x34\x46\x3c\x84\x46\x0b\x1b\xbd\xd5\x68\xc1\xd9\xbf\x31\xda\x0c\x0d\x19\x2d\xb6\x46\xbb\x62\x5a\x69\x7a\xe4\x3f\x4a\x70\xa1\xfd\x05\xd3\x54\xbc\xd8\x2c\x4e\x5f\x74\xd3\x2b\xf1\x5f\x9e\x7e\x07\x00\x00\xff\xff\x7d\x58\x6d\xad\x2c\x08\x00\x00") + +func fontsSmtengwarFlfBytes() ([]byte, error) { + return bindataRead( + _fontsSmtengwarFlf, + "fonts/smtengwar.flf", + ) +} + +func fontsSmtengwarFlf() (*asset, error) { + bytes, err := fontsSmtengwarFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/smtengwar.flf", size: 2092, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsSpeedFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x5a\xdd\x8b\xe3\xc8\x11\x7f\xef\xbf\xa2\x58\x0c\xe7\x21\xde\x6d\x4b\xfe\x0e\x73\x8b\x43\xc8\xc3\x41\x42\x02\x07\x79\x12\xf4\x68\xc6\xf2\x58\xc4\x23\x2f\xb6\x26\xb7\x0b\xfa\xe3\x43\x57\x77\x55\x77\x4b\x2d\x5b\xde\x5d\x72\x1c\x63\xed\x8c\xba\xba\x3e\x7f\xf5\xe5\xfd\x71\x9f\xe6\x23\x58\xc2\x02\x92\x25\x24\xfa\xa7\xf8\xfd\x4b\x51\xec\xe0\xf9\x1b\xfc\xf5\x98\xbf\xef\x0a\xf8\x47\x7e\xae\xcb\xea\x02\xa9\xdc\x2c\xe0\xe3\x47\x78\xce\x2f\xc5\x0e\x4e\x15\xfc\x7e\xcc\xab\x5a\xfc\x56\xbd\x1c\xdf\x77\xc5\x05\x7e\xfb\xfd\x9f\xf0\xf7\xbc\x2e\xab\x8f\x89\xd8\x97\xaf\xc7\xa2\x86\x73\x71\x2c\xf2\x4b\x01\xe9\xa7\x44\x9f\x4c\x52\xf8\xcb\xfb\x2b\x24\x9b\xcd\x5c\xfc\xab\x38\xbf\x95\x97\x4b\x79\xaa\xa0\xbc\xc0\xa1\x38\x17\xcf\xdf\xe0\xb5\xfc\x6f\x51\x41\x7d\x82\xb7\xd3\xae\xdc\x7f\x83\xfa\x50\x5e\x60\x7f\xaa\xea\x09\xe4\x17\x38\x9e\xaa\x57\xfd\x59\x1f\x0a\x81\x2f\x94\xc5\xf9\x97\x0b\x54\xf9\x5b\xa1\x69\x7c\x39\xe6\x2f\x86\xb1\x1c\x5e\x4e\x6f\x6f\x45\x55\xc3\xb1\xac\x8a\x4f\x42\xfc\xed\xeb\x97\x63\x5e\xe5\xb5\xbe\xed\xb4\x87\x7d\x79\xbe\x98\xbf\xfd\x59\x68\x0d\xc0\x47\xf8\xf0\x96\xbf\x96\x2f\x50\xbd\xbf\x3d\x17\xe7\x0f\xb0\x3f\x9d\x61\x5f\x1e\x0b\x28\x77\x45\x55\x97\xfb\xf2\x05\x0f\x8b\x1c\x00\xe0\x23\x5c\x0e\xa7\xf7\xe3\x0e\xf2\xe3\x1f\xf9\xb7\x0b\x3c\x17\xf0\x94\xff\x32\xc1\x43\xd5\xe9\x0f\x31\x32\x2f\xd5\x87\x02\x3e\x1c\xf2\xf3\xee\xf9\x98\x57\xff\xf9\xa0\x15\xf0\xe5\x5c\x56\xf5\x45\xcb\x90\x03\xfe\x76\x02\xcf\xef\x35\xbc\xe4\xd5\x2f\xb5\x26\x73\x79\x7b\xbf\x1c\x8a\x9d\x58\x1a\x0a\x87\xa2\x7c\x3d\xd4\x9a\xe3\x1c\x5e\x0e\xf9\x39\x7f\xa9\x8b\xb3\x58\x5c\xf9\xe3\x04\xaa\x53\x0d\x25\x9a\xa4\xac\x5e\x61\x57\x5c\x5e\x8a\x6a\x57\x9c\x2f\x22\x99\xe3\xb1\xb7\xfc\x2b\x4a\x0e\xc7\xa2\x7a\xad\x0f\x30\x2e\xbe\xd2\xcb\xbe\xd2\x2e\x0f\xf0\x27\xc8\x61\xff\xbe\x7b\x2d\x60\x9f\xbf\xd4\xa7\xb3\x48\x16\x48\x61\x57\xec\xf3\xf7\x63\x6d\x98\x7d\x3b\xed\x0a\x14\x9c\x4d\x25\x92\x25\xbe\x66\x54\xa9\xf9\x0b\xe8\x0a\xa1\xd9\x87\xd1\x68\x2b\xcc\x27\xe0\xc3\x68\x04\xf8\xa0\x3f\xf5\x03\x7e\xc2\x56\x98\x4f\xd8\x6e\x85\xc2\xff\xf0\x13\x40\xea\x4f\x00\xa9\x5f\x95\x4a\xea\x37\xc7\xea\xc1\x1c\xf5\x0e\x80\x82\xad\x50\x63\x68\xe0\x61\x2b\x54\x23\x1b\x69\x2e\xb1\x77\x40\xe7\x80\xc2\x43\x78\x4a\x29\x25\x41\x4a\x90\xfa\x4e\x50\x80\xff\x2b\xb9\x15\x92\x9f\xed\xed\xc8\x80\xa3\xd4\x22\xc7\xa4\x0c\xcb\x48\x42\x8d\x15\xc0\x03\x6c\x85\x04\x30\xdc\x13\x0d\xf3\x36\xe8\xf7\xf4\xe3\x58\x3d\x28\x89\x27\x34\x0d\xf3\x2a\x3e\x28\xa0\x53\x28\xb7\x77\x37\xdf\x0c\xe6\xe5\x31\x28\xc0\x37\xf0\x72\xc8\x64\xa3\xaf\x45\x5a\x8f\xb0\x15\x19\x52\xce\x64\x97\x84\x51\x9d\x51\x9c\x55\xd7\x56\x8c\x58\xd0\x8e\x84\x8a\x8d\x62\x2e\x93\x86\x45\xb0\xca\x69\x54\x13\xb2\x67\x1e\x1a\x68\x48\x66\x49\xc6\x25\x21\xbb\x8a\x51\x40\xa6\x69\xf4\xb5\x48\xd1\x98\x44\x53\xda\x0a\x68\x42\x53\x6c\xdd\x53\x70\xad\x31\xab\x24\x7b\x18\x75\x74\x8e\x85\x3f\x3b\x2a\x71\x1a\xf3\x9f\x94\xd3\x8a\xc4\x07\x69\x1c\xbc\xf5\xde\xb5\x1b\xd4\x43\x5b\xc5\xa4\x64\x15\xf7\x06\x00\x4f\x55\x71\x6f\x30\x82\x1b\x17\xb0\xe6\x41\x9f\x34\xae\x20\xd9\x13\x5a\x4c\xba\xcb\x1f\x83\xa8\x23\x3b\x87\x8a\x0b\x8d\xdb\xd8\xbb\xd8\xfd\x29\x68\xa4\xf9\x95\xaf\x6e\x5f\xcc\xc6\x39\x83\x54\xe8\xa4\x96\x84\x3b\x1a\x61\xd2\x46\x0c\x60\xcc\x1a\x06\xc9\xca\x6c\xe6\xb6\x9d\x03\x0d\x13\xb6\x58\x9b\x59\x41\x32\x7c\x9d\x18\x20\x0e\x5a\x64\x3a\x22\x20\x15\xc9\x31\x07\x83\xf5\x2c\x5b\xe8\x76\x43\xcf\x8a\x42\x5c\xa1\x57\xe2\x6d\xd0\x0c\xbc\xad\xeb\x12\xd6\x4e\x99\x52\x93\x7e\x6d\x07\x8e\x6e\x3e\xd1\x63\x19\x72\x02\x44\x1a\x74\xc0\x02\x14\x06\xaf\x65\xcf\x30\x64\xf0\x63\x2b\x32\xd4\x21\x64\x2a\xe3\xc
8\x68\x07\x1c\xa2\xb4\xb2\x08\xcd\xfe\x35\x6a\xc3\x01\x63\x08\x52\x54\xe4\xa2\x46\xe9\xd2\x79\xe5\xd6\xe3\x97\x34\xa4\x24\x28\x7a\xad\x27\xe3\xb4\x14\xeb\x45\x9b\x52\xf0\x64\xe5\x61\xdb\x80\xd6\xb4\x55\x70\xc6\x6e\x15\xda\x47\xeb\xc8\x60\x24\x80\x34\x68\x89\x94\xd1\xcc\xfa\x40\xa3\x9a\xa8\x43\x03\xfb\x33\xfa\x86\x73\x0e\xb2\xb4\xef\xcc\x51\xc8\xf0\x7c\xd9\x3a\xb3\xb4\x3a\xd7\x32\x58\xeb\x65\x91\x70\x88\x73\x41\x8a\x06\x0e\xd0\x81\x5c\x74\x22\xd3\x00\x09\xe5\x19\xe2\xe4\x66\x64\xc6\x08\x61\x26\x05\x06\xa7\x10\x48\x7b\x09\x75\xf5\x62\xdc\x8a\x6c\x7b\x4b\x2f\x94\xdf\x3d\x75\x78\xfa\xb0\xa6\xe2\xc0\x6f\xa3\x56\x07\x6e\x90\xa0\x44\x9c\xa2\xfc\x6b\xd8\xb0\x5a\xe9\x07\x3d\xc7\x90\xa4\x8a\xc9\x98\x64\xa8\x20\x4e\x0e\x2e\x00\x26\x8f\x6c\x9a\x06\x1a\x12\x02\xf3\x7f\x94\x44\x90\xf6\xbd\xfa\x81\x00\x91\x73\x68\x47\x85\x74\x37\xa6\x7d\x52\x62\xe3\xb4\x28\x8d\x7a\xc9\xa4\x5d\x3d\x7a\x75\x85\x93\xa4\x71\x16\xd1\x84\x25\xcb\xe2\x48\x35\x57\x2c\xf2\x23\x89\x36\x1a\x2f\x5e\xf6\xa4\xfc\xf9\xc3\x89\x3e\x53\xd9\x77\xdc\x3f\x01\xbe\xbf\xd7\x9c\xad\x04\x68\x30\x70\x60\x0e\xf7\x23\xd4\x55\x93\xae\x9c\xf4\x64\x8f\x14\x00\xae\x02\x70\x08\x03\x72\xb8\x3b\x03\x3b\x01\xa0\xad\xa5\x7d\x94\xe4\x9a\x8d\xa4\xe0\xba\x46\x22\xa0\x02\x01\xa1\x90\x96\x4f\x4e\x36\x5d\xe4\xf2\xda\x01\x92\xac\x71\x9e\x49\x7e\x09\x1c\x62\x2d\x93\x6c\xc9\x0e\xfa\xb5\x4c\xde\x51\x55\x6c\x3b\x4d\x96\x09\x47\x32\x5d\x24\xe5\xf9\x28\xa4\x3c\x14\x8a\x19\xd0\xaf\xe8\x01\x38\x1b\x53\x05\x9c\x91\x4e\xd8\x6d\x14\xa5\xfd\xd8\x9d\x14\xf9\x0e\x42\xa2\x18\xc8\x77\x82\xd4\x1d\x90\x6e\x06\x1b\x5b\x20\xd8\x56\xd0\x34\x37\x5e\xfe\x67\x45\xde\x78\xf2\xf8\x21\xb4\xda\x86\xdd\x82\xfa\x77\xbc\x81\x6a\xd3\x00\x45\xe0\xff\xd4\x8e\xd8\x49\x4f\xc4\x00\x8b\xdc\x57\xce\x49\xf5\x29\x2c\xb7\xfb\xaa\x27\x1b\xb2\x98\xdb\x29\x52\x64\xa7\x7a\x8a\xe5\x0d\xca\x57\xed\x40\x9b\x44\xa2\x24\xb8\xd8\x26\x3b\xcd\xb0\xb4\x41\x9f\x05\xcc\x5e\x43\x06\xe5\xe7\xed\x1e\x64\x80\x8e\x99\x80\xf1\x09\x95\xec\xa7\x5d\x5b\xf7\x7a\x10\x35\x4c\xcf\x16\x60\x6d\xa6\x8e\x20\x93\x5f\xf3\x0e\xab\xeb\x15\x33\x4c\xa5\xb2\xe7\xdf\xce\xc1\x5d\x9b\xad\x54\xd8\x28\xfb\x0c\x73\x63\x64\xab\x15\x9b\xa0\x7b\x20\xa3\x1f\x01\xa2\x0c\x07\x78\xe5\x94\xec\xe7\xa1\xa7\x50\x55\xa1\xba\x42\x95\xf5\x04\xc6\x5d\x3a\x6f\x07\x15\xb7\x61\xec\x9e\x9c\x05\x22\x83\x02\x3f\xa2\xaf\x25\xc3\x4f\x9d\x64\x7c\x5f\x3c\x83\x0d\x2d\x49\x7e\x16\x05\x14\xbf\x93\xbc\x96\x01\x6f\x1c\x1e\x2b\x1a\x38\xf5\xe6\x5e\x82\x6b\x62\xd9\x32\x6c\x23\x59\x46\x6d\x1e\xa4\xa7\x78\xa5\xd1\xc5\xad\xe0\xb0\x9f\x2c\x39\x51\x4a\x76\xf4\xde\xc3\xde\xf9\x80\x44\x40\xc5\x23\x64\xf3\x6b\xbb\xf6\x8b\x0a\x82\x79\xd6\xa0\xcc\x67\x80\xc7\xde\x50\x69\x3b\x4c\xb4\x00\xb9\x8e\x2d\xa1\xd1\x5a\x51\x47\x20\x1b\xc9\xb7\x56\x28\xc3\xa4\x6d\x47\x1f\x35\xab\xe0\x8f\xdc\x74\x91\x17\x39\xd6\x9e\xb4\x75\x46\x76\xe1\x2c\xb2\x33\xa9\x93\x94\x70\x3f\xab\xcf\xdb\xf8\xa4\x4e\x66\x12\x1b\x65\x1a\x2c\x86\x13\xd7\x51\xe0\xbf\x5e\xfd\xc6\x40\xc9\x58\xa9\x4c\x75\x63\x82\xe8\x56\x2f\x01\x44\x62\xac\x1e\x2c\x6e\xda\xf9\xcb\xe0\x8e\xc6\x27\x01\x44\xe2\xae\x2a\xb2\x97\x8b\xa7\x61\xf9\xf1\xff\x2f\x48\x1f\x17\x7e\x1e\x72\x18\xd8\x6e\x59\x75\x8b\x67\xc7\x6d\xad\x44\x8c\x84\x92\xe5\xf4\x7b\x27\xfe\xc9\x32\xb9\x3f\x7f\x26\xcb\xb4\x15\x24\xae\x79\x0a\x8a\x9b\xee\xe4\x3d\x59\xce\x22\x72\x4f\xb0\xfa\xb4\x33\x63\xe0\xba\xd2\x66\xd8\xb1\x9a\x74\x60\xca\x12\x9b\xa3\xb2\x34\xf0\x28\x69\x22\xa7\x81\x20\xe8\xc8\x04\x26\x5b\x62\xb7\xb0\x15\x38\x03\x6b\xa2\x0b\x85\x64\xb9\x10\x36\xb7\x2a\x4b\xcf\x0a\x17\xac\x26\x94\xdb\x8a\xb4\x00\x2b\x59\x2e\xe3\x48\x60\xce\x28\x1e\xc8\x75\x91\x20\x59\xae\x44\xa8\x19\x09\x64\x0f\x
eb\x01\x0d\x34\xee\x89\x26\x64\x1e\x81\xb5\xf0\xdc\x92\xbd\xd2\x60\xc3\x88\xd0\x81\x6b\xe4\x51\x97\xf9\x8d\xb3\x8e\xab\x6e\xf0\x5f\x19\xe3\x18\x32\xd6\x90\xb3\x93\xba\x1b\x80\xcc\x4d\x70\xcd\xa8\x8d\x31\x38\x59\x4d\xfd\x74\x8d\xa0\x83\x95\xa1\xca\x30\x34\x82\xa4\x39\x02\xe8\xaa\x75\x95\x38\x00\x07\xce\x81\x3c\xb8\xa4\xd1\xa5\xeb\x62\xf0\x50\xda\xdf\x42\xa0\x5d\xf4\x5f\xa4\x3f\xc6\x6c\xdf\x3a\x73\x95\x4e\x9b\x10\xb5\x22\x6e\xf7\xe0\x57\x3a\xc9\x6a\xde\xa3\x4a\x00\x4f\x95\x1a\x6e\x48\x95\xd8\xf9\x1b\x55\xda\x5c\x18\x57\xe5\x22\xd6\x0d\x05\xfb\x0f\x16\x25\xae\xca\x65\xa4\x31\x90\x5c\xb2\xc9\xd8\xde\x2e\x59\xad\x3a\x65\xb3\xe4\xf4\xc9\x1d\x83\x5d\x9b\xf5\x8e\x1b\x93\x95\xf6\x50\x70\xdb\x0b\xe0\x61\x6b\xb8\xb4\x68\x19\x03\x8f\x6e\x84\x17\x98\x0d\x05\x96\x5d\x5f\x5c\x3d\xba\x36\xde\x67\xc6\xca\x46\x41\xd1\x5e\x31\x59\x27\x3e\xc2\x70\x95\x7f\x63\x90\xf8\x89\x2a\xb0\x10\x95\xd7\x69\xd0\xcb\xe1\x46\xd3\xce\xdb\xc7\xec\xbb\x8a\xc7\x67\x9a\x62\x24\x4b\x24\xeb\x59\x74\x75\xe5\x44\xe0\x17\xe7\x10\xac\xba\xfc\x9f\xf6\xac\x54\x0f\xf8\xa6\x75\xa1\xad\xa0\x95\x93\xed\x2c\x8d\x18\xad\x1e\x3d\x59\x2f\xbd\x32\x9f\x5c\x46\x87\x6e\x6b\x13\x10\xd3\xfc\x4a\x78\x01\x60\x03\xd5\xcc\x1d\xdc\xa8\x9a\x3c\xa7\x75\x74\x4d\x47\xed\xb6\xf8\x11\x23\xc6\xbc\x67\x7a\x12\x45\xf3\x4b\xdd\x76\x59\xdb\xd8\x86\xc7\x0e\x6b\x81\x22\xdc\x90\xdc\xf8\x24\x21\x20\x6a\x15\x64\xc8\x5a\xc7\xb4\x8b\x67\x19\x00\x7e\x7b\xb3\xe6\x38\xde\x4c\xc9\x43\x81\x99\x36\x8e\x0a\xcc\xb7\xf1\x57\x8f\x75\x4d\xcc\xe3\xde\xb2\x1b\x0a\xe0\xcb\xb0\x49\x84\x57\x31\x06\x09\x1b\x05\x81\x48\x7b\x9f\x6c\x52\x11\xc4\x6e\xa6\x32\xae\xfd\xb8\xf2\xe3\xba\x2f\x30\xc3\x66\x16\x2d\x88\x07\x1d\x35\x10\x68\xa7\x41\x3a\x99\xe2\x40\x88\x6a\x4e\x57\x72\xba\x8a\x33\x44\xaa\x8d\xf5\x53\x5b\xea\xa2\xaa\x32\x09\x77\x96\xad\xc9\x66\xf9\x33\xaa\xdf\x64\x63\x7c\x79\x3c\x7e\xe0\xc5\xda\x5d\x4b\xa3\x64\xb3\xf6\x52\x82\xf2\x46\xeb\x10\xae\x4b\x9a\x60\xf7\xa2\xc2\xfd\x8b\xa9\x8b\x62\x93\x4c\x7b\xc9\x26\x9c\x20\xdd\xb1\x5a\x92\xb8\x6c\x43\x32\xe9\x74\xda\xe7\x31\xb4\x73\x95\xde\x4c\xc4\x37\x7b\x3a\x4d\xfa\x3c\xe6\xf6\xd1\xb4\xdf\x63\x14\xad\x6d\xae\xec\x5c\xd2\xe9\xec\x9a\xa9\x15\x35\x61\x6e\x0b\xa5\x3a\x5f\x2c\x41\x32\xf3\x1e\xe9\xed\xf0\xd5\xdb\x53\x77\x44\x58\xf4\x48\x3f\xe0\xe8\xb2\x57\x7a\x20\xf4\xb8\xb6\x71\x4a\xa7\xab\x2b\xd2\x83\x0a\x26\xb6\xd7\xa4\x5f\xdf\xe8\x4d\x68\xe8\x69\x54\x6a\xa1\xb1\x9b\xf0\x0d\xb1\xcd\x95\x18\x76\xab\x1f\x70\xcd\x46\x67\xf1\xa3\xc9\x24\x51\x7f\x1c\x32\x73\x4a\x93\xa8\x3f\x0e\x3b\x9a\x0a\xdf\x20\xd6\x1e\xc3\x8e\x1a\x4f\x64\xb1\x49\xea\xa1\xdf\x56\x48\x93\xf9\xcf\xe8\x53\xd3\x64\x61\x13\xb9\xa0\x94\x27\x1b\x2d\x0e\x7c\xc6\xba\x89\xbf\xb0\x45\xaf\x2f\xc3\x3a\x4f\xbb\x8d\xa4\x81\xa0\x94\x54\x78\xe3\x13\xde\xca\x2d\x69\x78\xeb\x8a\x8b\x27\xb6\x57\xff\x00\x2b\x26\xfd\xba\xdd\x0b\x29\x79\x1f\x81\x0d\x99\x0e\xd8\x76\x77\x11\x48\xa7\x3f\x63\x5a\x91\xa6\x49\xe0\xb8\x56\x0c\xb7\x2d\x6a\x7f\x19\xcb\x3f\x9a\x86\x4a\x8c\x0d\xc6\x15\x15\x61\x91\x81\x65\x9a\xc6\x9a\xed\xef\x1c\x32\xa4\xe9\xbc\xcf\xa4\xc3\x76\x29\x69\xba\xe8\x33\xe9\x50\x02\xcb\x3e\x93\x0e\x25\xb0\xea\xc6\xe4\x7d\x04\xd6\x3f\x63\xfc\x94\xa6\x9b\x96\x26\x6c\x4d\x31\x98\x8f\xd9\xb4\x33\xa5\xf5\x36\x02\x86\x0c\x78\x18\xe5\xf6\x3e\x13\xa5\xba\xe5\x2b\x92\x4c\xee\x5b\x58\xd9\x62\x01\x8f\xa6\xfe\x0c\x89\x91\xf9\xea\xc6\x29\x9d\xcd\x84\x07\xca\x8c\xc9\x37\x0e\xcd\x7b\xf0\xd8\x22\xaa\xad\x12\x22\x5b\xb5\x74\xb6\x10\x7e\x6a\xe4\xcc\x68\x0e\xbb\xd3\xee\x78\x5b\xe7\xcb\xae\x94\xb7\x26\x65\xe9\x6c\xd5\x95\xf2\xf6\xa1\x75\x4c\xca\xde\x19\x72\x20\xe5\xc6\x8c\xaa\x78\x02\x44\x1d\x82\x1c\xb6\xe5\x4f\xe7\x16\xf5\x1a\x1b\
+
+func fontsSpeedFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsSpeedFlf,
+		"fonts/speed.flf",
+	)
+}
+
+func fontsSpeedFlf() (*asset, error) {
+	bytes, err := fontsSpeedFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/speed.flf", size: 13007, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsStampatelloFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff…\x2e\xb2\xdc\xa2\x36\x26\x00\x00") // gzip-compressed font data; body elided, header and trailer kept
+
+func fontsStampatelloFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsStampatelloFlf,
+		"fonts/stampatello.flf",
+	)
+}
+
+func fontsStampatelloFlf() (*asset, error) {
+	bytes, err := fontsStampatelloFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/stampatello.flf", size: 9782, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsStandardFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff…\xf2\x77\x6b\x98\xab\x6e\x00\x00") // gzip-compressed font data; body elided, header and trailer kept
+
+func fontsStandardFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsStandardFlf,
+		"fonts/standard.flf",
+	)
+}
+
+func fontsStandardFlf() (*asset, error) {
+	bytes, err := fontsStandardFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/standard.flf", size: 28331, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsStarwarsFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x59\x4d\x8b\xdb\x48\x13…
a9\x2f\x4d\x7b\x92\x7e\x6f\xfa\x9e\x8d\xae\xdc\xef\x66\xde\xbe\x32\x8d\xad\xf9\x1d\x41\xc8\x6f\x61\xa5\x8e\x01\xd6\x02\x78\x27\xdd\x8c\xf3\xc5\x4b\xd0\xfc\x5d\x73\xee\x2c\x95\x43\x29\x48\x7d\x85\x25\x35\xb4\xa4\xc6\xda\x48\x9f\x3a\x48\x8a\xe7\xf2\xba\x5c\x5f\xc8\x3a\xfd\x12\xe8\xc7\x7d\x0e\x60\xaf\xbb\x49\x5e\x79\x8b\x74\x6f\x1f\x72\x7a\xfb\x8e\x93\x5e\xce\xa7\x15\x46\xf2\x60\xf9\x52\xec\x67\xe4\xd9\x4d\xea\x25\x7b\x81\xbc\xcc\xf2\xab\x22\xf8\x17\xe5\xed\x11\xc1\x0c\xe1\x4f\x05\xdf\xab\xdf\xae\xcc\xe3\x1f\xeb\x9f\xd1\x16\xd0\xa8\xd8\x1a\xeb\xd1\x95\xc0\xfa\x83\x74\x89\x7d\x5f\xfd\x17\x00\x00\xff\xff\x42\x0d\x06\x12\xe5\x1f\x00\x00") + +func fontsStarwarsFlfBytes() ([]byte, error) { + return bindataRead( + _fontsStarwarsFlf, + "fonts/starwars.flf", + ) +} + +func fontsStarwarsFlf() (*asset, error) { + bytes, err := fontsStarwarsFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/starwars.flf", size: 8165, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsStellarFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\xcb\x8e\x1c\x45\x10\xbc\xcf\x57\xe4\xc1\x57\x0a\xb3\x42\xc2\xdc\x9a\x03\x3e\xf9\xb4\xf0\x01\x65\xa4\x35\xb2\xb4\x02\x09\x7c\x81\xaf\x47\x33\x53\x8f\x88\xc8\xa8\xee\x99\x65\xe5\xf5\xba\xbb\xaa\xbb\x2a\xdf\x19\x99\xd5\xfb\xe9\xf9\xd3\xc3\xc7\x37\xf1\x2e\x7e\x88\x87\xb7\xf1\xcd\x77\xf1\x70\xfa\xe5\xd7\x9f\x3f\x7c\xf8\xe9\x31\x7e\xfb\x27\x1e\xff\xfc\x23\xde\xff\xf5\xf9\xcb\xbf\xf1\xee\xdb\x1f\xbf\x3f\xbd\xff\xfc\xfb\xf3\xd3\x97\x78\x7c\x7a\x7e\xfa\xf8\xf7\x53\x3c\x94\xb7\xa7\x37\x9b\xf9\x8d\x6d\x3b\xd5\x52\xf8\x12\xed\x12\x11\xed\xd9\x79\x74\x79\x58\xcb\xb8\x45\x5c\xde\x1f\xdf\xda\xbd\xff\x9c\xa7\xb5\x94\x68\xd7\x0b\x87\xf3\x4f\x5c\xee\xe9\xed\x9d\x8b\x91\xd1\x65\xde\xdf\x5c\xa4\xbe\xaa\x56\x44\x8e\xb6\xaf\x9b\xa0\xad\xcb\x54\x51\x93\x46\x67\x2c\x85\xb5\x93\x68\x63\x75\xe1\x5e\xfc\x36\x92\x74\xae\x1d\xb2\xcc\xfd\x65\x6e\x97\x75\xb0\x77\x12\xbc\xbe\x69\x8e\x5a\x5f\x9a\x98\x8d\x5c\xa3\xaf\xb7\x21\x6f\x67\xd7\x39\xb8\x97\x74\x23\xa2\x6c\x44\xd1\x64\x5c\xba\x3e\xcd\x61\xdd\xe7\xd1\x3c\x6f\xde\x9b\xfd\x4c\xdf\x84\x05\x46\x63\xe4\x40\xe8\xb6\xee\x6c\xf7\x96\x65\x6a\xc8\x68\xe7\x32\x5c\xd4\xb7\xe5\x3b\x71\x37\xef\xef\xe2\xc4\xc6\x07\x2d\x5e\x10\xc1\xa2\xad\x24\x39\x10\x44\x42\x18\x55\x83\xf5\x20\x37\x23\x06\xc5\x6a\x91\xaf\x7e\xdc\x06\xc8\x48\x18\xba\x5b\x05\x23\x92\xa4\x33\x8b\xae\x94\x8c\x6d\xd0\x22\x68\x87\x89\x45\x9c\xcb\x06\x25\x8c\x61\x39\x38\x09\x67\x7a\xea\x17\x0d\x2f\x6b\x63\x0e\xc3\x8a\xb3\x9e\x20\x9c\x21\x90\x4f\x39\x5b\xbc\x8d\x49\x93\xc2\x11\x20\xee\xf1\x9a\x78\xbc\xb3\xc1\x82\xd6\xc6\x08\x9c\xfe\xc9\x41\xd4\x97\xe5\x22\x20\xce\xae\xa3\x76\xa0\xda\xc9\xd9\x6c\xd2\xc5\x78\x4f\x93\x3e\xf1\x41\x1f\x53\xde\xf0\x9a\x80\x29\xf6\x9c\x9f\x1c\x3c\x6d\xce\xda\xd5\x62\xa1\x8b\x8d\x6d\x9d\xbf\x46\x12\xc1\x94\xc3\x85\x00\x73\xc3\xfe\x52\xe2\x15\x5b\x50\x4a\x13\x59\xc6\x36\x30\xa8\x85\xa0\xd3\x3e\x49\x83\xfb\x05\x38\x50\x81\x08\x0b\xb1\xed\x94\x49\x4d\x4f\xce\xdd\xf6\x99\x54\xb5\x41\x01\x13\x27\xb8\xfd\x81\x0a\xd9\xb2\xa5\x32\xe8\xd6\x5a\x13\x76\xfb\xa6\x6a\x46\x39\x84\x54\x08\x12\xb8\x30\xa3\xca\xd2\x93\x91\x71\x55\xcb\x42\x72\x37\xf9\x6a\x62\x1a\x6e\x9d\xbb\x1a\xfc\x84\xe2\xfb\x4c\x11\xe8\x2f\x5c\x6a\x4b\x86\x51\x63\x46\xee\x96\x31\xae\x8f\x91\xd5\xc2\x60\xc0\xfc\xd8\x4c\xca\x23\xb3\xc5\x18\x0a\x5c\x02\x8d\x0d\x4a\x94\x82\x36\x0c\xbb\x64\xbb\x0b\x34\xe1\x5e\x85\x70\x8a\xb0\x0d\x6d\xd5\xc3\xd6\x44\x84\x9d\xf4\x2a\x3f\x8c\x04\x71\x66\xe0\x54\x10\x73\x67\x32\x8b\xc6\
x8d\x7b\x12\xa3\xa3\x0b\x24\x16\x75\x1f\x47\xa3\x5a\xb0\x6f\x2a\x7a\x5a\x81\x9c\x83\x83\x07\xb5\x25\x2d\x2f\x2a\xbb\x05\x97\xad\x2a\xae\xb3\xe5\x2d\xce\xbe\x37\xcc\xda\xa4\xeb\x3b\x7d\x3d\xb5\xa8\xe0\x03\x06\xb9\x81\x43\x86\x14\x4d\x23\x92\x46\xc5\xf3\x05\xe3\x31\x57\x66\xca\xf9\x1a\xc4\x32\x71\x9c\x58\x93\x3b\x1f\xb1\x7e\x96\xdf\xcf\x03\x5b\x82\x70\xf4\x8d\xc6\x92\xb1\x0e\x73\x72\x56\xef\x81\xe0\x52\x43\x51\xf0\xc8\x3d\xc9\x5b\x75\x74\xb0\x50\xb0\x34\xcf\xcf\xbe\x27\xcd\x58\x37\x37\x0b\x0d\x4f\xad\xeb\xea\x0e\xef\xcb\x2e\x0d\x9b\x4f\xaa\xaa\x54\xf5\x0c\xfd\x48\x27\xf9\x2a\xb8\x9f\x61\x62\xaf\x33\x23\x86\xd9\x19\x2f\x9e\xdc\x80\xca\xc6\x66\xc3\xd9\x18\xd8\xbe\xb9\x18\x09\xc9\xaa\x49\xf0\xbb\x0a\xe4\x04\xd9\xc9\xb7\xca\x3d\x41\x02\x09\x04\x86\x59\xcc\x7c\xbe\xae\x04\xa0\x16\xad\x40\xbb\xa9\xfa\x46\x98\x77\x16\xc2\x6d\xd4\x5a\x86\x64\xf4\x15\xc3\x7b\x66\xcc\x30\x1d\xc8\xc0\xa9\xc6\xa3\x11\xe6\xd0\x52\xa5\x15\x28\xa6\x94\x00\xc3\xa6\xe8\xe1\x2d\x7d\xea\x12\x76\x7a\x40\x48\xb2\x42\x4a\x2d\xcf\x2d\xf8\x19\x73\xff\x66\xbe\x61\x54\x86\xd2\x7a\x50\xa6\x5f\x32\xe9\xb3\xfb\xef\xed\xcb\xd5\xe8\x72\xf2\x09\x91\x2f\x84\x70\x3a\x9e\x9a\x8e\x34\x73\x0d\xb3\xc4\x6a\xc9\xa8\x09\xd2\x2d\x86\x64\x45\x6e\xb4\x69\x41\xb6\x91\x1c\x28\x2b\x76\xa3\x83\x0d\x3c\x99\x6b\x98\xd0\xba\xef\x43\x30\xb1\x1d\x20\x3b\xfd\x76\x9b\xea\x41\x82\x8e\x48\x31\xfb\xe8\x65\x51\xdf\xfa\x81\xf3\xbc\x6d\xfc\x81\x80\xd0\x6a\x31\x48\x92\xea\x90\x91\x4c\x3d\x02\x4f\xbb\xa1\x28\x33\xc1\x1a\xa7\x99\xb9\x38\xa2\xcf\x30\xc9\xba\x25\x9c\x4d\xf1\xcb\xfa\x41\x3f\x3f\x94\xb0\x73\x7b\xbf\xf6\x9e\xdb\x4a\x60\x39\xc4\x37\x93\x17\x8a\xe0\x95\xe8\xc3\x66\xeb\xeb\x8c\x49\xff\x91\x21\xc8\xa0\xa3\xc2\x81\x87\xc4\x35\x7e\x7a\x55\xb2\xaa\x90\xfb\xe1\x50\xf5\x83\x41\x8a\x01\x32\x4f\x22\xfe\x62\x94\x11\xea\xb6\x7f\x4b\xf1\x86\x72\x4f\x5c\x99\x84\x15\x55\xac\xc3\x3d\xc1\x00\x5c\x41\x82\xd3\x11\x95\x3a\xe3\x31\x2a\x8b\x8f\x65\x0a\x57\x79\x50\x25\xff\x3d\xee\xd5\x42\x9f\x88\x3b\x72\x04\x20\x85\xe2\xd4\xe6\x80\xe1\x36\x14\xa1\xe2\xba\x36\xbe\x1e\xd5\xfc\x28\x9a\xd9\x16\x00\xaa\x65\x92\xeb\xac\x2d\x40\x43\x48\xd3\xc7\xf8\x3a\x6b\x38\x31\xab\xc1\xab\x30\xec\xcf\xd9\xf5\x17\x52\x20\x52\x2d\x3c\xc8\x30\x44\x74\x08\xf6\x98\xd5\x42\x8a\xee\x22\xc3\x96\x84\xab\xfc\x91\xb5\xe4\xbe\x49\xc2\x6f\x9f\x30\x05\x94\xa4\xe7\x26\xdf\x66\xdd\x57\x86\x90\x43\x33\xc5\x21\x53\x91\xda\x25\x71\x7c\xfd\x9a\x78\xfd\x7f\xdd\x4d\x73\x4d\x8b\x15\x39\x23\x44\x48\xea\x0d\x30\x9d\xde\x11\x1c\xfd\x5f\xe3\xed\x24\xff\xbe\xd6\x83\xff\x02\x00\x00\xff\xff\x45\xa0\xfc\xc0\xe6\x20\x00\x00") + +func fontsStellarFlfBytes() ([]byte, error) { + return bindataRead( + _fontsStellarFlf, + "fonts/stellar.flf", + ) +} + +func fontsStellarFlf() (*asset, error) { + bytes, err := fontsStellarFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/stellar.flf", size: 8422, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsStopFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x58\xc9\x6e\x1c\x37\x13\xbe\xcf\x53\x7c\xc0\xcc\xa1\xfb\x32\xfc\xed\x3f\x0b\x0c\x08\x82\x03\xf8\x6a\x1f\x12\x20\xc9\x81\x40\x61\xa2\x91\x12\xc1\xb6\x14\x48\x1a\x05\x01\xf8\xf0\x01\x6b\xe1\xd2\x4d\xf6\xf4\x58\x90\xd4\x6c\x36\x59\xac\xe5\xab\x85\x75\xf7\xe5\xee\xed\x61\x87\x1f\xf1\x03\xde\xfe\x0f\x6f\xbe\xc7\xff\x37\xbf\xbc\x3c\xfe\x8d\x3f\xfe\xc5\x87\xc3\xeb\xfd\x11\xbf\x1d\xbe\xbc\x3c\x3e\xe0\xea\x1f\x7e\xbe\xbf\x79\xde\x9f\x6e\x8e\x87\xd7\xfb\xe7\xfd\xed\xf1\x74\xbd\xf9\x70\xfb\x74\xff\x7a\x7b\xc4\xdd\xd3\xe3\x57\xfc\xfc\x78\x7a\x38\xde\x1e\xe3\xe6\x4f\xf7\x37\x9f\xf1\xf1\xfe\xe1\xf6\xe9\x19\x9f\xf6\x1f\xf7\x32\x7c\x7f\x3c\x3d\xfd\x75\xf8\xba\x3f\xdc\xec\x4f\x9f\x37\x6f\xdf\xe0\xa7\xd3\x9f\xa7\xe7\x17\xbc\x79\xf7\xee\xbb\xcd\x6e\xb7\xed\xfd\x6d\x37\x20\x6c\x37\x01\xc1\xfe\x51\xb0\x39\x1e\x01\xb2\x86\xa7\x06\x8c\x03\xc6\xed\x26\x38\x04\x07\xfe\x1a\x17\xf4\x9e\x3c\x20\xde\xcb\xb3\x14\x80\x10\x10\x98\x54\x9c\x0b\x01\xa0\x71\xf1\x0b\x02\x51\x08\xf1\xbb\x91\x2d\x69\x27\xba\x41\xa4\x00\x51\xdc\x15\x28\x9e\x1a\x94\x96\xc8\x51\xd3\x50\xa1\x44\x2a\x1a\xc1\xb2\x03\x70\x10\xb9\xe2\x33\x0e\xe2\x53\x95\x81\x41\x18\xca\xe7\xf3\x29\xb2\x8a\xe0\xc1\x0a\x8a\xc4\xc6\x38\x37\x82\x30\xe8\x9c\x03\x7c\x9c\xf3\x44\xce\x17\x54\x60\xfa\x37\xad\x8a\xc2\x27\xff\x4c\x8d\xc2\x17\x6f\x57\xf6\x86\x78\x02\x13\x86\x97\xef\x9e\x32\x79\x26\x2e\x1c\x0e\xca\x5f\xe4\xc3\x0b\xd9\x11\xac\x78\x11\x78\x20\x72\xb5\xe1\xc4\x6a\xc2\x9b\x0f\xd1\xdc\x23\x9f\x13\x67\x65\x07\x8b\xaa\xdc\xc4\x1f\xcf\xb3\x2e\x50\xa8\x44\x54\xfe\x13\x30\x26\x36\x13\x0b\x8d\x0d\x0b\x55\xaa\x6e\x68\xa6\xd2\x9b\xae\xce\x0f\x22\xa1\x9e\xf5\x31\x79\x2c\x51\xd5\x3d\x46\x95\x74\xd3\x12\x48\xa0\x20\x99\x71\x4e\xfc\xa3\x28\x61\x68\xb3\xa7\xc1\x39\x73\x3a\xe7\xa0\x43\x38\x22\x39\xc4\xf3\x2e\x87\xa9\x1e\x8d\x52\x5c\xe4\xc8\x09\x43\xa1\x7e\xa8\xdf\x1a\x00\xf8\x78\xd5\x05\x91\x9a\x3f\x0e\x05\x01\x2e\x9f\x44\x89\xd7\x41\x87\x53\x3b\xea\x34\xe5\x15\x4e\xd1\x29\x67\x00\x43\x3a\x83\xd2\x19\xba\x76\x2e\x8d\x28\x24\xeb\xd2\xb4\xa9\x5c\x88\x1b\xc3\xb8\x88\x5a\x33\x4b\x4c\x02\xc2\xb6\xcb\x7d\x5b\xfe\x35\xbc\xc1\x44\x4a\x76\x66\xd6\x90\x4c\xe9\xd5\xe1\x55\x91\x7e\x4a\xa7\xcf\x94\xa2\xca\xd6\x3a\x89\x18\xe5\x49\x18\xc8\x61\x26\x22\x92\x30\xcc\x41\x62\x21\xb9\xf3\x35\xae\x71\x85\x2b\x63\xac\xe4\xac\x09\x26\x5a\x14\x07\x8a\xf3\x42\x01\x13\xae\xb6\x1d\xdf\x99\x7b\x51\x67\xcd\xc4\x7f\x65\x9d\x06\x9e\xa8\x17\x0e\x3b\x03\x86\x28\x24\x33\x35\x96\xc1\x65\x12\x23\x1a\x8e\xdf\x0f\x03\x25\x11\x52\x6b\x0c\xf0\x83\x00\x64\xc4\xa8\xf0\x70\x43\xa9\xb9\x3a\x81\x14\xc8\xb2\xb0\xaa\x86\x10\x55\x95\x01\x8f\xc3\x65\xbd\x7f\xa7\xe3\x1d\xce\xbf\x50\x20\x79\x91\x18\xc2\x5f\x76\x1c\x6e\xe3\xef\xae\xde\x53\x18\x88\x59\xf1\xc9\xc9\x34\x0d\xb8\x38\xe7\x25\xf1\x6b\xc8\x09\x02\xcf\xd0\xc2\x6f\x72\x1f\x64\xf7\x19\xc1\xb6\x13\x37\x19\x8c\x94\xce\xf6\x9c\xaa\x0e\x86\x9a\xac\x35\x7f\x08\x89\x3c\xf4\x69\xad\x5f\x08\x46\xc8\xbc\x79\xb3\x25\x92\x6c\x16\xb4\x99\x37\x09\xdb\x41\x59\xbb\x20\x7c\x28\x43\x10\xa9\x95\x5a\x0e\x51\x0b\x81\x72\x25\xb5\x24\x34\x85\x09\xcc\xce\xe9\x4d\xb9\x40\xc0\x60\xb3\x12\x87\x16\x92\x08\xc7\x5c\x2b\xfb\x2a\x0d\xa5\x3c\x44\x34\xda\x6c\x5a\xc0\xbc\x85\x12\x20\x85\x15\x4c\xce\xb1\x82\xbc\x45\xec\x9c\xec\x6b\x5d\x6d\x53\xf2\xa0\x94\x3c\xf2\x77\x4a\x8a\x48\x74\x18\xab\xc9\xe2\x8d\x08\x49\x95\x68\x4e\x20\x1a\x34\x63\x07\x84\x18\x1c\xf5\xab\x82\x24\x8a\xd5\xa8\xc9\x9a\x90\xec\x0c\xd7\x81\x21\x9b\x3d\xa1\x33\x29\x3c\xc8\x6f\x3d\xe4\xba\xb7\xa3\xf0\x16\xb5\x6c\xc9\xd6\xb0\x63\xbe\x66\x4a\xe9\x52\xa3\x33\xf5\xc9\xb9\x6c\x2b\x1c\x93\xbb\x04\xf6\x6
7\x79\x63\x99\xbc\xb0\x64\x45\xf0\xa2\x15\x96\x2a\x01\xc9\x33\x40\x51\xee\xb5\xf5\x26\xf8\x94\xc5\xc2\x88\xcc\x7a\xab\xb4\x53\x1c\x5a\x51\x6d\x74\xc2\x45\x07\x85\x67\x03\x63\xc7\xc1\x57\xd9\x74\x8e\xb7\x8a\x58\x26\xa0\x9b\xe0\xad\x1c\xf6\x48\xd5\x8b\x9f\x97\x09\xf9\x42\xd1\x81\x3b\x18\xee\x97\x32\xe5\xe5\x58\x57\xb3\x32\x42\xee\x5e\x39\xd1\x71\xe5\xe2\xc9\xb7\xfc\xbc\x15\x0a\x5b\x38\x4f\xf7\x3d\xad\xad\xd6\x59\xf1\x5c\x95\xbb\x50\x6b\xab\x8f\x6b\x10\x43\xf9\xb0\x78\x33\x96\xb5\x7e\x8d\x16\x51\x09\xf2\xbd\x4f\x6d\x93\x4a\x95\x12\xd5\xdb\xa2\x58\x42\xe3\x5a\x21\xfa\x18\x0a\x53\x6c\x53\x6d\xc1\x92\x39\xcf\x64\x87\x49\x84\x5f\x18\x94\xf2\xae\x18\xf6\xb4\x5b\x5a\x53\x2b\x4a\xf8\xb0\xed\xde\xa4\x67\xdc\x14\xa9\x55\x9a\x06\x18\x0c\xdc\x75\xfc\x6d\xea\x58\xe2\xb8\xc1\xda\x6a\xaa\x31\x95\x41\xae\x25\x71\xef\x7c\x96\x49\x4a\xf7\xe4\xdf\xd3\x74\x39\xbb\x8f\x6a\x16\x81\x53\x4f\x8a\xfb\x0b\xf4\x86\x0b\xce\x87\x96\xdb\xea\x47\xad\xf3\x0b\x66\x47\x2b\x1b\xac\x66\x30\xb5\x58\x48\xb7\x9d\xdf\xa4\x78\xc3\x5b\x94\x60\xb5\xe2\x73\xb2\x9b\x1b\xae\xbc\x78\x4c\x1b\x5e\xb9\xa3\xa4\xa5\x7a\xf1\xd6\xf6\x03\xd7\x87\x43\x0a\x28\x56\x7f\x5c\x49\x81\x4c\xa1\xaa\x34\x1a\xcd\xb7\x0e\x57\x53\xcd\x15\x05\x00\x66\x92\xd3\x54\xf2\xc5\xfd\x74\x5e\x73\x6d\xcb\x59\x5a\x4e\x90\xcf\x90\x3b\x0b\xf9\x85\xf3\x63\x16\x70\x45\x2f\x65\x1d\x72\xd2\xfe\x84\x1c\x68\x7c\x5b\xe7\x72\xd9\x7c\xad\x16\x4e\x39\x53\xba\x40\xd1\x6c\x54\xac\xba\x39\xe4\x0d\x24\x1a\xb5\x2d\xa4\x23\xfb\x0e\xeb\x6c\x5c\xf6\x15\x2a\xd2\x54\x4a\x52\x17\x38\xf9\x6c\x3f\x3c\x7e\xb5\xbc\x5d\xdd\x86\x7a\xfb\x69\x76\x3e\xbe\xe1\xfc\x78\xbd\x96\x56\xe2\x88\xdf\xd9\x2b\xe2\xf5\x7a\xea\x15\x97\xca\x8f\x14\x2b\xa6\xad\x84\x12\x71\x96\x3a\x20\x7d\x2f\x88\xb9\x7a\x77\x12\xcd\xd7\xfc\xc5\x2e\x1d\xd6\x6e\xd5\xfc\xdb\x68\xb7\x62\xde\x46\x9f\xba\xf3\xac\x25\x1b\xa9\xf3\x57\x6d\xc9\xca\xfb\xbc\x25\xab\xad\x74\xe9\xff\x79\xa7\xed\x09\x3f\x69\x3a\x2d\x8f\xaa\x9a\x87\x1b\xb8\x29\xd8\x39\xe4\x36\x01\xe5\x3e\x01\xd9\x3d\x69\xa1\x10\x33\x6a\x44\x34\xd0\xb8\x50\xae\x9f\xbd\x3f\x54\xbc\x59\x20\x5e\x59\xa5\x35\x79\x33\x5a\x2a\xe5\xc5\xb9\x7e\xbe\x7f\x7d\xe0\x9b\xef\xbf\xd0\x8d\x1b\xa7\x61\xac\x73\x4a\x9a\xa1\x30\x4c\xce\xff\x2f\x00\x00\xff\xff\xc9\x25\x1d\xe0\x82\x1a\x00\x00") + +func fontsStopFlfBytes() ([]byte, error) { + return bindataRead( + _fontsStopFlf, + "fonts/stop.flf", + ) +} + +func fontsStopFlf() (*asset, error) { + bytes, err := fontsStopFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/stop.flf", size: 6786, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsStraightFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x55\x4d\x6f\xe3\x36\x10\x3d\x87\xbf\xe2\x1d\x78\x90\x81\x48\x74\xd3\x9c\x82\xa2\x10\x8a\x5e\x7b\xe9\xa1\x68\x01\x02\x84\xe2\xd0\x89\x1a\x5b\x5a\x58\xce\xee\x06\x98\x1f\xbf\xe0\x0c\x45\x49\x94\xbc\x80\x2d\x52\xf3\xf1\xe6\xcd\x07\xa9\xe3\xe9\xf8\xd0\x68\x3c\xe2\x57\xfc\xb2\xc7\x1e\x8f\x6a\xb8\x5e\x9a\xf6\xf5\xed\x5a\x1d\x4f\xc7\xbb\xbb\x7f\xfc\x65\x68\xfb\x0e\x0f\xea\xf9\xf3\x09\xf8\xa3\x19\xf0\x97\x6f\xff\xf7\x17\x00\x67\xde\xd4\x6d\x77\xec\xab\x6f\x6d\x57\x5d\x3f\x7c\xd5\x9d\x00\x3c\x37\x43\xfd\xd2\x9c\xfd\x7b\xf5\x7e\x7d\xab\x06\xaf\x8e\xed\x77\xff\x82\x80\xf0\xf7\x67\xd3\xe1\xbf\xfe\xe3\xf0\x0e\x7c\x86\xa5\x3e\x0c\xd5\xc7\xc5\xbf\xb6\x5d\x53\x1d\x1a\xf5\x67\x3b\x1c\x4e\x4d\x7b\xf6\x97\x27\x9c\xfb\xe1\x8a\x43\xf3\xa5\xbd\x36\xa7\x01\x6f\xcd\x57\x8f\x67\xef\x3b\xbc\xf8\xa1\x7d\xed\x18\x10\x43\x7f\xf6\x7d\xe7\xe1\x4f\x83\x57\x5a\xd7\xf3\x7f\xad\x80\x5a\x11\x6a\x55\x81\xf7\xb5\x32\x06\x41\xc5\xaf\xf1\xc1\x4f\x7e\x73\xe4\xc8\xa1\x56\x25\x95\x54\x22\xc9\x6b\x05\x62\x79\x41\xa4\xd9\x8c\x76\x60\xe1\xe8\x5d\xab\x7d\xc0\x35\xfb\x05\x64\xad\x8a\x5d\x10\x17\xff\xc6\x50\x4c\x20\xc4\x8f\x7f\x16\x68\x36\x61\x91\x4d\xee\x96\xdf\x43\x14\xa3\x93\x50\x03\x21\x2f\x6b\xc2\xd3\x58\x1d\x25\xbc\xb0\xc6\x91\x63\x21\x41\x8f\xc2\x5a\x21\xd6\xe3\x5e\xf3\x3e\xc1\x38\xa7\xd3\x3e\x5a\xca\x5e\x87\x72\x69\x79\x8d\xd9\x31\xc7\x05\x13\xe7\xa2\x1c\x96\x99\x3b\x27\x89\x21\x51\x65\x35\x41\xf8\x44\x09\x87\x65\x0d\xdc\x8e\x0d\x9c\x1b\xdd\xd6\x4a\x27\xcb\x0c\x92\xb5\xe4\xdc\x08\x48\xcb\x98\x02\x46\x02\x92\x7b\x8f\x8c\xe3\x6a\x67\xfa\x18\x3b\x52\x91\x44\x0c\xb6\xbc\x8b\xe8\x55\xe4\xde\x33\x03\x2b\x29\x2c\x0b\x82\x38\x84\x69\x10\x47\xc1\x7d\x9a\x03\xae\x3e\xf7\xd6\xc6\x3e\x64\xbd\x2a\xcb\x79\xcb\x93\xa1\x99\x59\xc7\x14\x76\xe1\x59\x4d\x55\xc7\xf4\x8b\x53\xcd\x13\x14\x16\x53\x96\x76\x1c\xf5\xa9\x05\x24\xf9\xc5\x65\xae\x0c\x13\x1c\x2c\x2c\xef\x33\x2f\x99\x06\x92\xdc\x97\x5e\x34\xe2\x2e\xbd\x92\x02\x58\xc1\x19\xb8\x18\x69\x41\x42\xe8\xc7\x29\x20\x19\x82\x49\x29\xe7\x9d\x52\x99\x35\xe3\xea\x20\x98\x70\xd2\x84\x12\x33\x25\xd8\x95\x02\x6b\xba\x31\xb0\x35\x9b\x81\xa3\x12\xa2\xb4\xb4\x2e\x6a\x3c\x2c\x76\x5d\x9e\x59\xc5\x23\xe1\x1b\x9e\xf6\x67\x9e\x36\xf7\xac\x55\xc1\x16\x8b\xc4\xe3\x9c\xf3\x4d\x06\xc2\x2a\xbf\x1b\x2c\x45\x69\xe5\x78\xc0\x1a\x6c\x24\x2f\x25\x21\x63\xf3\xca\x04\x38\xbe\x42\xd6\x85\x16\xc5\x82\x48\x64\x28\x1e\xf3\x0e\xf0\x78\x33\x71\x72\xf3\x9b\x96\x79\x05\x56\x49\x28\x87\x91\x9b\x4e\x4b\x4b\x63\xe7\x1f\x80\xd9\x67\x40\xb9\x09\xd3\xe6\x97\xb4\x24\x28\xf3\x58\x8c\x90\x93\x62\x1c\xee\xdd\x42\x11\x09\x17\x2e\xfb\x2a\x80\x01\x72\x98\xd1\xba\x9c\xac\x93\x37\xe9\x15\x84\x30\x09\x01\x9d\x59\x33\xc1\x82\x89\xdc\x34\x94\x5f\x3d\xc4\x1d\x19\xa3\x4b\x5d\x8b\x59\xa0\xfc\x28\xcd\x22\x13\xad\x6a\x80\xad\xc8\x39\xd7\x2d\x8f\x38\xbd\x37\xeb\x4c\xcb\x02\x51\xde\xbb\x20\x1c\xc1\x53\x32\x6e\x35\x24\xdc\x4e\x99\x9d\xbc\xf0\x3c\x0e\xe3\xd1\xca\xac\xed\xce\x6c\x59\xef\x8a\x65\xc0\x09\xc2\x4c\x42\x39\xbb\x13\x8b\xe0\xf3\x1b\x64\x65\x01\xe5\x35\x0e\x69\xfc\x0e\x59\x67\x54\x4c\x3a\xf7\x89\x4a\x0f\xf4\xe1\x6d\xf3\x16\xef\x9d\xeb\x6f\x1e\xe5\xe8\xb9\xad\x94\x18\xa5\x2b\xb7\x07\x7d\x54\xac\x7a\x59\xa2\x5c\x95\x76\xeb\x53\x42\x51\xf9\x23\x00\x00\xff\xff\xfc\x24\xb9\x78\x77\x0a\x00\x00") + +func fontsStraightFlfBytes() ([]byte, error) { + return bindataRead( + _fontsStraightFlf, + "fonts/straight.flf", + ) +} + +func fontsStraightFlf() (*asset, error) { + bytes, err := fontsStraightFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/straight.flf", size: 2679, 
mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsTanjaFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x99\x7d\x57\xda\xca\x13\xc7\xff\xcf\xab\x18\x2f\xb5\x87\xf1\x54\xa8\x88\xc5\x9e\xc3\xd5\x20\xde\x62\x51\x14\x05\x45\xad\x1e\x2e\x02\x2a\x2a\x20\x20\x22\xb5\xfe\x5e\xfb\xef\xcc\xec\x43\x9e\x76\x11\xef\xf6\x98\x7c\xf6\x3b\xdf\x99\x24\x9b\x6c\x42\xd2\xeb\x87\xeb\x54\xe3\x13\xac\xc3\x37\x48\x7d\x85\xe5\x15\x48\x39\xd5\x46\xef\xae\x01\x93\xc6\x08\x26\xc3\xce\xd3\x53\xbb\x07\xd7\xfd\x21\x2c\xff\x2d\xf4\xbd\x69\xaf\xf7\x02\xc5\x46\xf3\x7e\xd4\xef\xfd\xbd\x0c\x57\x53\x27\x7f\x3b\xec\x8c\x9e\xfa\x8f\xb7\xed\x21\x14\x13\x50\xee\x0c\x3b\x0f\x0f\x7d\xf8\xab\x7a\xdb\x86\xbd\x7e\xf3\xbe\x3d\x84\x42\xaf\xdf\x6d\xff\x05\xd9\x47\x8e\xa5\xd3\x6e\x67\xd4\x1c\x8d\xc6\xbd\xc4\xb8\xd7\x49\xb4\x5b\xe3\x0d\xe7\xd3\xa7\x4f\xee\x1c\x0b\xd7\x81\x05\x04\x70\x9d\x05\x5c\x59\xf1\x56\x52\x04\x6a\x91\x1e\xaf\x5c\x67\x69\x69\x09\xe8\xcf\x75\x62\x31\x00\xfa\xf3\x13\xc8\x36\x37\x11\xc6\x10\x56\x57\x59\x8e\xe1\xaa\x68\x10\xd4\xe7\xf3\x44\xcb\xcb\x0d\x40\x05\xa5\x5e\xc1\x74\x3a\x9d\x26\xac\x20\x10\x84\x54\xe1\x4d\xa7\xd9\x90\x0e\xaa\xc1\xca\x54\x78\x91\xc4\xb5\xb5\x35\x70\x1d\x8d\xd2\xb3\x88\x6b\xd2\x2f\x88\x59\xa0\xf4\x4b\x9c\xb9\xe7\x9f\x31\x93\x11\xfa\x67\x04\x60\xb6\xa9\x84\xcc\xd2\x90\xc9\x64\x02\xbb\x1b\xd8\x73\x71\x02\xe9\x9c\x89\x93\x27\xa2\xd6\x25\xaf\xe2\x48\xdd\x38\xb2\x1c\x97\xc3\xa1\xd6\x4a\xd7\xbe\xc0\x46\x5d\xd7\x41\x3d\x7e\xa8\xae\x2a\xc4\xc0\x5a\xe8\x18\x18\x67\x5f\x01\x5a\x2d\xc9\x20\xad\xd7\x01\xd6\xd7\xd9\xb0\x84\xeb\xd4\x63\x7d\x5d\xb4\x88\xee\xf7\xcb\x32\x86\xcb\x24\xa0\xf2\xd5\x15\x13\x97\x1c\x37\xbf\x62\x32\xfb\x0b\x99\x96\x96\x41\x7f\x6f\xaa\xc4\x7c\x5b\x9f\xe5\x0b\x1e\x4a\x64\xa1\xa6\xac\x4f\x93\x39\x49\xdc\x94\xe9\x44\x02\x99\x18\x05\x11\x4a\x02\xd7\x51\x64\xbf\xc0\xe0\x2b\xa2\x38\xa1\x5f\x91\x4e\x2d\x0a\x42\x88\x10\xaa\x0b\x40\x67\x58\x0e\x69\x05\x17\x84\xb8\x82\x0b\x0b\x2a\xbc\x82\x56\x22\xdb\xc2\xc2\x82\xb5\x60\x0a\x73\xb9\x1c\x89\x29\xf2\xe7\x72\x32\x9c\x52\xfb\x90\x52\x05\x53\xf2\xaa\xe3\x04\xed\x8b\x16\x5c\xc5\x98\x38\xad\xab\x94\xa0\x86\x7a\x15\xbd\x1b\xe3\x2a\x06\xa3\x3a\xc3\x58\x30\x4d\xc6\x4a\x05\x82\x54\xa1\xa6\x8c\x69\x9c\x45\xe1\x82\x6b\xb8\xb8\xb8\xb8\x48\xe2\x1a\xaa\xb0\xa7\xb1\xb0\x86\x01\x0a\x45\x23\x87\xfc\x0d\xf7\xf7\xf7\x49\xfc\xa6\x0b\xb2\xe4\x69\xfb\xfb\x01\xf2\x32\x8c\x05\x33\xf8\x99\x9a\x14\x33\xea\x54\x64\xd4\xa9\xc8\xa8\xbb\x43\x46\x4d\xe1\x0c\xce\x9e\x09\xeb\x48\xd3\x0e\x5c\x67\x9d\x8c\x62\x06\x1a\x34\x53\xd4\x5c\xf0\x3b\xc6\xe3\x71\x12\xbf\x53\x4a\x3c\x0e\x4a\x8b\x2b\xe3\x77\x34\x44\xe7\x98\xab\xde\xe4\x0c\x4e\x53\xc7\x70\x5b\xd1\xf7\x83\xd9\xb7\x95\xac\x38\x9d\x59\x31\x90\x59\x31\x8c\x59\x35\x88\x59\x35\xac\x59\x35\xd0\x59\x0c\xdf\x72\xc3\x63\x6b\xba\x1b\xcd\x79\x87\x72\x9d\x0d\xb5\xe5\x0d\xb5\xe5\x0d\xb5\xe5\x0d\xf4\xf7\x65\x7c\x23\xf8\x30\x70\x1d\xd8\xc4\x64\x32\x99\x24\x6d\x93\x63\xc9\xa4\x0c\x6f\xea\xe9\xbf\x19\x79\x52\xdb\x55\xaf\x70\x0e\x53\xa9\x14\xa9\x39\x36\xa6\x04\xa5\x48\x14\xa4\x35\x10\x1a\xe4\xc2\x4f\x91\xd0\x69\xcd\x61\xa3\x21\x54\x72\x12\xab\xe2\x8d\x06\x63\x83\x5b\x58\xd5\x68\xd9\xd3\x2d\xbc\xba\xba\xba\x62\x79\x8b\xcc\xc4\x76\x15\x04\x2b\xc3\xd5\xac\x21\x80\x3c\x36\x9b\x4d\x96\xf3\x94\xd8\x6c\x82\xeb\xe4\x51\x9b\x7d\xe8\x33\xf8\xd3\xcc\x85\xb7\xb1\xd5\x6a\xb5\x58\xde\xa6\x34\x62\x89\xc4\x76\x6c\x89\x3c\x6b\xe1\x7f\xb0\xcd\x0d\x08\xb5\x43\xaa\x10\x56\xc3\x86\xb6\xbd\xf0\x0f\xbc\xe6\x06\x84\xda\x21\x55\x08\xab\x26\xb4\x8e\x71\x01\x6f\x6e\x6e\x6e\x48\x2e\xe8\x3b\x17\
x11\x89\x12\xa9\xa3\x0d\x02\x45\xda\x8c\xc2\x3b\x9c\x77\x7b\x0b\x21\xbc\xe5\x16\x56\x4d\x68\x29\xfc\x13\x3b\x9d\x4e\x87\xc5\x9f\x6a\x8a\x59\xc1\x67\x0e\x96\x72\x5d\xa7\x88\x77\xdc\xa4\x58\xd4\xf3\x55\x21\xad\xee\xee\xc2\x08\x94\x37\x73\x4c\x77\x29\xfd\xfe\x9e\x64\x42\x22\xc6\x7b\xa6\xb0\x1a\xf0\x32\x5b\x0b\xef\x79\xa7\xf3\x63\xf8\xc0\x6d\xc6\x55\x50\xc2\x6e\x17\xba\xdd\x2e\x05\x4a\x08\x40\xd4\xed\xbe\xdb\xa1\x16\xed\x00\x98\x36\xc3\x1b\xda\xc7\x1e\x00\xf4\x7a\xc0\xd8\xd3\x48\x6b\x85\xd0\xeb\x69\x04\x1f\x0a\x83\xed\x08\x0e\xb0\xdf\xef\xf7\x49\x3e\x60\x73\xbf\x3f\x0f\xfa\xd2\x2c\x85\xcb\xf8\x48\x8d\xe4\x32\xe7\x3d\x3e\x82\x41\x05\x3b\xda\xf6\xf8\x10\x07\x83\xc1\x80\xe4\x43\x36\x0f\x06\x51\x1c\x0c\x40\xab\xc2\x2b\xd2\x58\xb5\x14\x3e\xc2\x21\x35\x92\x8f\xb8\xda\x70\x28\xd1\x53\x09\xb4\x21\xe2\xb5\xed\x71\x05\x47\xa3\xd1\x08\xc4\xbb\x2a\x00\x8c\x46\x20\x55\x9f\xb9\x82\x26\x83\x48\xb3\x14\xae\xe2\x13\x37\x21\x57\xbd\x07\xe3\x47\xd0\x54\xf8\x98\xc3\xe3\x31\x7c\x0c\xe1\x18\xc7\xe3\xf1\x78\x46\xe1\x13\x36\x3f\x3f\x83\x0d\x81\xf8\xf9\x59\xe4\x9d\x20\x91\x28\x71\x32\x7b\x8f\x6b\xf2\xba\x99\x4c\xc0\xd0\x99\x4c\xde\xed\x40\x0d\x27\x13\x98\x50\x7f\xe6\x4c\x3c\xe5\xda\x2f\x2f\x14\x20\x7e\x79\x11\x9e\x53\x24\x0a\xa1\x67\xf0\xa7\x99\x8f\xe0\x8c\x1d\xd3\x29\xc9\xc4\xd3\xa9\x70\x9c\x21\x91\x30\x9f\x79\x83\x60\x46\x53\xe1\x73\xfc\xcd\x4d\xcb\xe7\xea\xb7\xe1\xb9\xbe\x75\x9f\xab\x12\xe7\xfa\x59\x16\x49\x8b\x14\xfe\x85\xaf\xaf\x40\x2b\x8e\x58\x56\x6c\xf1\x27\xbb\xae\x73\x81\x7f\x74\x3d\xc5\x14\x95\xcc\x46\xc1\x22\x87\x59\xa6\x13\x5b\xf7\xe8\x12\xdf\xde\x58\xba\x44\xdb\x4a\x5b\x02\x7b\xc4\xaf\x0c\xe0\x7d\x00\x33\x3c\xf8\xe6\x81\x77\x7e\x37\xcf\xfd\x3b\xdc\x77\x48\xde\x17\xba\xe0\xcb\x82\x65\x69\xde\x46\x83\x5f\x78\x59\x6b\xa8\x97\xe5\x06\xbf\x2c\x7b\x9a\xf5\x75\xf8\x4a\x5f\x12\x7e\xda\xda\xda\xda\xd2\xda\xd6\x56\x88\x54\xd4\x58\xd0\x38\x1e\x4d\xcc\xe7\xf3\x79\x70\x9d\xa6\xde\x88\x47\x5e\xd4\x5e\xb0\xa5\xdf\x78\x04\xb5\x70\x7b\x7b\x7b\x1b\x5c\xa7\x45\x65\x42\xe4\x45\xe7\xde\xc3\x36\xfe\x43\x4d\x93\xd0\xb4\x4f\x8a\xb6\x82\xd7\xf8\xe3\xc7\x0f\x70\x9d\x6b\xf5\xae\x21\x84\xa0\x12\x86\x70\x25\xdb\xd8\xdd\x60\xa1\x50\x20\xba\xa1\xcc\x42\x21\x40\x22\x5a\x50\x19\x32\x5a\x10\x19\xae\x73\xab\x0f\xc1\x4f\x3b\x3b\x3b\x3b\x5a\xdb\xd9\xb1\x90\xf1\x50\xbd\x57\xde\x0e\x46\x17\xca\xad\x0e\xc5\x74\xe1\xdf\xe1\xbb\x74\x47\xfb\x50\x2c\x92\x76\x87\xc5\x62\x91\x0b\xde\xab\x71\x0b\xc0\xee\x2e\xc3\xee\xae\x52\x88\x7c\x21\xc3\x20\x3f\xe0\x1e\x0b\x0f\x68\x5d\x3d\xe0\xde\x9e\xe9\x36\x02\xd1\x31\x11\x9d\x2e\x96\x4a\x50\x2a\x95\xa8\xd3\x45\x00\xa2\x52\xc9\xd4\xa1\xc6\x1d\x6b\x35\xf3\x65\xd0\xd3\xdf\x6b\x7a\xa8\xbe\xd2\x98\xc8\x78\xd6\x8c\xd7\x55\x1f\x0f\x0e\x0e\x88\xfa\x94\x7c\x70\x10\x20\x2f\x3a\x77\xc1\x47\x2c\x97\xcb\x65\x41\x00\x50\x2e\x87\x28\x10\x85\x00\x59\xf6\x70\x80\x87\x87\x87\x44\x03\x32\x1e\x1e\x06\x48\x44\x0f\x55\xc6\x00\xfd\x64\x29\x38\xc4\xa3\xa3\x23\xa2\x21\x95\x39\x3a\x52\x04\x10\xa0\xf9\xc7\x70\x84\xf2\x33\x9f\x04\x15\x1d\x61\x54\x8b\x16\x7c\x12\x8f\xd1\x27\x7c\xaa\x56\xab\xe0\x53\xcc\x50\xfd\xc0\x5d\x63\x4c\x69\xc7\xc7\x16\x82\x31\x1e\x1f\x1f\xbf\x7b\xa8\xc1\x47\xf1\x33\x8f\xcf\xc9\x09\xa9\xc4\x27\x27\xc2\xf0\x8c\x44\xc2\xfb\x3c\xfb\xc7\x0a\x80\x29\x40\x9d\x89\x1c\xfc\x5a\x4d\x76\x6a\x35\x53\x07\x26\x58\xab\x41\x8d\xfa\x1f\x9e\x41\x2f\xb4\x89\xd3\x53\xd6\x5e\xf0\x54\x46\x15\xf9\xa3\x33\x86\xc4\x4f\x53\x4a\x39\x3b\xb3\x10\x4c\xf1\xec\x4c\x10\xb5\x29\x72\x94\x25\x5b\xc1\xdf\x78\x4e\x4d\x6a\xbf\xe5\x37\xb7\xdf\x68\x8a\x46\xf7\xf0\x15\x7f\xfd\xfa\xc5\xda\xab\xfc\x79\xf6\xaa\xcf\x86\x54\x40\x2b\x9e\x39\x58\xc9\x75\x9d\x3f\x78\x01
\xef\x2d\xc1\xff\x9b\xe4\x0d\x2f\x2f\x2f\x45\xff\x4d\xff\xe6\x7c\xc3\xcb\x88\x42\xe0\x33\x47\xb6\xec\x1d\x57\x7d\xf9\xdf\xe5\xfa\xf2\xbf\xe6\x63\x9d\x97\x5c\x27\xf4\xef\x3f\x09\x5c\xeb\x82\x16\x49\x6f\x0b\x5f\x20\x01\x1b\xd9\x8d\x2c\x24\xe0\x8b\x94\x2e\x2e\x00\x92\x89\xc4\x05\x7f\xec\x24\xe9\x0b\x6b\x17\xf5\x7a\x92\x95\x2f\xe0\x3a\xe4\xb9\x10\xb5\x92\xea\xa3\xa8\xd0\xea\x75\xa9\xc8\x5a\xf5\x3a\x49\x42\x95\xc7\xf4\x3f\xd9\xac\x07\x07\xfc\x4b\x53\x2e\xe9\x51\x9d\x48\x24\xe4\x82\xba\xd5\x84\x6c\x79\x7e\x2a\xc7\x72\xa2\xb7\x23\xbf\x45\xc7\xf6\xa9\x77\xa4\xff\xdf\x2c\x56\x4c\x24\x7e\xc6\xbc\x21\x8d\xe5\x2a\x31\xff\x08\xc7\x62\x7a\x98\xff\x1f\x00\x00\xff\xff\xd0\x66\xcf\xd4\x57\x1f\x00\x00") + +func fontsTanjaFlfBytes() ([]byte, error) { + return bindataRead( + _fontsTanjaFlf, + "fonts/tanja.flf", + ) +} + +func fontsTanjaFlf() (*asset, error) { + bytes, err := fontsTanjaFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/tanja.flf", size: 8023, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsTengwarFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5a\x5f\xaf\xa3\xb8\x15\x7f\xe7\x53\x1c\x21\x24\x76\x55\xc6\x9a\x3b\x0f\x57\xde\xab\xd1\x28\x5b\x75\xfb\xb6\x2d\x52\xa7\x0f\xbc\x61\xc7\xe4\x86\x4e\x02\xb7\x81\x34\xb3\xfd\xf4\x95\x8f\x6d\xb0\x8d\x21\x21\x3b\x53\x69\xa5\x44\x11\x1c\xdb\x60\xff\xce\xff\x63\x27\xbb\xc3\xee\x03\x4b\xe0\xe9\x3d\x3c\xc3\x87\x27\x78\x0f\x4f\xcf\xd1\xe7\xaa\x79\xbd\xb0\x13\x3c\x91\xf7\xb0\xab\x5f\x0f\x55\x0f\xfc\x37\xf8\x73\x75\xa8\x1b\xc1\xe0\xe7\x8e\x57\x87\x03\x7c\x3c\x72\xb6\xd9\x6e\x05\xd9\xb3\xd3\xa9\xee\xc8\xb6\x3d\x7e\x8a\x7e\x16\xec\xad\xaf\x04\xec\x4e\xed\x11\x7e\xad\xbf\x54\xf0\xcf\x13\x67\x4d\xda\xc1\xc7\xb3\x24\x36\x5d\x2d\xaa\x6e\xdf\x5e\xc8\xbf\xde\x0e\xa4\x61\x1d\x23\xaf\xed\x7f\x3e\x81\x59\x71\xd7\x9e\x20\xfa\xb5\xea\xd9\xae\x6d\x7a\xf8\xe1\xb2\xaf\xb7\x7b\xa8\xbb\x0c\xea\x06\xfa\xf3\xa9\xc9\x80\xb3\xae\x12\xd0\x36\xf0\xb9\x3d\x7c\xa9\xab\x06\x5e\x7e\x8c\xfe\xc2\xfa\xea\x05\x3e\x50\xf8\x6b\xc5\xe1\xe9\xa7\x9f\x9e\xa3\xe8\x97\xaf\x6f\x07\xd6\xb0\xbe\x6e\x1b\x68\x77\xb0\xab\x4f\x5d\x0f\x87\xba\xa9\x5e\x00\x3e\x6e\xdb\xb7\xda\x80\x6c\xfb\x7d\x75\x92\x5c\x76\x9f\x22\x29\x0b\x78\x07\xf1\x91\xbd\xd6\x5b\x68\xce\x47\x5e\x9d\x62\xc4\xb4\xab\x0f\x15\xd4\xa2\x6a\xfa\x7a\x57\x6f\x71\xda\x88\x01\x00\xbc\x83\x6e\xdf\x9e\x0f\x02\xd8\xe1\xc2\x7e\xeb\x80\x57\x50\xb2\x34\xc3\x97\x9a\xf6\x12\x25\xea\xa1\x7e\x5f\x41\xbc\x67\x27\xc1\x0f\xac\xf9\x12\xc3\xbb\x77\xf0\x76\xaa\x9b\xbe\x03\xd6\x01\x03\xec\xcd\x80\x9f\x7b\xd8\xb2\x26\xed\xe5\x34\xdd\xf1\xdc\xed\x2b\x11\x3d\xbd\xc7\x19\xf6\x55\xfd\xba\xef\x25\x2f\x0c\xb6\x7b\x76\x62\xdb\xbe\x3a\x45\xcf\x30\x3f\x98\x41\xd3\xf6\x50\x37\xdb\xc3\x59\xd4\xcd\x2b\x88\xaa\xdb\x56\x8d\xa8\x4e\x5d\xf4\xe1\x09\x5f\x3b\xb2\xaf\x28\x13\x38\x54\xcd\x6b\xbf\x87\x1f\xaa\xaf\xe6\xe1\x6d\x7b\x3c\x56\x8d\x12\x59\xf7\x23\xfc\x09\x18\xec\xce\xe2\xb5\x82\x1d\xdb\xf6\xed\x29\x52\xa0\x44\xb5\x63\xe7\x43\xaf\xb0\x1e\x5b\x51\x21\xdf\xfd\xbe\xee\x40\x2a\x30\x7a\x7a\xc6\xc7\x94\x24\x25\x3c\x67\xda\xe8\x6f\x7f\xff\xfc\xcb\x0b\xfc\xe3\xcc\xbb\xbe\xee\xcf\x52\xa8\xdd\x0b\xa4\x5f\xf6\x29\x4e\xf3\xef\x0c\xd2\xff\x6a\xfa\x6b\x06\xe9\x56\xd3\x5b\x12\x45\x49\x92\x24\x9b\xfb\xae\x9b\x08\x80\x88\x3c\x4d\x24\x41\x29\xc0\x8d\x84\x7c\x47\xf7\xc8\x4f\x88\xd8\x44\x20\x28\x80\xa0\xc9\x26\x62\x79\x0a\x4c\xaf\xe2\x3e\xf7\x3b\xa9\x81\x1c\xfa\x45\x2a\xbf\xb2\x51\x5e\x2e\x14\xbf\x44\x8d\x50\xfc\x06\x46\x48\x2e\xbf\xf6\xc4\x6b\x1a\x66\x55\xd9\x2f\xf2\x94\
xa6\x71\x91\x6c\xa2\x82\x13\x3a\x3c\x4b\xe3\x82\x27\x9b\x88\x33\x42\x89\xc8\xb1\x2f\xcb\x57\xf2\x99\x09\x49\x29\x55\x49\xd9\x83\xc8\x87\x47\xc4\x38\x19\x92\x8a\x46\x32\x13\x89\x7c\x17\x7b\x6d\x05\xb8\x6c\xcc\xd3\x36\x77\x99\x88\x69\xcc\xa5\xcc\x28\x8e\x4a\x4e\xca\x82\x11\xd0\x26\x11\xa7\x8a\x52\xa3\x1c\x47\xf9\x45\x5c\xd7\xbb\x32\x2c\x39\xc7\x68\x24\x77\xdc\x36\x91\xb1\x65\x41\x15\x62\x6a\x23\x92\x77\x8d\xb5\x90\x84\x7c\xaf\x2c\x38\x31\x48\xa6\x77\x64\x80\xa8\x8e\x92\x72\x35\x50\x52\xfd\x80\xbe\x13\x75\x27\x34\x57\x92\x4e\x03\x13\x45\x8e\xb9\x0a\x8d\xaa\x2c\x38\x50\x30\xf2\x29\x38\xd5\x2a\x2d\x2f\x68\x9c\xda\x3a\x45\x4e\x0b\xae\xe5\x9f\x03\x85\x01\x71\x96\xaf\x54\xa3\x33\x60\x40\x80\xf2\x0c\xa5\x42\xbd\x70\xea\xf5\xaf\x5c\xcc\x5f\x38\x09\x5f\xd0\x34\x35\xf3\xf6\xc5\x76\x2f\xcf\xdb\x82\xcd\x4c\x50\xca\xa5\x62\x21\x4b\x36\x91\x64\x0b\xca\x82\x52\x4b\x11\x37\x4f\xb5\xd4\x94\x16\x2e\x83\xa5\xf6\x3f\xcb\xaa\x83\xc4\x9a\x87\xc7\xa5\x88\xe3\x2d\x6e\x90\x15\x8e\x0a\x54\xcb\x89\x00\xa6\x99\x99\xd6\x18\xa9\xef\xe4\x58\xc9\x56\x0d\xe5\x28\xd7\xf1\x29\xba\x40\x72\xf4\x0b\xed\x61\xd4\xe1\x62\x9e\xbc\xfe\x44\x94\x16\x94\x4a\x65\xcb\x5e\xa2\xf8\x92\x24\xda\x29\x37\x0e\x7d\x71\x35\x7f\x7d\xb9\x9b\x44\xa3\x96\x76\x97\xf7\x20\xb8\x30\xc4\x14\xca\x8d\x42\xf7\x3e\xc9\xad\x5d\x23\xc4\x29\xcc\x00\xd4\x29\xdc\x19\xc8\x6b\x40\xa8\xa8\x66\x46\x4b\x2b\x07\x2e\x51\x43\xfa\x55\x30\x47\x9d\x8e\x2a\x75\xf3\x85\x5e\x04\xdc\x95\x2c\x58\x7e\xd3\x96\x89\x1c\xb5\xe4\x21\x57\xb7\x64\x21\x47\x2c\x39\xe8\x11\xf3\xb0\x1e\x99\x51\x9d\x0d\x2b\x00\x2d\x00\x6f\x02\x71\x0a\x33\x00\x75\x0a\x57\x84\x21\x07\x60\x2f\xa8\x4e\x3a\x38\x37\x83\x43\x9e\x83\x6b\x44\xa6\x09\x61\xb2\xa1\xa7\xb1\x71\x6a\x77\xf9\x41\x30\xc3\x6c\x7e\x5b\xc4\x32\xff\xba\xe3\xd4\x69\x13\x53\x06\x65\x83\x08\x51\x7a\x0a\xcb\x38\x9f\x23\x00\x9f\xfd\x09\xc0\x20\xca\x20\xd4\x00\xde\x19\xd0\x41\xe4\x01\xf8\x33\x3c\x04\x19\x99\x53\xe6\x95\xcc\x4b\x66\x33\xef\xdc\xc8\xc2\x2b\x80\x55\xa6\x1e\xca\x4c\xca\xca\xc4\x60\xb5\x58\x43\x0d\xce\x60\xcb\xc6\xae\xbe\x96\xc2\xbe\xcb\xd4\x2c\xe7\xd7\x8b\x81\xef\x55\x2a\xdc\xc8\xe4\xa2\x98\x56\x49\xa1\xe4\x84\x11\xe3\xec\x25\xd7\x44\x86\x7b\x0b\x9a\xd1\xe9\x10\xd1\x43\x8a\x55\xaf\x9a\x73\x3d\x95\x08\x65\x81\xa0\x36\x34\x9a\x07\xac\xa2\x89\xc8\x63\x4e\xd0\xd9\xa9\x09\x33\xaa\x81\x66\x59\x16\xb9\xda\xfa\x58\xa0\xa5\x7c\xa9\xed\x0f\x53\x09\x7a\x0d\x6c\x09\xae\x06\xf0\x0e\x82\xfb\xa2\x48\x0b\x6e\x45\xce\x00\x49\x68\x7e\x8f\x49\x05\xfd\x29\xd8\x23\xc3\x05\x56\x47\x63\x89\x44\x71\x4d\x5a\xc8\x0b\xd7\x3d\xf2\x69\xd5\x4d\x87\x1e\xac\xc9\x88\xd9\x2a\x4c\x93\x36\xf5\x9d\x5e\x31\xe3\xcb\x68\x0a\xce\xa3\x55\xed\xa7\x8c\x4e\x23\x41\x10\x05\xc5\xc8\x42\x15\x1b\xd2\xfe\xa9\x5d\xc2\xcd\xd0\x05\x25\x53\x69\x4d\x44\x13\xea\x90\x2e\x30\x8a\x4a\x71\x98\x8e\x92\x9a\x08\xca\x74\x60\x21\x2b\x2f\x13\xa1\x84\x3a\x5c\x21\x39\x67\x1b\x22\x4f\x7d\x7b\x4f\xf5\xa6\x2a\x70\xc8\x81\x13\x85\x9c\xc3\xf2\x12\xbf\x00\xb1\xc0\x04\x9b\xc4\x44\x1e\x6d\x27\x29\x0a\xd4\x79\xd8\xac\x8a\xcd\xcc\xec\xe9\xc9\xa4\x2a\x98\x13\xf5\xad\xa6\x1c\x17\x9c\x99\x8f\xa0\x18\x90\x06\x43\x71\xcc\x14\x6c\x9b\x41\xb4\x60\x5b\x8e\x0a\x9d\x53\xfb\x51\x9f\x5b\x7a\xb4\x45\x95\x63\xc4\x18\xe2\xa7\xcb\x80\x69\x9b\xf7\x0b\x6e\x37\x45\x6e\x95\x9c\x28\x38\x13\x1c\xc8\x18\xbf\xae\x0a\x2f\x24\x4b\xd9\x6b\x62\x0f\x16\xd8\xda\x56\x87\xab\xda\xf5\x0f\xef\x9b\xeb\x5d\x5e\x32\x46\x05\xdb\x65\x27\x7e\x3b\x71\xde\x19\x0d\x5c\xef\x08\x38\xf4\x54\x2a\xa5\xb1\x16\x6d\x2a\x1a\xe1\x30\x9b\x31\x5e\xdd\x44\x74\x83\x85\xa8\xe8\x3f\x01\xb7\xdc\x9c\xc2\x4a\x96\x5b\x65\xe1\x79\x08\xb1\xb2\xaa\xb3\x19\x36\x68\xad\xd3\x1f\x42\xc6
\x83\xb4\x32\x8e\xe3\x34\xb0\xc2\xb7\x4f\x11\xf4\x86\x14\x31\x56\x87\x64\x9a\x22\xae\xaf\x7e\x6f\xa4\x06\x3f\x52\xe7\xcb\x91\x1a\xa3\x25\x17\xfa\x72\xd3\x2a\x21\xa8\xb2\xb4\x40\xf7\x52\xba\x13\xe9\xff\x29\xe3\x5f\xb5\x2d\x4c\x5d\xa3\xfa\xd0\xb0\x68\xe1\x05\x6e\x37\x6e\xdb\x59\xdd\xcf\xe8\x6e\xa2\x72\xc0\x18\x0f\xd3\x9f\xa0\x97\x0c\xee\x68\x46\xb5\xa9\x2f\xfb\x1f\xba\xd4\xfa\xf0\x17\x1a\xf4\x1b\x3a\x56\x8f\xa7\x98\xf6\xc9\x74\xc1\xad\xa3\x69\xcd\x36\xc6\x54\x09\xda\xa8\xb3\x20\xf6\x71\x78\xa9\x5c\xf0\xb6\xf3\x20\xa5\x1a\xac\x54\x51\x31\xaa\x4e\x55\xe9\x84\x9a\x4d\x2a\x31\x66\x39\xfc\x68\x41\x42\x55\xf7\x1a\x8e\xb5\x49\x38\x16\xe1\x18\x84\x63\x0f\x8e\x01\xf8\x8d\xd1\x18\x24\x04\x74\x02\x15\x9d\x4a\x29\x1f\x9d\x71\x1c\x24\x63\xe5\x62\xd5\x2e\x36\xe9\xd6\x2f\x4b\x7e\x10\xda\xf2\xba\xc5\xe7\x42\x97\x15\xd9\xa6\x91\x02\x02\xb1\xcd\x0e\x6e\xe9\xa8\x0d\xa7\x00\x0e\x86\xb7\x40\x7c\x5b\xef\xc6\xe0\xb8\x71\x0e\x6e\xbc\xf5\xdc\x78\x12\x79\x7d\x58\x8b\x29\xc2\x73\xe4\xd9\x5c\x6c\xc9\x41\x75\xac\xca\xf8\x53\xa7\xbe\x3d\xda\x4e\x06\x7d\xbf\xca\xe3\x78\x3c\x0a\x92\x8e\x3c\x90\x22\xd7\xa7\x10\x17\x37\xdc\x5f\x8f\xb6\xf3\x0f\x13\xc6\x98\xb2\x7a\xfc\x59\xa2\xe4\xca\x01\xc8\x70\x8a\x8c\x1a\xc1\xcd\xac\x29\x90\xac\x4c\xef\x78\xf4\x58\x0e\x58\xbf\x6a\x0e\x4b\xae\x6f\xa8\xdd\xf0\xc4\x00\x96\x1a\xbf\xbf\xa2\x35\xed\x71\x62\x2b\xf9\xe1\xe7\xae\x8a\xd6\x93\x0c\xb8\x01\x64\x6d\xe3\x2e\xc9\x48\x95\xe9\x00\x27\x75\xb6\xfe\x04\x68\x0d\x69\x2d\x9e\x7c\x7f\x5a\x4b\x97\x63\xf5\xaa\x65\x54\x16\xcc\xf0\x77\xc7\x0d\xef\x8f\x93\x10\x7b\xad\xc7\x49\xc8\xe3\x24\xe4\x71\x12\x32\x97\x37\x1e\x27\x21\x8f\x93\x90\xc7\x49\x88\xdf\x73\x6f\xa4\x7e\x9c\x84\x78\x02\x79\x9c\x84\x2c\x35\x1e\x27\x21\x8f\x93\x90\x50\xf5\x1b\x88\x6d\x76\x70\x7b\x9c\x84\x3c\x4e\x42\x46\xf2\x9b\x9e\x84\x38\x7f\x28\x1c\xbc\xc2\x38\x05\x19\xf6\x02\xba\xec\x54\x51\x87\x72\x3b\xa1\xe8\x72\xc0\xa9\x1a\x6d\x0a\xff\xe5\x9d\x6a\x6b\xb9\xe7\x46\xd0\xd8\x5c\x0c\x63\x26\x1b\xfe\xa8\x6b\x63\x18\x78\x32\x7f\xf3\x1d\xdf\x20\xde\xfe\xc6\xc3\x4a\x4c\x54\x51\x7f\x2d\x93\x0e\x2a\xe7\x65\x22\x4f\x57\x66\x85\x6f\xd8\xdc\x44\xc9\xd2\xf7\x8f\x3c\xfc\xbf\x00\x00\x00\xff\xff\xbe\xff\x80\x0f\x50\x33\x00\x00") + +func fontsTengwarFlfBytes() ([]byte, error) { + return bindataRead( + _fontsTengwarFlf, + "fonts/tengwar.flf", + ) +} + +func fontsTengwarFlf() (*asset, error) { + bytes, err := fontsTengwarFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/tengwar.flf", size: 13136, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsTermFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x99\xe9\x9f\x23\x55\xd5\xc7\xdf\x9f\xbf\xe2\x3c\x0f\xcf\x23\x2e\xd3\x4d\x52\xe9\x24\x1d\x45\x3c\xd5\xc9\xed\x74\x31\xe9\xaa\x50\xa9\xcc\x30\xe3\x9a\x49\x57\x4f\xc7\x49\x57\xb5\x49\x1a\x68\x37\xdc\xf7\x7d\xdf\x17\x10\x94\x45\x10\x10\x90\x1d\x15\x01\x11\x65\x13\x04\x59\x44\x59\x45\x94\x4d\x10\x64\xf3\x73\x92\x9b\x99\xba\xa9\x7b\xd3\xbe\x3a\x9d\x53\xdf\x7b\xeb\xde\x73\x7e\x75\xee\xa9\xea\xd5\xee\xaa\xd5\x3a\x11\xb3\x98\xc5\x1c\xce\x64\x31\x9b\xc3\x0c\x66\x20\x08\x7b\xeb\x9d\xa8\xd5\xc5\x7d\x5b\x58\xed\x86\x51\x84\xe5\xb5\xd6\xc6\x46\xd8\xed\xe2\xdc\x11\xa5\x1c\x38\x51\xbb\xbb\xb9\x12\xf6\xb1\xbd\xd6\xea\xb5\xda\x83\xb0\xd7\xc7\xac\x35\x3f\x63\xe5\xf3\x20\xa2\xb5\x56\xd4\x0e\x57\x70\x35\xee\x61\xad\x35\xe8\x44\x33\xd6\x8e\xdc\x8e\x39\x9e\xeb\xe8\x78\x2d\xc2\x72\x7c\x7c\x2b\xc2\x23\xdb\x6c\xa8\xdd\xee\x74\x67\xe3\xde\xfe\xa3\x60\x88\x1e\x9a\x10\xfb\xe1\xa0\x8f\xfd\xcd\x8d\x8d\xb8\x37\x08\x57\x30\x8e\xba\x5b\xd8\x59\xc5\xad\x78\xb3\x87\xfd\x76\x2f\x0c\x23\x5c\x8d\xa3\x01\xae\xc4\x61\x1f\x56\x3b\xfb\xbb\xe1\x00\x7b\x61\x37\x6c\xf5\x43\xb4\x66\x2d\x9c\x99\x41\x37\x3e\x2e\x5c\xdf\x17\xf6\x30\x5b\x2a\x15\xa0\xce\x9b\xea\xf7\x3b\x71\x84\x9d\x3e\xae\x85\xbd\x70\xdf\x16\xee\xef\x1c\x17\x46\x38\x88\x71\x3d\x5e\xe9\xac\x6e\xe1\x60\xad\xd3\x1f\xce\xbb\x03\x5b\x7d\xec\xc6\xd1\x7e\xb6\x83\xb5\x10\x86\x40\x27\xec\x1d\xde\xc7\xa8\xb5\x1e\xf2\x1c\x1b\xdd\x56\x7b\xb8\x34\x6c\x61\x3b\x5e\x5f\x0f\xa3\x01\x76\x3b\x51\x38\x0b\x50\x89\x37\xf7\x75\xc3\x99\xf6\x5a\xd8\x3e\x10\xae\xf0\xde\xeb\xad\xcd\x2e\x2e\x6c\xf6\x06\x71\x84\x47\xf6\xe3\xee\xe6\xa0\x13\x47\x14\xb6\x7a\x83\xb5\x6e\x27\x3a\x30\x1b\x85\x83\xa3\x30\x6b\x1d\x51\x2a\xcc\x22\xda\x2b\x2b\xe1\x0a\xdf\x16\xa3\xf0\x78\xd8\x68\xf5\x5a\xeb\xe1\x30\x28\x07\xe3\xb1\x6f\x0b\x17\x9d\x2a\x6f\xba\x15\xad\xf0\x9f\xbb\x3b\xd1\x2c\x62\x33\xea\x76\x0e\x84\xd8\xea\x76\x31\x1e\xac\x85\x3d\xbe\xc2\xdb\xe9\xef\x18\x6d\x2d\x8e\x42\xe8\xf4\xb1\x13\x0d\xc2\x68\x78\x8b\x18\x37\x7a\xf1\xca\x66\x3b\xc4\x78\x73\xb0\xb1\x39\xc0\xf0\x84\x56\x7b\xd0\xdd\x1a\xde\xbc\xcf\x1b\x1d\xed\x1f\x3b\x11\x5f\xdd\x8c\xba\x61\xbf\x3f\xdc\x6f\x34\xe8\xc5\x5d\x58\xed\x74\x87\xb1\xd8\xec\x87\x2b\xb3\x88\x01\xc7\x75\x35\xee\x85\xd8\x19\x8c\x67\x1e\x8d\x6f\xd8\xcb\x62\x7c\x0f\x96\x46\x7f\x7d\xb3\xbf\xb6\x03\x0f\x84\xbd\x08\xe3\x1e\xae\x76\x06\xb3\x80\x08\x27\x12\xfc\x0f\xc1\xff\x12\x1c\x46\xf0\x7f\x04\xff\x4f\xf0\x0a\x82\xc3\x09\x5e\x49\xf0\x2a\x82\x57\x13\xbc\x86\x60\x07\xc1\x0c\xc1\x2c\xc1\x11\x04\x19\x82\x2c\x81\x45\x90\x23\x98\x23\xc8\x13\x14\x08\x8a\x04\xf3\x04\x25\x82\xd7\x12\xbc\x8e\xe0\x48\x82\xd7\x13\x1c\x45\xf0\x06\x02\x3a\x0c\x6c\x82\x05\x82\x32\x41\x85\x40\x10\x2c\x12\x54\x09\x96\x08\x1c\x82\xa3\x09\x76\x12\xd4\x08\x96\x09\x5c\x02\x8f\xa0\x4e\x70\x0c\x81\x4f\xd0\x20\x08\x08\x9a\x04\xbb\x08\x76\x13\x1c\x4b\xb0\x87\x60\x2f\xc1\x1b\x09\xde\x44\xf0\x66\x82\xb7\x10\xbc\x95\xe0\x6d\x04\x2d\x82\x7d\x04\x6d\x82\x15\x82\x90\x60\x95\x60\x3f\xc1\x1a\x41\x87\xe0\xed\x04\x07\x08\xba\x04\xeb\x04\x11\x41\x4c\xb0\x41\xf0\x0e\x82\x1e\x41\x9f\x60\x40\xb0\x49\x70\x1c\xc1\xf1\x04\x27\x10\x6c\x11\xbc\x93\xe0\x5d\x04\xef\x26\x78\x0f\xc1\x7b\x09\xae\x22\xb8\x8d\xe0\x2e\x82\xfb\x09\x9e\x21\x78\x81\xe0\x4f\x04\x59\x6b\x1e\xde\xc7\xa6\x04\xef\x27\xc8\xe6\x32\xf0\x01\x36\x59\xf8\x20\x1b\x0b\x3e\xc4\x26\x07\x1f\x66\x33\x07\x1f\x61\x93\x87\x8f\xb2\x29\xc0\xc7\xd8\x14\xe1\xe3\x6c\xe6\xe1\x13\x6c\x4a\xf0\x49\x82\xec\x5c\x06\x3e\xc5\x26\x0b\x9f\x66\x63\xc1\x67\xd8\xe4\xe0\xb3\x6c\xe6\xe0\x73\x6c\xf2\xf0\x79\x36\x05\xf8\x02\x9b\x22\x7c\x91\xcd\x3c\x7c\x89\x4d\x09\xbe\x4c\x9
0\xcd\x67\xe0\x2b\x6c\xb2\xf0\x55\x36\x16\x7c\x8d\x4d\x0e\xbe\xce\x66\x0e\xbe\xc1\x26\x0f\xdf\x64\x53\x80\x6f\xb1\x29\xc2\xb7\xd9\xcc\xc3\x77\xd8\x94\xe0\xbb\x04\xd9\x42\x06\xd1\xf5\x66\x16\x7c\x61\xef\xc4\x46\xdd\x2e\x0b\xf8\x1e\xbb\xb3\x88\x8e\xbb\x4b\xf8\x81\xa8\xa0\x38\xb6\x5c\xb3\x97\xed\xc0\xf1\x5c\x5c\xb6\xfd\x9d\xf0\x7d\x26\x2c\xc4\xb2\x70\x03\x6c\x38\x55\x17\x4e\x62\x4f\x0e\xb1\xee\x35\xdd\xca\xc8\x75\x32\xbb\xe6\x10\xcb\x4d\xdf\x17\x6e\x79\xcf\xc8\xfb\x03\xf6\xe6\x11\xf7\x08\x77\xe4\x38\x85\x1d\x05\xc4\x05\xdf\xdb\x29\x5c\x5c\xb0\x7d\x38\x95\x5d\x45\xc4\x86\x28\x0f\xef\x39\xe4\x7e\xc8\xce\x79\xc4\x8a\x63\x0b\x5f\x34\x9c\x06\xfc\x88\x3d\x25\xc4\xb2\x57\xdf\xe3\x3b\xd5\x25\xb9\x94\xd3\x08\xb2\xc5\x0c\xe2\xa2\x58\x76\x5c\xc7\x15\xe8\xf9\x15\xc7\xb5\x6b\xe8\xb8\x15\xa7\x6c\x07\x9e\x0f\xa7\x33\x92\x45\xac\x89\xc5\x60\xa6\xee\x39\x6e\xe0\xb8\x55\xac\x78\xcd\x85\x9a\x40\xdb\xad\xd6\x04\x1e\xd3\xf4\x82\xc4\x8e\xcf\xe0\x11\x16\x87\x4a\xde\xe5\x4c\x76\xe4\x10\x1b\xde\x62\x80\x4b\x7b\xea\x4b\xc2\x85\x1f\xb3\x6f\x0e\xd1\x17\x55\xa7\x11\x08\x5f\xc8\x48\x9c\xc5\xfe\x3c\xe2\xb2\x5d\xf6\x3d\x17\xce\xe6\x9f\x05\xc4\x8a\xa8\xfa\x42\x8c\x90\x9f\xb0\xaf\x88\x58\xaf\x35\x1b\x33\xcb\x8e\xdb\x6c\x8c\xfc\xe7\xb0\x7f\x1e\xb1\xd1\xac\x0b\xbf\x51\xf6\x9d\x7a\x80\xc1\x6e\x0f\xce\x65\x7f\x69\xc2\xbf\xe4\x0b\x01\xe7\x11\x64\xe7\x33\x88\x76\xb9\x19\x08\xb4\xcb\x9c\x23\xf8\x29\x3b\xb3\x88\xcb\x4e\xd9\xf7\x46\x33\x9f\xcf\x2e\x0b\xb1\xee\xd4\xca\xbe\xb7\x7b\xe4\xbc\x80\x9d\x39\xe6\x2a\x95\x9a\xc0\x8a\x17\xc0\x85\xec\xe2\x34\x8a\x8a\x53\xab\xd9\xf0\x33\xfe\x9d\x57\xef\xec\xb9\x02\x2e\x62\x7f\x81\x37\xd9\x28\x37\x6b\xfa\xb8\x5f\xcc\x4c\x11\x71\x98\xad\xff\x2e\xf0\x97\xf0\x90\x79\xc4\x5d\xcd\x5a\xd5\xf6\x71\xd1\xb7\x47\x9a\xf0\x5c\x46\x6d\x3f\x10\x3e\x5c\xca\x4c\x49\xcf\x2c\xd9\xb5\x45\xb8\x8c\x20\x5b\xca\xa4\x81\x61\xc4\xc6\xd3\x34\xe0\x72\xc6\x92\xc2\x3f\xa6\x29\x1a\x87\x96\x72\x05\x5f\xb6\x10\x6b\x76\xe0\xb8\x58\xb6\xeb\x4e\x60\xd7\xb0\x26\x82\x40\xf8\x68\xe3\x6e\x27\x58\xc2\xaa\x6f\xef\x12\x70\x25\x93\xb9\xe9\xe4\x30\x3f\xf0\x73\x26\xe7\xa6\x93\x65\xc7\x2f\x37\x97\x17\x6b\xe2\x58\xf8\x05\xe3\xf9\xe9\x78\xe0\xd4\x2a\x02\x7e\xc9\x64\x61\x3a\x79\xe8\x51\xba\x8a\xe9\xe2\x74\xda\xe7\x54\xd9\x0b\xde\x2e\x01\xbf\x62\x7c\xde\x84\x0b\xb8\x9a\xaf\x97\x0c\xd7\xcb\x72\x57\x52\x4f\xd7\x10\x58\x99\x8c\x81\x15\xc9\xa8\x5e\xcb\x64\x76\x3a\x39\x8a\xea\xaf\x99\x34\x65\x4a\xa4\xa2\x7a\x1d\xe3\xa6\x74\x89\xc9\x58\xfd\x86\x69\x53\xca\x9c\xe4\x82\xaf\x67\xd2\x94\x2d\x27\xb9\xe0\xdf\x32\x69\xca\x96\x93\x5a\xf0\xef\x18\x37\xa5\xcb\x99\x5c\xf0\x0d\x4c\x9b\xb2\x25\x82\x25\xb8\x91\x01\x53\xba\xdc\xa4\xaa\x6e\x22\xb0\xb2\xa6\x64\x79\xc9\xbd\xdf\xcc\xa4\x29\x59\x5e\x72\xef\xb7\x30\x69\x4a\x96\x97\xda\xfb\xef\x19\x37\x25\xcb\x4b\x2e\xf6\x56\x26\x4d\x89\xf2\x26\xa3\x74\x1b\xd3\x5c\xa4\x9b\xb5\xc0\xa9\xd7\xb8\x62\x1d\x3c\x7b\xfe\xc0\xd7\x4c\xe9\x91\x33\x35\x02\x3e\xc0\xe0\x76\x46\x4d\xa9\x69\x26\x23\x74\x07\x93\xa6\xb4\x34\x93\x11\xfa\x23\x93\xa6\xfc\x34\x53\x11\xba\x93\xc0\xb2\x4c\x49\x6a\x4e\xee\xfb\x2e\xa6\x4d\x89\xda\x93\x5c\xc6\xdd\x4c\x9a\x12\x15\x2c\x79\xbe\x0b\xf7\x30\x72\x30\x39\x8d\x65\xbb\x76\x10\x68\x2c\xd9\x7e\x1d\x1b\xdc\x54\x59\xd6\x9c\x16\x51\x6a\xe8\xbd\xcc\xe5\xa7\x71\xa3\x55\xfd\x99\xb9\xc2\x34\x2e\x11\x9a\xbf\x30\x5c\x9c\x06\x8f\xa4\x73\x1f\x73\xf3\xd3\xb8\x43\x01\xbc\x9f\xd9\xd2\x34\x36\x51\x39\x1f\x20\xb0\x72\x19\x3d\x2c\xe0\x41\xbe\x9a\xd5\x5e\x9d\xa8\x9a\x0f\x31\x69\x69\x49\xa5\x66\x3e\xcc\x9c\x3e\x21\x4a\xc5\xfc\x2b\x73\xfa\xac\xa4\xeb\xe5\x23\x0c\xeb\x53\x93\xaa\x96\x7f\x63\x56\x9f\x1e\xa5\x56\x3e\xca\x9c\x3e\x33\x4a\x
a5\xfc\x3b\x73\xfa\xcc\xa4\xeb\xe4\x3f\x18\xd6\xa7\x26\x55\x25\x1f\x23\xb0\xe6\xf4\x99\xe1\x1a\xf9\x38\x5f\xd6\xa7\x46\xa9\x90\x4f\x30\xa7\x4f\x8c\x52\x1f\x9f\x64\x4e\x9f\x18\xa5\x3a\x3e\xc5\x9c\x3e\x31\xe9\xda\xf8\x4f\x86\xf5\x89\x51\x2a\xe3\xd3\xcc\xe9\x93\x92\xaa\x8b\xcf\x30\x5b\xe4\xbe\x7b\x97\xd3\x38\x58\x11\xff\xc5\x5e\x7d\x1a\xd4\x7a\xf8\x2c\x83\xfa\x14\x28\xd5\xf0\x39\x02\x2b\xaf\x0f\xbf\x52\x0b\xff\xcd\x9c\x3e\x0f\xe9\x4a\xf8\x3c\xc3\xfa\x64\xa4\xea\xe0\x0b\xcc\xea\x13\xa2\x54\xc1\x17\x99\xd3\x27\x64\x54\x03\x5f\x62\x40\x9f\x84\x3d\x93\x37\x7d\x99\x20\x73\x42\x26\x6b\xec\x7f\x64\x01\x91\x2f\x0e\x57\x4a\x5a\xbf\x7d\x95\xbd\x57\xb2\xdb\xf4\xab\x0b\xbe\xd8\x35\x6c\x16\x87\xb0\x7e\xfb\x0a\x7a\x9f\x44\xb7\xe9\x59\xbd\xaa\xe7\x8a\xe1\x8b\xe2\x90\x9e\x5a\xc7\x25\x7b\x8e\x64\x4d\xe7\x6c\x39\x99\x85\xab\x25\xac\xaf\x18\x0a\xfa\xa0\x44\x4d\x47\x6d\x39\x25\x9b\xf1\xe4\x7a\xd9\xa6\xf9\xf1\x1d\xec\xe9\x77\xa8\x78\xc1\xa1\xfe\x79\x38\x60\x61\xda\x0d\x0e\xe1\x0f\x48\xbc\xbc\xcd\x0e\x6c\x4e\xfc\xb5\x12\xae\x4c\x5d\xfc\x10\x7d\x58\xa2\xc2\x30\x6f\x25\x09\xdf\x20\xe1\x45\xed\xbc\x0a\xfa\xd8\x08\x35\x36\x8a\x15\xa5\x42\xdc\x28\x69\xbd\xaa\x55\xf6\x71\xc9\x6e\xd3\xdb\xcb\x67\xe0\x74\x49\x4f\x3d\xff\x24\x7b\xb1\x64\x4d\xea\x13\x93\x49\xb9\x5e\x0e\xd0\x2b\x30\x85\x3f\x2a\x71\x63\x1f\xae\x3c\x0b\xd7\x49\x5a\xaf\x40\x95\x7d\x44\xb2\x26\xf5\x89\x64\x6a\xc6\xab\xd6\x2b\x4f\x41\xc7\x2b\x36\xa9\xae\x9a\x7a\x0e\x6e\x97\x23\xf4\xd2\x4b\xf3\xcf\x4a\xde\xa4\xbf\x6a\xb2\xf0\x9c\x21\x61\xbd\xfe\x14\xf4\x92\x11\x6a\xec\x81\xab\x93\xc9\xb9\x55\x0e\xd0\x4b\x30\x85\x3f\x2d\x71\x93\x0a\xab\x6a\xaf\x26\x57\x6e\xe8\x8b\x27\xe0\xf1\xda\x4d\xf5\x75\x29\x15\xc5\x53\xe5\x08\x7d\x8d\x4d\xf3\x17\x48\xde\xa4\xf4\x25\xe5\x89\x93\x15\xdc\xd0\x34\xab\xac\xac\xe0\x96\x49\xe3\x4e\xb2\x07\x39\x45\xc2\x53\xdb\xb3\x11\x7a\xbe\x44\x4d\x0a\x77\x94\x27\x59\x16\x2a\x4b\x2f\x71\x95\x95\x95\xca\x32\x29\xd0\x51\x9e\xb4\x6b\x24\xad\x97\xa0\xca\x3e\x34\x62\x73\x26\x0d\x3a\x93\xa2\x3a\x4d\x0e\x30\x94\x41\x2f\xa8\x89\x46\x03\x1d\xb8\x48\x72\x26\x81\x1c\x9d\x4a\xf8\x99\x72\x84\x5e\x20\x69\xfe\x52\xc9\x9b\x04\xb2\x53\x55\xec\x2d\x12\xd7\x2b\x64\x02\x7e\x4a\xc2\xfa\xd6\x71\xa7\x6f\xc3\x49\x92\x30\xbd\xef\xd6\x92\x67\xbb\x3c\x48\x73\xb6\x76\x3a\x05\x95\x87\x68\x6e\x61\xfa\xbc\xe3\x85\xca\xa7\x2a\x57\x9e\x36\xf3\x18\x96\x8f\x54\xae\xb2\xcd\xdc\xc3\xba\x2a\x85\x9f\x13\x53\x67\x1e\xa2\x52\xf8\x73\xa6\xd7\xf3\x9a\xf2\xf8\x9d\x2c\x69\x7d\xc3\xab\xb2\xe7\x49\xd6\xf4\x21\xc5\x4d\x86\xee\x26\x09\xeb\xbb\x5e\x05\x7d\x42\xa2\xa6\xaf\x5e\xae\x1a\xb8\xf1\xcc\xfa\x77\x91\x09\x78\x3c\xb7\xe9\xeb\x8a\x9b\x0c\xdd\xcd\x12\xd6\x0b\x4d\x41\x9f\x94\xa8\xf1\x00\x75\xab\x70\x99\x64\x0c\xe7\xa6\x5b\x85\x2b\x24\x61\x3a\x2e\x3d\xa5\xf0\x8c\x97\xa7\x3f\x2a\x55\x56\xae\x2f\xbf\xcd\x77\xb7\xf1\x57\xf4\x61\x1e\xe4\x89\x66\x78\x53\xd2\x8d\x90\x87\x5a\xde\x54\x57\xfc\x64\x9a\xe5\x9b\x88\xe1\x35\x47\x41\xe5\x8b\x48\xde\x54\x4d\x7c\x35\xc9\x52\xc4\x79\x7d\x35\x99\x80\xa5\x8a\xf3\xa6\x03\xc7\x4f\xa6\x59\x76\x27\x79\xfd\x81\xa3\xa0\xb2\x31\xc9\x9b\x14\xd1\x48\x6e\x50\x56\x8a\xbc\x5e\x1a\x0a\x2a\xeb\x44\xde\xa4\x91\x46\xaa\x16\xdf\x23\x47\xe8\x75\x92\xe6\x5f\x92\xbc\xe9\x40\x6b\xa8\x01\x94\x1d\x72\x5e\x7f\xa2\x4d\xc0\xb2\x45\x2e\x98\x74\xd8\x48\x86\x50\x1e\x67\x05\xbd\x00\x15\x54\x9e\x68\x05\xe3\xa7\x45\x75\x19\x32\x24\x05\x7d\x3b\x35\x01\xcb\x78\x14\x4c\xaa\x0e\x92\x0b\x91\x8d\x5a\x41\xaf\x6a\x05\x95\x6d\x5a\xc1\xa4\xea\x40\xa9\xb6\xf2\x04\x2e\xe8\x45\xad\xb2\xf2\xf4\x2d\x6c\xf3\x65\x78\xd4\x19\xdd\x2d\xe1\xa9\x1f\x58\x46\xe8\x8b\x12\x35\x69\xba\xa9\xd4\x9c\x71\x90\xf5\xa2\x56\xd9\x71\
x8c\x4d\xaa\x6e\x26\x7b\xf3\xf1\x8a\xf5\x82\x56\xd0\xf1\x8a\x4d\x5a\x6e\xa6\xbe\xae\xde\x21\x47\xe8\xe5\x9c\xe6\x9f\x1b\xf1\xc5\xed\x3e\x96\x27\xeb\xe4\x9d\x72\xcc\xd4\x6f\x50\xca\x88\xe7\xe5\x08\x93\xbe\x9b\x4a\xe3\x28\xf7\x50\xd4\xcb\x5b\x65\xc7\xeb\x37\xf5\x4a\x7b\x93\xc5\x47\x8a\xb0\xa8\xef\x95\x14\x54\x6a\xb0\x68\xea\x95\xf6\x4e\x36\xaf\x67\xcb\x01\xfa\x6e\x29\x85\xcb\xb3\xb2\x68\xea\x97\xf6\x26\x1f\xb5\xb3\x24\xac\xef\x97\x14\xf4\xf2\x21\x6a\x95\x8b\x28\x1d\x17\x8e\x1c\x95\x79\x94\x9a\x3a\x49\x3a\x4a\x98\x58\xce\xcb\xd2\xb9\x80\xe3\xc0\x9e\x2b\x3d\x15\x54\x52\x39\xfe\x77\xfa\x65\x04\xff\x09\x00\x00\xff\xff\x17\x87\xd5\xd4\xdd\x25\x00\x00") + +func fontsTermFlfBytes() ([]byte, error) { + return bindataRead( + _fontsTermFlf, + "fonts/term.flf", + ) +} + +func fontsTermFlf() (*asset, error) { + bytes, err := fontsTermFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/term.flf", size: 9693, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsThickFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x57\x4d\x6f\xe3\x36\x10\xbd\xeb\x57\x3c\x08\x02\xbc\x8b\x26\x6a\x92\xda\xc0\x64\x4f\x2e\xd0\xf6\x52\x14\x10\x8a\x5e\x74\x33\x65\xd2\xb6\x10\x59\x5a\x58\x32\x98\xfd\xf7\x05\x39\xa4\x44\xca\x8a\x77\x7b\xe8\x62\xa3\x2f\xce\x3c\xbe\x99\x79\x1c\xd2\x87\xe6\xf0\x22\x32\x6c\xb0\xc6\xf3\x06\x4f\x78\x5e\x27\xff\x9c\xea\xfd\x1b\xaa\x6f\xf8\x5b\xb4\x52\x34\x8d\xb9\xf7\xdd\x19\x2f\x3f\xbf\xae\x93\x3f\xea\x63\xa3\x06\x5c\x54\xa3\x44\xaf\xf0\x92\x3f\xe1\xf1\x11\xbf\x5e\x8f\xd7\x7e\xc0\xe6\x01\xcf\xaf\xaf\xbf\x24\xbf\x89\x41\x7d\xc1\x06\x7f\x89\x8b\xf9\xb0\x4e\x92\xdf\xdf\xbf\x36\xa2\x15\x43\xdd\xb5\xe8\x0e\x38\xd4\x97\x7e\x40\x53\xb7\xea\x4b\x62\x18\xe0\x11\xe9\x59\x1c\xeb\x3d\xda\xeb\xb9\x52\x97\x14\x87\xee\x82\x43\xdd\x28\xd4\x52\xb5\x43\x7d\xa8\xf7\xd6\x39\x11\x00\xf0\x88\xfe\xd4\x5d\x1b\x09\xd1\x68\xf1\xad\x47\xa5\xb0\x13\xab\x07\xeb\xd4\x76\x3a\xc9\xd8\x68\x38\x29\xa4\x27\x71\x91\x55\x23\xda\xb7\xd4\x10\xfd\x7a\xa9\xdb\xa1\x87\xe8\x21\x60\xbf\x3e\xa0\xba\x0e\xd8\x8b\x76\x35\x18\x98\xfe\x7c\xed\x4f\x4a\x26\x1b\x46\x38\xa9\xfa\x78\x1a\x0c\x63\x81\xfd\x49\x5c\xc4\x7e\x50\x97\x64\x7d\x67\xf0\x01\x6d\x37\xa0\x6e\xf7\xcd\x55\xd6\xed\x11\x52\xf5\x7b\xd5\x4a\x75\xe9\x93\xe7\x8d\x75\x3b\x8b\x77\x1b\x39\x1a\xd5\x1e\x87\x13\x3e\xa9\x77\x6f\xbc\xef\xce\x67\xd5\x72\x62\xfa\xcf\xf8\x09\x02\x87\xab\x3c\x2a\x1c\xc4\x7e\xe8\x2e\xc9\x13\x4f\x2c\xd5\x41\x5c\x9b\x81\xc9\x9e\x3b\xa9\x6c\xe0\xc3\xa9\xee\x71\xe8\xda\x01\x9f\x9a\xfa\x4d\x21\x7d\x3c\xe3\x29\x45\xd7\x5a\x58\xd1\x4a\x0b\xfb\x39\x79\x5e\x5b\x10\x4e\xb4\x61\x1f\xcd\x9a\x24\x59\x96\x6d\x97\x2e\xdb\x84\x32\xfe\x4b\xb3\x6d\xa2\xed\xd7\xed\x36\xd1\x30\x8f\x29\xcc\xc7\x0c\x59\x7c\xdd\x26\x20\x80\x60\x1c\x48\x6b\xd2\xe1\xc3\x38\x04\xfb\xcf\x58\xe7\x9a\x74\x6e\xe0\x34\x59\x40\x80\x34\xf8\x5d\xf3\x7b\xca\x86\x1d\x20\x0b\xfb\x41\x16\x16\x42\x16\x66\x20\x31\xb7\xce\x43\x1a\x43\x59\x90\xb1\x2b\x2b\xad\xdd\xb0\x7d\xa3\xca\xa1\x59\x8e\x66\x28\x4f\x17\x22\xce\xed\x24\x64\xa7\x20\xeb\x4a\x95\xb9\xae\xca\xca\x0c\x97\x95\x21\x8b\x92\x98\xaa\xe5\x41\x76\x9a\x15\x0f\x3b\x96\xba\xac\x64\xa1\x99\x66\x59\x79\x9e\x65\x15\xc5\xee\x1e\x0c\x90\xbd\x9b\x2c\xe9\xe0\x7d\x34\x9c\xd7\xc6\xb3\x37\x39\x77\xa9\xb7\x37\xad\xad\x7f\xf4\xd1\x16\x6d\xfc\x1b\x8b\x88\x3b\xe9\x8c\xe6\xce\x25\x91\x0d\x9a\x4c\x00\xc4\x09\xe1\xa0\x77\x25\x91\x89\x3b\x08\xc9\x7e\xc7\xec\xe2\x46\x6c\x05\x52\x37\x2d\xcf\x2a\x89\xc8\xf9\x07\x36\xd0\x9a\xa9\x59\x77\x33\x49\x60\xe3\xe6\x86\x2c\x38\xef\x9a\x
26\x53\x6f\x43\x0c\x4b\xda\x4a\x0b\xd8\x2d\xe1\x98\xc9\x6c\x7d\x0b\xcd\x76\xf7\xe2\x32\x88\x3f\x98\xae\x82\x35\x62\xf4\x67\x43\x95\x45\x9a\xda\xc2\xef\x8c\x26\x62\xd8\xdc\x45\x4c\x2e\x80\x1d\xe9\x31\x21\x14\x2b\x60\xac\x5d\x50\xc3\x2c\x73\x52\x9e\x9e\x58\x16\x88\xb3\x5c\x31\x47\x16\x22\xcb\x70\xae\x99\x65\xe9\x94\x33\x4f\x59\x05\xa8\x85\x5f\x6f\x1c\x5e\xea\x2b\xe3\x53\x04\x1d\xa7\x46\xa6\x69\x9a\x1a\xcb\x02\x45\x69\xc3\xab\x50\x49\xed\x17\xab\x5e\xcd\x17\x87\xe1\xed\xe6\x2d\x3c\x13\x59\x68\x3d\x2d\xa8\x68\x49\x8d\x95\x62\xb9\x6a\x56\x91\x49\xad\x4d\x71\x58\xd3\x28\xf7\xae\x84\xe4\x66\xdb\x05\x42\x89\x11\x5d\x51\xc6\xfb\x1c\x71\x92\x9d\x76\x66\xd9\xf8\xf1\xbe\xcd\x24\xa2\x91\x19\x4b\x13\xce\xee\x9e\x34\x3d\x1d\xad\x35\x2d\xd0\x0c\xe9\x71\x0f\x1e\xaf\x23\x35\xee\xdb\xe3\x2b\xb7\x6f\x77\xdf\x95\x3a\x8d\xea\x48\x4e\x5d\xa4\xb9\xd0\x44\x5c\x10\x0a\xab\xe1\x99\xdd\x04\x39\xcf\x87\xc9\xba\x0d\x8d\xca\xea\xcc\x8b\x9a\x6c\xd3\xf7\x31\x04\x51\x4c\x2e\xde\xc1\x9b\x2f\x06\xfc\xdf\x5a\x57\x50\xe6\xdc\xe5\xd3\x5a\x10\x7e\xa4\x23\xea\x09\x56\x7f\x0f\xf6\xcf\xd5\x52\xb6\x46\xd8\xd2\x37\x24\xe0\x7e\x43\xa2\x68\xaf\x98\xdf\x83\x22\x4c\xb9\x64\xb6\x79\xfe\x11\xac\x59\x66\xbe\x79\xf0\x66\xe6\x5b\x86\x6b\x78\x66\xc9\x85\x3d\x2f\xf2\x9b\xf9\xce\xfc\xe7\x18\x73\x9c\x10\xcb\xa3\x8c\xdd\xf6\xc3\x4d\x74\xc1\xda\x22\x73\x2e\x82\x64\xdc\xef\xe3\x81\x2c\x9d\xa1\x4d\x59\x7c\xa1\x31\x54\x1f\xa3\x0b\xce\x47\x55\xdd\x40\xcc\xf6\x42\x86\xb8\x8d\xc7\x9e\x5f\xb3\xa5\x87\xf9\x1e\xbf\xb4\xd5\x9b\xc5\x6a\x3a\x70\xbe\x70\xa0\x71\xea\x35\xea\x62\x11\xf8\xca\x87\x8b\xd0\x2f\x4d\xaf\x54\xa6\x3a\x6a\x23\xc6\xa9\xa6\xc5\xbc\x2b\x67\x7b\xb3\xf5\xbc\x3b\x17\x02\x42\x16\x29\xe7\x4d\xf1\xa6\xed\x82\x67\x02\xad\xfc\x61\xd2\x9d\x20\x17\x49\xdd\x4e\xc8\xdb\xef\xb4\x7e\x49\x8e\xab\xb6\x5c\x6c\x17\xda\xed\xad\xfe\xd0\xeb\x66\xe0\xe5\xac\xa7\x03\x9f\xbd\x06\xe0\x2e\x08\x4e\x20\x8b\x65\x14\x42\x78\x88\x9e\xe1\x8e\x8a\x67\x66\x21\xbb\x90\x61\xdc\xff\xe2\x1c\x7e\x37\xa4\x20\xd9\x6c\xb8\xe2\x26\x64\xea\xb6\x9a\x1b\x7e\xa4\x00\x5a\xa8\x9c\x6b\xe8\xe4\x37\x4c\x57\x14\x2a\x42\x30\x39\xee\xaf\xb7\x7b\x9c\xb3\x71\x22\xd8\xb9\x33\xd3\xec\x88\xc6\x07\x88\x59\xe9\x6f\x24\x37\xc2\xbb\x56\xec\x7b\x5b\x71\x9b\x0a\xfe\x29\x70\xa7\x61\xc4\xd6\xa1\x47\xe4\x15\x7b\x46\xde\x73\x62\x65\xe5\xfc\x76\x94\xbb\x25\x1f\xb7\x89\xbb\xc4\x7c\xc7\x74\xe7\xcc\xa0\x50\xf4\xf1\x31\x1a\xac\x46\xf0\xaf\x18\xbd\x62\x08\xd6\x26\xdc\x0f\x99\xb9\x28\x5d\x6b\xcb\x5d\x8a\x79\xf6\x1d\xff\x82\x21\xe6\xbd\xc2\xd4\xbb\xe0\x8e\xb6\x08\xb8\x4e\x09\xfb\xe0\x79\x9b\x64\xd1\xff\xff\xff\xfd\xdf\x00\x00\x00\xff\xff\xc0\xc7\x94\x97\xec\x10\x00\x00") + +func fontsThickFlfBytes() ([]byte, error) { + return bindataRead( + _fontsThickFlf, + "fonts/thick.flf", + ) +} + +func fontsThickFlf() (*asset, error) { + bytes, err := fontsThickFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/thick.flf", size: 4332, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsThinFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x57\xfd\x6e\xab\x36\x1c\xfd\xdf\x4f\x71\x14\x5d\x89\x76\xc5\xa0\x74\xea\xd5\x5d\x35\x4d\x68\xd2\xa6\xbd\xc1\xfe\x41\x0b\x0e\x98\x04\x95\xe0\x0a\x88\xda\x3b\xf1\xf0\x93\x3f\x30\x0e\x98\x24\xd3\x26\x05\x9b\x8f\xe3\xe3\xdf\x97\x8f\x9d\xb2\x2e\x9f\xd9\x17\x7c\xc5\x0b\xbe\x81\x6e\xb1\x7d\x26\xbf\xb2\x8e\x17\x10\x0d\x18\x6a\xf1\xc1\xdb\x9c\x75\x1c\xa5\x68\x7a\xbc\x8b\xae\xe7\x05\xf6\xdf\xd1\x8a\x3d\x6f\xfb\x24\xef\xa2\x9c\xd5\x3d\xcf\x8f\x11\x2f\xce\xe4\xf7\xea\x50\xf3\xbe\xfa\x5b\x63\xfe\xe4\x4d\xc1\xeb\x1a\x7f\x54\xf9\x1b\x6f\xb0\xdd\xc6\x3f\xfd\x88\x87\x8f\xa3\x7a\x4c\xde\x59\xcb\x3a\x51\xf6\x51\x2e\x4e\x8f\xe4\xb7\xcf\xf7\x9a\x35\xac\xaf\x44\x03\x51\xa2\xac\xda\xae\x47\x5d\x35\xfc\x95\x48\x0b\x41\xb1\x39\xb1\x43\x95\xa3\x39\x9f\xf6\xbc\xdd\xa0\x14\x2d\xca\xaa\xe6\xa8\x0a\xde\xf4\x55\x59\xe5\x6a\x30\x61\x00\x40\xd1\x1d\xc5\xb9\x2e\xc0\xea\x0f\xf6\xbd\xc3\x9e\x23\x63\x41\xa8\x06\x35\xe2\x83\x7c\xd1\xa0\xfe\xc8\xb1\x39\xb2\xb6\xd8\xd7\xac\x79\xdb\x80\x52\xbc\xb7\x55\xd3\x77\x60\x1d\x18\xd4\xdb\x10\xfb\x73\x8f\x9c\x35\x41\x2f\x69\xba\xd3\xb9\x3b\xf2\x82\x7c\xd5\x0c\x47\x5e\x1d\x8e\xbd\xb4\x98\x21\x3f\xb2\x96\xe5\x3d\x6f\xc9\xcb\x95\x8f\x21\x1a\xd1\xa3\x6a\xf2\xfa\x5c\x54\xcd\x01\x05\xef\x72\x19\xa7\xb6\x23\xdf\xf4\xb0\x13\xfb\x54\x9e\xa3\xe6\xcd\xa1\x3f\xe2\x81\x7f\x8e\xe0\x5c\x9c\x4e\xbc\xd1\x81\xe9\x1e\xf1\x04\x86\xf2\x5c\x1c\x38\x4a\x96\xf7\xa2\x25\x74\xab\x18\x0a\x5e\xb2\x73\xdd\x6b\x63\x4f\xa2\xe0\xca\xf1\xfe\x58\x75\x3a\x8d\x0f\x75\xf5\xc6\xb1\xa1\x27\x6c\x5f\x36\x32\xd1\x92\x97\x35\x85\xe2\x7d\x24\xdb\x67\xc5\xa2\x23\x2d\xcd\xbf\x98\x96\x10\x69\x66\x72\xbd\x4d\x08\x12\x32\xa8\x1f\x12\x22\x64\xab\xbe\x24\x64\xc0\x90\x90\x0c\x99\x79\xb4\x8d\x1e\x28\x1f\x22\x44\x48\x08\x7d\xa2\x4f\xd4\x76\xc8\x90\x61\x84\xc8\x3e\x94\xd0\x90\x0e\x34\x4a\x48\xa6\x3b\x40\x73\xd3\x81\x06\xf2\x29\xb3\xac\x09\x11\x40\x2c\xef\x63\x49\x12\xcb\x37\x31\x20\x26\x63\x27\xd3\x65\x17\x52\x69\x01\x86\xdd\xb0\x53\x2f\x87\x8b\xa9\xe5\x97\x84\x04\x30\xf7\xfa\x32\x37\xb1\x74\x70\xba\x90\x9a\x0f\xa9\x19\x35\x5e\xb1\x8d\x87\x6a\xa2\x21\x4a\x08\xfd\x81\x26\x24\x18\x32\x37\x1e\xb6\x1d\xa0\x63\x62\xef\xe1\x01\x2d\x06\x04\x16\xe4\x36\x94\xd2\xcb\xc0\x3b\xbf\x29\x55\xfa\xf3\x32\x6a\xf3\xb9\x43\x2a\x83\x3f\x40\xba\xa5\xdb\x8c\xea\x04\x38\x73\x07\x83\xca\x8f\x6d\x66\x4e\x6a\x8e\x50\x8d\x1b\xd4\x9b\x39\x87\x05\x01\x74\xd0\x49\xf1\x82\xac\x05\x16\x84\x71\xb6\x09\x44\xa9\x2e\x1b\xcd\xb7\xc2\x64\xfc\x72\xbc\xf3\x80\xe8\xc8\x11\xeb\xfa\x9b\x5a\x9f\x77\xd7\x98\xf4\xe7\x6c\xb7\x73\x0d\x0f\xdc\xe5\x34\x66\x67\xca\x91\xb9\xc4\x45\x55\x4e\x95\x18\x8f\x85\x97\xda\x12\xbd\x28\x81\x45\x1d\xb8\x78\x33\xde\x5d\x1e\xe1\xe8\x6d\xa8\xad\x37\x75\x28\x70\xb1\x3c\x5c\xe8\x00\xb1\x8b\x75\x4a\x75\x4e\x47\xb7\x3d\x50\xaa\x72\x36\x98\x7c\x4c\xce\xfb\xa1\xd1\x04\xbd\xca\x6a\xab\xe2\xaa\x01\x11\x2c\xdd\xc4\x1a\xac\xbb\x45\x29\xbd\x83\x55\x41\x77\xbb\xc8\x81\xba\x3a\xe3\xb1\x75\x77\xcb\xad\xd1\xc8\xab\xc1\x1a\x95\x57\x7e\x9a\x96\xb3\x99\x77\x98\x77\xeb\x13\xc5\xca\xfc\x58\x3f\xa5\xc6\xfc\xcc\x0b\x5d\x74\x6b\x41\x89\xb4\xa7\x83\x0e\xb5\xd9\x0c\x02\x5f\xae\x01\x48\x68\x6a\xb2\x92\x1a\x4f\xb3\xab\xb9\x1e\xee\x2c\x0b\x6a\xa5\xe6\x8e\xac\xcc\x58\xd3\xdb\xac\xeb\xc1\x0a\xad\xf6\x18\xe1\x58\xb7\x95\xd2\x48\x2f\x50\xb3\xce\x4c\x97\x79\x6c\x8d\x4c\xb0\x6e\x46\xc0\x60\x11\x1a\xe7\x65\x19\xa4\x46\xe0\x91\x05\x96\xfa\x82\x3b\xc2\x32\x67\x34\x98\x73\x2b\xe6\x30\x21\xf8\xe5\x67\xab\xc3\xc1\x5c\x7b\x9d\xc4\xce\x8c\x74\xbc\xbb\x08\x96\x64\x8c\xd4\x5a\x5c\xab\x2c\x89\xd3\xdb\xed\xab\xd9\x76\x33\x6a\x6d\x4a\x75\x6a\x53\xcd\x9e\x6
a\xff\x52\xc7\xa6\x84\xa8\x44\xc8\x40\xbc\xea\x9e\x06\x56\x2a\x23\xb5\xe5\xa5\xf3\x1d\x74\xfe\x3c\x13\x56\x4d\xab\xb6\x81\x2c\x99\x1f\x12\xdc\x83\x86\x29\x86\x50\xaf\x66\xe9\xd8\x5f\xab\xab\xeb\x0e\xcd\xf3\x4a\xdf\x3a\x74\xb0\x33\xff\x1b\x56\x8d\xf9\xcf\x9a\xe7\x5d\x64\xd9\x14\x88\xe0\x56\x04\x16\x9a\x27\x12\x12\xad\x68\x9e\x39\xe8\x45\x7e\xe9\x5b\x4c\xf4\xff\x6a\x9e\x57\xfa\xfc\x9a\xe7\x0f\x8a\x77\x2b\x5c\x8d\xdf\x9d\xd0\x0b\x05\xbc\x2b\x2b\x3a\x66\x37\x8a\xed\x8e\x5c\x67\x53\xe7\xdf\x76\x70\xdf\x06\x6b\xe5\xc9\x2b\x27\x97\x9a\x07\x07\xac\x34\xea\x86\xe6\x59\xb8\x2b\x7d\x5e\xcd\xc3\x42\xf9\x7c\x9a\x37\x46\x60\x61\xeb\xb2\x08\x9d\x60\x4d\xd2\x17\x2c\xdc\x52\xff\x52\xd4\x31\x3f\x21\xf4\x15\xe6\x0e\x5a\xf9\x9c\x03\xc0\xe0\xfc\xf5\x32\xff\x6a\x64\xf3\x6a\xc7\x06\xae\x99\x3b\xd5\xc5\x48\x77\xf1\xdc\x20\x3b\xb7\x80\xb8\xf7\xd4\x76\x09\xbd\xba\x39\x69\xe8\xdd\xfb\xd8\x9c\xfc\xaa\x84\x7a\xed\xb8\xca\x7a\xbb\xa8\x56\x0e\xa3\xb3\x65\xf5\x4f\x00\x00\x00\xff\xff\xe8\x6e\xdc\x71\x5d\x11\x00\x00") + +func fontsThinFlfBytes() ([]byte, error) { + return bindataRead( + _fontsThinFlf, + "fonts/thin.flf", + ) +} + +func fontsThinFlf() (*asset, error) { + bytes, err := fontsThinFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/thin.flf", size: 4445, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsThreepointFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x54\xcf\x6b\xe3\x38\x14\xbe\xeb\xaf\xf8\x26\x18\x92\xd0\xc4\x6a\xda\xec\xa1\x43\x29\x1a\xd8\xdd\x53\x4f\x7b\x18\x72\x30\x28\x8a\x2d\xc7\x66\x1c\xb9\xd8\x32\xd3\x01\x91\xbf\x7d\x79\x92\xad\x38\x7b\x58\x18\x8a\x1b\xbf\x5f\xdf\xfb\xde\xfb\x24\x97\x4d\xf9\xa4\x12\x3c\xe3\x09\xbb\x1d\x1e\xb1\xdb\x33\x5b\x75\x5a\x7f\xb4\xb5\xb1\x38\xfd\xc2\x3f\xca\x14\xaa\x69\xe8\xb7\x6f\x2f\xd8\xf3\x97\x3d\x5e\x3b\x6f\x88\x73\xdd\x59\x75\x4a\x87\x3e\x4f\x75\x31\xbc\xb1\xbf\xeb\x73\xa3\x2d\x3a\xdd\x68\xd5\x6b\x3c\xa5\x8f\xd8\x6e\xf1\x6d\x38\x0f\xbd\xc5\x1f\x1b\xec\x5e\x5e\x9e\xd9\x9f\xca\xea\xaf\x78\xc1\xb7\x8f\x8e\x1c\x7b\xc6\xfe\xfa\xfc\x68\x94\x51\xb6\x6e\x0d\xda\x12\x65\xdd\xf5\x16\x4d\x6d\xf4\x57\x46\xf4\xb0\xc5\xe2\xa2\xce\x75\x0e\x33\x5c\x4e\xba\x5b\xa0\x6c\x3b\x94\x75\xa3\x51\x17\xda\xd8\xba\xac\x73\x5f\xcc\x14\x00\x6c\xd1\x57\xed\xd0\x14\x50\xcd\x4f\xf5\xab\xc7\x49\xe3\xa8\x96\x1b\x5f\x64\xda\x9f\x2c\x09\x49\xb6\xd2\x58\x54\xaa\x2b\x4e\x8d\x32\x3f\x16\x44\xf4\xa3\xab\x8d\xed\xa1\x7a\x28\x78\xef\x06\xa7\xc1\x22\x57\x66\x69\x09\xa6\xbf\x0c\x7d\xa5\x0b\xf6\x1c\x10\x2a\x5d\x9f\x2b\x4b\x8c\x15\xf2\x4a\x75\x2a\xb7\xba\x63\x4f\xff\x13\xdc\xc0\xb4\x16\xb5\xc9\x9b\xa1\xa8\xcd\x19\x85\xee\x73\x6d\x0a\xdd\xf5\x6c\xb7\xf3\x65\x17\xf5\xe9\x27\x47\xa3\xcd\xd9\x56\x58\xe9\xcf\x29\x39\x6f\x2f\x17\x6d\xc2\x62\xfa\x35\x1e\xa0\x50\x0e\xc5\x59\xa3\x54\xb9\x6d\x3b\xf6\x18\x1a\x17\xba\x54\x43\x63\x03\xd9\x4b\x5b\x68\x3f\xb8\xad\xea\x1e\x65\x6b\x2c\x56\x4d\xfd\x43\x63\xb1\xbd\xe0\x71\x81\xd6\x78\x58\x65\x0a\x0f\xbb\x66\xbb\xbd\x07\x09\x8b\x26\xf6\x77\x5d\x19\x4b\x12\x31\x3d\x82\x39\xc1\x52\xc1\xe8\x6d\xb9\x9c\xb9\x1f\x1e\xc2\xe3\x8d\x95\xbb\x0a\x26\xdd\x5a\xb0\x04\x3e\x93\x0b\xc6\x37\x53\x70\x0d\xc1\x56\xf2\x10\x83\x84\xe6\x01\xc1\x05\x73\x10\x0c\x99\x10\x2c\xa3\x17\x27\x18\x87\x10\x2c\xc5\x46\xb0\xed\x61\x2b\xd8\x12\x47\x41\xd8\x52\x30\x7c\xc1\x84\x91\x08\xb6\x09\x18\x52\xce\x48\x25\x91\x2b\x41\x73\x8c\x6e\x7e\x10\xec\xc0\x05\x03\x41\x2f\xa9\x63\xea\x52\x32\xbd\x7d\x5d\x53\xba\xbc\xb3\x53\xb9\x9e\x6c\x27\x1d\xbd\xba\xc9\x7e\xa7\x51\xd7\x23\x18\xb8\x9f\x2d\x26\x5
+
+func fontsThreepointFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsThreepointFlf,
+		"fonts/threepoint.flf",
+	)
+}
+
+func fontsThreepointFlf() (*asset, error) {
+	bytes, err := fontsThreepointFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/threepoint.flf", size: 1996, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsTicksFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff[... gzip data omitted ...]")
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x57\xcd\x6e\xdb\x38\x10\xbe\xeb\x29\x06\xc2\x02\x4e\x0e\x2b\x39\xde\xec\x02\xbe\x79\xdb\x6b\xd1\xf6\xd0\xf6\x14\x60\xa0\xd8\x52\x2b\xd4\x91\x02\x4b\x6e\x91\x4b\x9e\xbd\xa8\x61\x5b\x1c\xce\x0f\x29\xcb\x0d\x10\x4b\x14\xc9\xf9\xfd\xbe\xe1\xb0\xda\x56\x8b\xe2\x2f\xf8\x0f\xfe\x85\xc5\x1c\xe6\xb0\x84\x79\x92\xf4\xf5\xfa\x7b\x97\x55\xdb\x0a\x6e\xde\xde\xc2\xdd\x72\x79\x0f\x8f\x2f\xf0\xa5\x5e\xf7\xed\x0e\x3e\x16\xbb\x62\x53\xc0\xcd\x8f\xe7\xc3\xcb\xaa\x6e\xaa\x6c\xdf\x57\xdd\x53\xb6\xde\xde\xc2\xf2\x3e\x9f\x2f\xf3\xc5\x3f\x59\x92\x54\xf5\xd7\x6d\xd9\xc3\x22\xbb\x83\xaa\x6d\x7a\x78\x2c\xba\x72\x03\x6d\x03\xdd\xb6\xde\x94\xbf\xc5\x67\x49\xf2\x69\xf7\x02\xed\x73\x5f\xb7\x0d\xa4\x7f\x3f\xc1\x5d\x0a\x7d\x0b\xdd\xd3\xbe\xfb\x06\x6d\x53\x42\x8a\x29\x3c\x96\xfd\xcf\xb2\x6c\x0e\x42\xba\x2c\x49\xde\x15\x7d\x7d\x1c\x41\x31\x83\x72\x06\xf5\x0c\xda\x19\xec\x67\xd0\xbc\x42\xd1\x6c\xe0\xfd\x2b\xec\xca\xe7\x6d\xb1\x2e\x3b\xf8\x3f\x85\x0f\x29\x7c\x4e\xa1\x48\xa1\x4d\x61\x9f\x1e\x56\xbc\xc9\x92\x04\x11\x71\x65\xff\xae\x12\xcc\x1f\xf2\x07\x14\x9e\x78\x5c\x46\xc7\xc7\x0f\xee\xe2\xe1\x1d\x0f\xff\xe7\xc5\x68\xbd\x1f\x06\x87\xad\xc7\x5f\x57\xdc\x20\xf4\xb2\x35\xde\xdf\xea\xf8\xe9\xb8\x00\x87\x4d\x82\x27\x8e\xa5\xd2\xbc\xab\x62\x58\x7f\x8a\xd3\xe9\xe3\x79\xac\x6d\xf0\xac\x16\xf6\xdb\x1e\x70\x91\xc4\x2b\x45\xac\xb2\x46\x08\x17\x47\x85\x13\x59\xf6\x74\xa5\x9e\x63\xe5\xc0\x07\xdd\xd4\x79\x56\xf8\xa8\xb0\x16\xd1\x8c\x73\x65\x01\x78\xa1\xe3\xbb\x0e\x26\x7f\x4d\x34\xbc\x86\x0f\x3c\xdb\x9a\x1a\x12\x47\x6f\x3f\x57\x20\x3c\xe5\x44\x19\x14\xf4\xad\x90\xd6\x5c\xa4\x99\xc5\x82\x6a\x09\x33\x81\x08\xb6\x63\x41\xb9\xa8\x53\x8f\x42\xc4\x87\x8f\x98\x4f\x07\x3a\xc3\x56\xf2\x4d\x7e\x3b\x6f\x96\x2a\x05\x0a\xe0\x65\x0c\x41\x1d\x2a\x38\x45\x81\x3f\xaf\xed\x27\x09\x94\x68\x48\x2d\xce\xc5\x92\xac\x25\x5c\xf1\x80\x16\x29\x1e\x82\x8b\x3d\x88\x8d\xb1\x8c\xa0\x20\x42\x62\x5c\xfe\xa3\x18\x1f\x3b\x3f\x41\x01\x85\xe1\xa8\x10\x59\x0d\x45\x2e\x35\x16\x31\x1b\x48\x89\xf3\xa8\xc8\xce\x1c\x31\x29\x62\x89\x43\xbb\x3e\x6a\xdf\xb9\xcf\x41\xcd\x9a\xa5\x22\xbe\x34\x8e\xfb\x14\x45\x66\x95\x28\xdc\x4e\xbf\x4f\x49\x9d\xa2\x11\xf8\xd2\x1a\x2e\xbd\x68\x4b\x35\x81\xc5\x5c\x0b\x91\x2e\x30\x6e\xde\x08\x91\x5e\xa5\xac\xb1\xc6\xa0\x51\x1e\x84\xc6\x41\x0a\x52\x0f\x28\x4a\x34\x03\xf8\x7c\xd0\x83\xb8\x42\x1e\x0e\xd9\x45\x39\xb0\x50\x13\xe1\x41\x5c\x8c\xe3\x72\xa2\x86\x88\x97\x04\xf9\x5d\xb4\x56\x6f\xab\xf4\xb1\x1e\x0a\xab\x60\x1b\x37\x18\x67\xbb\xda\x06\x04\x43\x21\xe6\x3e\x62\x24\xa6\x90\x0a\xa5\x29\xe3\x0d\x20\x4f\xa3\x74\x51\x0a\x5f\x8b\x14\x55\x3a\x5a\x2c\x78\xa2\x9f\x64\x99\x30\xf1\xf0\x8b\x48\x72\x48\xc1\x95\x18\x3b\xc6\x03\x9f\x0b\x01\xc6\x8e\xf7\x60\x04\x4c\x43\x35\x67\x72\x7f\xed\xa3\x04\x85\xc3\x7a\xec\x38\x08\xd3\xab\xa3\x68\xaa\xc0\x28\x0f\x38\x4e\x64\xb2\x59\xcc\xa7\x72\x30\xec\x4d\xd8\x5a\x1b\x81\xa3\xc3\x15\x13\x9e\x60\xb8\x38\x69\x42\x1e\x9c\x04\x8c\x38\xe8\xd1\x6b\x5a\xad\x77\x26\x6c\xa5\x92\xc9\x1d\x4b\xae\xea\x64\x33\x2d\xf5\x17\x4a\xef\xaa\xa5\xda\xa1\x7c\x4a\x1d\xaa\x17\x01\xed\x76\x21\x66\x6f\xfc\x98\x26\x4b\xbc\x38\xd1\x0b\x14\x7d\x7a\x47\xe8\xc0\x1a\x96\x00\x7a\xe6\xc7\x1f\xc4\xfa\x09\xce\x2a\x32\x9b\x43\x35\x76\x94\x27\x01\x85\x28\xe7\xd5\x15\x2a\x70\x45\x51\x21\xcf\x45\x59\x2a\xc5\x6d\x94\xa5\x3a\xed\xd8\x09\x6d\xdd\x27\xc3\x71\xb4\x9c\x94\x52\x35\x31\xe3\x22\x83\x1d\xa1\xc3\x04\xf9\xe6\xc7\xd4\x5d\x27\x90\xdc\x8d\x45\x5c\x21\x30\x5c\x93\x01\x6b\x39\xaa\xba\x16\x7e\x13\x5d\xa3\x59\xcc\xdd\xe6\x93\x55\x86\x49\x7d\xae\xa4\x28\x26\x97\xa6\xfb\x32\x06\x63\x00\x12\x5d\x20\xe2\xeb\x8c\x8e\xdd\xab\xb1\xc
5\x1d\x89\x96\x8e\xb7\x6d\x74\x7d\xf4\xe3\x66\xcd\x39\x42\x79\xad\xa1\x0e\x53\x85\x72\x4a\x83\x89\x8a\x05\x11\x8f\xf0\x05\x42\xb9\x6d\x62\x3b\xa5\x33\x8d\xc7\x2f\xd4\x8d\x22\xfa\xfc\xf4\x25\x07\x3c\xf0\x6d\x8e\x2e\x34\xe3\xc3\x12\x51\xed\xb9\xd0\x5c\xba\xc1\x04\xf1\xe3\x59\xaa\x83\xcb\x07\xbe\x04\x35\xbd\xce\x46\x3f\x45\x36\x68\xbc\x41\xa3\x3b\x12\x5d\x93\x01\x82\xe2\xc5\x97\x46\xd8\x1e\x2b\xd9\x72\x17\x6b\x76\x06\x12\x82\x42\x37\x74\x95\x36\x66\xc2\x99\x6e\x59\x34\xe1\xd8\x88\xa9\x3f\x23\xaa\x91\x1e\xd8\x89\xfd\x90\x22\x96\x77\x01\x44\x98\x76\xd7\xa4\x0a\x7e\x05\x00\x00\xff\xff\x75\xa7\xe0\x03\x21\x23\x00\x00") + +func fontsTicksFlfBytes() ([]byte, error) { + return bindataRead( + _fontsTicksFlf, + "fonts/ticks.flf", + ) +} + +func fontsTicksFlf() (*asset, error) { + bytes, err := fontsTicksFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/ticks.flf", size: 8993, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsTicksslantFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x59\x4d\x6f\xe4\x44\x10\xbd\xfb\x57\x94\x2c\xa4\xd9\x95\x60\x3a\x09\xbb\x48\x73\x1b\xe0\x8a\x80\x03\x70\x8a\xd4\x72\x12\x3b\x58\x4c\xec\x28\xf6\x80\xf6\x92\xdf\x8e\xc6\xe3\xf1\xd4\x57\xbb\xdb\x6d\x1b\x11\x69\x57\x8e\xe3\xe9\xd7\xaf\xea\xd5\xab\x6a\x4f\x71\x28\xee\xb2\xaf\xe0\x3b\xf8\x0c\x77\x9f\xe1\x06\x76\x70\x93\x24\x6d\xf9\xf8\x57\xd3\x1c\xb2\xaa\xdd\x16\x87\x02\x3e\xfc\xf8\x11\x6e\x77\xbb\x4f\xf0\xf0\x05\xfe\x28\x1f\xdb\xfa\x0d\x7e\xcd\xde\xb2\xa7\x0c\x3e\xfc\xfd\xda\x5d\xec\xcb\xaa\xd8\x1e\xdb\xa2\x79\xd9\x3e\x1e\x3e\xc2\xee\x93\xb9\xd9\x99\xbb\x6f\xb7\x49\x52\x94\xcf\x87\xbc\x85\xbb\xed\x2d\x14\x75\xd5\xc2\x43\xd6\xe4\x4f\x50\x57\xd0\x61\x9c\x96\xff\x1a\x3a\xa4\xfc\x09\x8e\x4d\x59\x3d\x9f\x7f\x2b\xca\xe7\xd3\xe3\xdb\x24\xf9\xed\xed\x0b\xd4\xaf\x6d\x59\x57\x90\x7e\xf3\x02\xb7\x29\xb4\x35\x34\x2f\xc7\xe6\x4f\xa8\xab\x1c\x52\x9b\xc2\x43\xde\xfe\x93\xe7\x55\x07\xd0\x6c\x93\xe4\xa7\xac\x2d\xfb\xdf\x20\xdb\x40\xbe\x81\x72\x03\xf5\x06\x8e\x1b\xa8\xde\x21\xab\x9e\xe0\xe7\x77\x78\xcb\x5f\x0f\xd9\x63\xde\xc0\xf7\x29\xfc\x92\xc2\xef\x29\x64\x29\xd4\x29\x1c\xd3\xee\x89\x1f\xb6\x49\x02\xa7\x1f\x6b\xad\xdd\x27\x97\x2b\xe8\x2e\xbb\xab\xee\xf2\x7c\x75\xba\xec\xaf\x60\x9f\x5c\xae\x60\xbf\xef\x97\x30\xf7\xe6\xfe\xb2\x48\x77\xdd\x2f\x73\xbe\x1e\x16\x1a\x96\xea\xef\x5f\x16\xd3\x96\xe3\x4b\xd2\x65\x6d\xf7\x8f\x2c\x4d\x77\x7a\x5d\xf6\x02\x21\x61\xec\x79\x1f\xfd\xff\x1c\x8c\x42\xb2\x67\x7b\x60\xfe\xec\x05\x9e\x3d\x4b\x37\xa1\x6f\x65\x78\x1c\x65\x43\x6e\x63\xf8\x9d\x66\xc8\xf2\x2d\xa0\x4f\x92\x0d\xa0\xcf\x8a\xfc\x5d\xfe\x8c\xf3\x38\x70\x18\x64\x61\x25\x3c\x7a\x12\xe7\xd7\x5a\x99\xe7\x00\xf6\x58\x8d\xf8\x2e\x49\x04\x02\x1c\x12\x81\x9e\x27\x89\xc0\x9b\xf3\x26\xc2\xad\x64\x26\x37\x2c\x35\xb7\x92\x87\x1d\x10\x71\x5d\xd9\xf0\x60\xe2\x9c\x11\x06\xe3\x4a\x1e\x3e\xc6\xa4\x23\xf2\x46\x72\xc6\xe5\x12\x59\x30\x16\xc5\x99\x32\x23\xa5\xc1\xb4\x4a\x83\x18\x55\x30\xc3\x8f\x26\x18\x65\x1b\x7c\x13\x22\x8f\x18\xca\xaf\x58\x0c\x8b\x03\x4d\x34\xa9\x18\xde\xc0\x51\x61\x83\x97\x54\x58\xcc\x36\xbc\xf8\x5d\x8f\xe4\x00\x0b\xdc\xe2\xe8\x06\xb8\x06\x66\x81\xee\x84\xe5\x00\x79\x1c\x31\x2d\x2b\x9b\x90\xa5\x01\xe4\xbe\x80\x6b\xcd\xa9\x41\xb5\x18\x68\x61\xd3\x6e\x41\xb9\x52\xb1\x53\x00\xbd\xff\xf1\x2a\xe3\xcc\xd8\x8e\x89\x13\xf2\xc0\xe2\x0e\xe1\x61\x36\x07\x9e\x3e\x37\xde\x8c\x46\x54\xea\x32\x34\xe9\x9d\x92\xbd\x11\xed\xd8\xda\x51\x6f\x71\xb1\xe7\xbd\xd0\x92\xa2\x61\xac\x96\x63\xef\x56\xb5\x0f\xde\xb0\xb6\x1b\xa1\x6a\x8d\x3d\x0d\xe0\xff\xa2\
+
+func fontsTicksslantFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsTicksslantFlf,
+		"fonts/ticksslant.flf",
+	)
+}
+
+func fontsTicksslantFlf() (*asset, error) {
+	bytes, err := fontsTicksslantFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/ticksslant.flf", size: 12086, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsTinkerToyFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff[... gzip data omitted ...]")
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x58\x4b\x6b\xec\x36\x14\xde\xfb\x57\x7c\x0c\x17\x6e\x42\xa3\xfa\x4e\x4a\xb8\xb4\xab\xd9\x14\xba\xd3\xa6\xd0\x8d\xa1\x55\xfc\xc8\x98\x78\xac\x60\x7b\x48\x02\xfe\xf1\xc5\x7a\x58\x47\xb2\x6c\x4f\x02\xf1\x4b\x3a\xaf\xef\x3c\x35\x55\x53\x3d\x8a\x6f\xf8\x89\x27\x1c\x1f\xc1\x8e\x38\x3e\x26\x7f\xd7\xed\x6b\xd9\xb1\x41\x7e\xe2\xf9\x13\xff\x94\x6d\x51\x36\x0d\xfe\xaa\xf3\xd7\xb2\xc5\xf1\x98\xfe\xfe\x1b\xee\xde\xcf\xea\xf5\xf4\x26\x3a\xd1\xcb\x6a\xf8\x35\x97\x97\xfb\x24\xf9\xf3\xe3\xad\x11\xad\x18\x6a\xd9\x42\x56\xa8\xea\xae\x1f\xd0\xd4\x6d\xf9\x47\x32\x49\x02\xc3\xe1\x22\x5e\xea\x1c\xed\xf5\xf2\x5c\x76\x07\x54\xb2\x43\x55\x37\x25\xea\xa2\x6c\x87\xba\xaa\x73\x45\x9c\x08\x00\x60\xe8\xcf\xf2\xda\x14\x10\xcd\xbb\xf8\xec\xf1\x5c\xe2\x3f\xf1\xfd\x41\x11\xb5\xf2\x3d\xf9\xa6\x37\x0d\xe7\x12\x87\xb3\xe8\x8a\xe7\x46\xb4\xaf\x07\x30\x86\xb7\xae\x6e\x87\x1e\xa2\x87\x80\xfa\xfa\x80\xe7\xeb\x80\x5c\xb4\xdf\x87\x89\x4d\x7f\xb9\xf6\xe7\xb2\x48\x7e\x6a\x0e\xe7\xb2\x7e\x39\x0f\x93\xc6\x02\xf9\x59\x74\x22\x1f\xca\x2e\x79\xda\x58\x7c\x40\x2b\x07\xd4\x6d\xde\x5c\x8b\xba\x7d\x41\x51\xf6\xf9\x04\x54\xd7\x5b\xb2\x8b\xf8\x50\x96\xa3\x29\xdb\x97\xe1\x8c\xbb\xf2\xc3\x6e\xce\xe5\xe5\x52\xb6\x1a\x98\xfe\x1e\xbf\x40\xa0\xba\x16\x2f\x25\x2a\x91\x0f\xb2\x4b\x8e\x4f\x8a\x43\x51\x56\xe2\xda\x0c\x5a\xd9\x8b\x2c\x4a\x65\xf8\x70\xae\x7b\x54\xb2\x1d\x70\xd7\xd4\xaf\x25\x0e\xec\x82\xe3\xd3\x01\xb2\x55\x7c\x45\x5b\x28\xbe\xf7\xc9\xf1\x51\x71\xd1\x48\x4f\xea\x7b\x62\x93\x04\xc0\xb7\xd3\x2d\xd7\x53\x22\x71\x4a\x46\xe8\x3b\x70\x4a\xb8\xb9\x03\x6a\x51\x2f\x8f\xea\x03\xd6\xaf\xa7\x04\xd3\x2e\x9c\x12\xc6\x19\x67\x58\x7f\x07\x1c\x99\x26\xd4\x2b\xd3\x27\xbd\x57\x29\xb3\xfc\xa6\xf7\xc9\xc8\x37\xa3\x2a\xa0\x75\x47\xaa\x97\x52\xcd\x20\xd5\xf2\x38\xa0\x89\x97\xf2\xf5\xab\x5a\x4c\x15\x14\x8c\x33\xcd\x3b\xb0\xd1\x62\x85\xe5\xbf\x12\xa4\xb4\x98\xb6\xd8\x27\x64\x9a\x58\x5f\x4e\x49\xa6\x1e\xa5\x86\xc3\x3c\xa5\xf0\xf7\x58\x05\xa5\xb5\x36\x1b\xd5\x16\xc6\x38\x63\x5a\xc7\xcc\x5b\x0f\x0c\xd1\xa8\x4b\x36\x3f\xc7\x9c\x15\x18\x40\x0c\xf3\xf7\x4b\x26\x37\x9d\x1e\xf0\xf1\x82\x67\xda\x23\x8d\x43\x34\xc0\xd6\x1d\xda\x5b\x56\x77\x7b\x9b\xf0\x60\xd2\xac\xa6\x5a\xa5\x54\xe1\x24\x53\xe3\x1e\xb3\x1e\xf1\xe1\x0f\xe3\xec\x71\xc6\xce\xc6\x87\x31\x83\xc5\x9d\xcf\x4c\xbc\x19\x28\x53\xaa\x23\x73\x34\x4e\x47\xa7\x82\xf6\x20\x7d\xa3\xea\x11\x0a\x98\x2c\x32\x7b\x18\x27\xf4\x24\x28\xa9\x0c\x66\x28\x80\x50\x62\x5c\x86\x42\x97\x46\x3b\x9b\x91\x34\x7a\xae\x20\x27\xd9\x6c\x65\x6a\xf3\x80\x22\xe7\xde\x43\xe4\x0c\xc7\x31\x90\x10\xbe\x6f\x12\x1a\xa7\x32\x1e\xd5\x60\xe1\x64\x17\x60\x7c\x91\x7d\xa4\x84\x05\x69\xea\x92\xdb\xd6\x01\x95\x98\x2e\xe9\xfd\x88\xa6\x81\x69\x1c\x11\x8b\x04\xea\x5e\xc2\x90\xcf\x82\x64\x24\x5d\x5c\x7c\x23\x0e\xba\xdd\xce\x43\xd3\x67\xf7\x9a\xdc\xe7\x5a\xa3\x6c\xae\x5e\x6c\x05\x33\x3e\xd3\x19\x67\x1b\x17\x19\x2d\xe2\xb1\x47\xfc\xc8\x83\x77\xbb\x1e\x11\x65\x72\x8c\x14\x5c\x04\x3a\x46\x73\x50\x81\xa2\x25\x64\x56\x12\xd7\xf7\x74\x8e\xff\x95\xd8\x25\x69\xc2\x9d\x9e\x1b\xc9\xbb\x41\xb1\x57\x92\xac\x92\x3a\x24\x76\x13\xcb\xcb\x7a\xae\x63\xdc\xd6\x80\x95\xac\x37\x4e\x0d\x8a\x97\x2b\x62\x7c\xa5\x88\x91\x2a\x32\x92\x7b\x86\x9d\xb2\x39\xeb\xa8\x33\x83\x1b\x0b\x33\xac\xeb\x38\xa3\xb0\xbc\x71\x1d\x5d\x11\x0a\x25\x24\x83\x6a\xae\x23\x38\x46\x1a\x4f\x6b\x7d\x99\x10\x1a\x82\xcc\x12\x66\x9b\x84\x61\x96\x2d\x24\x6d\xd4\xc2\xd5\xd0\xa7\x31\xf0\x25\x89\x5c\x4b\xe4\xd9\x6d\x12\xb9\xa4\xa9\xb0\x6b\xa3\x55\x8d\xda\xb4\x99\xa5\x7b\x41\xb6\x5a\x7d\x43\xdb\xc2\xfb\x56\x90\xc5\xdd\x90\xed\xd4\x7b\x0d\xb7\x23\x86\xc7\x80\x30\xf1\x18\x51\x66\x0
8\x9f\x63\xd2\x79\x50\x59\x77\xc2\x71\x41\xb8\x0b\x5c\xd8\x61\xed\xec\x6a\xea\x24\x59\xf7\x09\xf9\x5c\xab\xfc\x2b\x0f\x67\x32\x57\xbb\x32\xa7\x8a\x5a\xb3\x41\x17\x2b\xf3\xdc\x28\xef\x5f\xed\x77\xaf\x6b\x41\x8d\xa9\x99\xbf\x10\x9f\x07\x97\x35\x74\xd9\x52\x5d\x95\x38\x25\x3f\x36\x47\x6a\x2a\x41\xba\xa3\xc8\x34\xce\xc5\x11\x58\x54\xf6\xad\x91\xc9\xd3\x4e\xd2\xce\x40\x5b\x95\x47\x61\xbe\x9a\x80\x27\x25\x7d\x95\x62\x36\xc0\x08\x60\xdf\x63\x63\xb5\x66\x30\x4f\xef\x9c\x4c\xef\x72\x07\x65\xd7\xd3\x96\xe3\xa5\x5e\x5b\xa2\xe3\x51\x44\x0b\x3d\xc8\x3c\x35\x2e\xe7\x29\xb7\x39\x0c\x5d\xda\x88\x82\x8a\x4b\xf4\xf8\x42\xc3\x71\xf2\xe9\x31\x75\xa1\x07\xed\x90\x23\x4c\x9c\x40\xae\x9f\xf9\x28\x82\x5e\xb4\xac\xe0\xb1\xf0\xe4\x1c\x5b\x3b\xee\xa1\xcc\x63\x23\xc7\x7a\x28\x92\x30\xf3\xce\x0b\x71\x75\xb0\x32\x74\x2e\xb6\x1a\xbc\x63\x31\x08\x44\x4f\x90\xbb\x31\x18\x1e\x71\x56\x10\xf4\x9c\x75\x6b\x1f\x80\xfd\xf3\x1e\xe7\x2e\x40\x9a\x00\xe9\x01\x2b\x64\x3e\x1c\x86\x26\x5e\xdf\x76\x6d\x8c\xe4\x59\x04\xeb\x74\x0d\xeb\xb9\x62\xcc\xd3\x5d\xd8\x4f\x56\xc6\x65\x92\x09\x8b\x8c\x70\xb3\xb2\xe5\xc2\x03\x29\x9b\x20\x4f\x60\xfc\xab\x40\x49\x3d\x14\x57\xef\xea\x28\xa2\x7c\xf0\x85\x13\x86\xb2\x4c\x6a\xe0\xed\x8f\x21\x36\xd8\xe7\x0a\xe2\x8d\x13\xe4\xc1\x66\xb5\x9b\x0a\xc6\x79\x34\x9b\x8f\x44\xa9\x43\x90\xfa\x9f\x3c\xd9\xdf\xb6\x70\x5b\x73\x91\xb7\x26\xbf\x5f\x3d\x6e\xc9\x8b\x5b\x0f\x5c\xc1\x14\x7a\x4a\xfe\x0f\x00\x00\xff\xff\x68\xbd\xa6\xb5\xe0\x15\x00\x00") + +func fontsTinkerToyFlfBytes() ([]byte, error) { + return bindataRead( + _fontsTinkerToyFlf, + "fonts/tinker-toy.flf", + ) +} + +func fontsTinkerToyFlf() (*asset, error) { + bytes, err := fontsTinkerToyFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/tinker-toy.flf", size: 5600, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsTombstoneFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x56\x5f\x6f\xdb\xb6\x17\x7d\xe7\xa7\x38\x30\x84\x9f\x62\x80\x31\x1b\xff\xba\xa1\xc9\x93\xdc\x0e\x01\x8c\x0d\xdd\xd6\x64\x6f\x04\x68\xd9\xa6\x6c\xa1\xae\xe4\x49\x4a\x0b\x03\x04\x3f\xfb\xc0\x4b\xfd\xb5\x14\x60\xdb\xeb\x8a\x48\x0c\x75\xff\x1c\x1e\x9e\xcb\x2b\x39\x39\x25\xcb\x38\xc0\x0f\x78\x8b\x77\xb8\xbd\xc3\xdd\x92\xcd\xd2\x2c\xc9\xcf\xf9\xb7\x62\x91\x9c\x92\x19\x7b\x1f\x97\x7a\x8f\x3c\x43\x8c\xbb\x25\x4e\xba\xaa\x74\x81\x24\xcf\x2a\x3c\x16\xf9\x97\x07\xcc\x56\x59\xf9\x4d\x17\x25\xaa\x1c\x8f\x85\xfe\xf3\x45\x67\xd5\xe9\x82\x55\xf9\x59\xef\xf1\xfb\x8b\x2e\xab\x34\xcf\x4a\x06\xac\xb6\xf9\x4b\x85\xe7\x7c\x1f\x5f\xc2\x12\x1f\x8a\xcb\xb9\xca\x0f\x45\x7c\x3e\x5e\xf8\x0c\x5f\x97\x8b\x37\xd8\x17\x71\x52\x61\x99\x70\xdc\x8b\xe5\x1b\x71\xff\x7f\x8e\xed\x05\xbf\xc5\x2f\x27\x3c\xc6\xc7\x8c\x33\x60\x97\x9f\x2f\x45\x7a\x38\x56\xb8\xbb\x77\xfe\x4f\x4f\x2b\xfc\x12\x6f\xf3\x22\xae\xf2\x22\xd5\x25\x7b\x3a\xeb\x5d\x9a\xa4\xbb\xf8\x74\xba\x70\x54\x47\x8d\xd5\xd3\x87\xf5\xba\x26\x9e\x66\x07\x24\x45\xfe\x85\x1c\xb3\x9f\xd6\x4f\xcf\x9f\xd6\xef\xff\x78\x5e\xff\xfa\xf1\x61\x86\x52\xef\x1c\xd7\x07\x36\x53\x50\x1c\x0a\x4a\xf9\x89\xe2\xf0\x06\xff\xa4\x9a\x19\xc5\xe0\x3a\xc4\x47\x28\x8e\x19\x83\x81\x91\x6e\x50\x10\x90\x30\x6a\x0e\x18\x29\x0c\x3d\xc1\xb8\x8b\xec\x14\x83\x1b\x05\x0a\x21\x13\x7c\x1a\x0c\xe6\x04\x03\xe9\x06\x48\x08\x37\xaf\x93\x8d\x35\xf5\xac\xb6\xbb\x18\x8e\xb9\xb3\x79\x93\xf0\x69\x30\x56\x32\x58\x58\xf8\xc1\x4f\x3a\x83\xc5\xd5\xd5\x4e\x7b\x26\xeb\xcd\x8c\xad\xf6\xfb\xd4\x29\x15\x9f\xb0\x3b\xc6\x45\xbc\xab\x5c\xfd\xff\x87\x24\x3d\x9c\x74\x85\x5d\x9e\x7d\xd5\x45\x99\xe6\x19\xb6\x97\x07\x06\xe0\x67\x9d\x55\xf8\x18\x97\xa5\xce\x38\x3e\xeb\xac\xca\xa2\xdd\x65\xab\x8b\xf2\x1c\xef\xf4\x22\x2f\x0e\x1c\xef\xc4\xdd\x8f\xe2\xfe\x2d\x0b\x82\xe8\xfa\x8e\x18\x78\xc4\x60\xea\x7b\x11\x31\x78\xa3\xb3\x86\x61\xc4\x80\xa0\x37\x44\xac\xfd\xfb\x1b\x53\x82\x0e\x09\xb2\xb9\x23\x06\xd1\x5b\x4f\x36\x46\xd9\x33\x8a\xda\xf8\x0a\x68\x03\xd6\x81\x0f\x58\x5a\x3b\xe0\x1b\xd0\xc5\x23\x66\xdd\xa4\x75\xb9\x35\x04\x06\x91\x50\x41\xc4\x84\x63\x22\xc9\x6d\xd1\x7a\xfa\x12\xd9\x76\x49\xc5\x29\x6a\x4e\x78\xa0\xa5\x69\xed\x81\x5b\x91\x9b\x82\xac\x47\xac\xdd\x04\x2a\x3c\xa4\x35\x64\x19\x65\x1b\x85\x57\xb2\xc9\x2d\xc8\x2d\x3d\x83\xbe\x5b\x91\x5b\x11\x29\x51\x3b\x46\xd9\x37\x44\xed\x66\x9c\xdd\xb8\x45\xbb\x31\x84\x3d\x77\x50\x9f\x93\x45\xbf\xa8\x8d\x71\x58\x94\x56\x65\x49\xf9\xb2\x15\xb4\xab\xd4\x55\xb9\xfa\x91\xfd\x1a\x11\xad\xa0\xe3\xeb\xed\x8b\xa0\xa5\x15\x10\xeb\x40\xf0\x79\xc4\x02\xb9\x09\x9d\xc5\x61\x07\x43\xc5\xfc\x41\x23\xbd\x6d\x5f\x6f\x55\xeb\x3d\x6f\x47\x3b\x56\x4c\x60\x43\x0c\xf9\xb4\xde\xc6\x83\x2b\x71\x9d\xad\x7a\xc5\x34\x18\x9d\x94\x09\xf7\xd4\xda\x8a\xd6\x16\xa3\xb5\xb9\xa2\x6c\x3a\x9f\xb8\xde\x58\xa0\x22\x16\x98\xfa\xb6\x4d\xcb\x43\xb9\x1c\x4e\xd1\x37\x34\x62\x40\xa8\x86\x14\xed\xa6\x86\x5a\xf1\x8e\xa9\x21\x31\x86\xdb\xe1\x44\xd5\xbd\x8b\x7d\x98\xe7\x54\x23\x5c\xc5\xf8\xb6\x82\x9c\x88\xe9\x57\x6c\x6a\xdf\xfd\x8a\xbd\xae\x1a\x65\x4b\x9f\xbd\x79\x2d\x7b\xb4\xc5\xa6\x05\x40\x32\x4d\xf4\x97\x63\x0f\x83\x6e\x9c\x28\x09\xba\x92\x6c\x6c\x38\x76\x93\xbc\x62\xc4\xdc\x7d\xe7\x3a\xe1\xdc\x57\x66\x42\x3f\xff\xfa\x08\xa5\xef\x03\x21\x31\x8e\xe1\xf5\x89\xa1\xf2\x4a\xd3\xbe\x48\x60\xc3\x7a\x0f\x0e\x62\x53\xb7\x92\x68\xce\x25\x7a\x10\x8b\x5b\x22\xd2\x0c\x9b\xdb\xae\x19\x7d\xcb\xfa\x6e\xf5\xed\xda\xdf\xc3\xed\x82\x94\x69\x86\xdb\xb0\x4d\x14\xf2\xfa\x9b\xd2\x41\xfa\x87\xc1\x3f\xdb\x1c\xad\x3a\x86\x70\x37\x53\xd9\xf5\x1b\x51\x4e\xb6\xc1\x7f\xaf\xbf\xb9\x6f\x70\xf8\x0e\xf7\x47\x74\x50\xdb\xef\x4d\xf
e\xbd\xc9\xaf\x7e\x65\x51\x77\x99\xde\xfd\x4f\x7f\xf0\xfd\xcb\xe9\x5f\x01\x00\x00\xff\xff\xd7\x8b\x21\xb8\x29\x0d\x00\x00") + +func fontsTombstoneFlfBytes() ([]byte, error) { + return bindataRead( + _fontsTombstoneFlf, + "fonts/tombstone.flf", + ) +} + +func fontsTombstoneFlf() (*asset, error) { + bytes, err := fontsTombstoneFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/tombstone.flf", size: 3369, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsTrekFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x59\xeb\x6b\x1b\x47\x10\xff\x7e\x7f\xc5\x34\x2c\xdc\x17\x67\x95\xf4\x91\xda\x02\x97\xf3\x06\x42\xc1\x0e\x5d\x70\x9b\x2f\x21\x48\x3a\xef\xc5\x32\xc8\x92\xb1\x24\x48\x20\x7f\x7c\xd9\x9d\x9d\xd9\xd9\xbb\xd3\xc3\x56\x9c\x26\x50\xd5\x54\x73\x8f\x79\xbf\x7e\xab\x7c\x9c\x7d\xfc\x79\xa2\xe0\x15\xfc\x06\x2f\x5f\xc1\x0b\xf8\xe5\x45\x31\x18\x3c\x7f\xfc\x67\x30\x28\x06\x03\x78\xfc\x87\xf9\xb5\x33\xc6\x18\x0b\xe1\x2b\x11\x35\x80\xa0\xb6\xf0\x1b\xdb\xf7\xd8\x98\x48\xb8\xfe\xe7\x89\x7f\x9c\xb4\xd9\xc4\x12\xf8\x83\xfe\xf3\x1d\xf6\x13\x67\xc6\x1e\xf8\x99\xd8\xc2\x8f\xce\x97\x19\x3b\x7e\x48\x6a\xb9\x5b\xff\x63\x3e\xbd\xfc\x1b\x72\x10\xbd\xcb\x42\xb9\x5b\xbf\x7c\xdd\x19\xab\xf3\x4c\x6c\xd0\x6f\x93\x1d\xe7\x48\x58\x10\x51\xda\xce\x9f\xd8\x65\x06\x44\x42\xb7\xf3\x67\xec\x3e\xf0\x5c\x99\xb6\x9b\x87\x27\x89\x3f\x3c\x3f\x7d\x0f\x7f\x4f\x1b\x78\x73\x73\x3d\x6b\x56\xf0\x66\x31\x5f\xc1\x87\xd3\xe7\x7b\xf3\x3f\x58\xff\x7b\x78\xd7\xdc\x2f\x6f\x16\x73\x78\xa9\x5f\xc0\x87\x43\xed\x7f\x30\xff\xcb\x93\x93\x5f\x8f\xa0\xfe\x0c\xb6\x59\x35\xf7\x70\xa1\xc1\xac\x97\x57\xd3\xdb\xc9\xfc\xdb\xe8\xb7\xb3\x66\xb2\x6c\x60\xd9\xcc\x1d\x4c\x66\x33\xb8\x5a\xdc\xde\x36\xf3\xd5\x12\x56\x0b\x58\x4d\x1b\x98\xac\x57\xd3\xc5\x3d\x4c\x56\xc3\xa7\xd1\xff\x34\xfc\x27\xbf\xd7\x31\x8a\x77\xd5\x64\x76\x3b\xd1\x8d\x5b\x3f\x84\xff\x6e\x86\xfc\xd5\xfc\xf3\x27\x7d\xb5\xd4\x6e\xdd\x91\xf0\x95\xec\xff\xeb\xec\x12\xce\xce\x2e\xe1\xe2\xe2\xf2\xa7\x87\xf3\x1f\xb6\xbf\xfe\x6c\x6e\xae\xa7\xab\x23\xf8\xe7\x6e\xe4\xbf\xde\x4e\x3e\xcd\x9a\xf9\xf5\x6a\x7a\x04\x97\xb7\xeb\xe5\xf4\xed\xc2\x35\x47\xf0\x1a\x0b\xe2\xf5\x62\x3d\x5f\x15\x45\x50\xad\x94\xaa\x8a\x48\x40\xa0\x3c\x11\xa8\x40\x78\x0a\x09\xa8\x8a\x48\x40\x55\x45\x66\x67\x6c\xe4\xf6\x73\x05\xd9\xc3\xc4\x09\xfc\x38\x83\xbc\x80\xe8\x67\x55\x28\x9a\x4b\x24\x02\x9f\x3b\x0b\xce\xfa\xa7\xfe\x1b\xe4\xa3\x36\x51\x15\x28\xcc\x1d\x1f\x07\x35\xca\x53\xf8\x09\x02\x8e\x4d\xa0\x83\x35\x8a\x5e\x93\x12\xe4\xd5\x08\x92\x2d\x4a\x9f\x9e\x9e\xea\x40\x0d\xe1\x4b\xf4\x60\xcc\xf7\x00\xbe\xc0\x90\xef\x95\xf1\xde\x33\x64\xf6\x76\x07\x07\x82\xa7\xe8\x81\x0a\x04\x52\xce\x26\x3f\xf1\x3d\x10\x9a\xb5\x31\x75\x89\x0a\x8d\xd6\x64\xde\x1f\x18\xa8\xaa\x00\x6d\xc8\x68\xad\xc7\x26\xcc\x6e\xe1\x4d\x8c\x08\x86\xb6\x2a\x14\xe5\x21\xff\x7f\xcc\x92\x57\xa5\x45\x9a\x64\x9e\xd0\x40\x4a\x4f\xca\x94\xa7\xc7\x84\x5c\x44\xe6\x8d\x8e\x42\xc3\x92\x41\xa1\xb8\x6f\x62\x70\xa8\x10\x88\x8c\x82\x70\xf9\x50\x22\xb5\xc6\x44\xe9\xe1\x30\x54\x00\xae\xc9\x61\xa0\x86\xce\x98\x1a\xef\xd9\xe1\xb0\xa4\xd4\x93\x15\x8a\xfc\x53\x5c\x47\xa4\x5f\xe1\xce\x0f\x02\x93\xa3\x39\x73\xe4\x8c\x49\x8b\xc2\x7d\x8d\x63\x1c\x43\x20\xa9\x21\xb0\x49\x14\x51\x8a\x28\xa1\x87\xef\xe5\x7a\x62\x53\xa0\x32\xd4\x45\xa9\xca\xdb\x40\x8d\x46\x14\xdb\x11\xc7\x73\xc4\x31\x1c\x71\x92\x64\x62\x00\x5a\xda\xd0\x20\x4b\x29\x2e\x35\xa5\xc3\x99\xa1\x26\x59\xf1\x3e\x70\xa0\x6c\xa7\x43\xb0\xaa\x03\x6c\x12\x1e\x15\xa2\xbd\xb9\x80\x44\xf9\xb4\x99\x7d\xf6\xa8\xf9\x23\x33\xda\xe3\x79\x4
+
+func fontsTrekFlfBytes() ([]byte, error) {
+	return bindataRead(
+		_fontsTrekFlf,
+		"fonts/trek.flf",
+	)
+}
+
+func fontsTrekFlf() (*asset, error) {
+	bytes, err := fontsTrekFlfBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "fonts/trek.flf", size: 8336, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _fontsTsalagiFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff[... gzip data omitted ...]")
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x59\xdb\x72\xdb\x46\xd2\xbe\xe7\x53\x7c\x09\x9d\x40\x8a\x1b\x1c\x49\x8e\xe3\x43\xb9\xf4\xcb\x71\xe2\x24\xce\xc9\x89\x95\xa3\xe1\x0c\x21\x69\x28\x22\x22\x09\x9a\x04\xa8\xd0\x99\xf0\xe9\xfe\x67\xd8\xab\xad\xda\x9b\x7d\x86\xbd\xda\x8b\xad\xc6\xcc\xe0\x4c\x49\x74\x71\x97\xa5\xd2\xd7\x02\x07\xdf\x74\xf7\xf4\x69\xec\xc1\x68\x70\x10\xde\xc2\x5d\xdc\xc5\xfe\x1e\xfc\x7d\xdc\xbd\xd3\x39\x59\xe2\x99\x9a\xcd\xc2\x33\x3c\x8f\xd4\xec\x54\xe1\xe5\x30\x49\xa6\x0f\x85\xb8\xbc\xbc\xec\x4d\x93\xe1\xc9\x49\x2f\x9e\x9d\x8b\x79\x3c\x48\x2e\xc3\x99\x12\x83\xe8\x7c\xa4\x12\x91\xcc\xc3\x51\x78\x1e\xf5\x86\xc9\x78\xf4\xaa\x03\x60\x7f\x5f\x3c\xb8\x47\x38\x10\x7b\x07\x84\xfb\x62\xef\xa0\xf3\x5c\xcd\xc6\xd1\x7c\x1e\xc5\x13\x24\x31\xc6\xf1\x59\x34\x58\xe2\x7c\x16\x4e\x12\x75\x46\x38\x49\x13\x4c\x47\x2a\x9c\x2b\xcc\xd5\xe4\x0c\x63\x85\x10\xa7\xf1\x74\x89\x78\x86\x49\x9c\xf0\xe2\xb1\xea\x75\x1e\x8f\xe6\x31\xc2\x45\x18\x8d\xc2\x93\x91\x7a\x88\x13\x35\x4f\x30\x88\x12\x31\x0e\x93\xd3\x21\xe2\x01\x4e\x87\xe1\x2c\x3c\x4d\xd4\x6c\x4e\x08\x47\xa3\xf8\x12\xd3\x74\x72\x9a\xa4\x61\xc2\x5b\x8f\xa2\x0b\x85\x1e\xde\x85\x87\xff\x63\x3d\xed\x47\xbe\xdd\xa7\x60\xd8\xc1\xdb\x7d\x76\xaf\xa3\x90\x92\x7f\x41\x66\xbf\xa4\xfd\x7b\x23\x0a\x01\xe9\x49\x68\x68\xf0\x4f\xc0\x0f\x36\xa4\xd0\x0e\x0c\x05\x82\x26\xc3\x4d\x28\x8e\x78\x77\xbb\x7f\x0b\xc3\xb5\x86\x04\x52\x8a\x6c\xf7\x6c\xff\xa3\xb6\x25\xd7\xba\xf3\xfa\xcf\x15\x14\x2f\x96\xa3\x51\x78\x32\x5b\xe2\x49\x3c\x59\xa8\x59\x16\xcb\xc7\x1c\x87\xff\x0b\x2d\x42\x42\x08\x40\x11\xfa\xfc\x67\x44\x58\x31\xc6\x84\xd7\x8c\x29\xe1\x3b\xc6\x05\xe1\xf1\x1a\x0a\x3f\xfb\xdc\xf6\xab\x58\x7f\x60\xc1\x50\xe0\x78\xa8\x66\x0a\xd1\xbc\xce\x75\x1e\x12\xe6\x9b\x59\x84\xd9\x70\xc9\x69\x3d\x39\xab\x93\x5d\x84\x84\x09\x93\x2a\xc2\x3e\x63\x44\x78\x87\x31\x26\x5c\x32\xa6\x84\x9f\x18\x17\x84\x17\x8e\x4c\x85\xf3\x78\xb2\x05\x13\xeb\x14\xc3\x90\x70\xc6\xa8\x08\x07\x8c\x11\x65\xb1\x36\x8c\x09\x7c\xd0\xc3\x94\xf0\x29\xe3\x82\xf0\x89\x55\x26\x44\x1f\x2b\xbc\xc6\x77\x78\xbc\x0d\xa7\x37\xbc\x3d\x0a\x09\x03\x46\x45\xb8\xc3\x18\x11\xba\x8c\x31\x61\xc6\x98\x12\xbe\x67\x5c\x10\x9e\x5a\x92\x09\xf6\xf1\x0e\x2e\xf1\x13\x5e\x6c\x43\xa7\x33\x1c\xe0\x08\x0a\x9f\xe2\x93\x3a\xdd\x38\x24\x9c\x33\x2a\xc2\x87\x8c\x11\xe1\x16\x63\x4c\x48\x18\x53\xc2\x71\x2d\x16\x06\xb8\x83\x2e\x66\xf8\x1e\x4f\xb7\xa1\xdd\x39\x3e\xc4\x2d\x24\x38\xae\x93\x4d\x42\xc2\xf0\x06\xd1\x59\xd6\x6d\xd8\x08\x09\x66\x19\x6f\x46\x32\x6e\x6a\x32\x24\xfc\x0c\x4c\x14\xe1\x2e\xff\x1d\x11\xde\x63\x8c\x09\x4b\xc6\x94\xf0\x0b\xe3\x82\xf0\x99\x25\xf9\x19\x77\xf1\x1e\x96\xf8\x05\x9f\x6d\xc3\x4b\xbf\xe3\x23\xfc\x86\x14\x3f\xe0\xf3\x3a\xdd\xeb\x34\x24\xfc\xce\xa8\x08\x1f\x31\x46\x84\xdf\x18\x63\x42\xca\x98\x12\x7e\x60\x5c\x10\x3e\x37\x74\x17\xdb\xd0\xe9\x09\xee\xe1\x7d\x44\xf8\x02\xcf\xea\x74\xf3\x90\x70\xb1\x89\xd7\xb1\x8b\x11\xee\xe3\x03\xc4\xf8\x16\x5f\x36\xe8\x08\x4f\x32\x54\x84\x7b\x8c\x11\xe1\x7d\xc6\x98\x10\x31\xa6\x84\x2f\x18\x17\x84\x67\x96\x4e\xe0\x01\x76\xb6\x61\xe6\x8f\x75\x92\xb3\x90\x30\x62\x54\x84\xfb\x8c\x11\xe1\x03\xc6\x98\x10\x33\xa6\x84\x6f\x19\x17\x84\x2f\x2d\xc9\xaf\xd8\xc3\x2e\xa6\x78\x8e\xaf\xea\x74\x49\x48\x59\x4f\x4d\x14\xe1\x01\x63\x44\xd7\xf4\x9e\x5d\x7c\x8c\x47\x90\x78\x89\x3f\xf1\xcd\x36\x4c\x5c\xe0\x10\xb7\xf1\x0a\x7f\xe1\xeb\x86\xb1\x5c\xc0\x7e\xdc\xec\x24\x4f\x10\x40\xe3\x0d\xfe\xc0\x69\xc3\x58\xa6\xfb\x95\x51\x11\xf6\x18\x23\xc2\x2e\x63\x4c\x98\x32\xa6\x84\xe7\x8c\x0b\xc2\x57\xd8\x52\x2f\x6c\x68\xc1\xf1\xf9\x31\xa3\x22\x3c\x62\x8c\x88\xe7\xb4\x84\x03\xea\x25\x63\x4a\xf8\x93\x71\x41\xf8\xe6\xbf\xa5\xc5\x6
5\x48\x58\x30\x2a\xc2\x21\x63\x44\xb8\xcd\x18\x13\x5e\x31\xa6\x84\xbf\x18\x17\x84\xaf\xdb\x29\xb6\xa0\xc5\x32\x24\x9c\x30\x2a\xe2\x09\x0f\xcb\x88\xb2\x91\x6f\x19\x13\xde\x30\xa6\x84\x3f\x18\x17\x84\xd3\x76\x8a\xcd\x3f\x65\x8a\xb7\x1c\xe0\x77\x3b\x19\xc7\xf1\x30\x9a\x23\x9d\xab\x39\x92\xa1\xc2\x38\x9c\x4e\xa3\xc9\x39\x5f\x26\x9e\xc5\xe1\x04\xc7\x71\xfa\x46\x25\xde\x1c\x4f\x67\x4a\xf1\xad\x07\x4f\x86\x6a\x16\x5f\x28\x85\x41\x3c\x49\xb0\xd8\xef\xed\x1d\x74\x72\x65\xf8\xd9\x43\xee\x1a\xc5\xc5\x29\x4c\x96\xd3\xe8\x34\x1c\xf5\x26\x2a\x11\xc2\xbd\x9d\xdd\x96\xf2\xd7\x2e\xd4\x72\x1c\x4e\x1f\x5e\xfd\x1a\x4f\x9b\xe6\xb5\xce\xad\x5b\xd9\x6b\xdd\x9b\x08\xdd\x8e\xbd\x3a\x74\x3b\x76\x00\xef\x76\xec\x18\xcf\x82\x30\xab\xed\x48\xdd\xe5\x47\xd9\xac\xcf\x42\x1f\x9e\x11\x2c\x65\x43\x60\x29\xbb\x94\x74\x3b\xf6\x7e\x90\x09\x7d\xcf\x3d\xc9\xbe\x92\x5a\x3a\x4d\xf8\x26\xc3\x8f\xec\x65\xa2\xdb\xd1\x52\xea\xda\x13\x2b\xb0\xde\x12\xa8\x30\x41\xd7\x77\xe3\xb7\xb5\x5c\xbb\x3a\x7f\x12\x38\x4d\x44\xc3\x4a\x6e\x75\x37\xb0\x52\x5a\x26\xed\xbe\x73\x82\x30\xc2\x91\xc8\xf6\xb3\xab\x29\xfb\x4e\xc0\x78\x50\x3b\xca\x5c\xb0\x17\x9a\x0a\x37\x6e\xc6\x6d\x09\x84\xb5\x49\xaf\xac\x25\x1a\x5a\x6a\xcb\x9d\x5b\xb9\xce\xa6\xc2\x4b\xa2\x64\x65\x63\x91\x49\xfa\x35\x3e\x59\xcb\x2d\x02\x2b\xd8\xcb\x62\x79\xb5\xce\x5c\xa8\x2b\x42\x16\x97\xac\xb7\xe0\x9f\x35\x67\x59\x17\xa4\x09\x1d\xe6\xce\xd2\xb9\xee\x41\x91\x56\x9e\x48\x5e\x9f\xaf\xe6\x47\x7d\xc6\x00\x26\x16\xa0\x8d\xe0\xfb\x5e\xb6\x3a\x0b\x1d\xab\xb7\x30\x8b\x78\xb7\xa3\xcc\xf1\xb5\x88\x45\x20\x35\x5a\x4e\x47\xdb\x03\x0f\x9c\x20\xb4\x3b\x1d\x5d\xf3\xa0\x08\x72\xe3\x82\xdc\x4a\xde\xf6\x48\xf3\xc6\xb2\xe6\x6f\x17\x30\x02\xd6\x00\x18\x03\x76\x4c\x80\x57\x56\x67\x21\xc4\x87\x22\xb5\x8d\x4a\x68\x9b\xd7\x25\x2b\x5d\x5e\xb9\x88\x45\x20\xf3\xdd\x8c\x6e\x7d\x99\x47\x95\xf3\xa0\x49\x06\x4b\x29\x9d\xe0\x54\x62\x3b\xbb\x2d\xc1\x70\x65\x9c\x5c\xb1\xba\x2d\x62\xa5\x94\x79\x54\xb9\x23\x64\xb7\x18\xc1\xd6\x3a\x7d\x45\x36\x64\x01\x5e\x11\x2a\x11\x2b\x02\x7b\x3a\x7d\xcd\xfb\x16\xa1\x2b\xc0\x02\x02\x2d\x8d\x95\x10\xab\x55\x80\xf2\x59\x00\x58\xad\xaa\xa7\x6b\xab\xae\x39\xc2\xbc\xfe\xda\xd7\x5c\xf2\xba\xc2\x74\x24\x70\x24\x6a\x51\xf5\x48\x1e\xd6\x0d\x70\x27\xd0\x7a\x96\xd2\x93\xf5\xea\x73\x54\xaf\x3e\xcd\x60\xe2\xd7\xd9\xdc\x40\x1b\x81\xd3\xd2\x5a\x59\xf2\xa0\x08\x2a\xea\x9a\x74\x96\x19\x77\xe0\x49\x51\x8e\x13\x54\x4e\x67\xbd\xde\xf6\x51\xdf\x25\x98\x2f\x1b\x05\x51\x96\x6a\x6c\x1e\xb1\x41\x5e\x5a\xf5\xbb\x4e\xa8\x5a\xe9\xea\x89\x09\x6f\x0e\x2f\x59\x04\x93\xb4\x4f\x38\x3d\x0a\x2b\x4d\x41\xd9\xe1\x7b\x80\x97\x9d\x6f\x50\x38\x31\xcf\x7b\x81\xbc\x55\xe5\x0d\xc2\x65\xb0\xd9\xbc\xdc\x7c\x38\x27\xf2\x0d\xa4\x0d\x0d\x4b\x5f\xb0\x3b\xf2\x32\x77\xbd\x3a\x14\xdc\xe6\xb5\x9e\x6d\x8c\xd5\x12\xe4\x59\x4a\x97\x24\x9e\x7b\xbf\xd2\x7c\x72\x37\xae\x56\x8d\x56\xb5\x5a\xbf\x5a\xb7\x97\x09\x2d\xcb\xcd\xc7\xf8\x84\x8d\x0c\x8c\x07\x02\x78\xe5\xc6\x56\x4e\x89\x46\x63\x63\x47\xef\x16\x1e\x6f\x34\xb6\xcc\x83\x3a\xb0\x65\xd5\x04\xa0\xf5\x89\xce\x7c\xc2\x42\xb5\x74\x6a\xdf\x77\x1e\x6c\xf4\x15\xdf\x2f\x4a\xe7\x9a\x71\x49\xba\x71\xa9\xda\xd8\xac\x95\xe8\x67\x56\xea\xec\x81\xed\x70\x9e\x29\x13\x32\x4f\xa0\x52\x2d\x08\xa4\x75\xa5\x40\x60\x2a\x9f\xb6\xe9\xc6\x86\xba\xe4\xb4\x8d\x2d\x9f\x56\xac\xd0\xcb\x9f\x04\xa5\xd3\x59\x3f\x76\xf8\xb7\x7d\x7b\x96\xba\xe1\x6f\xd7\x22\x9d\xd0\xf7\x5b\xd2\x2d\xf7\x97\x77\x6d\x43\x66\xa5\x5d\x36\xd8\x39\x53\xd7\x3b\x1c\x33\xcb\x40\xae\x19\x21\xfd\x47\xd5\x27\x87\x5a\x4a\xaf\xd0\x44\xda\xc6\xd6\x67\xc7\x65\xe5\xdd\x0e\x12\xb0\x65\x59\xba\xb2\x5c\xa4\x79\xe0\x0a\x96\x08\xbc\xea\x6e\x79\x0c\x5e\x39\x76\x08\xed\xd9\x
2c\xd6\xb5\xe6\x73\x45\xce\xbb\xaf\x6a\x56\x56\x7b\x7c\x26\x88\xd2\xfb\x95\xb2\xdc\xcf\x1b\xaa\x8b\xaa\x4c\x73\x33\x76\x48\xd1\xa8\x10\x87\x0d\x0f\x3a\x7f\x17\xc5\x50\x56\x8b\xf0\xce\xa1\x2e\x0a\x75\xbd\x69\x36\x56\x73\x52\xad\x1b\x24\xa4\x3b\x9d\xbc\x21\xe7\x82\xfd\xe7\xff\xac\xb2\x05\x2d\xa3\xaf\xf4\x65\xb7\xd3\x0f\x4a\x71\xa2\x1b\x15\xa2\x5e\xf2\xd9\x70\x8d\x75\x25\xbf\xae\x77\x4f\xef\xec\x66\xe6\xee\x6a\xaf\xde\x7c\xca\x83\x84\xae\x9b\x6b\x93\x48\xea\x52\x7c\xdb\xea\xe3\xf6\xd5\xbe\xef\x57\xaa\x4f\x29\x2f\xdb\x3c\x58\x12\x7a\xe6\x98\x39\xc0\xbb\xdd\x8e\xaf\xfd\xf5\x31\x98\x4f\x0d\xa5\x81\x53\xba\xa6\x89\xea\xfc\xa1\x77\x5d\xf5\xca\xab\xa6\xd1\x5b\x3a\x4a\x6d\x4a\x9c\x11\xf2\xd3\x31\x0e\x6f\xbd\x98\xec\x68\x53\x0f\x4a\xd7\xa0\xbe\xf4\xd0\x72\x96\x99\x73\xcc\xff\xd8\x64\xb9\x67\x07\x09\xae\x91\xb6\xb2\xd5\x07\x4e\x0d\x72\x27\xe0\xe5\xd5\x7e\xed\x90\x22\x65\x50\x54\xc6\x7a\x4f\x93\x05\x77\x90\x3b\xa7\x56\x3e\x5b\xae\x48\x04\xe8\x15\x99\x91\xdb\xdd\x64\xab\xf5\xbf\x5d\x6f\x5b\xff\x2b\x5d\x4a\xf7\xd7\x5e\x91\x84\xd5\x44\x98\x79\xcf\x38\x49\xd6\xb8\xbd\x55\xbd\x7e\x4b\x37\x72\x94\x2f\x3d\xf6\xbb\xfa\x5d\xde\x8d\xce\x42\x16\xd5\xbe\x34\x70\x9a\x9c\x15\x45\xad\xd3\xa8\x55\xb6\x96\x2b\xa9\x1b\xc5\x44\x31\xfc\xe4\x27\xdf\xd2\xa5\x84\x2d\x5a\x22\x1f\x21\x77\x3c\xb1\xb6\xcf\x3b\x4a\x2b\x1c\xe5\x73\x55\x7e\xb5\x2b\xaa\x5e\xcb\x2d\xb9\xe8\x52\x2d\xfe\xae\x09\xff\x8f\x72\xe7\xbe\x6e\xf5\xdf\x36\x5a\xfd\xf7\x8d\x56\xff\x73\xa3\xd5\xff\xda\x68\xf5\xbf\x37\x5a\xfd\x8f\x7c\xf5\x7f\x02\x00\x00\xff\xff\xeb\x2c\x6b\xe6\x17\x20\x00\x00") + +func fontsTsalagiFlfBytes() ([]byte, error) { + return bindataRead( + _fontsTsalagiFlf, + "fonts/tsalagi.flf", + ) +} + +func fontsTsalagiFlf() (*asset, error) { + bytes, err := fontsTsalagiFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/tsalagi.flf", size: 8215, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsTwopointFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x53\x51\x8b\xe3\x46\x0c\x7e\xd7\xaf\xf8\x08\x86\xc4\xdc\x26\xde\x84\x2d\xdc\x1d\x47\x99\x96\xb6\x0f\xfd\x05\x07\x6b\x98\x4e\xec\x71\xec\xae\x3d\x93\xda\x63\xb2\x0b\xae\x7f\x7b\x91\xc6\xde\x6c\x5f\xee\xc1\x41\xd2\x48\x9f\x3e\x7d\x52\xaa\xb6\x3a\x99\x04\x27\x9c\xf0\x19\x8f\x38\x3e\x51\xb8\xf9\xab\x6f\x5c\xc0\xf9\x0d\xbf\xf6\x63\x61\xf1\xa7\x79\xb1\x37\xf3\x86\xdd\xf5\xfc\x77\x34\x95\xb3\x63\x67\x9c\x3b\x8c\x37\x13\x6c\xdf\x7a\x7f\x28\x4c\x4a\x7f\xf8\x1e\x55\x73\x69\x6d\x40\x6f\x5b\x6b\x06\x8b\xd3\xe1\x91\x7e\x33\xc1\x7e\x05\x8e\x5f\xbe\x3c\xe1\x97\xf1\x82\xe3\x91\xe8\xf7\xd7\x6b\x6b\x9c\x09\x8d\x77\xf0\x15\xaa\xa6\x1f\x02\xda\xc6\xd9\xaf\xc4\x94\xb0\xc7\xa6\x33\x97\xa6\x80\x1b\xbb\xb3\xed\x37\xa8\x04\xbb\xb5\x68\x4a\xeb\x42\x53\x35\x85\x14\x93\x01\x80\x3d\x86\xda\x8f\x6d\x09\xd3\xde\xcc\xdb\x80\xb3\xc5\x5f\x66\xfb\x20\x45\xce\xdf\x28\x89\x49\xa1\xb6\xd8\xd4\xa6\x2f\xcf\xad\x71\x2f\x1b\xec\xf7\xb8\xf6\x8d\x0b\x03\xcc\x00\x03\x89\x3e\xe0\x3c\x06\x14\xc6\x6d\x03\xc3\x0c\xdd\x38\xd4\xb6\xa4\x53\x44\xa8\x6d\x73\xa9\x03\x33\x36\x28\x6a\xd3\x9b\x22\xd8\xfe\x87\x8f\x0f\x70\x3e\xa0\x71\x45\x3b\x96\x8d\xbb\xa0\xb4\x43\x61\x5d\x69\xfb\x81\x3e\xc7\xb2\xce\xbc\xca\xe4\x68\xad\xbb\x84\x1a\x3b\xfb\xba\x26\x17\xbe\xeb\xac\x8b\xc2\x0c\x29\x3d\xc6\x82\xd2\x56\x66\x6c\x43\xe4\xd6\xf9\xd2\xca\x9c\xa1\x6e\x06\x54\xde\x05\xec\xda\xe6\xc5\x62\xb3\xef\x70\xfc\x69\x03\xef\x04\xc6\xb8\x52\x60\x52\x3a\x3e\x09\x4a\x14\x96\xd9\xfe\xaf\x0b\x51\x92\x28\xfe\x14\x4d\x8a\xbc\x52\xb4\xdd\x2e\xfe\xa7\x4f\xf2\x29\xda\x4d\xb3\x22\x3d\xa5\x4a\x91\xcf\x14\x65\x9c\x05\x0f\x45\x3b\xfd\x5d\x29\x4a\x15\x71\x7a\x36\x2b\xca\xb5\x52\x34\xe7\x8a\x74\xa6\x14\xe5\x13\x67\x4f\xb9\x52\x94\x4c\x89\xa2\x79\x9a\xd9\xe4\x0a\x69\x41\xf3\xe2\x33\x60\xc2\xb9\x82\x93\x2b\xca\xb9\x7c\x3b\x71\x1d\x03\xa6\x8a\x32\x41\xce\x14\x69\x2e\xce\xee\x70\xc2\x8d\x63\xc8\x84\x51\xba\xe4\x09\xd6\x8e\x4b\x63\x6c\x37\x33\x9c\x44\x7d\xec\xe8\x23\x91\x4c\x11\x33\xdc\xef\xe5\x63\x87\xb2\xa5\xa9\xe7\x6c\x68\xc6\x35\xa9\xf4\x9a\xe2\x0f\xff\xa6\x8a\x26\x9d\x2e\x0c\x26\x2d\x46\xce\x16\x57\x3f\xcf\x8a\x9e\xf5\xfa\x18\x79\x6a\x7e\x94\x5a\xcd\x30\x98\xa2\xe6\x93\xc8\xc3\x02\xb3\x9f\x29\x12\xc1\x78\xc0\x08\x9a\x67\x92\x1d\xd3\x73\x88\x93\x4f\x22\x38\x2b\x25\xed\x22\x9b\x19\xf7\xe8\xf7\x7b\x34\x97\xe9\x17\x95\x58\x34\xd9\x46\x64\xb0\x30\xca\x81\x4c\x51\x92\x67\x31\x0e\xee\xc6\x6e\x0c\xe4\x99\x6c\x65\x31\x92\x55\x60\xfd\x71\xf6\x9c\xaf\x28\x97\x06\xcb\x28\xbc\x47\xb9\x23\xde\xb4\xe6\x9c\x9d\x22\x66\x98\xe8\x84\x97\x12\x95\x48\x56\x15\x13\xcd\x41\x31\xa6\xf5\x99\x63\xff\x4a\x8c\xfb\xc4\x7d\xc7\x3d\x7e\xa8\x16\x61\xfc\xa2\xa3\x5f\x75\xe4\x97\x6f\x77\x81\x0f\x1a\x4b\x32\xa2\xfb\x5e\xb9\xb0\x91\x45\xa6\x52\xc8\x07\xbf\x5c\xde\x41\x2f\x11\x66\xa2\x79\x3c\x3d\xe9\x55\xbf\x04\xc9\xaa\x1f\x8f\x28\x37\x9b\x00\x62\x46\x27\x51\xf4\xf3\xb7\x75\xe1\x91\x33\x77\xcb\x3e\x8c\xa4\xef\x1c\x67\x26\xad\x65\x48\xae\x17\x7c\x45\x7e\xf6\xeb\xcd\x89\x19\x17\xfe\x0f\xae\x6b\x6f\xaf\xfd\xaa\xd7\x62\xca\x1f\x15\xfe\xfd\xdc\xe4\x0c\xc0\xd1\xff\x02\x00\x00\xff\xff\xf1\x79\xae\x0d\xff\x05\x00\x00") + +func fontsTwopointFlfBytes() ([]byte, error) { + return bindataRead( + _fontsTwopointFlf, + "fonts/twopoint.flf", + ) +} + +func fontsTwopointFlf() (*asset, error) { + bytes, err := fontsTwopointFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/twopoint.flf", size: 1535, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsUniversFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x3b\x7b\x8b\xe3\x38\xf2\xff\xfb\x53\x14\x22\x90\x59\x7e\x9e\x66\x7b\x76\x7e\xa0\x59\x96\x25\xdc\x83\xa3\x0f\xfa\xf0\x3f\x77\x60\x96\x63\x2c\x47\x4e\x77\xb8\xb4\x33\xe4\xb1\x3b\x73\x9f\xfe\x90\x54\x25\x95\x6c\xd9\xb1\xd3\x0f\xc6\x0d\xb1\xaa\xac\x47\xa9\x54\x2f\x95\xd4\x9b\xdd\xe6\x83\x5a\xc0\xed\x2d\x7c\x82\x8f\x3f\xc2\x4f\x1f\xe0\xf6\x63\xf6\xcf\x76\xfb\x7b\x73\x38\x42\xfd\x0d\xfe\xb1\x3f\x3c\xa9\x56\xc3\xbf\x9a\xed\x6e\xd7\x9c\xbf\xc2\x2f\xed\xef\xae\xb8\x6a\x9e\x0e\x37\x6b\xf5\x6b\xb6\xd9\x3e\xec\x9a\xd3\xf6\xbf\x8d\x36\x0d\xfe\xb6\x6b\xda\x16\xfe\xfc\xa8\xbe\x7c\x69\x76\x3b\xf8\xe5\xe1\x61\xbd\x3a\x6f\xcf\xeb\x9b\x46\x9f\x7f\x85\xbf\xab\xf6\xac\x0e\xdf\xe0\xf6\x43\x0e\xb7\x9f\x3e\x7d\xcc\xfe\xa2\x4e\xcd\xcf\xf0\xff\x70\xaf\x0e\x0e\x91\xfd\xf5\xeb\x97\x9d\x6a\xd5\x69\xbb\x6f\x61\xbf\x81\xcd\xf6\x70\x3c\xc1\x6e\xdb\x36\x3f\x67\x86\x58\x78\x0f\xe2\x49\x3d\x6c\xd7\xd0\x9e\x9f\xea\xe6\x20\x60\xb3\x3f\xc0\x66\xbb\x6b\x60\xab\x9b\xf6\xb4\xdd\x6c\xd7\xb6\x71\xa6\x00\x00\xde\xc3\xf1\x71\x7f\xde\x69\x50\xbb\x3f\xd4\xb7\x23\xd4\x0d\x54\x6a\x99\xdb\x46\xed\xfe\x8f\x6c\xe1\x2a\x9d\x1e\x1b\x10\x8f\xea\xa0\xeb\x9d\x6a\xff\x23\xe0\xfd\x7b\xf8\x72\xd8\xb6\xa7\x23\xa8\x23\x28\xb0\xd8\x1c\xea\xf3\x09\xd6\xaa\x5d\x9e\x4c\x37\xc7\xa7\xf3\xf1\xb1\xd1\xd9\xed\xad\xed\xe1\xb1\xd9\x3e\x3c\x9e\x0c\xc5\x0a\xd6\x8f\xea\xa0\xd6\xa7\xe6\x90\x7d\x82\xe1\x8f\x39\xb4\xfb\x13\x6c\xdb\xf5\xee\xac\xb7\xed\x03\xe8\xe6\xb8\x6e\x5a\xdd\x1c\x8e\xd9\xc7\x1f\x6d\xb3\x27\xf5\xd5\xce\x1c\x76\x4d\xfb\x70\x7a\x84\x77\xcd\x57\xaa\xbc\xde\x3f\x3d\x35\xad\x63\xcc\xf1\x07\xf8\x3f\x50\xb0\x39\xeb\x87\x06\x36\x6a\x7d\xda\x1f\xb2\x9f\x3e\xd8\x1e\x74\xb3\x51\xe7\xdd\xc9\x11\xfb\xb4\xd7\x8d\x9d\xf8\xe9\x71\x7b\x84\xcd\xbe\x3d\x65\xb7\x1f\x6d\x35\xc7\x4a\x43\x5f\xd4\x6f\x66\xd8\xb3\x58\x3d\xe7\x77\x95\x19\x0e\xac\x32\x29\x0d\x6a\xe8\x57\x08\xf3\xab\x54\xc0\xb8\x56\xf6\xd7\xbd\xa8\x17\xdf\xcc\xb7\x74\x8d\xc1\x0c\x67\xdb\x5c\x53\xf0\x63\xd0\x48\x60\xfa\xb7\x3f\x8b\x04\xa8\x94\x94\xca\xfd\x38\x22\xa4\x14\xee\xc7\xcd\x63\xec\xeb\x78\xcf\x3d\x32\x38\xe8\x60\x09\xb6\xae\xfd\xac\xb4\x34\x4f\xad\x60\x01\xab\x4c\x4b\x61\xbf\x0a\x59\x9b\x91\x4a\x99\xf3\xca\x55\x29\x95\xf9\xcb\x69\xa4\x4a\x48\xf3\x57\xe7\xc4\x3f\x53\xb9\xa2\xb6\xca\x82\x4a\x16\xf6\xab\x28\xed\x40\x85\xb0\x03\x75\xc9\x48\x50\xd9\x99\x48\xae\x74\xad\x72\x07\xe5\xda\xad\xe1\x1d\xc0\x9d\x44\x84\xeb\x57\x6c\x6a\x5d\x2c\x3d\x22\xf4\xed\x11\x84\x62\x08\x87\xb2\x08\x00\x1c\x67\xb1\xca\x10\x01\x80\xe3\x98\x11\x05\x36\xc0\x71\x16\x49\x52\xfb\xa8\x2e\xce\xc2\x34\x12\xc1\x34\x0c\xc1\x42\xba\x31\x18\x7d\xb2\x06\x14\xdb\x1b\x3b\x4d\x28\xeb\x5c\xde\x59\xca\x0a\xd7\xb3\x94\xd2\x92\x65\x16\xc5\xb4\x29\xcc\x6a\xb8\xb5\x73\xdc\x87\xb2\xee\x93\xdd\x83\x99\xca\x00\x68\xd7\xa5\x7b\x83\x53\x36\x20\xe1\xbf\xbe\x60\xc7\x50\xa8\xac\xdd\x31\xdc\x1b\xf5\x1e\xa2\x82\x91\x4a\x27\x80\xae\x64\x9a\xdb\x12\x72\x8d\x4d\x6f\xb5\xca\xa4\xc2\x66\x56\x2e\x6d\xb3\x0a\x79\x02\x24\xab\x40\x5c\x65\x85\x1c\xe5\xd6\xbe\x8d\xfc\xd9\x37\x58\x19\xe0\x82\x35\xb0\xb8\x31\x7c\x27\xad\x2f\x51\x66\xad\xec\x77\x51\xd6\x79\xae\xad\x32\x58\x6b\x60\x94\xdd\xea\x3c\x2a\xbc\xd1\x76\xab\xf4\xa8\xf1\xb9\x2e\x96\x55\x59\xd3\xc4\xef\x9c\x20\x0a\xdf\xdf\x85\xf1\xc7\xa4\x31\x05\x29\xc5\x35\x53\x7a\x3d\x55\x68\x95\x38\x99\x81\x48\x5e\xd3\x92\x27\x52\xfa\x3d\x0a\x11\x38\xe3\x8d\xc4\x80\xc0\x95\x43\x49\x92\x82\xdb\xc3\x68\xce\xf3\xcb\x0a\x1f\x9a\x35\x9f\xf3\x8c\x7e\x10\x98\xf3\x92\xe4\xae\x82\x58\xd3\x6b\x6c\x9d\x35\xd7\x02\x23\xc3\xdc\x56\xe9\x88\x37\xee\xa3\x6f\x1f\xdb\x49\xff\xd1\xc1\xdc\x66\x1a\xe
2\xc2\xc7\xf9\x72\x07\xc1\x0e\x5a\xd9\xf7\x96\x30\x97\x85\x58\x1a\xe7\x42\xea\x9d\x93\x71\xf3\x7a\x6e\x3d\x1e\x3e\xde\x9f\x77\x30\x46\xb7\xfd\x84\xdd\xf4\x2b\x69\x5d\x48\xae\xc8\xca\x18\x21\x2d\xbd\x5b\x4a\xd9\xee\x04\x26\xa0\x82\xd0\xa3\x81\xa6\xb5\x62\xcb\x35\xbf\x14\x0f\x9b\x88\x2c\xd0\x67\xa3\xdf\x25\x2f\x45\xf2\x8f\x0b\x42\x7e\x77\xe1\x7c\x1d\xf9\xdd\x85\xf9\x10\xf4\x52\x45\xeb\xdb\x5d\xde\xf0\xf4\x5c\xc6\x05\x9f\x7d\x0d\x95\x4a\x91\x17\xb7\xa0\xf0\xeb\x4f\x95\x59\x54\xe2\xda\x4a\x1e\x59\xc8\x84\x3f\xbe\x18\x59\x30\x1d\xd1\x51\x00\x65\x16\x93\x21\x6c\x2c\x10\x10\x18\x1a\x10\x22\x44\x0a\x92\x9c\xd6\x10\xef\x7a\x61\x5a\x32\x6e\x4b\x21\x7a\x53\xe3\x23\x80\xd7\x80\x85\xff\x0a\xf0\xf9\xf3\xe7\xcf\xde\x7b\x2a\x59\x14\x45\x81\xcb\x51\x14\x8e\x60\x72\x80\xb4\xea\xce\x6e\x94\xce\x5d\x8d\xc4\x6e\x53\x39\x6c\x40\x27\x08\x18\x54\x81\xb4\x71\x8b\xe9\xd2\x89\x05\xd5\x44\xbe\xe5\xda\xd4\xae\x6b\x54\x73\x57\x19\xa9\xf4\x3a\x8e\xe1\xdf\x8b\x51\xd9\x59\xb1\xef\xdf\x74\x26\x03\x77\xe0\x2a\xd2\x63\x8d\xf1\xdc\xc4\x1a\x23\xaf\xc2\xc5\xed\x93\xda\x5e\xbf\xf8\x43\x54\x4a\x1e\xb3\xa2\xed\x43\xbd\x26\x33\x0a\xa2\xb0\x0f\x59\x0c\x62\x97\x23\xcb\xc5\xb7\x8e\x48\x13\xdc\xa1\x19\x5f\x4e\xde\x04\x4d\x74\xb8\x8b\xc5\x44\x2f\x3c\xf2\xa6\x66\x14\x34\xcc\x8f\x58\x3c\x56\x49\x1f\x77\xa8\x50\x5f\x49\x6f\xd1\x55\x10\xb3\x55\x26\x30\xf8\xc5\x0e\x3c\x88\xe1\x99\x03\xa9\x6b\x0b\x26\x07\x4d\x2d\xee\x38\xd4\xd1\x25\x12\xf4\x45\xe2\xdb\xe4\x3e\x47\xa0\x0e\xd5\xd7\x4f\x7b\x1e\x7b\x27\xb2\xea\xa2\xb6\x0a\x41\xa3\x17\x9e\x94\x5c\x71\x8d\xd3\x52\xa4\x02\xeb\x4e\xb4\xde\x09\xd7\xe7\x1a\x41\xaa\x63\xc2\x23\xe3\x2b\xd0\x66\x93\xf1\x82\x1b\xf5\xa4\x6e\x14\x31\xcc\x08\xad\x0d\xd5\xa4\xf9\x16\x82\xb0\x1b\xa3\xd5\x56\xb3\x03\xce\x25\x28\x96\x41\x79\x0d\xee\x4f\xd6\xe5\x62\x2c\xe6\x16\x4d\x48\xdb\x9f\xa4\x7d\x11\x54\xd6\x6e\xd9\x47\xa3\xa6\xbb\xf9\xfb\x18\x3c\x19\xa2\x8d\x4c\xce\x3e\xba\x8e\x76\xfe\x8e\xc3\xb2\xee\xac\xac\x96\xcb\xca\x23\x69\x1d\x96\xe8\x8d\x3c\x4e\xcb\xd2\x90\x57\x22\x72\x65\xf7\xb2\x44\x9f\xdf\x72\xda\x86\xee\xa1\xdd\x27\x43\xf1\x8d\xe8\x10\xcd\x23\x93\x0b\x3a\x85\xf2\x45\x8e\x52\xc4\x7e\x33\x27\xee\x3b\x8e\xa2\x67\x93\x92\x68\x8d\x42\xeb\x8e\xcf\x55\xd4\x56\x5e\xed\x64\x83\x70\xc5\x92\xe5\x5c\x3c\xd2\xca\xb9\x42\xfb\xff\x0b\x08\x8a\x07\x3d\x02\x4a\xa9\x6e\x0c\x74\xe3\x35\x0a\xfd\x43\x29\x96\x89\x0d\xf3\xe5\x60\x2b\xe6\x71\x1e\x13\xe2\x48\x8f\x28\xeb\x06\x2c\x89\x5d\x4a\x1f\x51\x70\xc4\x0d\x79\x36\x1a\xd6\x91\xfe\xcc\x40\xd1\x67\x4c\xa3\xaf\x1d\x10\x77\xba\x04\x06\x45\x4b\x55\xee\x80\xcf\xd8\x30\xbc\x21\x95\xc3\x91\xd7\x1b\xcb\xb0\x9f\x2d\x93\x61\xe4\x1d\x93\x61\x19\xc9\x70\x52\xf7\xa6\xc8\x30\xa7\xa3\x2b\x7d\x43\x30\x59\x5f\x82\xbd\x61\x9b\xd8\xbe\x33\xa9\x51\x8a\x27\xa6\xfa\xfb\xbf\xa1\xb3\xc1\x20\x25\x1e\xff\x79\x90\x94\x79\x14\xa6\x96\x03\x8b\x32\x16\xa4\x44\xcb\x11\x54\x1d\xac\x95\x66\xa9\x4f\x03\x31\xb1\x36\x83\x2e\xd9\x16\x4f\x4a\x21\x5d\x36\xd4\xc1\x85\x4d\x91\xd4\x21\x63\x0a\x2e\xda\xce\x63\xfb\x54\xa6\xbc\xcd\xe5\x3d\xc7\x2c\xb5\x7a\x1e\x78\xb5\x1d\x09\x5d\xd4\x0c\x0e\x99\x99\x9a\xe3\x10\x19\x32\x45\x5a\x2e\xbd\x60\x11\xd6\x68\x73\x10\x67\x87\xb5\x1a\xce\x64\xdc\x60\x9d\xd6\x73\xc1\xaf\x6c\x32\xbe\xab\x0d\x15\x5a\x87\x94\x4a\xa4\x66\x38\x30\x4f\x3e\x9b\x90\x77\xaa\x23\x98\xe6\xd0\x25\xbf\x43\x79\x87\xe8\xba\x4b\xaf\x8c\xb5\xb9\x4a\x2d\xcb\xd4\x6c\x5f\x64\x3b\xfb\xc6\x33\x8e\x8b\xfa\x6e\x74\x30\xdb\xc7\x0c\x28\x65\xee\xe3\x28\x00\x7a\x61\xc0\xdc\x6c\x5f\x60\xfb\xb3\x63\xad\xf8\x6c\x23\xf4\xfc\x72\x7e\xea\x2d\x98\x6d\x0c\x4b\x97\xd9\xa5\xec\x33\xbb\xec\xed\x2c\xdf\x90\xd9\x91\x35\xf5\xc1\x3b\x93\xee\x38\x76\x4b\x
05\xe2\xd7\xa7\x39\xa2\x7c\x25\x3b\x45\x35\x0f\x3b\x45\x65\xe1\x37\xd5\xab\x5e\x3a\x19\x33\x94\xe3\xea\xee\x16\x5f\x1b\x1e\xa2\xf7\x65\xe2\x95\x79\x70\x6f\x9f\x30\xba\x4d\x98\x60\xe1\x0c\xcd\xb1\xdf\x81\xf8\x24\x82\x1f\x45\x70\x8c\x13\x04\xe6\x71\xc8\x3c\x05\x7f\x13\xb6\xba\xe4\x6d\xf8\xe1\x9a\xf3\x35\x31\xa7\x2b\xb9\x4c\x26\x2b\x86\x51\x49\xd7\x43\xd5\xef\x58\x8c\x4a\x05\x3c\x7d\xe6\xb3\xa9\x3b\x33\x15\xd8\x2c\x97\xac\xe4\x8f\x6d\x5d\x77\x05\x2b\x91\x56\xf8\x89\x47\x2c\x40\xa6\x28\x50\xd6\x72\x51\x89\x33\x47\xa1\x4d\x73\xa5\x98\x49\x64\xed\xaa\x1e\xb3\x92\x53\x1e\xfe\x94\x10\x56\x76\xdc\x84\x26\xd1\xe5\x33\x83\x41\x34\x55\xf8\x3c\xc8\x5a\xb2\xfc\x02\xcf\x2d\xd8\x73\xaf\x70\xda\x8d\xb6\xda\x5b\x2c\x76\x28\x76\x85\x3b\x9e\x42\xb0\x45\xe4\x61\x4d\x40\x48\xa5\xa2\xf3\xef\x4a\x76\x58\xfc\xba\xe6\x60\x20\x2e\xcc\x23\x18\x43\x66\x84\x7d\x00\x6d\xe1\x10\x4e\x83\x3d\x69\x8c\xee\x80\x70\x68\x74\xbc\x21\x82\xbb\x9b\xd7\x17\x28\xc4\x43\xa7\x57\x71\xc1\xe1\xaa\xa4\x0d\x01\x11\x29\x64\x27\xdd\x55\x15\xbe\x0a\xf6\x27\xa2\x94\x96\xa9\x12\xc4\x0e\x1f\xe1\x2f\x4d\x80\xaf\x32\x8f\x29\xa9\x5b\x16\xd7\x14\x86\x98\x12\x65\x41\x73\x49\x1b\x23\x77\xde\x51\x88\x92\xce\x3b\xee\xbc\x7b\xbe\x8b\xb2\xd3\xe0\x2f\x74\xbd\x0a\xc8\xe1\xee\xf7\xef\x03\xee\xc8\x7b\x60\x70\xf2\xee\x0c\x84\x2b\x10\x5e\x50\x66\x17\x46\x77\xcc\xa3\x50\xae\x74\x51\x94\x25\x1e\x2b\x62\x0a\x1d\x65\xd2\x7e\xb3\x19\xec\xb0\x5f\x27\x23\x61\x42\xdf\x5a\x17\xa2\x2f\xbd\xe3\xfb\xf5\xd9\x5b\xd8\xdc\xd0\x47\x39\x22\x3a\xc9\xc4\xe4\x79\xef\x24\xd3\xdd\xd6\xc2\xe3\x07\x13\x32\xd7\xb5\x8d\xd5\x67\x46\x79\xb3\x20\xc7\x41\x64\xa0\x22\xa5\x70\x14\x90\x6b\xb0\x8b\x25\xa4\x72\xe4\x29\x0a\x92\x88\xba\x79\x0c\xf4\x45\xd9\x3f\x13\xef\x83\x48\x1e\x2e\x1b\xd2\x57\x95\xb8\xa6\x75\x54\x99\x28\xa4\x9c\xcc\xf0\x22\xbf\x22\x03\x8b\xcf\xe6\x41\xfa\x8a\x82\x5d\x13\x12\xb4\xbe\x57\x33\xd0\x16\x15\x4d\x8f\x1f\x43\x7b\x5f\x71\x7f\x2f\xe5\xfd\xfd\x3d\x2c\x22\xec\x84\x62\x7f\xf0\x91\xa5\x9b\x04\xd2\xd2\xe9\xab\x96\xae\xe4\x4b\xa7\x14\x57\x5e\x7b\x6f\xc0\x7e\x36\x9a\xf1\x26\x1a\xda\xd9\x39\x24\xc1\x51\x76\x38\x78\x15\x5d\x54\x0e\xdb\xff\x79\x59\xcd\xd8\x1d\xf6\x0d\xf0\x1c\x6f\x9a\x7b\x6f\x6a\xb8\xd9\x3d\x43\x65\xcc\xba\x00\xd0\xd5\x19\x0b\xe4\xfe\xd8\xd4\x38\x91\xdf\xc0\x67\x26\xcd\xb2\x2a\x96\x86\xac\xfa\x49\x81\x18\x88\xd9\x36\xfd\x37\xc5\xb6\xb1\xf5\xb9\x0a\x6b\x65\xc7\x4c\xc8\x48\x7a\x57\x84\xdc\x89\x68\x4f\x92\xd8\x7b\x26\x76\x06\x65\xcf\x53\x5c\x59\xa7\x54\xa2\x7a\x25\x95\xb8\x96\x4a\x6e\x77\x81\x79\x2e\xa4\x32\xec\x3f\xb9\x79\xa1\x93\x7d\x32\x2f\xb3\x5d\xeb\xcb\xf0\x72\x62\x00\x20\x2b\x4e\xe5\xa8\x1d\x7b\x5b\x53\x0d\x09\x53\xbd\x9c\xe0\xce\xaf\xf7\xb2\x9c\x8f\x41\xc5\x4a\xbe\xcd\x4a\xf0\xa5\x0b\x8d\x8c\x30\x62\x83\x46\x80\xc8\xf7\xdf\xc9\xdf\x20\xd8\xe3\xca\xf8\x28\x12\x4e\xcb\xb1\x7f\x63\x96\x24\xc8\xde\x45\xc3\xd7\xfd\x60\x2d\xb6\xee\xbb\xef\x49\x5e\xdf\x6f\xe5\x6c\x7a\x2b\x1a\xfd\xe5\xbc\xfe\x2c\xf3\x10\xd4\x32\x16\xa5\xd9\x01\xdb\x7c\xb5\xc4\x62\x9c\x9a\x8b\xd3\x72\x51\x4a\x2e\x8f\x32\x36\x42\x26\x2f\xb5\x5f\xa2\x32\x5d\xeb\x2a\x2c\xcd\x40\xd7\x89\x89\x68\x49\x6f\x36\x1f\xbc\xc8\x12\x4f\x8b\xae\xb2\xc4\xf9\x28\xcc\x29\x95\x45\x72\x92\xe3\x94\x3d\xdb\x5e\xda\x12\x5a\x22\x97\x81\xc0\x3b\x94\xb6\xf2\x0f\x52\xca\x77\x6c\x47\x2f\xc0\xdf\x60\x44\xdb\x9a\x4c\x49\x7c\x5f\xe2\x53\x4a\x19\xdf\x18\xe5\xd9\x33\x1d\x92\x92\xd7\x19\xa5\x7e\xb6\xca\xdd\x22\x73\xc6\xc3\xdf\x8e\x8c\xfe\x17\x6a\x28\xc3\xd5\x37\x4a\x4a\xfb\x5a\x32\xcc\x29\x69\x73\x30\x67\x68\xbb\x7f\xe7\x67\x28\xd8\x1d\xa9\xc9\xa6\xea\x52\x1c\x38\x1e\x4a\x23\xf1\x74\x36\x83\x8b\x22\xa3\x80\xb9\x5f\x0c\xff\x57\x64\xc5\x0e\
x3b\xd2\x21\xad\x98\x6c\x96\xcb\x70\xfb\x1b\x6f\xb0\xf5\x34\xe5\x3a\x84\xd2\xb2\xc6\xff\x5c\x50\x21\xdb\x21\x44\xc9\xae\xf5\xd3\x83\xac\x7c\x3e\x62\x95\x25\xff\xbe\x27\xf4\xff\x02\x00\x00\xff\xff\xaa\x8c\x6e\xd1\x7c\x3c\x00\x00") + +func fontsUniversFlfBytes() ([]byte, error) { + return bindataRead( + _fontsUniversFlf, + "fonts/univers.flf", + ) +} + +func fontsUniversFlf() (*asset, error) { + bytes, err := fontsUniversFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/univers.flf", size: 15484, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsUsaflagFlf = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x58\x4d\x6f\xe3\x36\x13\xbe\xf3\x57\x4c\x16\x3a\xec\x02\xaf\xb4\x51\x36\xc9\x26\xc4\xab\xc2\x01\x82\xd4\x8b\x6e\xdd\x00\xe9\x02\xbd\xd2\x32\x2d\x73\x23\x93\x86\x48\x27\x48\x7f\x7d\xa1\x6f\x0e\x45\xdb\x72\xd0\x4b\x01\x3b\x88\x38\xa6\x86\x0f\x67\x9e\xe1\x8c\x46\x5e\xe6\xcb\x0b\x16\xc0\x35\x5c\x41\x7c\x03\x61\x0c\xf1\x2d\xd9\x6a\xb6\xcc\x59\x16\x2d\xf3\x25\x84\x21\xc4\xf1\xe7\xeb\xcf\xb7\x97\x84\x3c\x88\x2c\xe7\x06\x52\x25\x5f\x78\xa1\x85\x92\x30\x7f\x83\xdf\xb8\x34\x30\x63\x5a\x73\x09\x1f\x9f\xb9\x34\x72\x92\xbe\xcd\x79\xa1\x37\x2c\xe5\x91\x2a\xb2\x4f\x30\x67\x9a\x2f\x40\x49\x30\x2b\x4e\x96\x4a\x1a\x58\x70\x2d\xb2\x72\x39\x25\xe4\xae\x30\x22\xcd\x39\x5c\x5c\xdf\xdc\x80\x5a\x42\xc1\xd3\x88\x15\x46\x47\x4c\xa7\x42\x50\xf2\xc8\xcc\x8a\x82\xe4\xaf\x3a\x12\x66\x11\x6d\xd7\x22\x5d\x45\x7c\xb1\x3d\x4b\x19\x97\x67\x7a\xa3\x54\x1e\xad\xb7\xd5\xcc\x4a\xbd\xe6\x4c\x2e\xa2\x82\x6b\xa3\x64\xc4\xa4\x8e\x24\x37\x67\xd5\x5a\xbd\x29\x84\x34\xb9\x90\xcf\xd5\x9c\x66\x7a\x25\xd6\x22\x7a\x7d\x65\x51\xaa\xd6\x67\x52\x99\x70\xa9\x8a\x70\xcd\x44\x4e\x1e\x0a\xb5\xa6\xb0\x10\x5a\xf3\x7c\x22\xb7\x52\xa4\x91\xac\x76\x80\x8f\xf7\xec\x45\x2c\xe0\x29\x82\x6f\xe5\xcd\x4f\x64\xc6\x5f\x75\x56\xa8\xed\x46\x53\xc7\xf2\xff\xb1\xdc\xd4\x52\xc8\x0a\x43\x9e\xb6\xf3\x9f\x3c\x35\x14\xbe\x0b\xc9\x29\xfc\x78\xba\x83\x87\xef\x77\xbf\x42\xc9\x07\xb9\x67\x86\x53\xb8\x86\x99\x7a\x81\xf8\xf6\xf6\x12\xe2\x2b\x7a\x79\x43\xcf\x63\x08\xcf\xaf\xcf\xcf\xc9\x1f\x45\xc6\xa4\xf8\x9b\x19\xa1\x24\x85\x59\x35\xb2\x1c\x7e\x48\x51\x45\xc2\xbc\x91\x12\x55\x53\xf8\x7a\x41\x9e\xb8\x5c\xf0\x82\xc2\x5c\xcd\xd9\xa4\x71\x8f\xdc\x6d\x36\x85\x7a\xe1\x0b\x67\xfa\x77\xae\x35\xcb\x78\xf8\xed\x9e\xc2\xff\xbf\xdc\xfe\x14\xdb\x55\xf0\x1c\x5f\x4c\x32\x96\xad\x79\xcb\xcd\x2f\x64\x36\xfb\xf3\x31\x7c\x54\xda\x08\x99\x85\x53\xa5\x0d\x05\xa4\x41\xfe\x2a\xf8\xd2\x17\x23\x37\x98\x55\x8c\x11\x2f\x34\xbe\xfa\xfa\xe5\x86\x84\xe8\x43\x02\x00\x08\x26\x23\x86\x09\x09\x92\x24\x71\xae\x00\x80\xe4\xea\x0b\x24\x49\x39\x0b\x95\x54\xdf\xe8\x70\xfc\x42\xad\x9d\x54\xff\x35\x5e\xfd\x19\x39\x0f\xed\xa7\x06\x9a\x26\xd3\x46\x7d\x0a\x53\x68\x4c\xe9\x54\xa7\x30\x6d\x00\x5a\x3d\xb4\x38\x81\xde\xe8\xd6\xaf\x72\x6d\xb3\x3f\xb4\x1e\x03\x24\xee\x62\x4a\x69\xa7\x40\x29\xad\xb0\xac\xdd\x5b\xda\x3a\x38\x7b\xde\x71\xa1\x56\xad\xf1\x03\xcf\xb5\x59\xdd\x00\x0e\x86\xce\xf2\x36\x24\x68\x16\x0d\x16\x85\x15\x6a\x69\x78\xcf\x6b\xe9\x51\xef\xbc\xc5\x7e\x6d\x7b\xef\x07\x0c\x62\x51\xc3\xf5\xce\x82\x03\xb4\xe3\x2e\x5a\x0c\xee\xb5\xb5\xbc\xa1\xa7\xd7\x0f\xb0\x88\x4c\xf5\x29\x74\x62\x29\x07\xc3\x4b\xbd\x34\x98\x38\x27\xa1\xf7\x12\x3b\x8f\x4f\x87\x7b\x26\xea\x35\x2d\xb3\xad\xea\x40\x6c\x39\xb5\x58\x6c\x20\xba\xab\x3f\x03\xdd\x7d\x1c\x83\x13\xcb\x4e\xf0\x92\x63\x41\x00\xf6\x18\x59\xd4\xed\x9b\xf8\xce\x7e\x7b\x1f\xfb\x89\x76\xf2\x8b\xd8\x85\x16\xc1\x32\x75\xdc\xfe\x88\x67\xe4\x29\x8c\xe5\x79\x87\
xb1\x16\x85\x38\xd6\x3b\x20\x9c\x68\xc3\x3b\xac\xf0\x43\xb8\xb6\x59\x39\x68\xe7\x8d\x7d\x80\x6d\xa9\x3d\xcf\x41\x9f\x4a\xb6\xdc\xb0\x3b\x38\xf4\xc3\xf2\x87\xd3\x15\x85\x72\x32\x26\x11\x0f\xa4\xe7\x88\xbd\x7c\x36\xf5\xfb\xdb\x84\x7b\x93\x01\xe0\xc3\x87\x6e\x59\x57\xa7\x87\x10\x7d\xd0\xac\x74\x6d\x4d\x6f\x0f\x01\x38\xfa\x70\x38\x9e\x16\x01\x3b\x6a\xc1\x58\x08\x18\x40\x1c\x38\xdb\x38\x3d\x3c\xa2\x7b\xd6\xde\x51\xc9\x8e\xb5\xc2\x89\xe6\xc1\x22\x35\x1a\x62\x4f\x92\xa2\x5a\x01\xde\xd0\xe2\xdc\xf3\x85\x76\x7f\xcd\x1b\x15\xda\xc3\xb5\xbd\x54\xae\x55\xb0\xd4\xaa\x1e\x53\x98\x51\x27\x65\xbb\xed\x56\x27\x17\xa2\x6f\x6b\xf6\x1e\x9e\x03\x61\xeb\x59\x6f\xb6\xb5\x0c\xc0\x49\xe5\xfb\x06\x30\x48\xad\xde\x35\x8b\x74\x9b\xff\x61\x28\xde\x7b\xa8\x8f\x2a\xd8\x83\xf2\x31\xe6\xb1\x61\x63\xd8\x27\xd2\x26\xb6\x7f\x10\x25\xc3\xc6\x71\x94\x2d\x07\x83\x8d\x73\xe3\xc8\x2e\x20\x41\x01\xd9\xd9\xe4\x79\xda\xbd\x9d\xe9\x74\x54\x2c\xf6\x43\xe0\x75\xbe\xc2\xdf\xf1\xef\xe0\xb8\x60\x6e\x75\x48\xec\xce\xb0\x09\x1b\xee\x87\xf7\xda\x08\x4e\x74\x0f\x26\x24\x4a\x69\x27\x46\x87\x68\x46\x19\x8a\x5a\xdb\x71\x9d\x22\xaa\x6f\xfe\xb1\x5f\xe3\x3c\xcd\x93\x9e\x74\xcb\xb8\x61\xe3\x39\xdc\x0d\x3d\xfc\xad\x71\xb0\x5b\x13\x82\xc0\x4b\xe0\xae\xf7\x00\xe7\x95\x60\xb4\xb2\xbf\xaf\xe9\x5f\xdf\xec\x36\x6b\xf8\x12\x77\xea\x0e\x46\x58\x71\xea\x0e\x4e\xdd\xc1\xa9\x3b\x38\x75\x07\xa7\xee\xe0\x3f\xd1\x1d\x58\x5b\x36\x46\x0e\x26\xa1\x7f\xb4\xef\xb9\x02\xfa\xf5\x12\x03\x39\x93\x96\x6a\x7d\xbb\x54\xdb\xfb\x93\x73\x27\x4c\x88\xf5\xf7\x6f\x7f\xf9\x27\x00\x00\xff\xff\x82\x85\xbc\xdc\xf4\x19\x00\x00") + +func fontsUsaflagFlfBytes() ([]byte, error) { + return bindataRead( + _fontsUsaflagFlf, + "fonts/usaflag.flf", + ) +} + +func fontsUsaflagFlf() (*asset, error) { + bytes, err := fontsUsaflagFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/usaflag.flf", size: 6644, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsWavyFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x59\x5b\x6f\xdb\x3a\x12\x7e\xd7\xaf\x98\x07\x01\x95\x81\x75\x64\xd1\x97\xd8\x6f\x56\x64\x26\xd1\x56\x91\x7c\x24\xb9\x4d\x00\x01\x6c\x4e\xeb\xb4\x06\x12\x7b\x91\xe4\x14\x28\x90\x1f\xbf\xe0\x4d\x22\xa5\xa1\x77\x03\x38\xb6\xc5\x99\xe1\x5c\x3e\xce\x0c\xc7\x4f\xcf\x4f\xe4\xd1\x87\x19\x4c\x21\x9a\xc0\x04\x16\xe0\x7d\x7d\xfc\xfd\x07\xfe\xfe\x03\x57\xaf\x87\xc7\x23\x7c\x7e\x3d\xfd\x84\x68\x12\x4e\xe6\x5e\x7a\xfc\xfe\xfc\xcf\x8f\xfd\x1b\xa4\x55\x01\xd9\xe3\xfb\xe1\x38\x8e\xbc\xa7\xc3\xcf\xe7\xfd\x3b\xbc\xee\x9f\xf7\x8f\x6f\x7b\x20\x17\xe4\x82\xc0\x78\x0c\x93\x39\xfc\xfb\x9f\x67\x20\x93\xc9\xdc\xdb\xee\x5f\x5f\x0e\x6f\x6f\x87\xd3\x11\x0e\x6f\xf0\x6b\xff\xba\xff\xfb\x0f\xfc\x3c\xfc\xde\x1f\xe1\xfd\x04\x2f\xa7\x1f\x87\xa7\x3f\xf0\xfe\xeb\xf0\x06\x4f\xa7\xe3\xfb\xbf\xe0\xf1\x0d\x9e\x4f\xc7\x9f\xfc\xfd\xfd\xd7\xde\x13\x04\x87\xfd\xeb\xa7\x37\x38\x3e\xbe\xec\xb9\x8c\xff\x3c\x3f\x7e\xdf\xff\x80\xd3\x11\x1e\xe1\xfb\xe9\xe5\x65\x7f\x7c\x87\xe7\xc3\x71\x7f\xe1\x79\xbe\xbf\x36\x5f\x6b\xcf\x87\xb5\xf7\xe1\xaf\xbd\x93\x2f\x3e\xaf\x3d\x10\x4f\xf8\x23\xf1\x09\xe4\x43\xfe\x27\xbe\xb2\x90\x85\xcc\x5f\x7b\xe3\x70\x1c\x8e\xc5\x13\xb1\x22\x89\xf8\xf7\x20\xfd\xe6\xaf\xbd\x8b\x74\xe4\xab\x27\xe2\x6d\xed\x9d\x42\x7f\xed\x85\x27\x5f\x8b\x64\x9c\xb8\xe1\x54\xc1\xbd\x24\x35\xb4\xe1\xef\x92\x95\x73\x05\x62\x9f\xa6\xa5\x6a\xc4\x77\xce\x1a\x4a\x42\xa5\x22\xff\xc2\x1a\xa1\x1e\x84\x92\x08\xac\x45\x9f\x7d\x88\x45\xff\xc3\x5c\x54\x9b\x09\x17\x84\x7e\x6b\x88\xcf\x98\x36\x41\x1b\x62\x52\x76\x96\x09\x1d\x4d\x4d\x18\x7f\xf7\x82\x0b\xa9\x42\xc3\xa4\x2b\x5a\x5f\xf0\xc5\x30\x10\x6f\xc0\x46\x72\x13\x69\x18\xeb\x0c\x63\xad\xb5\xe2\xa1\x90\x21\x05\xf9\x86\x41\x21\x0b\x14\xbf\xb9\x07\x13\x2c\x23\x29\x8d\x99\x81\x50\x9a\x49\x82\x9e\x66\xea\xa1\x94\x14\x18\x56\x83\x58\x08\x04\x71\x80\x8a\x53\xbe\xb6\xc4\x69\x3f\xb5\xbe\x32\x1f\x84\x3a\xd8\xdc\x75\x8d\x05\x3d\xe1\xf5\xf1\xb8\xb5\x54\xc0\x44\xb2\x78\x1d\x70\x84\x96\x27\x13\xa0\x4c\xe9\x1f\x02\x13\x8b\x01\x04\x4c\x01\x87\x31\xa6\xe2\xaa\x48\x84\xa2\x21\x84\x16\x44\x14\xff\x48\x2e\xb2\x5e\xd4\xb4\xf0\x6f\xc2\x09\x17\x0a\xf9\xbe\xc5\xa9\x22\xd7\x17\x2b\x22\x34\x52\x4e\x64\x3d\x4e\x73\xd1\x07\x90\x06\x81\xbd\x27\x13\x7b\x72\x1b\x0c\x85\xc0\xd0\x36\xd0\xf1\xb2\xc5\xc2\x48\x38\x34\xb0\xf6\xd4\x7e\x92\x2c\x4d\xdf\x4e\x89\xe8\x91\xd8\x2c\xb4\x61\xa5\xb7\xd4\x86\x18\xe7\x5b\x2f\x35\xa1\x54\xa7\xd5\x67\x40\x00\x0e\x02\x6d\xab\x04\xd9\xb9\xc8\x48\x2f\xf1\xff\x18\xe7\xbd\x96\x8b\x70\x42\x2f\x27\xa8\xd5\x80\x89\xa8\x5e\x70\x57\x68\x47\x69\x4f\xb5\xf6\xfa\x3a\x34\x3a\x1b\x8a\x3d\x43\x44\x5b\xe9\xc2\x06\xc4\x22\x34\x83\xc5\x76\xfd\xbe\x25\x69\xa9\xa0\x0d\xbe\xd8\x54\x04\xc1\x3a\xf4\x22\x64\x23\x99\x74\x3a\x85\xa4\xa2\x61\x9b\x37\xf4\xe9\xe4\xc9\x94\x67\x54\x66\x9c\x26\xd0\xf9\xb3\xb1\x33\x0c\xf0\xbc\xcb\x3e\x3a\x76\x71\x3c\x9b\x5e\x1d\xd0\x1f\xbb\xb3\xda\x9d\xd0\x2e\x69\xab\x52\x21\x81\x12\x58\x85\x40\x24\x0c\x30\xc3\xd2\x62\x40\x31\x71\x9e\x7e\x9e\xd3\x67\x9a\x05\x48\x56\x07\x4b\x5c\xc0\xcc\x6a\xa4\xf3\xe3\x68\xa0\x44\x9b\xbc\x82\x2e\x79\x0d\x35\x04\x2c\xa5\x05\x5d\x4a\x03\x95\x85\x02\x9d\xa4\xdb\x32\x08\x12\x3c\x8d\xb1\x71\x9b\xb8\x02\xbb\xb2\x2a\x0b\x65\xd6\xee\xf6\x84\x81\x33\x47\x7d\x3b\xec\xe4\xdc\xcb\x0d\xb6\x5b\xce\x5b\x0f\x52\xf5\x36\x04\x23\xa3\x9c\x75\x71\xe9\x1b\x29\xab\x97\xca\x2f\xa6\xdb\x95\x0f\xac\xd8\xb7\xa8\x69\x6c\x44\x1b\x89\xc0\x1b\xb1\x16\x13\x28\x57\xd0\x18\x41\xd2\xb1\x42\x23\xc8\x98\xa2\x19\xa9\x33\xce\x74\x05\x10\x28\x17\x80\x37\x52\x98\x6e\x3c\xd4\x4b\x9c\x89\x96\x46\xd1\x9b\x06\x86\x4d\xd8\x6f\x0e\xe0\xc4\x44\x77\x83\x57\x17\xbd\x8
8\x26\x38\x38\x81\x5a\x0d\xa5\xdf\xc2\xce\x40\x4e\xa0\x96\x19\xf8\x9d\x53\xc5\xc9\xd5\x0b\x03\x0c\xa8\x05\x18\x70\xf4\x8a\x95\x9d\xe1\xa3\xc5\x04\x20\x2f\xc6\x57\x25\x8d\x3f\x43\xb5\x8d\x13\x3a\xec\x18\xa3\x45\x04\x90\xe6\x5f\x68\x59\xd3\x0d\xd0\xfb\x24\x8b\xef\xe2\x3a\x2d\x72\xb8\x8b\xcb\xcf\xed\x21\xf9\xd0\x87\x24\x5a\x10\x80\x84\xe6\x35\x54\xe9\x4d\xae\x7d\xf6\x21\x55\xe6\x89\x57\x7c\xe6\x74\x53\x80\x6d\xb1\xcb\x37\x9a\x90\xa9\x13\x2c\x88\x4e\xba\x53\x52\xc4\x33\x80\x64\x57\x96\x34\x4f\x1e\x4c\xc1\xdf\x4e\x9f\xfc\xb5\xf7\x49\x64\xf4\x96\x76\x0e\xf0\x40\x73\x4d\xa6\x00\xd2\x48\xf4\x8e\xc7\xa1\xec\x36\x3a\xd1\x0b\x80\xab\xb2\xf8\x4c\x73\xb8\x8a\xcb\x16\x1a\x9f\x34\x34\xa2\xc5\x25\x40\x45\x13\x61\xb3\x94\xd9\x9e\x8e\x40\xf7\x69\x9c\x6c\x09\xb0\x49\x63\x5a\xd2\x2a\xad\xda\x88\x74\xc7\xa3\xdd\x6f\x05\x90\x14\xdb\x87\x32\xbd\xb9\xad\x5b\x81\x5c\x58\x32\xea\x83\x2c\xba\x9c\x00\x5c\xd3\xbb\x34\x4f\x73\x0a\x45\xb9\x49\xf3\x38\x83\x34\xdf\xa4\x49\x5c\x17\xa5\x85\x91\xb1\xb2\x4b\x31\x46\x00\x19\xbd\xae\xc7\xdb\x22\xcd\xeb\x34\xbf\x81\x4d\xb1\xbb\xca\x28\xc4\xf9\x4d\x46\xe1\xaf\x5d\x51\x1b\x51\x14\xc0\x09\x45\x77\xd6\xd5\x89\xe8\x92\x70\x78\x58\x81\xd4\xdd\xc4\x87\xb9\xd5\x14\xa0\x2a\xae\x6b\xb8\x7d\xd8\xde\xd2\x5c\x78\x90\x99\x05\x22\xba\x9c\x01\x94\xf4\x26\xad\x6a\x5a\xd2\x8d\x65\x73\x89\xd8\x3c\x07\xb8\x8b\x93\xb2\xc8\x55\xa5\xb3\xaa\x10\x27\x58\x00\x6c\xe8\x4d\x49\xa9\x14\x55\x74\x9b\xe9\x0d\x2f\x01\xb6\xd9\xae\x1a\xdf\xa5\xf9\xae\xb2\x0c\x50\x47\x9c\x99\x06\x2c\x01\xaa\xdd\x96\x96\x55\x52\xa6\xdb\x1a\xea\xaf\x85\x47\xc4\x7a\xfb\xe2\x54\xab\x1e\xd5\x6d\x49\xa9\x37\x1d\xec\xbd\x9c\x00\xc4\xc9\xae\xa6\x10\x27\xfc\x1c\xe8\x56\xd6\x24\x89\x00\xee\xd2\xa4\x2c\x0c\x8c\xf6\x9b\xbc\x50\x25\x83\x68\x49\x00\xb6\x69\x96\x94\xc5\x57\xed\x39\x81\x3f\x79\x69\xfb\x30\x23\xb1\x9c\x72\xb9\x9b\x4d\x46\x61\x53\xd4\xe6\xdd\x45\x6f\xcc\x8f\x11\xdd\xa4\x59\x16\x9b\x2a\x79\x95\x88\xf7\x72\x6e\x5b\x58\xe4\xd4\x8b\x86\xca\x2f\x78\x78\xaa\x64\x97\xb9\x30\xd9\xa5\x27\x1b\x93\xcb\x4b\x00\x81\xfa\xff\x13\x94\xa2\x0e\x34\x1a\x99\x5a\xc8\x12\xe0\xcb\x2e\xbb\x89\x4b\xb8\x2e\x63\x79\x2a\x8b\x9c\x33\xc7\x65\x4d\x4b\x09\xe5\x48\x34\x47\x33\x83\x6b\x85\x73\xdd\xc6\xd9\xb5\xc9\x42\x3a\x96\xd5\x64\xc8\x22\x62\xae\xb7\xaa\x24\xe3\xb4\xbf\xd7\xca\x4c\x98\x7f\xed\x68\xd5\x33\x49\x64\x06\xa3\xe9\x89\x56\x04\x20\x8b\xeb\x34\x87\x24\xde\xa6\x75\x9c\x41\x46\xeb\x9a\x96\x10\xc3\xd7\xb4\xbe\x85\x9b\x32\xfe\x42\x3d\x50\xfd\x2c\x5a\x70\xa2\xd5\xf4\xbc\x0c\x81\x48\x9e\x64\x43\x67\xd1\x8a\x56\xb3\xf3\x32\x92\xb4\x4c\x76\x77\xd7\x19\xbd\xf7\x00\x64\xaf\xe8\x10\x34\x3f\x2f\xa8\x4e\xb3\x0d\xe5\x97\xf6\x73\xca\x2c\xce\xcb\xe8\x52\xed\xd9\x4a\x1c\xad\x2e\xcf\xcb\x29\x39\x0c\xe3\xab\x42\x78\xb8\x38\xe7\xe1\xa5\x4b\x10\x95\xf7\x51\xa6\x94\x10\xf8\x0f\x21\x6c\x2f\x7f\x1a\xff\xab\x95\x43\x42\xa2\xfc\xab\x4e\x26\x7e\xfd\xac\xa4\x1e\x64\x32\x71\x48\xa1\x16\x5a\x58\xe3\xbe\x88\x92\x49\x74\x5e\x86\x42\x8b\x9c\xfd\xb8\x64\xb8\x50\x4b\x87\x68\x61\x12\x2d\x0e\x41\x2e\xe8\x52\x57\xa4\x1d\x72\x5c\xf0\x4d\x31\xc7\xa0\xb7\x65\x32\x71\x21\x37\xc5\x1c\xe3\x90\xe1\x42\x6e\xea\x74\x8c\x43\x90\x0b\xba\xa9\xcb\x31\x7d\x39\x0a\x31\x2e\xe4\xd2\xfa\x56\x77\x88\x4c\x0f\x42\x2c\xd0\x93\x89\x0b\xb2\xb9\x75\x92\xf5\x51\x76\x5d\xf8\x49\xe4\x02\x6d\x81\xa6\x38\xac\x6d\x26\x91\x0b\xb4\x05\x9a\xe2\x70\x19\x2e\xd0\x16\xee\x14\x87\x0b\x72\x81\xb6\x40\x53\x1c\x2e\xc3\x05\xd8\xc2\x99\xe2\x70\x39\xbc\x61\xda\x65\x75\xba\xcd\x78\x0d\x6e\xfb\x54\x4e\x74\x6f\xd6\x7f\x12\xb9\xa0\xa9\x76\xac\x6a\xde\x08\x77\x09\x48\xce\x77\xc2\xde\x76\x2e\x58\xee\xf0\x58\x22\x53\x13\x12\xb9\x20\xb9\xb3\x63\x
09\xa1\xef\x96\xe1\xc2\xe6\xee\x4c\x2c\x31\x41\xc4\x05\xce\xdd\x20\x0e\xff\xeb\xea\x46\x88\x0b\xa4\x0f\x96\x61\x82\x1b\x19\xed\x10\xe2\x02\x68\x7d\x5b\x94\x79\x6f\x96\xd2\x1b\x21\x12\xd2\x82\xb2\xba\x8b\xb3\x96\xb5\xba\x8d\xcb\x2d\x54\xdd\x08\x4c\x0f\x1d\xba\xfb\xa0\x3a\xa6\x64\x86\x0a\xb0\xfb\x90\x46\x0d\x30\xfa\x77\x4d\x42\xe6\xe7\xb8\x4d\xcb\x51\xee\xc5\x39\x6e\x33\x98\x32\x96\x98\x88\xcb\x73\x22\xe4\x91\x54\x27\x12\xe3\x5e\x9e\xe3\x1e\x5c\xed\x30\x09\xab\x73\x12\xcc\x4e\xa3\x70\x79\x61\x3a\xc1\x45\x50\xe3\x47\x19\x3d\x3c\x02\xd6\x0e\xc1\xf4\x18\x8c\x4c\x23\x94\xbf\xdf\x61\xf8\xf6\xf0\xad\x92\x7b\x13\x94\x97\xf6\xa3\xef\x1c\xc2\x91\x29\x0e\x40\xbb\xab\xd0\x7d\x15\x2e\x01\x47\x20\xd2\x53\xe8\x9f\x7c\x98\x1a\x61\xdb\x62\x70\x28\x22\x1d\x85\xba\xa7\xe3\x52\x70\x48\x5a\xfd\x84\xaf\xc6\xa1\x22\x88\xd2\x8d\x38\x0a\xad\x0e\xc2\x57\x3f\x86\x18\x5c\x38\xfa\x90\x9e\xc1\xd7\x93\x5a\x63\xae\x48\xa6\x38\xf4\x86\x9d\x82\xb8\x14\x9e\x7a\xdc\x33\x1c\x75\xa2\x3f\x80\x7b\x1f\x19\xc9\x6a\x46\x1c\x6e\x39\x76\xe2\xfc\xfe\x48\x93\xcc\x70\xc0\x15\x58\xba\x19\xcc\xbc\xc8\x0c\x07\x5b\x81\xa5\x1b\x84\x1b\x07\x1a\xd2\x07\xa8\x74\x63\x8a\x90\xf0\x98\xe1\x20\x2b\x5c\xc6\xf7\x14\xc0\xc1\x35\xa8\xfd\xce\xa1\x1f\x99\x5d\x02\x6c\xd2\x2f\x69\xd5\x4d\xa6\xc4\x74\x5a\xdd\xba\x4f\xc6\x50\x85\xcc\x70\x78\xf5\xeb\xbe\xe1\xad\xd0\xda\x0a\x87\xd7\x0e\x89\xd4\x70\x08\x49\xe6\x38\xbc\x76\xfd\xb4\xe0\xe0\xc6\x31\x86\x54\x79\xd5\x4c\x23\x22\x70\xa0\x0d\xea\xbb\x73\x8e\x4a\xe6\x38\xd8\x1e\xd0\xcc\xd6\x8e\xc3\x8d\x41\x35\x99\xe3\x80\x93\x75\xbd\x1d\x6e\xcb\xec\x6a\x96\x77\xce\x8a\x03\xed\x01\x53\xdf\xa9\x80\xfe\xfb\x6f\x00\x00\x00\xff\xff\xa7\x48\xc0\xee\xe6\x20\x00\x00") + +func fontsWavyFlfBytes() ([]byte, error) { + return bindataRead( + _fontsWavyFlf, + "fonts/wavy.flf", + ) +} + +func fontsWavyFlf() (*asset, error) { + bytes, err := fontsWavyFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/wavy.flf", size: 8422, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _fontsWeirdFlf = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x57\x4f\x8f\xa3\xb8\x17\xbc\xfb\x53\x94\xfa\xc7\x4f\x49\x66\x1a\x3c\x3d\xa3\x9e\x43\x9f\xd0\x4a\x7b\x5c\xad\xb4\xb7\x95\x90\x1c\x07\x4c\x60\x9b\x3f\x2d\x20\xdb\xdd\x92\x3f\xfc\x0a\x3f\x07\x42\x62\x02\x73\x08\x0e\x50\x2e\xbf\x57\xcf\x2e\x9b\xb4\x48\xbf\x4b\x0f\x3f\xf1\x8c\xa7\x6f\xf8\x86\xa7\x1f\xec\x5d\xe5\x4d\x12\xa4\x45\x8a\xed\xbf\xaa\x69\xf3\xba\xc2\xf7\x1d\x3b\x7c\xbe\x00\xbf\xc9\x16\x7f\xa8\xfc\x1f\xd5\x00\x28\xcd\x9f\x30\xaf\xd2\x3a\x78\xcf\xab\xa0\x3b\xa9\xa0\x2a\x00\x1c\x64\x1b\x26\xb2\x54\xaf\xc1\x6b\x97\x05\xad\x62\x69\xfe\xa1\x12\xf4\x0c\x7f\x7d\xca\x0a\x7f\xd7\xa7\xf8\x15\xc0\x67\xdf\x86\x71\x1b\x9c\x1a\x75\xcc\x2b\x19\xc4\x92\xb5\x75\xa9\xd0\xbe\xa9\x38\x97\x05\xe2\x4c\x36\x32\xee\x54\xd3\x62\xf3\xbf\xff\x7f\xd9\x40\x75\x71\x00\xd9\x28\x54\x75\x87\x52\x76\x71\x96\x57\xc7\x47\x74\x99\xfa\x34\x8f\xd3\xa6\x2e\x51\x77\x99\x6a\x90\xd6\x55\xd7\x06\xec\xf7\x8f\xb7\x42\x56\xb2\xeb\xb3\xa8\x53\xa4\x79\xd3\x76\x28\xf2\x4a\xbd\xb0\x3e\x75\xf8\x78\x28\xe5\x31\x8f\x51\x9d\xca\x83\x6a\x1e\x90\xd6\x0d\xd2\xbc\x50\xc8\x13\x55\x75\x79\x9a\xc7\xa6\x33\x93\x00\xe0\xa3\xcd\xea\x53\x91\x40\x16\xef\xf2\xb3\xc5\x41\x61\x2f\x37\x8f\xa6\x53\x55\xbf\x33\x8f\x40\x5d\xa6\xf0\x90\xc9\x26\x39\x14\xb2\x7a\x7d\x80\xef\xe3\xad\xc9\xab\xae\x85\x6c\x21\x61\x9e\x3e\xe2\x70\xea\x10\xcb\x6a\xd3\xf5\x34\x6d\x79\x6a\x33\x95\xb0\x9f\xc4\x90\xa9\xfc\x98\x75\x7d\xc4\x72\x54\x81\x3d\xdf\x79\xf9\x68\x34\xc9\xab\xb8\x38\x25\x79\x75\x44\xa2\xda\x58\x55\x89\x6a\x5a\xf6\xfd\x9b\xe9\x56\xca\x0f\x93\x39\x0a\x55\x1d\xbb\x0c\x5b\xf5\x71\x06\xc7\x75\x59\xaa\x8a\x84\x69\x77\xf8\x0a\x89\xf4\x94\x1c\x15\x52\x19\x77\x75\xc3\x9e\x9e\x0d\x43\xa2\x52\x79\x2a\x3a\x0a\xb6\xac\x13\x65\x12\xef\xb2\xbc\x35\x72\x63\x5b\xe4\xaf\x0a\x0f\x7e\x89\xa7\xe7\x07\xd4\x95\xe1\x95\x55\x62\x78\x77\xec\xe9\x87\x61\x21\xa5\xfb\xf0\x27\xc3\x1a\xf1\xbc\x70\x45\x13\xb2\x5e\x07\xf4\xb7\xdc\x3c\xdd\xd2\x4b\x4d\x8d\x30\x0d\x41\xa8\x0d\x99\xc7\x39\x75\xbf\xbc\xc2\xbe\x1d\xf8\x10\x20\x40\xc8\xfc\xaf\xfe\x57\x7f\x68\xb0\xc7\x1e\x23\x9f\x87\x47\xd1\x77\xe7\x1a\x3a\x64\x5b\xa1\x85\x19\x4c\x63\x17\x32\x2d\x34\x0d\xb3\x1f\xa2\x0c\x59\x0d\x70\x13\xaa\xe9\xd5\x0f\xcd\x81\xfa\x62\x68\x41\x01\x42\x7b\x3d\x9f\xb0\xb8\xc8\xf0\x09\x1e\x4d\x52\xe9\x5f\x99\x11\xc6\x0b\xc5\x66\x5e\x87\xac\x1f\x4a\x63\xfc\x21\xb2\x2f\xa2\xfe\x46\x8f\x3f\x3e\x10\x9a\x4b\xa0\x83\x90\xf9\x5f\xfc\x90\x6d\xf4\x7e\x24\x1c\x24\x62\xd0\x20\x65\x86\xff\x53\x7d\xa7\xca\x0e\x57\x7e\x55\x2f\x5b\x21\x21\x6e\x4a\x8b\x55\x7c\x3e\x2e\x94\x85\x51\x16\xe0\x26\xf1\xfe\x09\x9f\x16\x55\x08\x52\xd6\x54\x0a\xa6\xd1\x00\xac\xb2\x93\x41\x6d\x11\x74\x3f\xc8\x16\x9a\x46\x34\x45\xf5\xa8\xa8\xb8\x65\xdd\x19\x56\xaa\x07\x37\x50\x7e\xcd\x2a\x6c\xd6\x9a\xb2\xd6\x74\x67\x58\xaf\xa0\xb6\xb8\x14\xeb\x14\x4a\x63\x38\x02\xf0\x4c\x22\x67\x2d\x5d\xac\x97\xd0\x2d\x41\x67\x15\xb8\x15\x6b\x31\x80\x31\xd6\x55\xac\x4b\x0a\x0c\xb5\x16\x17\x75\xf7\xcf\xb3\xe3\x1e\x88\x4f\x40\xfd\x83\x90\xf5\xec\x51\xff\xd2\x2c\x21\xea\xed\x9d\xaf\xa4\x05\x5d\x2f\x9e\xdb\xc5\x62\x7b\x9d\x59\xdc\x0a\x51\x2e\xdc\xe9\x39\x53\xe8\x9f\x82\x93\x42\xde\x3a\x85\xce\x62\x3a\x75\xd7\x04\xd5\x2b\x75\xf7\xa8\x9a\x4b\x85\x3f\xb3\x2e\xac\x12\xf7\x74\xa2\xb4\xae\x15\x10\x73\xd0\xeb\x05\x3f\x8d\xf5\xee\x24\x75\xac\x92\x39\xb1\xa8\x7c\xdb\x9e\xcc\xfe\x26\xa6\xe6\xe9\x8b\xeb\x2e\x64\x18\x46\x9a\x1d\x28\xb2\xe1\x47\xd7\x7e\xb5\x5a\x6a\x0b\xd5\xdc\x4c\x0b\x8e\x3b\xe1\x9f\xa1\x34\x83\x34\x41\x69\x97\xc1\x9c\x21\xad\xb0\x39\xf7\x64\xbb\x5b\x95\x09\xab\x73\x5b\x72\xb1\xce\x88\xe5\x98\x41\xf3\xde\x65\x04\x20\x43\x26\x3f\x1e\xae\xe
e\x3a\xdd\xcf\xdd\x01\xf5\x22\x6b\x70\xd1\x22\x54\x93\xc5\x6b\xae\xdd\x50\x6e\x12\xe2\x74\x36\x30\xac\x7c\xa6\xa4\x6b\xbc\xf0\x2c\x93\x61\xb5\x1e\x63\x59\xaf\xa0\x62\xdc\xec\x5f\x6c\xbb\xf7\x87\x79\x1e\xd1\x6c\x8f\x68\xce\x47\xb4\x39\x46\x83\x7e\xe2\xf2\x54\xf0\x42\xad\xbf\x31\xb4\x66\x37\x8d\x2e\x8f\x1a\xb8\x3c\x6a\x0c\x1b\x2d\xe6\x8e\x6a\xbe\xef\x7b\xc3\xa1\x25\x9a\x3f\xb4\x4c\x98\x6e\xb6\x28\xed\x14\x10\x8b\xbb\xd9\x0c\xab\xcb\xa9\x06\xe8\xb8\x49\xff\x0a\xab\x10\x62\xe7\xf4\xbf\x5b\x57\x98\xc4\xb1\x18\xeb\x18\x80\x10\x7c\x59\x01\xb7\xff\x79\x2e\xff\x23\x03\x04\x79\x1f\xc5\xe2\xf5\x1c\x80\x77\x67\xb8\x31\x89\x9d\x63\x61\xdf\xb1\xdb\xcb\xec\x40\x21\xeb\x79\xcf\x73\x0b\xf1\x0b\xd0\x95\x53\x61\x84\xea\x95\x50\x7d\x9e\x26\x8b\xb1\x2e\xd7\xd7\x5a\xdf\xac\x47\x61\x71\x83\x75\xac\xc0\x55\x0a\xd8\xe5\x79\xcf\xf9\xdc\xac\x7c\x19\x3a\x1a\xe0\x22\x34\xb2\xce\x17\xd9\x58\xf9\xbc\x58\x56\x25\x7e\xb3\xc4\xcc\x37\x5f\xc8\x84\xf9\xd2\xe9\xdd\xcf\xdc\x82\xfc\xcf\xce\xc2\xc9\x8c\xf4\x8c\xe7\x19\xd3\xeb\x6f\xb5\xb0\xfd\xcc\xad\xbf\x21\x43\x05\x7d\x49\x44\x82\xbb\xbf\x51\x86\x08\x3c\x5f\x08\x7f\xdd\x71\xe4\x0a\x7a\x6f\xa3\xf2\x7c\x60\x25\x74\xaa\xa8\x2f\xcc\x18\xf7\xcd\x73\x0e\x3a\xcf\x0a\xf8\x6b\x0f\x8f\x2e\xf3\xd4\xf6\x5c\xfd\x5f\x00\x00\x00\xff\xff\xa0\x78\x63\x55\x69\x12\x00\x00") + +func fontsWeirdFlfBytes() ([]byte, error) { + return bindataRead( + _fontsWeirdFlf, + "fonts/weird.flf", + ) +} + +func fontsWeirdFlf() (*asset, error) { + bytes, err := fontsWeirdFlfBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "fonts/weird.flf", size: 4713, mode: os.FileMode(436), modTime: time.Unix(1529341546, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. 
+var _bindata = map[string]func() (*asset, error){ + "fonts/3-d.flf": fonts3DFlf, + "fonts/3x5.flf": fonts3x5Flf, + "fonts/5lineoblique.flf": fonts5lineobliqueFlf, + "fonts/acrobatic.flf": fontsAcrobaticFlf, + "fonts/alligator.flf": fontsAlligatorFlf, + "fonts/alligator2.flf": fontsAlligator2Flf, + "fonts/alphabet.flf": fontsAlphabetFlf, + "fonts/avatar.flf": fontsAvatarFlf, + "fonts/banner.flf": fontsBannerFlf, + "fonts/banner3-D.flf": fontsBanner3DFlf, + "fonts/banner3.flf": fontsBanner3Flf, + "fonts/banner4.flf": fontsBanner4Flf, + "fonts/barbwire.flf": fontsBarbwireFlf, + "fonts/basic.flf": fontsBasicFlf, + "fonts/bell.flf": fontsBellFlf, + "fonts/big.flf": fontsBigFlf, + "fonts/bigchief.flf": fontsBigchiefFlf, + "fonts/binary.flf": fontsBinaryFlf, + "fonts/block.flf": fontsBlockFlf, + "fonts/bubble.flf": fontsBubbleFlf, + "fonts/bulbhead.flf": fontsBulbheadFlf, + "fonts/calgphy2.flf": fontsCalgphy2Flf, + "fonts/caligraphy.flf": fontsCaligraphyFlf, + "fonts/catwalk.flf": fontsCatwalkFlf, + "fonts/chunky.flf": fontsChunkyFlf, + "fonts/coinstak.flf": fontsCoinstakFlf, + "fonts/colossal.flf": fontsColossalFlf, + "fonts/computer.flf": fontsComputerFlf, + "fonts/contessa.flf": fontsContessaFlf, + "fonts/contrast.flf": fontsContrastFlf, + "fonts/cosmic.flf": fontsCosmicFlf, + "fonts/cosmike.flf": fontsCosmikeFlf, + "fonts/cricket.flf": fontsCricketFlf, + "fonts/cursive.flf": fontsCursiveFlf, + "fonts/cyberlarge.flf": fontsCyberlargeFlf, + "fonts/cybermedium.flf": fontsCybermediumFlf, + "fonts/cybersmall.flf": fontsCybersmallFlf, + "fonts/diamond.flf": fontsDiamondFlf, + "fonts/digital.flf": fontsDigitalFlf, + "fonts/doh.flf": fontsDohFlf, + "fonts/doom.flf": fontsDoomFlf, + "fonts/dotmatrix.flf": fontsDotmatrixFlf, + "fonts/drpepper.flf": fontsDrpepperFlf, + "fonts/eftichess.flf": fontsEftichessFlf, + "fonts/eftifont.flf": fontsEftifontFlf, + "fonts/eftipiti.flf": fontsEftipitiFlf, + "fonts/eftirobot.flf": fontsEftirobotFlf, + "fonts/eftitalic.flf": fontsEftitalicFlf, + "fonts/eftiwall.flf": fontsEftiwallFlf, + "fonts/eftiwater.flf": fontsEftiwaterFlf, + "fonts/elite.flf": fontsEliteFlf, + "fonts/epic.flf": fontsEpicFlf, + "fonts/fender.flf": fontsFenderFlf, + "fonts/fourtops.flf": fontsFourtopsFlf, + "fonts/fuzzy.flf": fontsFuzzyFlf, + "fonts/goofy.flf": fontsGoofyFlf, + "fonts/gothic.flf": fontsGothicFlf, + "fonts/graffiti.flf": fontsGraffitiFlf, + "fonts/hollywood.flf": fontsHollywoodFlf, + "fonts/invita.flf": fontsInvitaFlf, + "fonts/isometric1.flf": fontsIsometric1Flf, + "fonts/isometric2.flf": fontsIsometric2Flf, + "fonts/isometric3.flf": fontsIsometric3Flf, + "fonts/isometric4.flf": fontsIsometric4Flf, + "fonts/italic.flf": fontsItalicFlf, + "fonts/ivrit.flf": fontsIvritFlf, + "fonts/jazmine.flf": fontsJazmineFlf, + "fonts/jerusalem.flf": fontsJerusalemFlf, + "fonts/katakana.flf": fontsKatakanaFlf, + "fonts/kban.flf": fontsKbanFlf, + "fonts/larry3d.flf": fontsLarry3dFlf, + "fonts/lcd.flf": fontsLcdFlf, + "fonts/lean.flf": fontsLeanFlf, + "fonts/letters.flf": fontsLettersFlf, + "fonts/linux.flf": fontsLinuxFlf, + "fonts/lockergnome.flf": fontsLockergnomeFlf, + "fonts/madrid.flf": fontsMadridFlf, + "fonts/marquee.flf": fontsMarqueeFlf, + "fonts/maxfour.flf": fontsMaxfourFlf, + "fonts/mike.flf": fontsMikeFlf, + "fonts/mini.flf": fontsMiniFlf, + "fonts/mirror.flf": fontsMirrorFlf, + "fonts/mnemonic.flf": fontsMnemonicFlf, + "fonts/morse.flf": fontsMorseFlf, + "fonts/moscow.flf": fontsMoscowFlf, + "fonts/nancyj-fancy.flf": fontsNancyjFancyFlf, + "fonts/nancyj-underlined.flf": 
fontsNancyjUnderlinedFlf, + "fonts/nancyj.flf": fontsNancyjFlf, + "fonts/nipples.flf": fontsNipplesFlf, + "fonts/ntgreek.flf": fontsNtgreekFlf, + "fonts/o8.flf": fontsO8Flf, + "fonts/ogre.flf": fontsOgreFlf, + "fonts/pawp.flf": fontsPawpFlf, + "fonts/peaks.flf": fontsPeaksFlf, + "fonts/pebbles.flf": fontsPebblesFlf, + "fonts/pepper.flf": fontsPepperFlf, + "fonts/poison.flf": fontsPoisonFlf, + "fonts/puffy.flf": fontsPuffyFlf, + "fonts/pyramid.flf": fontsPyramidFlf, + "fonts/rectangles.flf": fontsRectanglesFlf, + "fonts/relief.flf": fontsReliefFlf, + "fonts/relief2.flf": fontsRelief2Flf, + "fonts/rev.flf": fontsRevFlf, + "fonts/roman.flf": fontsRomanFlf, + "fonts/rot13.flf": fontsRot13Flf, + "fonts/rounded.flf": fontsRoundedFlf, + "fonts/rowancap.flf": fontsRowancapFlf, + "fonts/rozzo.flf": fontsRozzoFlf, + "fonts/runic.flf": fontsRunicFlf, + "fonts/runyc.flf": fontsRunycFlf, + "fonts/sblood.flf": fontsSbloodFlf, + "fonts/script.flf": fontsScriptFlf, + "fonts/serifcap.flf": fontsSerifcapFlf, + "fonts/shadow.flf": fontsShadowFlf, + "fonts/short.flf": fontsShortFlf, + "fonts/slant.flf": fontsSlantFlf, + "fonts/slide.flf": fontsSlideFlf, + "fonts/slscript.flf": fontsSlscriptFlf, + "fonts/small.flf": fontsSmallFlf, + "fonts/smisome1.flf": fontsSmisome1Flf, + "fonts/smkeyboard.flf": fontsSmkeyboardFlf, + "fonts/smscript.flf": fontsSmscriptFlf, + "fonts/smshadow.flf": fontsSmshadowFlf, + "fonts/smslant.flf": fontsSmslantFlf, + "fonts/smtengwar.flf": fontsSmtengwarFlf, + "fonts/speed.flf": fontsSpeedFlf, + "fonts/stampatello.flf": fontsStampatelloFlf, + "fonts/standard.flf": fontsStandardFlf, + "fonts/starwars.flf": fontsStarwarsFlf, + "fonts/stellar.flf": fontsStellarFlf, + "fonts/stop.flf": fontsStopFlf, + "fonts/straight.flf": fontsStraightFlf, + "fonts/tanja.flf": fontsTanjaFlf, + "fonts/tengwar.flf": fontsTengwarFlf, + "fonts/term.flf": fontsTermFlf, + "fonts/thick.flf": fontsThickFlf, + "fonts/thin.flf": fontsThinFlf, + "fonts/threepoint.flf": fontsThreepointFlf, + "fonts/ticks.flf": fontsTicksFlf, + "fonts/ticksslant.flf": fontsTicksslantFlf, + "fonts/tinker-toy.flf": fontsTinkerToyFlf, + "fonts/tombstone.flf": fontsTombstoneFlf, + "fonts/trek.flf": fontsTrekFlf, + "fonts/tsalagi.flf": fontsTsalagiFlf, + "fonts/twopoint.flf": fontsTwopointFlf, + "fonts/univers.flf": fontsUniversFlf, + "fonts/usaflag.flf": fontsUsaflagFlf, + "fonts/wavy.flf": fontsWavyFlf, + "fonts/weird.flf": fontsWeirdFlf, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
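A minimal usage sketch for the generated accessors above (illustrative only, not part of the vendored file; the embedded payloads are gzip-compressed, and bindataRead, defined earlier in this file, decompresses them). The AssetDir helper described by the preceding comment follows below.

	data, err := Asset("fonts/wavy.flf") // decompressed .flf bytes
	if err != nil {
		// unknown asset name or corrupt embedded payload
	}
	_ = data
	banner := MustAsset("fonts/standard.flf") // panics instead of returning the error
	_ = banner
	for _, name := range AssetNames() { // e.g. "fonts/usaflag.flf"
		fmt.Println(name)
	}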
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "fonts": &bintree{nil, map[string]*bintree{ + "3-d.flf": &bintree{fonts3DFlf, map[string]*bintree{}}, + "3x5.flf": &bintree{fonts3x5Flf, map[string]*bintree{}}, + "5lineoblique.flf": &bintree{fonts5lineobliqueFlf, map[string]*bintree{}}, + "acrobatic.flf": &bintree{fontsAcrobaticFlf, map[string]*bintree{}}, + "alligator.flf": &bintree{fontsAlligatorFlf, map[string]*bintree{}}, + "alligator2.flf": &bintree{fontsAlligator2Flf, map[string]*bintree{}}, + "alphabet.flf": &bintree{fontsAlphabetFlf, map[string]*bintree{}}, + "avatar.flf": &bintree{fontsAvatarFlf, map[string]*bintree{}}, + "banner.flf": &bintree{fontsBannerFlf, map[string]*bintree{}}, + "banner3-D.flf": &bintree{fontsBanner3DFlf, map[string]*bintree{}}, + "banner3.flf": &bintree{fontsBanner3Flf, map[string]*bintree{}}, + "banner4.flf": &bintree{fontsBanner4Flf, map[string]*bintree{}}, + "barbwire.flf": &bintree{fontsBarbwireFlf, map[string]*bintree{}}, + "basic.flf": &bintree{fontsBasicFlf, map[string]*bintree{}}, + "bell.flf": &bintree{fontsBellFlf, map[string]*bintree{}}, + "big.flf": &bintree{fontsBigFlf, map[string]*bintree{}}, + "bigchief.flf": &bintree{fontsBigchiefFlf, map[string]*bintree{}}, + "binary.flf": &bintree{fontsBinaryFlf, map[string]*bintree{}}, + "block.flf": &bintree{fontsBlockFlf, map[string]*bintree{}}, + "bubble.flf": &bintree{fontsBubbleFlf, map[string]*bintree{}}, + "bulbhead.flf": &bintree{fontsBulbheadFlf, map[string]*bintree{}}, + "calgphy2.flf": &bintree{fontsCalgphy2Flf, map[string]*bintree{}}, + "caligraphy.flf": &bintree{fontsCaligraphyFlf, map[string]*bintree{}}, + "catwalk.flf": &bintree{fontsCatwalkFlf, map[string]*bintree{}}, + "chunky.flf": &bintree{fontsChunkyFlf, map[string]*bintree{}}, + "coinstak.flf": &bintree{fontsCoinstakFlf, map[string]*bintree{}}, + "colossal.flf": &bintree{fontsColossalFlf, map[string]*bintree{}}, + "computer.flf": &bintree{fontsComputerFlf, map[string]*bintree{}}, + "contessa.flf": &bintree{fontsContessaFlf, map[string]*bintree{}}, + "contrast.flf": &bintree{fontsContrastFlf, map[string]*bintree{}}, + "cosmic.flf": &bintree{fontsCosmicFlf, map[string]*bintree{}}, + "cosmike.flf": &bintree{fontsCosmikeFlf, map[string]*bintree{}}, + "cricket.flf": &bintree{fontsCricketFlf, map[string]*bintree{}}, + "cursive.flf": &bintree{fontsCursiveFlf, map[string]*bintree{}}, + "cyberlarge.flf": &bintree{fontsCyberlargeFlf, map[string]*bintree{}}, + "cybermedium.flf": &bintree{fontsCybermediumFlf, map[string]*bintree{}}, + "cybersmall.flf": &bintree{fontsCybersmallFlf, map[string]*bintree{}}, + "diamond.flf": &bintree{fontsDiamondFlf, map[string]*bintree{}}, + "digital.flf": &bintree{fontsDigitalFlf, map[string]*bintree{}}, + "doh.flf": &bintree{fontsDohFlf, map[string]*bintree{}}, + "doom.flf": &bintree{fontsDoomFlf, map[string]*bintree{}}, + "dotmatrix.flf": &bintree{fontsDotmatrixFlf, 
map[string]*bintree{}}, + "drpepper.flf": &bintree{fontsDrpepperFlf, map[string]*bintree{}}, + "eftichess.flf": &bintree{fontsEftichessFlf, map[string]*bintree{}}, + "eftifont.flf": &bintree{fontsEftifontFlf, map[string]*bintree{}}, + "eftipiti.flf": &bintree{fontsEftipitiFlf, map[string]*bintree{}}, + "eftirobot.flf": &bintree{fontsEftirobotFlf, map[string]*bintree{}}, + "eftitalic.flf": &bintree{fontsEftitalicFlf, map[string]*bintree{}}, + "eftiwall.flf": &bintree{fontsEftiwallFlf, map[string]*bintree{}}, + "eftiwater.flf": &bintree{fontsEftiwaterFlf, map[string]*bintree{}}, + "elite.flf": &bintree{fontsEliteFlf, map[string]*bintree{}}, + "epic.flf": &bintree{fontsEpicFlf, map[string]*bintree{}}, + "fender.flf": &bintree{fontsFenderFlf, map[string]*bintree{}}, + "fourtops.flf": &bintree{fontsFourtopsFlf, map[string]*bintree{}}, + "fuzzy.flf": &bintree{fontsFuzzyFlf, map[string]*bintree{}}, + "goofy.flf": &bintree{fontsGoofyFlf, map[string]*bintree{}}, + "gothic.flf": &bintree{fontsGothicFlf, map[string]*bintree{}}, + "graffiti.flf": &bintree{fontsGraffitiFlf, map[string]*bintree{}}, + "hollywood.flf": &bintree{fontsHollywoodFlf, map[string]*bintree{}}, + "invita.flf": &bintree{fontsInvitaFlf, map[string]*bintree{}}, + "isometric1.flf": &bintree{fontsIsometric1Flf, map[string]*bintree{}}, + "isometric2.flf": &bintree{fontsIsometric2Flf, map[string]*bintree{}}, + "isometric3.flf": &bintree{fontsIsometric3Flf, map[string]*bintree{}}, + "isometric4.flf": &bintree{fontsIsometric4Flf, map[string]*bintree{}}, + "italic.flf": &bintree{fontsItalicFlf, map[string]*bintree{}}, + "ivrit.flf": &bintree{fontsIvritFlf, map[string]*bintree{}}, + "jazmine.flf": &bintree{fontsJazmineFlf, map[string]*bintree{}}, + "jerusalem.flf": &bintree{fontsJerusalemFlf, map[string]*bintree{}}, + "katakana.flf": &bintree{fontsKatakanaFlf, map[string]*bintree{}}, + "kban.flf": &bintree{fontsKbanFlf, map[string]*bintree{}}, + "larry3d.flf": &bintree{fontsLarry3dFlf, map[string]*bintree{}}, + "lcd.flf": &bintree{fontsLcdFlf, map[string]*bintree{}}, + "lean.flf": &bintree{fontsLeanFlf, map[string]*bintree{}}, + "letters.flf": &bintree{fontsLettersFlf, map[string]*bintree{}}, + "linux.flf": &bintree{fontsLinuxFlf, map[string]*bintree{}}, + "lockergnome.flf": &bintree{fontsLockergnomeFlf, map[string]*bintree{}}, + "madrid.flf": &bintree{fontsMadridFlf, map[string]*bintree{}}, + "marquee.flf": &bintree{fontsMarqueeFlf, map[string]*bintree{}}, + "maxfour.flf": &bintree{fontsMaxfourFlf, map[string]*bintree{}}, + "mike.flf": &bintree{fontsMikeFlf, map[string]*bintree{}}, + "mini.flf": &bintree{fontsMiniFlf, map[string]*bintree{}}, + "mirror.flf": &bintree{fontsMirrorFlf, map[string]*bintree{}}, + "mnemonic.flf": &bintree{fontsMnemonicFlf, map[string]*bintree{}}, + "morse.flf": &bintree{fontsMorseFlf, map[string]*bintree{}}, + "moscow.flf": &bintree{fontsMoscowFlf, map[string]*bintree{}}, + "nancyj-fancy.flf": &bintree{fontsNancyjFancyFlf, map[string]*bintree{}}, + "nancyj-underlined.flf": &bintree{fontsNancyjUnderlinedFlf, map[string]*bintree{}}, + "nancyj.flf": &bintree{fontsNancyjFlf, map[string]*bintree{}}, + "nipples.flf": &bintree{fontsNipplesFlf, map[string]*bintree{}}, + "ntgreek.flf": &bintree{fontsNtgreekFlf, map[string]*bintree{}}, + "o8.flf": &bintree{fontsO8Flf, map[string]*bintree{}}, + "ogre.flf": &bintree{fontsOgreFlf, map[string]*bintree{}}, + "pawp.flf": &bintree{fontsPawpFlf, map[string]*bintree{}}, + "peaks.flf": &bintree{fontsPeaksFlf, map[string]*bintree{}}, + "pebbles.flf": &bintree{fontsPebblesFlf, 
map[string]*bintree{}}, + "pepper.flf": &bintree{fontsPepperFlf, map[string]*bintree{}}, + "poison.flf": &bintree{fontsPoisonFlf, map[string]*bintree{}}, + "puffy.flf": &bintree{fontsPuffyFlf, map[string]*bintree{}}, + "pyramid.flf": &bintree{fontsPyramidFlf, map[string]*bintree{}}, + "rectangles.flf": &bintree{fontsRectanglesFlf, map[string]*bintree{}}, + "relief.flf": &bintree{fontsReliefFlf, map[string]*bintree{}}, + "relief2.flf": &bintree{fontsRelief2Flf, map[string]*bintree{}}, + "rev.flf": &bintree{fontsRevFlf, map[string]*bintree{}}, + "roman.flf": &bintree{fontsRomanFlf, map[string]*bintree{}}, + "rot13.flf": &bintree{fontsRot13Flf, map[string]*bintree{}}, + "rounded.flf": &bintree{fontsRoundedFlf, map[string]*bintree{}}, + "rowancap.flf": &bintree{fontsRowancapFlf, map[string]*bintree{}}, + "rozzo.flf": &bintree{fontsRozzoFlf, map[string]*bintree{}}, + "runic.flf": &bintree{fontsRunicFlf, map[string]*bintree{}}, + "runyc.flf": &bintree{fontsRunycFlf, map[string]*bintree{}}, + "sblood.flf": &bintree{fontsSbloodFlf, map[string]*bintree{}}, + "script.flf": &bintree{fontsScriptFlf, map[string]*bintree{}}, + "serifcap.flf": &bintree{fontsSerifcapFlf, map[string]*bintree{}}, + "shadow.flf": &bintree{fontsShadowFlf, map[string]*bintree{}}, + "short.flf": &bintree{fontsShortFlf, map[string]*bintree{}}, + "slant.flf": &bintree{fontsSlantFlf, map[string]*bintree{}}, + "slide.flf": &bintree{fontsSlideFlf, map[string]*bintree{}}, + "slscript.flf": &bintree{fontsSlscriptFlf, map[string]*bintree{}}, + "small.flf": &bintree{fontsSmallFlf, map[string]*bintree{}}, + "smisome1.flf": &bintree{fontsSmisome1Flf, map[string]*bintree{}}, + "smkeyboard.flf": &bintree{fontsSmkeyboardFlf, map[string]*bintree{}}, + "smscript.flf": &bintree{fontsSmscriptFlf, map[string]*bintree{}}, + "smshadow.flf": &bintree{fontsSmshadowFlf, map[string]*bintree{}}, + "smslant.flf": &bintree{fontsSmslantFlf, map[string]*bintree{}}, + "smtengwar.flf": &bintree{fontsSmtengwarFlf, map[string]*bintree{}}, + "speed.flf": &bintree{fontsSpeedFlf, map[string]*bintree{}}, + "stampatello.flf": &bintree{fontsStampatelloFlf, map[string]*bintree{}}, + "standard.flf": &bintree{fontsStandardFlf, map[string]*bintree{}}, + "starwars.flf": &bintree{fontsStarwarsFlf, map[string]*bintree{}}, + "stellar.flf": &bintree{fontsStellarFlf, map[string]*bintree{}}, + "stop.flf": &bintree{fontsStopFlf, map[string]*bintree{}}, + "straight.flf": &bintree{fontsStraightFlf, map[string]*bintree{}}, + "tanja.flf": &bintree{fontsTanjaFlf, map[string]*bintree{}}, + "tengwar.flf": &bintree{fontsTengwarFlf, map[string]*bintree{}}, + "term.flf": &bintree{fontsTermFlf, map[string]*bintree{}}, + "thick.flf": &bintree{fontsThickFlf, map[string]*bintree{}}, + "thin.flf": &bintree{fontsThinFlf, map[string]*bintree{}}, + "threepoint.flf": &bintree{fontsThreepointFlf, map[string]*bintree{}}, + "ticks.flf": &bintree{fontsTicksFlf, map[string]*bintree{}}, + "ticksslant.flf": &bintree{fontsTicksslantFlf, map[string]*bintree{}}, + "tinker-toy.flf": &bintree{fontsTinkerToyFlf, map[string]*bintree{}}, + "tombstone.flf": &bintree{fontsTombstoneFlf, map[string]*bintree{}}, + "trek.flf": &bintree{fontsTrekFlf, map[string]*bintree{}}, + "tsalagi.flf": &bintree{fontsTsalagiFlf, map[string]*bintree{}}, + "twopoint.flf": &bintree{fontsTwopointFlf, map[string]*bintree{}}, + "univers.flf": &bintree{fontsUniversFlf, map[string]*bintree{}}, + "usaflag.flf": &bintree{fontsUsaflagFlf, map[string]*bintree{}}, + "wavy.flf": &bintree{fontsWavyFlf, map[string]*bintree{}}, + "weird.flf": 
&bintree{fontsWeirdFlf, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} diff --git a/vendor/github.com/common-nighthawk/go-figure/figlet-parser.go b/vendor/github.com/common-nighthawk/go-figure/figlet-parser.go new file mode 100644 index 000000000..4c595a5ee --- /dev/null +++ b/vendor/github.com/common-nighthawk/go-figure/figlet-parser.go @@ -0,0 +1,52 @@ +package figure + +import ( + "strconv" + "strings" +) + +const signature = "flf2" +const reverseFlag = "1" + +var charDelimiters = [3]string{"@", "#", "$"} +var hardblanksBlacklist = [2]byte{'a', '2'} + +func getHeight(metadata string) int { + datum := strings.Fields(metadata)[1] + height, _ := strconv.Atoi(datum) + return height +} + +func getBaseline(metadata string) int { + datum := strings.Fields(metadata)[2] + baseline, _ := strconv.Atoi(datum) + return baseline +} + +func getHardblank(metadata string) byte { + datum := strings.Fields(metadata)[0] + hardblank := datum[len(datum)-1] + if hardblank == hardblanksBlacklist[0] || hardblank == hardblanksBlacklist[1] { + return ' ' + } else { + return hardblank + } +} + +func getReverse(metadata string) bool { + data := strings.Fields(metadata) + return len(data) > 6 && data[6] == reverseFlag +} + +func lastCharLine(text string, height int) bool { + endOfLine, length := " ", 2 + if height == 1 && len(text) > 0 { + length = 1 + } + if len(text) >= length { + endOfLine = text[len(text)-length:] + } + return endOfLine == strings.Repeat(charDelimiters[0], length) || + endOfLine == strings.Repeat(charDelimiters[1], length) || + endOfLine == strings.Repeat(charDelimiters[2], length) +} diff --git a/vendor/github.com/common-nighthawk/go-figure/figure.go b/vendor/github.com/common-nighthawk/go-figure/figure.go new file mode 100644 index 000000000..a2559fc4b --- /dev/null +++ b/vendor/github.com/common-nighthawk/go-figure/figure.go @@ -0,0 +1,80 @@ +package figure + +import ( + "io" + "log" + "reflect" + "strings" +) + +const ascii_offset = 32 +const first_ascii = ' ' +const last_ascii = '~' + +type figure struct { + phrase string + font + strict bool + color string +} + +func NewFigure(phrase, fontName string, strict bool) figure { + font := newFont(fontName) + if font.reverse { + phrase = reverse(phrase) + } + return figure{phrase: phrase, font: font, strict: strict} +} + +func NewColorFigure(phrase, fontName string, color string, strict bool) figure { + color = strings.ToLower(color) + if _, 
found := colors[color]; !found { + log.Fatalf("invalid color. must be one of: %s", reflect.ValueOf(colors).MapKeys()) + } + + fig := NewFigure(phrase, fontName, strict) + fig.color = color + return fig +} + +func NewFigureWithFont(phrase string, reader io.Reader, strict bool) figure { + font := newFontFromReader(reader) + if font.reverse { + phrase = reverse(phrase) + } + return figure{phrase: phrase, font: font, strict: strict} +} + +func (figure figure) Slicify() (rows []string) { + for r := 0; r < figure.font.height; r++ { + printRow := "" + for _, char := range figure.phrase { + if char < first_ascii || char > last_ascii { + if figure.strict { + log.Fatal("invalid input.") + } else { + char = '?' + } + } + fontIndex := char - ascii_offset + charRowText := scrub(figure.font.letters[fontIndex][r], figure.font.hardblank) + printRow += charRowText + } + if r < figure.font.baseline || len(strings.TrimSpace(printRow)) > 0 { + rows = append(rows, strings.TrimRight(printRow, " ")) + } + } + return rows +} + +func scrub(text string, char byte) string { + return strings.Replace(text, string(char), " ", -1) +} + +func reverse(s string) string { + runes := []rune(s) + for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 { + runes[i], runes[j] = runes[j], runes[i] + } + return string(runes) +} diff --git a/vendor/github.com/common-nighthawk/go-figure/font.go b/vendor/github.com/common-nighthawk/go-figure/font.go new file mode 100644 index 000000000..fc6852e81 --- /dev/null +++ b/vendor/github.com/common-nighthawk/go-figure/font.go @@ -0,0 +1,112 @@ +package figure + +import ( + "bufio" + "bytes" + "io" + "path" + "strings" +) + +const defaultFont = "standard" + +var colors = map[string]string{ + "reset": "\033[0m", + "red": "\033[31m", + "green": "\033[32m", + "yellow": "\033[33m", + "blue": "\033[34m", + "purple": "\033[35m", + "cyan": "\033[36m", + "gray": "\033[37m", + "white": "\033[97m", +} + +type font struct { + name string + height int + baseline int + hardblank byte + reverse bool + letters [][]string +} + +func newFont(name string) (font font) { + font.setName(name) + fontBytes, err := Asset(path.Join("fonts", font.name+".flf")) + if err != nil { + panic(err) + } + fontBytesReader := bytes.NewReader(fontBytes) + scanner := bufio.NewScanner(fontBytesReader) + font.setAttributes(scanner) + font.setLetters(scanner) + return font +} + +func newFontFromReader(reader io.Reader) (font font) { + scanner := bufio.NewScanner(reader) + font.setAttributes(scanner) + font.setLetters(scanner) + return font +} + +func (font *font) setName(name string) { + font.name = name + if len(name) < 1 { + font.name = defaultFont + } +} + +func (font *font) setAttributes(scanner *bufio.Scanner) { + for scanner.Scan() { + text := scanner.Text() + if strings.HasPrefix(text, signature) { + font.height = getHeight(text) + font.baseline = getBaseline(text) + font.hardblank = getHardblank(text) + font.reverse = getReverse(text) + break + } + } +} + +func (font *font) setLetters(scanner *bufio.Scanner) { + font.letters = append(font.letters, make([]string, font.height, font.height)) //TODO: set spaces from flf + for i := range font.letters[0] { + font.letters[0][i] = " " + } //TODO: set spaces from flf + letterIndex := 0 + for scanner.Scan() { + text, cutLength, letterIndexInc := scanner.Text(), 1, 0 + if lastCharLine(text, font.height) { + font.letters = append(font.letters, []string{}) + letterIndexInc = 1 + if font.height > 1 { + cutLength = 2 + } + } + if letterIndex > 0 { + appendText := "" + if len(text) > 1 { + 
appendText = text[:len(text)-cutLength] + } + font.letters[letterIndex] = append(font.letters[letterIndex], appendText) + } + letterIndex += letterIndexInc + } +} + +func (font *font) evenLetters() { + var longest int + for _, letter := range font.letters { + if len(letter) > 0 && len(letter[0]) > longest { + longest = len(letter[0]) + } + } + for _, letter := range font.letters { + for i, row := range letter { + letter[i] = row + strings.Repeat(" ", longest-len(row)) + } + } +} diff --git a/vendor/github.com/common-nighthawk/go-figure/public_methods.go b/vendor/github.com/common-nighthawk/go-figure/public_methods.go new file mode 100644 index 000000000..5a1541162 --- /dev/null +++ b/vendor/github.com/common-nighthawk/go-figure/public_methods.go @@ -0,0 +1,108 @@ +package figure + +import ( + "fmt" + "io" + "strings" + "time" +) + +//stdout +func (fig figure) Print() { + for _, printRow := range fig.Slicify() { + if fig.color != "" { + printRow = colors[fig.color] + printRow + colors["reset"] + } + fmt.Println(printRow) + } +} + +// returns a colored string +func (fig figure) ColorString() string { + s := "" + for _, printRow := range fig.Slicify() { + if fig.color != "" { + printRow = colors[fig.color] + printRow + colors["reset"] + } + s += fmt.Sprintf("%s\n", printRow) + } + return s +} + +func (fig figure) String() string { + s := "" + for _, printRow := range fig.Slicify() { + s += fmt.Sprintf("%s\n", printRow) + } + return s +} + +func (fig figure) Scroll(duration, stillness int, direction string) { + endTime := time.Now().Add(time.Duration(duration) * time.Millisecond) + fig.phrase = fig.phrase + " " + clearScreen() + for time.Now().Before(endTime) { + var shiftedPhrase string + chars := []byte(fig.phrase) + if strings.HasPrefix(strings.ToLower(direction), "r") { + shiftedPhrase = string(append(chars[len(chars)-1:], chars[0:len(chars)-1]...)) + } else { + shiftedPhrase = string(append(chars[1:len(chars)], chars[0])) + } + fig.phrase = shiftedPhrase + fig.Print() + sleep(stillness) + clearScreen() + } +} + +func (fig figure) Blink(duration, timeOn, timeOff int) { + if timeOff < 0 { + timeOff = timeOn + } + endTime := time.Now().Add(time.Duration(duration) * time.Millisecond) + clearScreen() + for time.Now().Before(endTime) { + fig.Print() + sleep(timeOn) + clearScreen() + sleep(timeOff) + } +} + +func (fig figure) Dance(duration, freeze int) { + endTime := time.Now().Add(time.Duration(duration) * time.Millisecond) + font := fig.font //TODO: change to deep copy + font.evenLetters() + figures := []figure{figure{font: font}, figure{font: font}} + clearScreen() + for i, c := range fig.phrase { + appenders := []string{" ", " "} + appenders[i%2] = string(c) + for f, _ := range figures { + figures[f].phrase = figures[f].phrase + appenders[f] + } + } + for p := 0; time.Now().Before(endTime); p ^= 1 { + figures[p].Print() + figures[1-p].Print() + sleep(freeze) + clearScreen() + } +} + +//writers +func Write(w io.Writer, fig figure) { + for _, printRow := range fig.Slicify() { + fmt.Fprintf(w, "%v\n", printRow) + } +} + +//helpers +func clearScreen() { + fmt.Print("\033[H\033[2J") +} + +func sleep(milliseconds int) { + time.Sleep(time.Duration(milliseconds) * time.Millisecond) +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/config/config_unix.go b/vendor/github.com/containerd/containerd/remotes/docker/config/config_unix.go new file mode 100644 index 000000000..9db18dedc --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/config/config_unix.go @@ 
-0,0 +1,42 @@ +//go:build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package config + +import ( + "crypto/x509" + "path/filepath" +) + +func hostPaths(root, host string) (hosts []string) { + ch := hostDirectory(host) + if ch != host { + hosts = append(hosts, filepath.Join(root, ch)) + } + + hosts = append(hosts, + filepath.Join(root, host), + filepath.Join(root, "_default"), + ) + + return +} + +func rootSystemPool() (*x509.CertPool, error) { + return x509.SystemCertPool() +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/config/config_windows.go b/vendor/github.com/containerd/containerd/remotes/docker/config/config_windows.go new file mode 100644 index 000000000..4697728b9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/config/config_windows.go @@ -0,0 +1,41 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package config + +import ( + "crypto/x509" + "path/filepath" + "strings" +) + +func hostPaths(root, host string) (hosts []string) { + ch := hostDirectory(host) + if ch != host { + hosts = append(hosts, filepath.Join(root, strings.Replace(ch, ":", "", -1))) + } + + hosts = append(hosts, + filepath.Join(root, strings.Replace(host, ":", "", -1)), + filepath.Join(root, "_default"), + ) + + return +} + +func rootSystemPool() (*x509.CertPool, error) { + return x509.NewCertPool(), nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/config/docker_fuzzer_internal.go b/vendor/github.com/containerd/containerd/remotes/docker/config/docker_fuzzer_internal.go new file mode 100644 index 000000000..335614fdb --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/config/docker_fuzzer_internal.go @@ -0,0 +1,44 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package config
+
+import (
+	"os"
+
+	fuzz "github.com/AdaLogics/go-fuzz-headers"
+)
+
+func FuzzParseHostsFile(data []byte) int {
+	f := fuzz.NewConsumer(data)
+	dir, err := os.MkdirTemp("", "fuzz-")
+	if err != nil {
+		return 0
+	}
+	err = f.CreateFiles(dir)
+	if err != nil {
+		return 0
+	}
+	defer os.RemoveAll(dir)
+	b, err := f.GetBytes()
+	if err != nil {
+		return 0
+	}
+	_, _ = parseHostsFile(dir, b)
+	return 1
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/config/hosts.go b/vendor/github.com/containerd/containerd/remotes/docker/config/hosts.go
new file mode 100644
index 000000000..aa8ea959e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/config/hosts.go
@@ -0,0 +1,580 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package config contains utilities for helping configure the Docker resolver
+package config
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/remotes/docker"
+	"github.com/pelletier/go-toml"
+)
+
+// UpdateClientFunc is a function that lets you amend http.Client behavior used by registry clients.
+type UpdateClientFunc func(client *http.Client) error
+
+type hostConfig struct {
+	scheme string
+	host   string
+	path   string
+
+	capabilities docker.HostCapabilities
+
+	caCerts     []string
+	clientPairs [][2]string
+	skipVerify  *bool
+
+	header http.Header
+
+	// TODO: Add credential configuration (domain alias, username)
+}
+
+// HostOptions is used to configure registry hosts
+type HostOptions struct {
+	HostDir       func(string) (string, error)
+	Credentials   func(host string) (string, string, error)
+	DefaultTLS    *tls.Config
+	DefaultScheme string
+	// UpdateClient will be called after the http.Client object is created, so clients can provide extra configuration
+	UpdateClient   UpdateClientFunc
+	AuthorizerOpts []docker.AuthorizerOpt
+}
+
+// ConfigureHosts creates a registry hosts function from the provided
+// host creation options. The host directory may contain a hosts.toml file or
+// certificate files laid out in the Docker-specific layout.
+// If no `HostDir` function is provided, defaults are used.
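For orientation, an illustrative sketch (not part of the vendored file) of how ConfigureHosts is typically wired up; NewResolver and ResolverOptions are assumed from the sibling remotes/docker package, and the certs.d root path is hypothetical:

	hosts := config.ConfigureHosts(ctx, config.HostOptions{
		HostDir: config.HostDirFromRoot("/etc/containerd/certs.d"), // hypothetical root
	})
	resolver := docker.NewResolver(docker.ResolverOptions{Hosts: hosts})
	_ = resolver

A matching hosts.toml, following the schema that the hostFileConfig struct documents further down (names and paths illustrative; relative paths such as the CA file are resolved against the directory containing hosts.toml):

	server = "https://registry.example.com"

	[host."https://mirror.example.com"]
	  capabilities = ["pull", "resolve"]
	  ca = "mirror-ca.pem"
	  skip_verify = false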
+func ConfigureHosts(ctx context.Context, options HostOptions) docker.RegistryHosts { + return func(host string) ([]docker.RegistryHost, error) { + var hosts []hostConfig + if options.HostDir != nil { + dir, err := options.HostDir(host) + if err != nil && !errdefs.IsNotFound(err) { + return nil, err + } + if dir != "" { + log.G(ctx).WithField("dir", dir).Debug("loading host directory") + hosts, err = loadHostDir(ctx, dir) + if err != nil { + return nil, err + } + } + } + + // If hosts was not set, add a default host + // NOTE: Check nil here and not empty, the host may be + // intentionally configured to not have any endpoints + if hosts == nil { + hosts = make([]hostConfig, 1) + } + if len(hosts) > 0 && hosts[len(hosts)-1].host == "" { + if host == "docker.io" { + hosts[len(hosts)-1].scheme = "https" + hosts[len(hosts)-1].host = "registry-1.docker.io" + } else if docker.IsLocalhost(host) { + hosts[len(hosts)-1].host = host + if options.DefaultScheme == "" || options.DefaultScheme == "http" { + hosts[len(hosts)-1].scheme = "http" + + // Skipping TLS verification for localhost + var skipVerify = true + hosts[len(hosts)-1].skipVerify = &skipVerify + } else { + hosts[len(hosts)-1].scheme = options.DefaultScheme + } + } else { + hosts[len(hosts)-1].host = host + if options.DefaultScheme != "" { + hosts[len(hosts)-1].scheme = options.DefaultScheme + } else { + hosts[len(hosts)-1].scheme = "https" + } + } + hosts[len(hosts)-1].path = "/v2" + hosts[len(hosts)-1].capabilities = docker.HostCapabilityPull | docker.HostCapabilityResolve | docker.HostCapabilityPush + } + + var defaultTLSConfig *tls.Config + if options.DefaultTLS != nil { + defaultTLSConfig = options.DefaultTLS + } else { + defaultTLSConfig = &tls.Config{} + } + + defaultTransport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + FallbackDelay: 300 * time.Millisecond, + }).DialContext, + MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: defaultTLSConfig, + ExpectContinueTimeout: 5 * time.Second, + } + + client := &http.Client{ + Transport: defaultTransport, + } + if options.UpdateClient != nil { + if err := options.UpdateClient(client); err != nil { + return nil, err + } + } + + authOpts := []docker.AuthorizerOpt{docker.WithAuthClient(client)} + if options.Credentials != nil { + authOpts = append(authOpts, docker.WithAuthCreds(options.Credentials)) + } + authOpts = append(authOpts, options.AuthorizerOpts...) + authorizer := docker.NewDockerAuthorizer(authOpts...) 
+ + rhosts := make([]docker.RegistryHost, len(hosts)) + for i, host := range hosts { + + rhosts[i].Scheme = host.scheme + rhosts[i].Host = host.host + rhosts[i].Path = host.path + rhosts[i].Capabilities = host.capabilities + rhosts[i].Header = host.header + + if host.caCerts != nil || host.clientPairs != nil || host.skipVerify != nil { + tr := defaultTransport.Clone() + tlsConfig := tr.TLSClientConfig + if host.skipVerify != nil { + tlsConfig.InsecureSkipVerify = *host.skipVerify + } + if host.caCerts != nil { + if tlsConfig.RootCAs == nil { + rootPool, err := rootSystemPool() + if err != nil { + return nil, fmt.Errorf("unable to initialize cert pool: %w", err) + } + tlsConfig.RootCAs = rootPool + } + for _, f := range host.caCerts { + data, err := os.ReadFile(f) + if err != nil { + return nil, fmt.Errorf("unable to read CA cert %q: %w", f, err) + } + if !tlsConfig.RootCAs.AppendCertsFromPEM(data) { + return nil, fmt.Errorf("unable to load CA cert %q", f) + } + } + } + + if host.clientPairs != nil { + for _, pair := range host.clientPairs { + certPEMBlock, err := os.ReadFile(pair[0]) + if err != nil { + return nil, fmt.Errorf("unable to read CERT file %q: %w", pair[0], err) + } + var keyPEMBlock []byte + if pair[1] != "" { + keyPEMBlock, err = os.ReadFile(pair[1]) + if err != nil { + return nil, fmt.Errorf("unable to read CERT file %q: %w", pair[1], err) + } + } else { + // Load key block from same PEM file + keyPEMBlock = certPEMBlock + } + cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return nil, fmt.Errorf("failed to load X509 key pair: %w", err) + } + + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + } + + c := *client + c.Transport = tr + if options.UpdateClient != nil { + if err := options.UpdateClient(&c); err != nil { + return nil, err + } + } + + rhosts[i].Client = &c + rhosts[i].Authorizer = docker.NewDockerAuthorizer(append(authOpts, docker.WithAuthClient(&c))...) + } else { + rhosts[i].Client = client + rhosts[i].Authorizer = authorizer + } + } + + return rhosts, nil + } + +} + +// HostDirFromRoot returns a function which finds a host directory +// based at the given root. +func HostDirFromRoot(root string) func(string) (string, error) { + return func(host string) (string, error) { + for _, p := range hostPaths(root, host) { + if _, err := os.Stat(p); err == nil { + return p, nil + } else if !os.IsNotExist(err) { + return "", err + } + } + return "", errdefs.ErrNotFound + } +} + +// hostDirectory converts ":port" to "_port_" in directory names +func hostDirectory(host string) string { + idx := strings.LastIndex(host, ":") + if idx > 0 { + return host[:idx] + "_" + host[idx+1:] + "_" + } + return host +} + +func loadHostDir(ctx context.Context, hostsDir string) ([]hostConfig, error) { + b, err := os.ReadFile(filepath.Join(hostsDir, "hosts.toml")) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + + if len(b) == 0 { + // If hosts.toml does not exist, fallback to checking for + // certificate files based on Docker's certificate file + // pattern (".crt", ".cert", ".key" files) + return loadCertFiles(ctx, hostsDir) + } + + hosts, err := parseHostsFile(hostsDir, b) + if err != nil { + log.G(ctx).WithError(err).Error("failed to decode hosts.toml") + // Fallback to checking certificate files + return loadCertFiles(ctx, hostsDir) + } + + return hosts, nil +} + +type hostFileConfig struct { + // Capabilities determine what operations a host is + // capable of performing. 
+	// capable of performing. Allowed values:
+	// - pull
+	// - resolve
+	// - push
+	Capabilities []string `toml:"capabilities"`
+
+	// CACert specifies the public key certificates for TLS
+	// Accepted types
+	// - string - Single file with certificate(s)
+	// - []string - Multiple files with certificates
+	CACert interface{} `toml:"ca"`
+
+	// Client keypair(s) for TLS with client authentication
+	// Accepted types
+	// - string - Single file with public and private keys
+	// - []string - Multiple files with public and private keys
+	// - [][2]string - Multiple keypairs with public and private keys in separate files
+	Client interface{} `toml:"client"`
+
+	// SkipVerify skips verification of the server's certificate chain
+	// and host name. This should only be used for testing or in
+	// combination with other methods of verifying connections.
+	SkipVerify *bool `toml:"skip_verify"`
+
+	// Header specifies additional header fields to send to the server
+	Header map[string]interface{} `toml:"header"`
+
+	// OverridePath indicates that the API root endpoint is defined in the URL
+	// path rather than by the API specification.
+	// This may be used with non-compliant OCI registries to override the
+	// API root endpoint.
+	OverridePath bool `toml:"override_path"`
+
+	// TODO: Credentials: helper? name? username? alternate domain? token?
+}
+
+func parseHostsFile(baseDir string, b []byte) ([]hostConfig, error) {
+	tree, err := toml.LoadBytes(b)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse TOML: %w", err)
+	}
+
+	// HACK: we want to keep the toml parsing structures private in this package, but go-toml ignores private embedded types.
+	// So we remap it to a public type within the func body; technically it is public, but it cannot be imported elsewhere.
+	//nolint:unused
+	type HostFileConfig = hostFileConfig
+
+	c := struct {
+		HostFileConfig
+		// Server specifies the default server. When `host` is
+		// also specified, those hosts are tried first.
+ Server string `toml:"server"` + // HostConfigs store the per-host configuration + HostConfigs map[string]hostFileConfig `toml:"host"` + }{} + + orderedHosts, err := getSortedHosts(tree) + if err != nil { + return nil, err + } + + var ( + hosts []hostConfig + ) + + if err := tree.Unmarshal(&c); err != nil { + return nil, err + } + + // Parse hosts array + for _, host := range orderedHosts { + config := c.HostConfigs[host] + + parsed, err := parseHostConfig(host, baseDir, config) + if err != nil { + return nil, err + } + hosts = append(hosts, parsed) + } + + // Parse root host config and append it as the last element + parsed, err := parseHostConfig(c.Server, baseDir, c.HostFileConfig) + if err != nil { + return nil, err + } + hosts = append(hosts, parsed) + + return hosts, nil +} + +func parseHostConfig(server string, baseDir string, config hostFileConfig) (hostConfig, error) { + var ( + result = hostConfig{} + err error + ) + + if server != "" { + if !strings.HasPrefix(server, "http") { + server = "https://" + server + } + u, err := url.Parse(server) + if err != nil { + return hostConfig{}, fmt.Errorf("unable to parse server %v: %w", server, err) + } + result.scheme = u.Scheme + result.host = u.Host + if len(u.Path) > 0 { + u.Path = path.Clean(u.Path) + if !strings.HasSuffix(u.Path, "/v2") && !config.OverridePath { + u.Path = u.Path + "/v2" + } + } else if !config.OverridePath { + u.Path = "/v2" + } + result.path = u.Path + } + + result.skipVerify = config.SkipVerify + + if len(config.Capabilities) > 0 { + for _, c := range config.Capabilities { + switch strings.ToLower(c) { + case "pull": + result.capabilities |= docker.HostCapabilityPull + case "resolve": + result.capabilities |= docker.HostCapabilityResolve + case "push": + result.capabilities |= docker.HostCapabilityPush + default: + return hostConfig{}, fmt.Errorf("unknown capability %v", c) + } + } + } else { + result.capabilities = docker.HostCapabilityPull | docker.HostCapabilityResolve | docker.HostCapabilityPush + } + + if config.CACert != nil { + switch cert := config.CACert.(type) { + case string: + result.caCerts = []string{makeAbsPath(cert, baseDir)} + case []interface{}: + result.caCerts, err = makeStringSlice(cert, func(p string) string { + return makeAbsPath(p, baseDir) + }) + if err != nil { + return hostConfig{}, err + } + default: + return hostConfig{}, fmt.Errorf("invalid type %v for \"ca\"", cert) + } + } + + if config.Client != nil { + switch client := config.Client.(type) { + case string: + result.clientPairs = [][2]string{{makeAbsPath(client, baseDir), ""}} + case []interface{}: + // []string or [][2]string + for _, pairs := range client { + switch p := pairs.(type) { + case string: + result.clientPairs = append(result.clientPairs, [2]string{makeAbsPath(p, baseDir), ""}) + case []interface{}: + slice, err := makeStringSlice(p, func(s string) string { + return makeAbsPath(s, baseDir) + }) + if err != nil { + return hostConfig{}, err + } + if len(slice) != 2 { + return hostConfig{}, fmt.Errorf("invalid pair %v for \"client\"", p) + } + + var pair [2]string + copy(pair[:], slice) + result.clientPairs = append(result.clientPairs, pair) + default: + return hostConfig{}, fmt.Errorf("invalid type %T for \"client\"", p) + } + } + default: + return hostConfig{}, fmt.Errorf("invalid type %v for \"client\"", client) + } + } + + if config.Header != nil { + header := http.Header{} + for key, ty := range config.Header { + switch value := ty.(type) { + case string: + header[key] = []string{value} + case []interface{}: + 
header[key], err = makeStringSlice(value, nil)
+				if err != nil {
+					return hostConfig{}, err
+				}
+			default:
+				return hostConfig{}, fmt.Errorf("invalid type %v for header %q", ty, key)
+			}
+		}
+		result.header = header
+	}
+
+	return result, nil
+}
+
+// getSortedHosts returns the list of hosts as they are defined in the file.
+func getSortedHosts(root *toml.Tree) ([]string, error) {
+	iter, ok := root.Get("host").(*toml.Tree)
+	if !ok {
+		return nil, errors.New("invalid `host` tree")
+	}
+
+	list := append([]string{}, iter.Keys()...)
+
+	// go-toml stores TOML sections in a map object, so no order is guaranteed.
+	// We retrieve the line number for each key and sort the keys by position.
+	sort.Slice(list, func(i, j int) bool {
+		h1 := iter.GetPath([]string{list[i]}).(*toml.Tree)
+		h2 := iter.GetPath([]string{list[j]}).(*toml.Tree)
+		return h1.Position().Line < h2.Position().Line
+	})
+
+	return list, nil
+}
+
+// makeStringSlice is a helper func to convert from []interface{} to []string.
+// Additionally, an optional cb func may be passed to perform string mapping.
+func makeStringSlice(slice []interface{}, cb func(string) string) ([]string, error) {
+	out := make([]string, len(slice))
+	for i, value := range slice {
+		str, ok := value.(string)
+		if !ok {
+			return nil, fmt.Errorf("unable to cast %v to string", value)
+		}
+
+		if cb != nil {
+			out[i] = cb(str)
+		} else {
+			out[i] = str
+		}
+	}
+	return out, nil
+}
+
+func makeAbsPath(p string, base string) string {
+	if filepath.IsAbs(p) {
+		return p
+	}
+	return filepath.Join(base, p)
+}
+
+// loadCertFiles loads certs from certsDir like "/etc/docker/certs.d".
+// Compatible with Docker file layout
+// - files ending with ".crt" are treated as CA certificate files
+// - files ending with ".cert" are treated as client certificates, and
+//   files with the same name but ending with ".key" are treated as the
+//   corresponding private key.
+// NOTE: If a ".key" file is missing, this function will just return
+// the ".cert", which may contain the private key. If the ".cert" file
+// does not contain the private key, the caller should detect and error.
+func loadCertFiles(ctx context.Context, certsDir string) ([]hostConfig, error) {
+	fs, err := os.ReadDir(certsDir)
+	if err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	hosts := make([]hostConfig, 1)
+	for _, f := range fs {
+		if f.IsDir() {
+			continue
+		}
+		if strings.HasSuffix(f.Name(), ".crt") {
+			hosts[0].caCerts = append(hosts[0].caCerts, filepath.Join(certsDir, f.Name()))
+		}
+		if strings.HasSuffix(f.Name(), ".cert") {
+			var pair [2]string
+			certFile := f.Name()
+			pair[0] = filepath.Join(certsDir, certFile)
+			// Check if key also exists
+			keyFile := filepath.Join(certsDir, certFile[:len(certFile)-5]+".key")
+			if _, err := os.Stat(keyFile); err == nil {
+				pair[1] = keyFile
+			} else if !os.IsNotExist(err) {
+				return nil, err
+			}
+			hosts[0].clientPairs = append(hosts[0].clientPairs, pair)
+		}
+	}
+	return hosts, nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
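The host-configuration code vendored above is typically wired into containerd's Docker resolver. The following is a minimal editorial sketch, not part of this diff; it assumes the upstream packages `github.com/containerd/containerd/remotes/docker` and `.../remotes/docker/config`, and the root directory and registry names are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/remotes/docker"
	"github.com/containerd/containerd/remotes/docker/config"
)

func main() {
	ctx := context.Background()

	// Build a RegistryHosts func backed by per-registry directories, e.g.
	// /etc/containerd/certs.d/registry.example.com/hosts.toml containing:
	//
	//   server = "https://registry.example.com"
	//
	//   [host."https://mirror.example.com"]
	//     capabilities = ["pull", "resolve"]
	//     ca = "mirror-ca.pem" // resolved relative to the host directory
	hosts := config.ConfigureHosts(ctx, config.HostOptions{
		HostDir: config.HostDirFromRoot("/etc/containerd/certs.d"),
	})

	// Wire it into a resolver; mirrors are tried before the default server.
	resolver := docker.NewResolver(docker.ResolverOptions{Hosts: hosts})

	name, desc, err := resolver.Resolve(ctx, "docker.io/library/alpine:latest")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(name, desc.Digest)
}
```

When no `hosts.toml` exists in the host directory, `loadCertFiles` above falls back to Docker-style `.crt`/`.cert`/`.key` files.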
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
new file mode 100644
index 000000000..b071cea51
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -0,0 +1,690 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+/*
+   Copyright 2019 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	"github.com/klauspost/compress/zstd"
+	digest "github.com/opencontainers/go-digest"
+	"golang.org/x/sync/errgroup"
+)
+
+type options struct {
+	chunkSize              int
+	compressionLevel       int
+	prioritizedFiles       []string
+	missedPrioritizedFiles *[]string
+	compression            Compression
+	ctx                    context.Context
+	minChunkSize           int
+}
+
+type Option func(o *options) error
+
+// WithChunkSize option specifies the chunk size of the eStargz blob to build.
+func WithChunkSize(chunkSize int) Option {
+	return func(o *options) error {
+		o.chunkSize = chunkSize
+		return nil
+	}
+}
+
+// WithCompressionLevel option specifies the gzip compression level.
+// The default is gzip.BestCompression.
+// This option will be ignored if the WithCompression option is used.
+// See also: https://godoc.org/compress/gzip#pkg-constants
+func WithCompressionLevel(level int) Option {
+	return func(o *options) error {
+		o.compressionLevel = level
+		return nil
+	}
+}
+
+// WithPrioritizedFiles option specifies the list of prioritized files.
+// These files must be complete paths that are absolute or relative to "/".
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+	return func(o *options) error {
+		o.prioritizedFiles = files
+		return nil
+	}
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of the prioritized files specified by the WithPrioritizedFiles option aren't
+// found in the input tar. Instead, this records all missed file names to the
+// passed slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+	return func(o *options) error {
+		if missedFiles == nil {
+			return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+		}
+		o.missedPrioritizedFiles = missedFiles
+		return nil
+	}
+}
+
+// WithCompression specifies the compression algorithm to be used.
+// Default is gzip.
+func WithCompression(compression Compression) Option {
+	return func(o *options) error {
+		o.compression = compression
+		return nil
+	}
+}
+
+// WithContext specifies a context that can be used for clean cancelation.
+func WithContext(ctx context.Context) Option {
+	return func(o *options) error {
+		o.ctx = ctx
+		return nil
+	}
+}
+
+// WithMinChunkSize option specifies the minimal number of bytes of data that
+// must be written in one gzip stream.
+// By increasing this number, one gzip stream can contain multiple files,
+// which hopefully leads to a smaller resulting blob.
+// NOTE: This adds a TOC property that old readers don't understand.
+func WithMinChunkSize(minChunkSize int) Option {
+	return func(o *options) error {
+		o.minChunkSize = minChunkSize
+		return nil
+	}
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+	io.ReadCloser
+	diffID    digest.Digester
+	tocDigest digest.Digest
+}
+
+// DiffID returns the digest of the uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+	return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of the uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+	return b.tocDigest
+}
+
+// Build builds an eStargz blob, which is an extended version of stargz, from a blob (gzip, zstd
+// or plain tar) passed through the argument. If prioritized files are listed in
+// the options, these files are grouped as "prioritized" and can be used for runtime optimization
+// (e.g. prefetch). This function builds the blob in parallel, dividing it into several
+// (at least runtime.GOMAXPROCS(0)) sub-blobs.
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
+	var opts options
+	opts.compressionLevel = gzip.BestCompression // BestCompression by default
+	for _, o := range opt {
+		if err := o(&opts); err != nil {
+			return nil, err
+		}
+	}
+	if opts.compression == nil {
+		opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
+	}
+	layerFiles := newTempFiles()
+	ctx := opts.ctx
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	done := make(chan struct{})
+	defer close(done)
+	go func() {
+		select {
+		case <-done:
+			// nop
+		case <-ctx.Done():
+			layerFiles.CleanupAll()
+		}
+	}()
+	defer func() {
+		if rErr != nil {
+			if err := layerFiles.CleanupAll(); err != nil {
+				rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
+			}
+		}
+		if cErr := ctx.Err(); cErr != nil {
+			rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
+		}
+	}()
+	tarBlob, err := decompressBlob(tarBlob, layerFiles)
+	if err != nil {
+		return nil, err
+	}
+	entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
+	if err != nil {
+		return nil, err
+	}
+	var tarParts [][]*entry
+	if opts.minChunkSize > 0 {
+		// Each entry needs to know the size of the current gzip stream so they
+		// cannot be processed in parallel.
+		tarParts = [][]*entry{entries}
+	} else {
+		tarParts = divideEntries(entries, runtime.GOMAXPROCS(0))
+	}
+	writers := make([]*Writer, len(tarParts))
+	payloads := make([]*os.File, len(tarParts))
+	var mu sync.Mutex
+	var eg errgroup.Group
+	for i, parts := range tarParts {
+		i, parts := i, parts
+		// builds verifiable stargz sub-blobs
+		eg.Go(func() error {
+			esgzFile, err := layerFiles.TempFile("", "esgzdata")
+			if err != nil {
+				return err
+			}
+			sw := NewWriterWithCompressor(esgzFile, opts.compression)
+			sw.ChunkSize = opts.chunkSize
+			sw.MinChunkSize = opts.minChunkSize
+			if sw.needsOpenGzEntries == nil {
+				sw.needsOpenGzEntries = make(map[string]struct{})
+			}
+			for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} {
+				sw.needsOpenGzEntries[f] = struct{}{}
+			}
+			if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
+				return err
+			}
+			mu.Lock()
+			writers[i] = sw
+			payloads[i] = esgzFile
+			mu.Unlock()
+			return nil
+		})
+	}
+	if err := eg.Wait(); err != nil {
+		rErr = err
+		return nil, err
+	}
+	tocAndFooter, tocDgst, err := closeWithCombine(writers...)
+	if err != nil {
+		rErr = err
+		return nil, err
+	}
+	var rs []io.Reader
+	for _, p := range payloads {
+		fs, err := fileSectionReader(p)
+		if err != nil {
+			return nil, err
+		}
+		rs = append(rs, fs)
+	}
+	diffID := digest.Canonical.Digester()
+	pr, pw := io.Pipe()
+	go func() {
+		r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+		if err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		defer r.Close()
+		if _, err := io.Copy(diffID.Hash(), r); err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		pw.Close()
+	}()
+	return &Blob{
+		ReadCloser: readCloser{
+			Reader:    pr,
+			closeFunc: layerFiles.CleanupAll,
+		},
+		tocDigest: tocDgst,
+		diffID:    diffID,
+	}, nil
+}
+
+// closeWithCombine takes unclosed Writers and closes them. It also returns the
+// TOC that combines all of the Writers.
+// The Writers don't write the TOC and footer to the underlying writers, so they can be
+// combined into a single eStargz, and the tocAndFooter returned by this function can
+// be appended at the tail of that combined blob.
+func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+	if len(ws) == 0 {
+		return nil, "", fmt.Errorf("at least one writer must be passed")
+	}
+	for _, w := range ws {
+		if w.closed {
+			return nil, "", fmt.Errorf("writer must be unclosed")
+		}
+		defer func(w *Writer) { w.closed = true }(w)
+		if err := w.closeGz(); err != nil {
+			return nil, "", err
+		}
+		if err := w.bw.Flush(); err != nil {
+			return nil, "", err
+		}
+	}
+	var (
+		mtoc          = new(JTOC)
+		currentOffset int64
+	)
+	mtoc.Version = ws[0].toc.Version
+	for _, w := range ws {
+		for _, e := range w.toc.Entries {
+			// Recalculate Offset of non-empty files/chunks
+			if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+				e.Offset += currentOffset
+			}
+			mtoc.Entries = append(mtoc.Entries, e)
+		}
+		if w.toc.Version > mtoc.Version {
+			mtoc.Version = w.toc.Version
+		}
+		currentOffset += w.cw.n
+	}
+
+	return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+	buf := new(bytes.Buffer)
+	tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	return buf, tocDigest, nil
+}
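Build above is the high-level entry point of this vendored package. As an editorial illustration (not part of the diff; the paths and the prioritized file name are placeholders), converting a tar layer into an eStargz blob looks roughly like this:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	f, err := os.Open("layer.tar") // may be plain, gzip, or zstd tar
	if err != nil {
		panic(err)
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		panic(err)
	}

	// Group the listed files as "prioritized" so runtimes can prefetch them.
	blob, err := estargz.Build(
		io.NewSectionReader(f, 0, info.Size()),
		estargz.WithPrioritizedFiles([]string{"app/server"}),
	)
	if err != nil {
		panic(err)
	}

	out, err := os.Create("layer.esgz")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, blob); err != nil {
		panic(err)
	}
	blob.Close() // DiffID is only valid after Close

	fmt.Println("TOC digest:", blob.TOCDigest())
	fmt.Println("DiffID:", blob.DiffID())
}
```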
+
+// divideEntries divides the passed entries into parts, creating at least the
+// number of groups specified by minPartsNum.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+	var estimatedSize int64
+	for _, e := range entries {
+		estimatedSize += e.header.Size
+	}
+	unitSize := estimatedSize / int64(minPartsNum)
+	var (
+		nextEnd = unitSize
+		offset  int64
+	)
+	set = append(set, []*entry{})
+	for _, e := range entries {
+		set[len(set)-1] = append(set[len(set)-1], e)
+		offset += e.header.Size
+		if offset > nextEnd {
+			set = append(set, []*entry{})
+			nextEnd += unitSize
+		}
+	}
+	return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If some prioritized files are specified, the list starts from these
+// files, keeping the order specified by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+	// Import tar file.
+	intar, err := importTar(in)
+	if err != nil {
+		return nil, fmt.Errorf("failed to sort: %w", err)
+	}
+
+	// Sort the tar file respecting the prioritized files list.
+	sorted := &tarFile{}
+	for _, l := range prioritized {
+		if err := moveRec(l, intar, sorted); err != nil {
+			if errors.Is(err, errNotFound) && missedPrioritized != nil {
+				*missedPrioritized = append(*missedPrioritized, l)
+				continue // allow not found
+			}
+			return nil, fmt.Errorf("failed to sort tar entries: %w", err)
+		}
+	}
+	if len(prioritized) == 0 {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     NoPrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	} else {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     PrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	}
+
+	// Dump all entries and concatenate them.
+	return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of tar archive that contains entries passed
+// through the arguments.
+func readerFromEntries(entries ...*entry) io.Reader {
+	pr, pw := io.Pipe()
+	go func() {
+		tw := tar.NewWriter(pw)
+		defer tw.Close()
+		for _, entry := range entries {
+			if err := tw.WriteHeader(entry.header); err != nil {
+				pw.CloseWithError(fmt.Errorf("failed to write tar header: %v", err))
+				return
+			}
+			if _, err := io.Copy(tw, entry.payload); err != nil {
+				pw.CloseWithError(fmt.Errorf("failed to write tar payload: %v", err))
+				return
+			}
+		}
+		pw.Close()
+	}()
+	return pr
+}
+
+func importTar(in io.ReaderAt) (*tarFile, error) {
+	tf := &tarFile{}
+	pw, err := newCountReadSeeker(in)
+	if err != nil {
+		return nil, fmt.Errorf("failed to make position watcher: %w", err)
+	}
+	tr := tar.NewReader(pw)
+
+	// Walk through all nodes.
+	for {
+		// Fetch and parse next header.
+		h, err := tr.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			} else {
+				return nil, fmt.Errorf("failed to parse tar file, %w", err)
+			}
+		}
+		switch cleanEntryName(h.Name) {
+		case PrefetchLandmark, NoPrefetchLandmark:
+			// Ignore existing landmark
+			continue
+		}
+
+		// Add entry. If it already exists, replace it.
+		if _, ok := tf.get(h.Name); ok {
+			tf.remove(h.Name)
+		}
+		tf.add(&entry{
+			header:  h,
+			payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
+		})
+	}
+
+	return tf, nil
+}
+
+func moveRec(name string, in *tarFile, out *tarFile) error {
+	name = cleanEntryName(name)
+	if name == "" { // root directory. stop recursion.
+ if e, ok := in.get(name); ok { + // entry of the root directory exists. we should move it as well. + // this case will occur if tar entries are prefixed with "./", "/", etc. + out.add(e) + in.remove(name) + } + return nil + } + + _, okIn := in.get(name) + _, okOut := out.get(name) + if !okIn && !okOut { + return fmt.Errorf("file: %q: %w", name, errNotFound) + } + + parent, _ := path.Split(strings.TrimSuffix(name, "/")) + if err := moveRec(parent, in, out); err != nil { + return err + } + if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { + if err := moveRec(e.header.Linkname, in, out); err != nil { + return err + } + } + if e, ok := in.get(name); ok { + out.add(e) + in.remove(name) + } + return nil +} + +type entry struct { + header *tar.Header + payload io.ReadSeeker +} + +type tarFile struct { + index map[string]*entry + stream []*entry +} + +func (f *tarFile) add(e *entry) { + if f.index == nil { + f.index = make(map[string]*entry) + } + f.index[cleanEntryName(e.header.Name)] = e + f.stream = append(f.stream, e) +} + +func (f *tarFile) remove(name string) { + name = cleanEntryName(name) + if f.index != nil { + delete(f.index, name) + } + var filtered []*entry + for _, e := range f.stream { + if cleanEntryName(e.header.Name) == name { + continue + } + filtered = append(filtered, e) + } + f.stream = filtered +} + +func (f *tarFile) get(name string) (e *entry, ok bool) { + if f.index == nil { + return nil, false + } + e, ok = f.index[cleanEntryName(name)] + return +} + +func (f *tarFile) dump() []*entry { + return f.stream +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} + +func fileSectionReader(file *os.File) (*io.SectionReader, error) { + info, err := file.Stat() + if err != nil { + return nil, err + } + return io.NewSectionReader(file, 0, info.Size()), nil +} + +func newTempFiles() *tempFiles { + return &tempFiles{} +} + +type tempFiles struct { + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once +} + +func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { + f, err := os.CreateTemp(dir, pattern) + if err != nil { + return nil, err + } + tf.filesMu.Lock() + tf.files = append(tf.files, f) + tf.filesMu.Unlock() + return f, nil +} + +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { + tf.filesMu.Lock() + defer tf.filesMu.Unlock() + var allErr []error + for _, f := range tf.files { + if err := f.Close(); err != nil { + allErr = append(allErr, err) + } + if err := os.Remove(f.Name()); err != nil { + allErr = append(allErr, err) + } + } + tf.files = nil + return errorutil.Aggregate(allErr) +} + +func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) { + pos := int64(0) + return &countReadSeeker{r: r, cPos: &pos}, nil +} + +type countReadSeeker struct { + r io.ReaderAt + cPos *int64 + + mu sync.Mutex +} + +func (cr *countReadSeeker) Read(p []byte) (int, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + n, err := cr.r.ReadAt(p, *cr.cPos) + if err == nil { + *cr.cPos += int64(n) + } + return n, err +} + +func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + switch whence { + default: + return 0, fmt.Errorf("Unknown whence: %v", whence) + case io.SeekStart: + case io.SeekCurrent: + offset += *cr.cPos + case io.SeekEnd: + return 0, fmt.Errorf("Unsupported whence: %v", whence) + } 
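+	// Only SeekStart and SeekCurrent are supported above: the wrapped
+	// io.ReaderAt has no tracked total size here, and the tar reader that
+	// drives this seeker only needs forward, current-relative seeks while
+	// skipping entry bodies.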
+ + if offset < 0 { + return 0, fmt.Errorf("invalid offset") + } + *cr.cPos = offset + return offset, nil +} + +func (cr *countReadSeeker) currentPos() int64 { + cr.mu.Lock() + defer cr.mu.Unlock() + + return *cr.cPos +} + +func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { + if org.Size() < 4 { + return org, nil + } + src := make([]byte, 4) + if _, err := org.Read(src); err != nil && err != io.EOF { + return nil, err + } + var dR io.Reader + if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { + // gzip + dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dgR.Close() + dR = io.Reader(dgR) + } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) { + // zstd + dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dzR.Close() + dR = io.Reader(dzR) + } else { + // uncompressed + return io.NewSectionReader(org, 0, org.Size()), nil + } + b, err := tmp.TempFile("", "uncompresseddata") + if err != nil { + return nil, err + } + if _, err := io.Copy(b, dR); err != nil { + return nil, err + } + return fileSectionReader(b) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go new file mode 100644 index 000000000..6de78b02d --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errorutil + +import ( + "errors" + "fmt" + "strings" +) + +// Aggregate combines a list of errors into a single new error. +func Aggregate(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + points := make([]string, len(errs)+1) + points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs)) + for i, err := range errs { + points[i+1] = fmt.Sprintf("* %s", err) + } + return errors.New(strings.Join(points, "\n\t")) + } +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go new file mode 100644 index 000000000..f4d554655 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -0,0 +1,1223 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. 
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/archive/tar"
+)
+
+// A Reader permits random access reads from a stargz file.
+type Reader struct {
+	sr        *io.SectionReader
+	toc       *JTOC
+	tocDigest digest.Digest
+
+	// m stores all non-chunk entries, keyed by name.
+	m map[string]*TOCEntry
+
+	// chunks stores all TOCEntry values for regular files that
+	// are split up. For a file with a single chunk, it's only
+	// stored in m.
+	chunks map[string][]*TOCEntry
+
+	decompressor Decompressor
+}
+
+type openOpts struct {
+	tocOffset     int64
+	decompressors []Decompressor
+	telemetry     *Telemetry
+}
+
+// OpenOption is an option used during opening the layer
+type OpenOption func(o *openOpts) error
+
+// WithTOCOffset option specifies the offset of the TOC
+func WithTOCOffset(tocOffset int64) OpenOption {
+	return func(o *openOpts) error {
+		o.tocOffset = tocOffset
+		return nil
+	}
+}
+
+// WithDecompressors option specifies decompressors to use.
+// Default is the gzip-based decompressor.
+func WithDecompressors(decompressors ...Decompressor) OpenOption {
+	return func(o *openOpts) error {
+		o.decompressors = decompressors
+		return nil
+	}
+}
+
+// WithTelemetry option specifies the telemetry hooks
+func WithTelemetry(telemetry *Telemetry) OpenOption {
+	return func(o *openOpts) error {
+		o.telemetry = telemetry
+		return nil
+	}
+}
+
+// MeasureLatencyHook is a func which takes a start time and records the diff
+type MeasureLatencyHook func(time.Time)
+
+// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record
+// the latency metrics of the respective steps of the estargz open operation. To be used with the WithTelemetry option of Open.
+type Telemetry struct {
+	GetFooterLatency      MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
+	GetTocLatency         MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds)
+	DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds)
+}
+
+// Open opens a stargz file for reading.
+// The behavior is configurable using options.
+//
+// Note that each entry name is normalized as the path that is relative to root.
+func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
+	var opts openOpts
+	for _, o := range opt {
+		if err := o(&opts); err != nil {
+			return nil, err
+		}
+	}
+
+	gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)}
+	decompressors := append(gzipCompressors, opts.decompressors...)
+
+	// Determine the size to fetch. Try to fetch as many bytes as possible.
+	fetchSize := maxFooterSize(sr.Size(), decompressors...)
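+	// An eStargz blob ends with a fixed-size footer that records the offset
+	// of the TOC (a tar entry named "stargz.index.json" describing every
+	// file and chunk). The single read below fetches the footer plus enough
+	// trailing bytes that the TOC itself may already be included, avoiding a
+	// second range request in the common case.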
+ if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize { + if maybeTocOffset > sr.Size() { + return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size()) + } + fetchSize = sr.Size() - maybeTocOffset + } + + start := time.Now() // before getting layer footer + footer := make([]byte, fetchSize) + if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil { + return nil, fmt.Errorf("error reading footer: %v", err) + } + if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil { + opts.telemetry.GetFooterLatency(start) + } + + var allErr []error + var found bool + var r *Reader + for _, d := range decompressors { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + maybeTocBytes := footer[:fOffset] + _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:]) + if err != nil { + allErr = append(allErr, err) + continue + } + if tocOffset >= 0 && tocSize <= 0 { + tocSize = sr.Size() - tocOffset - fSize + } + if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) { + maybeTocBytes = maybeTocBytes[:tocSize] + } + r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) + if err == nil { + found = true + break + } + allErr = append(allErr, err) + } + if !found { + return nil, errorutil.Aggregate(allErr) + } + if err := r.initFields(); err != nil { + return nil, fmt.Errorf("failed to initialize fields of entries: %v", err) + } + return r, nil +} + +// OpenFooter extracts and parses footer from the given blob. +// only supports gzip-based eStargz. +func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) { + if sr.Size() < FooterSize && sr.Size() < legacyFooterSize { + return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size()) + } + var footer [FooterSize]byte + if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil { + return 0, 0, fmt.Errorf("error reading footer: %v", err) + } + var allErr []error + for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + _, tocOffset, _, err := d.ParseFooter(footer[fOffset:]) + if err == nil { + return tocOffset, fSize, err + } + allErr = append(allErr, err) + } + return 0, 0, errorutil.Aggregate(allErr) +} + +// initFields populates the Reader from r.toc after decoding it from +// JSON. +// +// Unexported fields are populated and TOCEntry fields that were +// implicit in the JSON are populated. 
+func (r *Reader) initFields() error {
+	r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
+	r.chunks = make(map[string][]*TOCEntry)
+	var lastPath string
+	uname := map[int]string{}
+	gname := map[int]string{}
+	var lastRegEnt *TOCEntry
+	var chunkTopIndex int
+	for i, ent := range r.toc.Entries {
+		ent.Name = cleanEntryName(ent.Name)
+		switch ent.Type {
+		case "reg", "chunk":
+			if ent.Offset != r.toc.Entries[chunkTopIndex].Offset {
+				chunkTopIndex = i
+			}
+			ent.chunkTopIndex = chunkTopIndex
+		}
+		if ent.Type == "reg" {
+			lastRegEnt = ent
+		}
+		if ent.Type == "chunk" {
+			ent.Name = lastPath
+			r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+			if ent.ChunkSize == 0 && lastRegEnt != nil {
+				ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
+			}
+		} else {
+			lastPath = ent.Name
+
+			if ent.Uname != "" {
+				uname[ent.UID] = ent.Uname
+			} else {
+				ent.Uname = uname[ent.UID]
+			}
+			if ent.Gname != "" {
+				gname[ent.GID] = ent.Gname
+			} else {
+				ent.Gname = gname[ent.GID]
+			}
+
+			ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
+
+			if ent.Type == "dir" {
+				ent.NumLink++ // Parent dir links to this directory
+			}
+			r.m[ent.Name] = ent
+		}
+		if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
+			r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
+			r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+		}
+		if ent.ChunkSize == 0 && ent.Size != 0 {
+			ent.ChunkSize = ent.Size
+		}
+	}
+
+	// Populate children, add implicit directories:
+	for _, ent := range r.toc.Entries {
+		if ent.Type == "chunk" {
+			continue
+		}
+		// add "foo/":
+		//  add "foo" child to "" (creating "" if necessary)
+		//
+		// add "foo/bar/":
+		//  add "bar" child to "foo" (creating "foo" if necessary)
+		//
+		// add "foo/bar.txt":
+		//  add "bar.txt" child to "foo" (creating "foo" if necessary)
+		//
+		// add "a/b/c/d/e/f.txt":
+		//  create "a/b/c/d/e" node
+		//  add "f.txt" child to "e"
+
+		name := ent.Name
+		pdirName := parentDir(name)
+		if name == pdirName {
+			// This entry and its parent are the same.
+			// Ignore this to avoid an infinite reference loop.
+			// The example case where this can occur is when the tar contains the root
+			// directory itself (e.g. "./", "/").
+			continue
+		}
+		pdir := r.getOrCreateDir(pdirName)
+		ent.NumLink++ // at least one name(ent.Name) references this entry.
+		if ent.Type == "hardlink" {
+			org, err := r.getSource(ent)
+			if err != nil {
+				return err
+			}
+			org.NumLink++ // original entry is referenced by this ent.Name.
+			ent = org
+		}
+		pdir.addChild(path.Base(name), ent)
+	}
+
+	lastOffset := r.sr.Size()
+	for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+		e := r.toc.Entries[i]
+		if e.isDataType() {
+			e.nextOffset = lastOffset
+		}
+		if e.Offset != 0 && e.InnerOffset == 0 {
+			lastOffset = e.Offset
+		}
+	}
+
+	return nil
+}
+
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+	if ent.Type == "hardlink" {
+		org, ok := r.m[cleanEntryName(ent.LinkName)]
+		if !ok {
+			return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+		}
+		ent, err = r.getSource(org)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ent, nil
+}
+
+func parentDir(p string) string {
+	dir, _ := path.Split(p)
+	return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+	e, ok := r.m[d]
+	if !ok {
+		e = &TOCEntry{
+			Name:    d,
+			Type:    "dir",
+			Mode:    0755,
+			NumLink: 2, // The directory itself(.) and the parent link to this directory.
+		}
+		r.m[d] = e
+		if d != "" {
+			pdir := r.getOrCreateDir(parentDir(d))
+			pdir.addChild(path.Base(d), e)
+		}
+	}
+	return e
+}
+
+func (r *Reader) TOCDigest() digest.Digest {
+	return r.tocDigest
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digests and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns a TOCEntryVerifier which holds all chunk digests in the stargz blob.
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
+	// Verify the digest of TOC JSON
+	if r.tocDigest != tocDigest {
+		return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
+	}
+	return r.Verifiers()
+}
+
+// Verifiers returns the TOCEntryVerifier of this blob. Use VerifyTOC instead in most cases
+// because this doesn't verify the TOC.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
+	chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
+	regDigestMap := make(map[int64]digest.Digest)   // map from chunk offset to the reg file digest
+	var chunkDigestMapIncomplete bool
+	var regDigestMapIncomplete bool
+	var containsChunk bool
+	for _, e := range r.toc.Entries {
+		if e.Type != "reg" && e.Type != "chunk" {
+			continue
+		}
+
+		// offset must be unique in stargz blob
+		_, dOK := chunkDigestMap[e.Offset]
+		_, rOK := regDigestMap[e.Offset]
+		if dOK || rOK {
+			return nil, fmt.Errorf("offset %d found twice", e.Offset)
+		}
+
+		if e.Type == "reg" {
+			if e.Size == 0 {
+				continue // ignores empty file
+			}
+
+			// record the digest of regular file payload
+			if e.Digest != "" {
+				d, err := digest.Parse(e.Digest)
+				if err != nil {
+					return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
+				}
+				regDigestMap[e.Offset] = d
+			} else {
+				regDigestMapIncomplete = true
+			}
+		} else {
+			containsChunk = true // this layer contains "chunk" entries.
+		}
+
+		// "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of
+		// chunked file)
+		if e.ChunkDigest != "" {
+			d, err := digest.Parse(e.ChunkDigest)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
+			}
+			chunkDigestMap[e.Offset] = d
+		} else {
+			chunkDigestMapIncomplete = true
+		}
+	}
+
+	if chunkDigestMapIncomplete {
+		// Though some chunk digests are not found, if this layer doesn't contain
+		// "chunk"s and all digests of "reg" files are recorded, we can use them instead.
+		if !containsChunk && !regDigestMapIncomplete {
+			return &verifier{digestMap: regDigestMap}, nil
+		}
+		return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON")
+	}
+
+	return &verifier{digestMap: chunkDigestMap}, nil
+}
+
+// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
+// offset of the chunk.
+type verifier struct {
+	digestMap   map[int64]digest.Digest
+	digestMapMu sync.Mutex
+}
+
+// Verifier returns a content verifier specified by TOCEntry.
+func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
+	v.digestMapMu.Lock()
+	defer v.digestMapMu.Unlock()
+	d, ok := v.digestMap[ce.Offset]
+	if !ok {
+		return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
+			ce.Offset, ce.ChunkSize)
+	}
+	return d.Verifier(), nil
+}
+
+// ChunkEntryForOffset returns the TOCEntry containing the byte of the
+// named file at the given offset within the file.
+// Name must be an absolute path or one that is relative to root.
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { + name = cleanEntryName(name) + e, ok = r.Lookup(name) + if !ok || !e.isDataType() { + return nil, false + } + ents := r.chunks[name] + if len(ents) < 2 { + if offset >= e.ChunkSize { + return nil, false + } + return e, true + } + i := sort.Search(len(ents), func(i int) bool { + e := ents[i] + return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) + }) + if i == len(ents) { + return nil, false + } + return ents[i], true +} + +// Lookup returns the Table of Contents entry for the given path. +// +// To get the root directory, use the empty string. +// Path must be absolute path or one that is relative to root. +func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { + path = cleanEntryName(path) + if r == nil { + return + } + e, ok = r.m[path] + if ok && e.Type == "hardlink" { + var err error + e, err = r.getSource(e) + if err != nil { + return nil, false + } + } + return +} + +// OpenFile returns the reader of the specified file payload. +// +// Name must be absolute path or one that is relative to root. +func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) newFileReader(name string) (*fileReader, error) { + name = cleanEntryName(name) + ent, ok := r.Lookup(name) + if !ok { + // TODO: come up with some error plan. This is lazy: + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: os.ErrNotExist, + } + } + if ent.Type != "reg" { + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: errors.New("not a regular file"), + } + } + return &fileReader{ + r: r, + size: ent.Size, + ents: r.getChunks(ent), + }, nil +} + +func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + fr.preRead = preRead + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { + if ents, ok := r.chunks[ent.Name]; ok { + return ents + } + return []*TOCEntry{ent} +} + +type fileReader struct { + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries + preRead func(*TOCEntry, io.Reader) error +} + +func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { + if off >= fr.size { + return 0, io.EOF + } + if off < 0 { + return 0, errors.New("invalid offset") + } + var i int + if len(fr.ents) > 1 { + i = sort.Search(len(fr.ents), func(i int) bool { + return fr.ents[i].ChunkOffset >= off + }) + if i == len(fr.ents) { + i = len(fr.ents) - 1 + } + } + ent := fr.ents[i] + if ent.ChunkOffset > off { + if i == 0 { + return 0, errors.New("internal error; first chunk offset is non-zero") + } + ent = fr.ents[i-1] + } + + // If ent is a chunk of a large file, adjust the ReadAt + // offset by the chunk's offset. + off -= ent.ChunkOffset + + finalEnt := fr.ents[len(fr.ents)-1] + compressedOff := ent.Offset + // compressedBytesRemain is the number of compressed bytes in this + // file remaining, over 1+ chunks. 
+ compressedBytesRemain := finalEnt.NextOffset() - compressedOff + + sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain) + + const maxRead = 2 << 20 + var bufSize = maxRead + if compressedBytesRemain < maxRead { + bufSize = int(compressedBytesRemain) + } + + br := bufio.NewReaderSize(sr, bufSize) + if _, err := br.Peek(bufSize); err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) + } + + dr, err := fr.r.decompressor.Reader(br) + if err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) + } + defer dr.Close() + + if fr.preRead == nil { + if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil { + return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err) + } + return io.ReadFull(dr, p) + } + + var retN int + var retErr error + var found bool + var nr int64 + for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] { + if !e.isDataType() { + continue + } + if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset { + break + } + if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr { + return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err) + } + nr = e.InnerOffset + if e == ent { + found = true + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err) + } + retN, retErr = io.ReadFull(dr, p) + nr += off + int64(retN) + continue + } + cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)} + if err := fr.preRead(e, cr); err != nil { + return 0, fmt.Errorf("failed to pre read: %w", err) + } + nr += cr.n + } + if !found { + return 0, fmt.Errorf("fileReader.ReadAt: target entry not found") + } + return retN, retErr +} + +// A Writer writes stargz files. +// +// Use NewWriter to create a new Writer. +type Writer struct { + bw *bufio.Writer + cw *countWriter + toc *JTOC + diffHash hash.Hash // SHA-256 of uncompressed tar + + closed bool + gz io.WriteCloser + lastUsername map[int]string + lastGroupname map[int]string + compressor Compressor + + uncompressedCounter *countWriteFlusher + + // ChunkSize optionally controls the maximum number of bytes + // of data of a regular file that can be written in one gzip + // stream before a new gzip stream is started. + // Zero means to use a default, currently 4 MiB. + ChunkSize int + + // MinChunkSize optionally controls the minimum number of bytes + // of data must be written in one gzip stream before a new gzip + // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand. + MinChunkSize int + + needsOpenGzEntries map[string]struct{} +} + +// currentCompressionWriter writes to the current w.gz field, which can +// change throughout writing a tar entry. +// +// Additionally, it updates w's SHA-256 of the uncompressed bytes +// of the tar file. +type currentCompressionWriter struct{ w *Writer } + +func (ccw currentCompressionWriter) Write(p []byte) (int, error) { + ccw.w.diffHash.Write(p) + if ccw.w.gz == nil { + if err := ccw.w.condOpenGz(); err != nil { + return 0, err + } + } + return ccw.w.gz.Write(p) +} + +func (w *Writer) chunkSize() int { + if w.ChunkSize <= 0 { + return 4 << 20 + } + return w.ChunkSize +} + +// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob. +// TOC JSON and footer are removed. 
+func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
+	footerSize := c.FooterSize()
+	if sr.Size() < footerSize {
+		return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize)
+	}
+	footerOffset := sr.Size() - footerSize
+	footer := make([]byte, footerSize)
+	if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+		return nil, err
+	}
+	blobPayloadSize, _, _, err := c.ParseFooter(footer)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse footer: %w", err)
+	}
+	if blobPayloadSize < 0 {
+		blobPayloadSize = sr.Size()
+	}
+	return c.Reader(io.LimitReader(sr, blobPayloadSize))
+}
+
+// NewWriter returns a new stargz writer (gzip-based) writing to w.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriter(w io.Writer) *Writer {
+	return NewWriterLevel(w, gzip.BestCompression)
+}
+
+// NewWriterLevel returns a new stargz writer (gzip-based) writing to w.
+// The compression level is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
+	return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel))
+}
+
+// NewWriterWithCompressor returns a new stargz writer writing to w.
+// The compression method is configurable.
+//
+// The writer must be closed to write its trailing table of contents.
+func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
+	bw := bufio.NewWriter(w)
+	cw := &countWriter{w: bw}
+	return &Writer{
+		bw:                  bw,
+		cw:                  cw,
+		toc:                 &JTOC{Version: 1},
+		diffHash:            sha256.New(),
+		compressor:          c,
+		uncompressedCounter: &countWriteFlusher{},
+	}
+}
+
+// Close writes the stargz's table of contents and flushes all the
+// buffers, returning any error.
+func (w *Writer) Close() (digest.Digest, error) {
+	if w.closed {
+		return "", nil
+	}
+	defer func() { w.closed = true }()
+
+	if err := w.closeGz(); err != nil {
+		return "", err
+	}
+
+	// Write the TOC index and footer.
+	tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
+	if err != nil {
+		return "", err
+	}
+	if err := w.bw.Flush(); err != nil {
+		return "", err
+	}
+
+	return tocDigest, nil
+}
+
+func (w *Writer) closeGz() error {
+	if w.closed {
+		return errors.New("write on closed Writer")
+	}
+	if w.gz != nil {
+		if err := w.gz.Close(); err != nil {
+			return err
+		}
+		w.gz = nil
+	}
+	return nil
+}
+
+func (w *Writer) flushGz() error {
+	if w.closed {
+		return errors.New("flush on closed Writer")
+	}
+	if w.gz != nil {
+		if f, ok := w.gz.(interface {
+			Flush() error
+		}); ok {
+			return f.Flush()
+		}
+	}
+	return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+	if name == "" {
+		return ""
+	}
+	if *mp == nil {
+		*mp = make(map[int]string)
+	}
+	if (*mp)[id] == name {
+		return ""
+	}
+	(*mp)[id] = name
+	return name
+}
+
+func (w *Writer) condOpenGz() (err error) {
+	if w.gz == nil {
+		w.gz, err = w.compressor.Writer(w.cw)
+		if w.gz != nil {
+			w.gz = w.uncompressedCounter.register(w.gz)
+		}
+	}
+	return
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
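+//
+// A sketch of the overall write path (out and tarStream are placeholders):
+//
+//	w := NewWriter(out)
+//	if err := w.AppendTar(tarStream); err != nil {
+//		// handle error
+//	}
+//	tocDgst, err := w.Close() // Close writes the TOC JSON and footer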
+func (w *Writer) AppendTar(r io.Reader) error {
+	return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// This func differs from AppendTar in that it writes the input tar
+// stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns an
+// error because w cannot overwrite the TOC JSON with the one generated by w
+// without lossy modification. To avoid this error, if the input stream is known
+// to be stargz/estargz, you should decompress it and remove the TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+	return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+	var src io.Reader
+	br := bufio.NewReader(r)
+	if isGzip(br) {
+		zr, err := gzip.NewReader(br)
+		if err != nil {
+			return fmt.Errorf("failed to open gzip reader: %w", err)
+		}
+		src = zr
+	} else {
+		src = io.Reader(br)
+	}
+	dst := currentCompressionWriter{w}
+	var tw *tar.Writer
+	if !lossless {
+		tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+	}
+	tr := tar.NewReader(src)
+	if lossless {
+		tr.RawAccounting = true
+	}
+	prevOffset := w.cw.n
+	var prevOffsetUncompressed int64
+	for {
+		h, err := tr.Next()
+		if err == io.EOF {
+			if lossless {
+				if remain := tr.RawBytes(); len(remain) > 0 {
+					// Collect the remaining null bytes.
+					// https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
+					if _, err := dst.Write(remain); err != nil {
+						return err
+					}
+				}
+			}
+			break
+		}
+		if err != nil {
+			return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
+		}
+		if cleanEntryName(h.Name) == TOCTarName {
+			// It is possible for a layer to be "stargzified" twice during the
+			// distribution lifecycle. So we reserve "TOCTarName" here to avoid
+			// duplicated entries in the resulting layer.
+			if lossless {
+				// We cannot handle this in a lossless way.
+				return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
+			}
+			continue
+		}
+
+		xattrs := make(map[string][]byte)
+		const xattrPAXRecordsPrefix = "SCHILY.xattr."
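+		// PAX records prefixed with "SCHILY.xattr." carry extended attributes;
+		// for example, a record "SCHILY.xattr.user.foo" = "bar" ends up in the
+		// TOC as Xattrs["user.foo"] = []byte("bar").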
+ if h.PAXRecords != nil { + for k, v := range h.PAXRecords { + if strings.HasPrefix(k, xattrPAXRecordsPrefix) { + xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v) + } + } + } + ent := &TOCEntry{ + Name: h.Name, + Mode: h.Mode, + UID: h.Uid, + GID: h.Gid, + Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname), + Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname), + ModTime3339: formatModtime(h.ModTime), + Xattrs: xattrs, + } + if err := w.condOpenGz(); err != nil { + return err + } + if tw != nil { + if err := tw.WriteHeader(h); err != nil { + return err + } + } else { + if _, err := dst.Write(tr.RawBytes()); err != nil { + return err + } + } + switch h.Typeflag { + case tar.TypeLink: + ent.Type = "hardlink" + ent.LinkName = h.Linkname + case tar.TypeSymlink: + ent.Type = "symlink" + ent.LinkName = h.Linkname + case tar.TypeDir: + ent.Type = "dir" + case tar.TypeReg: + ent.Type = "reg" + ent.Size = h.Size + case tar.TypeChar: + ent.Type = "char" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeBlock: + ent.Type = "block" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeFifo: + ent.Type = "fifo" + default: + return fmt.Errorf("unsupported input tar entry %q", h.Typeflag) + } + + // We need to keep a reference to the TOC entry for regular files, so that we + // can fill the digest later. + var regFileEntry *TOCEntry + var payloadDigest digest.Digester + if h.Typeflag == tar.TypeReg { + regFileEntry = ent + payloadDigest = digest.Canonical.Digester() + } + + if h.Typeflag == tar.TypeReg && ent.Size > 0 { + var written int64 + totalSize := ent.Size // save it before we destroy ent + tee := io.TeeReader(tr, payloadDigest.Hash()) + for written < totalSize { + chunkSize := int64(w.chunkSize()) + remain := totalSize - written + if remain < chunkSize { + chunkSize = remain + } else { + ent.ChunkSize = chunkSize + } + + // We flush the underlying compression writer here to correctly calculate "w.cw.n". 
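+				// (Flush only drains buffered compressed bytes so w.cw.n is accurate;
+				// it does not terminate the gzip stream. closeGz below is what ends
+				// the stream when a new chunk stream has to be started.)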
+				if err := w.flushGz(); err != nil {
+					return err
+				}
+				if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
+					if err := w.closeGz(); err != nil {
+						return err
+					}
+					ent.Offset = w.cw.n
+					prevOffset = ent.Offset
+					prevOffsetUncompressed = w.uncompressedCounter.n
+				} else {
+					ent.Offset = prevOffset
+					ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
+				}
+
+				ent.ChunkOffset = written
+				chunkDigest := digest.Canonical.Digester()
+
+				if err := w.condOpenGz(); err != nil {
+					return err
+				}
+
+				teeChunk := io.TeeReader(tee, chunkDigest.Hash())
+				var out io.Writer
+				if tw != nil {
+					out = tw
+				} else {
+					out = dst
+				}
+				if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
+					return fmt.Errorf("error copying %q: %v", h.Name, err)
+				}
+				ent.ChunkDigest = chunkDigest.Digest().String()
+				w.toc.Entries = append(w.toc.Entries, ent)
+				written += chunkSize
+				ent = &TOCEntry{
+					Name: h.Name,
+					Type: "chunk",
+				}
+			}
+		} else {
+			w.toc.Entries = append(w.toc.Entries, ent)
+		}
+		if payloadDigest != nil {
+			regFileEntry.Digest = payloadDigest.Digest().String()
+		}
+		if tw != nil {
+			if err := tw.Flush(); err != nil {
+				return err
+			}
+		}
+	}
+	remainDest := io.Discard
+	if lossless {
+		remainDest = dst // Preserve the remaining bytes in lossless mode
+	}
+	_, err := io.Copy(remainDest, src)
+	return err
+}
+
+func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
+	if ent.Type != "reg" {
+		return false
+	}
+	if w.needsOpenGzEntries == nil {
+		return false
+	}
+	_, ok := w.needsOpenGzEntries[ent.Name]
+	return ok
+}
+
+// DiffID returns the SHA-256 of the uncompressed tar bytes.
+// It is only valid to call DiffID after Close.
+func (w *Writer) DiffID() string {
+	return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
+}
+
+func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
+	for _, d := range decompressors {
+		if s := d.FooterSize(); res < s && s <= blobSize {
+			res = s
+		}
+	}
+	return
+}
+
+func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
+	if tocOff < 0 {
+		// This means that the TOC isn't contained in the blob.
+		// We pass a nil reader to ParseTOC and expect it to acquire the TOC
+		// from an external location.
+ start := time.Now() + toc, tocDgst, err := d.ParseTOC(nil) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + if len(tocBytes) > 0 { + start := time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err == nil { + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + } + + start := time.Now() + tocBytes = make([]byte, tocSize) + if _, err := sr.ReadAt(tocBytes, tocOff); err != nil { + return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err) + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + start = time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil +} + +func formatModtime(t time.Time) string { + if t.IsZero() || t.Unix() == 0 { + return "" + } + return t.UTC().Round(time.Second).Format(time.RFC3339) +} + +func cleanEntryName(name string) string { + // Use path.Clean to consistently deal with path separators across platforms. + return strings.TrimPrefix(path.Clean("/"+name), "/") +} + +// countWriter counts how many bytes have been written to its wrapped +// io.Writer. +type countWriter struct { + w io.Writer + n int64 +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + return +} + +type countWriteFlusher struct { + io.WriteCloser + n int64 +} + +func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser { + wc.WriteCloser = w + return wc +} + +func (wc *countWriteFlusher) Write(p []byte) (n int, err error) { + n, err = wc.WriteCloser.Write(p) + wc.n += int64(n) + return +} + +func (wc *countWriteFlusher) Flush() error { + if f, ok := wc.WriteCloser.(interface { + Flush() error + }); ok { + return f.Flush() + } + return nil +} + +func (wc *countWriteFlusher) Close() error { + err := wc.WriteCloser.Close() + wc.WriteCloser = nil + return err +} + +// isGzip reports whether br is positioned right before an upcoming gzip stream. +// It does not consume any bytes from br. 
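+// (Per RFC 1952, a gzip member begins with ID1=0x1f, ID2=0x8b followed by the
+// compression method byte, 8 for DEFLATE; these are the three bytes peeked at
+// below.)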
+func isGzip(br *bufio.Reader) bool { + const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + ) + peek, _ := br.Peek(3) + return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate +} + +func positive(n int64) int64 { + if n < 0 { + return 0 + } + return n +} + +type countReader struct { + r io.Reader + n int64 +} + +func (cr *countReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + cr.n += int64(n) + return +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go new file mode 100644 index 000000000..f24afe32f --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -0,0 +1,237 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + + digest "github.com/opencontainers/go-digest" +) + +type gzipCompression struct { + *GzipCompressor + *GzipDecompressor +} + +func newGzipCompressionWithLevel(level int) Compression { + return &gzipCompression{ + &GzipCompressor{level}, + &GzipDecompressor{}, + } +} + +func NewGzipCompressor() *GzipCompressor { + return &GzipCompressor{gzip.BestCompression} +} + +func NewGzipCompressorWithLevel(level int) *GzipCompressor { + return &GzipCompressor{level} +} + +type GzipCompressor struct { + compressionLevel int +} + +func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) { + return gzip.NewWriterLevel(w, gc.compressionLevel) +} + +func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { + tocJSON, err := json.MarshalIndent(toc, "", "\t") + if err != nil { + return "", err + } + gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel) + gw := io.Writer(gz) + if diffHash != nil { + gw = io.MultiWriter(gz, diffHash) + } + tw := tar.NewWriter(gw) + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: TOCTarName, + Size: int64(len(tocJSON)), + }); err != nil { + return "", err + } + if _, err := tw.Write(tocJSON); err != nil { + return "", err + } + + if err := tw.Close(); err != nil { + return "", err + } + if err := gz.Close(); err != nil { + return "", err + } + if _, err := w.Write(gzipFooterBytes(off)); err != nil { + return "", err + } + return digest.FromBytes(tocJSON), nil +} + +// gzipFooterBytes returns the 51 bytes footer. 
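+// (Assuming the standard library's gzip framing, the 51 bytes break down as: a
+// 10-byte gzip header, a 2-byte XLEN field, a 26-byte extra field ("SG", a
+// 2-byte length, 16 hex digits, and "STARGZ"), a 5-byte empty stored DEFLATE
+// block, and the 8-byte CRC32/ISIZE trailer.)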
+func gzipFooterBytes(tocOff int64) []byte {
+	buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+	// Extra header indicating the offset of TOCJSON
+	// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+	header := make([]byte, 4)
+	header[0], header[1] = 'S', 'G'
+	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+	gz.Header.Extra = append(header, []byte(subfield)...)
+	gz.Close()
+	if buf.Len() != FooterSize {
+		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+	}
+	return buf.Bytes()
+}
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != FooterSize {
+		return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, err
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+	if si1 != 'S' || si2 != 'G' {
+		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
+	}
+	if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+		return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+	}
+	if string(subfield[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+	}
+	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("failed to parse toc offset: %w", err)
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+	return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != legacyFooterSize {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	if len(extra) != 16+len("STARGZ") {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+	}
+	if string(extra[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+	}
+	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+	return legacyFooterSize
+}
+
+func (gz
*LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	tr, err := decompressTOCEStargz(r)
+	if err != nil {
+		return nil, "", err
+	}
+	dgstr := digest.Canonical.Digester()
+	toc = new(JTOC)
+	if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+		return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+	}
+	if err := tr.Close(); err != nil {
+		return nil, "", err
+	}
+	return toc, dgstr.Digest(), nil
+}
+
+func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	zr, err := gzip.NewReader(r)
+	if err != nil {
+		return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
+	}
+	zr.Multistream(false)
+	tr := tar.NewReader(zr)
+	h, err := tr.Next()
+	if err != nil {
+		return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+	}
+	if h.Name != TOCTarName {
+		return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+	}
+	return readCloser{tr, zr.Close}, nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
new file mode 100644
index 000000000..0ca6fd75f
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -0,0 +1,2366 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+/*
+   Copyright 2019 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	"github.com/klauspost/compress/zstd"
+	digest "github.com/opencontainers/go-digest"
+)
+
+func init() {
+	rand.Seed(time.Now().UnixNano())
+}
+
+// TestingController is Compression with some helper methods necessary for testing.
+type TestingController interface {
+	Compression
+	TestStreams(t *testing.T, b []byte, streams []int64)
+	DiffIDOf(*testing.T, []byte) string
+	String() string
+}
+
+// CompressionTestSuite tests that this pkg, with the given controllers, can
+// build valid eStargz blobs and parse them.
+func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
+	t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
+	t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
+	t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...)
 })
+}
+
+type TestingControllerFactory func() TestingController
+
+const (
+	uncompressedType int = iota
+	gzipType
+	zstdType
+)
+
+var srcCompressions = []int{
+	uncompressedType,
+	gzipType,
+	zstdType,
+}
+
+var allowedPrefix = [4]string{"", "./", "/", "../"}
+
+// testBuild tests that the resulting stargz blob built by this pkg has the same
+// contents as a normal stargz blob.
+func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
+	tests := []struct {
+		name         string
+		chunkSize    int
+		minChunkSize []int
+		in           []tarEntry
+	}{
+		{
+			name:      "regfiles and directories",
+			chunkSize: 4,
+			in: tarOf(
+				file("foo", "test1"),
+				dir("foo2/"),
+				file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})),
+			),
+		},
+		{
+			name:      "empty files",
+			chunkSize: 4,
+			in: tarOf(
+				file("foo", "tttttt"),
+				file("foo_empty", ""),
+				file("foo2", "tttttt"),
+				file("foo_empty2", ""),
+				file("foo3", "tttttt"),
+				file("foo_empty3", ""),
+				file("foo4", "tttttt"),
+				file("foo_empty4", ""),
+				file("foo5", "tttttt"),
+				file("foo_empty5", ""),
+				file("foo6", "tttttt"),
+			),
+		},
+		{
+			name:         "various files",
+			chunkSize:    4,
+			minChunkSize: []int{0, 64000},
+			in: tarOf(
+				file("baz.txt", "bazbazbazbazbazbazbaz"),
+				file("foo1.txt", "a"),
+				file("bar/foo2.txt", "b"),
+				file("foo3.txt", "c"),
+				symlink("barlink", "test/bar.txt"),
+				dir("test/"),
+				dir("dev/"),
+				blockdev("dev/testblock", 3, 4),
+				fifo("dev/testfifo"),
+				chardev("dev/testchar1", 5, 6),
+				file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})),
+				dir("test2/"),
+				link("test2/bazlink", "baz.txt"),
+				chardev("dev/testchar2", 1, 2),
+			),
+		},
+		{
+			name:      "no contents",
+			chunkSize: 4,
+			in: tarOf(
+				file("baz.txt", ""),
+				symlink("barlink", "test/bar.txt"),
+				dir("test/"),
+				dir("dev/"),
+				blockdev("dev/testblock", 3, 4),
+				fifo("dev/testfifo"),
+				chardev("dev/testchar1", 5, 6),
+				file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})),
+				dir("test2/"),
+				link("test2/bazlink", "baz.txt"),
+				chardev("dev/testchar2", 1, 2),
+			),
+		},
+	}
+	for _, tt := range tests {
+		if len(tt.minChunkSize) == 0 {
+			tt.minChunkSize = []int{0}
+		}
+		for _, srcCompression := range srcCompressions {
+			srcCompression := srcCompression
+			for _, newCL := range controllers {
+				newCL := newCL
+				for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+					srcTarFormat := srcTarFormat
+					for _, prefix := range allowedPrefix {
+						prefix := prefix
+						for _, minChunkSize := range tt.minChunkSize {
+							minChunkSize := minChunkSize
+							t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
+								tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
+								// Test divideEntries()
+								entries, err := sortEntries(tarBlob, nil, nil) // identical order
+								if err != nil {
+									t.Fatalf("failed to parse tar: %v", err)
+								}
+								var merged []*entry
+								for _, part := range divideEntries(entries, 4) {
+									merged = append(merged, part...)
+								}
+								if !reflect.DeepEqual(entries, merged) {
+									for _, e := range entries {
+										t.Logf("Original: %v", e.header)
+									}
+									for _, e := range merged {
+										t.Logf("Merged: %v", e.header)
+									}
+									t.Errorf("divided entries couldn't be merged")
+									return
+								}
+
+								// Prepare sample data
+								cl1 := newCL()
+								wantBuf := new(bytes.Buffer)
+								sw := NewWriterWithCompressor(wantBuf, cl1)
+								sw.MinChunkSize = minChunkSize
+								sw.ChunkSize = tt.chunkSize
+								if err := sw.AppendTar(tarBlob); err != nil {
+									t.Fatalf("failed to append tar to want stargz: %v", err)
+								}
+								if _, err := sw.Close(); err != nil {
+									t.Fatalf("failed to prepare want stargz: %v", err)
+								}
+								wantData := wantBuf.Bytes()
+								want, err := Open(io.NewSectionReader(
+									bytes.NewReader(wantData), 0, int64(len(wantData))),
+									WithDecompressors(cl1),
+								)
+								if err != nil {
+									t.Fatalf("failed to parse the want stargz: %v", err)
+								}
+
+								// Prepare testing data
+								var opts []Option
+								if minChunkSize > 0 {
+									opts = append(opts, WithMinChunkSize(minChunkSize))
+								}
+								cl2 := newCL()
+								rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+									append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...)
+								if err != nil {
+									t.Fatalf("failed to build stargz: %v", err)
+								}
+								defer rc.Close()
+								gotBuf := new(bytes.Buffer)
+								if _, err := io.Copy(gotBuf, rc); err != nil {
+									t.Fatalf("failed to copy built stargz blob: %v", err)
+								}
+								gotData := gotBuf.Bytes()
+								got, err := Open(io.NewSectionReader(
+									bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
+									WithDecompressors(cl2),
+								)
+								if err != nil {
+									t.Fatalf("failed to parse the got stargz: %v", err)
+								}
+
+								// Check DiffID is properly calculated
+								rc.Close()
+								diffID := rc.DiffID()
+								wantDiffID := cl2.DiffIDOf(t, gotData)
+								if diffID.String() != wantDiffID {
+									t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+								}
+
+								// Compare as stargz
+								if !isSameVersion(t, cl1, wantData, cl2, gotData) {
+									t.Errorf("built stargz doesn't have the same TOC JSON as the original")
+									return
+								}
+								if !isSameEntries(t, want, got) {
+									t.Errorf("built stargz isn't the same as the original")
+									return
+								}
+
+								// Compare as tar.gz
+								if !isSameTarGz(t, cl1, wantData, cl2, gotData) {
+									t.Errorf("built stargz isn't the same tar.gz as the original")
+									return
+								}
+							})
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+	aGz, err := cla.Reader(bytes.NewReader(a))
+	if err != nil {
+		t.Fatalf("failed to read A: %v", err)
+	}
+	defer aGz.Close()
+	bGz, err := clb.Reader(bytes.NewReader(b))
+	if err != nil {
+		t.Fatalf("failed to read B: %v", err)
+	}
+	defer bGz.Close()
+
+	// Same as tar's Next() method but ignores landmarks and TOCJSON file
+	next := func(r *tar.Reader) (h *tar.Header, err error) {
+		for {
+			if h, err = r.Next(); err != nil {
+				return
+			}
+			if h.Name != PrefetchLandmark &&
+				h.Name != NoPrefetchLandmark &&
+				h.Name != TOCTarName {
+				return
+			}
+		}
+	}
+
+	aTar := tar.NewReader(aGz)
+	bTar := tar.NewReader(bGz)
+	for {
+		// Fetch and parse next header.
+		aH, aErr := next(aTar)
+		bH, bErr := next(bTar)
+		if aErr != nil || bErr != nil {
+			if aErr == io.EOF && bErr == io.EOF {
+				break
+			}
+			t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr)
+		}
+		if !reflect.DeepEqual(aH, bH) {
+			t.Logf("different header (A = %v; B = %v)", aH, bH)
+			return false
+		}
+		aFile, err := io.ReadAll(aTar)
+		if err != nil {
+			t.Fatal("failed to read tar payload of A")
+		}
+		bFile, err := io.ReadAll(bTar)
+		if err != nil {
+			t.Fatal("failed to read tar payload of B")
+		}
+		if !bytes.Equal(aFile, bFile) {
+			t.Logf("different tar payload (A = %q; B = %q)", string(aFile), string(bFile))
+			return false
+		}
+	}
+
+	return true
+}
+
+func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+	aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
+	if err != nil {
+		t.Fatalf("failed to parse A: %v", err)
+	}
+	bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
+	if err != nil {
+		t.Fatalf("failed to parse B: %v", err)
+	}
+	t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC))
+	t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC))
+	return aJTOC.Version == bJTOC.Version
+}
+
+func isSameEntries(t *testing.T, a, b *Reader) bool {
+	aroot, ok := a.Lookup("")
+	if !ok {
+		t.Fatalf("failed to get root of A")
+	}
+	broot, ok := b.Lookup("")
+	if !ok {
+		t.Fatalf("failed to get root of B")
+	}
+	aEntry := stargzEntry{aroot, a}
+	bEntry := stargzEntry{broot, b}
+	return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
+}
+
+func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+	buf := new(bytes.Buffer)
+	var w io.WriteCloser
+	var err error
+	if srcCompression == gzipType {
+		w = gzip.NewWriter(buf)
+	} else if srcCompression == zstdType {
+		w, err = zstd.NewWriter(buf)
+		if err != nil {
+			t.Fatalf("failed to init zstd writer: %v", err)
+		}
+	} else {
+		return src
+	}
+	src.Seek(0, io.SeekStart)
+	if _, err := io.Copy(w, src); err != nil {
+		t.Fatalf("failed to compress source")
+	}
+	if err := w.Close(); err != nil {
+		t.Fatalf("failed to finalize compress source")
+	}
+	data := buf.Bytes()
+	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+type stargzEntry struct {
+	e *TOCEntry
+	r *Reader
+}
+
+// contains checks if all child entries in "b" are also contained in "a".
+// This function also checks if the files/chunks contain the same contents among "a" and "b".
+func contains(t *testing.T, a, b stargzEntry) bool {
+	ae, ar := a.e, a.r
+	be, br := b.e, b.r
+	t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
+	if !equalEntry(ae, be) {
+		t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be)
+		return false
+	}
+	if ae.Type == "dir" {
+		t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name,
+			allChildrenName(ae), allChildrenName(be))
+		iscontain := true
+		ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool {
+			// Walk through all files on this stargz file.
+
+			if aChild.Name == PrefetchLandmark ||
+				aChild.Name == NoPrefetchLandmark {
+				return true // Ignore landmarks
+			}
+
+			// Ignore a TOCEntry of "./" (formatted as "" by stargz lib) on root directory
+			// because this points to the root directory itself.
+			if aChild.Name == "" && ae.Name == "" {
+				return true
+			}
+
+			bChild, ok := be.LookupChild(aBaseName)
+			if !ok {
+				t.Logf("%q (base: %q): not found in b: %v",
+					ae.Name, aBaseName, allChildrenName(be))
+				iscontain = false
+				return false
+			}
+
+			childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+			if !childcontain {
+				t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+				iscontain = false
+				return false
+			}
+			return true
+		})
+		return iscontain
+	} else if ae.Type == "reg" {
+		af, err := ar.OpenFile(ae.Name)
+		if err != nil {
+			t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+		}
+		bf, err := br.OpenFile(be.Name)
+		if err != nil {
+			t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+		}
+
+		var nr int64
+		for nr < ae.Size {
+			abytes, anext, aok := readOffset(t, af, nr, a)
+			bbytes, bnext, bok := readOffset(t, bf, nr, b)
+			if !aok && !bok {
+				break
+			} else if !(aok && bok) || anext != bnext {
+				t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+					ae.Name, be.Name, nr, aok, bok, anext, bnext)
+				return false
+			}
+			nr = anext
+			if !bytes.Equal(abytes, bbytes) {
+				t.Logf("%q != %q: different contents %v vs %v",
+					ae.Name, be.Name, string(abytes), string(bbytes))
+				return false
+			}
+		}
+		return true
+	}
+
+	return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+	e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+		children = append(children, baseName)
+		return true
+	})
+	return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+	// Here, we selectively compare fields that we are interested in.
+	return a.Name == b.Name &&
+		a.Type == b.Type &&
+		a.Size == b.Size &&
+		a.ModTime3339 == b.ModTime3339 &&
+		a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+		a.LinkName == b.LinkName &&
+		a.Mode == b.Mode &&
+		a.UID == b.UID &&
+		a.GID == b.GID &&
+		a.Uname == b.Uname &&
+		a.Gname == b.Gname &&
+		(a.Offset >= 0) == (b.Offset >= 0) &&
+		(a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+		a.DevMajor == b.DevMajor &&
+		a.DevMinor == b.DevMinor &&
+		a.NumLink == b.NumLink &&
+		reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+		// chunk-related information isn't compared in this function.
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"` + // ChunkSize int64 `json:"chunkSize,omitempty"` + // children map[string]*TOCEntry + a.Digest == b.Digest +} + +func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { + ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) + if !ok { + return nil, 0, false + } + data := make([]byte, ce.ChunkSize) + t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset()) + n, err := r.ReadAt(data, ce.ChunkOffset) + if err != nil { + t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v", + e.e.Name, ce.ChunkOffset, ce.ChunkSize, err) + } + if int64(n) != ce.ChunkSize { + t.Fatalf("unexpected copied data size %d; want %d", + n, ce.ChunkSize) + } + return data[:n], offset + ce.ChunkSize, true +} + +func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { + jtocData, err := json.Marshal(*tocJSON) + if err != nil { + t.Fatalf("failed to marshal TOC JSON: %v", err) + } + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil { + t.Fatalf("failed to read toc json blob: %v", err) + } + return buf.String() +} + +const chunkSize = 3 + +// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) + +// testDigestAndVerify runs specified checks against sample stargz blobs. +func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { + tests := []struct { + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + minChunkSize []int + }{ + { + name: "no-regfile", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + dir("test/"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + dir("test2/"), // modified + ), allowedPrefix[0])), + }, + }, + { + name: "small-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "bbb", dgstMap), + ) + }, + minChunkSize: []int{0, 64000}, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", ""), + file("foo.txt", "M"), // modified + dir("test/"), + file("test/bar.txt", "bbb"), + ), allowedPrefix[0])), + // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO + checkVerifyBrokenContentFail("foo.txt"), + }, + }, + { + name: "big-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified + file("foo.txt", "a"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + { + name: "with-non-regfiles", + minChunkSize: []int{0, 64000}, + tarInit: 
func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+				return tarOf(
+					regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
+					regDigest(t, "foo.txt", "a", dgstMap),
+					regDigest(t, "bar/foo2.txt", "b", dgstMap),
+					regDigest(t, "foo3.txt", "c", dgstMap),
+					symlink("barlink", "test/bar.txt"),
+					dir("test/"),
+					regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
+					dir("test2/"),
+					link("test2/bazlink", "baz.txt"),
+				)
+			},
+			checks: []check{
+				checkStargzTOC,
+				checkVerifyTOC,
+				checkVerifyInvalidStargzFail(buildTar(t, tarOf(
+					file("baz.txt", "bazbazbazbazbazbazbaz"),
+					file("foo.txt", "a"),
+					file("bar/foo2.txt", "b"),
+					file("foo3.txt", "c"),
+					symlink("barlink", "test/bar.txt"),
+					dir("test/"),
+					file("test/bar.txt", "testbartestbar"),
+					dir("test2/"),
+					link("test2/bazlink", "foo.txt"), // modified
+				), allowedPrefix[0])),
+				checkVerifyInvalidTOCEntryFail("test/bar.txt"),
+				checkVerifyBrokenContentFail("test/bar.txt"),
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		if len(tt.minChunkSize) == 0 {
+			tt.minChunkSize = []int{0}
+		}
+		for _, srcCompression := range srcCompressions {
+			srcCompression := srcCompression
+			for _, newCL := range controllers {
+				newCL := newCL
+				for _, prefix := range allowedPrefix {
+					prefix := prefix
+					for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+						srcTarFormat := srcTarFormat
+						for _, minChunkSize := range tt.minChunkSize {
+							minChunkSize := minChunkSize
+							t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
+								// Get original tar file and chunk digests
+								dgstMap := make(map[string]digest.Digest)
+								tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
+
+								cl := newCL()
+								rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+									WithChunkSize(chunkSize), WithCompression(cl))
+								if err != nil {
+									t.Fatalf("failed to convert stargz: %v", err)
+								}
+								tocDigest := rc.TOCDigest()
+								defer rc.Close()
+								buf := new(bytes.Buffer)
+								if _, err := io.Copy(buf, rc); err != nil {
+									t.Fatalf("failed to copy built stargz blob: %v", err)
+								}
+								newStargz := buf.Bytes()
+								// NoPrefetchLandmark is added during `Build`, which is expected behaviour.
+								dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+
+								for _, check := range tt.checks {
+									check(t, newStargz, tocDigest, dgstMap, cl, newCL)
+								}
+							})
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+// checkStargzTOC checks that the TOC JSON of the passed stargz has the expected
+// digest and contains valid chunks. It walks all entries in the stargz and
+// checks that all chunk digests stored in the TOC JSON match the actual contents.
+func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+	sgz, err := Open(
+		io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+		WithDecompressors(controller),
+	)
+	if err != nil {
+		t.Errorf("failed to parse converted stargz: %v", err)
+		return
+	}
+	digestMapTOC, err := listDigests(io.NewSectionReader(
+		bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+		controller,
+	)
+	if err != nil {
+		t.Fatalf("failed to list digest: %v", err)
+	}
+	found := make(map[string]bool)
+	for id := range dgstMap {
+		found[id] = false
+	}
+	zr, err := controller.Reader(bytes.NewReader(sgzData))
+	if err != nil {
+		t.Fatalf("failed to decompress converted stargz: %v", err)
+	}
+	defer zr.Close()
+	tr := tar.NewReader(zr)
+	for {
+		h, err := tr.Next()
+		if err != nil {
+			if err != io.EOF {
+				t.Errorf("failed to read tar entry: %v", err)
+				return
+			}
+			break
+		}
+		if h.Name == TOCTarName {
+			// Check the digest of TOC JSON based on the actual contents.
+			// We can be sure that TOC JSON exists in this archive because
+			// Open succeeded.
+			dgstr := digest.Canonical.Digester()
+			if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
+				t.Fatalf("failed to calculate digest of TOC JSON: %v",
+					err)
+			}
+			if dgstr.Digest() != tocDigest {
+				t.Errorf("invalid TOC JSON %q; want %q", dgstr.Digest(), tocDigest)
+			}
+			continue
+		}
+		if _, ok := sgz.Lookup(h.Name); !ok {
+			t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+			return
+		}
+		var n int64
+		for n < h.Size {
+			ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+			if !ok {
+				t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+					h.Name, n)
+				return
+			}
+
+			// Get the original digest to make sure the file contents are kept unchanged
+			// from the original tar, during the whole conversion steps.
+			id := chunkID(h.Name, n, ce.ChunkSize)
+			want, ok := dgstMap[id]
+			if !ok {
+				t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
+					h.Name, n, ce.ChunkSize, dgstMap)
+				return
+			}
+			found[id] = true
+
+			// Check the file contents
+			dgstr := digest.Canonical.Digester()
+			if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
+				t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
+					h.Name, n, ce.ChunkSize)
+			}
+			if want != dgstr.Digest() {
+				t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
+					h.Name, dgstr.Digest(), want)
+				return
+			}
+
+			// Check the digest stored in TOC JSON
+			dgstTOC, ok := digestMapTOC[ce.Offset]
+			if !ok {
+				t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
+					h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
+			}
+			if want != dgstTOC {
+				t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
+					h.Name, dgstTOC, want)
+				return
+			}
+
+			n += ce.ChunkSize
+		}
+	}
+
+	for id, ok := range found {
+		if !ok {
+			t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+		}
+	}
+}
+
+// checkVerifyTOC checks that the verification works for the TOC JSON of the
+// passed stargz. It walks all entries in the stargz and checks that the
+// verifications for all chunks work.
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+	sgz, err := Open(
+		io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+		WithDecompressors(controller),
+	)
+	if err != nil {
+		t.Errorf("failed to parse converted stargz: %v", err)
+		return
+	}
+	ev, err := sgz.VerifyTOC(tocDigest)
+	if err != nil {
+		t.Errorf("failed to verify stargz: %v", err)
+		return
+	}
+
+	found := make(map[string]bool)
+	for id := range dgstMap {
+		found[id] = false
+	}
+	zr, err := controller.Reader(bytes.NewReader(sgzData))
+	if err != nil {
+		t.Fatalf("failed to decompress converted stargz: %v", err)
+	}
+	defer zr.Close()
+	tr := tar.NewReader(zr)
+	for {
+		h, err := tr.Next()
+		if err != nil {
+			if err != io.EOF {
+				t.Errorf("failed to read tar entry: %v", err)
+				return
+			}
+			break
+		}
+		if h.Name == TOCTarName {
+			continue
+		}
+		if _, ok := sgz.Lookup(h.Name); !ok {
+			t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+			return
+		}
+		var n int64
+		for n < h.Size {
+			ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+			if !ok {
+				t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+					h.Name, n)
+				return
+			}
+
+			v, err := ev.Verifier(ce)
+			if err != nil {
+				t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n)
+			}
+
+			found[chunkID(h.Name, n, ce.ChunkSize)] = true
+
+			// Check the file contents
+			if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil {
+				t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+					h.Name, n, ce.ChunkSize)
+			}
+			if !v.Verified() {
+				t.Errorf("Invalid contents in converted stargz %q (verification should have succeeded)",
+					h.Name)
+				return
+			}
+			n += ce.ChunkSize
+		}
+	}
+
+	for id, ok := range found {
+		if !ok {
+			t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+		}
+	}
+}
+
+// checkVerifyInvalidTOCEntryFail checks that misconfigured TOC JSON is
+// detected during verification and that the verification returns an error.
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+		funcs := map[string]rewriteFunc{
+			"lost digest in an entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+				var found bool
+				for _, e := range toc.Entries {
+					if cleanEntryName(e.Name) == filename {
+						if e.Type != "reg" && e.Type != "chunk" {
+							t.Fatalf("entry %q to break must be regfile or chunk", filename)
+						}
+						if e.ChunkDigest == "" {
+							t.Fatalf("entry %q is already invalid", filename)
+						}
+						e.ChunkDigest = ""
+						found = true
+					}
+				}
+				if !found {
+					t.Fatalf("rewrite target not found")
+				}
+			},
+			"duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+				var (
+					sampleEntry *TOCEntry
+					targetEntry *TOCEntry
+				)
+				for _, e := range toc.Entries {
+					if e.Type == "reg" || e.Type == "chunk" {
+						if cleanEntryName(e.Name) == filename {
+							targetEntry = e
+						} else {
+							sampleEntry = e
+						}
+					}
+				}
+				if sampleEntry == nil {
+					t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+				}
+				if targetEntry == nil {
+					t.Fatalf("rewrite target not found")
+				}
+				targetEntry.Offset = sampleEntry.Offset
+			},
+		}
+
+		for name, rFunc := range funcs {
+			t.Run(name, func(t *testing.T) {
+				newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+				buf := new(bytes.Buffer)
+				if _, err := io.Copy(buf, newSgz); err != nil {
+					t.Fatalf("failed to get converted stargz")
+				}
+				isgz := buf.Bytes()
+
+				sgz, err := Open(
+					io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+					WithDecompressors(controller),
+				)
+				if err != nil {
+					t.Fatalf("failed to parse converted stargz: %v", err)
+					return
+				}
+				_, err = sgz.VerifyTOC(newTocDigest)
+				if err == nil {
+					t.Errorf("must fail for invalid TOC")
+					return
+				}
+			})
+		}
+	}
+}
+
+// checkVerifyInvalidStargzFail checks that the verification detects that the
+// given stargz file doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+		cl := newController()
+		rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
+		if err != nil {
+			t.Fatalf("failed to convert stargz: %v", err)
+		}
+		defer rc.Close()
+		buf := new(bytes.Buffer)
+		if _, err := io.Copy(buf, rc); err != nil {
+			t.Fatalf("failed to copy built stargz blob: %v", err)
+		}
+		mStargz := buf.Bytes()
+
+		sgz, err := Open(
+			io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+			WithDecompressors(cl),
+		)
+		if err != nil {
+			t.Fatalf("failed to parse converted stargz: %v", err)
+			return
+		}
+		_, err = sgz.VerifyTOC(tocDigest)
+		if err == nil {
+			t.Errorf("must fail for invalid TOC")
+			return
+		}
+	}
+}
+
+// checkVerifyBrokenContentFail checks that the verifier detects broken contents
+// that don't match the expected digest and returns an error.
+func checkVerifyBrokenContentFail(filename string) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+		// Parse stargz file
+		sgz, err := Open(
+			io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+			WithDecompressors(controller),
+		)
+		if err != nil {
+			t.Fatalf("failed to parse converted stargz: %v", err)
+			return
+		}
+		ev, err := sgz.VerifyTOC(tocDigest)
+		if err != nil {
+			t.Fatalf("failed to verify stargz: %v", err)
+			return
+		}
+
+		// Open the target file
+		sr, err := sgz.OpenFile(filename)
+		if err != nil {
+			t.Fatalf("failed to open file %q", filename)
+		}
+		ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+		if !ok {
+			t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+			return
+		}
+		if ce.ChunkSize == 0 {
+			t.Fatalf("file mustn't be empty")
+			return
+		}
+		data := make([]byte, ce.ChunkSize)
+		if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+			t.Errorf("failed to get data of a chunk of %q(offset=%d)",
+				filename, ce.ChunkOffset)
+		}
+
+		// Check the broken chunk (must fail)
+		v, err := ev.Verifier(ce)
+		if err != nil {
+			t.Fatalf("failed to get verifier for %q", filename)
+		}
+		broken := append([]byte{^data[0]}, data[1:]...)
+		if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+			t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+				filename, ce.ChunkOffset, ce.ChunkSize)
+		}
+		if v.Verified() {
+			t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+				filename, data, broken)
+		}
+	}
+}
+
+func chunkID(name string, offset, size int64) string {
+	return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+	decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+	if err != nil {
+		t.Fatalf("failed to extract TOC JSON: %v", err)
+	}
+
+	rewrite(t, decodedJTOC, sgz)
+
+	tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+	if err != nil {
+		t.Fatalf("failed to create toc and footer: %v", err)
+	}
+
+	// Reconstruct stargz file with the modified TOC JSON
+	if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+		t.Fatalf("failed to reset the seek position of stargz: %v", err)
+	}
+	return io.MultiReader(
+		io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+		tocFooter,                       // Rewritten TOC and footer
+	), tocDigest
+}
+
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+	decodedJTOC, _, err := parseStargz(sgz, controller)
+	if err != nil {
+		return nil, err
+	}
+	digestMap := make(map[int64]digest.Digest)
+	for _, e := range decodedJTOC.Entries {
+		if e.Type == "reg" || e.Type == "chunk" {
+			if e.Type == "reg" && e.Size == 0 {
+				continue // ignores empty file
+			}
+			if e.ChunkDigest == "" {
+				return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+					e.Name, e.Offset)
+			}
+			d, err := digest.Parse(e.ChunkDigest)
+			if err != nil {
+				return nil, err
+			}
+			digestMap[e.Offset] = d
+		}
+	}
+	return digestMap, nil
+}
+
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+	fSize := controller.FooterSize()
+	footer := make([]byte, fSize)
+	if
_, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil { + return nil, 0, fmt.Errorf("error reading footer: %w", err) + } + _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse footer: %w", err) + } + + // Decode the TOC JSON + var tocReader io.Reader + if tocOffset >= 0 { + tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + } + decodedJTOC, _, err = controller.ParseTOC(tocReader) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) + } + return decodedJTOC, tocOffset, nil +} + +func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { + const content = "Some contents" + invalidUtf8 := "\xff\xfe\xfd" + + xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} + sampleOwner := owner{uid: 50, gid: 100} + + data64KB := randomContents(64000) + + tests := []struct { + name string + chunkSize int + minChunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams + + wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz + wantFailOnLossLess bool + wantTOCVersion int // default = 1 + }{ + { + name: "empty", + in: tarOf(), + wantNumGz: 2, // (empty tar) + TOC + footer + want: checks( + numTOCEntries(0), + ), + }, + { + name: "1dir_1empty_file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", ""), + ), + wantNumGz: 3, // dir, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", 0), + entryHasChildren("foo", "bar.txt"), + hasFileDigest("foo/bar.txt", digestFor("")), + ), + }, + { + name: "1dir_1file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", content, xAttrFile), + ), + wantNumGz: 4, // var dir, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + hasFileDigest("foo/bar.txt", digestFor(content)), + hasFileContentsRange("foo/bar.txt", 0, content), + hasFileContentsRange("foo/bar.txt", 1, content[1:]), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar.txt"), + hasFileXattrs("foo/bar.txt", "foo", "bar"), + hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8), + ), + }, + { + name: "2meta_2file", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + }, + { + name: "3dir", + in: tarOf( + dir("bar/"), + dir("foo/"), + dir("foo/bar/"), + ), + wantNumGz: 3, // 3 dirs, TOC, footer + want: checks( + hasDirLinkCount("bar/", 2), + hasDirLinkCount("foo/", 3), + hasDirLinkCount("foo/bar/", 2), + ), + }, + { + name: "symlink", + in: tarOf( + dir("foo/"), + symlink("foo/bar", "../../x"), + ), + wantNumGz: 3, // metas + TOC + footer + want: checks( + numTOCEntries(2), + hasSymlink("foo/bar", "../../x"), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar"), + ), + }, + { + name: "chunked_file", + chunkSize: 4, + in: tarOf( + dir("foo/"), + file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), + ), + wantNumGz: 9, // dir + 
big.txt(6 chunks) + TOC + footer + want: checks( + numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file + hasDir("foo/"), + hasFileLen("foo/big.txt", len("This is such a big file")), + hasFileDigest("foo/big.txt", digestFor("This is such a big file")), + hasFileContentsRange("foo/big.txt", 0, "This is such a big file"), + hasFileContentsRange("foo/big.txt", 1, "his is such a big file"), + hasFileContentsRange("foo/big.txt", 2, "is is such a big file"), + hasFileContentsRange("foo/big.txt", 3, "s is such a big file"), + hasFileContentsRange("foo/big.txt", 4, " is such a big file"), + hasFileContentsRange("foo/big.txt", 5, "is such a big file"), + hasFileContentsRange("foo/big.txt", 6, "s such a big file"), + hasFileContentsRange("foo/big.txt", 7, " such a big file"), + hasFileContentsRange("foo/big.txt", 8, "such a big file"), + hasFileContentsRange("foo/big.txt", 9, "uch a big file"), + hasFileContentsRange("foo/big.txt", 10, "ch a big file"), + hasFileContentsRange("foo/big.txt", 11, "h a big file"), + hasFileContentsRange("foo/big.txt", 12, " a big file"), + hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""), + hasChunkEntries("foo/big.txt", 6), + ), + }, + { + name: "recursive", + in: tarOf( + dir("/", sampleOwner), + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // dirs, bar.txt alone, TOC, footer + want: checks( + maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt" + ), + }, + { + name: "block_char_fifo", + in: tarOf( + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "b", + Typeflag: tar.TypeBlock, + Devmajor: 123, + Devminor: 456, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "c", + Typeflag: tar.TypeChar, + Devmajor: 111, + Devminor: 222, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "f", + Typeflag: tar.TypeFifo, + Format: format, + }) + }), + ), + wantNumGz: 3, + want: checks( + lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}), + lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}), + lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}), + ), + }, + { + name: "modes", + in: tarOf( + dir("foo1/", 0755|os.ModeDir|os.ModeSetgid), + file("foo1/bar1", content, 0700|os.ModeSetuid), + file("foo1/bar2", content, 0755|os.ModeSetgid), + dir("foo2/", 0755|os.ModeDir|os.ModeSticky), + file("foo2/bar3", content, 0755|os.ModeSticky), + dir("foo3/", 0755|os.ModeDir), + file("foo3/bar4", content, os.FileMode(0700)), + file("foo3/bar5", content, os.FileMode(0755)), + ), + wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer + want: checks( + hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid), + hasMode("foo1/bar1", 0700|os.ModeSetuid), + hasMode("foo1/bar2", 0755|os.ModeSetgid), + hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky), + hasMode("foo2/bar3", 0755|os.ModeSticky), + hasMode("foo3/", 0755|os.ModeDir), + hasMode("foo3/bar4", os.FileMode(0700)), + hasMode("foo3/bar5", os.FileMode(0755)), + ), + }, + { + name: "lossy", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + 
file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error)
+			),
+			wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+			want: checks(
+				numTOCEntries(3),
+				hasDir("bar/"),
+				hasDir("foo/"),
+				hasFileLen("foo/bar.txt", len(content)),
+				entryHasChildren("", "bar", "foo"),
+				entryHasChildren("foo", "bar.txt"),
+				hasChunkEntries("foo/bar.txt", 1),
+				hasEntryOwner("bar/", sampleOwner),
+				hasEntryOwner("foo/", sampleOwner),
+				hasEntryOwner("foo/bar.txt", sampleOwner),
+			),
+			wantFailOnLossLess: true,
+		},
+		{
+			name: "hardlink should be replaced to the destination entry",
+			in: tarOf(
+				dir("foo/"),
+				file("foo/foo1", "test"),
+				link("foolink", "foo/foo1"),
+			),
+			wantNumGz: 4, // dir, foo1 + link, TOC, footer
+			want: checks(
+				mustSameEntry("foo/foo1", "foolink"),
+			),
+		},
+		{
+			name: "several_files_in_chunk",
+			minChunkSize: 8000,
+			in: tarOf(
+				dir("foo/"),
+				file("foo/foo1", data64KB),
+				file("foo2", "bb"),
+				file("foo22", "ccc"),
+				dir("bar/"),
+				file("bar/bar.txt", "aaa"),
+				file("foo3", data64KB),
+			),
+			// NOTE: we assume that the compressed "data64KB" is still larger than 8KB
+			wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
+			want: checks(
+				numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3
+				hasDir("foo/"),
+				hasDir("bar/"),
+				hasFileLen("foo/foo1", len(data64KB)),
+				hasFileLen("foo2", len("bb")),
+				hasFileLen("foo22", len("ccc")),
+				hasFileLen("bar/bar.txt", len("aaa")),
+				hasFileLen("foo3", len(data64KB)),
+				hasFileDigest("foo/foo1", digestFor(data64KB)),
+				hasFileDigest("foo2", digestFor("bb")),
+				hasFileDigest("foo22", digestFor("ccc")),
+				hasFileDigest("bar/bar.txt", digestFor("aaa")),
+				hasFileDigest("foo3", digestFor(data64KB)),
+				hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}),
+				hasFileContentsRange("foo/foo1", 0, data64KB),
+				hasFileContentsRange("foo2", 0, "bb"),
+				hasFileContentsRange("foo2", 1, "b"),
+				hasFileContentsRange("foo22", 0, "ccc"),
+				hasFileContentsRange("foo22", 1, "cc"),
+				hasFileContentsRange("foo22", 2, "c"),
+				hasFileContentsRange("bar/bar.txt", 0, "aaa"),
+				hasFileContentsRange("bar/bar.txt", 1, "aa"),
+				hasFileContentsRange("bar/bar.txt", 2, "a"),
+				hasFileContentsRange("foo3", 0, data64KB),
+				hasFileContentsRange("foo3", 1, data64KB[1:]),
+				hasFileContentsRange("foo3", 2, data64KB[2:]),
+				hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+				hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+			),
+		},
+		{
+			name: "several_files_in_chunk_chunked",
+			minChunkSize: 8000,
+			chunkSize: 32000,
+			in: tarOf(
+				dir("foo/"),
+				file("foo/foo1", data64KB),
+				file("foo2", "bb"),
+				dir("bar/"),
+				file("foo3", data64KB),
+			),
+			// NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
+			wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
+			want: checks(
+				numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks)
+				hasDir("foo/"),
+				hasDir("bar/"),
+				hasFileLen("foo/foo1", len(data64KB)),
+				hasFileLen("foo2", len("bb")),
+				hasFileLen("foo3", len(data64KB)),
+				hasFileDigest("foo/foo1", digestFor(data64KB)),
+				hasFileDigest("foo2", digestFor("bb")),
+				hasFileDigest("foo3", digestFor(data64KB)),
+				hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}),
+				hasFileContentsRange("foo/foo1", 0, data64KB),
+				hasFileContentsRange("foo/foo1", 1, data64KB[1:]),
+				hasFileContentsRange("foo/foo1", 2,
data64KB[2:]),
+				hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+				hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+				hasFileContentsRange("foo2", 0, "bb"),
+				hasFileContentsRange("foo2", 1, "b"),
+				hasFileContentsRange("foo3", 0, data64KB),
+				hasFileContentsRange("foo3", 1, data64KB[1:]),
+				hasFileContentsRange("foo3", 2, data64KB[2:]),
+				hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+				hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+			),
+		},
+	}
+
+	for _, tt := range tests {
+		for _, newCL := range controllers {
+			newCL := newCL
+			for _, prefix := range allowedPrefix {
+				prefix := prefix
+				for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+					srcTarFormat := srcTarFormat
+					for _, lossless := range []bool{true, false} {
+						t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
+							var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+							origTarDgstr := digest.Canonical.Digester()
+							tr = io.TeeReader(tr, origTarDgstr.Hash())
+							var stargzBuf bytes.Buffer
+							cl1 := newCL()
+							w := NewWriterWithCompressor(&stargzBuf, cl1)
+							w.ChunkSize = tt.chunkSize
+							w.MinChunkSize = tt.minChunkSize
+							if lossless {
+								err := w.AppendTarLossLess(tr)
+								if tt.wantFailOnLossLess {
+									if err != nil {
+										return // expected to fail
+									}
+									t.Fatalf("AppendTarLossLess was expected to fail")
+								}
+								if err != nil {
+									t.Fatalf("Append(lossless): %v", err)
+								}
+							} else {
+								if err := w.AppendTar(tr); err != nil {
+									t.Fatalf("Append: %v", err)
+								}
+							}
+							if _, err := w.Close(); err != nil {
+								t.Fatalf("Writer.Close: %v", err)
+							}
+							b := stargzBuf.Bytes()
+
+							if lossless {
+								// Check that the result blob preserves the original tar metadata
+								rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1)
+								if err != nil {
+									t.Errorf("failed to decompress blob: %v", err)
+									return
+								}
+								defer rc.Close()
+								resultDgstr := digest.Canonical.Digester()
+								if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+									t.Errorf("failed to read result decompressed blob: %v", err)
+									return
+								}
+								if resultDgstr.Digest() != origTarDgstr.Digest() {
+									t.Errorf("lossy compression occurred: digest=%v; want %v",
+										resultDgstr.Digest(), origTarDgstr.Digest())
+									return
+								}
+							}
+
+							diffID := w.DiffID()
+							wantDiffID := cl1.DiffIDOf(t, b)
+							if diffID != wantDiffID {
+								t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+							}
+
+							telemetry, checkCalled := newCalledTelemetry()
+							sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
+							r, err := Open(
+								sr,
+								WithDecompressors(cl1),
+								WithTelemetry(telemetry),
+							)
+							if err != nil {
+								t.Fatalf("stargz.Open: %v", err)
+							}
+							wantTOCVersion := 1
+							if tt.wantTOCVersion > 0 {
+								wantTOCVersion = tt.wantTOCVersion
+							}
+							if r.toc.Version != wantTOCVersion {
+								t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion)
+							}
+
+							footerSize := cl1.FooterSize()
+							footerOffset := sr.Size() - footerSize
+							footer := make([]byte, footerSize)
+							if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+								t.Errorf("failed to read footer: %v", err)
+							}
+							_, tocOffset, _, err := cl1.ParseFooter(footer)
+							if err != nil {
+								t.Errorf("failed to parse footer: %v", err)
+							}
+							if err := checkCalled(tocOffset >= 0); err != nil {
+								t.Errorf("telemetry failure: %v", err)
+							}
+
+							wantNumGz := tt.wantNumGz
+							if lossless && tt.wantNumGzLossLess > 0 {
+								wantNumGz =
tt.wantNumGzLossLess + } + streamOffsets := []int64{0} + prevOffset := int64(-1) + streams := 0 + for _, e := range r.toc.Entries { + if e.Offset > prevOffset { + streamOffsets = append(streamOffsets, e.Offset) + prevOffset = e.Offset + streams++ + } + } + streams++ // TOC + if tocOffset >= 0 { + // toc is in the blob + streamOffsets = append(streamOffsets, tocOffset) + } + streams++ // footer + streamOffsets = append(streamOffsets, footerOffset) + if streams != wantNumGz { + t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz) + } + + t.Logf("testing streams: %+v", streamOffsets) + cl1.TestStreams(t, b, streamOffsets) + + for _, want := range tt.want { + want.check(t, r) + } + }) + } + } + } + } + } +} + +type chunkInfo struct { + name string + data string +} + +func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) { + var getFooterLatencyCalled bool + var getTocLatencyCalled bool + var deserializeTocLatencyCalled bool + return &Telemetry{ + func(time.Time) { getFooterLatencyCalled = true }, + func(time.Time) { getTocLatencyCalled = true }, + func(time.Time) { deserializeTocLatencyCalled = true }, + }, func(needsGetTOC bool) error { + var allErr []error + if !getFooterLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) + } + if needsGetTOC { + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } + } + if !deserializeTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) + } + return errorutil.Aggregate(allErr) + } +} + +func digestFor(content string) string { + sum := sha256.Sum256([]byte(content)) + return fmt.Sprintf("sha256:%x", sum) +} + +type numTOCEntries int + +func (n numTOCEntries) check(t *testing.T, r *Reader) { + if r.toc == nil { + t.Fatal("nil TOC") + } + if got, want := len(r.toc.Entries), int(n); got != want { + t.Errorf("got %d TOC entries; want %d", got, want) + } + t.Logf("got TOC entries:") + for i, ent := range r.toc.Entries { + entj, _ := json.Marshal(ent) + t.Logf(" [%d]: %s\n", i, entj) + } + if t.Failed() { + t.FailNow() + } +} + +func checks(s ...stargzCheck) []stargzCheck { return s } + +type stargzCheck interface { + check(t *testing.T, r *Reader) +} + +type stargzCheckFn func(*testing.T, *Reader) + +func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } + +func maxDepth(max int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup("") + if !ok { + t.Fatal("root directory not found") + } + d, err := getMaxDepth(t, e, 0, 10*max) + if err != nil { + t.Errorf("failed to get max depth (wanted %d): %v", max, err) + return + } + if d != max { + t.Errorf("invalid depth %d; want %d", d, max) + return + } + }) +} + +func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { + if current > limit { + return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", + current, limit) + } + max = current + e.ForeachChild(func(baseName string, ent *TOCEntry) bool { + t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name) + d, err := getMaxDepth(t, ent, current+1, limit) + if err != nil { + rErr = err + return false + } + if d > max { + max = d + } + return true + }) + return +} + +func hasFileLen(file string, wantLen int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + 
t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } else if ent.Size != int64(wantLen) { + t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen) + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileXattrs(file, name, value string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } + if ent.Xattrs == nil { + t.Errorf("file %q has no xattrs", file) + return + } + valueFound, found := ent.Xattrs[name] + if !found { + t.Errorf("file %q has no xattr %q", file, name) + return + } + if string(valueFound) != value { + t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value) + } + + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileDigest(file string, digest string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("didn't find TOCEntry for file %q", file) + } + if ent.Digest != digest { + t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest) + } + }) +} + +func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + extraMap := make(map[string]chunkInfo) + for _, e := range extra { + extraMap[e.name] = e + } + var extraNames []string + for n := range extraMap { + extraNames = append(extraNames, n) + } + f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error { + t.Logf("On %q: got preread of %q", file, e.Name) + ex, ok := extraMap[e.Name] + if !ok { + t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames) + } + got, err := io.ReadAll(cr) + if err != nil { + t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err) + } + if ex.data != string(got) { + t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data)) + } + delete(extraMap, e.Name) + return nil + }) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + if len(extraMap) != 0 { + var exNames []string + for _, ex := range extraMap { + exNames = append(exNames, ex.name) + } + t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames) + } + }) +} + +func hasFileContentsRange(file string, offset int, want string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + f, err := r.OpenFile(file) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + }) +} + +func hasChunkEntries(file string, wantChunks int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("no file for %q", file) + } + if ent.Type != "reg" { + t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type) 
+ } + chunks := r.getChunks(ent) + if len(chunks) != wantChunks { + t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks) + return + } + f := chunks[0] + + var gotChunks []*TOCEntry + var last *TOCEntry + for off := int64(0); off < f.Size; off++ { + e, ok := r.ChunkEntryForOffset(file, off) + if !ok { + t.Errorf("no ChunkEntryForOffset at %d", off) + return + } + if last != e { + gotChunks = append(gotChunks, e) + last = e + } + } + if !reflect.DeepEqual(chunks, gotChunks) { + t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks) + } + + // And verify the NextOffset + for i := 0; i < len(gotChunks)-1; i++ { + ci := gotChunks[i] + cnext := gotChunks[i+1] + if ci.NextOffset() != cnext.Offset { + t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset) + } + } + }) +} + +func entryHasChildren(dir string, want ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + want := append([]string(nil), want...) + var got []string + ent, ok := r.Lookup(dir) + if !ok { + t.Fatalf("didn't find TOCEntry for dir node %q", dir) + } + for baseName := range ent.children { + got = append(got, baseName) + } + sort.Strings(got) + sort.Strings(want) + if !reflect.DeepEqual(got, want) { + t.Errorf("children of %q = %q; want %q", dir, got, want) + } + }) +} + +func hasDir(file string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasDirLinkCount(file string, count int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + return + } + if ent.NumLink != count { + t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasMode(file string, mode os.FileMode) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Stat().Mode() != mode { + t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode) + return + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasSymlink(file, target string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "symlink" { + t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type) + } else if ent.LinkName != target { + t.Errorf("link target of symlink %q is %q; want %q", file, ent.LinkName, target) + } + return + } + } + t.Errorf("symlink %q not found", file) + }) +} + +func lookupMatch(name string, want *TOCEntry) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup(name) + if !ok { + t.Fatalf("failed to Lookup entry %q", name) + } + if !reflect.DeepEqual(e, want) { + t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want) + } + + }) +} + +func hasEntryOwner(entry string, owner owner) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) + if !ok { + t.Errorf("entry %q not found", 
entry)
+			return
+		}
+		if ent.UID != owner.uid || ent.GID != owner.gid {
+			t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid)
+			return
+		}
+	})
+}
+
+func mustSameEntry(files ...string) stargzCheck {
+	return stargzCheckFn(func(t *testing.T, r *Reader) {
+		var first *TOCEntry
+		for _, f := range files {
+			if first == nil {
+				var ok bool
+				first, ok = r.Lookup(f)
+				if !ok {
+					t.Errorf("unknown first file on Lookup: %q", f)
+					return
+				}
+			}
+
+			// Test Lookup
+			e, ok := r.Lookup(f)
+			if !ok {
+				t.Errorf("unknown file on Lookup: %q", f)
+				return
+			}
+			if e != first {
+				t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
+				return
+			}
+
+			// Test LookupChild
+			pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
+			if !ok {
+				t.Errorf("failed to get parent of %q", f)
+				return
+			}
+			e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
+			if !ok {
+				t.Errorf("failed to get %q as the child of %+v", f, pe)
+				return
+			}
+			if e != first {
+				t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
+				return
+			}
+
+			// Test ForeachChild
+			pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
+				if baseName == filepath.Base(filepath.Clean(f)) {
+					if e != first {
+						t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
+						return false
+					}
+				}
+				return true
+			})
+		}
+	})
+}
+
+func viewContent(c []byte) string {
+	if len(c) < 100 {
+		return string(c)
+	}
+	return string(c[:50]) + "...(omit)..." + string(c[50:100])
+}
+
+func tarOf(s ...tarEntry) []tarEntry { return s }
+
+type tarEntry interface {
+	appendTar(tw *tar.Writer, prefix string, format tar.Format) error
+}
+
+type tarEntryFunc func(*tar.Writer, string, tar.Format) error
+
+func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
+	return f(tw, prefix, format)
+}
+
+func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+	format := tar.FormatUnknown
+	for _, opt := range opts {
+		switch v := opt.(type) {
+		case tar.Format:
+			format = v
+		default:
+			panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
+		}
+	}
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, ent := range ents {
+		if err := ent.appendTar(tw, prefix, format); err != nil {
+			t.Fatalf("building input tar: %v", err)
+		}
+	}
+	if err := tw.Close(); err != nil {
+		t.Errorf("closing write of input tar: %v", err)
+	}
+	data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to check that lossless mode preserves them
+	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+func dir(name string, opts ...interface{}) tarEntry {
+	return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+		var o owner
+		mode := os.FileMode(0755)
+		for _, opt := range opts {
+			switch v := opt.(type) {
+			case owner:
+				o = v
+			case os.FileMode:
+				mode = v
+			default:
+				return errors.New("unsupported opt")
+			}
+		}
+		if !strings.HasSuffix(name, "/") {
+			panic(fmt.Sprintf("missing trailing slash in dir %q", name))
+		}
+		tm, err := fileModeToTarMode(mode)
+		if err != nil {
+			return err
+		}
+		return tw.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeDir,
+			Name:     prefix + name,
+			Mode:     tm,
+			Uid:      o.uid,
+			Gid:      o.gid,
+			Format:   format,
+		})
+	})
+}
+
+// xAttr is a set of extended attributes to set on test files created with the file func.
+type xAttr map[string]string
+
+// owner is the owner to set on test files and directories with the file and dir functions.
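+// The zero value (uid 0, gid 0) is what dir and file fall back to when no
+// owner option is supplied.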
+type owner struct { + uid int + gid int +} + +func file(name, contents string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var xattrs xAttr + var o owner + mode := os.FileMode(0644) + for _, opt := range opts { + switch v := opt.(type) { + case xAttr: + xattrs = v + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if strings.HasSuffix(name, "/") { + return fmt.Errorf("bogus trailing slash in file %q", name) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + if len(xattrs) > 0 { + format = tar.FormatPAX // only PAX supports xattrs + } + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Mode: tm, + Xattrs: xattrs, + Size: int64(len(contents)), + Uid: o.uid, + Gid: o.gid, + Format: format, + }); err != nil { + return err + } + _, err = io.WriteString(tw, contents) + return err + }) +} + +func symlink(name, target string) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeSymlink, + Name: prefix + name, + Linkname: target, + Mode: 0644, + Format: format, + }) + }) +} + +func link(name string, linkname string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeLink, + Name: prefix + name, + Linkname: linkname, + ModTime: now, + Format: format, + }) + }) +} + +func chardev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeChar, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} + +func blockdev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeBlock, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} +func fifo(name string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeFifo, + Name: prefix + name, + ModTime: now, + Format: format, + }) + }) +} + +func prefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: PrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func noPrefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: NoPrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func regDigest(t *testing.T, name string, contentStr 
string, digestMap map[string]digest.Digest) tarEntry { + if digestMap == nil { + t.Fatalf("digest map mustn't be nil") + } + content := []byte(contentStr) + + var n int64 + for n < int64(len(content)) { + size := int64(chunkSize) + remain := int64(len(content)) - n + if remain < size { + size = remain + } + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil { + t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)", + string(content[n:n+size]), name, n, size) + } + digestMap[chunkID(name, n, size)] = dgstr.Digest() + n += size + } + + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Size: int64(len(content)), + Format: format, + }); err != nil { + return err + } + if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil { + return err + } + return nil + }) +} + +var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomContents(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = runes[rand.Intn(len(runes))] + } + return string(b) +} + +func fileModeToTarMode(mode os.FileMode) (int64, error) { + h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") + if err != nil { + return 0, err + } + return h.Mode, nil +} + +// fileInfoOnlyMode is os.FileMode that populates only file mode. +type fileInfoOnlyMode os.FileMode + +func (f fileInfoOnlyMode) Name() string { return "" } +func (f fileInfoOnlyMode) Size() int64 { return 0 } +func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } +func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } +func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } +func (f fileInfoOnlyMode) Sys() interface{} { return nil } + +func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { + if len(streams) == 0 { + return // nop + } + + wants := map[int64]struct{}{} + for _, s := range streams { + wants[s] = struct{}{} + } + + len0 := len(b) + br := bytes.NewReader(b) + zr := new(gzip.Reader) + t.Logf("got gzip streams:") + numStreams := 0 + for { + zoff := len0 - br.Len() + if err := zr.Reset(br); err != nil { + if err == io.EOF { + return + } + t.Fatalf("countStreams(gzip), Reset: %v", err) + } + zr.Multistream(false) + n, err := io.Copy(io.Discard, zr) + if err != nil { + t.Fatalf("countStreams(gzip), Copy: %v", err) + } + var extra string + if len(zr.Header.Extra) > 0 { + extra = fmt.Sprintf("; extra=%q", zr.Header.Extra) + } + t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra) + delete(wants, int64(zoff)) + numStreams++ + } +} + +func GzipDiffIDOf(t *testing.T, b []byte) string { + h := sha256.New() + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("diffIDOf(gzip): %v", err) + } + defer zr.Close() + if _, err := io.Copy(h, zr); err != nil { + t.Fatalf("diffIDOf(gzip).Copy: %v", err) + } + return fmt.Sprintf("sha256:%x", h.Sum(nil)) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go new file mode 100644 index 000000000..57e0aa614 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -0,0 +1,342 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "hash" + "io" + "os" + "path" + "time" + + digest "github.com/opencontainers/go-digest" +) + +const ( + // TOCTarName is the name of the JSON file in the tar archive in the + // table of contents gzip stream. + TOCTarName = "stargz.index.json" + + // FooterSize is the number of bytes in the footer + // + // The footer is an empty gzip stream with no compression and an Extra + // header of the form "%016xSTARGZ", where the 64 bit hex-encoded + // number is the offset to the gzip stream of JSON TOC. + // + // 51 comes from: + // + // 10 bytes gzip header + // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + // 2 bytes Extra: SI1 = 'S', SI2 = 'G' + // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + // 5 bytes flate header + // 8 bytes gzip footer + // (End of the eStargz blob) + // + // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz. + FooterSize = 51 + + // legacyFooterSize is the number of bytes in the legacy stargz footer. + // + // 47 comes from: + // + // 10 byte gzip header + + // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" + + // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset)) + // 5 byte flate header + // 8 byte gzip footer (two little endian uint32s: digest, size) + legacyFooterSize = 47 + + // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the + // digest of the TOC JSON. + // This annotation is valid only when it is specified in `.[]layers.annotations` + // of an image manifest. + TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" + + // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy + // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size + // to the runtime but current OCI image doesn't ship this information by default. So we store this + // to the special annotation. + StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size" + + // PrefetchLandmark is a file entry which indicates the end position of + // prefetch in the stargz file. + PrefetchLandmark = ".prefetch.landmark" + + // NoPrefetchLandmark is a file entry which indicates that no prefetch should + // occur in the stargz file. + NoPrefetchLandmark = ".no.prefetch.landmark" + + landmarkContents = 0xf +) + +// JTOC is the JSON-serialized table of contents index of the files in the stargz file. +type JTOC struct { + Version int `json:"version"` + Entries []*TOCEntry `json:"entries"` +} + +// TOCEntry is an entry in the stargz file's TOC (Table of Contents). +type TOCEntry struct { + // Name is the tar entry's name. 
It is the complete path
+	// stored in the tar file, not just the base name.
+	Name string `json:"name"`
+
+	// Type is one of "dir", "reg", "symlink", "hardlink", "char",
+	// "block", "fifo", or "chunk".
+	// The "chunk" type is used for regular file data chunks past the first
+	// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
+	// ChunkOffset, and ChunkSize populated.
+	Type string `json:"type"`
+
+	// Size, for regular files, is the logical size of the file.
+	Size int64 `json:"size,omitempty"`
+
+	// ModTime3339 is the modification time of the tar entry. Empty
+	// means zero or unknown. Otherwise it's in UTC RFC3339
+	// format. Use the ModTime method to access the time.Time value.
+	ModTime3339 string `json:"modtime,omitempty"`
+	modTime     time.Time
+
+	// LinkName, for symlinks and hardlinks, is the link target.
+	LinkName string `json:"linkName,omitempty"`
+
+	// Mode is the permission and mode bits.
+	Mode int64 `json:"mode,omitempty"`
+
+	// UID is the user ID of the owner.
+	UID int `json:"uid,omitempty"`
+
+	// GID is the group ID of the owner.
+	GID int `json:"gid,omitempty"`
+
+	// Uname is the username of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same UID.
+	Uname string `json:"userName,omitempty"`
+
+	// Gname is the group name of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same GID.
+	Gname string `json:"groupName,omitempty"`
+
+	// Offset, for regular files, provides the offset in the
+	// stargz file to the file's data bytes. See ChunkOffset and
+	// ChunkSize.
+	Offset int64 `json:"offset,omitempty"`
+
+	// InnerOffset is an optional field that indicates the uncompressed offset
+	// of this "reg" or "chunk" payload in a stream that starts at Offset.
+	// This field makes it possible to put multiple "reg" or "chunk" payloads
+	// in one chunk sharing the same Offset but with different InnerOffsets.
+	InnerOffset int64 `json:"innerOffset,omitempty"`
+
+	nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+	// DevMajor is the major device number for "char" and "block" types.
+	DevMajor int `json:"devMajor,omitempty"`
+
+	// DevMinor is the minor device number for "char" and "block" types.
+	DevMinor int `json:"devMinor,omitempty"`
+
+	// NumLink is the number of entry names pointing to this entry.
+	// Zero means one name references this entry.
+	// This field is calculated during runtime and not recorded in TOC JSON.
+	NumLink int `json:"-"`
+
+	// Xattrs are the extended attributes for the entry.
+	Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+	// Digest stores the OCI checksum for regular files' payload.
+	// It has the form "sha256:abcdef01234....".
+	Digest string `json:"digest,omitempty"`
+
+	// ChunkOffset is non-zero if this is a chunk of a large,
+	// regular file. If so, the Offset is where the gzip header of
+	// ChunkSize bytes at ChunkOffset in Name begins.
+	//
+	// In serialized form, a "chunkSize" JSON field of zero means
+	// that the chunk goes to the end of the file. After reading
+	// from the stargz TOC, though, the ChunkSize is initialized
+	// to a non-zero value for when Type is either "reg" or
+	// "chunk".
+	ChunkOffset int64 `json:"chunkOffset,omitempty"`
+	ChunkSize   int64 `json:"chunkSize,omitempty"`
+
+	// ChunkDigest stores an OCI digest of the chunk. This must be formed
+	// as "sha256:0123abcd...".
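+	// (This is the digest that a TOCEntryVerifier checks when verifying the
+	// chunk's contents.)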
+	ChunkDigest string `json:"chunkDigest,omitempty"`
+
+	children map[string]*TOCEntry
+
+	// chunkTopIndex is the index of the entry where Offset starts in the blob.
+	chunkTopIndex int
+}
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+	if e.children == nil {
+		e.children = make(map[string]*TOCEntry)
+	}
+	if child.Type == "dir" {
+		e.NumLink++ // Entry ".." in the subdirectory links to this directory
+	}
+	e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+	for name, ent := range e.children {
+		if !f(name, ent) {
+			return
+		}
+	}
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+	child, ok = e.children[baseName]
+	return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string       { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool        { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64        { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{}   { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+	// TOCEntry.Mode is tar.Header.Mode, so we can interpret these bits using the `tar` pkg.
+	m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+		(os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+	switch fi.e.Type {
+	case "dir":
+		m |= os.ModeDir
+	case "symlink":
+		m |= os.ModeSymlink
+	case "char":
+		m |= os.ModeDevice | os.ModeCharDevice
+	case "block":
+		m |= os.ModeDevice
+	case "fifo":
+		m |= os.ModeNamedPipe
+	}
+	return m
+}
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+	// Verifier provides a content verifier that can be used for verifying the
+	// contents of the specified TOCEntry.
+	Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used when creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+	Compressor
+	Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+	// Writer returns a WriteCloser to be used for writing a chunk to eStargz.
+	// Every time a chunk is written, the WriteCloser is closed and Writer is
+	// called again for writing the next chunk.
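+	// (For example, the gzip-based Compression shipped with this package opens
+	// a fresh gzip stream per Writer call, which is what keeps chunks
+	// independently decompressible.)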
+	//
+	// The returned writer should implement a "Flush() error" method that flushes
+	// any pending compressed data to the underlying writer.
+	Writer(w io.Writer) (WriteFlushCloser, error)
+
+	// WriteTOCAndFooter is called to write JTOC to the passed Writer.
+	// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
+	// WriteTOCAndFooter can optionally write anything that affects DiffID calculation
+	// (e.g. uncompressed TOC JSON).
+	//
+	// This function returns tocDgst, the digest of the TOC that will be used
+	// to verify this blob when it's parsed.
+	WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+	// Reader returns a ReadCloser to be used for decompressing file payload.
+	Reader(r io.Reader) (io.ReadCloser, error)
+
+	// FooterSize returns the size of the footer of this blob.
+	FooterSize() int64
+
+	// ParseFooter parses the footer and returns the offset and (compressed) size of the TOC.
+	// blobPayloadSize is the (compressed) size of the blob payload (i.e. the size from
+	// the top of the blob up to the TOC JSON).
+	//
+	// If tocOffset < 0, the TOC is assumed not to be contained in the blob and a nil
+	// reader is passed to ParseTOC; ParseTOC is then expected to acquire the TOC from
+	// an external location and return it.
+	//
+	// tocSize is optional. If tocSize <= 0, it defaults to the size of the range from
+	// tocOffset until the beginning of the footer (blob size - tocOffset - FooterSize).
+	// If blobPayloadSize < 0, blobPayloadSize becomes the blob size.
+	ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+	// ParseTOC parses the TOC from the passed reader. The reader provides the partial
+	// contents of the underlying blob in the range specified by the ParseFooter method.
+	//
+	// This function returns tocDgst, the digest of the TOC that will be used
+	// to verify this blob. It must match the value returned by
+	// Compressor.WriteTOCAndFooter that was used when creating this blob.
+	//
+	// If the tocOffset returned by ParseFooter is < 0, the TOC is assumed not to be
+	// contained in the blob; a nil reader is passed to ParseTOC, which is then expected
+	// to acquire the TOC from an external location and return it.
+	ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
+}
+
+type WriteFlushCloser interface {
+	io.WriteCloser
+	Flush() error
+}
diff --git a/vendor/github.com/containers/image/v5/LICENSE b/vendor/github.com/containers/image/v5/LICENSE
new file mode 100644
index 000000000..953563530
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/LICENSE
@@ -0,0 +1,189 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/image/v5/docker/daemon/client.go b/vendor/github.com/containers/image/v5/docker/daemon/client.go new file mode 100644 index 000000000..323a02fc0 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/daemon/client.go @@ -0,0 +1,85 @@ +package daemon + +import ( + "net/http" + "path/filepath" + + "github.com/containers/image/v5/types" + dockerclient "github.com/docker/docker/client" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + // The default API version to be used in case none is explicitly specified + defaultAPIVersion = "1.22" +) + +// NewDockerClient initializes a new API client based on the passed SystemContext. 
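+//
+// A minimal sketch of how a caller might use it (values are illustrative,
+// not defaults):
+//
+//	sys := &types.SystemContext{DockerDaemonHost: "tcp://127.0.0.1:2375"}
+//	c, err := newDockerClient(sys)
+//	if err != nil {
+//		return nil, err
+//	}
+//	defer c.Close()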
+func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { + host := dockerclient.DefaultDockerHost + if sys != nil && sys.DockerDaemonHost != "" { + host = sys.DockerDaemonHost + } + + // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. + // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s + // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket + // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. + // + // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. + // + // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set + // TLSClientConfig to nil. This can be achieved by using the form `http://` + url, err := dockerclient.ParseHostURL(host) + if err != nil { + return nil, err + } + var httpClient *http.Client + if url.Scheme != "unix" { + if url.Scheme == "http" { + httpClient = httpConfig() + } else { + hc, err := tlsConfig(sys) + if err != nil { + return nil, err + } + httpClient = hc + } + } + + return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) +} + +func tlsConfig(sys *types.SystemContext) (*http.Client, error) { + options := tlsconfig.Options{} + if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify { + options.InsecureSkipVerify = true + } + + if sys != nil && sys.DockerDaemonCertPath != "" { + options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem") + options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem") + options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem") + } + + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + CheckRedirect: dockerclient.CheckRedirect, + }, nil +} + +func httpConfig() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: nil, + }, + CheckRedirect: dockerclient.CheckRedirect, + } +} diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go new file mode 100644 index 000000000..f68981472 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -0,0 +1,154 @@ +package daemon + +import ( + "context" + "io" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type daemonImageDestination struct { + ref daemonReference + mustMatchRuntimeOS bool + *tarfile.Destination // Implements most of types.ImageDestination + archive *tarfile.Writer + // For talking to imageLoadGoroutine + goroutineCancel context.CancelFunc + statusChannel <-chan error + writer *io.PipeWriter + // Other state + committed bool // writer has been closed +} + +// newImageDestination returns a types.ImageDestination for the specified image reference. 
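+// Internally it wires an io.Pipe between the tarfile.Writer (fed via the
+// embedded tarfile.Destination) and imageLoadGoroutine, which streams the
+// archive to the daemon concurrently and reports the final status on
+// statusChannel.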
+func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { + if ref.ref == nil { + return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + namedTaggedRef, ok := ref.ref.(reference.NamedTagged) + if !ok { + return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) + } + + var mustMatchRuntimeOS = true + if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost { + mustMatchRuntimeOS = false + } + + c, err := newDockerClient(sys) + if err != nil { + return nil, errors.Wrap(err, "initializing docker engine client") + } + + reader, writer := io.Pipe() + archive := tarfile.NewWriter(writer) + // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. + statusChannel := make(chan error, 1) + + goroutineContext, goroutineCancel := context.WithCancel(ctx) + go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) + + return &daemonImageDestination{ + ref: ref, + mustMatchRuntimeOS: mustMatchRuntimeOS, + Destination: tarfile.NewDestination(sys, archive, namedTaggedRef), + archive: archive, + goroutineCancel: goroutineCancel, + statusChannel: statusChannel, + writer: writer, + committed: false, + }, nil +} + +// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel +func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { + err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") + defer func() { + logrus.Debugf("docker-daemon: sending done, status %v", err) + statusChannel <- err + }() + defer func() { + if err == nil { + reader.Close() + } else { + if err := reader.CloseWithError(err); err != nil { + logrus.Debugf("imageLoadGoroutine: Error during reader.CloseWithError: %v", err) + } + } + }() + + resp, err := c.ImageLoad(ctx, reader, true) + if err != nil { + err = errors.Wrap(err, "saving image to docker engine") + return + } + defer resp.Body.Close() +} + +// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved +func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression { + return types.PreserveOriginal +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *daemonImageDestination) MustMatchRuntimeOS() bool { + return d.mustMatchRuntimeOS +} + +// Close removes resources associated with an initialized ImageDestination, if any. +func (d *daemonImageDestination) Close() error { + if !d.committed { + logrus.Debugf("docker-daemon: Closing tar stream to abort loading") + // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing. 
+ // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including + // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the + // net/http version with native Context support in Go 1.7) do not always immediately cancel + // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and + // return early if the context is canceled without terminating the goroutine at all. + // So we need this CloseWithError to terminate sending the HTTP request Body + // immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending + // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all. + // Whether that works or not, closing the PipeWriter seems desirable in any case. + if err := d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")); err != nil { + return err + } + } + d.goroutineCancel() + + return nil +} + +func (d *daemonImageDestination) Reference() types.ImageReference { + return d.ref +} + +// Commit marks the process of storing the image as successful and asks for the image to be persisted. +// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list +// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the +// original manifest list digest, if desired. +// WARNING: This does not have any transactional semantics: +// - Uploaded data MAY be visible to others before Commit() is called +// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) +func (d *daemonImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { + logrus.Debugf("docker-daemon: Closing tar stream") + if err := d.archive.Close(); err != nil { + return err + } + if err := d.writer.Close(); err != nil { + return err + } + d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine. + + logrus.Debugf("docker-daemon: Waiting for status") + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-d.statusChannel: + return err + } +} diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go new file mode 100644 index 000000000..a6d8a6cf5 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "context" + + "github.com/containers/image/v5/docker/internal/tarfile" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +type daemonImageSource struct { + ref daemonReference + *tarfile.Source // Implements most of types.ImageSource +} + +// newImageSource returns a types.ImageSource for the specified image reference. +// The caller must call .Close() on the returned ImageSource. +// +// It would be great if we were able to stream the input tar as it is being +// sent; but Docker sends the top-level manifest, which determines which paths +// to look for, at the end, so we will need to seek back and re-read, several times.
+// (We could, perhaps, expect an exact sequence, assume that the first plaintext file +// is the config, and that the following len(RootFS) files are the layers, but that feels +// way too brittle.) +func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) { + c, err := newDockerClient(sys) + if err != nil { + return nil, errors.Wrap(err, "initializing docker engine client") + } + // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference. + // Either way ImageSave should create a tarball with exactly one image. + inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()}) + if err != nil { + return nil, errors.Wrap(err, "loading image from docker engine") + } + defer inputStream.Close() + + archive, err := tarfile.NewReaderFromStream(sys, inputStream) + if err != nil { + return nil, err + } + src := tarfile.NewSource(archive, true, nil, -1) + return &daemonImageSource{ + ref: ref, + Source: src, + }, nil +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (s *daemonImageSource) Reference() types.ImageReference { + return s.ref +} diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go new file mode 100644 index 000000000..4e4ed6881 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go @@ -0,0 +1,223 @@ +package daemon + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/policyconfiguration" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/image" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func init() { + transports.Register(Transport) +} + +// Transport is an ImageTransport for images managed by a local Docker daemon. +var Transport = daemonTransport{} + +type daemonTransport struct{} + +// Name returns the name of the transport, which must be unique among other transports. +func (t daemonTransport) Name() string { + return "docker-daemon" +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) { + return ParseReference(reference) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. +// scope passed to this function will not be ""; that value is always allowed. +func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { + // ID values cannot be effectively namespaced, and are clearly invalid host:port values. + if _, err := digest.Parse(scope); err == nil { + return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope) + } + + // FIXME?
We could be verifying the various character set and length restrictions + // from docker/distribution/reference.regexp.go, but other than that there + // are few semantically invalid strings. + return nil +} + +// daemonReference is an ImageReference for images managed by a local Docker daemon +// Exactly one of id and ref can be set. +// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) +// For daemonImageDestination, it must be a ref, which is NamedTagged. +// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. +// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) +type daemonReference struct { + id digest.Digest + ref reference.Named // !reference.IsNameOnly +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func ParseReference(refString string) (types.ImageReference, error) { + // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases. + // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). + + // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). + // reference.ParseAnyReference interprets such strings as digests. + if dgst, err := digest.Parse(refString); err == nil { + // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. + // Other digest references are ambiguous, so refuse them. + if dgst.Algorithm() != digest.Canonical { + return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) + } + return NewReference(dgst, nil) + } + + ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values + if err != nil { + return nil, err + } + if reference.FamiliarName(ref) == digest.Canonical.String() { + return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) + } + return NewReference("", ref) +} + +// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) +func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { + if id != "" && ref != nil { + return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") + } + if ref != nil { + if reference.IsNameOnly(ref) { + return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) + } + // A github.com/distribution/reference value can have a tag and a digest at the same time! + // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. + // This MAY be accepted in the future. + // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop + // the tag or the digest first?) 
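+ // (For illustration: a reference of the form name:tag@sha256:digest would be rejected by the check below.)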
+ _, isTagged := ref.(reference.NamedTagged) + _, isDigested := ref.(reference.Canonical) + if isTagged && isDigested { + return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported") + } + } + return daemonReference{ + id: id, + ref: ref, + }, nil +} + +func (ref daemonReference) Transport() types.ImageTransport { + return Transport +} + +// StringWithinTransport returns a string representation of the reference, which MUST be such that +// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. +// NOTE: The returned string is not promised to be equal to the original input to ParseReference; +// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. +// WARNING: Do not use the return value in the UI to describe an image; it does not contain the Transport().Name() prefix; +// instead, see transports.ImageName(). +func (ref daemonReference) StringWithinTransport() string { + switch { + case ref.id != "": + return ref.id.String() + case ref.ref != nil: + return reference.FamiliarString(ref.ref) + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// DockerReference returns a Docker reference associated with this reference +// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, +// not e.g. after redirect or alias processing), or nil if unknown/not applicable. +func (ref daemonReference) DockerReference() reference.Named { + return ref.ref // May be nil +} + +// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. +// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; +// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical +// (i.e. various references with exactly the same semantics should return the same configuration identity) +// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but +// not required/guaranteed that it will be a valid input to Transport().ParseReference(). +// Returns "" if configuration identities for these references are not supported. +func (ref daemonReference) PolicyConfigurationIdentity() string { + // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible. +	// But the existence of image IDs means that we can’t fully namespace the input: + // a single image can be namespaced either using the name or the ID depending on how it is named. + // + // That’s fairly unexpected, but we have to cope somehow. + // + // So, use the ordinary docker/policyconfiguration namespacing for named images. + // Image IDs all fall into the root namespace. + // Users can set up the root namespace to be either untrusted or rejected, + // and to set up specific trust for named namespaces. This allows verifying image + // identity when a name is known, and unnamed images would be untrusted or rejected. + switch { + case ref.id != "": + return "" // This still allows using the default "" scope to define a global policy for ID-identified images. + case ref.ref != nil: + res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) + if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
+ panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) + } + return res + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search +// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed +// in order, terminating on first match, and an implicit "" is always checked at the end. +// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), +// and each following element to be a prefix of the element preceding it. +func (ref daemonReference) PolicyConfigurationNamespaces() []string { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + switch { + case ref.id != "": + return []string{} + case ref.ref != nil: + return policyconfiguration.DockerReferenceNamespaces(ref.ref) + default: // Coverage: Should never happen, NewReference above should refuse such values. + panic("Internal inconsistency: daemonReference has empty id and nil ref") + } +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { + src, err := newImageSource(ctx, sys, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, sys, src) +} + +// NewImageSource returns a types.ImageSource for this reference. +// The caller must call .Close() on the returned ImageSource. +func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { + return newImageSource(ctx, sys, ref) +} + +// NewImageDestination returns a types.ImageDestination for this reference. +// The caller must call .Close() on the returned ImageDestination. +func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { + return newImageDestination(ctx, sys, ref) +} + +// DeleteImage deletes the named image from the registry, if supported. +func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { + // Should this just untag the image? Should this stop running containers? + // The semantics is not quite as clear as for remote repositories. + // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. 
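+ // (A caller that does need deletion can talk to the daemon directly; the engine client exposes ImageRemove for that, for example.)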
+ return errors.Errorf("Deleting images not implemented for docker-daemon: images") +} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go new file mode 100644 index 000000000..7e1580990 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go @@ -0,0 +1,200 @@ +package tarfile + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/streamdigest" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. +type Destination struct { + archive *Writer + repoTags []reference.NamedTagged + // Other state. + config []byte + sysCtx *types.SystemContext +} + +// NewDestination returns a tarfile.Destination adding images to the specified Writer. +func NewDestination(sys *types.SystemContext, archive *Writer, ref reference.NamedTagged) *Destination { + repoTags := []reference.NamedTagged{} + if ref != nil { + repoTags = append(repoTags, ref) + } + return &Destination{ + archive: archive, + repoTags: repoTags, + sysCtx: sys, + } +} + +// AddRepoTags adds the specified tags to the destination's repoTags. +func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { + d.repoTags = append(d.repoTags, tags...) +} + +// SupportedManifestMIMETypes tells which manifest mime types the destination supports +// If an empty slice or nil it's returned, then any mime type can be tried to upload +func (d *Destination) SupportedManifestMIMETypes() []string { + return []string{ + manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities. + } +} + +// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. +// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. +func (d *Destination) SupportsSignatures(ctx context.Context) error { + return errors.Errorf("Storing signatures for docker tar files is not supported") +} + +// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// uploaded to the image destination, true otherwise. +func (d *Destination) AcceptsForeignLayerURLs() bool { + return false +} + +// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise. +func (d *Destination) MustMatchRuntimeOS() bool { + return false +} + +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *Destination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. +} + +// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. 
+func (d *Destination) HasThreadSafePutBlob() bool { + // The code _is_ actually thread-safe, but apart from computing sizes/digests of layers where + // this is unknown in advance, the actual copy is serialized by d.archive, so there probably isn’t + // much benefit from concurrency, mostly just extra CPU, memory and I/O contention. + return false +} + +// PutBlob writes contents of stream and returns data representing the result (with all data filled in). +// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. +// inputInfo.Size is the expected length of stream, if known. +// May update cache. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { + // Ouch, we need to stream the blob into a temporary file just to determine the size. + // When the layer is decompressed, we also have to generate the digest on uncompressed data. + if inputInfo.Size == -1 || inputInfo.Digest == "" { + logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") + streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo) + if err != nil { + return types.BlobInfo{}, err + } + defer cleanup() + stream = streamCopy + logrus.Debugf("... streaming done") + } + + if err := d.archive.lock(); err != nil { + return types.BlobInfo{}, err + } + defer d.archive.unlock() + + // Maybe the blob has already been sent + ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo) + if err != nil { + return types.BlobInfo{}, err + } + if ok { + return reusedInfo, nil + } + + if isConfig { + buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return types.BlobInfo{}, errors.Wrap(err, "reading Config file stream") + } + d.config = buf + if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil { + return types.BlobInfo{}, errors.Wrap(err, "writing Config file") + } + } else { + if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil { + return types.BlobInfo{}, err + } + } + d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}) + return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil +} + +// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination +// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). +// info.Digest must not be empty. +// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may +// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be +// reflected in the manifest that will be written. +// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// May use and/or update cache. +func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { + if err := d.archive.lock(); err != nil { + return false, types.BlobInfo{}, err + } + defer d.archive.unlock() + + return d.archive.tryReusingBlobLocked(info) +} + +// PutManifest writes manifest to the destination. +// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so +// there can be no secondary manifests. +// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. +// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), +// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. +func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.New(`Manifest lists are not supported for docker tar files`) + } + // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, + // so the caller trying a different manifest kind would be pointless. + var man manifest.Schema2 + if err := json.Unmarshal(m, &man); err != nil { + return errors.Wrap(err, "parsing manifest") + } + if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType { + return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest") + } + + if err := d.archive.lock(); err != nil { + return err + } + defer d.archive.unlock() + + if err := d.archive.writeLegacyMetadataLocked(man.LayersDescriptors, d.config, d.repoTags); err != nil { + return err + } + + return d.archive.ensureManifestItemLocked(man.LayersDescriptors, man.ConfigDescriptor.Digest, d.repoTags) +} + +// PutSignatures would add the given signatures to the docker tarfile (currently not supported). +// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so +// there can be no secondary manifests. MUST be called after PutManifest (signatures reference manifest contents).
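+// In this implementation only an empty signature list is accepted, since the (docker save) tar format has no field in which signatures could be stored.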
+func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { + if instanceDigest != nil { + return errors.Errorf(`Manifest lists are not supported for docker tar files`) + } + if len(signatures) != 0 { + return errors.Errorf("Storing signatures for docker tar files is not supported") + } + return nil +} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go new file mode 100644 index 000000000..c77c002d1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go @@ -0,0 +1,268 @@ +package tarfile + +import ( + "archive/tar" + "encoding/json" + "io" + "os" + "path" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +// Reader is a ((docker save)-formatted) tar archive that allows random access to any component. +type Reader struct { + // None of the fields below are modified after the archive is created, until .Close(); + // this allows concurrent readers of the same archive. + path string // "" if the archive has already been closed. + removeOnClose bool // Remove file on close if true + Manifest []ManifestItem // Guaranteed to exist after the archive is created. +} + +// NewReaderFromFile returns a Reader for the specified path. +// The caller should call .Close() on the returned archive when done. +func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) { + file, err := os.Open(path) + if err != nil { + return nil, errors.Wrapf(err, "opening file %q", path) + } + defer file.Close() + + // If the file is not compressed, we can just return the file itself + // as a source. Otherwise we pass the stream to NewReaderFromStream. + stream, isCompressed, err := compression.AutoDecompress(file) + if err != nil { + return nil, errors.Wrapf(err, "detecting compression for file %q", path) + } + defer stream.Close() + if !isCompressed { + return newReader(path, false) + } + return NewReaderFromStream(sys, stream) +} + +// NewReaderFromStream returns a Reader for the specified inputStream, +// which can be either compressed or uncompressed. The caller can close the +// inputStream immediately after NewReaderFromStream returns. +// The caller should call .Close() on the returned archive when done. +func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) { + // Save inputStream to a temporary file + tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") + if err != nil { + return nil, errors.Wrap(err, "creating temporary file") + } + defer tarCopyFile.Close() + + succeeded := false + defer func() { + if !succeeded { + os.Remove(tarCopyFile.Name()) + } + }() + + // In order to be compatible with docker-load, we need to support + // auto-decompression (it's also a nice quality-of-life thing to avoid + // giving users really confusing "invalid tar header" errors). + uncompressedStream, _, err := compression.AutoDecompress(inputStream) + if err != nil { + return nil, errors.Wrap(err, "auto-decompressing input") + } + defer uncompressedStream.Close() + + // Copy the plain archive to the temporary file.
+ // + // TODO: This can take quite some time, and should ideally be cancellable + // using a context.Context. + if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil { + return nil, errors.Wrapf(err, "copying contents to temporary file %q", tarCopyFile.Name()) + } + succeeded = true + + return newReader(tarCopyFile.Name(), true) +} + +// newReader creates a Reader for the specified path and removeOnClose flag. +// The caller should call .Close() on the returned archive when done. +func newReader(path string, removeOnClose bool) (*Reader, error) { + // This is a valid enough archive, except Manifest is not yet filled. + r := Reader{ + path: path, + removeOnClose: removeOnClose, + } + succeeded := false + defer func() { + if !succeeded { + r.Close() + } + }() + + // We initialize Manifest immediately when constructing the Reader instead + // of later on-demand because every caller will need the data, and because doing it now + // removes the need to synchronize the access/creation of the data if the archive is later + // used from multiple goroutines to access different images. + + // FIXME? Do we need to deal with the legacy format? + bytes, err := r.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize) + if err != nil { + return nil, err + } + if err := json.Unmarshal(bytes, &r.Manifest); err != nil { + return nil, errors.Wrap(err, "decoding tar manifest.json") + } + + succeeded = true + return &r, nil +} + +// Close removes resources associated with an initialized Reader, if any. +func (r *Reader) Close() error { + path := r.path + r.path = "" // Mark the archive as closed + if r.removeOnClose { + return os.Remove(path) + } + return nil +} + +// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or +// both of which should be (nil, -1). +// On success, it returns the manifest item and an index of the matching tag, if a tag was used +// for matching; the index is -1 if a tag was not used. +func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) { + switch { + case ref != nil && sourceIndex != -1: + return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d", + ref.String(), sourceIndex) + + case ref != nil: + refString := ref.String() + for i := range r.Manifest { + for tagIndex, tag := range r.Manifest[i].RepoTags { + parsedTag, err := reference.ParseNormalizedNamed(tag) + if err != nil { + return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i) + } + if parsedTag.String() == refString { + return &r.Manifest[i], tagIndex, nil + } + } + } + return nil, -1, errors.Errorf("Tag %#v not found", refString) + + case sourceIndex != -1: + if sourceIndex >= len(r.Manifest) { + return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available", + sourceIndex, len(r.Manifest)) + } + return &r.Manifest[sourceIndex], -1, nil + + default: + if len(r.Manifest) != 1 { + return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest)) + } + return &r.Manifest[0], -1, nil + } +} + +// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component. +type tarReadCloser struct { + *tar.Reader + backingFile *os.File +} + +func (t *tarReadCloser) Close() error { + return t.backingFile.Close() +} + +// openTarComponent returns a ReadCloser for the specific file within the archive. 
+// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers), +// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough. +// It is safe to call this method from multiple goroutines simultaneously. +// The caller should call .Close() on the returned stream. +func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) { + // This is only a sanity check; if anyone did concurrently close r, this access is technically + // racy against the write in .Close(). + if r.path == "" { + return nil, errors.New("Internal error: trying to read an already closed tarfile.Reader") + } + + f, err := os.Open(r.path) + if err != nil { + return nil, err + } + succeeded := false + defer func() { + if !succeeded { + f.Close() + } + }() + + tarReader, header, err := findTarComponent(f, componentPath) + if err != nil { + return nil, err + } + if header == nil { + return nil, os.ErrNotExist + } + if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested + // We follow only one symlink, so no loops are possible. + if _, err := f.Seek(0, io.SeekStart); err != nil { + return nil, err + } + // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive, + // so we don't care. + tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname)) + if err != nil { + return nil, err + } + if header == nil { + return nil, os.ErrNotExist + } + } + + if !header.FileInfo().Mode().IsRegular() { + return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name) + } + succeeded = true + return &tarReadCloser{Reader: tarReader, backingFile: f}, nil +} + +// findTarComponent returns a header and a reader matching componentPath within inputFile, +// or (nil, nil, nil) if not found. +func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) { + t := tar.NewReader(inputFile) + componentPath = path.Clean(componentPath) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, nil, err + } + if path.Clean(h.Name) == componentPath { + return t, h, nil + } + } + return nil, nil, nil +} + +// readTarComponent returns full contents of componentPath. +// It is safe to call this method from multiple goroutines simultaneously.
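+// limit caps how many bytes are read; iolimits.ReadAtMost fails rather than silently truncating when a component exceeds it.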
+func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) { + file, err := r.openTarComponent(path) + if err != nil { + return nil, errors.Wrapf(err, "loading tar component %s", path) + } + defer file.Close() + bytes, err := iolimits.ReadAtMost(file, limit) + if err != nil { + return nil, err + } + return bytes, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go new file mode 100644 index 000000000..8e9be17c1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go @@ -0,0 +1,330 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "io" + "os" + "path" + "sync" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Source is a partial implementation of types.ImageSource for reading from tarPath. +type Source struct { + archive *Reader + closeArchive bool // .Close() the archive when the source is closed. + // If ref is nil and sourceIndex is -1, indicates the only image in the archive. + ref reference.NamedTagged // May be nil + sourceIndex int // May be -1 + // The following data is only available after ensureCachedDataIsPresent() succeeds + tarManifest *ManifestItem // nil if not available yet. + configBytes []byte + configDigest digest.Digest + orderedDiffIDList []digest.Digest + knownLayers map[digest.Digest]*layerInfo + // Other state + generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. + cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe + cacheDataResult error // Private state for ensureCachedDataIsPresent +} + +type layerInfo struct { + path string + size int64 +} + +// NewSource returns a tarfile.Source for an image in the specified archive matching ref +// and sourceIndex (or the only image if they are (nil, -1)). +// The archive will be closed if closeArchive +func NewSource(archive *Reader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *Source { + return &Source{ + archive: archive, + closeArchive: closeArchive, + ref: ref, + sourceIndex: sourceIndex, + } +} + +// ensureCachedDataIsPresent loads data necessary for any of the public accessors. +// It is safe to call this from multi-threaded code. +func (s *Source) ensureCachedDataIsPresent() error { + s.cacheDataLock.Do(func() { + s.cacheDataResult = s.ensureCachedDataIsPresentPrivate() + }) + return s.cacheDataResult +} + +// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent. +// Call ensureCachedDataIsPresent instead. +func (s *Source) ensureCachedDataIsPresentPrivate() error { + tarManifest, _, err := s.archive.ChooseManifestItem(s.ref, s.sourceIndex) + if err != nil { + return err + } + + // Read and parse config. + configBytes, err := s.archive.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize) + if err != nil { + return err + } + var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. 
+ if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { + return errors.Wrapf(err, "decoding tar config %s", tarManifest.Config) + } + if parsedConfig.RootFS == nil { + return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config) + } + + knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig) + if err != nil { + return err + } + + // Success; commit. + s.tarManifest = tarManifest + s.configBytes = configBytes + s.configDigest = digest.FromBytes(configBytes) + s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs + s.knownLayers = knownLayers + return nil +} + +// Close removes resources associated with an initialized Source, if any. +func (s *Source) Close() error { + if s.closeArchive { + return s.archive.Close() + } + return nil +} + +// TarManifest returns contents of manifest.json +func (s *Source) TarManifest() []ManifestItem { + return s.archive.Manifest +} + +func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { + // Collect layer data available in manifest and config. + if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { + return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) + } + knownLayers := map[digest.Digest]*layerInfo{} + unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. + for i, diffID := range parsedConfig.RootFS.DiffIDs { + if _, ok := knownLayers[diffID]; ok { + // Apparently it really can happen that a single image contains the same layer diff more than once. + // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter + // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. + continue + } + layerPath := path.Clean(tarManifest.Layers[i]) + if _, ok := unknownLayerSizes[layerPath]; ok { + return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) + } + li := &layerInfo{ // A new element in each iteration + path: layerPath, + size: -1, + } + knownLayers[diffID] = li + unknownLayerSizes[layerPath] = li + } + + // Scan the tar file to collect layer sizes. + file, err := os.Open(s.archive.path) + if err != nil { + return nil, err + } + defer file.Close() + t := tar.NewReader(file) + for { + h, err := t.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + layerPath := path.Clean(h.Name) + // FIXME: Cache this data across images in Reader. + if li, ok := unknownLayerSizes[layerPath]; ok { + // Since GetBlob will decompress layers that are compressed we need + // to do the decompression here as well, otherwise we will + // incorrectly report the size. Pretty critical, since tools like + // umoci always compress layer blobs. Obviously we only bother with + // the slower method of checking if it's compressed. 
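+ // (compression.AutoDecompress sniffs the stream; when no known compression format is detected it returns the input unchanged and reports isCompressed == false.)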
+ uncompressedStream, isCompressed, err := compression.AutoDecompress(t) + if err != nil { + return nil, errors.Wrapf(err, "auto-decompressing %s to determine its size", layerPath) + } + defer uncompressedStream.Close() + + uncompressedSize := h.Size + if isCompressed { + uncompressedSize, err = io.Copy(io.Discard, uncompressedStream) + if err != nil { + return nil, errors.Wrapf(err, "reading %s to find its size", layerPath) + } + } + li.size = uncompressedSize + delete(unknownLayerSizes, layerPath) + } + } + if len(unknownLayerSizes) != 0 { + return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. + } + + return knownLayers, nil +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary instances. +func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. + return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`) + } + if s.generatedManifest == nil { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, "", err + } + m := manifest.Schema2{ + SchemaVersion: 2, + MediaType: manifest.DockerV2Schema2MediaType, + ConfigDescriptor: manifest.Schema2Descriptor{ + MediaType: manifest.DockerV2Schema2ConfigMediaType, + Size: int64(len(s.configBytes)), + Digest: s.configDigest, + }, + LayersDescriptors: []manifest.Schema2Descriptor{}, + } + for _, diffID := range s.orderedDiffIDList { + li, ok := s.knownLayers[diffID] + if !ok { + return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) + } + m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ + Digest: diffID, // diffID is a digest of the uncompressed tarball + MediaType: manifest.DockerV2Schema2LayerMediaType, + Size: li.size, + }) + } + manifestBytes, err := json.Marshal(&m) + if err != nil { + return nil, "", err + } + s.generatedManifest = manifestBytes + } + return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil +} + +// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input. +type uncompressedReadCloser struct { + io.Reader + underlyingCloser func() error + uncompressedCloser func() error +} + +func (r uncompressedReadCloser) Close() error { + var res error + if err := r.uncompressedCloser(); err != nil { + res = err + } + if err := r.underlyingCloser(); err != nil && res == nil { + res = err + } + return res +} + +// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. +func (s *Source) HasThreadSafeGetBlob() bool { + return true +} + +// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. +// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. +func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { + if err := s.ensureCachedDataIsPresent(); err != nil { + return nil, 0, err + } + + if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. + return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil + } + + if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, + underlyingStream, err := s.archive.openTarComponent(li.path) + if err != nil { + return nil, 0, err + } + closeUnderlyingStream := true + defer func() { + if closeUnderlyingStream { + underlyingStream.Close() + } + }() + + // In order to handle the fact that digests != diffIDs (and thus that a + // caller which is trying to verify the blob will run into problems), + // we need to decompress blobs. This is a bit ugly, but it's a + // consequence of making everything addressable by their DiffID rather + // than by their digest... + // + // In particular, because the v2s2 manifest being generated uses + // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of + // layers not their _actual_ digest. The result is that copy/... will + // be verifying a "digest" which is not the actual layer's digest (but + // is instead the DiffID). + + uncompressedStream, _, err := compression.AutoDecompress(underlyingStream) + if err != nil { + return nil, 0, errors.Wrapf(err, "auto-decompressing blob %s", info.Digest) + } + + newStream := uncompressedReadCloser{ + Reader: uncompressedStream, + underlyingCloser: underlyingStream.Close, + uncompressedCloser: uncompressedStream.Close, + } + closeUnderlyingStream = false + + return newStream, li.size, nil + } + + return nil, 0, errors.Errorf("Unknown blob %s", info.Digest) +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as there can be no secondary manifests. +func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. + return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) + } + return [][]byte{}, nil +} + +// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer +// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() +// to read the image's layers. +// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil, +// as the primary manifest can not be a list, so there can be no secondary manifests. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. 
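+// This implementation always returns nil: the manifest generated in GetManifest above already describes layers by their uncompressed DiffIDs and sizes, so no updates are needed.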
+func (s *Source) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) { + return nil, nil +} diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go new file mode 100644 index 000000000..6e6ccd2d8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/types.go @@ -0,0 +1,28 @@ +package tarfile + +import ( + "github.com/containers/image/v5/manifest" + "github.com/opencontainers/go-digest" +) + +// Various data structures. + +// Based on github.com/docker/docker/image/tarexport/tarexport.go +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +// ManifestItem is an element of the array stored in the top-level manifest.json file. +type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API. + Config string + RepoTags []string + Layers []string + Parent imageID `json:",omitempty"` + LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` +} + +type imageID string diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go new file mode 100644 index 000000000..255f0d354 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -0,0 +1,381 @@ +package tarfile + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Writer allows creating a (docker save)-formatted tar archive containing one or more images. +type Writer struct { + mutex sync.Mutex + // ALL of the following members can only be accessed with the mutex held. + // Use Writer.lock() to obtain the mutex. + writer io.Writer + tar *tar.Writer // nil if the Writer has already been closed. + // Other state. + blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs + repositories map[string]map[string]string + legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent. + manifest []ManifestItem + manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above. +} + +// NewWriter returns a Writer for the specified io.Writer. +// The caller must eventually call .Close() on the returned object to create a valid archive. +func NewWriter(dest io.Writer) *Writer { + return &Writer{ + writer: dest, + tar: tar.NewWriter(dest), + blobs: make(map[digest.Digest]types.BlobInfo), + repositories: map[string]map[string]string{}, + legacyLayers: map[string]struct{}{}, + manifestByConfig: map[digest.Digest]int{}, + } +} + +// lock does some sanity checks and locks the Writer. +// If this function succeeds, the caller must call w.unlock. +// Do not use Writer.mutex directly. +func (w *Writer) lock() error { + w.mutex.Lock() + if w.tar == nil { + w.mutex.Unlock() + return errors.New("Internal error: trying to use an already closed tarfile.Writer") + } + return nil +} + +// unlock releases the lock obtained by Writer.lock +// Do not use Writer.mutex directly. 
+func (w *Writer) unlock() { + w.mutex.Unlock() +} + +// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata. +// info.Digest must not be empty. +// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. +// If the transport can not reuse the requested blob, tryReusingBlobLocked returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. +// The caller must have locked the Writer. +func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) { + if info.Digest == "" { + return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest") + } + if blob, ok := w.blobs[info.Digest]; ok { + return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil + } + return false, types.BlobInfo{}, nil +} + +// recordBlobLocked records metadata of a recorded blob, which must contain at least a digest and size. +// The caller must have locked the Writer. +func (w *Writer) recordBlobLocked(info types.BlobInfo) { + w.blobs[info.Digest] = info +} + +// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer. +// The caller must have locked the Writer. +func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error { + if _, ok := w.legacyLayers[layerID]; !ok { + // Create a symlink for the legacy format, where there is one subdirectory per layer ("image"). + // See also the comment in physicalLayerPath. + physicalLayerPath := w.physicalLayerPath(layerDigest) + if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil { + return errors.Wrap(err, "creating layer symbolic link") + } + + b := []byte("1.0") + if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil { + return errors.Wrap(err, "writing VERSION file") + } + + if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil { + return errors.Wrap(err, "writing config json file") + } + + w.legacyLayers[layerID] = struct{}{} + } + return nil +} + +// writeLegacyMetadataLocked writes legacy layer metadata and records tags for a single image.
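+// The legacy (pre-manifest.json) layout, as written by ensureSingleLegacyLayerLocked above, stores one directory per layer ID containing a VERSION file, a json config, and a layer.tar symlink to the physical layer.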
+func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2Descriptor, configBytes []byte, repoTags []reference.NamedTagged) error { + var chainID digest.Digest + lastLayerID := "" + for i, l := range layerDescriptors { + // The legacy format requires a config file per layer + layerConfig := make(map[string]interface{}) + + // The root layer doesn't have any parent + if lastLayerID != "" { + layerConfig["parent"] = lastLayerID + } + // The top layer configuration file is generated by using subpart of the image configuration + if i == len(layerDescriptors)-1 { + var config map[string]*json.RawMessage + err := json.Unmarshal(configBytes, &config) + if err != nil { + return errors.Wrap(err, "unmarshaling config") + } + for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} { + layerConfig[attr] = config[attr] + } + } + + // This chainID value matches the computation in docker/docker/layer.CreateChainID … + if chainID == "" { + chainID = l.Digest + } else { + chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String()) + } + // … but note that the image ID does not _exactly_ match docker/docker/image/v1.CreateID, primarily because + // we create the image configs differently in details. At least recent versions allocate new IDs on load, + // so this is fine as long as the IDs we use are unique / cannot loop. + // + // For intermediate images, we could just use the chainID as an image ID, but using a digest of ~the created + // config makes sure that everything uses the same “namespace”; a bit less efficient but clearer. + // + // Temporarily add the chainID to the config, only for the purpose of generating the image ID. + layerConfig["layer_id"] = chainID + b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point. + if err != nil { + return errors.Wrap(err, "marshaling layer config") + } + delete(layerConfig, "layer_id") + layerID := digest.Canonical.FromBytes(b).Hex() + layerConfig["id"] = layerID + + configBytes, err := json.Marshal(layerConfig) + if err != nil { + return errors.Wrap(err, "marshaling layer config") + } + + if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil { + return err + } + + lastLayerID = layerID + } + + if lastLayerID != "" { + for _, repoTag := range repoTags { + if val, ok := w.repositories[repoTag.Name()]; ok { + val[repoTag.Tag()] = lastLayerID + } else { + w.repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): lastLayerID} + } + } + } + return nil +} + +// checkManifestItemsMatch checks that a and b describe the same image, +// and returns an error if that’s not the case (which should never happen). +func checkManifestItemsMatch(a, b *ManifestItem) error { + if a.Config != b.Config { + return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config) + } + if len(a.Layers) != len(b.Layers) { + return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers) + } + for i := range a.Layers { + if a.Layers[i] != b.Layers[i] { + return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i]) + } + } + // Ignore RepoTags, that will be built later. + // Ignore Parent and LayerSources, which we don’t set to anything meaningful. 
+ return nil +} + +// ensureManifestItemLocked ensures that there is a manifest item pointing to (layerDescriptors, configDigest) with repoTags +// The caller must have locked the Writer. +func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error { + layerPaths := []string{} + for _, l := range layerDescriptors { + layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest)) + } + + var item *ManifestItem + newItem := ManifestItem{ + Config: w.configPath(configDigest), + RepoTags: []string{}, + Layers: layerPaths, + Parent: "", // We don’t have this information + LayerSources: nil, + } + if i, ok := w.manifestByConfig[configDigest]; ok { + item = &w.manifest[i] + if err := checkManifestItemsMatch(item, &newItem); err != nil { + return err + } + } else { + i := len(w.manifest) + w.manifestByConfig[configDigest] = i + w.manifest = append(w.manifest, newItem) + item = &w.manifest[i] + } + + knownRepoTags := map[string]struct{}{} + for _, repoTag := range item.RepoTags { + knownRepoTags[repoTag] = struct{}{} + } + for _, tag := range repoTags { + // For github.com/docker/docker consumers, this works just as well as + // refString := ref.String() + // because when reading the RepoTags strings, github.com/docker/docker/reference + // normalizes both of them to the same value. + // + // Doing it this way to include the normalized-out `docker.io[/library]` does make + // a difference for github.com/projectatomic/docker consumers, with the + // “Add --add-registry and --block-registry options to docker daemon” patch. + // These consumers treat reference strings which include a hostname and reference + // strings without a hostname differently. + // + // Using the host name here is more explicit about the intent, and it has the same + // effect as (docker pull) in projectatomic/docker, which tags the result using + // a hostname-qualified reference. + // See https://github.com/containers/image/issues/72 for a more detailed + // analysis and explanation. + refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag()) + + if _, ok := knownRepoTags[refString]; !ok { + item.RepoTags = append(item.RepoTags, refString) + knownRepoTags[refString] = struct{}{} + } + } + + return nil +} + +// Close writes all outstanding data about images to the archive, and finishes writing data +// to the underlying io.Writer. +// No more images can be added after this is called. +func (w *Writer) Close() error { + if err := w.lock(); err != nil { + return err + } + defer w.unlock() + + b, err := json.Marshal(&w.manifest) + if err != nil { + return err + } + if err := w.sendBytesLocked(manifestFileName, b); err != nil { + return err + } + + b, err = json.Marshal(w.repositories) + if err != nil { + return errors.Wrap(err, "marshaling repositories") + } + if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil { + return errors.Wrap(err, "writing config json file") + } + + if err := w.tar.Close(); err != nil { + return err + } + w.tar = nil // Mark the Writer as closed. + return nil +} + +// configPath returns a path we choose for storing a config with the specified digest. +// NOTE: This is an internal implementation detail, not a format property, and can change +// any time. +func (w *Writer) configPath(configDigest digest.Digest) string { + return configDigest.Hex() + ".json" +} + +// physicalLayerPath returns a path we choose for storing a layer with the specified digest +// (the actual path, i.e. 
a regular file, not a symlink that may be used in the legacy format).
+// NOTE: This is an internal implementation detail, not a format property, and can change
+// any time.
+func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
+	// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
+	// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
+	// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
+	// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
+	// in the root of the tarball.
+	return layerDigest.Hex() + ".tar"
+}
+
+type tarFI struct {
+	path      string
+	size      int64
+	isSymlink bool
+}
+
+func (t *tarFI) Name() string {
+	return t.path
+}
+func (t *tarFI) Size() int64 {
+	return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+	if t.isSymlink {
+		return os.ModeSymlink
+	}
+	return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+	return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+	return false
+}
+func (t *tarFI) Sys() interface{} {
+	return nil
+}
+
+// sendSymlinkLocked sends a symlink into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendSymlinkLocked(path string, target string) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar link %s -> %s", path, target)
+	return w.tar.WriteHeader(hdr)
+}
+
+// sendBytesLocked sends the contents of b into the tar stream as a file at path.
+// The caller must have locked the Writer.
+func (w *Writer) sendBytesLocked(path string, b []byte) error {
+	return w.sendFileLocked(path, int64(len(b)), bytes.NewReader(b))
+}
+
+// sendFileLocked sends a file into the tar stream.
+// The caller must have locked the Writer.
+func (w *Writer) sendFileLocked(path string, expectedSize int64, stream io.Reader) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("Sending as tar file %s", path)
+	if err := w.tar.WriteHeader(hdr); err != nil {
+		return err
+	}
+	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
+	size, err := io.Copy(w.tar, stream)
+	if err != nil {
+		return err
+	}
+	if size != expectedSize {
+		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
new file mode 100644
index 000000000..94e9e5f23
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go
@@ -0,0 +1,77 @@
+package policyconfiguration
+
+import (
+	"strings"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/pkg/errors"
+)
+
+// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
+// as a backend for ImageReference.PolicyConfigurationIdentity.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceIdentity(ref reference.Named) (string, error) {
+	res := ref.Name()
+	tagged, isTagged := ref.(reference.NamedTagged)
+	digested, isDigested := ref.(reference.Canonical)
+	switch {
+	case isTagged && isDigested: // Note that this CAN actually happen.
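+		// Editor's illustration of the non-error cases below (hypothetical values): the
+		// resulting identity is always the fully expanded name plus exactly one of
+		// ":tag" or "@digest", e.g.
+		//
+		//	docker.io/library/busybox:latest
+		//	docker.io/library/busybox@sha256:<hex>
+		//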
+		return "", errors.Errorf("Unexpected Docker reference %s with both a tag and a digest", reference.FamiliarString(ref))
+	case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
+		return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+	case isTagged:
+		res = res + ":" + tagged.Tag()
+	case isDigested:
+		res = res + "@" + digested.Digest().String()
+	default: // Coverage: The above was supposed to be exhaustive.
+		return "", errors.New("Internal inconsistency, unexpected default branch")
+	}
+	return res, nil
+}
+
+// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search,
+// as a backend for ImageReference.PolicyConfigurationNamespaces.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceNamespaces(ref reference.Named) []string {
+	// Look for a match of the repository, and then of the possible parent
+	// namespaces. Note that this only happens on the expanded host names
+	// and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox",
+	// then in its parent "docker.io/library"; in none of "busybox",
+	// un-namespaced "library" nor in "" supposedly implicitly representing "library/".
+	//
+	// ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last
+	// iteration matches the host name (for any namespace).
+	res := []string{}
+	name := ref.Name()
+	for {
+		res = append(res, name)
+
+		lastSlash := strings.LastIndex(name, "/")
+		if lastSlash == -1 {
+			break
+		}
+		name = name[:lastSlash]
+	}
+
+	// Strip port number if any, before appending to res slice.
+	// Currently, the most compatible behavior is to return
+	// example.com:8443/ns, example.com:8443, *.com.
+	// If a port number is not specified, the expected behavior would be
+	// example.com/ns, example.com, *.com
+	portNumColon := strings.Index(name, ":")
+	if portNumColon != -1 {
+		name = name[:portNumColon]
+	}
+
+	// Append wildcarded domains to res slice
+	for {
+		firstDot := strings.Index(name, ".")
+		if firstDot == -1 {
+			break
+		}
+		name = name[firstDot+1:]
+
+		res = append(res, "*."+name)
+	}
+	return res
+}
diff --git a/vendor/github.com/containers/image/v5/docker/reference/README.md b/vendor/github.com/containers/image/v5/docker/reference/README.md
new file mode 100644
index 000000000..3c4d74eb4
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/README.md
@@ -0,0 +1,2 @@
+This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8,
+except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset.
\ No newline at end of file
diff --git a/vendor/github.com/containers/image/v5/docker/reference/helpers.go b/vendor/github.com/containers/image/v5/docker/reference/helpers.go
new file mode 100644
index 000000000..978df7eab
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/docker/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+	if _, ok := ref.(NamedTagged); ok {
+		return false
+	}
+	if _, ok := ref.(Canonical); ok {
+		return false
+	}
+	return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named, familiarizing if needed.
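+// For example (editor's note, first two values taken from the normalize.go docs below):
+//
+//	FamiliarName of "docker.io/library/redis"   is "redis"
+//	FamiliarName of "docker.io/dmcgowan/myapp"  is "dmcgowan/myapp"
+//	FamiliarName of "quay.io/ns/img"            is "quay.io/ns/img" (unchanged)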
+func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go new file mode 100644 index 000000000..6a86ec64f --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/normalize.go @@ -0,0 +1,181 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, errors.New("invalid reference format: repository name must be lowercase") + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. 
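+			// Editor's illustration: "busybox:latest@sha256:<hex…>" normalizes to a reference
+			// that is both Tagged and Canonical; the two constructor calls below rebuild it as
+			// "docker.io/library/busybox@sha256:<hex…>", dropping the tag.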
+ newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. +func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. +func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go new file mode 100644 index 000000000..8c0c23b2f --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. 
+// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. 
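+//
+// Editor's sketch (a hypothetical round trip through the Field wrapper):
+//
+//	ref, _ := Parse("docker.io/library/busybox:latest")
+//	f := AsField(ref)
+//	b, _ := f.MarshalText() // []byte("docker.io/library/busybox:latest")
+//	var g Field
+//	_ = g.UnmarshalText(b) // g.Reference() is an equivalent reference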
+func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. 
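+//
+// Editor's illustration: ParseNamed("docker.io/library/busybox:latest") succeeds, while
+// ParseNamed("busybox:latest") returns ErrNameNotCanonical, because normalization would
+// expand the input to "docker.io/library/busybox:latest".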
+func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
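+//
+// Editor's sketch (a hypothetical round trip through the constructors above):
+//
+//	n, _ := WithName("docker.io/library/busybox")
+//	t, _ := WithTag(n, "latest") // t.String() == "docker.io/library/busybox:latest"
+//	TrimNamed(t).Name()          // "docker.io/library/busybox" again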
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go new file mode 100644 index 000000000..7b15871f7 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go @@ -0,0 +1,6 @@ +package reference + +// Return true if the specified string fully matches `IdentifierRegexp`. +func IsFullIdentifier(s string) bool { + return anchoredIdentifierRegexp.MatchString(s) +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go new file mode 100644 index 000000000..786034932 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. 
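+	// For example (editor's note): "foo", "foo_bar", "foo__bar", "foo-bar" and "foo.bar"
+	// are valid components, while "Foo", "-foo" and "foo_" are not.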
+ nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. 
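+//
+// Editor's illustration: expanded by hand, the combinators below compose plain strings, e.g.
+//
+//	anchored(TagRegexp) => ^[\w][\w.-]{0,127}$
+//
+// which is exactly how anchoredTagRegexp above is defined.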
+func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/containers/image/v5/image/docker_list.go b/vendor/github.com/containers/image/v5/image/docker_list.go new file mode 100644 index 000000000..4fe84413c --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_list.go @@ -0,0 +1,34 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + list, err := manifest.Schema2ListFromManifest(manblob) + if err != nil { + return nil, errors.Wrapf(err, "parsing schema2 manifest list") + } + targetManifestDigest, err := list.ChooseInstance(sys) + if err != nil { + return nil, errors.Wrapf(err, "choosing image instance") + } + manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) + if err != nil { + return nil, errors.Wrapf(err, "loading manifest for target platform") + } + + matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) + if err != nil { + return nil, errors.Wrap(err, "computing manifest digest") + } + if !matches { + return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) + } + + return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) +} diff --git a/vendor/github.com/containers/image/v5/image/docker_schema1.go b/vendor/github.com/containers/image/v5/image/docker_schema1.go new file mode 100644 index 000000000..5f24970c3 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_schema1.go @@ -0,0 +1,248 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type manifestSchema1 struct { + m *manifest.Schema1 +} + +func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { + m, err := 
manifest.Schema1FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. +func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { + m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema1) manifestMIMEType() string { + return manifest.DockerV2Schema1SignedMediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema1) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { + return nil, nil +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + v2s2, err := m.convertToManifestSchema2(ctx, &types.ManifestUpdateOptions{}) + if err != nil { + return nil, err + } + return v2s2.OCIConfig(ctx) +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema1) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. +// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) +func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + // This is a bit convoluted: We can’t just have a "get embedded docker reference" method + // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually + // embed a full docker/distribution reference, but only the repo name and tag (without the host name). + // So we would have to provide a “return repo without host name, and tag” getter for the generic code, + // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the + // generic copy code needs to know about is reference.Named and that a manifest may need updating + // for some destinations. 
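+	// Editor's illustration (hypothetical values): a schema1 manifest carrying Name
+	// "library/busybox" and Tag "latest" conflicts with the destination ref
+	// "docker.io/library/busybox:1.36" (tags differ), but not with
+	// "docker.io/library/busybox:latest".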
+ name := reference.Path(ref) + var tag string + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } else { + tag = "" + } + return m.m.Name != name || m.m.Tag != tag +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) { + return m.m.Inspect(nil) +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest) +} + +// UpdatedImage returns a types.Image modified according to options. +// This does not change the state of the original Image object. +func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { + copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} + + // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, + // handle conversions between them by doing nothing. + if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType { + converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ + imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, + manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic, + }) + if err != nil { + return nil, err + } + + if converted != nil { + return converted, nil + } + } + + // No conversion required, update manifest + if options.LayerInfos != nil { + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err + } + } + if options.EmbeddedDockerReference != nil { + copy.m.Name = reference.Path(options.EmbeddedDockerReference) + if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { + copy.m.Tag = tagged.Tag() + } else { + copy.m.Tag = "" + } + } + + return memoryImageFromManifest(©), nil +} + +// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. +// +// We need this function just because a function returning an implementation of the genericManifest +// interface is not automatically assignable to a function type returning the genericManifest interface +func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + return m.convertToManifestSchema2(ctx, options) +} + +// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. 
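+//
+// Editor's note on the index arithmetic below: schema1 lists history newest-first while
+// schema2 lists layers oldest-first, so the two indices mirror each other; with three
+// entries,
+//
+//	v1Index 2 1 0  <->  v2Index 0 1 2  (v2Index = len-1-v1Index)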
+// +// Based on github.com/docker/docker/distribution/pull_v2.go +func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { + uploadedLayerInfos := options.InformationOnly.LayerInfos + layerDiffIDs := options.InformationOnly.LayerDiffIDs + + if len(m.m.ExtractedV1Compatibility) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. + return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) + } + if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { + return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) + } + if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) + } + if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) + } + + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.FSLayers)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + + // Build a list of the diffIDs for the non-empty layers. + diffIDs := []digest.Digest{} + var layers []manifest.Schema2Descriptor + for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index + + if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { + var size int64 + if uploadedLayerInfos != nil { + size = uploadedLayerInfos[v2Index].Size + } + var d digest.Digest + if layerDiffIDs != nil { + d = layerDiffIDs[v2Index] + } + layers = append(layers, manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", + Size: size, + Digest: m.m.FSLayers[v1Index].BlobSum, + }) + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[v2Index]) + } + diffIDs = append(diffIDs, d) + } + } + configJSON, err := m.m.ToSchema2Config(diffIDs) + if err != nil { + return nil, err + } + configDescriptor := manifest.Schema2Descriptor{ + MediaType: "application/vnd.docker.container.image.v1+json", + Size: int64(len(configJSON)), + Digest: digest.FromBytes(configJSON), + } + + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } + return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil +} + +// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema1 object. 
+func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest + m2, err := m.convertToManifestSchema2(ctx, options) + if err != nil { + return nil, err + } + + return m2.convertToManifestOCI1(ctx, options) +} + +// SupportsEncryption returns if encryption is supported for the manifest type +func (m *manifestSchema1) SupportsEncryption(context.Context) bool { + return false +} diff --git a/vendor/github.com/containers/image/v5/image/docker_schema2.go b/vendor/github.com/containers/image/v5/image/docker_schema2.go new file mode 100644 index 000000000..b250a6b1d --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/docker_schema2.go @@ -0,0 +1,400 @@ +package image + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "strings" + + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is +// a non-zero embedded timestamp; we could zero that, but that would just waste storage space +// in registries, so let’s use the same values. +var GzippedEmptyLayer = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +type manifestSchema2 struct { + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. + m *manifest.Schema2 +} + +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { + return nil, err + } + return &manifestSchema2{ + src: src, + m: m, + }, nil +} + +// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 { + return &manifestSchema2{ + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), + } +} + +func (m *manifestSchema2) serialize() ([]byte, error) { + return m.m.Serialize() +} + +func (m *manifestSchema2) manifestMIMEType() string { + return m.m.MediaType +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. +func (m *manifestSchema2) ConfigInfo() types.BlobInfo { + return m.m.ConfigInfo() +} + +// OCIConfig returns the image configuration as per OCI v1 image-spec. 
Information about +// layers in the resulting configuration isn't guaranteed to be returned to due how +// old image manifests work (docker v2s1 especially). +func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { + configBlob, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields + // than OCI v1. This unmarshal makes sure we drop docker v2s2 + // fields that aren't needed in OCI v1. + configOCI := &imgspecv1.Image{} + if err := json.Unmarshal(configBlob, configOCI); err != nil { + return nil, err + } + return configOCI, nil +} + +// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. +// The result is cached; it is OK to call this however often you need. +func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { + if m.configBlob == nil { + if m.src == nil { + return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") + } + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) + if err != nil { + return nil, err + } + defer stream.Close() + blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize) + if err != nil { + return nil, err + } + computedDigest := digest.FromBytes(blob) + if computedDigest != m.m.ConfigDescriptor.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) + } + m.configBlob = blob + } + return m.configBlob, nil +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *manifestSchema2) LayerInfos() []types.BlobInfo { + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) +} + +// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. +// It returns false if the manifest does not embed a Docker reference. +// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) +func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { + return false +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + return config, nil + } + return m.m.Inspect(getter) +} + +// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. +// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute +// (most importantly it forces us to download the full layers even if they are already present at the destination). +func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { + return false +} + +// UpdatedImage returns a types.Image modified according to options. 
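+// For example (editor's sketch, hypothetical options):
+//
+//	updated, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
+//		ManifestMIMEType: imgspecv1.MediaTypeImageManifest, // request schema2 -> OCI conversion
+//	})
+//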
+// This does not change the state of the original Image object. +// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError +// if the CompressionOperation and CompressionAlgorithm specified in one or more +// options.LayerInfos items is anything other than gzip. +func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { + copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. + src: m.src, + configBlob: m.configBlob, + m: manifest.Schema2Clone(m.m), + } + + converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ + manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1, + manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1, + imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, + }) + if err != nil { + return nil, err + } + + if converted != nil { + return converted, nil + } + + // No conversion required, update manifest + if options.LayerInfos != nil { + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err + } + } + // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. + + return memoryImageFromManifest(©), nil +} + +func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { + return imgspecv1.Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + +// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. +// This does not change the state of the original manifestSchema2 object. +func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) { + configOCI, err := m.OCIConfig(ctx) + if err != nil { + return nil, err + } + configOCIBytes, err := json.Marshal(configOCI) + if err != nil { + return nil, err + } + + config := imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Size: int64(len(configOCIBytes)), + Digest: digest.FromBytes(configOCIBytes), + } + + layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) + for idx := range layers { + layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) + switch m.m.LayersDescriptors[idx].MediaType { + case manifest.DockerV2Schema2ForeignLayerMediaType: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable + case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip + case manifest.DockerV2SchemaLayerMediaTypeUncompressed: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema2LayerMediaType: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip + default: + return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) + } + } + + return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil +} + +// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. +// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned +// value. 
+// This does not change the state of the original manifestSchema2 object. +// +// Based on docker/distribution/manifest/schema1/config_builder.go +func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { + dest := options.InformationOnly.Destination + + var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil + if options.LayerInfos != nil { + if len(options.LayerInfos) != len(m.m.LayersDescriptors) { + return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", + len(options.LayerInfos), len(m.m.LayersDescriptors)) + } + convertedLayerUpdates = []types.BlobInfo{} + } + + configBytes, err := m.ConfigBlob(ctx) + if err != nil { + return nil, err + } + imageConfig := &manifest.Schema2Image{} + if err := json.Unmarshal(configBytes, imageConfig); err != nil { + return nil, err + } + + // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. + fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) + history := make([]manifest.Schema1History, len(imageConfig.History)) + nonemptyLayerIndex := 0 + var parentV1ID string // Set in the loop + v1ID := "" + haveGzippedEmptyLayer := false + if len(imageConfig.History) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. + return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) + } + for v2Index, historyEntry := range imageConfig.History { + parentV1ID = v1ID + v1Index := len(imageConfig.History) - 1 - v2Index + + var blobDigest digest.Digest + if historyEntry.EmptyLayer { + emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))} + + if !haveGzippedEmptyLayer { + logrus.Debugf("Uploading empty layer during conversion to schema 1") + // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, + // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false) + if err != nil { + return nil, errors.Wrap(err, "uploading empty layer") + } + if info.Digest != emptyLayerBlobInfo.Digest { + return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest) + } + haveGzippedEmptyLayer = true + } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo) + } + blobDigest = emptyLayerBlobInfo.Digest + } else { + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) + } + if options.LayerInfos != nil { + convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex]) + } + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest + nonemptyLayerIndex++ + } + + // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. 
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) + if err != nil { + return nil, err + } + v1ID = v + + fakeImage := manifest.Schema1V1Compatibility{ + ID: v1ID, + Parent: parentV1ID, + Comment: historyEntry.Comment, + Created: historyEntry.Created, + Author: historyEntry.Author, + ThrowAway: historyEntry.EmptyLayer, + } + fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} + v1CompatibilityBytes, err := json.Marshal(&fakeImage) + if err != nil { + return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) + } + + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} + // Note that parentV1ID of the top layer is preserved when exiting this loop + } + + // Now patch in real configuration for the top layer (v1Index == 0) + v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. + if err != nil { + return nil, err + } + v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) + if err != nil { + return nil, err + } + history[0].V1Compatibility = string(v1Config) + + if options.LayerInfos != nil { + options.LayerInfos = convertedLayerUpdates + } + m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + if err != nil { + return nil, err // This should never happen, we should have created all the components correctly. + } + return m1, nil +} + +func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { + if err := blobDigest.Validate(); err != nil { + return "", err + } + parts := append([]string{blobDigest.Hex()}, others...) + v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) + return hex.EncodeToString(v1IDHash[:]), nil +} + +func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Preserve everything we don't specifically know about. + // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) + rawContents := map[string]*json.RawMessage{} + if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! 
+		return nil, err
+	}
+	delete(rawContents, "rootfs")
+	delete(rawContents, "history")
+
+	updates := map[string]interface{}{"id": v1ID}
+	if parentV1ID != "" {
+		updates["parent"] = parentV1ID
+	}
+	if throwaway {
+		updates["throwaway"] = throwaway
+	}
+	for field, value := range updates {
+		encoded, err := json.Marshal(value)
+		if err != nil {
+			return nil, err
+		}
+		rawContents[field] = (*json.RawMessage)(&encoded)
+	}
+	return json.Marshal(rawContents)
+}
+
+// SupportsEncryption returns if encryption is supported for the manifest type
+func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
+	return false
+}
diff --git a/vendor/github.com/containers/image/v5/image/manifest.go b/vendor/github.com/containers/image/v5/image/manifest.go
new file mode 100644
index 000000000..36d70b5c2
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/manifest.go
@@ -0,0 +1,113 @@
+package image
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// genericManifest is an interface for parsing and modifying image manifests and related data.
+// Note that the public methods are intended to be a subset of types.Image
+// so that embedding a genericManifest into structs works.
+// will support v1 one day...
+type genericManifest interface {
+	serialize() ([]byte, error)
+	manifestMIMEType() string
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+	ConfigInfo() types.BlobInfo
+	// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+	// The result is cached; it is OK to call this however often you need.
+	ConfigBlob(context.Context) ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig(context.Context) (*imgspecv1.Image, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided; Size may be -1.
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() []types.BlobInfo
+	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+	// It returns false if the manifest does not embed a Docker reference.
+	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+	Inspect(context.Context) (*types.ImageInspectInfo, error)
+	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+	// (most importantly it forces us to download the full layers even if they are already present at the destination).
+	UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool
+	// UpdatedImage returns a types.Image modified according to options.
+	// This does not change the state of the original Image object.
+	UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
+	// SupportsEncryption returns if encryption is supported for the manifest type
+	//
+	// Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
+	// the process of updating a manifest between different manifest types was to update then convert.
+	// This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
+	SupportsEncryption(ctx context.Context) bool
+}
+
+// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
+// If manblob is a manifest list, it implicitly chooses an appropriate image from the list.
+func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
+	switch manifest.NormalizedMIMEType(mt) {
+	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+		return manifestSchema1FromManifest(manblob)
+	case imgspecv1.MediaTypeImageManifest:
+		return manifestOCI1FromManifest(src, manblob)
+	case manifest.DockerV2Schema2MediaType:
+		return manifestSchema2FromManifest(src, manblob)
+	case manifest.DockerV2ListMediaType:
+		return manifestSchema2FromManifestList(ctx, sys, src, manblob)
+	case imgspecv1.MediaTypeImageIndex:
+		return manifestOCI1FromImageIndex(ctx, sys, src, manblob)
+	default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
+		return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+	}
+}
+
+// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo.
+func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo {
+	blobs := make([]types.BlobInfo, len(layers))
+	for i, layer := range layers {
+		blobs[i] = layer.BlobInfo
+	}
+	return blobs
+}
+
+// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation
+// converted to a specific manifest MIME type.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original genericManifest object.
+type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error)
+
+// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if
+// required and re-apply the options to the converted type.
+// It returns (nil, nil) if no conversion was requested.
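+//
+// For example (illustrative; mirrors the converter maps used by the schema2 and OCI
+// implementations above, not additional upstream code):
+//
+//	converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+//		imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
+//	})
+//	if err != nil {
+//		return nil, err
+//	}
+//	if converted != nil {
+//		return converted, nil // a conversion was requested and performed
+//	}
+//	// otherwise, fall through and update the manifest in place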
+func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) { + if options.ManifestMIMEType == "" { + return nil, nil + } + + converter, ok := converters[options.ManifestMIMEType] + if !ok { + return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType) + } + + optionsCopy := options + convertedManifest, err := converter(ctx, &optionsCopy) + if err != nil { + return nil, err + } + convertedImage := memoryImageFromManifest(convertedManifest) + + optionsCopy.ManifestMIMEType = "" + return convertedImage.UpdatedImage(ctx, optionsCopy) +} diff --git a/vendor/github.com/containers/image/v5/image/memory.go b/vendor/github.com/containers/image/v5/image/memory.go new file mode 100644 index 000000000..4c96b37d8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/image/memory.go @@ -0,0 +1,64 @@ +package image + +import ( + "context" + + "github.com/containers/image/v5/types" + "github.com/pkg/errors" +) + +// memoryImage is a mostly-implementation of types.Image assembled from data +// created in memory, used primarily as a return value of types.Image.UpdatedImage +// as a way to carry various structured information in a type-safe and easy-to-use way. +// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone +// collection of all related information, e.g. there is no way to get layer blobs +// from a memoryImage. +type memoryImage struct { + genericManifest + serializedManifest []byte // A private cache for Manifest() +} + +func memoryImageFromManifest(m genericManifest) types.Image { + return &memoryImage{ + genericManifest: m, + serializedManifest: nil, + } +} + +// Reference returns the reference used to set up this source, _as specified by the user_ +// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. +func (i *memoryImage) Reference() types.ImageReference { + // It would really be inappropriate to return the ImageReference of the image this was based on. + return nil +} + +// Size returns the size of the image as stored, if known, or -1 if not. +func (i *memoryImage) Size() (int64, error) { + return -1, nil +} + +// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { + if i.serializedManifest == nil { + m, err := i.genericManifest.serialize() + if err != nil { + return nil, "", err + } + i.serializedManifest = m + } + return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { + // Modifying an image invalidates signatures; a caller asking the updated image for signatures + // is probably confused. + return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") +} + +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. 
+func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/image/oci.go b/vendor/github.com/containers/image/v5/image/oci.go
new file mode 100644
index 000000000..58e9c03ba
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/oci.go
@@ -0,0 +1,248 @@
+package image
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/pkg/blobinfocache/none"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type manifestOCI1 struct {
+	src        types.ImageSource // May be nil if configBlob is not nil
+	configBlob []byte            // If set, corresponds to contents of m.Config.
+	m          *manifest.OCI1
+}
+
+func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+	m, err := manifest.OCI1FromManifest(manifestBlob)
+	if err != nil {
+		return nil, err
+	}
+	return &manifestOCI1{
+		src: src,
+		m:   m,
+	}, nil
+}
+
+// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data.
+func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
+	return &manifestOCI1{
+		src:        src,
+		configBlob: configBlob,
+		m:          manifest.OCI1FromComponents(config, layers),
+	}
+}
+
+func (m *manifestOCI1) serialize() ([]byte, error) {
+	return m.m.Serialize()
+}
+
+func (m *manifestOCI1) manifestMIMEType() string {
+	return imgspecv1.MediaTypeImageManifest
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
+	return m.m.ConfigInfo()
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
+	if m.configBlob == nil {
+		if m.src == nil {
+			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
+		}
+		stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
+		if err != nil {
+			return nil, err
+		}
+		defer stream.Close()
+		blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
+		if err != nil {
+			return nil, err
+		}
+		computedDigest := digest.FromBytes(blob)
+		if computedDigest != m.m.Config.Digest {
+			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
+		}
+		m.configBlob = blob
+	}
+	return m.configBlob, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
+	cb, err := m.ConfigBlob(ctx)
+	if err != nil {
+		return nil, err
+	}
+	configOCI := &imgspecv1.Image{}
+	if err := json.Unmarshal(cb, configOCI); err != nil {
+		return nil, err
+	}
+	return configOCI, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
+	return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
+}
+
+// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+	return false
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
+	getter := func(info types.BlobInfo) ([]byte, error) {
+		if info.Digest != m.ConfigInfo().Digest {
+			// Shouldn't ever happen
+			return nil, errors.New("asked for a different config blob")
+		}
+		config, err := m.ConfigBlob(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	}
+	return m.m.Inspect(getter)
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+	return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the combination of CompressionOperation and CompressionAlgorithm specified
+// in one or more options.LayerInfos items indicates that a layer is compressed using
+// an algorithm that is not allowed in OCI.
+func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
+	copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
+		src:        m.src,
+		configBlob: m.configBlob,
+		m:          manifest.OCI1Clone(m.m),
+	}
+
+	converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+		manifest.DockerV2Schema2MediaType:       copy.convertToManifestSchema2Generic,
+		manifest.DockerV2Schema1MediaType:       copy.convertToManifestSchema1,
+		manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if converted != nil {
+		return converted, nil
+	}
+
+	// No conversion required, update manifest
+	if options.LayerInfos != nil {
+		if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+			return nil, err
+		}
+	}
+	// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
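+	// For example (illustrative, hypothetical caller): a conversion to Docker schema2
+	// is routed through the converter map above by a call such as
+	//
+	//	img2, err := ociImage.UpdatedImage(ctx, types.ManifestUpdateOptions{
+	//		ManifestMIMEType: manifest.DockerV2Schema2MediaType,
+	//	})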
+
+	return memoryImageFromManifest(&copy), nil
+}
+
+func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
+	return manifest.Schema2Descriptor{
+		MediaType: d.MediaType,
+		Size:      d.Size,
+		Digest:    d.Digest,
+		URLs:      d.URLs,
+	}
+}
+
+// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+//
+// We need this function just because a function returning an implementation of the genericManifest
+// interface is not automatically assignable to a function type returning the genericManifest interface.
+func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+	return m.convertToManifestSchema2(ctx, options)
+}
+
+// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
+	// Create a copy of the descriptor.
+	config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
+
+	// The only difference between OCI and DockerSchema2 is the mediatypes. The
+	// media type of the manifest is handled by manifestSchema2FromComponents.
+	config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+	layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
+	for idx := range layers {
+		layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
+		switch layers[idx].MediaType {
+		case imgspecv1.MediaTypeImageLayerNonDistributable:
+			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
+		case imgspecv1.MediaTypeImageLayerNonDistributableGzip:
+			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
+		case imgspecv1.MediaTypeImageLayerNonDistributableZstd:
+			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+		case imgspecv1.MediaTypeImageLayer:
+			layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
+		case imgspecv1.MediaTypeImageLayerGzip:
+			layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
+		case imgspecv1.MediaTypeImageLayerZstd:
+			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
+		default:
+			return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
+		}
+	}
+
+	// Rather than copying the ConfigBlob now, we just pass m.src to the
+	// translated manifest; since the only difference is the mediatype of
+	// descriptors, there is no change to any blob stored in m.src.
+	return manifestSchema2FromComponents(config, m.src, nil, layers), nil
+}
+
+// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
+// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
+// value.
+// This does not change the state of the original manifestOCI1 object.
+func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
+	// We can't directly convert to V1, but we can transitively convert via a V2 image
+	m2, err := m.convertToManifestSchema2(ctx, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return m2.convertToManifestSchema1(ctx, options)
+}
+
+// SupportsEncryption returns if encryption is supported for the manifest type
+func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
+	return true
+}
diff --git a/vendor/github.com/containers/image/v5/image/oci_index.go b/vendor/github.com/containers/image/v5/image/oci_index.go
new file mode 100644
index 000000000..4e6ca879a
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/oci_index.go
@@ -0,0 +1,34 @@
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+)
+
+func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
+	index, err := manifest.OCI1IndexFromManifest(manblob)
+	if err != nil {
+		return nil, errors.Wrapf(err, "parsing OCI1 index")
+	}
+	targetManifestDigest, err := index.ChooseInstance(sys)
+	if err != nil {
+		return nil, errors.Wrapf(err, "choosing image instance")
+	}
+	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
+	if err != nil {
+		return nil, errors.Wrapf(err, "loading manifest for target platform")
+	}
+
+	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+	if err != nil {
+		return nil, errors.Wrap(err, "computing manifest digest")
+	}
+	if !matches {
+		return nil, errors.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
+	}
+
+	return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
+}
diff --git a/vendor/github.com/containers/image/v5/image/sourced.go b/vendor/github.com/containers/image/v5/image/sourced.go
new file mode 100644
index 000000000..3a016e1d0
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/sourced.go
@@ -0,0 +1,104 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using a unified interface.
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/types"
+)
+
+// imageCloser implements types.ImageCloser, perhaps allowing simple users
+// to use a single object without having to keep a reference to a types.ImageSource
+// only to call types.ImageSource.Close().
+type imageCloser struct {
+	types.Image
+	src types.ImageSource
+}
+
+// FromSource returns a types.ImageCloser implementation for the default instance of source.
+// If source is a manifest list, .Manifest() still returns the manifest list,
+// but other methods transparently return data from an appropriate image instance.
+//
+// The caller must call .Close() on the returned ImageCloser.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need
+// to keep a reference to the Image.)
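+//
+// For example (illustrative; ref and sys are assumed to exist):
+//
+//	src, err := ref.NewImageSource(ctx, sys)
+//	if err != nil {
+//		return err
+//	}
+//	img, err := image.FromSource(ctx, sys, src) // img now owns src
+//	if err != nil {
+//		src.Close() // on error, the caller still owns src
+//		return err
+//	}
+//	defer img.Close() // also closes src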
+// +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. +func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) + if err != nil { + return nil, err + } + return &imageCloser{ + Image: img, + src: src, + }, nil +} + +func (ic *imageCloser) Close() error { + return ic.src.Close() +} + +// sourcedImage is a general set of utilities for working with container images, +// whatever is their underlying location (i.e. dockerImageSource-independent). +// Note the existence of skopeo/docker.Image: some instances of a `types.Image` +// may not be a `sourcedImage` directly. However, most users of `types.Image` +// do not care, and those who care about `skopeo/docker.Image` know they do. +type sourcedImage struct { + *UnparsedImage + manifestBlob []byte + manifestMIMEType string + // genericManifest contains data corresponding to manifestBlob. + // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest + // if you want to preserve the original manifest; use manifestBlob directly. + genericManifest +} + +// FromUnparsedImage returns a types.Image implementation for unparsed. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. +// +// The Image must not be used after the underlying ImageSource is Close()d. +func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { + // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: + // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, + // this is the only UnparsedImage implementation around, anyway. + + // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). + manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) + if err != nil { + return nil, err + } + + parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) + if err != nil { + return nil, err + } + + return &sourcedImage{ + UnparsedImage: unparsed, + manifestBlob: manifestBlob, + manifestMIMEType: manifestMIMEType, + genericManifest: parsedManifest, + }, nil +} + +// Size returns the size of the image as stored, if it's known, or -1 if it isn't. +func (i *sourcedImage) Size() (int64, error) { + return -1, nil +} + +// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. 
+func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	return i.manifestBlob, i.manifestMIMEType, nil
+}
+
+func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
+	return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
+}
diff --git a/vendor/github.com/containers/image/v5/image/unparsed.go b/vendor/github.com/containers/image/v5/image/unparsed.go
new file mode 100644
index 000000000..c64852f72
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/image/unparsed.go
@@ -0,0 +1,95 @@
+package image
+
+import (
+	"context"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/manifest"
+	"github.com/containers/image/v5/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// UnparsedImage implements types.UnparsedImage.
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+type UnparsedImage struct {
+	src            types.ImageSource
+	instanceDigest *digest.Digest
+	cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
+	// A private cache for Manifest(), may be the empty string if guessing failed.
+	// Valid iff cachedManifest is not nil.
+	cachedManifestMIMEType string
+	cachedSignatures       [][]byte // A private cache for Signatures(); nil if not yet known.
+}
+
+// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
+	return &UnparsedImage{
+		src:            src,
+		instanceDigest: instanceDigest,
+	}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+	// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
+	return i.src.Reference()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
+	if i.cachedManifest == nil {
+		m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
+		if err != nil {
+			return nil, "", err
+		}
+
+		// ImageSource.GetManifest does not do digest verification, but we do;
+		// this immediately protects also any user of types.Image.
+		if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
+			matches, err := manifest.MatchesDigest(m, digest)
+			if err != nil {
+				return nil, "", errors.Wrap(err, "computing manifest digest")
+			}
+			if !matches {
+				return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
+			}
+		}
+
+		i.cachedManifest = m
+		i.cachedManifestMIMEType = mt
+	}
+	return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
+// The bool return value seems redundant with digest != ""; it is used explicitly +// to refuse (unexpected) situations when the digest exists but is "". +func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { + if i.instanceDigest != nil { + return *i.instanceDigest, true + } + ref := i.Reference().DockerReference() + if ref != nil { + if canonical, ok := ref.(reference.Canonical); ok { + return canonical.Digest(), true + } + } + return "", false +} + +// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. +func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { + if i.cachedSignatures == nil { + sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) + if err != nil { + return nil, err + } + i.cachedSignatures = sigs + } + return i.cachedSignatures, nil +} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go new file mode 100644 index 000000000..b86e8b1ac --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go @@ -0,0 +1,64 @@ +package blobinfocache + +import ( + "github.com/containers/image/v5/pkg/compression" + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" +) + +// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original +// object if it implements BlobInfoCache2, or a wrapper which discards compression information +// if it only implements BlobInfoCache. +func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 { + if bic2, ok := bic.(BlobInfoCache2); ok { + return bic2 + } + return &v1OnlyBlobInfoCache{ + BlobInfoCache: bic, + } +} + +type v1OnlyBlobInfoCache struct { + types.BlobInfoCache +} + +func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { +} + +func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 { + return nil +} + +// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of +// types.BICReplacementCandidate, dropping compression information. +func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate { + candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates)) + for _, c := range v2candidates { + candidates = append(candidates, types.BICReplacementCandidate{ + Digest: c.Digest, + Location: c.Location, + }) + } + return candidates +} + +// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm +// values suitable for inclusion in a types.BlobInfo structure, based on the name of the +// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by +// TryReusingBlob() implementations to set values in the BlobInfo structure that they return +// upon success. 
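+//
+// For example (illustrative; "gzip" is assumed to be a registered algorithm name):
+//
+//	op, algo, err := OperationAndAlgorithmForCompressor("gzip")
+//	// on success: op == types.Compress, algo != nil
+//	op, _, _ = OperationAndAlgorithmForCompressor(Uncompressed)       // op == types.Decompress
+//	op, _, _ = OperationAndAlgorithmForCompressor(UnknownCompression) // op == types.PreserveOriginal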
+func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) {
+	switch compressorName {
+	case Uncompressed:
+		return types.Decompress, nil, nil
+	case UnknownCompression:
+		return types.PreserveOriginal, nil, nil
+	default:
+		algo, err := compression.AlgorithmByName(compressorName)
+		if err == nil {
+			return types.Compress, &algo, nil
+		}
+		return types.PreserveOriginal, nil, err
+	}
+}
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
new file mode 100644
index 000000000..3c2be57f3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
@@ -0,0 +1,45 @@
+package blobinfocache
+
+import (
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+)
+
+const (
+	// Uncompressed is the value we store in a blob info cache to indicate that we know that
+	// the blob in the corresponding location is not compressed.
+	Uncompressed = "uncompressed"
+	// UnknownCompression is the value we store in a blob info cache to indicate that we don't
+	// know if the blob in the corresponding location is compressed (and if so, how) or not.
+	UnknownCompression = "unknown"
+)
+
+// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind
+// of compression was applied to the blobs it keeps information about.
+type BlobInfoCache2 interface {
+	types.BlobInfoCache
+	// RecordDigestCompressorName records a compressor for the blob with the specified digest,
+	// or Uncompressed or UnknownCompression.
+	// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
+	// digest just because some remote author claims so (e.g. because a manifest says so);
+	// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+	// information in a manifest.
+	RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
+	// CandidateLocations2 returns a prioritized, limited number of blobs and their locations
+	// that could possibly be reused within the specified transport scope (if they still
+	// exist, which is not guaranteed).
+	//
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
+	// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+	// up variants of the blob which have the same uncompressed digest.
+	//
+	// The CompressorName fields in returned data must never be UnknownCompression.
+	CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
+}
+
+// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
+type BICReplacementCandidate2 struct {
+	Digest         digest.Digest
+	CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
+	Location       types.BICLocationReference
+}
diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
new file mode 100644
index 000000000..49fa410e9
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
@@ -0,0 +1,59 @@
+package iolimits
+
+import (
+	"io"
+
+	"github.com/pkg/errors"
+)
+
+// All constants below are intended to be used as limits for `ReadAtMost`.
The
+// immediate use-case for limiting the size of in-memory copied data is to
+// protect against OOM DOS attacks as described in CVE-2020-1702. Instead of
+// copying data until running out of memory, we error out after hitting the
+// specified limit.
+const (
+	// megaByte denotes one megabyte and is intended to be used as a limit in
+	// `ReadAtMost`.
+	megaByte = 1 << 20
+	// MaxManifestBodySize is the maximum allowed size of a manifest. The limit
+	// of 4 MB aligns with the one of a Docker registry:
+	// https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30
+	MaxManifestBodySize = 4 * megaByte
+	// MaxAuthTokenBodySize is the maximum allowed size of an auth token.
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxAuthTokenBodySize = megaByte
+	// MaxSignatureListBodySize is the maximum allowed size of a signature list.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxSignatureListBodySize = 4 * megaByte
+	// MaxSignatureBodySize is the maximum allowed size of a signature.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxSignatureBodySize = 4 * megaByte
+	// MaxErrorBodySize is the maximum allowed size of an error-response body.
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxErrorBodySize = megaByte
+	// MaxConfigBodySize is the maximum allowed size of a config blob.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxConfigBodySize = 4 * megaByte
+	// MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxOpenShiftStatusBody = 4 * megaByte
+	// MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images).
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxTarFileManifestSize = megaByte
+)
+
+// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded.
+func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
+	limitedReader := io.LimitReader(reader, int64(limit+1))
+
+	res, err := io.ReadAll(limitedReader)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(res) > limit {
+		return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit)
+	}
+
+	return res, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
new file mode 100644
index 000000000..3bda6af29
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go
@@ -0,0 +1,196 @@
+package platform
+
+// Largely based on
+// https://github.com/moby/moby/blob/bc846d2e8fe5538220e0c31e9d0e8446f6fbc022/distribution/cpuinfo_unix.go
+// Copyright 2012-2017 Docker, Inc.
+//
+// https://github.com/containerd/containerd/blob/726dcaea50883e51b2ec6db13caff0e7936b711d/platforms/cpuinfo.go
+// Copyright The containerd Authors.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// https://www.apache.org/licenses/LICENSE-2.0
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/containers/image/v5/types"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// For Linux, the kernel has already detected the ABI, ISA and Features.
+// So we don't need to access the ARM registers to detect platform information
+// by ourselves. We can just parse this information from /proc/cpuinfo.
+func getCPUInfo(pattern string) (info string, err error) {
+	if runtime.GOOS != "linux" {
+		return "", fmt.Errorf("getCPUInfo for OS %s not implemented", runtime.GOOS)
+	}
+
+	cpuinfo, err := os.Open("/proc/cpuinfo")
+	if err != nil {
+		return "", err
+	}
+	defer cpuinfo.Close()
+
+	// Parse /proc/cpuinfo line by line. For SMP SoCs, parsing
+	// the first core is enough.
+	scanner := bufio.NewScanner(cpuinfo)
+	for scanner.Scan() {
+		newline := scanner.Text()
+		list := strings.Split(newline, ":")
+
+		if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
+			return strings.TrimSpace(list[1]), nil
+		}
+	}
+
+	// Check whether the scanner encountered errors
+	err = scanner.Err()
+	if err != nil {
+		return "", err
+	}
+
+	return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
+}
+
+func getCPUVariantWindows(arch string) string {
+	// Windows only supports v7 for ARM32 and v8 for ARM64, so we can use
+	// runtime.GOARCH to determine the variants
+	var variant string
+	switch arch {
+	case "arm64":
+		variant = "v8"
+	case "arm":
+		variant = "v7"
+	default:
+		variant = ""
+	}
+
+	return variant
+}
+
+func getCPUVariantArm() string {
+	variant, err := getCPUInfo("Cpu architecture")
+	if err != nil {
+		return ""
+	}
+	// TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286)
+
+	switch strings.ToLower(variant) {
+	case "8", "aarch64":
+		variant = "v8"
+	case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
+		variant = "v7"
+	case "6", "6tej":
+		variant = "v6"
+	case "5", "5t", "5te", "5tej":
+		variant = "v5"
+	case "4", "4t":
+		variant = "v4"
+	case "3":
+		variant = "v3"
+	default:
+		variant = ""
+	}
+
+	return variant
+}
+
+func getCPUVariant(os string, arch string) string {
+	if os == "windows" {
+		return getCPUVariantWindows(arch)
+	}
+	if arch == "arm" || arch == "arm64" {
+		return getCPUVariantArm()
+	}
+	return ""
+}
+
+// compatibility contains, for a specified architecture, a list of known variants, in the
+// order from most capable (most restrictive) to least capable (most compatible).
+// Architectures that don’t have variants should not have an entry here.
+var compatibility = map[string][]string{
+	"arm":   {"v8", "v7", "v6", "v5"},
+	"arm64": {"v8"},
+}
+
+// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by the user;
+// the most compatible platform is first.
+// If some option (arch, os, variant) is not present, a value from the current platform is detected.
+func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
+	wantedArch := runtime.GOARCH
+	wantedVariant := ""
+	if ctx != nil && ctx.ArchitectureChoice != "" {
+		wantedArch = ctx.ArchitectureChoice
+	} else {
+		// Only auto-detect the variant if we are using the default architecture.
+ // If the user has specified the ArchitectureChoice, don't autodetect, even if + // ctx.ArchitectureChoice == runtime.GOARCH, because we have no idea whether the runtime.GOARCH + // value is relevant to the use case, and if we do autodetect a variant, + // ctx.VariantChoice can't be used to override it back to "". + wantedVariant = getCPUVariant(runtime.GOOS, runtime.GOARCH) + } + if ctx != nil && ctx.VariantChoice != "" { + wantedVariant = ctx.VariantChoice + } + + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } + + var variants []string = nil + if wantedVariant != "" { + // If the user requested a specific variant, we'll walk down + // the list from most to least compatible. + if compatibility[wantedArch] != nil { + variantOrder := compatibility[wantedArch] + for i, v := range variantOrder { + if wantedVariant == v { + variants = variantOrder[i:] + break + } + } + } + if variants == nil { + // user wants a variant which we know nothing about - not even compatibility + variants = []string{wantedVariant} + } + // Make sure to have a candidate with an empty variant as well. + variants = append(variants, "") + } else { + // Make sure to have a candidate with an empty variant as well. + variants = append(variants, "") + // If available add the entire compatibility matrix for the specific architecture. + if possibleVariants, ok := compatibility[wantedArch]; ok { + variants = append(variants, possibleVariants...) + } + } + + res := make([]imgspecv1.Platform, 0, len(variants)) + for _, v := range variants { + res = append(res, imgspecv1.Platform{ + OS: wantedOS, + Architecture: wantedArch, + Variant: v, + }) + } + return res, nil +} + +// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches +// an item from the return value of WantedPlatforms. +func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool { + return image.Architecture == wanted.Architecture && + image.OS == wanted.OS && + image.Variant == wanted.Variant +} diff --git a/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go new file mode 100644 index 000000000..b8d3a7e56 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go @@ -0,0 +1,57 @@ +package putblobdigest + +import ( + "io" + + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// Digester computes a digest of the provided stream, if not known yet. +type Digester struct { + knownDigest digest.Digest // Or "" + digester digest.Digester // Or nil +} + +// newDigester initiates computation of a digest.Canonical digest of stream, +// if !validDigest; otherwise it just records knownDigest to be returned later. +// The caller MUST use the returned stream instead of the original value. +func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) { + if validDigest { + return Digester{knownDigest: knownDigest}, stream + } else { + res := Digester{ + digester: digest.Canonical.Digester(), + } + stream = io.TeeReader(stream, res.digester.Hash()) + return res, stream + } +} + +// DigestIfUnknown initiates computation of a digest.Canonical digest of stream, +// if no digest is supplied in the provided blobInfo; otherwise blobInfo.Digest will +// be used (accepting any algorithm). +// The caller MUST use the returned stream instead of the original value. 
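+//
+// For example (illustrative, hypothetical stream and blobInfo):
+//
+//	digester, wrapped := DigestIfUnknown(stream, blobInfo)
+//	if _, err := io.Copy(dst, wrapped); err != nil {
+//		return err // wrapped must be consumed fully before calling Digest()
+//	}
+//	dgst := digester.Digest() // the known digest, or one computed while copying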
+func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { + d := blobInfo.Digest + return newDigester(stream, d, d != "") +} + +// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream, +// if a digest.Canonical digest is not supplied in the provided blobInfo; +// otherwise blobInfo.Digest will be used. +// The caller MUST use the returned stream instead of the original value. +func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { + d := blobInfo.Digest + return newDigester(stream, d, d != "" && d.Algorithm() == digest.Canonical) +} + +// Digest() returns a digest value possibly computed by Digester. +// This must be called only after all of the stream returned by a Digester constructor +// has been successfully read. +func (d Digester) Digest() digest.Digest { + if d.digester != nil { + return d.digester.Digest() + } + return d.knownDigest +} diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go new file mode 100644 index 000000000..84bb656ac --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go @@ -0,0 +1,40 @@ +package streamdigest + +import ( + "fmt" + "io" + "os" + + "github.com/containers/image/v5/internal/putblobdigest" + "github.com/containers/image/v5/internal/tmpdir" + "github.com/containers/image/v5/types" +) + +// ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo. +// The temporary file is returned as an io.Reader along with a cleanup function. +// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. +// If an error occurs, inputInfo is not modified. +func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) { + diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") + if err != nil { + return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err) + } + cleanup := func() { + diskBlob.Close() + os.Remove(diskBlob.Name()) + } + digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo) + written, err := io.Copy(diskBlob, stream) + if err != nil { + cleanup() + return nil, nil, fmt.Errorf("writing to temporary on-disk layer: %w", err) + } + _, err = diskBlob.Seek(0, io.SeekStart) + if err != nil { + cleanup() + return nil, nil, fmt.Errorf("rewinding temporary on-disk layer: %w", err) + } + inputInfo.Digest = digester.Digest() + inputInfo.Size = written + return diskBlob, cleanup, nil +} diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go new file mode 100644 index 000000000..809446e18 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go @@ -0,0 +1,34 @@ +package tmpdir + +import ( + "os" + "runtime" + + "github.com/containers/image/v5/types" +) + +// unixTempDirForBigFiles is the directory path to store big files on non Windows systems. +// You can override this at build time with +// -ldflags '-X github.com/containers/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path' +var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles + +// builtinUnixTempDirForBigFiles is the directory path to store big files. 
+// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. +// DO NOT change this, instead see unixTempDirForBigFiles above. +const builtinUnixTempDirForBigFiles = "/var/tmp" + +// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. +// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp +// which on systemd based systems could be the unsuitable tmpfs filesystem. +func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string { + if sys != nil && sys.BigFilesTemporaryDir != "" { + return sys.BigFilesTemporaryDir + } + var temporaryDirectoryForBigFiles string + if runtime.GOOS == "windows" { + temporaryDirectoryForBigFiles = os.TempDir() + } else { + temporaryDirectoryForBigFiles = unixTempDirForBigFiles + } + return temporaryDirectoryForBigFiles +} diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go new file mode 100644 index 000000000..20955ab7f --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -0,0 +1,211 @@ +package manifest + +import ( + "encoding/json" + "fmt" + + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +// dupStringSlice returns a deep copy of a slice of strings, or nil if the +// source slice is empty. +func dupStringSlice(list []string) []string { + if len(list) == 0 { + return nil + } + dup := make([]string, len(list)) + copy(dup, list) + return dup +} + +// dupStringStringMap returns a deep copy of a map[string]string, or nil if the +// passed-in map is nil or has no keys. +func dupStringStringMap(m map[string]string) map[string]string { + if len(m) == 0 { + return nil + } + result := make(map[string]string) + for k, v := range m { + result[k] = v + } + return result +} + +// allowedManifestFields is a bit mask of “essential” manifest fields that validateUnambiguousManifestFormat +// can expect to be present. +type allowedManifestFields int + +const ( + allowedFieldConfig allowedManifestFields = 1 << iota + allowedFieldFSLayers + allowedFieldHistory + allowedFieldLayers + allowedFieldManifests + allowedFieldFirstUnusedBit // Keep this at the end! +) + +// validateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than +// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields +// other than the ones the caller specifically allows. +// expectedMIMEType is used only for diagnostics. +// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format +// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous +// data that just isn’t what was expected, as opposed to actually ambiguous data. +func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, + allowed allowedManifestFields) error { + if allowed >= allowedFieldFirstUnusedBit { + return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed) + } + // Use a private type to decode, not just a map[string]interface{}, because we want + // to also reject case-insensitive matches (which would be used by Go when really decoding + // the manifest). + // (It is expected that as manifest formats are added or extended over time, more fields will be added + // here.) 
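+	// For example (illustrative): a supposedly-schema2 manifest that also carries a
+	// "manifests" array is rejected below as ambiguous, assuming its caller passes an
+	// allowed-fields mask covering only "config" and "layers".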
+ detectedFields := struct { + Config interface{} `json:"config"` + FSLayers interface{} `json:"fsLayers"` + History interface{} `json:"history"` + Layers interface{} `json:"layers"` + Manifests interface{} `json:"manifests"` + }{} + if err := json.Unmarshal(manifest, &detectedFields); err != nil { + // The caller was supposed to already validate version numbers, so this should not happen; + // let’s not bother with making this error “nice”. + return err + } + unexpected := []string{} + // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste. + if detectedFields.Config != nil && (allowed&allowedFieldConfig) == 0 { + unexpected = append(unexpected, "config") + } + if detectedFields.FSLayers != nil && (allowed&allowedFieldFSLayers) == 0 { + unexpected = append(unexpected, "fsLayers") + } + if detectedFields.History != nil && (allowed&allowedFieldHistory) == 0 { + unexpected = append(unexpected, "history") + } + if detectedFields.Layers != nil && (allowed&allowedFieldLayers) == 0 { + unexpected = append(unexpected, "layers") + } + if detectedFields.Manifests != nil && (allowed&allowedFieldManifests) == 0 { + unexpected = append(unexpected, "manifests") + } + if len(unexpected) != 0 { + return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`, + unexpected, expectedMIMEType) + } + return nil +} + +// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() +// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. +func layerInfosToStrings(infos []LayerInfo) []string { + layers := make([]string, len(infos)) + for i, info := range infos { + layers[i] = info.Digest.String() + } + return layers +} + +// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed +// versions of “the same kind of content”. +// The map key is the return value of compressiontypes.Algorithm.Name(), or mtsUncompressed; +// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported". +type compressionMIMETypeSet map[string]string + +const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant +const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported” + +// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil +// to mean "no compression"), based on variantTable. +// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants +// that differ only in what type of compression is applied, but it can't be combined with this +// algorithm to produce an updated MIME type that complies with the standard that defines mimeType. +// If the compression algorithm is unrecognized, or mimeType is not known to have variants that +// differ from it only in what type of compression has been applied, the returned error will not be +// a ManifestLayerCompressionIncompatibilityError. 
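+// For example (editor's note, using the schema2 variant table defined elsewhere
+// in this package): requesting gzip for DockerV2SchemaLayerMediaTypeUncompressed
+// yields DockerV2Schema2LayerMediaType, while requesting zstd for the same input
+// yields a ManifestLayerCompressionIncompatibilityError, because the schema2
+// table maps the zstd variant to mtsUnsupportedMIMEType.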
+func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compressiontypes.Algorithm) (string, error) { + if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries + return "", fmt.Errorf("cannot update unknown MIME type") + } + for _, variants := range variantTable { + for _, mt := range variants { + if mt == mimeType { // Found the variant + name := mtsUncompressed + if algorithm != nil { + name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark() + } + if res, ok := variants[name]; ok { + if res != mtsUnsupportedMIMEType { + return res, nil + } + if name != mtsUncompressed { + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)} + } + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} + } + if name != mtsUncompressed { + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)} + } + // We can't very well say “the idea of no compression is unknown” + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)} + } + } + } + if algorithm != nil { + return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType) + } + return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType) +} + +// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to +// mimeType, based on variantTable. It may use updated.Digest for error messages. +// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants +// that differ only in what type of compression is applied, but applying updated.CompressionOperation +// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the +// standard that defines mimeType. +func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) { + // Note that manifests in containers-storage might be reporting the + // wrong media type since the original manifests are stored while layers + // are decompressed in storage. Hence, we need to consider the case + // that an already {de}compressed layer should be {de}compressed; + // compressionVariantMIMEType does that by not caring whether the original is + // {de}compressed. + switch updated.CompressionOperation { + case types.PreserveOriginal: + // Force a change to the media type if we're being told to use a particular compressor, + // since it might be different from the one associated with the media type. Otherwise, + // try to keep the original media type. + if updated.CompressionAlgorithm != nil { + return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) + } + // Keep the original media type. 
+		return mimeType, nil
+
+	case types.Decompress:
+		return compressionVariantMIMEType(variantTable, mimeType, nil)
+
+	case types.Compress:
+		if updated.CompressionAlgorithm == nil {
+			logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", updated.Digest)
+			return mimeType, nil
+		}
+		return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm)
+
+	default:
+		return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation)
+	}
+}
+
+// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm
+// could not be applied to a layer MIME type. A caller that receives this should either retry
+// the call with a different compression algorithm, or attempt to use a different manifest type.
+type ManifestLayerCompressionIncompatibilityError struct {
+	text string
+}
+
+func (m ManifestLayerCompressionIncompatibilityError) Error() string {
+	return m.text
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
new file mode 100644
index 000000000..6d12c4cec
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go
@@ -0,0 +1,320 @@
+package manifest
+
+import (
+	"encoding/json"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/types"
+	"github.com/docker/docker/api/types/versions"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
+type Schema1FSLayers struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+// Schema1History is an entry of the "history" array in docker/distribution schema 1.
+type Schema1History struct {
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Schema1 is a manifest in docker/distribution schema 1.
+type Schema1 struct {
+	Name                     string                   `json:"name"`
+	Tag                      string                   `json:"tag"`
+	Architecture             string                   `json:"architecture"`
+	FSLayers                 []Schema1FSLayers        `json:"fsLayers"`
+	History                  []Schema1History         `json:"history"` // Keep this in sync with ExtractedV1Compatibility!
+	ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"`       // Keep this in sync with History! Does not contain the full config (Schema2V1Image)
+	SchemaVersion            int                      `json:"schemaVersion"`
+}
+
+type schema1V1CompatibilityContainerConfig struct {
+	Cmd []string
+}
+
+// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
+type Schema1V1Compatibility struct {
+	ID              string                                `json:"id"`
+	Parent          string                                `json:"parent,omitempty"`
+	Comment         string                                `json:"comment,omitempty"`
+	Created         time.Time                             `json:"created"`
+	ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
+	Author          string                                `json:"author,omitempty"`
+	ThrowAway       bool                                  `json:"throwaway,omitempty"`
+}
+
+// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
+// (NOTE: The instance is not necessarily a literal representation of the original blob;
+// layers with duplicate IDs are eliminated.)
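+// A minimal decoding sketch (editor's illustration, not part of the upstream code):
+//
+//	s1, err := Schema1FromManifest(blob)
+//	if err != nil {
+//		return err
+//	}
+//	for _, l := range s1.LayerInfos() {
+//		fmt.Println(l.Digest, l.EmptyLayer) // root layer first
+//	}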
+func Schema1FromManifest(manifest []byte) (*Schema1, error) {
+	s1 := Schema1{}
+	if err := json.Unmarshal(manifest, &s1); err != nil {
+		return nil, err
+	}
+	if s1.SchemaVersion != 1 {
+		return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+	}
+	if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType,
+		allowedFieldFSLayers|allowedFieldHistory); err != nil {
+		return nil, err
+	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	if err := s1.fixManifestLayers(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
+}
+
+// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
+	var name, tag string
+	if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+		name = reference.Path(ref)
+		if tagged, ok := ref.(reference.NamedTagged); ok {
+			tag = tagged.Tag()
+		}
+	}
+	s1 := Schema1{
+		Name:          name,
+		Tag:           tag,
+		Architecture:  architecture,
+		FSLayers:      fsLayers,
+		History:       history,
+		SchemaVersion: 1,
+	}
+	if err := s1.initialize(); err != nil {
+		return nil, err
+	}
+	return &s1, nil
+}
+
+// Schema1Clone creates a copy of the supplied Schema1 manifest.
+func Schema1Clone(src *Schema1) *Schema1 {
+	copy := *src
+	return &copy
+}
+
+// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
+func (m *Schema1) initialize() error {
+	if len(m.FSLayers) != len(m.History) {
+		return errors.New("length of history not equal to number of layers")
+	}
+	if len(m.FSLayers) == 0 {
+		return errors.New("no FSLayers in manifest")
+	}
+	m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
+	for i, h := range m.History {
+		if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
+			return errors.Wrapf(err, "parsing v2s1 history entry %d", i)
+		}
+	}
+	return nil
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema1) ConfigInfo() types.BlobInfo {
+	return types.BlobInfo{}
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema1) LayerInfos() []LayerInfo {
+	layers := make([]LayerInfo, len(m.FSLayers))
+	for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+		layers[(len(m.FSLayers)-1)-i] = LayerInfo{
+			BlobInfo:   types.BlobInfo{Digest: layer.BlobSum, Size: -1},
+			EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
+		}
+	}
+	return layers
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+	// Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
+ if len(m.FSLayers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) + } + m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) + for i, info := range layerInfos { + // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, + // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. + // So, we don't bother recomputing the IDs in m.History.V1Compatibility. + m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema1) Serialize() ([]byte, error) { + // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. + unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return AddDummyV2S1Signature(unsigned) +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), +// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, +// both from m.History and m.FSLayers). +// Note that even after this succeeds, m.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem). +func (m *Schema1) fixManifestLayers() error { + // m.initialize() has verified that len(m.FSLayers) == len(m.History) + for _, compat := range m.ExtractedV1Compatibility { + if err := validateV1ID(compat.ID); err != nil { + return err + } + } + if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + var lastID string + for _, img := range m.ExtractedV1Compatibility { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + // backwards loop so that we keep the remaining indexes after removing items + for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { + if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) + } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) + } + } + return nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return errors.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
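+// (Editor's note: schema 1 carries its configuration inline in
+// History[0].V1Compatibility, so the configGetter argument is ignored here and
+// may be nil. A usage sketch, not upstream text:
+//
+//	info, err := m.Inspect(nil)
+//
+// info.Layers then lists the layer digests, root layer first.)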
+func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema2V1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: m.Tag, + Created: &s1.Created, + DockerVersion: s1.DockerVersion, + Architecture: s1.Architecture, + Os: s1.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if s1.Config != nil { + i.Labels = s1.Config.Labels + i.Env = s1.Config.Env + } + return i, nil +} + +// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. +func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. + if len(m.History) == 0 { + return nil, errors.New("image has no layers") + } + s1 := Schema2V1Image{} + config := []byte(m.History[0].V1Compatibility) + err := json.Unmarshal(config, &s1) + if err != nil { + return nil, errors.Wrapf(err, "decoding configuration") + } + // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, + // adding some fields that aren't "omitempty". + if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { + config, err = json.Marshal(&s1) + if err != nil { + return nil, errors.Wrapf(err, "re-encoding compat image config %#v", s1) + } + } + // Build the history. + convertedHistory := []Schema2History{} + for _, compat := range m.ExtractedV1Compatibility { + hitem := Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Author: compat.Author, + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. It's expected (but not enforced by us) + // that the number of diffIDs corresponds to the number of non-EmptyLayer + // entries in the history. + rootFS := &Schema2RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + return nil, errors.Wrapf(err, "re-decoding compat image config %#v", s1) + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. + rootfs, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(convertedHistory) + if err != nil { + return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result. + config, err = json.Marshal(raw) + if err != nil { + return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err) + } + return config, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. 
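+// (Editor's note: for schema 1 this works by synthesizing a schema2-style config
+// via ToSchema2Config and hashing it, so callers must supply the uncompressed
+// diffIDs of the layers; the returned string is the hex portion of that digest.)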
+func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { + image, err := m.ToSchema2Config(diffIDs) + if err != nil { + return "", err + } + return digest.FromBytes(image).Hex(), nil +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go new file mode 100644 index 000000000..1f4db54ee --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -0,0 +1,297 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "time" + + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/pkg/strslice" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} + +// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. +func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + MediaType: desc.MediaType, + } +} + +// Schema2 is a manifest in docker/distribution schema 2. +type Schema2 struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor Schema2Descriptor `json:"config"` + LayersDescriptors []Schema2Descriptor `json:"layers"` +} + +// Schema2Port is a Port, a string containing port number and protocol in the +// format "80/tcp", from docker/go-connections/nat. +type Schema2Port string + +// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from +// docker/go-connections/nat. +type Schema2PortSet map[Schema2Port]struct{} + +// Schema2HealthConfig is a HealthConfig, which holds configuration settings +// for the HEALTHCHECK feature, from docker/docker/api/types/container. +type Schema2HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Schema2Config is a Config in docker/docker/api/types/container. 
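+// (Editor's illustration: a Dockerfile directive such as
+// `HEALTHCHECK CMD curl -f http://localhost/` typically surfaces in this struct
+// as Healthcheck.Test = ["CMD-SHELL", "curl -f http://localhost/"].)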
+type Schema2Config struct {
+	Hostname        string               // Hostname
+	Domainname      string               // Domainname
+	User            string               // User that will run the command(s) inside the container, also support user:group
+	AttachStdin     bool                 // Attach the standard input, makes possible user interaction
+	AttachStdout    bool                 // Attach the standard output
+	AttachStderr    bool                 // Attach the standard error
+	ExposedPorts    Schema2PortSet       `json:",omitempty"` // List of exposed ports
+	Tty             bool                 // Attach standard streams to a tty, including stdin if it is not closed.
+	OpenStdin       bool                 // Open stdin
+	StdinOnce       bool                 // If true, close stdin after the first attached client disconnects.
+	Env             []string             // List of environment variables to set in the container
+	Cmd             strslice.StrSlice    // Command to run when starting the container
+	Healthcheck     *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+	ArgsEscaped     bool                 `json:",omitempty"` // True if command is already escaped (Windows specific)
+	Image           string               // Name of the image as it was passed by the operator (e.g. could be symbolic)
+	Volumes         map[string]struct{}  // List of volumes (mounts) used for the container
+	WorkingDir      string               // Current directory (PWD) in which the command will be launched
+	Entrypoint      strslice.StrSlice    // Entrypoint to run when starting the container
+	NetworkDisabled bool                 `json:",omitempty"` // Is network disabled
+	MacAddress      string               `json:",omitempty"` // Mac Address of the container
+	OnBuild         []string             // ONBUILD metadata that was defined on the image Dockerfile
+	Labels          map[string]string    // List of labels set to this container
+	StopSignal      string               `json:",omitempty"` // Signal to stop a container
+	StopTimeout     *int                 `json:",omitempty"` // Timeout (in seconds) to stop a container
+	Shell           strslice.StrSlice    `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// Schema2V1Image is a V1Image in docker/docker/image.
+type Schema2V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig Schema2Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *Schema2Config `json:"config,omitempty"` + // Architecture is the hardware that the image is built and runs on + Architecture string `json:"architecture,omitempty"` + // Variant is a variant of the CPU that the image is built and runs on + Variant string `json:"variant,omitempty"` + // OS is the operating system used to built and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. +type Schema2RootFS struct { + Type string `json:"type"` + DiffIDs []digest.Digest `json:"diff_ids,omitempty"` +} + +// Schema2History stores build commands that were used to create an image, from docker/docker/image. +type Schema2History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Schema2Image is an Image in docker/docker/image. +type Schema2Image struct { + Schema2V1Image + Parent digest.Digest `json:"parent,omitempty"` + RootFS *Schema2RootFS `json:"rootfs,omitempty"` + History []Schema2History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` +} + +// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. +func Schema2FromManifest(manifest []byte) (*Schema2, error) { + s2 := Schema2{} + if err := json.Unmarshal(manifest, &s2); err != nil { + return nil, err + } + if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema2MediaType, + allowedFieldConfig|allowedFieldLayers); err != nil { + return nil, err + } + // Check manifest's and layers' media types. 
+	if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
+		return nil, err
+	}
+	for _, layer := range s2.LayersDescriptors {
+		if err := SupportedSchema2MediaType(layer.MediaType); err != nil {
+			return nil, err
+		}
+	}
+	return &s2, nil
+}
+
+// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
+	return &Schema2{
+		SchemaVersion:     2,
+		MediaType:         DockerV2Schema2MediaType,
+		ConfigDescriptor:  config,
+		LayersDescriptors: layers,
+	}
+}
+
+// Schema2Clone creates a copy of the supplied Schema2 manifest.
+func Schema2Clone(src *Schema2) *Schema2 {
+	copy := *src
+	return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema2) ConfigInfo() types.BlobInfo {
+	return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema2) LayerInfos() []LayerInfo {
+	blobs := []LayerInfo{}
+	for _, layer := range m.LayersDescriptors {
+		blobs = append(blobs, LayerInfo{
+			BlobInfo:   BlobInfoFromSchema2Descriptor(layer),
+			EmptyLayer: false,
+		})
+	}
+	return blobs
+}
+
+var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
+	{
+		mtsUncompressed:                    DockerV2Schema2ForeignLayerMediaType,
+		compressiontypes.GzipAlgorithmName: DockerV2Schema2ForeignLayerMediaTypeGzip,
+		compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
+	},
+	{
+		mtsUncompressed:                    DockerV2SchemaLayerMediaTypeUncompressed,
+		compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
+		compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
+	},
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that would result in anything other than gzip compression.
+func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+	if len(m.LayersDescriptors) != len(layerInfos) {
+		return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+	}
+	original := m.LayersDescriptors
+	m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
+	for i, info := range layerInfos {
+		mimeType := original[i].MediaType
+		// First make sure we support the media type of the original layer.
+		if err := SupportedSchema2MediaType(mimeType); err != nil {
+			return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer %q: %q", info.Digest, mimeType)
+		}
+		mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info)
+		if err != nil {
+			return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest)
+		}
+		m.LayersDescriptors[i].MediaType = mimeType
+		m.LayersDescriptors[i].Digest = info.Digest
+		m.LayersDescriptors[i].Size = info.Size
+		m.LayersDescriptors[i].URLs = info.URLs
+	}
+	return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema2) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + s2 := &Schema2Image{} + if err := json.Unmarshal(config, s2); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: &s2.Created, + DockerVersion: s2.DockerVersion, + Architecture: s2.Architecture, + Variant: s2.Variant, + Os: s2.OS, + Layers: layerInfosToStrings(m.LayerInfos()), + } + if s2.Config != nil { + i.Labels = s2.Config.Labels + i.Env = s2.Config.Env + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema2) ImageID([]digest.Digest) (string, error) { + if err := m.ConfigDescriptor.Digest.Validate(); err != nil { + return "", err + } + return m.ConfigDescriptor.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go new file mode 100644 index 000000000..e97dfbd88 --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go @@ -0,0 +1,221 @@ +package manifest + +import ( + "encoding/json" + "fmt" + + platform "github.com/containers/image/v5/internal/pkg/platform" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Schema2PlatformSpec describes the platform which a particular manifest is +// specialized for. +type Schema2PlatformSpec struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` // removed in OCI +} + +// Schema2ManifestDescriptor references a platform-specific manifest. +type Schema2ManifestDescriptor struct { + Schema2Descriptor + Platform Schema2PlatformSpec `json:"platform"` +} + +// Schema2List is a list of platform-specific manifests. +type Schema2List struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []Schema2ManifestDescriptor `json:"manifests"` +} + +// MIMEType returns the MIME type of this particular manifest list. +func (list *Schema2List) MIMEType() string { + return list.MediaType +} + +// Instances returns a slice of digests of the manifests that this list knows of. +func (list *Schema2List) Instances() []digest.Digest { + results := make([]digest.Digest, len(list.Manifests)) + for i, m := range list.Manifests { + results[i] = m.Digest + } + return results +} + +// Instance returns the ListUpdate of a particular instance in the list. 
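+// Usage sketch (editor's illustration, assuming instanceDigest is one of the
+// values returned by list.Instances()):
+//
+//	upd, err := list.Instance(instanceDigest)
+//	if err == nil {
+//		fmt.Println(upd.MediaType, upd.Size, upd.Digest)
+//	}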
+func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) { + for _, manifest := range list.Manifests { + if manifest.Digest == instanceDigest { + return ListUpdate{ + Digest: manifest.Digest, + Size: manifest.Size, + MediaType: manifest.MediaType, + }, nil + } + } + return ListUpdate{}, errors.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) +} + +// UpdateInstances updates the sizes, digests, and media types of the manifests +// which the list catalogs. +func (list *Schema2List) UpdateInstances(updates []ListUpdate) error { + if len(updates) != len(list.Manifests) { + return errors.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates)) + } + for i := range updates { + if err := updates[i].Digest.Validate(); err != nil { + return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest", i+1, len(updates)) + } + list.Manifests[i].Digest = updates[i].Digest + if updates[i].Size < 0 { + return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + } + list.Manifests[i].Size = updates[i].Size + if updates[i].MediaType == "" { + return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType) + } + list.Manifests[i].MediaType = updates[i].MediaType + } + return nil +} + +// ChooseInstance parses blob as a schema2 manifest list, and returns the digest +// of the image which is appropriate for the current environment. +func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { + wantedPlatforms, err := platform.WantedPlatforms(ctx) + if err != nil { + return "", errors.Wrapf(err, "getting platform information %#v", ctx) + } + for _, wantedPlatform := range wantedPlatforms { + for _, d := range list.Manifests { + imagePlatform := imgspecv1.Platform{ + Architecture: d.Platform.Architecture, + OS: d.Platform.OS, + OSVersion: d.Platform.OSVersion, + OSFeatures: dupStringSlice(d.Platform.OSFeatures), + Variant: d.Platform.Variant, + } + if platform.MatchesPlatform(imagePlatform, wantedPlatform) { + return d.Digest, nil + } + } + } + return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) +} + +// Serialize returns the list in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (list *Schema2List) Serialize() ([]byte, error) { + buf, err := json.Marshal(list) + if err != nil { + return nil, errors.Wrapf(err, "marshaling Schema2List %#v", list) + } + return buf, nil +} + +// Schema2ListFromComponents creates a Schema2 manifest list instance from the +// supplied data. 
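+// A construction sketch (editor's illustration; the digest value is a placeholder):
+//
+//	list := Schema2ListFromComponents([]Schema2ManifestDescriptor{{
+//		Schema2Descriptor{MediaType: DockerV2Schema2MediaType, Size: 1234, Digest: "sha256:..."},
+//		Schema2PlatformSpec{Architecture: "amd64", OS: "linux"},
+//	}})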
+func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List { + list := Schema2List{ + SchemaVersion: 2, + MediaType: DockerV2ListMediaType, + Manifests: make([]Schema2ManifestDescriptor, len(components)), + } + for i, component := range components { + m := Schema2ManifestDescriptor{ + Schema2Descriptor{ + MediaType: component.MediaType, + Size: component.Size, + Digest: component.Digest, + URLs: dupStringSlice(component.URLs), + }, + Schema2PlatformSpec{ + Architecture: component.Platform.Architecture, + OS: component.Platform.OS, + OSVersion: component.Platform.OSVersion, + OSFeatures: dupStringSlice(component.Platform.OSFeatures), + Variant: component.Platform.Variant, + Features: dupStringSlice(component.Platform.Features), + }, + } + list.Manifests[i] = m + } + return &list +} + +// Schema2ListClone creates a deep copy of the passed-in list. +func Schema2ListClone(list *Schema2List) *Schema2List { + return Schema2ListFromComponents(list.Manifests) +} + +// ToOCI1Index returns the list encoded as an OCI1 index. +func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) { + components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) + for _, manifest := range list.Manifests { + converted := imgspecv1.Descriptor{ + MediaType: manifest.MediaType, + Size: manifest.Size, + Digest: manifest.Digest, + URLs: dupStringSlice(manifest.URLs), + Platform: &imgspecv1.Platform{ + OS: manifest.Platform.OS, + Architecture: manifest.Platform.Architecture, + OSFeatures: dupStringSlice(manifest.Platform.OSFeatures), + OSVersion: manifest.Platform.OSVersion, + Variant: manifest.Platform.Variant, + }, + } + components = append(components, converted) + } + oci := OCI1IndexFromComponents(components, nil) + return oci, nil +} + +// ToSchema2List returns the list encoded as a Schema2 list. +func (list *Schema2List) ToSchema2List() (*Schema2List, error) { + return Schema2ListClone(list), nil +} + +// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled +// JSON, presumably generated by encoding a Schema2 manifest list. +func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { + list := Schema2List{ + Manifests: []Schema2ManifestDescriptor{}, + } + if err := json.Unmarshal(manifest, &list); err != nil { + return nil, errors.Wrapf(err, "unmarshaling Schema2List %q", string(manifest)) + } + if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType, + allowedFieldManifests); err != nil { + return nil, err + } + return &list, nil +} + +// Clone returns a deep copy of this list and its contents. +func (list *Schema2List) Clone() List { + return Schema2ListClone(list) +} + +// ConvertToMIMEType converts the passed-in manifest list to a manifest +// list of the specified type. +func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return list.Clone(), nil + case imgspecv1.MediaTypeImageIndex: + return list.ToOCI1Index() + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. 
+ return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType) + } +} diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go new file mode 100644 index 000000000..58982597e --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/list.go @@ -0,0 +1,80 @@ +package manifest + +import ( + "fmt" + + "github.com/containers/image/v5/types" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // SupportedListMIMETypes is a list of the manifest list types that we know how to + // read/manipulate/write. + SupportedListMIMETypes = []string{ + DockerV2ListMediaType, + imgspecv1.MediaTypeImageIndex, + } +) + +// List is an interface for parsing, modifying lists of image manifests. +// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members +// directly. +type List interface { + // MIMEType returns the MIME type of this particular manifest list. + MIMEType() string + + // Instances returns a list of the manifests that this list knows of, other than its own. + Instances() []digest.Digest + + // Update information about the list's instances. The length of the passed-in slice must + // match the length of the list of instances which the list already contains, and every field + // must be specified. + UpdateInstances([]ListUpdate) error + + // Instance returns the size and MIME type of a particular instance in the list. + Instance(digest.Digest) (ListUpdate, error) + + // ChooseInstance selects which manifest is most appropriate for the platform described by the + // SystemContext, or for the current platform if the SystemContext doesn't specify any details. + ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) + + // Serialize returns the list in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded + // from, even if no modifications were made! + Serialize() ([]byte, error) + + // ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error. + ConvertToMIMEType(mimeType string) (List, error) + + // Clone returns a deep copy of this list and its contents. + Clone() List +} + +// ListUpdate includes the fields which a List's UpdateInstances() method will modify. +type ListUpdate struct { + Digest digest.Digest + Size int64 + MediaType string +} + +// ListFromBlob parses a list of manifests. +func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) { + normalized := NormalizedMIMEType(manifestMIMEType) + switch normalized { + case DockerV2ListMediaType: + return Schema2ListFromManifest(manifest) + case imgspecv1.MediaTypeImageIndex: + return OCI1IndexFromManifest(manifest) + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Treating single images as manifest lists is not implemented") + } + return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized) +} + +// ConvertListToMIMEType converts the passed-in manifest list to a manifest +// list of the specified type. 
+func ConvertListToMIMEType(list List, manifestMIMEType string) (List, error) {
+	return list.ConvertToMIMEType(manifestMIMEType)
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go
new file mode 100644
index 000000000..2e3e5da15
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/manifest.go
@@ -0,0 +1,270 @@
+package manifest
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/containers/image/v5/types"
+	"github.com/containers/libtrust"
+	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
+const (
+	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+	DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+	// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+	DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+	// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+	DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+	// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+	DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+	// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+	DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+	// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
+	DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
+	// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+	DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+	DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+	// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
+	DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
+func SupportedSchema2MediaType(m string) error {
+	switch m {
+	case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed:
+		return nil
+	default:
+		return fmt.Errorf("unsupported docker v2s2 media type: %q", m)
+	}
+}
+
+// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource
+// should request from the backend unless directed otherwise.
+var DefaultRequestedManifestMIMETypes = []string{
+	imgspecv1.MediaTypeImageManifest,
+	DockerV2Schema2MediaType,
+	DockerV2Schema1SignedMediaType,
+	DockerV2Schema1MediaType,
+	DockerV2ListMediaType,
+	imgspecv1.MediaTypeImageIndex,
+}
+
+// Manifest is an interface for parsing, modifying image manifests in isolation.
+// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members +// directly. +// +// See types.Image for functionality not limited to manifests, including format conversions and config parsing. +// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. +type Manifest interface { + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + ConfigInfo() types.BlobInfo + // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []LayerInfo + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) + UpdateLayerInfos(layerInfos []types.BlobInfo) error + + // ImageID computes an ID which can uniquely identify this image by its contents, irrespective + // of which (of possibly more than one simultaneously valid) reference was used to locate the + // image, and unchanged by whether or how the layers are compressed. The result takes the form + // of the hexadecimal portion of a digest.Digest. + ImageID(diffIDs []digest.Digest) (string, error) + + // Inspect returns various information for (skopeo inspect) parsed from the manifest, + // incorporating information from a configuration blob returned by configGetter, if + // the underlying image format is expected to include a configuration blob. + Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) + + // Serialize returns the manifest in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! + Serialize() ([]byte, error) +} + +// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos. +type LayerInfo struct { + types.BlobInfo + EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. +} + +// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. +// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, +// but we may not have such metadata available (e.g. when the manifest is a local file). +func GuessMIMEType(manifest []byte) string { + // A subset of manifest fields; the rest is silently ignored by json.Unmarshal. + // Also docker/distribution/manifest.Versioned. + meta := struct { + MediaType string `json:"mediaType"` + SchemaVersion int `json:"schemaVersion"` + Signatures interface{} `json:"signatures"` + }{} + if err := json.Unmarshal(manifest, &meta); err != nil { + return "" + } + + switch meta.MediaType { + case DockerV2Schema2MediaType, DockerV2ListMediaType, + imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type. 
+		return meta.MediaType
+	}
+	// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
+	switch meta.SchemaVersion {
+	case 1:
+		if meta.Signatures != nil {
+			return DockerV2Schema1SignedMediaType
+		}
+		return DockerV2Schema1MediaType
+	case 2:
+		// Best effort to understand if this is an OCI image since mediaType
+		// wasn't in the manifest for OCI image-spec < 1.0.2.
+		// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
+		ociMan := struct {
+			Config struct {
+				MediaType string `json:"mediaType"`
+			} `json:"config"`
+		}{}
+		if err := json.Unmarshal(manifest, &ociMan); err != nil {
+			return ""
+		}
+		switch ociMan.Config.MediaType {
+		case imgspecv1.MediaTypeImageConfig:
+			return imgspecv1.MediaTypeImageManifest
+		case DockerV2Schema2ConfigMediaType:
+			// This case should not happen since a Docker image
+			// must declare a top-level media type and
+			// `meta.MediaType` has already been checked.
+			return DockerV2Schema2MediaType
+		}
+		// Maybe an image index or an OCI artifact.
+		ociIndex := struct {
+			Manifests []imgspecv1.Descriptor `json:"manifests"`
+		}{}
+		if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+			return ""
+		}
+		if len(ociIndex.Manifests) != 0 {
+			if ociMan.Config.MediaType == "" {
+				return imgspecv1.MediaTypeImageIndex
+			}
+			// FIXME: this is mixing media types of manifests and configs.
+			return ociMan.Config.MediaType
+		}
+		// It's most likely an OCI artifact with a custom config media
+		// type which is not (and cannot be) covered by the media-type
+		// checks above.
+		return imgspecv1.MediaTypeImageManifest
+	}
+	return ""
+}
+
+// Digest returns a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+func Digest(manifest []byte) (digest.Digest, error) {
+	if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
+		sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
+		if err != nil {
+			return "", err
+		}
+		manifest, err = sig.Payload()
+		if err != nil {
+			// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
+			// that libtrust itself has josebase64UrlEncode()d
+			return "", err
+		}
+	}
+
+	return digest.FromBytes(manifest), nil
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// Error may be set if this returns false.
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
+	// This should eventually support various digest types.
+	actualDigest, err := Digest(manifest)
+	if err != nil {
+		return false, err
+	}
+	return expectedDigest == actualDigest, nil
+}
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a docker/distribution registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		return nil, err // Coverage: This can fail only if rand.Reader fails.
+ } + + js, err := libtrust.NewJSONSignature(manifest) + if err != nil { + return nil, err + } + if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails. + return nil, err + } + return js.PrettySignature("signatures") +} + +// MIMETypeIsMultiImage returns true if mimeType is a list of images +func MIMETypeIsMultiImage(mimeType string) bool { + return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex +} + +// MIMETypeSupportsEncryption returns true if the mimeType supports encryption +func MIMETypeSupportsEncryption(mimeType string) bool { + return mimeType == imgspecv1.MediaTypeImageManifest +} + +// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, +// centralizing various workarounds. +func NormalizedMIMEType(input string) string { + switch input { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + imgspecv1.MediaTypeImageIndex, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} + +// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type +func FromBlob(manblob []byte, mt string) (Manifest, error) { + nmt := NormalizedMIMEType(mt) + switch nmt { + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: + return Schema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return OCI1FromManifest(manblob) + case DockerV2Schema2MediaType: + return Schema2FromManifest(manblob) + case DockerV2ListMediaType, imgspecv1.MediaTypeImageIndex: + return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") + } + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. 
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt) +} diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go new file mode 100644 index 000000000..5892184df --- /dev/null +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -0,0 +1,220 @@ +package manifest + +import ( + "encoding/json" + "fmt" + "strings" + + compressiontypes "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/image/v5/types" + ociencspec "github.com/containers/ocicrypt/spec" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. +func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + Annotations: desc.Annotations, + MediaType: desc.MediaType, + } +} + +// OCI1 is a manifest.Manifest implementation for OCI images. +// The underlying data from imgspecv1.Manifest is also available. +type OCI1 struct { + imgspecv1.Manifest +} + +// SupportedOCI1MediaType checks if the specified string is a supported OCI1 +// media type. +// +// Deprecated: blindly rejecting unknown MIME types when the consumer does not +// need to process the input just reduces interoperability (and violates the +// standard) with no benefit, and that this function does not check that the +// media type is appropriate for any specific purpose, so it’s not all that +// useful for validation anyway. +func SupportedOCI1MediaType(m string) error { + switch m { + case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: + return nil + default: + return fmt.Errorf("unsupported OCIv1 media type: %q", m) + } +} + +// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. +func OCI1FromManifest(manifest []byte) (*OCI1, error) { + oci1 := OCI1{} + if err := json.Unmarshal(manifest, &oci1); err != nil { + return nil, err + } + if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, + allowedFieldConfig|allowedFieldLayers); err != nil { + return nil, err + } + return &oci1, nil +} + +// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. +func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { + return &OCI1{ + imgspecv1.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: imgspecv1.MediaTypeImageManifest, + Config: config, + Layers: layers, + }, + } +} + +// OCI1Clone creates a copy of the supplied OCI1 manifest. +func OCI1Clone(src *OCI1) *OCI1 { + return &OCI1{ + Manifest: src.Manifest, + } +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. 
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *OCI1) ConfigInfo() types.BlobInfo {
+ return BlobInfoFromOCI1Descriptor(m.Config)
+}
+
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *OCI1) LayerInfos() []LayerInfo {
+ blobs := []LayerInfo{}
+ for _, layer := range m.Layers {
+ blobs = append(blobs, LayerInfo{
+ BlobInfo: BlobInfoFromOCI1Descriptor(layer),
+ EmptyLayer: false,
+ })
+ }
+ return blobs
+}
+
+var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
+ {
+ mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable,
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip,
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd,
+ },
+ {
+ mtsUncompressed: imgspecv1.MediaTypeImageLayer,
+ compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerGzip,
+ compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerZstd,
+ },
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers).
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that isn't supported by OCI.
+func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ if len(m.Layers) != len(layerInfos) {
+ return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
+ }
+ original := m.Layers
+ m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
+ for i, info := range layerInfos {
+ mimeType := original[i].MediaType
+ if info.CryptoOperation == types.Decrypt {
+ decMimeType, err := getDecryptedMediaType(mimeType)
+ if err != nil {
+ return fmt.Errorf("error preparing updated manifest: decryption specified but original mediatype is not encrypted: %q", mimeType)
+ }
+ mimeType = decMimeType
+ }
+ mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
+ if err != nil {
+ return errors.Wrapf(err, "preparing updated manifest, layer %q", info.Digest)
+ }
+ if info.CryptoOperation == types.Encrypt {
+ encMediaType, err := getEncryptedMediaType(mimeType)
+ if err != nil {
+ return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", mimeType)
+ }
+ mimeType = encMediaType
+ }
+
+ m.Layers[i].MediaType = mimeType
+ m.Layers[i].Digest = info.Digest
+ m.Layers[i].Size = info.Size
+ m.Layers[i].Annotations = info.Annotations
+ m.Layers[i].URLs = info.URLs
+ }
+ return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *OCI1) Serialize() ([]byte, error) {
+ return json.Marshal(*m)
+}
+
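UpdateLayerInfos above maps each layer's (CompressionOperation, CompressionAlgorithm) pair onto the matching OCI media type via oci1CompressionMIMETypeSets. A hedged sketch; the digests and sizes are placeholders:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Hypothetical single-layer manifest; digests/sizes are illustrative only.
	config := imgspecv1.Descriptor{MediaType: imgspecv1.MediaTypeImageConfig, Digest: digest.FromString("cfg"), Size: 3}
	layer := imgspecv1.Descriptor{MediaType: imgspecv1.MediaTypeImageLayerGzip, Digest: digest.FromString("old"), Size: 3}
	m := manifest.OCI1FromComponents(config, []imgspecv1.Descriptor{layer})

	// Declare that the layer was recompressed with zstd; UpdateLayerInfos
	// rewrites the layer media type accordingly.
	err := m.UpdateLayerInfos([]types.BlobInfo{{
		Digest:               digest.FromString("new"),
		Size:                 3,
		CompressionOperation: types.Compress,
		CompressionAlgorithm: &compression.Zstd,
	}})
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Layers[0].MediaType) // application/vnd.oci.image.layer.v1.tar+zstd
}
```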
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ config, err := configGetter(m.ConfigInfo())
+ if err != nil {
+ return nil, err
+ }
+ v1 := &imgspecv1.Image{}
+ if err := json.Unmarshal(config, v1); err != nil {
+ return nil, err
+ }
+ d1 := &Schema2V1Image{}
+ if err := json.Unmarshal(config, d1); err != nil {
+ return nil, err
+ }
+ i := &types.ImageInspectInfo{
+ Tag: "",
+ Created: v1.Created,
+ DockerVersion: d1.DockerVersion,
+ Labels: v1.Config.Labels,
+ Architecture: v1.Architecture,
+ Os: v1.OS,
+ Layers: layerInfosToStrings(m.LayerInfos()),
+ Env: v1.Config.Env,
+ }
+ return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+ if err := m.Config.Digest.Validate(); err != nil {
+ return "", err
+ }
+ return m.Config.Digest.Hex(), nil
+}
+
+// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
+// an error if the mediatype does not support encryption
+func getEncryptedMediaType(mediatype string) (string, error) {
+ for _, s := range strings.Split(mediatype, "+")[1:] {
+ if s == "encrypted" {
+ return "", errors.Errorf("unsupported mediatype: %v already encrypted", mediatype)
+ }
+ }
+ unsuffixedMediatype := strings.Split(mediatype, "+")[0]
+ switch unsuffixedMediatype {
+ case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
+ return mediatype + "+encrypted", nil
+ }
+
+ return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
+}
+
+// getDecryptedMediaType will return the mediatype to its decrypted counterpart and return
+// an error if the mediatype does not support decryption
+func getDecryptedMediaType(mediatype string) (string, error) {
+ if !strings.HasSuffix(mediatype, "+encrypted") {
+ return "", errors.Errorf("unsupported mediatype to decrypt: %v", mediatype)
+ }
+
+ return strings.TrimSuffix(mediatype, "+encrypted"), nil
+}
diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go
new file mode 100644
index 000000000..c4f11e09c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/manifest/oci_index.go
@@ -0,0 +1,233 @@
+package manifest
+
+import (
+ "encoding/json"
+ "fmt"
+ "runtime"
+
+ platform "github.com/containers/image/v5/internal/pkg/platform"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspec "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// OCI1Index is just an alias for the OCI index type, but one which we can
+// provide methods for.
+type OCI1Index struct {
+ imgspecv1.Index
+}
+
+// MIMEType returns the MIME type of this particular manifest index.
+func (index *OCI1Index) MIMEType() string {
+ return imgspecv1.MediaTypeImageIndex
+}
+
+// Instances returns a slice of digests of the manifests that this index knows of.
+func (index *OCI1Index) Instances() []digest.Digest {
+ results := make([]digest.Digest, len(index.Manifests))
+ for i, m := range index.Manifests {
+ results[i] = m.Digest
+ }
+ return results
+}
+
+// Instance returns the ListUpdate of a particular instance in the index.
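getEncryptedMediaType and getDecryptedMediaType above encode a simple convention: encryption appends a "+encrypted" suffix to the base media type, and decryption strips it again. The same convention in a standalone, illustrative form (names here are hypothetical, not from the vendored package):

```go
package main

import (
	"fmt"
	"strings"
)

const encSuffix = "+encrypted"

// encrypted appends the suffix to a layer media type.
func encrypted(mediaType string) string { return mediaType + encSuffix }

// decrypted strips the suffix, reporting whether it was present at all.
func decrypted(mediaType string) (string, bool) {
	if !strings.HasSuffix(mediaType, encSuffix) {
		return "", false
	}
	return strings.TrimSuffix(mediaType, encSuffix), true
}

func main() {
	enc := encrypted("application/vnd.oci.image.layer.v1.tar+gzip")
	dec, ok := decrypted(enc)
	fmt.Println(enc, dec, ok)
}
```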
+func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) { + for _, manifest := range index.Manifests { + if manifest.Digest == instanceDigest { + return ListUpdate{ + Digest: manifest.Digest, + Size: manifest.Size, + MediaType: manifest.MediaType, + }, nil + } + } + return ListUpdate{}, errors.Errorf("unable to find instance %s in OCI1Index", instanceDigest) +} + +// UpdateInstances updates the sizes, digests, and media types of the manifests +// which the list catalogs. +func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error { + if len(updates) != len(index.Manifests) { + return errors.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates)) + } + for i := range updates { + if err := updates[i].Digest.Validate(); err != nil { + return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest", i+1, len(updates)) + } + index.Manifests[i].Digest = updates[i].Digest + if updates[i].Size < 0 { + return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size) + } + index.Manifests[i].Size = updates[i].Size + if updates[i].MediaType == "" { + return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType) + } + index.Manifests[i].MediaType = updates[i].MediaType + } + return nil +} + +// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest +// of the image which is appropriate for the current environment. +func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { + wantedPlatforms, err := platform.WantedPlatforms(ctx) + if err != nil { + return "", errors.Wrapf(err, "getting platform information %#v", ctx) + } + for _, wantedPlatform := range wantedPlatforms { + for _, d := range index.Manifests { + if d.Platform == nil { + continue + } + imagePlatform := imgspecv1.Platform{ + Architecture: d.Platform.Architecture, + OS: d.Platform.OS, + OSVersion: d.Platform.OSVersion, + OSFeatures: dupStringSlice(d.Platform.OSFeatures), + Variant: d.Platform.Variant, + } + if platform.MatchesPlatform(imagePlatform, wantedPlatform) { + return d.Digest, nil + } + } + } + + for _, d := range index.Manifests { + if d.Platform == nil { + return d.Digest, nil + } + } + return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) +} + +// Serialize returns the index in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (index *OCI1Index) Serialize() ([]byte, error) { + buf, err := json.Marshal(index) + if err != nil { + return nil, errors.Wrapf(err, "marshaling OCI1Index %#v", index) + } + return buf, nil +} + +// OCI1IndexFromComponents creates an OCI1 image index instance from the +// supplied data. 
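ChooseInstance above walks the platform preference list derived from the SystemContext, so overriding ArchitectureChoice/OSChoice selects a non-native instance. A hedged usage sketch ("index.json" is a placeholder path):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

func main() {
	blob, err := os.ReadFile("index.json") // placeholder: an OCI image index blob
	if err != nil {
		panic(err)
	}
	index, err := manifest.OCI1IndexFromManifest(blob)
	if err != nil {
		panic(err)
	}
	// Overriding the choices makes WantedPlatforms prefer linux/arm64
	// over the platform the program itself runs on.
	sys := &types.SystemContext{ArchitectureChoice: "arm64", OSChoice: "linux"}
	dgst, err := index.ChooseInstance(sys)
	if err != nil {
		panic(err)
	}
	fmt.Println("chosen instance:", dgst)
}
```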
+func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
+ index := OCI1Index{
+ imgspecv1.Index{
+ Versioned: imgspec.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageIndex,
+ Manifests: make([]imgspecv1.Descriptor, len(components)),
+ Annotations: dupStringStringMap(annotations),
+ },
+ }
+ for i, component := range components {
+ var platform *imgspecv1.Platform
+ if component.Platform != nil {
+ platform = &imgspecv1.Platform{
+ Architecture: component.Platform.Architecture,
+ OS: component.Platform.OS,
+ OSVersion: component.Platform.OSVersion,
+ OSFeatures: dupStringSlice(component.Platform.OSFeatures),
+ Variant: component.Platform.Variant,
+ }
+ }
+ m := imgspecv1.Descriptor{
+ MediaType: component.MediaType,
+ Size: component.Size,
+ Digest: component.Digest,
+ URLs: dupStringSlice(component.URLs),
+ Annotations: dupStringStringMap(component.Annotations),
+ Platform: platform,
+ }
+ index.Manifests[i] = m
+ }
+ return &index
+}
+
+// OCI1IndexClone creates a deep copy of the passed-in index.
+func OCI1IndexClone(index *OCI1Index) *OCI1Index {
+ return OCI1IndexFromComponents(index.Manifests, index.Annotations)
+}
+
+// ToOCI1Index returns the index encoded as an OCI1 index.
+func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) {
+ return OCI1IndexClone(index), nil
+}
+
+// ToSchema2List returns the index encoded as a Schema2 list.
+func (index *OCI1Index) ToSchema2List() (*Schema2List, error) {
+ components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
+ for _, manifest := range index.Manifests {
+ platform := manifest.Platform
+ if platform == nil {
+ platform = &imgspecv1.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ }
+ }
+ converted := Schema2ManifestDescriptor{
+ Schema2Descriptor{
+ MediaType: manifest.MediaType,
+ Size: manifest.Size,
+ Digest: manifest.Digest,
+ URLs: dupStringSlice(manifest.URLs),
+ },
+ Schema2PlatformSpec{
+ OS: platform.OS,
+ Architecture: platform.Architecture,
+ OSFeatures: dupStringSlice(platform.OSFeatures),
+ OSVersion: platform.OSVersion,
+ Variant: platform.Variant,
+ },
+ }
+ components = append(components, converted)
+ }
+ s2 := Schema2ListFromComponents(components)
+ return s2, nil
+}
+
+// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
+// JSON, presumably generated by encoding an OCI1 manifest index.
+func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
+ index := OCI1Index{
+ Index: imgspecv1.Index{
+ Versioned: imgspec.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageIndex,
+ Manifests: []imgspecv1.Descriptor{},
+ Annotations: make(map[string]string),
+ },
+ }
+ if err := json.Unmarshal(manifest, &index); err != nil {
+ return nil, errors.Wrapf(err, "unmarshaling OCI1Index %q", string(manifest))
+ }
+ if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
+ allowedFieldManifests); err != nil {
+ return nil, err
+ }
+ return &index, nil
+}
+
+// Clone returns a deep copy of this list and its contents.
+func (index *OCI1Index) Clone() List {
+ return OCI1IndexClone(index)
+}
+
+// ConvertToMIMEType converts the passed-in image index to a manifest list of
+// the specified type.
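ToSchema2List above backs ConvertToMIMEType (documented just above, defined immediately below), which can downgrade an OCI index to a Docker schema2 manifest list, substituting the current runtime platform for entries that lack one. A small sketch with a placeholder descriptor:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// A one-entry index built from an illustrative descriptor with no Platform;
	// conversion fills in the runtime GOOS/GOARCH for such entries.
	desc := imgspecv1.Descriptor{
		MediaType: imgspecv1.MediaTypeImageManifest,
		Digest:    digest.FromString("placeholder"),
		Size:      11,
	}
	index := manifest.OCI1IndexFromComponents([]imgspecv1.Descriptor{desc}, nil)

	list, err := index.ConvertToMIMEType(manifest.DockerV2ListMediaType)
	if err != nil {
		panic(err)
	}
	fmt.Println(list.MIMEType()) // application/vnd.docker.distribution.manifest.list.v2+json
}
```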
+func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) { + switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { + case DockerV2ListMediaType: + return index.ToSchema2List() + case imgspecv1.MediaTypeImageIndex: + return index.Clone(), nil + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: + return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) + default: + // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) + } +} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go new file mode 100644 index 000000000..4b7122f92 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go @@ -0,0 +1,50 @@ +// Package none implements a dummy BlobInfoCache which records no data. +package none + +import ( + "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" +) + +// noCache implements a dummy BlobInfoCache which records no data. +type noCache struct { +} + +// NoCache implements BlobInfoCache by not recording any data. +// +// This exists primarily for implementations of configGetter for +// Manifest.Inspect, because configs only have one representation. +// Any use of BlobInfoCache with blobs should usually use at least a +// short-lived cache, ideally blobinfocache.DefaultCache. +var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{}) + +// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. +// May return anyDigest if it is known to be uncompressed. +// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). +func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { + return "" +} + +// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. +// It’s allowed for anyDigest == uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { +} + +// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, +// and can be reused given the opaque location data. +func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). 
+// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. +func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return nil +} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/compression.go b/vendor/github.com/containers/image/v5/pkg/compression/compression.go new file mode 100644 index 000000000..34c90dd77 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/compression.go @@ -0,0 +1,166 @@ +package compression + +import ( + "bytes" + "compress/bzip2" + "fmt" + "io" + + "github.com/containers/image/v5/pkg/compression/internal" + "github.com/containers/image/v5/pkg/compression/types" + "github.com/containers/storage/pkg/chunked/compressor" + "github.com/klauspost/pgzip" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" +) + +// Algorithm is a compression algorithm that can be used for CompressStream. +type Algorithm = types.Algorithm + +var ( + // Gzip compression. + Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName, + []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor) + // Bzip2 compression. + Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName, + []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor) + // Xz compression. + Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName, + []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) + // Zstd compression. + Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName, + []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) + // Zstd:chunked compression. + ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */ + nil, ZstdDecompressor, compressor.ZstdCompressor) + + compressionAlgorithms = map[string]Algorithm{ + Gzip.Name(): Gzip, + Bzip2.Name(): Bzip2, + Xz.Name(): Xz, + Zstd.Name(): Zstd, + ZstdChunked.Name(): ZstdChunked, + } +) + +// AlgorithmByName returns the compressor by its name +func AlgorithmByName(name string) (Algorithm, error) { + algorithm, ok := compressionAlgorithms[name] + if ok { + return algorithm, nil + } + return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name) +} + +// DecompressorFunc returns the decompressed stream, given a compressed stream. +// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). +type DecompressorFunc = internal.DecompressorFunc + +// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. +func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { + return pgzip.NewReader(r) +} + +// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. +func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { + return io.NopCloser(bzip2.NewReader(r)), nil +} + +// XzDecompressor is a DecompressorFunc for the xz compression algorithm. 
+func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
+ r, err := xz.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ return io.NopCloser(r), nil
+}
+
+// gzipCompressor is a CompressorFunc for the gzip compression algorithm.
+func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ if level != nil {
+ return pgzip.NewWriterLevel(r, *level)
+ }
+ return pgzip.NewWriter(r), nil
+}
+
+// bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm.
+func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ return nil, fmt.Errorf("bzip2 compression not supported")
+}
+
+// xzCompressor is a CompressorFunc for the xz compression algorithm.
+func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
+ return xz.NewWriter(r)
+}
+
+// CompressStream returns a stream that compresses into dest using the specified algorithm, at the given compression level (nil selects the default).
+func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) {
+ m := map[string]string{}
+ return internal.AlgorithmCompressor(algo)(dest, m, level)
+}
+
+// CompressStreamWithMetadata is like CompressStream, except that if the compression
+// generates any metadata, it is written to the provided metadata map.
+func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) {
+ return internal.AlgorithmCompressor(algo)(dest, metadata, level)
+}
+
+// DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an invalid
+// value and nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) {
+ buffer := [8]byte{}
+
+ n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+ // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+ return Algorithm{}, nil, nil, err
+ }
+
+ var retAlgo Algorithm
+ var decompressor DecompressorFunc
+ for _, algo := range compressionAlgorithms {
+ prefix := internal.AlgorithmPrefix(algo)
+ if len(prefix) > 0 && bytes.HasPrefix(buffer[:n], prefix) {
+ logrus.Debugf("Detected compression format %s", algo.Name())
+ retAlgo = algo
+ decompressor = internal.AlgorithmDecompressor(algo)
+ break
+ }
+ }
+ if decompressor == nil {
+ logrus.Debugf("No compression detected")
+ }
+
+ return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
+
+// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+ _, d, r, e := DetectCompressionFormat(input)
+ return d, r, e
+}
+
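A round-trip sketch of the compression API above: compress with a chosen algorithm, then undo it without knowing the format up front (detection relies on the magic-byte prefixes each Algorithm registers):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	// Compress some bytes with gzip; a nil level selects the default.
	var buf bytes.Buffer
	w, err := compression.CompressStream(&buf, compression.Gzip, nil)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello layer")); err != nil {
		panic(err)
	}
	w.Close() // flushes the gzip trailer

	// AutoDecompress sniffs the stream and transparently decompresses it.
	rc, wasCompressed, err := compression.AutoDecompress(&buf)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	out, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out), wasCompressed) // hello layer true
}
```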
+// AutoDecompress takes a stream and returns an uncompressed version of the
+// same stream.
+// The caller must call Close() on the returned stream (even if the input does not need,
+// or does not even support, closing!).
+func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
+ decompressor, stream, err := DetectCompression(stream)
+ if err != nil {
+ return nil, false, errors.Wrapf(err, "detecting compression")
+ }
+ var res io.ReadCloser
+ if decompressor != nil {
+ res, err = decompressor(stream)
+ if err != nil {
+ return nil, false, errors.Wrapf(err, "initializing decompression")
+ }
+ } else {
+ res = io.NopCloser(stream)
+ }
+ return res, decompressor != nil, nil
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
new file mode 100644
index 000000000..fb37ca317
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
@@ -0,0 +1,65 @@
+package internal
+
+import "io"
+
+// CompressorFunc writes the compressed stream to the given writer using the specified compression level.
+// The caller must call Close() on the stream (even if the input stream does not need closing!).
+type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm struct {
+ name string
+ mime string
+ prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
+ decompressor DecompressorFunc
+ compressor CompressorFunc
+}
+
+// NewAlgorithm creates an Algorithm instance.
+// This function exists so that Algorithm instances can only be created by code that
+// is allowed to import this internal subpackage.
+func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+ return Algorithm{
+ name: name,
+ mime: mime,
+ prefix: prefix,
+ decompressor: decompressor,
+ compressor: compressor,
+ }
+}
+
+// Name returns the name for the compression algorithm.
+func (c Algorithm) Name() string {
+ return c.name
+}
+
+// InternalUnstableUndocumentedMIMEQuestionMark ???
+// DO NOT USE THIS anywhere outside of c/image until it is properly documented.
+func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
+ return c.mime
+}
+
+// AlgorithmCompressor returns the compressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmCompressor(algo Algorithm) CompressorFunc {
+ return algo.compressor
+}
+
+// AlgorithmDecompressor returns the decompressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
+ return algo.decompressor
+}
+
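The unexported Algorithm fields plus the internal-only NewAlgorithm constructor are an instance of Go's internal-package gatekeeping pattern: only code allowed to import the internal subpackage can mint Algorithm values, so external callers must go through the public registry (AlgorithmByName above). A generic sketch of the pattern with hypothetical package names:

```go
// Hypothetical layout illustrating the pattern:
//
//   mylib/internal/alg/alg.go  -- defines Algorithm with unexported fields + New()
//   mylib/registry.go          -- imports internal/alg, exposes values by name
//
// The Go toolchain rejects imports of mylib/internal/... from outside mylib/,
// so every Algorithm in a program necessarily came from mylib's own registry.
package alg

// Algorithm is opaque outside this package: its fields are unexported.
type Algorithm struct{ name string }

// New is the only constructor; callers outside mylib/ cannot reach it.
func New(name string) Algorithm { return Algorithm{name: name} }

// Name is the only accessor exposed to everyone.
func (a Algorithm) Name() string { return a.name }
```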
+// AlgorithmPrefix returns the prefix field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmPrefix(algo Algorithm) []byte {
+ return algo.prefix
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
new file mode 100644
index 000000000..43d03b601
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
@@ -0,0 +1,41 @@
+package types
+
+import (
+ "github.com/containers/image/v5/pkg/compression/internal"
+)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc = internal.DecompressorFunc
+
+// Algorithm is a compression algorithm provided and supported by pkg/compression.
+// It can’t be supplied from the outside.
+type Algorithm = internal.Algorithm
+
+const (
+ // GzipAlgorithmName is the name used by pkg/compression.Gzip.
+ // NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ GzipAlgorithmName = "gzip"
+ // Bzip2AlgorithmName is the name used by pkg/compression.Bzip2.
+ // NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ Bzip2AlgorithmName = "bzip2"
+ // XzAlgorithmName is the name used by pkg/compression.Xz.
+ // NOTE: Importing only this /types package does not inherently guarantee an Xz algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ XzAlgorithmName = "Xz"
+ // ZstdAlgorithmName is the name used by pkg/compression.Zstd.
+ // NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ ZstdAlgorithmName = "zstd"
+ // ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked.
+ // NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm
+ // will actually be available. (In fact it is intended for this types package not to depend
+ // on any of the implementations.)
+ ZstdChunkedAlgorithmName = "zstd:chunked" +) diff --git a/vendor/github.com/containers/image/v5/pkg/compression/zstd.go b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go new file mode 100644 index 000000000..39ae014d2 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/compression/zstd.go @@ -0,0 +1,59 @@ +package compression + +import ( + "io" + + "github.com/klauspost/compress/zstd" +) + +type wrapperZstdDecoder struct { + decoder *zstd.Decoder +} + +func (w *wrapperZstdDecoder) Close() error { + w.decoder.Close() + return nil +} + +func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { + return w.decoder.DecodeAll(input, dst) +} + +func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { + return w.decoder.Read(p) +} + +func (w *wrapperZstdDecoder) Reset(r io.Reader) error { + return w.decoder.Reset(r) +} + +func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { + return w.decoder.WriteTo(wr) +} + +func zstdReader(buf io.Reader) (io.ReadCloser, error) { + decoder, err := zstd.NewReader(buf) + return &wrapperZstdDecoder{decoder: decoder}, err +} + +func zstdWriter(dest io.Writer) (io.WriteCloser, error) { + return zstd.NewWriter(dest) +} + +func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} + +// zstdCompressor is a CompressorFunc for the zstd compression algorithm. +func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + return zstdWriter(r) + } + return zstdWriterWithLevel(r, *level) +} + +// ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm. +func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) { + return zstdReader(r) +} diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/README.md b/vendor/github.com/containers/image/v5/pkg/strslice/README.md new file mode 100644 index 000000000..ae6097e82 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/strslice/README.md @@ -0,0 +1 @@ +This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice). diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go new file mode 100644 index 000000000..bad493fb8 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go @@ -0,0 +1,30 @@ +package strslice + +import "encoding/json" + +// StrSlice represents a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. 
+ return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/containers/image/v5/transports/stub.go b/vendor/github.com/containers/image/v5/transports/stub.go new file mode 100644 index 000000000..2c186a90c --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/stub.go @@ -0,0 +1,36 @@ +package transports + +import ( + "fmt" + + "github.com/containers/image/v5/types" +) + +// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. +type stubTransport string + +// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. +func NewStubTransport(name string) types.ImageTransport { + return stubTransport(name) +} + +// Name returns the name of the transport, which must be unique among other transports. +func (s stubTransport) Name() string { + return string(s) +} + +// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. +func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) { + return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s)) +} + +// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys +// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). +// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. +// scope passed to this function will not be "", that value is always allowed. +func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error { + // Allowing any reference in here allows tools with some transports stubbed-out to still + // use signature verification policies which refer to these stubbed-out transports. + // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON . + return nil +} diff --git a/vendor/github.com/containers/image/v5/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go new file mode 100644 index 000000000..46ee3710f --- /dev/null +++ b/vendor/github.com/containers/image/v5/transports/transports.go @@ -0,0 +1,90 @@ +package transports + +import ( + "fmt" + "sort" + "sync" + + "github.com/containers/image/v5/types" +) + +// knownTransports is a registry of known ImageTransport instances. 
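Registering a stub keeps reference parsing deterministic in builds that omit a transport: users get a clear "not supported in this build" error rather than an unknown-transport failure. A hedged sketch against the registry defined next:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/transports"
)

func main() {
	// In a reduced build, register a stub in place of the real transport.
	transports.Register(transports.NewStubTransport("docker-daemon"))

	// Lookups still succeed, but parsing any reference fails with a clear message.
	t := transports.Get("docker-daemon")
	_, err := t.ParseReference("alpine:latest")
	fmt.Println(err) // The transport "docker-daemon:" is not supported in this build
}
```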
+type knownTransports struct {
+ transports map[string]types.ImageTransport
+ mu sync.Mutex
+}
+
+func (kt *knownTransports) Get(k string) types.ImageTransport {
+ kt.mu.Lock()
+ t := kt.transports[k]
+ kt.mu.Unlock()
+ return t
+}
+
+func (kt *knownTransports) Remove(k string) {
+ kt.mu.Lock()
+ delete(kt.transports, k)
+ kt.mu.Unlock()
+}
+
+func (kt *knownTransports) Add(t types.ImageTransport) {
+ kt.mu.Lock()
+ defer kt.mu.Unlock()
+ name := t.Name()
+ if t := kt.transports[name]; t != nil {
+ panic(fmt.Sprintf("Duplicate image transport name %s", name))
+ }
+ kt.transports[name] = t
+}
+
+var kt *knownTransports
+
+func init() {
+ kt = &knownTransports{
+ transports: make(map[string]types.ImageTransport),
+ }
+}
+
+// Get returns the transport specified by name or nil when unavailable.
+func Get(name string) types.ImageTransport {
+ return kt.Get(name)
+}
+
+// Delete deletes a transport from the registered transports.
+func Delete(name string) {
+ kt.Remove(name)
+}
+
+// Register registers a transport.
+func Register(t types.ImageTransport) {
+ kt.Add(t)
+}
+
+// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that
+// ParseImageName(ImageName(reference)) returns an equivalent reference.
+//
+// This is the generally recommended way to refer to images in the UI.
+//
+// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+func ImageName(ref types.ImageReference) string {
+ return ref.Transport().Name() + ":" + ref.StringWithinTransport()
+}
+
+// ListNames returns a list of non-deprecated transport names.
+// Deprecated transports can be used, but are not presented to users.
+func ListNames() []string {
+ kt.mu.Lock()
+ defer kt.mu.Unlock()
+ deprecated := map[string]bool{
+ "atomic": true,
+ }
+ var names []string
+ for _, transport := range kt.transports {
+ if !deprecated[transport.Name()] {
+ names = append(names, transport.Name())
+ }
+ }
+ sort.Strings(names)
+ return names
+}
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
new file mode 100644
index 000000000..dcff8caf7
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -0,0 +1,694 @@
+package types
+
+import (
+ "context"
+ "io"
+ "time"
+
+ "github.com/containers/image/v5/docker/reference"
+ compression "github.com/containers/image/v5/pkg/compression/types"
+ digest "github.com/opencontainers/go-digest"
+ v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageTransport is a top-level namespace for ways to store/load an image.
+// It should generally correspond to ImageSource/ImageDestination implementations.
+//
+// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
+// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
+// (or, even, IPv4 or IPv6).
+//
+// OTOH all images using the same transport should (apart from versions of the image format) be interoperable.
+// For example, several different ImageTransport implementations may be based on local filesystem paths,
+// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
+// +// See also transports.KnownTransports. +type ImageTransport interface { + // Name returns the name of the transport, which must be unique among other transports. + Name() string + // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. + ParseReference(reference string) (ImageReference, error) + // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys + // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). + // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. + // scope passed to this function will not be "", that value is always allowed. + ValidatePolicyConfigurationScope(scope string) error +} + +// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport. +// +// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening +// within an ImageTransport.ParseReference() or equivalent API creating the reference object. +// That's also why the various identification/formatting methods of this type do not support returning errors. +// +// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside +// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. +type ImageReference interface { + Transport() ImageTransport + // StringWithinTransport returns a string representation of the reference, which MUST be such that + // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. + // NOTE: The returned string is not promised to be equal to the original input to ParseReference; + // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. + // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; + // instead, see transports.ImageName(). + StringWithinTransport() string + + // DockerReference returns a Docker reference associated with this reference + // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, + // not e.g. after redirect or alias processing), or nil if unknown/not applicable. + DockerReference() reference.Named + + // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. + // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; + // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical + // (i.e. various references with exactly the same semantics should return the same configuration identity) + // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but + // not required/guaranteed that it will be a valid input to Transport().ParseReference(). + // Returns "" if configuration identities for these references are not supported. + PolicyConfigurationIdentity() string + + // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search + // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed + // in order, terminating on first match, and an implicit "" is always checked at the end. 
+ // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+ // and each following element to be a prefix of the element preceding it.
+ PolicyConfigurationNamespaces() []string
+
+ // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+ // The caller must call .Close() on the returned ImageCloser.
+ // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+ // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+ // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+ NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error)
+ // NewImageSource returns a types.ImageSource for this reference.
+ // The caller must call .Close() on the returned ImageSource.
+ NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error)
+ // NewImageDestination returns a types.ImageDestination for this reference.
+ // The caller must call .Close() on the returned ImageDestination.
+ NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error)
+
+ // DeleteImage deletes the named image from the registry, if supported.
+ DeleteImage(ctx context.Context, sys *SystemContext) error
+}
+
+// LayerCompression indicates if layers must be compressed, decompressed or preserved
+type LayerCompression int
+
+const (
+ // PreserveOriginal indicates the layer must be preserved, i.e.
+ // no compression or decompression.
+ PreserveOriginal LayerCompression = iota
+ // Decompress indicates the layer must be decompressed
+ Decompress
+ // Compress indicates the layer must be compressed
+ Compress
+)
+
+// LayerCrypto indicates if layers have been encrypted or decrypted or none
+type LayerCrypto int
+
+const (
+ // PreserveOriginalCrypto indicates the layer must be preserved, i.e.
+ // no encryption/decryption
+ PreserveOriginalCrypto LayerCrypto = iota
+ // Encrypt indicates the layer is encrypted
+ Encrypt
+ // Decrypt indicates the layer is decrypted
+ Decrypt
+)
+
+// BlobInfo collects known information about a blob (layer/config).
+// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
+type BlobInfo struct {
+ Digest digest.Digest // "" if unknown.
+ Size int64 // -1 if unknown
+ URLs []string
+ Annotations map[string]string
+ MediaType string
+ // CompressionOperation is used in Image.UpdateLayerInfos to instruct
+ // whether the original layer's "compressed or not" should be preserved,
+ // possibly while changing the compression algorithm from one to another,
+ // or if it should be compressed or decompressed. The field defaults to
+ // preserve the original layer's compressedness.
+ // TODO: To remove together with CryptoOperation in re-design to move
+ // the field out of BlobInfo.
+ CompressionOperation LayerCompression
+ // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
+ // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
+ // set when `CompressionOperation == Compress` and MAY be set when
+ // `CompressionOperation == PreserveOriginal` and the compression type is
+ // being changed for an already-compressed layer.
+ CompressionAlgorithm *compression.Algorithm
+ // CryptoOperation is used in Image.UpdateLayerInfos to instruct
+ // whether the original layer was encrypted/decrypted
+ // TODO: To remove together with CompressionOperation in re-design to
+ // move the field out of BlobInfo.
+ CryptoOperation LayerCrypto
+}
+
+// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+ Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+ Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+ Digest digest.Digest
+ Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+// compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+// recording transport-specific information that allows the transport to reuse the blob in the future;
+// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+// Each transport defines its own “scopes” within which blob reuse is possible (e.g.
in the docker/distribution case, blobs
+// can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+// None of the methods return an error indication: errors when reading from or writing to the cache should not be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+//
+// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by
+// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own.
+type BlobInfoCache interface {
+ // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+ // May return anyDigest if it is known to be uncompressed.
+ // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+ UncompressedDigest(anyDigest digest.Digest) digest.Digest
+ // RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+ // It’s allowed for anyDigest == uncompressed.
+ // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+ // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+ // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+ RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+ // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+ // and can be reused given the opaque location data.
+ RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+ // CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+ // within the specified (transport scope) (if they still exist, which is not guaranteed).
+ //
+ // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+ // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+ // uncompressed digest.
+ CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
+}
+
+// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
+// This is primarily useful for copying images around; for examining their properties, Image (below)
+// is usually more useful.
+// Each ImageSource should eventually be closed by calling Close().
+//
+// WARNING: Various methods which return an object identified by digest generally do not
+// validate that the returned data actually matches that digest; this is the caller’s responsibility.
+type ImageSource interface {
+ // Reference returns the reference used to set up this source, _as specified by the user_
+ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+ Reference() ImageReference
+ // Close removes resources associated with an initialized ImageSource, if any.
+ Close() error + // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). + // It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); + // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). + // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) + // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. + HasThreadSafeGetBlob() bool + // GetSignatures returns the image's signatures. It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer + // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() + // to read the image's layers. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error) +} + +// ImageDestination is a service, possibly remote (= slow), to store components of a single image. +// +// There is a specific required order for some of the calls: +// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) +// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) +// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. +// +// Each ImageDestination should eventually be closed by calling Close(). +type ImageDestination interface { + // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, + // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. + Reference() ImageReference + // Close removes resources associated with an initialized ImageDestination, if any. 
+ Close() error
+
+ // SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+ // If an empty slice or nil is returned, then any MIME type can be tried for upload.
+ SupportedManifestMIMETypes() []string
+ // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+ // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+ SupportsSignatures(ctx context.Context) error
+ // DesiredLayerCompression indicates the kind of compression to apply to layers
+ DesiredLayerCompression() LayerCompression
+ // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+ // uploaded to the image destination, true otherwise.
+ AcceptsForeignLayerURLs() bool
+ // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+ MustMatchRuntimeOS() bool
+ // IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+ // and would prefer to receive an unmodified manifest instead of one modified for the destination.
+ // Does not make a difference if Reference().DockerReference() is nil.
+ IgnoresEmbeddedDockerReference() bool
+
+ // PutBlob writes contents of stream and returns data representing the result.
+ // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+ // inputInfo.Size is the expected length of stream, if known.
+ // inputInfo.MediaType describes the blob format, if known.
+ // May update cache.
+ // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+ // to any other readers for download using the supplied digest.
+ // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+ PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+ // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+ HasThreadSafePutBlob() bool
+ // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+ // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+ // info.Digest must not be empty.
+ // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+ // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+ // include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+ // reflected in the manifest that will be written.
+ // If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+ // May use and/or update cache.
+ TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
+ // PutManifest writes manifest to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+ // by `manifest.Digest()`.
+ // FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+ // If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema)
+ // while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+ PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error
+ // PutSignatures writes a set of signatures to the destination.
+ // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+ // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+ // MUST be called after PutManifest (signatures may reference manifest contents).
+ PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
+ // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+ // unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
+ // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
+ // original manifest list digest, if desired.
+ // WARNING: This does not have any transactional semantics:
+ // - Uploaded data MAY be visible to others before Commit() is called
+ // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+ Commit(ctx context.Context, unparsedToplevel UnparsedImage) error
+}
+
+// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
+// but refuses specifically this manifest type, while it may accept a different manifest type.
+type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+ Err error
+}
+
+func (e ManifestTypeRejectedError) Error() string {
+ return e.Err.Error()
+}
+
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
+// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
+// This also makes the UnparsedImage→Image conversion an explicitly visible step.
+//
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+type UnparsedImage interface {
+ // Reference returns the reference used to set up this source, _as specified by the user_
+ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
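The ordering contract on ImageDestination, together with the ManifestTypeRejectedError escape hatch, might be exercised roughly like this (a sketch; `dest`, `blobs`, `manifestBytes`, `sigs`, and `unparsedToplevel` are assumed inputs, and the conversion step is only indicated):

	for _, b := range blobs {
		reused, _, err := dest.TryReusingBlob(ctx, b, cache, true)
		if err != nil {
			return err
		}
		if !reused {
			// ... PutBlob the blob contents here, before PutManifest ...
		}
	}
	if err := dest.PutManifest(ctx, manifestBytes, nil); err != nil {
		if _, rejected := err.(types.ManifestTypeRejectedError); rejected {
			// convert the manifest to one of SupportedManifestMIMETypes() and retry
		}
		return err
	}
	if err := dest.PutSignatures(ctx, sigs, nil); err != nil {
		return err
	}
	return dest.Commit(ctx, unparsedToplevel) // MUST come last for the image to persist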
+ Reference() ImageReference
+ // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+ Manifest(ctx context.Context) ([]byte, string, error)
+ // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+ Signatures(ctx context.Context) ([][]byte, error)
+}
+
+// Image is the primary API for inspecting properties of images.
+// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The Image must not be used after the underlying ImageSource is Close()d.
+type Image interface {
+ // Note that Reference may return nil in the return value of UpdatedImage!
+ UnparsedImage
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+ ConfigInfo() BlobInfo
+ // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
+ // The result is cached; it is OK to call this however often you need.
+ ConfigBlob(context.Context) ([]byte, error)
+ // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+ // layers in the resulting configuration isn't guaranteed to be returned due to how
+ // old image manifests work (docker v2s1 especially).
+ OCIConfig(context.Context) (*v1.Image, error)
+ // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+ // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []BlobInfo
+ // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
+ // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfosForCopy(context.Context) ([]BlobInfo, error)
+ // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+ // It returns false if the manifest does not embed a Docker reference.
+ // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+ EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+ // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+ Inspect(context.Context) (*ImageInspectInfo, error)
+ // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+ // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
+ // (most importantly it forces us to download the full layers even if they are already present at the destination).
+ UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
+ // UpdatedImage returns a types.Image modified according to options.
+ // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
+ // This does not change the state of the original Image object.
+ // The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if
+ // manifests of type options.ManifestMIMEType cannot include layers that are compressed
+ // in accordance with the CompressionOperation and CompressionAlgorithm specified in one
+ // or more options.LayerInfos items, though retrying with a different
+ // options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm
+ // values might succeed.
+ UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
+ // SupportsEncryption returns an indicator that the image supports encryption
+ //
+ // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
+ // the process of updating a manifest between different manifest types was to update then convert.
+ // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
+ SupportsEncryption(ctx context.Context) bool
+ // Size returns an approximation of the amount of disk space which is consumed by the image in its current
+ // location. If the size is not known, -1 will be returned.
+ Size() (int64, error)
+}
+
+// ImageCloser is an Image with a Close() method which must be called by the user.
+// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
+// to ensure that the ImageSource is closed.
+type ImageCloser interface {
+ Image
+ // Close removes resources associated with an initialized ImageCloser.
+ Close() error
+}
+
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
+type ManifestUpdateOptions struct {
+ LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
+ EmbeddedDockerReference reference.Named
+ ManifestMIMEType string
+ // The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
+ InformationOnly ManifestUpdateInformation
+}
+
+// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
+// only to make writing struct literals possible.
+type ManifestUpdateInformation struct {
+ Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+ LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
+ LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
+}
+
+// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
+// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
+// for other manifest types.
+type ImageInspectInfo struct {
+ Tag string
+ Created *time.Time
+ DockerVersion string
+ Labels map[string]string
+ Architecture string
+ Variant string
+ Os string
+ Layers []string
+ Env []string
+}
+
+// DockerAuthConfig contains authorization information for connecting to a registry.
+// The values of Username and Password can be empty for accessing the registry anonymously.
+type DockerAuthConfig struct {
+ Username string
+ Password string
+ // IdentityToken can be used as a refresh_token in place of username and
+ // password to obtain the bearer/access token in the oauth2 flow. If the
+ // identity token is set, the password should not be set.
+ // Ref: https://docs.docker.com/registry/spec/auth/oauth/
+ IdentityToken string
+}
+
+// OptionalBool is a boolean with an additional undefined value, which is meant
+// to be used in the context of user input to distinguish between a
+// user-specified value and a default value.
+type OptionalBool byte
+
+const (
+ // OptionalBoolUndefined indicates that the OptionalBool hasn't been written.
+ OptionalBoolUndefined OptionalBool = iota
+ // OptionalBoolTrue represents the boolean true.
+ OptionalBoolTrue
+ // OptionalBoolFalse represents the boolean false.
+ OptionalBoolFalse
+)
+
+// NewOptionalBool converts the input bool into either OptionalBoolTrue or
+// OptionalBoolFalse. The function is meant to save users from boilerplate code.
+func NewOptionalBool(b bool) OptionalBool {
+ o := OptionalBoolFalse
+ if b {
+ o = OptionalBoolTrue
+ }
+ return o
+}
+
+// ShortNameMode defines the mode of short-name resolution.
+//
+// The use of unqualified-search registries entails an ambiguity, as it's
+// unclear from which registry a given image, referenced by a short name, may
+// be pulled.
+//
+// The ShortNameMode type defines how short names should resolve.
+type ShortNameMode int
+
+const (
+ ShortNameModeInvalid ShortNameMode = iota
+ // Use all configured unqualified-search registries without prompting
+ // the user.
+ ShortNameModeDisabled
+ // If stdout and stdin are a TTY, prompt the user to select a configured
+ // unqualified-search registry. Otherwise, use all configured
+ // unqualified-search registries.
+ //
+ // Note that if only one unqualified-search registry is set, it will be
+ // used without prompting.
+ ShortNameModePermissive
+ // Always prompt the user to select a configured unqualified-search
+ // registry. Return an error if stdout or stdin is not a TTY, as
+ // prompting isn't possible.
+ //
+ // Note that if only one unqualified-search registry is set, it will be
+ // used without prompting.
+ ShortNameModeEnforcing
+)
+
+// SystemContext allows parameterizing access to implicitly-accessed resources,
+// like configuration files in /etc and users' login state in their home directory.
+// Various components can share the same field only if their semantics are exactly
+// the same; if in doubt, add a new field.
+// It is always OK to pass nil instead of a SystemContext.
+type SystemContext struct {
+ // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
+ // Not used for any of the more specific path overrides available in this struct.
+ // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
+ // NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths,
+ // and there is no need to worry about the environment.)
+ // NOTE: This does NOT affect paths starting with $HOME.
+ RootForImplicitAbsolutePaths string
+
+ // === Global configuration overrides ===
+ // If not "", overrides the system's default path for signature.Policy configuration.
+ SignaturePolicyPath string
+ // If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
+ RegistriesDirPath string
+ // Path to the system-wide registries configuration file
+ SystemRegistriesConfPath string
+ // Path to the system-wide registries configuration directory
+ SystemRegistriesConfDirPath string
+ // Path to the user-specific short-names configuration file
+ UserShortNameAliasConfPath string
+ // If set, short-name resolution in pkg/shortnames must follow the specified mode
+ ShortNameMode *ShortNameMode
+ // If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and
+ // short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce
+ // resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
+ // specific context.
+ PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
+ // If not "", overrides the default path for the authentication file, but only new format files
+ AuthFilePath string
+ // if not "", overrides the default path for the authentication file, but with the legacy format;
+ // the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
+ // but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
+ // in locations other than the home dir; openshift components should then set this field in those cases;
+ // this field is ignored if `AuthFilePath` is set (we favor the newer format);
+ // only reading of this data is supported;
+ LegacyFormatAuthFilePath string
+ // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
+ ArchitectureChoice string
+ // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
+ OSChoice string
+ // If not "", overrides the use of the detected ARM platform variant when choosing an image or verifying variant match.
+ VariantChoice string
+ // If not "", overrides the system's default directory containing a blob info cache.
+ BlobInfoCacheDir string
+ // Additional tags when creating or copying a docker-archive.
+ DockerArchiveAdditionalTags []reference.NamedTagged
+ // If not "", overrides the temporary directory to use for storing big files
+ BigFilesTemporaryDir string
+
+ // === OCI.Transport overrides ===
+ // If not "", a directory containing a CA certificate (ending with ".crt"),
+ // a client certificate (ending with ".cert") and a client certificate key
+ // (ending with ".key") used when downloading OCI image layers.
+ OCICertPath string
+ // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+ OCIInsecureSkipTLSVerify bool
+ // If not "", use a shared directory for storing blobs rather than within OCI layouts
+ OCISharedBlobDirPath string
+ // Allow uncompressed image layers when storing to an OCI layout
+ OCIAcceptUncompressedLayers bool
+
+ // === docker.Transport overrides ===
+ // If not "", a directory containing a CA certificate (ending with ".crt"),
+ // a client certificate (ending with ".cert") and a client certificate key
+ // (ending with ".key") used when talking to a container registry.
+ DockerCertPath string
+ // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
+ // Ignored if DockerCertPath is non-empty.
+ DockerPerHostCertDirPath string
+ // Allow contacting container registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+ DockerInsecureSkipTLSVerify OptionalBool
+ // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
+ // Ignored if DockerBearerRegistryToken is non-empty.
+ DockerAuthConfig *DockerAuthConfig
+ // if not "", the library uses this registry token to authenticate to the registry
+ DockerBearerRegistryToken string
+ // if not "", a User-Agent header is added to each request when contacting a registry.
+ DockerRegistryUserAgent string
+ // if true, a V1 ping attempt isn't done to give users a better error. Default is false.
+ // Note that this field is used mainly to integrate containers/image into projectatomic/docker
+ // in order to not break any existing docker's integration tests.
+ DockerDisableV1Ping bool
+ // If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
+ DockerDisableDestSchema1MIMETypes bool
+ // If true, the physical pull source of docker transport images is logged at info level
+ DockerLogMirrorChoice bool
+ // Directory to use for OSTree temporary files
+ OSTreeTmpDirPath string
+ // If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry.
+ // Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
+ // when the digest for a blob is unknown.
+ DockerRegistryPushPrecomputeDigests bool
+
+ // === docker/daemon.Transport overrides ===
+ // A directory containing a CA certificate (ending with ".crt"),
+ // a client certificate (ending with ".cert") and a client certificate key
+ // (ending with ".key") used when talking to a Docker daemon.
+ DockerDaemonCertPath string
+ // The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed.
+ DockerDaemonHost string
+ // Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well.
+ DockerDaemonInsecureSkipTLSVerify bool
+
+ // === dir.Transport overrides ===
+ // DirForceCompress compresses the image layers if set to true
+ DirForceCompress bool
+ // DirForceDecompress decompresses the image layers if set to true
+ DirForceDecompress bool
+
+ // CompressionFormat is the format to use for the compression of the blobs
+ CompressionFormat *compression.Algorithm
+ // CompressionLevel specifies what compression level is used
+ CompressionLevel *int
+}
+
+// ProgressEvent is the type of events a progress reader can produce.
+// Warning: new event types may be added any time.
+type ProgressEvent uint
+
+const (
+ // ProgressEventNewArtifact will be fired on progress reader setup
+ ProgressEventNewArtifact ProgressEvent = iota
+
+ // ProgressEventRead indicates that the artifact download is currently in
+ // progress
+ ProgressEventRead
+
+ // ProgressEventDone is fired when the data transfer has been finished for
+ // the specific artifact
+ ProgressEventDone
+
+ // ProgressEventSkipped is fired when the artifact has been skipped because
+ // it's already available at the destination
+ ProgressEventSkipped
+)
+
+// ProgressProperties is used to pass information from the copy code to a monitor which
+// can use the real-time information to produce output or react to changes.
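A monitor consuming these events might look roughly like this (a sketch; delivering ProgressProperties over a channel is an assumption about how the copy caller wires things up, and the fields used are those of the struct defined just below):

	func monitor(ch <-chan types.ProgressProperties) {
		for p := range ch {
			switch p.Event {
			case types.ProgressEventNewArtifact:
				fmt.Printf("start %s (%d bytes)\n", p.Artifact.Digest, p.Artifact.Size)
			case types.ProgressEventRead:
				fmt.Printf("read %d/%d bytes (+%d)\n", p.Offset, p.Artifact.Size, p.OffsetUpdate)
			case types.ProgressEventDone, types.ProgressEventSkipped:
				fmt.Printf("finished %s\n", p.Artifact.Digest)
			}
		}
	}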
+type ProgressProperties struct {
+ // The event type, indicating what happened
+ Event ProgressEvent
+
+ // The artifact which has been updated in this interval
+ Artifact BlobInfo
+
+ // The currently downloaded size in bytes
+ // Increases from 0 to the final Artifact size
+ Offset uint64
+
+ // The additional offset which has been downloaded inside the last update
+ // interval. Will be reset after each ProgressEventRead event.
+ OffsetUpdate uint64
+}
diff --git a/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
new file mode 100644
index 000000000..83b061c70
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## The libtrust Project Community Code of Conduct
+
+The libtrust project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
diff --git a/vendor/github.com/containers/libtrust/CONTRIBUTING.md b/vendor/github.com/containers/libtrust/CONTRIBUTING.md
new file mode 100644
index 000000000..05be0f8ab
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing to libtrust
+
+Want to hack on libtrust? Awesome! Here are instructions to get you
+started.
+
+libtrust is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read
+[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
diff --git a/vendor/github.com/containers/libtrust/LICENSE b/vendor/github.com/containers/libtrust/LICENSE
new file mode 100644
index 000000000..27448585a
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containers/libtrust/MAINTAINERS b/vendor/github.com/containers/libtrust/MAINTAINERS
new file mode 100644
index 000000000..9768175fe
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/MAINTAINERS
@@ -0,0 +1,3 @@
+Solomon Hykes
+Josh Hawn (github: jlhawn)
+Derek McGowan (github: dmcgowan)
diff --git a/vendor/github.com/containers/libtrust/README.md b/vendor/github.com/containers/libtrust/README.md
new file mode 100644
index 000000000..dcffb31ae
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/README.md
@@ -0,0 +1,22 @@
+# libtrust
+
+> **WARNING** this library is no longer actively developed, and will be integrated
+> in the [docker/distribution](https://www.github.com/docker/distribution)
+> repository in the future.
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control is managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+ diff --git a/vendor/github.com/containers/libtrust/SECURITY.md b/vendor/github.com/containers/libtrust/SECURITY.md new file mode 100644 index 000000000..fab2c41e8 --- /dev/null +++ b/vendor/github.com/containers/libtrust/SECURITY.md @@ -0,0 +1,3 @@ +## Security and Disclosure Information Policy for the libtrust Project + +The libtrust Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects. diff --git a/vendor/github.com/containers/libtrust/certificates.go b/vendor/github.com/containers/libtrust/certificates.go new file mode 100644 index 000000000..3dcca33cb --- /dev/null +++ b/vendor/github.com/containers/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTempInfo object. + var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. 
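Putting the generators above to use, a server might wire a fresh key and self-signed certificate into a crypto/tls config roughly as follows (a sketch; GenerateECP256PrivateKey comes from ec_key.go later in this diff, and the hostname/IP are illustrative):

	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return err
	}
	cert, err := libtrust.GenerateSelfSignedServerCert(
		key, []string{"registry.local"}, []net.IP{net.ParseIP("127.0.0.1")},
	)
	if err != nil {
		return err
	}
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{{
			Certificate: [][]byte{cert.Raw},          // DER bytes of the generated certificate
			PrivateKey:  key.CryptoPrivateKey(),      // the underlying *ecdsa.PrivateKey
		}},
	}
	_ = tlsConfig // hand to an http.Server or tls.Listen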
+func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/containers/libtrust/doc.go b/vendor/github.com/containers/libtrust/doc.go new file mode 100644 index 000000000..ec5d2159c --- /dev/null +++ b/vendor/github.com/containers/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. 
+*/
+package libtrust
diff --git a/vendor/github.com/containers/libtrust/ec_key.go b/vendor/github.com/containers/libtrust/ec_key.go
new file mode 100644
index 000000000..0ee1b9110
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/ec_key.go
@@ -0,0 +1,422 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
+ * EC DSA PUBLIC KEY
+ */
+
+// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
+// signature algorithms.
+type ecPublicKey struct {
+ *ecdsa.PublicKey
+ curveName string
+ signatureAlgorithm *signatureAlgorithm
+ extended map[string]interface{}
+}
+
+func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
+ curve := cryptoPublicKey.Curve
+
+ switch {
+ case curve == elliptic.P256():
+ return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
+ case curve == elliptic.P384():
+ return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
+ case curve == elliptic.P521():
+ return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
+ default:
+ return nil, errors.New("unsupported elliptic curve")
+ }
+}
+
+// KeyType returns the key type for elliptic curve keys, i.e., "EC".
+func (k *ecPublicKey) KeyType() string {
+ return "EC"
+}
+
+// CurveName returns the elliptic curve identifier.
+// Possible values are "P-256", "P-384", and "P-521".
+func (k *ecPublicKey) CurveName() string {
+ return k.curveName
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *ecPublicKey) KeyID() string {
+ return keyIDFromCryptoKey(k)
+}
+
+func (k *ecPublicKey) String() string {
+ return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // For EC keys there is only one supported signature algorithm depending
+ // on the curve parameters.
+ if k.signatureAlgorithm.HeaderParam() != alg {
+ return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+ }
+
+ // signature is the concatenation of (r, s), base64Url encoded.
+ sigLength := len(signature)
+ expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+ if sigLength != expectedOctetLength {
+ return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+ }
+
+ rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+ r := new(big.Int).SetBytes(rBytes)
+ s := new(big.Int).SetBytes(sBytes)
+
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err := io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+ return errors.New("invalid signature")
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations.
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) + yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. 
+ _, ok := jwk["kid"]
+ if ok {
+ kid, err := stringFromMap(jwk, "kid")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
+ }
+ if kid != key.KeyID() {
+ return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
+ }
+ }
+
+ key.extended = jwk
+
+ return key, nil
+}
+
+/*
+ * EC DSA PRIVATE KEY
+ */
+
+// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
+// algorithms.
+type ecPrivateKey struct {
+ ecPublicKey
+ *ecdsa.PrivateKey
+}
+
+func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
+ publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *ecPrivateKey) PublicKey() PublicKey {
+ return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+ return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ // The given hashID is only a suggestion, and since EC keys only support
+ // one signature/hash algorithm given the curve name, we disregard it for
+ // the elliptic curve JWK signature implementation.
+ r, s, err := k.sign(data, hashID)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+
+ rBytes, sBytes := r.Bytes(), s.Bytes()
+ octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output
+ rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+ sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+ rBuf = append(rBuf, rBytes...)
+ sBuf = append(sBuf, sBytes...)
+
+ signature = append(rBuf, sBuf...)
+ alg = k.signatureAlgorithm.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+ jwk := k.ecPublicKey.toMap()
+
+ dBytes := k.D.Bytes()
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := k.ecPublicKey.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ // Create a buffer with the necessary zero-padding.
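+ // Worked example (editorial): for P-521, bitlength(n-1) is 521, so
+ // octetLength = (521+7)>>3 = 66; a d that serializes to only 65 bytes
+ // then gains exactly one leading zero byte of padding below.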
+ dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+ dBuf = append(dBuf, dBytes...)
+
+ jwk["d"] = joseBase64UrlEncode(dBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKIX format.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+ dB64Url, err := stringFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+ }
+
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract the public key information, then extract the private
+ // key value 'd'.
+ publicKey, err := ecPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+ }
+
+ key := &ecPrivateKey{
+ ecPublicKey: *publicKey,
+ PrivateKey: &ecdsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: d,
+ },
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+ k = new(ecPrivateKey)
+ k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+ }
+
+ k.curveName = "P-256"
+ k.signatureAlgorithm = es256
+
+ return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P384())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+ }
+
+ k.curveName = "P-384"
+ k.signatureAlgorithm = es384
+
+ return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
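These generators pair with the Sign/Verify methods shown earlier; a round trip might look like this (a sketch; the payload and error wrapping are illustrative, and the hash argument is only a hint for EC keys):

	key, err := libtrust.GenerateECP384PrivateKey()
	if err != nil {
		return err
	}
	sig, alg, err := key.Sign(strings.NewReader("payload"), crypto.SHA384)
	if err != nil {
		return err
	}
	// alg is "ES384" here, chosen by the key itself.
	if err := key.PublicKey().Verify(strings.NewReader("payload"), alg, sig); err != nil {
		return fmt.Errorf("signature round trip failed: %s", err)
	}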
+func GenerateECP521PrivateKey() (PrivateKey, error) { + k, err := generateECPrivateKey(elliptic.P521()) + if err != nil { + return nil, fmt.Errorf("error generating EC P-521 key: %s", err) + } + + k.curveName = "P-521" + k.signatureAlgorithm = es512 + + return k, nil +} diff --git a/vendor/github.com/containers/libtrust/ec_key_no_openssl.go b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go new file mode 100644 index 000000000..d6cdaca3f --- /dev/null +++ b/vendor/github.com/containers/libtrust/ec_key_no_openssl.go @@ -0,0 +1,23 @@ +// +build !libtrust_openssl + +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) { + hasher := k.signatureAlgorithm.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return nil, nil, fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + return ecdsa.Sign(rand.Reader, k.PrivateKey, hash) +} diff --git a/vendor/github.com/containers/libtrust/ec_key_openssl.go b/vendor/github.com/containers/libtrust/ec_key_openssl.go new file mode 100644 index 000000000..4137511f1 --- /dev/null +++ b/vendor/github.com/containers/libtrust/ec_key_openssl.go @@ -0,0 +1,24 @@ +// +build libtrust_openssl + +package libtrust + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func (k *ecPrivateKey) sign(data io.Reader, hashID crypto.Hash) (r, s *big.Int, err error) { + hId := k.signatureAlgorithm.HashID() + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(data) + if err != nil { + return nil, nil, fmt.Errorf("error reading data: %s", err) + } + + return ecdsa.HashSign(rand.Reader, k.PrivateKey, buf.Bytes(), hId) +} diff --git a/vendor/github.com/containers/libtrust/filter.go b/vendor/github.com/containers/libtrust/filter.go new file mode 100644 index 000000000..5b2b4fca6 --- /dev/null +++ b/vendor/github.com/containers/libtrust/filter.go @@ -0,0 +1,50 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned. 
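A usage sketch for the FilterByHosts function defined below, written as a hypothetical consumer of this package (the host names are invented):

```go
package main

import (
	"fmt"

	lt "github.com/containers/libtrust"
)

func main() {
	// An ephemeral key carrying an invented "hosts" pattern.
	k, err := lt.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	pub := k.PublicKey()
	pub.AddExtendedField("hosts", []string{"*.example.com"})

	// Patterns are matched with filepath.Match, so shell-style globs work.
	trusted, err := lt.FilterByHosts([]lt.PublicKey{pub}, "registry.example.com", false)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(trusted)) // 1
}
```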
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+	filtered := make([]PublicKey, 0, len(keys))
+
+	for _, pubKey := range keys {
+		var hosts []string
+		switch v := pubKey.GetExtendedField("hosts").(type) {
+		case []string:
+			hosts = v
+		case []interface{}:
+			for _, value := range v {
+				h, ok := value.(string)
+				if !ok {
+					continue
+				}
+				hosts = append(hosts, h)
+			}
+		}
+
+		if len(hosts) == 0 {
+			if includeEmpty {
+				filtered = append(filtered, pubKey)
+			}
+			continue
+		}
+
+		// Check if any hosts match pattern
+		for _, hostPattern := range hosts {
+			match, err := filepath.Match(hostPattern, host)
+			if err != nil {
+				return nil, err
+			}
+
+			if match {
+				filtered = append(filtered, pubKey)
+				// Stop at the first match so the key is added only once.
+				break
+			}
+		}
+	}
+
+	return filtered, nil
+}
diff --git a/vendor/github.com/containers/libtrust/hash.go b/vendor/github.com/containers/libtrust/hash.go
new file mode 100644
index 000000000..a2df787dd
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+	"crypto"
+	_ "crypto/sha256" // Register SHA224 and SHA256
+	_ "crypto/sha512" // Register SHA384 and SHA512
+	"fmt"
+)
+
+type signatureAlgorithm struct {
+	algHeaderParam string
+	hashID         crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+	return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+	return h.hashID
+}
+
+var (
+	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+	switch {
+	case alg == "RS256":
+		return rs256, nil
+	case alg == "RS384":
+		return rs384, nil
+	case alg == "RS512":
+		return rs512, nil
+	default:
+		return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+	}
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+	switch {
+	case hashID == crypto.SHA512:
+		return rs512
+	case hashID == crypto.SHA384:
+		return rs384
+	case hashID == crypto.SHA256:
+		fallthrough
+	default:
+		return rs256
+	}
+}
diff --git a/vendor/github.com/containers/libtrust/jsonsign.go b/vendor/github.com/containers/libtrust/jsonsign.go
new file mode 100644
index 000000000..cb2ca9a76
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+	"unicode"
+)
+
+var (
+	// ErrInvalidSignContent is used when the content to be signed is invalid.
+	ErrInvalidSignContent = errors.New("invalid sign content")
+
+	// ErrInvalidJSONContent is used when invalid json is encountered.
+	ErrInvalidJSONContent = errors.New("invalid json content")
+
+	// ErrMissingSignatureKey is used when the specified signature key
+	// does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
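Sign above, combined with NewJSONSignature (defined further below) and JWS, gives the basic signing flow. A sketch from a hypothetical consumer, with an invented payload:

```go
package main

import (
	"fmt"

	lt "github.com/containers/libtrust"
)

func main() {
	// Invented payload; NewJSONSignature requires a JSON object.
	content := []byte(`{"name": "example"}`)

	js, err := lt.NewJSONSignature(content)
	if err != nil {
		panic(err)
	}

	// An ephemeral P-256 key; Sign appends one signature per call.
	key, err := lt.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	if err := js.Sign(key); err != nil {
		panic(err)
	}

	jws, err := js.JWS() // standard JWS JSON serialization
	if err != nil {
		panic(err)
	}
	fmt.Println(string(jws))
}
```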
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+
+			sigBytes, err := joseBase64UrlDecode(signature.Signature)
+			if err != nil {
+				return nil, err
+			}
+
+			err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+	}
+	return chains, nil
+}
+
+// JWS returns JSON serialized JWS according to
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
+func (js *JSONSignature) JWS() ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("missing signature")
+	}
+
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	jsonMap := map[string]interface{}{
+		"payload":    js.payload,
+		"signatures": js.signatures,
+	}
+
+	return json.MarshalIndent(jsonMap, "", " ")
+}
+
+func notSpace(r rune) bool {
+	return !unicode.IsSpace(r)
+}
+
+func detectJSONIndent(jsonContent []byte) (indent string) {
+	if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
+		quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
+		if quoteIndex > 0 {
+			indent = string(jsonContent[2 : quoteIndex+1])
+		}
+	}
+	return
+}
+
+type jsParsedHeader struct {
+	JWK       json.RawMessage `json:"jwk"`
+	Algorithm string          `json:"alg"`
+	Chain     []string        `json:"x5c"`
+}
+
+type jsParsedSignature struct {
+	Header    jsParsedHeader `json:"header"`
+	Signature string         `json:"signature"`
+	Protected string         `json:"protected"`
+}
+
+// ParseJWS parses a JWS serialized JSON object into a JSONSignature.
+func ParseJWS(content []byte) (*JSONSignature, error) {
+	type jsParsed struct {
+		Payload    string              `json:"payload"`
+		Signatures []jsParsedSignature `json:"signatures"`
+	}
+	parsed := &jsParsed{}
+	err := json.Unmarshal(content, parsed)
+	if err != nil {
+		return nil, err
+	}
+	if len(parsed.Signatures) == 0 {
+		return nil, errors.New("missing signatures")
+	}
+	payload, err := joseBase64UrlDecode(parsed.Payload)
+	if err != nil {
+		return nil, err
+	}
+
+	js, err := NewJSONSignature(payload)
+	if err != nil {
+		return nil, err
+	}
+	js.signatures = make([]jsSignature, len(parsed.Signatures))
+	for i, signature := range parsed.Signatures {
+		header := jsHeader{
+			Algorithm: signature.Header.Algorithm,
+		}
+		if signature.Header.Chain != nil {
+			header.Chain = signature.Header.Chain
+		}
+		if signature.Header.JWK != nil {
+			publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
+			if err != nil {
+				return nil, err
+			}
+			header.JWK = publicKey
+		}
+		js.signatures[i] = jsSignature{
+			Header:    header,
+			Signature: signature.Signature,
+			Protected: signature.Protected,
+		}
+	}
+
+	return js, nil
+}
+
+// NewJSONSignature returns a new unsigned JWS from a json byte array.
+// JSONSignature will need to be signed before serializing or storing.
+// Optionally, one or more signatures can be provided as byte buffers,
+// containing serialized JWS signatures, to assemble a fully signed JWS
+// package. It is the caller's responsibility to ensure uniqueness of the
+// provided signatures.
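The verification side of the flow pairs ParseJWS with Verify, both defined above. A sketch with a hypothetical helper name (x509 chains are not checked on this path):

```go
package jwsutil

import (
	lt "github.com/containers/libtrust"
)

// verifyJWS parses a serialized JWS and verifies every signature, returning
// the public keys that produced valid signatures.
func verifyJWS(jwsBytes []byte) ([]lt.PublicKey, error) {
	parsed, err := lt.ParseJWS(jwsBytes)
	if err != nil {
		return nil, err
	}
	return parsed.Verify()
}
```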
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
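NewJSONSignatureFromMap above accepts a map or struct instead of raw bytes. A sketch with an invented payload and helper name:

```go
package jwsutil

import (
	lt "github.com/containers/libtrust"
)

// signMap builds an unsigned JSONSignature from a map, then signs it with
// the supplied key.
func signMap(signingKey lt.PrivateKey) (*lt.JSONSignature, error) {
	js, err := lt.NewJSONSignatureFromMap(map[string]interface{}{
		"name":    "example",
		"version": 1,
	})
	if err != nil {
		return nil, err
	}
	if err := js.Sign(signingKey); err != nil {
		return nil, err
	}
	return js, nil
}
```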
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
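PrettySignature (below) and ParsePrettySignature (above) form a round trip: the signatures are embedded under a chosen key inside the payload object and can later be recovered and verified. A sketch; the "signatures" key name is this example's choice:

```go
package jwsutil

import (
	lt "github.com/containers/libtrust"
)

// prettyRoundTrip embeds the signatures inside the payload object, parses
// them back out, and verifies them. The same key name must be used on both
// sides.
func prettyRoundTrip(js *lt.JSONSignature) ([]lt.PublicKey, error) {
	signed, err := js.PrettySignature("signatures")
	if err != nil {
		return nil, err
	}
	reparsed, err := lt.ParsePrettySignature(signed, "signatures")
	if err != nil {
		return nil, err
	}
	return reparsed.Verify()
}
```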
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("no signatures")
+	}
+	payload, err := joseBase64UrlDecode(js.payload)
+	if err != nil {
+		return nil, err
+	}
+	payload = payload[:js.formatLength]
+
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	var marshalled []byte
+	var marshallErr error
+	if js.indent != "" {
+		marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+	} else {
+		marshalled, marshallErr = json.Marshal(js.signatures)
+	}
+	if marshallErr != nil {
+		return nil, marshallErr
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+	buf.Write(payload)
+	buf.WriteByte(',')
+	if js.indent != "" {
+		buf.WriteByte('\n')
+		buf.WriteString(js.indent)
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\": ")
+		buf.Write(marshalled)
+		buf.WriteByte('\n')
+	} else {
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\":")
+		buf.Write(marshalled)
+	}
+	buf.WriteByte('}')
+
+	return buf.Bytes(), nil
+}
+
+// Signatures provides the signatures on this JWS as opaque blobs, sorted by
+// keyID. These blobs can be stored and reassembled with payloads. Internally,
+// they are simply marshaled json web signatures but implementations should
+// not rely on this.
+func (js *JSONSignature) Signatures() ([][]byte, error) {
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	var sb [][]byte
+	for _, jsig := range js.signatures {
+		p, err := json.Marshal(jsig)
+		if err != nil {
+			return nil, err
+		}
+
+		sb = append(sb, p)
+	}
+
+	return sb, nil
+}
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+	merged := js.signatures
+	for _, other := range others {
+		if js.payload != other.payload {
+			return fmt.Errorf("payloads differ from merge target")
+		}
+		merged = append(merged, other.signatures...)
+	}
+
+	js.signatures = merged
+	return nil
+}
diff --git a/vendor/github.com/containers/libtrust/key.go b/vendor/github.com/containers/libtrust/key.go
new file mode 100644
index 000000000..73642db2a
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+	// KeyType returns the key type for this key. For elliptic curve keys,
+	// this value should be "EC". For RSA keys, this value should be "RSA".
+	KeyType() string
+	// KeyID returns a distinct identifier which is unique to this Public Key.
+	// The format generated by this library is a base32 encoding of a 240 bit
+	// hash of the public key data divided into 12 groups like so:
+	//    ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+	KeyID() string
+	// Verify verifies the signature of the data in the io.Reader using this
+	// Public Key. The alg parameter should identify the digital signature
+	// algorithm which was used to produce the signature and should be
+	// supported by this public key. Returns a nil error if the signature
+	// is valid.
+	Verify(data io.Reader, alg string, signature []byte) error
+	// CryptoPublicKey returns the internal object which can be used as a
+	// crypto.PublicKey for use with other standard library operations. The type
+	// is either *rsa.PublicKey or *ecdsa.PublicKey
+	CryptoPublicKey() crypto.PublicKey
+	// These public keys can be serialized to the standard JSON encoding for
+	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+	// Algorithms.
+	MarshalJSON() ([]byte, error)
+	// These keys can also be serialized to the standard PEM encoding.
+	PEMBlock() (*pem.Block, error)
+	// The string representation of a key is its key type and ID.
+	String() string
+	AddExtendedField(string, interface{})
+	GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+	// A PrivateKey contains all fields and methods of a PublicKey of the
+	// same type. The MarshalJSON method also outputs the private key as a
+	// JSON Web Key, and the PEMBlock method outputs the private key as a
+	// PEM block.
+	PublicKey
+	// PublicKey returns the PublicKey associated with this PrivateKey.
+	PublicKey() PublicKey
+	// Sign signs the data read from the io.Reader using a signature algorithm
+	// supported by the private key. If the specified hashing algorithm is
+	// supported by this key, that hash function is used to generate the
+	// signature otherwise the default hashing algorithm for this key is
+	// used. Returns the signature and identifier of the algorithm used.
+	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+	CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+	switch cryptoPublicKey := cryptoPublicKey.(type) {
+	case *ecdsa.PublicKey:
+		return fromECPublicKey(cryptoPublicKey)
+	case *rsa.PublicKey:
+		return fromRSAPublicKey(cryptoPublicKey), nil
+	default:
+		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+	}
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+	case *ecdsa.PrivateKey:
+		return fromECPrivateKey(cryptoPrivateKey)
+	case *rsa.PrivateKey:
+		return fromRSAPrivateKey(cryptoPrivateKey), nil
+	default:
+		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+	}
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
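UnmarshalPublicKeyJWK above is the inverse of a PublicKey's MarshalJSON, which emits the JWK form. A round-trip sketch with a hypothetical helper name:

```go
package keydemo

import (
	"encoding/json"

	lt "github.com/containers/libtrust"
)

// jwkRoundTrip marshals a freshly generated public key to its JWK JSON form
// and parses it back into a libtrust PublicKey.
func jwkRoundTrip() (lt.PublicKey, error) {
	k, err := lt.GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	jwkJSON, err := json.Marshal(k.PublicKey()) // JWK form via MarshalJSON
	if err != nil {
		return nil, err
	}
	return lt.UnmarshalPublicKeyJWK(jwkJSON)
}
```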
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/vendor/github.com/containers/libtrust/key_files.go b/vendor/github.com/containers/libtrust/key_files.go new file mode 100644 index 000000000..c526de545 --- /dev/null +++ b/vendor/github.com/containers/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
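The extension-based dispatch in LoadKeyFile above (and in SaveKey, defined below) can be sketched as follows; the file names are invented:

```go
package keydemo

import (
	lt "github.com/containers/libtrust"
)

// saveAndReload shows the suffix rule: ".json"/".jwk" selects the JWK
// encoding, any other suffix selects PEM.
func saveAndReload() (lt.PrivateKey, error) {
	k, err := lt.GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	if err := lt.SaveKey("trust-key.json", k); err != nil { // written as a JSON Web Key
		return nil, err
	}
	if err := lt.SaveKey("trust-key.pem", k); err != nil { // written as a PEM block
		return nil, err
	}
	return lt.LoadKeyFile("trust-key.json")
}
```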
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+		return []json.RawMessage{}, nil
+	}
+
+	keySet := jwkSet{}
+
+	err := json.Unmarshal(data, &keySet)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+	}
+
+	return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+	data, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
+func AddKeySetFile(filename string, key PublicKey) error {
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		return addKeySetJSONFile(filename, key)
+	}
+
+	// Must be a PEM format file
+	return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+	encodedKey, err := json.Marshal(key)
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client key: %s", err)
+	}
+
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return err
+	}
+
+	rawEntries, err := loadJSONKeySetRaw(contents)
+	if err != nil {
+		return err
+	}
+
+	rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+	entriesWrapper := jwkSet{Keys: rawEntries}
+
+	encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client keys: %s", err)
+	}
+
+	err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+	}
+
+	return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+	// Encode to PEM, open file for appending, write PEM.
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+	}
+	defer file.Close()
+
+	pemBlock, err := key.PEMBlock()
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted key: %s", err)
+	}
+
+	_, err = file.Write(pem.EncodeToMemory(pemBlock))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted keys file: %s", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/libtrust/key_manager.go b/vendor/github.com/containers/libtrust/key_manager.go
new file mode 100644
index 000000000..9a98ae357
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+	key        PrivateKey
+	clientFile string
+	clientDir  string
+
+	clientLock sync.RWMutex
+	clients    []PublicKey
+
+	configLock sync.Mutex
+	configs    []*tls.Config
+}
+
+// NewClientKeyManager loads a new manager from a set of key files,
+// managed by the given private key.
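A wiring sketch for the manager defined below, from a hypothetical server setup (paths invented; GenerateCACertPool, used internally, lives elsewhere in this package):

```go
package serverdemo

import (
	"crypto/tls"

	lt "github.com/containers/libtrust"
)

// clientAuthConfig turns the authorized client keys into the server's
// client CA pool via ClientKeyManager.
func clientAuthConfig() (*tls.Config, error) {
	trustKey, err := lt.LoadOrCreateTrustKey("/var/lib/demo/key.json")
	if err != nil {
		return nil, err
	}
	mgr, err := lt.NewClientKeyManager(trustKey, "/etc/demo/authorized-keys.json", "/etc/demo/authorized-keys.d")
	if err != nil {
		return nil, err
	}
	tlsConfig := &tls.Config{ClientAuth: tls.RequireAndVerifyClientCert}
	if err := mgr.RegisterTLSConfig(tlsConfig); err != nil {
		return nil, err
	}
	return tlsConfig, nil
}
```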
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+	if caPath != "" {
+		certPool := x509.NewCertPool()
+		file, err := ioutil.ReadFile(caPath)
+		if err != nil {
+			return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+		}
+		certPool.AppendCertsFromPEM(file)
+
+		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+		tlsConfig.ClientCAs = certPool
+	}
+
+	return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+	return &tls.Config{
+		NextProtos: []string{"http/1.1"},
+		// Avoid fallback on insecure SSL protocols
+		MinVersion: tls.VersionTLS10,
+	}
+}
+
+// parseAddr parses an address into an array of IPs and domains
+func parseAddr(addr string) ([]net.IP, []string, error) {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, nil, err
+	}
+	var domains []string
+	var ips []net.IP
+	ip := net.ParseIP(host)
+	if ip != nil {
+		ips = []net.IP{ip}
+	} else {
+		domains = []string{host}
+	}
+	return ips, domains, nil
+}
diff --git a/vendor/github.com/containers/libtrust/rsa_key.go b/vendor/github.com/containers/libtrust/rsa_key.go
new file mode 100644
index 000000000..dac4cacf2
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+/*
+ * RSA DSA PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+	*rsa.PublicKey
+	extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+	return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+	return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+	return keyIDFromCryptoKey(k)
+}
+
+func (k *rsaPublicKey) String() string {
+	return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// Verify the signature of the given data, returning a non-nil error if it is invalid.
+	sigAlg, err := rsaSignatureAlgorithmByName(alg)
+	if err != nil {
+		return fmt.Errorf("unable to verify Signature: %s", err)
+	}
+
+	hasher := sigAlg.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+	if err != nil {
+		return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations.
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. 
If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature otherwise the
+// default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+	hasher := sigAlg.HashID().New()
+
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+
+	alg = sigAlg.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+	k.Precompute() // Make sure the precomputed values are stored.
+	jwk := k.rsaPublicKey.toMap()
+
+	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+	otherPrimes := k.Primes[2:]
+
+	if len(otherPrimes) > 0 {
+		otherPrimesInfo := make([]interface{}, len(otherPrimes))
+		for i, r := range otherPrimes {
+			otherPrimeInfo := make(map[string]string, 3)
+			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+			crtVal := k.Precomputed.CRTValues[i]
+			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+			otherPrimesInfo[i] = otherPrimeInfo
+		}
+		jwk["oth"] = otherPrimesInfo
+	}
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKIX format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+	// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+	// only the private key exponent 'd' is REQUIRED, the others are just for
+	// signature/decryption optimizations and SHOULD be included when the JWK
+	// is produced. We MAY choose to accept a JWK which only includes 'd', but
+	// we're going to go ahead and not choose to accept it without the extra
+	// fields. Only the 'oth' field will be optional (for multi-prime keys).
+	privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+	}
+	firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+	}
+
+	var oth interface{}
+	if _, ok := jwk["oth"]; ok {
+		oth = jwk["oth"]
+		delete(jwk, "oth")
+	}
+
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract the public key information, then extract the private
+	// key values.
+	publicKey, err := rsaPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKey := &rsa.PrivateKey{
+		PublicKey: *publicKey.PublicKey,
+		D:         privateExponent,
+		Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor},
+		Precomputed: rsa.PrecomputedValues{
+			Dp:   firstFactorCRT,
+			Dq:   secondFactorCRT,
+			Qinv: crtCoeff,
+		},
+	}
+
+	if oth != nil {
+		// Should be an array of more JSON objects.
+		otherPrimesInfo, ok := oth.([]interface{})
+		if !ok {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+		}
+		numOtherPrimeFactors := len(otherPrimesInfo)
+		if numOtherPrimeFactors == 0 {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
+		}
+		otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+		productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+		crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+		for i, val := range otherPrimesInfo {
+			otherPrimeinfo, ok := val.(map[string]interface{})
+			if !ok {
+				return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+			}
+
+			otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+			}
+			otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+			}
+			otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+			}
+
+			// Assign into the slice element directly; copying the CRTValue
+			// out by value would discard these assignments.
+			crtValues[i] = rsa.CRTValue{
+				Exp:   otherFactorCRT,
+				Coeff: otherCrtCoeff,
+				R:     productOfPrimes,
+			}
+			otherPrimeFactors[i] = otherPrimeFactor
+			productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+		}
+
+		privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+		privateKey.Precomputed.CRTValues = crtValues
+	}
+
+	key := &rsaPrivateKey{
+		rsaPublicKey: *publicKey,
+		PrivateKey:   privateKey,
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
+	k = new(rsaPrivateKey)
+	k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
+	if err != nil {
+		return nil, err
+	}
+
+	k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
+	k.extended = make(map[string]interface{})
+
+	return
+}
+
+// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
+func GenerateRSA2048PrivateKey() (PrivateKey, error) {
+	k, err := generateRSAPrivateKey(2048)
+	if err != nil {
+		return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
+	}
+
+	return k, nil
+}
+
+// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
+func GenerateRSA3072PrivateKey() (PrivateKey, error) {
+	k, err := generateRSAPrivateKey(3072)
+	if err != nil {
+		return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
+	}
+
+	return k, nil
+}
+
+// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
+func GenerateRSA4096PrivateKey() (PrivateKey, error) {
+	k, err := generateRSAPrivateKey(4096)
+	if err != nil {
+		return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
+	}
+
+	return k, nil
+}
diff --git a/vendor/github.com/containers/libtrust/util.go b/vendor/github.com/containers/libtrust/util.go
new file mode 100644
index 000000000..a5a101d3f
--- /dev/null
+++ b/vendor/github.com/containers/libtrust/util.go
@@ -0,0 +1,363 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/elliptic"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base32"
+	"encoding/base64"
+	"encoding/binary"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"math/big"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+// LoadOrCreateTrustKey will load a PrivateKey from the specified path,
+// generating and saving a new key there if none exists yet.
+func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
+	if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
+		return nil, err
+	}
+
+	trustKey, err := LoadKeyFile(trustKeyPath)
+	if err == ErrKeyFileDoesNotExist {
+		trustKey, err = GenerateECP256PrivateKey()
+		if err != nil {
+			return nil, fmt.Errorf("error generating key: %s", err)
+		}
+
+		if err := SaveKey(trustKeyPath, trustKey); err != nil {
+			return nil, fmt.Errorf("error saving key file: %s", err)
+		}
+
+		dir, file := filepath.Split(trustKeyPath)
+		if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
+			return nil, fmt.Errorf("error saving public key file: %s", err)
+		}
+	} else if err != nil {
+		return nil, fmt.Errorf("error loading key file: %s", err)
+	}
+	return trustKey, nil
+}
+
+// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity
+// based authentication from the specified dockerUrl, the rootConfigPath and
+// the server name to which it is connecting.
+// If trustUnknownHosts is true it will automatically add the host to the
+// known-hosts.json in rootConfigPath.
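A call sketch for the function below (endpoint and paths invented). Note that it dials the endpoint once during setup to pin or verify the remote key, so it needs network access:

```go
package clientdemo

import (
	"crypto/tls"

	lt "github.com/containers/libtrust"
)

// dialConfig builds an identity-authenticated client TLS configuration.
func dialConfig() (*tls.Config, error) {
	return lt.NewIdentityAuthTLSClientConfig(
		"tcp://docker.example.com:2376", // dockerUrl; "unix" URLs yield a nil config
		true,                            // trustUnknownHosts: pin new hosts into known-hosts.json
		"/root/.docker/trust",           // rootConfigPath holding key.json and known-hosts.json
		"docker.example.com",            // serverName presented for verification
	)
}
```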
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
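+// For example, a two-character input such as "sw" (length 2 mod 4) is padded
+// to "sw==" before being handed to the standard decoder.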
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
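+	// For example, 65537 (0x00010001 as four octets) must be serialized as
+	// the three octets 0x01 0x00 0x01, with the leading zero byte dropped.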
+	// E is supposed to be 65537 for performance and security reasons
+	// and is what golang's rsa package generates, but it might be
+	// different if imported from some other generator.
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(e))
+	var i int
+	// Stop at 3 rather than len(buf) so at least one octet remains and the
+	// index can never run past the 4-byte buffer, even when e == 0.
+	for i = 0; i < 3; i++ {
+		if buf[i] != 0 {
+			break
+		}
+	}
+	return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+	eBytes, err := joseBase64UrlDecode(eB64Url)
+	if err != nil {
+		return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	// Only the minimum number of bytes were used to represent E, but
+	// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
+	byteLen := len(eBytes)
+	if byteLen > 4 {
+		return 0, errors.New("invalid public exponent: must fit in 32 bits")
+	}
+	buf := make([]byte, 4-byteLen, 4)
+	eBytes = append(buf, eBytes...)
+
+	return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+	b64Url, err := stringFromMap(m, key)
+	if err != nil {
+		return nil, err
+	}
+
+	paramBytes, err := joseBase64UrlDecode(b64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+	for k, v := range headers {
+		switch val := v.(type) {
+		case string:
+			pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				return nil, fmt.Errorf("unable to encode PEM header %q: unsupported []string value", k)
+			}
+		default:
+			return nil, fmt.Errorf("unable to encode PEM header %q: unsupported type %T", k, v)
+		}
+	}
+
+	return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+	cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+	}
+
+	pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addPEMHeadersToKey(pemBlock, pubKey)
+
+	return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+	for key, value := range pemBlock.Headers {
+		var safeVal interface{}
+		if key == "hosts" {
+			safeVal = strings.Split(value, ",")
+		} else {
+			safeVal = value
+		}
+		pubKey.AddExtendedField(key, safeVal)
+	}
+}
diff --git a/vendor/github.com/containers/ocicrypt/LICENSE b/vendor/github.com/containers/ocicrypt/LICENSE
new file mode 100644
index 000000000..953563530
--- /dev/null
+++ b/vendor/github.com/containers/ocicrypt/LICENSE
@@ -0,0 +1,189 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/ocicrypt/spec/spec.go b/vendor/github.com/containers/ocicrypt/spec/spec.go new file mode 100644 index 000000000..8665f6f21 --- /dev/null +++ b/vendor/github.com/containers/ocicrypt/spec/spec.go @@ -0,0 +1,16 @@ +package spec + +const ( + // MediaTypeLayerEnc is MIME type used for encrypted layers. + MediaTypeLayerEnc = "application/vnd.oci.image.layer.v1.tar+encrypted" + // MediaTypeLayerGzipEnc is MIME type used for encrypted gzip-compressed layers. + MediaTypeLayerGzipEnc = "application/vnd.oci.image.layer.v1.tar+gzip+encrypted" + // MediaTypeLayerZstdEnc is MIME type used for encrypted zstd-compressed layers. 
+	MediaTypeLayerZstdEnc = "application/vnd.oci.image.layer.v1.tar+zstd+encrypted"
+	// MediaTypeLayerNonDistributableEnc is MIME type used for non-distributable encrypted layers.
+	MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted"
+	// MediaTypeLayerNonDistributableGzipEnc is MIME type used for non-distributable encrypted gzip-compressed layers.
+	MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted"
+	// MediaTypeLayerNonDistributableZsdtEnc ("Zsdt" preserves the upstream identifier's spelling) is MIME type used for non-distributable encrypted zstd-compressed layers.
+	MediaTypeLayerNonDistributableZsdtEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted"
+)
diff --git a/vendor/github.com/containers/storage/AUTHORS b/vendor/github.com/containers/storage/AUTHORS
new file mode 100644
index 000000000..129dd3969
--- /dev/null
+++ b/vendor/github.com/containers/storage/AUTHORS
@@ -0,0 +1,1523 @@
+# This file lists all individuals having contributed content to the repository.
+# For how it is generated, see `hack/generate-authors.sh`.
+
+Aanand Prasad
+Aaron Davidson
+Aaron Feng
+Aaron Huslage
+Aaron Lehmann
+Aaron Welch
+Abel Muiño
+Abhijeet Kasurde
+Abhinav Ajgaonkar
+Abhishek Chanda
+Abin Shahab
+Adam Miller
+Adam Singer
+Aditi Rajagopal
+Aditya
+Adria Casas
+Adrian Mouat
+Adrian Oprea
+Adrien Folie
+Adrien Gallouët
+Ahmed Kamal
+Ahmet Alp Balkan
+Aidan Feldman
+Aidan Hobson Sayers
+AJ Bowen
+Ajey Charantimath
+ajneu
+Akihiro Suda
+Al Tobey
+alambike
+Alan Scherger
+Alan Thompson
+Albert Callarisa
+Albert Zhang
+Aleksa Sarai
+Aleksandrs Fadins
+Alena Prokharchyk
+Alessandro Boch
+Alessio Biancalana
+Alex Chan
+Alex Crawford
+Alex Ellis
+Alex Gaynor
+Alex Samorukov
+Alex Warhawk
+Alexander Artemenko
+Alexander Boyd
+Alexander Larsson
+Alexander Morozov
+Alexander Shopov
+Alexandre Beslic
+Alexandre González
+Alexandru Sfirlogea
+Alexey Guskov
+Alexey Kotlyarov
+Alexey Shamrin
+Alexis THOMAS
+Ali Dehghani
+Allen Madsen
+Allen Sun
+almoehi
+Alvin Richards
+amangoel
+Amen Belayneh
+Amit Bakshi
+Amit Krishnan
+Amy Lindburg
+Anand Patil
+AnandkumarPatel
+Anatoly Borodin
+Anchal Agrawal
+Anders Janmyr
+Andre Dublin <81dublin@gmail.com>
+Andre Granovsky
+Andrea Luzzardi
+Andrea Turli
+Andreas Köhler
+Andreas Savvides
+Andreas Tiefenthaler
+Andrew C.
Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew Munsell +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Ankush Agarwal +Anonmily +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +ArikaChen +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bill W +bin liu +Blake Geno +Boaz Shuster +bobby abbott +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Carl Henrik Lunde +Carl X. Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Hanxiao +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. 
Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Cory Forsyth +cressie176 +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Hiltgen +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Gageot +David Gebler +David Lawrence +David Mackey +David Mat +David Mcanulty +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Xia +David Young +Davide Ceretti +Dawn Chen +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dieter Reuter +Dima Stopel +Dimitri John Ledkov +Dimitry Andric +Dinesh Subhraveti +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitry Demeshchuk +Dmitry Gusev +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivind Uggedal +Elan Ruusamäe +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Windisch +Eric Yang +Eric-Olivier Lamey +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Fabiano Rosas +Fabio Falci +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Schindler +Ferenc Szabo +Fernando +Fero Volar +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +Francesc Campoy +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. 
Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Monroy +GabrielNicolasAvellaneda +Galen Sampson +Gareth Rushgrove +Garrett Barboza +Gaurav +gautam, prasanna +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Thornton +grossws +grunny +gs11 +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +He Simei +heartlock <21521209@zju.edu.cn> +Hector Castro +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou <1187766782@qq.com> +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Lee +Ian Main +Ian Truslove +Iavael +Icaro Seara +Igor Dolzhikov +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +J Bruni +J. Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jake Champlin +Jake Moshenko +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Paul Calderone +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Jian Zhang +jianbosun +Jilles Oldenbeuving +Jim Alateras +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +jimmyxian +Jinsoo Park +Jiri Popelka +Jiří Župka +jjy +jmzwcn +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Howard (VM) +John OBrien III +John Starks +John Tims +John Warwick +John Willis +Jon Wedaman +Jonas Pfenniger +Jonathan A. 
Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jordan Williams +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Josh +Josh Hawn +Josh Poimboeuf +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +kayrus +Ke Xu +Keli Hu +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickael Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Clark +Kevin J. Lynagh +Kevin Menard +Kevin P. Kucharczyk +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Pelykh +Krasimir Georgiev +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +lalyos +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Laszlo Meszaros +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Marshall +Lewis Peckover +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liaoqingwei +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +LIZAO LI +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Luis Martínez de Bartolomé Izquierdo +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +lukemarsden +Lynda O'Leary +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mahesh Tiyyagura +malnick +Malte Janduda +manchoz +Manfred Touron +Manfred Zabarauskas +mansinahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark West +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt McCormick +Matt Moore +Matt Robenolt +Matthew Heon +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mengdi Gao +Mert Yazıcıoğlu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minar +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miloslav Trmač +mingqing +Mingzhen Feng +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Nelson Chen +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +NikolaMandic +nikolas +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +Otto Kekäläinen +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Hammond +Paul Jimenez +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Prayag Verma +Przemek Hejman +pysqz +qg <1373319223@qq.com> +qhuang +Qiang Huang +qq690388648 <690388648@qq.com> +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Regan McCooey +Remi Rampin +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Stern +Robert Wallis +Roberto G. 
Hashioka +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +scaleoutsean +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean OMeara +Sean P. Kane +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +Shekhar Gulati +Sheng Yang +Shengbo Song +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +shuai-z +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Simei He +Simon Eskildsen +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tibor Vass +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonis Tiigi +Tonny Xu +Tony Daws +Tony Miller +toogley +Torstein Husebø +tpng +tracylihui <793912329@qq.com> +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +Tõnis Tiigi +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Coisne +Victor Costan +Victor I. Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Xing +Ward Vandewege +WarheadsSE +Wayne Chang +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wes Morgan +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +xamyzhao +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +yangshukui +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +Youcef YEKHLEF +Yuan Sun +yuchangchun +yuchengxia +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +Zhenan Ye <21551168@zju.edu.cn> +Zhu Guihua +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +搏通 diff --git a/vendor/github.com/containers/storage/LICENSE b/vendor/github.com/containers/storage/LICENSE new file mode 100644 index 000000000..8f3fee627 --- /dev/null +++ b/vendor/github.com/containers/storage/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/storage/NOTICE b/vendor/github.com/containers/storage/NOTICE new file mode 100644 index 000000000..8a37c1c7b --- /dev/null +++ b/vendor/github.com/containers/storage/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2016 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go new file mode 100644 index 000000000..aeb7cfd4f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go @@ -0,0 +1,454 @@ +package compressor + +// NOTE: This is used from github.com/containers/image by callers that +// don't otherwise use containers/storage, so don't make this depend on any +// larger software like the graph drivers. 
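+//
+// A minimal in-package usage sketch; it assumes the zstdChunkedWriterWithLevel
+// helper documented at the end of this file returns an io.WriteCloser, and
+// tarStream stands for any io.Reader yielding a tarball:
+//
+//	var buf bytes.Buffer
+//	w, err := zstdChunkedWriterWithLevel(&buf, map[string]string{}, 3)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(w, tarStream); err != nil {
+//		return err
+//	}
+//	return w.Close()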
+
+import (
+	"bufio"
+	"encoding/base64"
+	"io"
+	"io/ioutil"
+
+	"github.com/containers/storage/pkg/chunked/internal"
+	"github.com/containers/storage/pkg/ioutils"
+	"github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/archive/tar"
+)
+
+const RollsumBits = 16
+const holesThreshold = int64(1 << 10)
+
+type holesFinder struct {
+	reader    *bufio.Reader
+	fileOff   int64
+	zeros     int64
+	from      int64
+	threshold int64
+
+	state int
+}
+
+const (
+	holesFinderStateRead = iota
+	holesFinderStateAccumulate
+	holesFinderStateFound
+	holesFinderStateEOF
+)
+
+// ReadByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.threshold consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00', nil).
+func (f *holesFinder) ReadByte() (int64, byte, error) {
+	for {
+		switch f.state {
+		// reading the file stream
+		case holesFinderStateRead:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				return 0, b, err
+			}
+
+			if b != 0 {
+				return 0, b, err
+			}
+
+			f.zeros = 1
+			if f.zeros == f.threshold {
+				f.state = holesFinderStateFound
+			} else {
+				f.state = holesFinderStateAccumulate
+			}
+		// accumulating zeros, but still didn't reach the threshold
+		case holesFinderStateAccumulate:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+					continue
+				}
+				return 0, b, err
+			}
+
+			if b == 0 {
+				f.zeros++
+				if f.zeros == f.threshold {
+					f.state = holesFinderStateFound
+				}
+			} else {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+			}
+		// found a hole. Number of zeros >= threshold
+		case holesFinderStateFound:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+				}
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			if b != 0 {
+				if err := f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			f.zeros++
+		// reached EOF. Flush pending zeros if any.
+		case holesFinderStateEOF:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			return 0, 0, io.EOF
+		}
+	}
+}
+
+type rollingChecksumReader struct {
+	reader      *holesFinder
+	closed      bool
+	rollsum     *RollSum
+	pendingHole int64
+
+	// WrittenOut is the total number of bytes read from
+	// the stream.
+	WrittenOut int64
+
+	// IsLastChunkZeros tells whether the last generated
+	// chunk is a hole (made of consecutive zeros). If it
+	// is false, then the last chunk is a data chunk
+	// generated by the rolling checksum.
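+	// Hole chunks are recorded as ChunkTypeZeros in the manifest, which
+	// lets consumers recreate sparse regions without transferring the
+	// zero bytes themselves.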
+	IsLastChunkZeros bool
+}
+
+func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
+	rc.IsLastChunkZeros = false
+
+	if rc.pendingHole > 0 {
+		toCopy := int64(len(b))
+		if rc.pendingHole < toCopy {
+			toCopy = rc.pendingHole
+		}
+		rc.pendingHole -= toCopy
+		for i := int64(0); i < toCopy; i++ {
+			b[i] = 0
+		}
+
+		rc.WrittenOut += toCopy
+
+		rc.IsLastChunkZeros = true
+
+		// if there are no other zeros left, terminate the chunk
+		return rc.pendingHole == 0, int(toCopy), nil
+	}
+
+	if rc.closed {
+		return false, 0, io.EOF
+	}
+
+	for i := 0; i < len(b); i++ {
+		holeLen, n, err := rc.reader.ReadByte()
+		if err != nil {
+			if err == io.EOF {
+				rc.closed = true
+				if i == 0 {
+					return false, 0, err
+				}
+				return false, i, nil
+			}
+			// Report any other error type
+			return false, -1, err
+		}
+		if holeLen > 0 {
+			for j := int64(0); j < holeLen; j++ {
+				rc.rollsum.Roll(0)
+			}
+			rc.pendingHole = holeLen
+			return true, i, nil
+		}
+		b[i] = n
+		rc.WrittenOut++
+		rc.rollsum.Roll(n)
+		if rc.rollsum.OnSplitWithBits(RollsumBits) {
+			return true, i + 1, nil
+		}
+	}
+	return false, len(b), nil
+}
+
+type chunk struct {
+	ChunkOffset int64
+	Offset      int64
+	Checksum    string
+	ChunkSize   int64
+	ChunkType   string
+}
+
+func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
+	// total written so far. Used to retrieve partial offsets in the file
+	dest := ioutils.NewWriteCounter(destFile)
+
+	tr := tar.NewReader(reader)
+	tr.RawAccounting = true
+
+	buf := make([]byte, 4096)
+
+	zstdWriter, err := internal.ZstdWriterWithLevel(dest, level)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if zstdWriter != nil {
+			zstdWriter.Close()
+			zstdWriter.Flush()
+		}
+	}()
+
+	restartCompression := func() (int64, error) {
+		var offset int64
+		if zstdWriter != nil {
+			if err := zstdWriter.Close(); err != nil {
+				return 0, err
+			}
+			if err := zstdWriter.Flush(); err != nil {
+				return 0, err
+			}
+			offset = dest.Count
+			zstdWriter.Reset(dest)
+		}
+		return offset, nil
+	}
+
+	var metadata []internal.FileMetadata
+	for {
+		hdr, err := tr.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		rawBytes := tr.RawBytes()
+		if _, err := zstdWriter.Write(rawBytes); err != nil {
+			return err
+		}
+
+		payloadDigester := digest.Canonical.Digester()
+		chunkDigester := digest.Canonical.Digester()
+
+		// Now handle the payload, if any
+		startOffset := int64(0)
+		lastOffset := int64(0)
+		lastChunkOffset := int64(0)
+
+		checksum := ""
+
+		chunks := []chunk{}
+
+		hf := &holesFinder{
+			threshold: holesThreshold,
+			reader:    bufio.NewReader(tr),
+		}
+
+		rcReader := &rollingChecksumReader{
+			reader:  hf,
+			rollsum: NewRollSum(),
+		}
+
+		payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+		for {
+			mustSplit, read, errRead := rcReader.Read(buf)
+			if errRead != nil && errRead != io.EOF {
+				return errRead
+			}
+			// restart the compression only if there is a payload.
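+			// (Editor's note, derived from restartCompression above:
+			// restarting closes the current zstd frame and opens a new one
+			// at the current output offset, so each file payload begins on
+			// a frame boundary and can later be fetched and decompressed
+			// independently of the rest of the layer.)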
+ if read > 0 { + if startOffset == 0 { + startOffset, err = restartCompression() + if err != nil { + return err + } + lastOffset = startOffset + } + + if _, err := payloadDest.Write(buf[:read]); err != nil { + return err + } + } + if (mustSplit || errRead == io.EOF) && startOffset > 0 { + off, err := restartCompression() + if err != nil { + return err + } + + chunkSize := rcReader.WrittenOut - lastChunkOffset + if chunkSize > 0 { + chunkType := internal.ChunkTypeData + if rcReader.IsLastChunkZeros { + chunkType = internal.ChunkTypeZeros + } + + chunks = append(chunks, chunk{ + ChunkOffset: lastChunkOffset, + Offset: lastOffset, + Checksum: chunkDigester.Digest().String(), + ChunkSize: chunkSize, + ChunkType: chunkType, + }) + } + + lastOffset = off + lastChunkOffset = rcReader.WrittenOut + chunkDigester = digest.Canonical.Digester() + payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) + } + if errRead == io.EOF { + if startOffset > 0 { + checksum = payloadDigester.Digest().String() + } + break + } + } + + typ, err := internal.GetType(hdr.Typeflag) + if err != nil { + return err + } + xattrs := make(map[string]string) + for k, v := range hdr.Xattrs { + xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v)) + } + entries := []internal.FileMetadata{ + { + Type: typ, + Name: hdr.Name, + Linkname: hdr.Linkname, + Mode: hdr.Mode, + Size: hdr.Size, + UID: hdr.Uid, + GID: hdr.Gid, + ModTime: &hdr.ModTime, + AccessTime: &hdr.AccessTime, + ChangeTime: &hdr.ChangeTime, + Devmajor: hdr.Devmajor, + Devminor: hdr.Devminor, + Xattrs: xattrs, + Digest: checksum, + Offset: startOffset, + EndOffset: lastOffset, + }, + } + for i := 1; i < len(chunks); i++ { + entries = append(entries, internal.FileMetadata{ + Type: internal.TypeChunk, + Name: hdr.Name, + ChunkOffset: chunks[i].ChunkOffset, + }) + } + if len(chunks) > 1 { + for i := range chunks { + entries[i].ChunkSize = chunks[i].ChunkSize + entries[i].Offset = chunks[i].Offset + entries[i].ChunkDigest = chunks[i].Checksum + entries[i].ChunkType = chunks[i].ChunkType + } + } + metadata = append(metadata, entries...) + } + + rawBytes := tr.RawBytes() + if _, err := zstdWriter.Write(rawBytes); err != nil { + return err + } + if err := zstdWriter.Flush(); err != nil { + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + zstdWriter = nil + + return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level) +} + +type zstdChunkedWriter struct { + tarSplitOut *io.PipeWriter + tarSplitErr chan error +} + +func (w zstdChunkedWriter) Close() error { + err := <-w.tarSplitErr + if err != nil { + w.tarSplitOut.Close() + return err + } + return w.tarSplitOut.Close() +} + +func (w zstdChunkedWriter) Write(p []byte) (int, error) { + select { + case err := <-w.tarSplitErr: + w.tarSplitOut.Close() + return 0, err + default: + return w.tarSplitOut.Write(p) + } +} + +// zstdChunkedWriterWithLevel writes a zstd compressed tarball where each file is +// compressed separately so it can be addressed separately. Idea based on CRFS: +// https://github.com/google/crfs +// The difference with CRFS is that the zstd compression is used instead of gzip. +// The reason for it is that zstd supports embedding metadata ignored by the decoder +// as part of the compressed stream. +// A manifest json file with all the metadata is appended at the end of the tarball +// stream, using zstd skippable frames. 
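+// Standard zstd decoders are required to skip over skippable frames, so the
+// result remains a valid zstd stream when decompressed sequentially.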
+// The final file will look like: +// [FILE_1][FILE_2]..[FILE_N][SKIPPABLE FRAME 1][SKIPPABLE FRAME 2] +// Where: +// [FILE_N]: [ZSTD HEADER][TAR HEADER][PAYLOAD FILE_N][ZSTD FOOTER] +// [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST] +// [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER] +// MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format. +func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level int) (io.WriteCloser, error) { + ch := make(chan error, 1) + r, w := io.Pipe() + + go func() { + ch <- writeZstdChunkedStream(out, metadata, r, level) + io.Copy(ioutil.Discard, r) + r.Close() + close(ch) + }() + + return zstdChunkedWriter{ + tarSplitOut: w, + tarSplitErr: ch, + }, nil +} + +// ZstdCompressor is a CompressorFunc for the zstd compression algorithm. +func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { + if level == nil { + l := 10 + level = &l + } + + return zstdChunkedWriterWithLevel(r, metadata, *level) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go new file mode 100644 index 000000000..f4dfad822 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go @@ -0,0 +1,81 @@ +/* +Copyright 2011 The Perkeep Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package rollsum implements rolling checksums similar to apenwarr's bup, which +// is similar to librsync. +// +// The bup project is at https://github.com/apenwarr/bup and its splitting in +// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c +package compressor + +import ( + "math/bits" +) + +const windowSize = 64 // Roll assumes windowSize is a power of 2 +const charOffset = 31 + +const blobBits = 13 +const blobSize = 1 << blobBits // 8k + +type RollSum struct { + s1, s2 uint32 + window [windowSize]uint8 + wofs int +} + +func NewRollSum() *RollSum { + return &RollSum{ + s1: windowSize * charOffset, + s2: windowSize * (windowSize - 1) * charOffset, + } +} + +func (rs *RollSum) add(drop, add uint32) { + s1 := rs.s1 + add - drop + rs.s1 = s1 + rs.s2 += s1 - uint32(windowSize)*(drop+charOffset) +} + +// Roll adds ch to the rolling sum. +func (rs *RollSum) Roll(ch byte) { + wp := &rs.window[rs.wofs] + rs.add(uint32(*wp), uint32(ch)) + *wp = ch + rs.wofs = (rs.wofs + 1) & (windowSize - 1) +} + +// OnSplit reports whether at least 13 consecutive trailing bits of +// the current checksum are set the same way. +func (rs *RollSum) OnSplit() bool { + return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1)) +} + +// OnSplitWithBits reports whether at least n consecutive trailing bits +// of the current checksum are set the same way. 
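+//
+// Illustrative sketch (not upstream code; `data` is a hypothetical byte
+// stream) of the splitting loop this enables:
+//
+//	rs := NewRollSum()
+//	for _, c := range data {
+//		rs.Roll(c)
+//		if rs.OnSplitWithBits(RollsumBits) {
+//			// cut a chunk here; chunks average about 1<<RollsumBits bytes
+//		}
+//	}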
+func (rs *RollSum) OnSplitWithBits(n uint32) bool { + mask := (uint32(1) << n) - 1 + return rs.s2&mask == (^uint32(0))&mask +} + +func (rs *RollSum) Bits() int { + rsum := rs.Digest() >> (blobBits + 1) + return blobBits + bits.TrailingZeros32(^rsum) +} + +func (rs *RollSum) Digest() uint32 { + return (rs.s1 << 16) | (rs.s2 & 0xffff) +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go new file mode 100644 index 000000000..3bb5286d9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -0,0 +1,184 @@ +package internal + +// NOTE: This is used from github.com/containers/image by callers that +// don't otherwise use containers/storage, so don't make this depend on any +// larger software like the graph drivers. + +import ( + "archive/tar" + "bytes" + "encoding/binary" + "fmt" + "io" + "time" + + jsoniter "github.com/json-iterator/go" + "github.com/klauspost/compress/zstd" + "github.com/opencontainers/go-digest" +) + +type TOC struct { + Version int `json:"version"` + Entries []FileMetadata `json:"entries"` + + // internal: used by unmarshalToc + StringsBuf bytes.Buffer `json:"-"` +} + +type FileMetadata struct { + Type string `json:"type"` + Name string `json:"name"` + Linkname string `json:"linkName,omitempty"` + Mode int64 `json:"mode,omitempty"` + Size int64 `json:"size,omitempty"` + UID int `json:"uid,omitempty"` + GID int `json:"gid,omitempty"` + ModTime *time.Time `json:"modtime,omitempty"` + AccessTime *time.Time `json:"accesstime,omitempty"` + ChangeTime *time.Time `json:"changetime,omitempty"` + Devmajor int64 `json:"devMajor,omitempty"` + Devminor int64 `json:"devMinor,omitempty"` + Xattrs map[string]string `json:"xattrs,omitempty"` + Digest string `json:"digest,omitempty"` + Offset int64 `json:"offset,omitempty"` + EndOffset int64 `json:"endOffset,omitempty"` + + ChunkSize int64 `json:"chunkSize,omitempty"` + ChunkOffset int64 `json:"chunkOffset,omitempty"` + ChunkDigest string `json:"chunkDigest,omitempty"` + ChunkType string `json:"chunkType,omitempty"` + + // internal: computed by mergeTOCEntries. + Chunks []*FileMetadata `json:"-"` +} + +const ( + ChunkTypeData = "" + ChunkTypeZeros = "zeros" +) + +const ( + TypeReg = "reg" + TypeChunk = "chunk" + TypeLink = "hardlink" + TypeChar = "char" + TypeBlock = "block" + TypeDir = "dir" + TypeFifo = "fifo" + TypeSymlink = "symlink" +) + +var TarTypes = map[byte]string{ + tar.TypeReg: TypeReg, + tar.TypeRegA: TypeReg, + tar.TypeLink: TypeLink, + tar.TypeChar: TypeChar, + tar.TypeBlock: TypeBlock, + tar.TypeDir: TypeDir, + tar.TypeFifo: TypeFifo, + tar.TypeSymlink: TypeSymlink, +} + +func GetType(t byte) (string, error) { + r, found := TarTypes[t] + if !found { + return "", fmt.Errorf("unknown tarball type: %v", t) + } + return r, nil +} + +const ( + ManifestChecksumKey = "io.containers.zstd-chunked.manifest-checksum" + ManifestInfoKey = "io.containers.zstd-chunked.manifest-position" + + // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file. + ManifestTypeCRFS = 1 + + // FooterSizeSupported is the footer size supported by this implementation. + // Newer versions of the image format might increase this value, so reject + // any version that is not supported. + FooterSizeSupported = 40 +) + +var ( + // when the zstd decoder encounters a skippable frame + 1 byte for the size, it + // will ignore it. 
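+	// A skippable frame is laid out as a 4-byte magic (0x184D2A50..0x184D2A5F,
+	// little endian), a 4-byte little-endian payload size, and the payload
+	// itself, which is exactly what appendZstdSkippableFrame below emits.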
+ // https://tools.ietf.org/html/rfc8478#section-3.1.2 + skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18} + + ZstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78} +) + +func appendZstdSkippableFrame(dest io.Writer, data []byte) error { + if _, err := dest.Write(skippableFrameMagic); err != nil { + return err + } + + var size []byte = make([]byte, 4) + binary.LittleEndian.PutUint32(size, uint32(len(data))) + if _, err := dest.Write(size); err != nil { + return err + } + if _, err := dest.Write(data); err != nil { + return err + } + return nil +} + +func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []FileMetadata, level int) error { + // 8 is the size of the zstd skippable frame header + the frame size + manifestOffset := offset + 8 + + toc := TOC{ + Version: 1, + Entries: metadata, + } + + var json = jsoniter.ConfigCompatibleWithStandardLibrary + // Generate the manifest + manifest, err := json.Marshal(toc) + if err != nil { + return err + } + + var compressedBuffer bytes.Buffer + zstdWriter, err := ZstdWriterWithLevel(&compressedBuffer, level) + if err != nil { + return err + } + if _, err := zstdWriter.Write(manifest); err != nil { + zstdWriter.Close() + return err + } + if err := zstdWriter.Close(); err != nil { + return err + } + compressedManifest := compressedBuffer.Bytes() + + manifestDigester := digest.Canonical.Digester() + manifestChecksum := manifestDigester.Hash() + if _, err := manifestChecksum.Write(compressedManifest); err != nil { + return err + } + + outMetadata[ManifestChecksumKey] = manifestDigester.Digest().String() + outMetadata[ManifestInfoKey] = fmt.Sprintf("%d:%d:%d:%d", manifestOffset, len(compressedManifest), len(manifest), ManifestTypeCRFS) + if err := appendZstdSkippableFrame(dest, compressedManifest); err != nil { + return err + } + + // Store the offset to the manifest and its size in LE order + var manifestDataLE []byte = make([]byte, FooterSizeSupported) + binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset) + binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest))) + binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest))) + binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS)) + copy(manifestDataLE[32:], ZstdChunkedFrameMagic) + + return appendZstdSkippableFrame(dest, manifestDataLE) +} + +func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { + el := zstd.EncoderLevelFromZstd(level) + return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/buffer.go b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go new file mode 100644 index 000000000..3d737b3e1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b 
*fixedBuffer) Reset() {
+	b.pos = 0
+	b.lastRead = 0
+	b.buf = b.buf[:0]
+}
+
+func (b *fixedBuffer) String() string {
+	return string(b.buf[b.lastRead:b.pos])
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go
new file mode 100644
index 000000000..72a04f349
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go
@@ -0,0 +1,186 @@
+package ioutils
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// maxCap is the highest capacity to use in byte slices that buffer data.
+const maxCap = 1e6
+
+// minCap is the lowest capacity to use in byte slices that buffer data.
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+	// ErrClosed is returned when Write is called on a closed BytesPipe.
+	ErrClosed = errors.New("write to closed BytesPipe")
+
+	bufPools     = make(map[int]*sync.Pool)
+	bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is an io.ReadWriteCloser that works similarly to a pipe (a FIFO
+// queue). All written data may be read at most once. BytesPipe allocates
+// and releases new byte slices to adjust to current needs, so the buffer
+// won't be overgrown after peak loads.
+type BytesPipe struct {
+	mu       sync.Mutex
+	wait     *sync.Cond
+	buf      []*fixedBuffer
+	bufLen   int
+	closeErr error // error to return from next Read. set to nil if not closed.
+}
+
+// NewBytesPipe creates a new BytesPipe. The pipe starts with a single empty
+// buffer of capacity minCap (64 bytes) and grows by allocating progressively
+// larger buffers as data is written.
+func NewBytesPipe() *BytesPipe {
+	bp := &BytesPipe{}
+	bp.buf = append(bp.buf, getBuffer(minCap))
+	bp.wait = sync.NewCond(&bp.mu)
+	return bp
+}
+
+// Write writes p to BytesPipe.
+// It can allocate new []byte slices in a process of writing.
+func (bp *BytesPipe) Write(p []byte) (int, error) {
+	bp.mu.Lock()
+
+	written := 0
+loop0:
+	for {
+		if bp.closeErr != nil {
+			bp.mu.Unlock()
+			return written, ErrClosed
+		}
+
+		if len(bp.buf) == 0 {
+			bp.buf = append(bp.buf, getBuffer(64))
+		}
+		// get the last buffer
+		b := bp.buf[len(bp.buf)-1]
+
+		n, err := b.Write(p)
+		written += n
+		bp.bufLen += n
+
+		// errBufferFull is an error we expect to get if the buffer is full
+		if err != nil && err != errBufferFull {
+			bp.wait.Broadcast()
+			bp.mu.Unlock()
+			return written, err
+		}
+
+		// if there was enough room to write all then break
+		if len(p) == n {
+			break
+		}
+
+		// more data: write to the next slice
+		p = p[n:]
+
+		// make sure the buffer doesn't grow too big from this write
+		for bp.bufLen >= blockThreshold {
+			bp.wait.Wait()
+			if bp.closeErr != nil {
+				continue loop0
+			}
+		}
+
+		// add new byte slice to the buffers slice and continue writing
+		nextCap := b.Cap() * 2
+		if nextCap > maxCap {
+			nextCap = maxCap
+		}
+		bp.buf = append(bp.buf, getBuffer(nextCap))
+	}
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
+	return written, nil
+}
+
+// CloseWithError causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) CloseWithError(err error) error {
+	bp.mu.Lock()
+	if err != nil {
+		bp.closeErr = err
+	} else {
+		bp.closeErr = io.EOF
+	}
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
+	return nil
+}
+
+// Close causes further reads from a BytesPipe to return immediately.
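+// Close is equivalent to CloseWithError(nil): data already buffered can still
+// be read afterwards, and once it is drained Read returns io.EOF.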
+func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. +func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go new file mode 100644 index 000000000..cd12470f9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go @@ -0,0 +1,195 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// AtomicFileWriterOptions specifies options for creating the atomic file writer. +type AtomicFileWriterOptions struct { + // NoSync specifies whether the sync call must be skipped for the file. + // If NoSync is not specified, the file is synced to the + // storage after it has been written and before it is moved to + // the specified path. + NoSync bool +} + +var defaultWriterOptions AtomicFileWriterOptions = AtomicFileWriterOptions{} + +// SetDefaultOptions overrides the default options used when creating an +// atomic file writer. +func SetDefaultOptions(opts AtomicFileWriterOptions) { + defaultWriterOptions = opts +} + +// NewAtomicFileWriterWithOpts returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + if opts == nil { + opts = &defaultWriterOptions + } + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + noSync: opts.NoSync, + }, nil +} + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + return NewAtomicFileWriterWithOpts(filename, perm, nil) +} + +// AtomicWriteFile atomically writes data to a file named by filename. 
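+// (Summary of the mechanism implemented below: the data is first written to a
+// temporary file in the same directory, synced to disk unless NoSync is set,
+// chmodded to perm, and then renamed over filename, so concurrent readers
+// observe either the old or the new contents, never a mix.)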
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+	f, err := NewAtomicFileWriter(filename, perm)
+	if err != nil {
+		return err
+	}
+	n, err := f.Write(data)
+	if err == nil && n < len(data) {
+		err = io.ErrShortWrite
+		f.(*atomicFileWriter).writeErr = err
+	}
+	if err1 := f.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
+
+type atomicFileWriter struct {
+	f        *os.File
+	fn       string
+	writeErr error
+	perm     os.FileMode
+	noSync   bool
+}
+
+func (w *atomicFileWriter) Write(dt []byte) (int, error) {
+	n, err := w.f.Write(dt)
+	if err != nil {
+		w.writeErr = err
+	}
+	return n, err
+}
+
+func (w *atomicFileWriter) Close() (retErr error) {
+	defer func() {
+		if retErr != nil || w.writeErr != nil {
+			os.Remove(w.f.Name())
+		}
+	}()
+	if !w.noSync {
+		if err := fdatasync(w.f); err != nil {
+			w.f.Close()
+			return err
+		}
+	}
+	if err := w.f.Close(); err != nil {
+		return err
+	}
+	if err := os.Chmod(w.f.Name(), w.perm); err != nil {
+		return err
+	}
+	if w.writeErr == nil {
+		return os.Rename(w.f.Name(), w.fn)
+	}
+	return nil
+}
+
+// AtomicWriteSet is used to atomically write a set
+// of files and ensure they are visible at the same time.
+// Must be committed to a new directory.
+type AtomicWriteSet struct {
+	root string
+}
+
+// NewAtomicWriteSet creates a new atomic write set to
+// atomically create a set of files. The given directory
+// is used as the base directory for storing files before
+// commit. If no temporary directory is given the system
+// default is used.
+func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
+	td, err := ioutil.TempDir(tmpDir, "write-set-")
+	if err != nil {
+		return nil, err
+	}
+
+	return &AtomicWriteSet{
+		root: td,
+	}, nil
+}
+
+// WriteFile writes a file to the set, guaranteeing the file
+// has been synced.
+func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
+	f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	n, err := f.Write(data)
+	if err == nil && n < len(data) {
+		err = io.ErrShortWrite
+	}
+	if err1 := f.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
+
+type syncFileCloser struct {
+	*os.File
+}
+
+func (w syncFileCloser) Close() error {
+	if defaultWriterOptions.NoSync {
+		return w.File.Close()
+	}
+	// Sync before closing so that WriteFile's durability guarantee holds.
+	err := fdatasync(w.File)
+	if err1 := w.File.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
+
+// FileWriter opens a file writer inside the set. The file
+// should be synced and closed before calling commit.
+func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
+	f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	return syncFileCloser{f}, nil
+}
+
+// Cancel cancels the set and removes all temporary data
+// created in the set.
+func (ws *AtomicWriteSet) Cancel() error {
+	return os.RemoveAll(ws.root)
+}
+
+// Commit moves all created files to the target directory. The
+// target directory must not exist and the parent of the target
+// directory must exist.
+func (ws *AtomicWriteSet) Commit(target string) error {
+	return os.Rename(ws.root, target)
+}
+
+// String returns the location the set is writing to.
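+//
+// Illustrative sketch of the typical flow for the whole set (not upstream
+// code; paths and `cfg` are hypothetical):
+//
+//	ws, err := NewAtomicWriteSet("")
+//	if err != nil { ... }
+//	if err := ws.WriteFile("config.json", cfg, 0o600); err != nil {
+//		ws.Cancel()
+//		return err
+//	}
+//	if err := ws.Commit("/var/lib/example/bundle"); err != nil { ... }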
+func (ws *AtomicWriteSet) String() string {
+	return ws.root
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go
new file mode 100644
index 000000000..0da78a063
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go
@@ -0,0 +1,11 @@
+package ioutils
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+func fdatasync(f *os.File) error {
+	return unix.Fdatasync(int(f.Fd()))
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go
new file mode 100644
index 000000000..79a094035
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package ioutils
+
+import (
+	"os"
+)
+
+func fdatasync(f *os.File) error {
+	return f.Sync()
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go
new file mode 100644
index 000000000..0e89787d4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/readers.go
@@ -0,0 +1,171 @@
+package ioutils
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"io"
+
+	"golang.org/x/net/context"
+)
+
+type readCloserWrapper struct {
+	io.Reader
+	closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+	return r.closer()
+}
+
+type readWriteToCloserWrapper struct {
+	io.Reader
+	io.WriterTo
+	closer func() error
+}
+
+func (r *readWriteToCloserWrapper) Close() error {
+	return r.closer()
+}
+
+// NewReadCloserWrapper returns a new io.ReadCloser.
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+	if wt, ok := r.(io.WriterTo); ok {
+		return &readWriteToCloserWrapper{
+			Reader:   r,
+			WriterTo: wt,
+			closer:   closer,
+		}
+	}
+	return &readCloserWrapper{
+		Reader: r,
+		closer: closer,
+	}
+}
+
+type readerErrWrapper struct {
+	reader io.Reader
+	closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+	n, err := r.reader.Read(p)
+	if err != nil {
+		r.closer()
+	}
+	return n, err
+}
+
+// NewReaderErrWrapper returns a new io.Reader.
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+	return &readerErrWrapper{
+		reader: r,
+		closer: closer,
+	}
+}
+
+// HashData returns the sha256 sum of src.
+func HashData(src io.Reader) (string, error) {
+	h := sha256.New()
+	if _, err := io.Copy(h, src); err != nil {
+		return "", err
+	}
+	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// OnEOFReader wraps an io.ReadCloser and a function; the function runs when
+// the reader reaches end of file or when it is closed.
+type OnEOFReader struct {
+	Rc io.ReadCloser
+	Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.Rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+// Close closes the file and runs the function.
+func (r *OnEOFReader) Close() error {
+	err := r.Rc.Close()
+	r.runFunc()
+	return err
+}
+
+func (r *OnEOFReader) runFunc() {
+	if fn := r.Fn; fn != nil {
+		fn()
+		r.Fn = nil
+	}
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+	cancel func()
+	pR     *io.PipeReader // Stream to read from
+	pW     *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled.
The returned io.ReadCloser must be closed when it is
+// no longer needed.
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+	pR, pW := io.Pipe()
+
+	// Create a context used to signal when the pipe is closed
+	doneCtx, cancel := context.WithCancel(context.Background())
+
+	p := &cancelReadCloser{
+		cancel: cancel,
+		pR:     pR,
+		pW:     pW,
+	}
+
+	go func() {
+		_, err := io.Copy(pW, in)
+		select {
+		case <-ctx.Done():
+			// If the context was closed, p.closeWithError
+			// was already called. Calling it again would
+			// change the error that Read returns.
+		default:
+			p.closeWithError(err)
+		}
+		in.Close()
+	}()
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				p.closeWithError(ctx.Err())
+			case <-doneCtx.Done():
+				return
+			}
+		}
+	}()
+
+	return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+	return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+	p.pW.CloseWithError(err)
+	p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+	p.closeWithError(io.EOF)
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
new file mode 100644
index 000000000..1539ad21b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+	return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
new file mode 100644
index 000000000..c719c120b
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+	"io/ioutil"
+
+	"github.com/containers/storage/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+	tempDir, err := ioutil.TempDir(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go
new file mode 100644
index 000000000..52a4901ad
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+	"io"
+	"sync"
+)
+
+// WriteFlusher wraps the Write and Flush operation ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the target's lifecycle has already ended.
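+//
+// Illustrative sketch of the common pattern (not upstream code; assumes w is
+// an http.ResponseWriter, whose concrete type typically satisfies the
+// unexported flusher interface below):
+//
+//	wf := NewWriteFlusher(w)
+//	defer wf.Close()
+//	fmt.Fprintln(wf, "progress...") // written and flushed in one step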
+type WriteFlusher struct {
+	w           io.Writer
+	flusher     flusher
+	flushed     chan struct{}
+	flushedOnce sync.Once
+	closed      chan struct{}
+	closeLock   sync.Mutex
+}
+
+type flusher interface {
+	Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+	select {
+	case <-wf.closed:
+		return 0, errWriteFlusherClosed
+	default:
+	}
+
+	n, err = wf.w.Write(b)
+	wf.Flush() // every write is a flush.
+	return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+	select {
+	case <-wf.closed:
+		return
+	default:
+	}
+
+	wf.flushedOnce.Do(func() {
+		close(wf.flushed)
+	})
+	wf.flusher.Flush()
+}
+
+// Flushed reports whether the stream has been flushed at least once: it
+// returns true if a flush has happened, and false otherwise.
+func (wf *WriteFlusher) Flushed() bool {
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+	// be used to detect whether or not a response code has been issued.
+	// Another hook should be used instead.
+	var flushed bool
+	select {
+	case <-wf.flushed:
+		flushed = true
+	default:
+	}
+	return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+	wf.closeLock.Lock()
+	defer wf.closeLock.Unlock()
+
+	select {
+	case <-wf.closed:
+		return errWriteFlusherClosed
+	default:
+		close(wf.closed)
+	}
+	return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+	var fl flusher
+	if f, ok := w.(flusher); ok {
+		fl = f
+	} else {
+		fl = &NopFlusher{}
+	}
+	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/github.com/containers/storage/pkg/ioutils/writers.go
new file mode 100644
index 000000000..ccc7f9c23
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type whose Write operation is a no-op.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+	return len(buf), nil
+}
+
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+	return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type whose Flush operation is a no-op.
+type NopFlusher struct{}
+
+// Flush is a no-op operation.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+	io.Writer
+	closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+	return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+	return &writeCloserWrapper{
+		Writer: r,
+		closer: closer,
+	}
+}
+
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the write return value is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+	Count  int64
+	Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
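+//
+// Illustrative use (not upstream code; `dst` and `v` are hypothetical):
+//
+//	wc := NewWriteCounter(dst)
+//	if err := json.NewEncoder(wc).Encode(v); err != nil { ... }
+//	size := wc.Count // bytes actually written to dst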
+func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/containers/storage/pkg/longpath/longpath.go b/vendor/github.com/containers/storage/pkg/longpath/longpath.go new file mode 100644 index 000000000..9b15bfff4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/coreos/go-oidc/v3/LICENSE b/vendor/github.com/coreos/go-oidc/v3/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/coreos/go-oidc/v3/NOTICE b/vendor/github.com/coreos/go-oidc/v3/NOTICE new file mode 100644 index 000000000..b39ddfa5c --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go new file mode 100644 index 000000000..b7bd09275 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jose.go @@ -0,0 +1,17 @@ +package oidc + +// JOSE asymmetric signing algorithm values as defined by RFC 7518 +// +// see: https://tools.ietf.org/html/rfc7518#section-3.1 +const ( + RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256 + RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384 + RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512 + ES256 = "ES256" // ECDSA using P-256 and SHA-256 + ES384 = "ES384" // ECDSA using P-384 and SHA-384 + ES512 = "ES512" // ECDSA using P-521 and SHA-512 + PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256 + PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384 + PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512 + EdDSA = "EdDSA" // Ed25519 using SHA-512 +) diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go new file mode 100644 index 000000000..539933b3d --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go @@ -0,0 +1,250 @@ +package oidc + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "errors" + "fmt" + "io/ioutil" + "net/http" + "sync" + "time" + + jose "github.com/go-jose/go-jose/v3" +) + +// StaticKeySet is a verifier that validates JWT against a static set of public keys. +type StaticKeySet struct { + // PublicKeys used to verify the JWT. Supported types are *rsa.PublicKey and + // *ecdsa.PublicKey. + PublicKeys []crypto.PublicKey +} + +// VerifySignature compares the signature against a static set of public keys. +func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) { + jws, err := jose.ParseSigned(jwt) + if err != nil { + return nil, fmt.Errorf("parsing jwt: %v", err) + } + for _, pub := range s.PublicKeys { + switch pub.(type) { + case *rsa.PublicKey: + case *ecdsa.PublicKey: + case ed25519.PublicKey: + default: + return nil, fmt.Errorf("invalid public key type provided: %T", pub) + } + payload, err := jws.Verify(pub) + if err != nil { + continue + } + return payload, nil + } + return nil, fmt.Errorf("no public keys able to verify jwt") +} + +// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP +// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically +// used by NewProvider using the URLs returned by OpenID Connect discovery, but is +// exposed for providers that don't support discovery or to prevent round trips to the +// discovery URL. +// +// The returned KeySet is a long lived verifier that caches keys based on any +// keys change. Reuse a common remote key set instead of creating new ones as needed. +func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet { + return newRemoteKeySet(ctx, jwksURL, time.Now) +} + +func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *RemoteKeySet { + if now == nil { + now = time.Now + } + return &RemoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now} +} + +// RemoteKeySet is a KeySet implementation that validates JSON web tokens against +// a jwks_uri endpoint. +type RemoteKeySet struct { + jwksURL string + ctx context.Context + now func() time.Time + + // guard all other fields + mu sync.RWMutex + + // inflight suppresses parallel execution of updateKeys and allows + // multiple goroutines to wait for its result. 
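+	// (Editor's note, derived from keysFromRemote below: concurrent
+	// verifications share one JWKS fetch; the first caller creates the
+	// inflight entry and every other caller blocks on its wait() channel.)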
+	inflight *inflight
+
+	// A set of cached keys.
+	cachedKeys []jose.JSONWebKey
+}
+
+// inflight is used to wait on some in-flight request from multiple goroutines.
+type inflight struct {
+	doneCh chan struct{}
+
+	keys []jose.JSONWebKey
+	err  error
+}
+
+func newInflight() *inflight {
+	return &inflight{doneCh: make(chan struct{})}
+}
+
+// wait returns a channel that multiple goroutines can receive on. Once it returns
+// a value, the inflight request is done and result() can be inspected.
+func (i *inflight) wait() <-chan struct{} {
+	return i.doneCh
+}
+
+// done can only be called by a single goroutine. It records the result of the
+// inflight request and signals other goroutines that the result is safe to
+// inspect.
+func (i *inflight) done(keys []jose.JSONWebKey, err error) {
+	i.keys = keys
+	i.err = err
+	close(i.doneCh)
+}
+
+// result cannot be called until the wait() channel has returned a value.
+func (i *inflight) result() ([]jose.JSONWebKey, error) {
+	return i.keys, i.err
+}
+
+// parsedJWTKey is a context key that allows common setups to avoid parsing the
+// JWT twice. It holds a *jose.JSONWebSignature value.
+var parsedJWTKey contextKey
+
+// VerifySignature validates a payload against a signature from the jwks_uri.
+//
+// Users MUST NOT call this method directly and should use an IDTokenVerifier
+// instead. This method skips critical validations such as 'alg' values and is
+// only exported to implement the KeySet interface.
+func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
+	jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature)
+	if !ok {
+		var err error
+		jws, err = jose.ParseSigned(jwt)
+		if err != nil {
+			return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+		}
+	}
+	return r.verify(ctx, jws)
+}
+
+func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
+	// We don't support JWTs signed with multiple signatures.
+	keyID := ""
+	for _, sig := range jws.Signatures {
+		keyID = sig.Header.KeyID
+		break
+	}
+
+	keys := r.keysFromCache()
+	for _, key := range keys {
+		if keyID == "" || key.KeyID == keyID {
+			if payload, err := jws.Verify(&key); err == nil {
+				return payload, nil
+			}
+		}
+	}
+
+	// If the kid doesn't match, check for new keys from the remote. This is the
+	// strategy recommended by the spec.
+	//
+	// https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
+	keys, err := r.keysFromRemote(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("fetching keys %v", err)
+	}
+
+	for _, key := range keys {
+		if keyID == "" || key.KeyID == keyID {
+			if payload, err := jws.Verify(&key); err == nil {
+				return payload, nil
+			}
+		}
+	}
+	return nil, errors.New("failed to verify id token signature")
+}
+
+func (r *RemoteKeySet) keysFromCache() (keys []jose.JSONWebKey) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return r.cachedKeys
+}
+
+// keysFromRemote syncs the key set from the remote set, records the values in the
+// cache, and returns the key set.
+func (r *RemoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
+	// Need to lock to inspect the inflight request field.
+	r.mu.Lock()
+	// If there's not a current inflight request, create one.
+	if r.inflight == nil {
+		r.inflight = newInflight()
+
+		// This goroutine has exclusive ownership over the current inflight
+		// request. It releases the resource by nil'ing the inflight field
+		// once the goroutine is done.
+		go func() {
+			// Sync keys and finish inflight when that's done.
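+			// (Editor's note: done() is invoked before re-acquiring r.mu, so
+			// waiters are released even if the lock is contended; the cache
+			// and the inflight slot are then updated under the lock.)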
+ keys, err := r.updateKeys() + + r.inflight.done(keys, err) + + // Lock to update the keys and indicate that there is no longer an + // inflight request. + r.mu.Lock() + defer r.mu.Unlock() + + if err == nil { + r.cachedKeys = keys + } + + // Free inflight so a different request can run. + r.inflight = nil + }() + } + inflight := r.inflight + r.mu.Unlock() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-inflight.wait(): + return inflight.result() + } +} + +func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) { + req, err := http.NewRequest("GET", r.jwksURL, nil) + if err != nil { + return nil, fmt.Errorf("oidc: can't create request: %v", err) + } + + resp, err := doRequest(r.ctx, req) + if err != nil { + return nil, fmt.Errorf("oidc: get keys failed %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body) + } + + var keySet jose.JSONWebKeySet + err = unmarshalResp(resp, body, &keySet) + if err != nil { + return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body) + } + return keySet.Keys, nil +} diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go new file mode 100644 index 000000000..b159d1ccd --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go @@ -0,0 +1,547 @@ +// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package. +package oidc + +import ( + "context" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "io/ioutil" + "mime" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/oauth2" +) + +const ( + // ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests. + ScopeOpenID = "openid" + + // ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting + // OAuth2 refresh tokens. + // + // Support for this scope differs between OpenID Connect providers. For instance + // Google rejects it, favoring appending "access_type=offline" as part of the + // authorization request instead. + // + // See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess + ScopeOfflineAccess = "offline_access" +) + +var ( + errNoAtHash = errors.New("id token did not have an access token hash") + errInvalidAtHash = errors.New("access token hash does not match value in ID token") +) + +type contextKey int + +var issuerURLKey contextKey + +// ClientContext returns a new Context that carries the provided HTTP client. +// +// This method sets the same context key used by the golang.org/x/oauth2 package, +// so the returned context works for that package too. +// +// myClient := &http.Client{} +// ctx := oidc.ClientContext(parentContext, myClient) +// +// // This will use the custom client +// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com") +func ClientContext(ctx context.Context, client *http.Client) context.Context { + return context.WithValue(ctx, oauth2.HTTPClient, client) +} + +func getClient(ctx context.Context) *http.Client { + if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok { + return c + } + return nil +} + +// InsecureIssuerURLContext allows discovery to work when the issuer_url reported +// by upstream is mismatched with the discovery URL. 
This is meant for integration +// with off-spec providers such as Azure. +// +// discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0" +// issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0" +// +// ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL) +// +// // Provider will be discovered with the discoveryBaseURL, but use issuerURL +// // for future issuer validation. +// provider, err := oidc.NewProvider(ctx, discoveryBaseURL) +// +// This is insecure because validating the correct issuer is critical for multi-tenant +// proivders. Any overrides here MUST be carefully reviewed. +func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context { + return context.WithValue(ctx, issuerURLKey, issuerURL) +} + +func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { + client := http.DefaultClient + if c := getClient(ctx); c != nil { + client = c + } + return client.Do(req.WithContext(ctx)) +} + +// Provider represents an OpenID Connect server's configuration. +type Provider struct { + issuer string + authURL string + tokenURL string + userInfoURL string + jwksURL string + algorithms []string + + // Raw claims returned by the server. + rawClaims []byte + + // Guards all of the following fields. + mu sync.Mutex + // HTTP client specified from the initial NewProvider request. This is used + // when creating the common key set. + client *http.Client + // A key set that uses context.Background() and is shared between all code paths + // that don't have a convinent way of supplying a unique context. + commonRemoteKeySet KeySet +} + +func (p *Provider) remoteKeySet() KeySet { + p.mu.Lock() + defer p.mu.Unlock() + if p.commonRemoteKeySet == nil { + ctx := context.Background() + if p.client != nil { + ctx = ClientContext(ctx, p.client) + } + p.commonRemoteKeySet = NewRemoteKeySet(ctx, p.jwksURL) + } + return p.commonRemoteKeySet +} + +type providerJSON struct { + Issuer string `json:"issuer"` + AuthURL string `json:"authorization_endpoint"` + TokenURL string `json:"token_endpoint"` + JWKSURL string `json:"jwks_uri"` + UserInfoURL string `json:"userinfo_endpoint"` + Algorithms []string `json:"id_token_signing_alg_values_supported"` +} + +// supportedAlgorithms is a list of algorithms explicitly supported by this +// package. If a provider supports other algorithms, such as HS256 or none, +// those values won't be passed to the IDTokenVerifier. +var supportedAlgorithms = map[string]bool{ + RS256: true, + RS384: true, + RS512: true, + ES256: true, + ES384: true, + ES512: true, + PS256: true, + PS384: true, + PS512: true, + EdDSA: true, +} + +// ProviderConfig allows creating providers when discovery isn't supported. It's +// generally easier to use NewProvider directly. +type ProviderConfig struct { + // IssuerURL is the identity of the provider, and the string it uses to sign + // ID tokens with. For example "https://accounts.google.com". This value MUST + // match ID tokens exactly. + IssuerURL string + // AuthURL is the endpoint used by the provider to support the OAuth 2.0 + // authorization endpoint. + AuthURL string + // TokenURL is the endpoint used by the provider to support the OAuth 2.0 + // token endpoint. + TokenURL string + // UserInfoURL is the endpoint used by the provider to support the OpenID + // Connect UserInfo flow. 
+ // + // https://openid.net/specs/openid-connect-core-1_0.html#UserInfo + UserInfoURL string + // JWKSURL is the endpoint used by the provider to advertise public keys to + // verify issued ID tokens. This endpoint is polled as new keys are made + // available. + JWKSURL string + + // Algorithms, if provided, indicate a list of JWT algorithms allowed to sign + // ID tokens. If not provided, this defaults to the algorithms advertised by + // the JWK endpoint, then the set of algorithms supported by this package. + Algorithms []string +} + +// NewProvider initializes a provider from a set of endpoints, rather than +// through discovery. +func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider { + return &Provider{ + issuer: p.IssuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + userInfoURL: p.UserInfoURL, + jwksURL: p.JWKSURL, + algorithms: p.Algorithms, + client: getClient(ctx), + } +} + +// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider. +// +// The issuer is the URL identifier for the service. For example: "https://accounts.google.com" +// or "https://login.salesforce.com". +func NewProvider(ctx context.Context, issuer string) (*Provider, error) { + wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration" + req, err := http.NewRequest("GET", wellKnown, nil) + if err != nil { + return nil, err + } + resp, err := doRequest(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read response body: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: %s", resp.Status, body) + } + + var p providerJSON + err = unmarshalResp(resp, body, &p) + if err != nil { + return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err) + } + + issuerURL, skipIssuerValidation := ctx.Value(issuerURLKey).(string) + if !skipIssuerValidation { + issuerURL = issuer + } + if p.Issuer != issuerURL && !skipIssuerValidation { + return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer) + } + var algs []string + for _, a := range p.Algorithms { + if supportedAlgorithms[a] { + algs = append(algs, a) + } + } + return &Provider{ + issuer: issuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + userInfoURL: p.UserInfoURL, + jwksURL: p.JWKSURL, + algorithms: algs, + rawClaims: body, + client: getClient(ctx), + }, nil +} + +// Claims unmarshals raw fields returned by the server during discovery. +// +// var claims struct { +// ScopesSupported []string `json:"scopes_supported"` +// ClaimsSupported []string `json:"claims_supported"` +// } +// +// if err := provider.Claims(&claims); err != nil { +// // handle unmarshaling error +// } +// +// For a list of fields defined by the OpenID Connect spec see: +// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata +func (p *Provider) Claims(v interface{}) error { + if p.rawClaims == nil { + return errors.New("oidc: claims not set") + } + return json.Unmarshal(p.rawClaims, v) +} + +// Endpoint returns the OAuth2 auth and token endpoints for the given provider. +func (p *Provider) Endpoint() oauth2.Endpoint { + return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL} +} + +// UserInfoEndpoint returns the OpenID Connect userinfo endpoint for the given +// provider. 
+func (p *Provider) UserInfoEndpoint() string { + return p.userInfoURL +} + +// UserInfo represents the OpenID Connect userinfo claims. +type UserInfo struct { + Subject string `json:"sub"` + Profile string `json:"profile"` + Email string `json:"email"` + EmailVerified bool `json:"email_verified"` + + claims []byte +} + +type userInfoRaw struct { + Subject string `json:"sub"` + Profile string `json:"profile"` + Email string `json:"email"` + // Handle providers that return email_verified as a string + // https://forums.aws.amazon.com/thread.jspa?messageID=949441󧳁 and + // https://discuss.elastic.co/t/openid-error-after-authenticating-against-aws-cognito/206018/11 + EmailVerified stringAsBool `json:"email_verified"` +} + +// Claims unmarshals the raw JSON object claims into the provided object. +func (u *UserInfo) Claims(v interface{}) error { + if u.claims == nil { + return errors.New("oidc: claims not set") + } + return json.Unmarshal(u.claims, v) +} + +// UserInfo uses the token source to query the provider's user info endpoint. +func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) { + if p.userInfoURL == "" { + return nil, errors.New("oidc: user info endpoint is not supported by this provider") + } + + req, err := http.NewRequest("GET", p.userInfoURL, nil) + if err != nil { + return nil, fmt.Errorf("oidc: create GET request: %v", err) + } + + token, err := tokenSource.Token() + if err != nil { + return nil, fmt.Errorf("oidc: get access token: %v", err) + } + token.SetAuthHeader(req) + + resp, err := doRequest(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: %s", resp.Status, body) + } + + ct := resp.Header.Get("Content-Type") + mediaType, _, parseErr := mime.ParseMediaType(ct) + if parseErr == nil && mediaType == "application/jwt" { + payload, err := p.remoteKeySet().VerifySignature(ctx, string(body)) + if err != nil { + return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err) + } + body = payload + } + + var userInfo userInfoRaw + if err := json.Unmarshal(body, &userInfo); err != nil { + return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err) + } + return &UserInfo{ + Subject: userInfo.Subject, + Profile: userInfo.Profile, + Email: userInfo.Email, + EmailVerified: bool(userInfo.EmailVerified), + claims: body, + }, nil +} + +// IDToken is an OpenID Connect extension that provides a predictable representation +// of an authorization event. +// +// The ID Token only holds fields OpenID Connect requires. To access additional +// claims returned by the server, use the Claims method. +type IDToken struct { + // The URL of the server which issued this token. OpenID Connect + // requires this value always be identical to the URL used for + // initial discovery. + // + // Note: Because of a known issue with Google Accounts' implementation + // this value may differ when using Google. + // + // See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo + Issuer string + + // The client ID, or set of client IDs, that this token is issued for. For + // common uses, this is the client that initialized the auth flow. + // + // This package ensures the audience contains an expected value. + Audience []string + + // A unique string which identifies the end user. + Subject string + + // Expiry of the token. 
This package will not process tokens that have
+	// expired unless that validation is explicitly turned off.
+	Expiry time.Time
+	// When the token was issued by the provider.
+	IssuedAt time.Time
+
+	// Initial nonce provided during the authentication redirect.
+	//
+	// This package does NOT provide verification on the value of this field
+	// and it's the user's responsibility to ensure it contains a valid value.
+	Nonce string
+
+	// at_hash claim, if set in the ID token. Callers can verify an access token
+	// that corresponds to the ID token using the VerifyAccessToken method.
+	AccessTokenHash string
+
+	// signature algorithm used for ID token, needed to compute a verification hash of an
+	// access token
+	sigAlgorithm string
+
+	// Raw payload of the id_token.
+	claims []byte
+
+	// Map of distributed claim names to claim sources
+	distributedClaims map[string]claimSource
+}
+
+// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
+//
+//	idToken, err := idTokenVerifier.Verify(rawIDToken)
+//	if err != nil {
+//		// handle error
+//	}
+//	var claims struct {
+//		Email         string `json:"email"`
+//		EmailVerified bool   `json:"email_verified"`
+//	}
+//	if err := idToken.Claims(&claims); err != nil {
+//		// handle error
+//	}
+func (i *IDToken) Claims(v interface{}) error {
+	if i.claims == nil {
+		return errors.New("oidc: claims not set")
+	}
+	return json.Unmarshal(i.claims, v)
+}
+
+// VerifyAccessToken verifies that the hash of the access token that corresponds to the ID token
+// matches the hash in the ID token. It returns an error if the hashes don't match.
+// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
+// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
+func (i *IDToken) VerifyAccessToken(accessToken string) error {
+	if i.AccessTokenHash == "" {
+		return errNoAtHash
+	}
+	var h hash.Hash
+	switch i.sigAlgorithm {
+	case RS256, ES256, PS256:
+		h = sha256.New()
+	case RS384, ES384, PS384:
+		h = sha512.New384()
+	case RS512, ES512, PS512, EdDSA:
+		h = sha512.New()
+	default:
+		return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
+	}
+	h.Write([]byte(accessToken)) // hash documents that Write will never return an error
+	sum := h.Sum(nil)[:h.Size()/2]
+	actual := base64.RawURLEncoding.EncodeToString(sum)
+	if actual != i.AccessTokenHash {
+		return errInvalidAtHash
+	}
+	return nil
+}
+
+type idToken struct {
+	Issuer       string                 `json:"iss"`
+	Subject      string                 `json:"sub"`
+	Audience     audience               `json:"aud"`
+	Expiry       jsonTime               `json:"exp"`
+	IssuedAt     jsonTime               `json:"iat"`
+	NotBefore    *jsonTime              `json:"nbf"`
+	Nonce        string                 `json:"nonce"`
+	AtHash       string                 `json:"at_hash"`
+	ClaimNames   map[string]string      `json:"_claim_names"`
+	ClaimSources map[string]claimSource `json:"_claim_sources"`
+}
+
+type claimSource struct {
+	Endpoint    string `json:"endpoint"`
+	AccessToken string `json:"access_token"`
+}
+
+type stringAsBool bool
+
+func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
+	switch string(b) {
+	case "true", `"true"`:
+		*sb = true
+	case "false", `"false"`:
+		*sb = false
+	default:
+		return errors.New("invalid value for boolean")
+	}
+	return nil
+}
+
+type audience []string
+
+func (a *audience) UnmarshalJSON(b []byte) error {
+	var s string
+	if json.Unmarshal(b, &s) == nil {
+		*a = audience{s}
+		return nil
+	}
+	var auds []string
+	if err := json.Unmarshal(b, &auds); err != nil {
+		return err
+	}
+	*a = auds
+	return nil
+}
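+
+// Illustrative sketch (not part of the library): the at_hash value checked by
+// VerifyAccessToken above is, for the SHA-256 family (RS256/ES256/PS256), the
+// left half of the access token's SHA-256 digest, base64url-encoded without
+// padding:
+//
+//	sum := sha256.Sum256([]byte(accessToken))
+//	atHash := base64.RawURLEncoding.EncodeToString(sum[:len(sum)/2])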
+
+type jsonTime time.Time
+
+func (j *jsonTime) UnmarshalJSON(b []byte) error {
+	var n json.Number
+	if err := json.Unmarshal(b, &n); err != nil {
+		return err
+	}
+	var unix int64
+
+	if t, err := n.Int64(); err == nil {
+		unix = t
+	} else {
+		f, err := n.Float64()
+		if err != nil {
+			return err
+		}
+		unix = int64(f)
+	}
+	*j = jsonTime(time.Unix(unix, 0))
+	return nil
+}
+
+func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
+	err := json.Unmarshal(body, &v)
+	if err == nil {
+		return nil
+	}
+	ct := r.Header.Get("Content-Type")
+	mediaType, _, parseErr := mime.ParseMediaType(ct)
+	if parseErr == nil && mediaType == "application/json" {
+		return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
+	}
+	return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
+}
diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
new file mode 100644
index 000000000..3e5ffbc76
--- /dev/null
+++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
@@ -0,0 +1,356 @@
+package oidc
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"time"
+
+	jose "github.com/go-jose/go-jose/v3"
+	"golang.org/x/oauth2"
+)
+
+const (
+	issuerGoogleAccounts         = "https://accounts.google.com"
+	issuerGoogleAccountsNoScheme = "accounts.google.com"
+)
+
+// TokenExpiredError indicates that Verify failed because the token was expired. This
+// error does NOT indicate that the token is not also invalid for other reasons. Other
+// checks might have failed if the expiration check had not failed.
+type TokenExpiredError struct {
+	// Expiry is the time when the token expired.
+	Expiry time.Time
+}
+
+func (e *TokenExpiredError) Error() string {
+	return fmt.Sprintf("oidc: token is expired (Token Expiry: %v)", e.Expiry)
+}
+
+// KeySet is a set of public JSON Web Keys that can be used to validate the signature
+// of JSON web tokens. This is expected to be backed by a remote key set through
+// provider metadata discovery or an in-memory set of keys delivered out-of-band.
+type KeySet interface {
+	// VerifySignature parses the JSON web token, verifies the signature, and returns
+	// the raw payload. Header and claim fields are validated by other parts of the
+	// package. For example, the KeySet does not need to check values such as signature
+	// algorithm, issuer, and audience since the IDTokenVerifier validates these values
+	// independently.
+	//
+	// If VerifySignature makes HTTP requests to verify the token, it's expected to
+	// use any HTTP client associated with the context through ClientContext.
+	VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
+}
+
+// IDTokenVerifier provides verification for ID Tokens.
+type IDTokenVerifier struct {
+	keySet KeySet
+	config *Config
+	issuer string
+}
+
+// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
+//
+// It's easier to use provider discovery to construct an IDTokenVerifier than creating
+// one directly. This method is intended to be used with providers that don't support
+// metadata discovery, or to avoid round trips when the key set URL is already known.
+//
+// This constructor can be used to create a verifier directly using the issuer URL and
+// JSON Web Key Set URL without using discovery:
+//
+//	keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
+//	verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
+//
+// Or a static key set (e.g. for testing):
+//
+//	keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}}
+//	verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
+	return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
+}
+
+// Config is the configuration for an IDTokenVerifier.
+type Config struct {
+	// Expected audience of the token. For a majority of the cases this is expected to be
+	// the ID of the client that initialized the login flow. It may occasionally differ if
+	// the provider supports the authorizing party (azp) claim.
+	//
+	// If not provided, users must explicitly set SkipClientIDCheck.
+	ClientID string
+	// If specified, only this set of algorithms may be used to sign the JWT.
+	//
+	// If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this
+	// defaults to the set of algorithms the provider supports. Otherwise this value
+	// defaults to RS256.
+	SupportedSigningAlgs []string
+
+	// If true, no ClientID check is performed. Must be true if ClientID field is empty.
+	SkipClientIDCheck bool
+	// If true, token expiry is not checked.
+	SkipExpiryCheck bool
+
+	// SkipIssuerCheck is intended for specialized cases where the caller wishes to
+	// defer issuer validation. When enabled, callers MUST independently verify the Token's
+	// Issuer is a known good value.
+	//
+	// Mismatched issuers often indicate client mis-configuration. If mismatches are
+	// unexpected, evaluate if the provided issuer URL is incorrect instead of enabling
+	// this option.
+	SkipIssuerCheck bool
+
+	// Time function to check Token expiry. Defaults to time.Now.
+	Now func() time.Time
+
+	// InsecureSkipSignatureCheck causes this package to skip JWT signature validation.
+	// It's intended for special cases where providers (such as Azure), use the "none"
+	// algorithm.
+	//
+	// This option can only be enabled safely when the ID Token is received directly
+	// from the provider after the token exchange.
+	//
+	// This option MUST NOT be used when receiving an ID Token from sources other
+	// than the token endpoint.
+	InsecureSkipSignatureCheck bool
+}
+
+// VerifierContext returns an IDTokenVerifier that uses the provider's key set to
+// verify JWTs. As opposed to Verifier, the context is used for all requests to
+// the upstream JWKs endpoint.
+func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier {
+	return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config)
+}
+
+// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
+//
+// The returned verifier uses a background context for all requests to the upstream
+// JWKs endpoint. To control that context, use VerifierContext instead.
+func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
+	return p.newVerifier(p.remoteKeySet(), config)
+}
+
+func (p *Provider) newVerifier(keySet KeySet, config *Config) *IDTokenVerifier {
+	if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 {
+		// Make a copy so we don't modify the config values.
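+		// (A shallow copy is sufficient here: the only field adjusted below,
+		// SupportedSigningAlgs, is replaced wholesale rather than mutated in place.)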
+		cp := &Config{}
+		*cp = *config
+		cp.SupportedSigningAlgs = p.algorithms
+		config = cp
+	}
+	return NewVerifier(p.issuer, keySet, config)
+}
+
+func parseJWT(p string) ([]byte, error) {
+	parts := strings.Split(p, ".")
+	if len(parts) < 2 {
+		return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
+	}
+	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
+	if err != nil {
+		return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
+	}
+	return payload, nil
+}
+
+func contains(sli []string, ele string) bool {
+	for _, s := range sli {
+		if s == ele {
+			return true
+		}
+	}
+	return false
+}
+
+// Returns the Claims from the distributed JWT token
+func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) {
+	req, err := http.NewRequest("GET", src.Endpoint, nil)
+	if err != nil {
+		return nil, fmt.Errorf("malformed request: %v", err)
+	}
+	if src.AccessToken != "" {
+		req.Header.Set("Authorization", "Bearer "+src.AccessToken)
+	}
+
+	resp, err := doRequest(ctx, req)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err)
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("unable to read response body: %v", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode)
+	}
+
+	token, err := verifier.Verify(ctx, string(body))
+	if err != nil {
+		return nil, fmt.Errorf("malformed response body: %v", err)
+	}
+
+	return token.claims, nil
+}
+
+// Verify parses a raw ID Token, verifies it's been signed by the provider, performs
+// any additional checks depending on the Config, and returns the payload.
+//
+// Verify does NOT do nonce validation, which is the caller's responsibility.
+//
+// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
+//
+//	oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
+//	if err != nil {
+//		// handle error
+//	}
+//
+//	// Extract the ID Token from oauth2 token.
+//	rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+//	if !ok {
+//		// handle error
+//	}
+//
+//	token, err := verifier.Verify(ctx, rawIDToken)
+func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
+	// Throw out tokens with invalid claims before trying to verify the token. This lets
+	// us do cheap checks before possibly re-syncing keys.
+	payload, err := parseJWT(rawIDToken)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+	}
+	var token idToken
+	if err := json.Unmarshal(payload, &token); err != nil {
+		return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
+	}
+
+	distributedClaims := make(map[string]claimSource)
+
+	// Step through the token to map claim names to claim sources.
+	for cn, src := range token.ClaimNames {
+		if src == "" {
+			return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
+		}
+		s, ok := token.ClaimSources[src]
+		if !ok {
+			return nil, fmt.Errorf("oidc: source does not exist")
+		}
+		distributedClaims[cn] = s
+	}
+
+	t := &IDToken{
+		Issuer:            token.Issuer,
+		Subject:           token.Subject,
+		Audience:          []string(token.Audience),
+		Expiry:            time.Time(token.Expiry),
+		IssuedAt:          time.Time(token.IssuedAt),
+		Nonce:             token.Nonce,
+		AccessTokenHash:   token.AtHash,
+		claims:            payload,
+		distributedClaims: distributedClaims,
+	}
+
+	// Check issuer.
+	if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
+		// Google sometimes returns "accounts.google.com" as the issuer claim instead of
+		// the required "https://accounts.google.com". Detect this case and allow it only
+		// for Google.
+		//
+		// We will not add hooks to let other providers go off spec like this.
+		if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
+			return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
+		}
+	}
+
+	// If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
+	//
+	// This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
+	if !v.config.SkipClientIDCheck {
+		if v.config.ClientID != "" {
+			if !contains(t.Audience, v.config.ClientID) {
+				return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
+			}
+		} else {
+			return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
+		}
+	}
+
+	// If SkipExpiryCheck is false, make sure the token is not expired.
+	if !v.config.SkipExpiryCheck {
+		now := time.Now
+		if v.config.Now != nil {
+			now = v.config.Now
+		}
+		nowTime := now()
+
+		if t.Expiry.Before(nowTime) {
+			return nil, &TokenExpiredError{Expiry: t.Expiry}
+		}
+
+		// If nbf claim is provided in token, ensure that it is indeed in the past.
+		if token.NotBefore != nil {
+			nbfTime := time.Time(*token.NotBefore)
+			// Set to 5 minutes since this is what other OpenID Connect providers do to deal with clock skew.
+			// https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/blob/6.12.2/src/Microsoft.IdentityModel.Tokens/TokenValidationParameters.cs#L149-L153
+			leeway := 5 * time.Minute
+
+			if nowTime.Add(leeway).Before(nbfTime) {
+				return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime)
+			}
+		}
+	}
+
+	if v.config.InsecureSkipSignatureCheck {
+		return t, nil
+	}
+
+	jws, err := jose.ParseSigned(rawIDToken)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+	}
+
+	switch len(jws.Signatures) {
+	case 0:
+		return nil, fmt.Errorf("oidc: id token not signed")
+	case 1:
+	default:
+		return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
+	}
+
+	sig := jws.Signatures[0]
+	supportedSigAlgs := v.config.SupportedSigningAlgs
+	if len(supportedSigAlgs) == 0 {
+		supportedSigAlgs = []string{RS256}
+	}
+
+	if !contains(supportedSigAlgs, sig.Header.Algorithm) {
+		return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm)
+	}
+
+	t.sigAlgorithm = sig.Header.Algorithm
+
+	ctx = context.WithValue(ctx, parsedJWTKey, jws)
+	gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
+	if err != nil {
+		return nil, fmt.Errorf("failed to verify signature: %v", err)
+	}
+
+	// Ensure that the payload returned by the key set actually matches the payload parsed earlier.
+	if !bytes.Equal(gotPayload, payload) {
+		return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
+	}
+
+	return t, nil
+}
+
+// Nonce returns an auth code option which requires the ID Token created by the
+// OpenID Connect provider to contain the specified nonce.
+func Nonce(nonce string) oauth2.AuthCodeOption { + return oauth2.SetAuthURLParam("nonce", nonce) +} diff --git a/vendor/github.com/cyberphone/json-canonicalization/LICENSE b/vendor/github.com/cyberphone/json-canonicalization/LICENSE new file mode 100644 index 000000000..591211595 --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/LICENSE @@ -0,0 +1,13 @@ + Copyright 2018 Anders Rundgren + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go new file mode 100644 index 000000000..92574a3f4 --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/es6numfmt.go @@ -0,0 +1,71 @@ +// +// Copyright 2006-2019 WebPKI.org (http://webpki.org). +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// This package converts numbers in IEEE-754 double precision into the +// format specified for JSON in EcmaScript Version 6 and forward. 
+// The core application for this is canonicalization: +// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02 + +package jsoncanonicalizer + +import ( + "errors" + "math" + "strconv" + "strings" +) + +const invalidPattern uint64 = 0x7ff0000000000000 + +func NumberToJSON(ieeeF64 float64) (res string, err error) { + ieeeU64 := math.Float64bits(ieeeF64) + + // Special case: NaN and Infinity are invalid in JSON + if (ieeeU64 & invalidPattern) == invalidPattern { + return "null", errors.New("Invalid JSON number: " + strconv.FormatUint(ieeeU64, 16)) + } + + // Special case: eliminate "-0" as mandated by the ES6-JSON/JCS specifications + if ieeeF64 == 0 { // Right, this line takes both -0 and 0 + return "0", nil + } + + // Deal with the sign separately + var sign string = "" + if ieeeF64 < 0 { + ieeeF64 =-ieeeF64 + sign = "-" + } + + // ES6 has a unique "g" format + var format byte = 'e' + if ieeeF64 < 1e+21 && ieeeF64 >= 1e-6 { + format = 'f' + } + + // The following should do the trick: + es6Formatted := strconv.FormatFloat(ieeeF64, format, -1, 64) + + // Minor cleanup + exponent := strings.IndexByte(es6Formatted, 'e') + if exponent > 0 { + // Go outputs "1e+09" which must be rewritten as "1e+9" + if es6Formatted[exponent + 2] == '0' { + es6Formatted = es6Formatted[:exponent + 2] + es6Formatted[exponent + 3:] + } + } + return sign + es6Formatted, nil +} diff --git a/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go new file mode 100644 index 000000000..661f41055 --- /dev/null +++ b/vendor/github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer/jsoncanonicalizer.go @@ -0,0 +1,378 @@ +// +// Copyright 2006-2019 WebPKI.org (http://webpki.org). +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// This package transforms JSON data in UTF-8 according to: +// https://tools.ietf.org/html/draft-rundgren-json-canonicalization-scheme-02 + +package jsoncanonicalizer + +import ( + "errors" + "container/list" + "fmt" + "strconv" + "strings" + "unicode/utf16" +) + +type nameValueType struct { + name string + sortKey []uint16 + value string +} + +// JSON standard escapes (modulo \u) +var asciiEscapes = []byte{'\\', '"', 'b', 'f', 'n', 'r', 't'} +var binaryEscapes = []byte{'\\', '"', '\b', '\f', '\n', '\r', '\t'} + +// JSON literals +var literals = []string{"true", "false", "null"} + +func Transform(jsonData []byte) (result []byte, e error) { + + // JSON data MUST be UTF-8 encoded + var jsonDataLength int = len(jsonData) + + // Current pointer in jsonData + var index int = 0 + + // "Forward" declarations are needed for closures referring each other + var parseElement func() string + var parseSimpleType func() string + var parseQuotedString func() string + var parseObject func() string + var parseArray func() string + + var globalError error = nil + + checkError := func(e error) { + // We only honor the first reported error + if globalError == nil { + globalError = e + } + } + + setError := func(msg string) { + checkError(errors.New(msg)) + } + + isWhiteSpace := func(c byte) bool { + return c == 0x20 || c == 0x0a || c == 0x0d || c == 0x09 + } + + nextChar := func() byte { + if index < jsonDataLength { + c := jsonData[index] + if c > 0x7f { + setError("Unexpected non-ASCII character") + } + index++ + return c + } + setError("Unexpected EOF reached") + return '"' + } + + scan := func() byte { + for { + c := nextChar() + if isWhiteSpace(c) { + continue; + } + return c + } + } + + scanFor := func(expected byte) { + c := scan() + if c != expected { + setError("Expected '" + string(expected) + "' but got '" + string(c) + "'") + } + } + + getUEscape := func() rune { + start := index + nextChar() + nextChar() + nextChar() + nextChar() + if globalError != nil { + return 0 + } + u16, err := strconv.ParseUint(string(jsonData[start:index]), 16, 64) + checkError(err) + return rune(u16) + } + + testNextNonWhiteSpaceChar := func() byte { + save := index + c := scan() + index = save + return c + } + + decorateString := func(rawUTF8 string) string { + var quotedString strings.Builder + quotedString.WriteByte('"') + CoreLoop: + for _, c := range []byte(rawUTF8) { + // Is this within the JSON standard escapes? 
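+			// (binaryEscapes and asciiEscapes are index-aligned, so a raw control
+			// byte equal to binaryEscapes[i] is emitted as '\' plus asciiEscapes[i].)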
+ for i, esc := range binaryEscapes { + if esc == c { + quotedString.WriteByte('\\') + quotedString.WriteByte(asciiEscapes[i]) + continue CoreLoop + } + } + if c < 0x20 { + // Other ASCII control characters must be escaped with \uhhhh + quotedString.WriteString(fmt.Sprintf("\\u%04x", c)) + } else { + quotedString.WriteByte(c) + } + } + quotedString.WriteByte('"') + return quotedString.String() + } + + parseQuotedString = func() string { + var rawString strings.Builder + CoreLoop: + for globalError == nil { + var c byte + if index < jsonDataLength { + c = jsonData[index] + index++ + } else { + nextChar() + break + } + if (c == '"') { + break; + } + if c < ' ' { + setError("Unterminated string literal") + } else if c == '\\' { + // Escape sequence + c = nextChar() + if c == 'u' { + // The \u escape + firstUTF16 := getUEscape() + if utf16.IsSurrogate(firstUTF16) { + // If the first UTF-16 code unit has a certain value there must be + // another succeeding UTF-16 code unit as well + if nextChar() != '\\' || nextChar() != 'u' { + setError("Missing surrogate") + } else { + // Output the UTF-32 code point as UTF-8 + rawString.WriteRune(utf16.DecodeRune(firstUTF16, getUEscape())) + } + } else { + // Single UTF-16 code identical to UTF-32. Output as UTF-8 + rawString.WriteRune(firstUTF16) + } + } else if c == '/' { + // Benign but useless escape + rawString.WriteByte('/') + } else { + // The JSON standard escapes + for i, esc := range asciiEscapes { + if esc == c { + rawString.WriteByte(binaryEscapes[i]) + continue CoreLoop + } + } + setError("Unexpected escape: \\" + string(c)) + } + } else { + // Just an ordinary ASCII character alternatively a UTF-8 byte + // outside of ASCII. + // Note that properly formatted UTF-8 never clashes with ASCII + // making byte per byte search for ASCII break characters work + // as expected. + rawString.WriteByte(c) + } + } + return rawString.String() + } + + parseSimpleType = func() string { + var token strings.Builder + index-- + for globalError == nil { + c := testNextNonWhiteSpaceChar() + if c == ',' || c == ']' || c == '}' { + break; + } + c = nextChar() + if isWhiteSpace(c) { + break + } + token.WriteByte(c) + } + if token.Len() == 0 { + setError("Missing argument") + } + value := token.String() + // Is it a JSON literal? 
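+		// (true/false/null pass through verbatim; anything else must parse as
+		// an I-JSON number and is re-serialized through NumberToJSON below.)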
+ for _, literal := range literals { + if literal == value { + return literal + } + } + // Apparently not so we assume that it is a I-JSON number + ieeeF64, err := strconv.ParseFloat(value, 64) + checkError(err) + value, err = NumberToJSON(ieeeF64) + checkError(err) + return value + } + + parseElement = func() string { + switch scan() { + case '{': + return parseObject() + case '"': + return decorateString(parseQuotedString()) + case '[': + return parseArray() + default: + return parseSimpleType() + } + } + + parseArray = func() string { + var arrayData strings.Builder + arrayData.WriteByte('[') + var next bool = false + for globalError == nil && testNextNonWhiteSpaceChar() != ']' { + if next { + scanFor(',') + arrayData.WriteByte(',') + } else { + next = true + } + arrayData.WriteString(parseElement()) + } + scan() + arrayData.WriteByte(']') + return arrayData.String() + } + + lexicographicallyPrecedes := func(sortKey []uint16, e *list.Element) bool { + // Find the minimum length of the sortKeys + oldSortKey := e.Value.(nameValueType).sortKey + minLength := len(oldSortKey) + if minLength > len(sortKey) { + minLength = len(sortKey) + } + for q := 0; q < minLength; q++ { + diff := int(sortKey[q]) - int(oldSortKey[q]) + if diff < 0 { + // Smaller => Precedes + return true + } else if diff > 0 { + // Bigger => No match + return false + } + // Still equal => Continue + } + // The sortKeys compared equal up to minLength + if len(sortKey) < len(oldSortKey) { + // Shorter => Precedes + return true + } + if len(sortKey) == len(oldSortKey) { + setError("Duplicate key: " + e.Value.(nameValueType).name) + } + // Longer => No match + return false + } + + parseObject = func() string { + nameValueList := list.New() + var next bool = false + CoreLoop: + for globalError == nil && testNextNonWhiteSpaceChar() != '}' { + if next { + scanFor(',') + } + next = true + scanFor('"') + rawUTF8 := parseQuotedString() + if globalError != nil { + break; + } + // Sort keys on UTF-16 code units + // Since UTF-8 doesn't have endianess this is just a value transformation + // In the Go case the transformation is UTF-8 => UTF-32 => UTF-16 + sortKey := utf16.Encode([]rune(rawUTF8)) + scanFor(':') + nameValue := nameValueType{rawUTF8, sortKey, parseElement()} + for e := nameValueList.Front(); e != nil; e = e.Next() { + // Check if the key is smaller than a previous key + if lexicographicallyPrecedes(sortKey, e) { + // Precedes => Insert before and exit sorting + nameValueList.InsertBefore(nameValue, e) + continue CoreLoop + } + // Continue searching for a possibly succeeding sortKey + // (which is straightforward since the list is ordered) + } + // The sortKey is either the first or is succeeding all previous sortKeys + nameValueList.PushBack(nameValue) + } + // Scan away '}' + scan() + // Now everything is sorted so we can properly serialize the object + var objectData strings.Builder + objectData.WriteByte('{') + next = false + for e := nameValueList.Front(); e != nil; e = e.Next() { + if next { + objectData.WriteByte(',') + } + next = true + nameValue := e.Value.(nameValueType) + objectData.WriteString(decorateString(nameValue.name)) + objectData.WriteByte(':') + objectData.WriteString(nameValue.value) + } + objectData.WriteByte('}') + return objectData.String() + } + + ///////////////////////////////////////////////// + // This is where Transform actually begins... 
// + ///////////////////////////////////////////////// + var transformed string + + if testNextNonWhiteSpaceChar() == '[' { + scan() + transformed = parseArray() + } else { + scanFor('{') + transformed = parseObject() + } + for index < jsonDataLength { + if !isWhiteSpace(jsonData[index]) { + setError("Improperly terminated JSON object") + break; + } + index++ + } + return []byte(transformed), globalError +} \ No newline at end of file diff --git a/vendor/github.com/digitorus/pkcs7/.gitignore b/vendor/github.com/digitorus/pkcs7/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/digitorus/pkcs7/LICENSE b/vendor/github.com/digitorus/pkcs7/LICENSE new file mode 100644 index 000000000..75f320908 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Smith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/digitorus/pkcs7/Makefile b/vendor/github.com/digitorus/pkcs7/Makefile new file mode 100644 index 000000000..47c73b868 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/Makefile @@ -0,0 +1,20 @@ +all: vet staticcheck test + +test: + go test -covermode=count -coverprofile=coverage.out . + +showcoverage: test + go tool cover -html=coverage.out + +vet: + go vet . + +lint: + golint . + +staticcheck: + staticcheck . + +gettools: + go get -u honnef.co/go/tools/... + go get -u golang.org/x/lint/golint diff --git a/vendor/github.com/digitorus/pkcs7/README.md b/vendor/github.com/digitorus/pkcs7/README.md new file mode 100644 index 000000000..a55d117c6 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/README.md @@ -0,0 +1,69 @@ +# pkcs7 + +[![GoDoc](https://godoc.org/go.mozilla.org/pkcs7?status.svg)](https://godoc.org/go.mozilla.org/pkcs7) +[![Build Status](https://github.com/mozilla-services/pkcs7/workflows/CI/badge.svg?branch=master&event=push)](https://github.com/mozilla-services/pkcs7/actions/workflows/ci.yml?query=branch%3Amaster+event%3Apush) + +pkcs7 implements parsing and creating signed and enveloped messages. 
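+
+The example below creates a detached signature, then parses the result,
+reattaches the content, and verifies it: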
+ +```go +package main + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "os" + + "go.mozilla.org/pkcs7" +) + +func SignAndDetach(content []byte, cert *x509.Certificate, privkey *rsa.PrivateKey) (signed []byte, err error) { + toBeSigned, err := NewSignedData(content) + if err != nil { + err = fmt.Errorf("Cannot initialize signed data: %s", err) + return + } + if err = toBeSigned.AddSigner(cert, privkey, SignerInfoConfig{}); err != nil { + err = fmt.Errorf("Cannot add signer: %s", err) + return + } + + // Detach signature, omit if you want an embedded signature + toBeSigned.Detach() + + signed, err = toBeSigned.Finish() + if err != nil { + err = fmt.Errorf("Cannot finish signing data: %s", err) + return + } + + // Verify the signature + pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed}) + p7, err := pkcs7.Parse(signed) + if err != nil { + err = fmt.Errorf("Cannot parse our signed data: %s", err) + return + } + + // since the signature was detached, reattach the content here + p7.Content = content + + if bytes.Compare(content, p7.Content) != 0 { + err = fmt.Errorf("Our content was not in the parsed data:\n\tExpected: %s\n\tActual: %s", content, p7.Content) + return + } + if err = p7.Verify(); err != nil { + err = fmt.Errorf("Cannot verify our signed data: %s", err) + return + } + + return signed, nil +} +``` + + + +## Credits +This is a fork of [fullsailor/pkcs7](https://github.com/fullsailor/pkcs7) diff --git a/vendor/github.com/digitorus/pkcs7/ber.go b/vendor/github.com/digitorus/pkcs7/ber.go new file mode 100644 index 000000000..73da024a0 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/ber.go @@ -0,0 +1,271 @@ +package pkcs7 + +import ( + "bytes" + "errors" +) + +var encodeIndent = 0 + +type asn1Object interface { + EncodeTo(writer *bytes.Buffer) error +} + +type asn1Structured struct { + tagBytes []byte + content []asn1Object +} + +func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { + //fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes) + encodeIndent++ + inner := new(bytes.Buffer) + for _, obj := range s.content { + err := obj.EncodeTo(inner) + if err != nil { + return err + } + } + encodeIndent-- + out.Write(s.tagBytes) + encodeLength(out, inner.Len()) + out.Write(inner.Bytes()) + return nil +} + +type asn1Primitive struct { + tagBytes []byte + length int + content []byte +} + +func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error { + _, err := out.Write(p.tagBytes) + if err != nil { + return err + } + if err = encodeLength(out, p.length); err != nil { + return err + } + //fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length) + //fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content)) + out.Write(p.content) + + return nil +} + +func ber2der(ber []byte) ([]byte, error) { + if len(ber) == 0 { + return nil, errors.New("ber2der: input ber is empty") + } + //fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber)) + out := new(bytes.Buffer) + + obj, _, err := readObject(ber, 0) + if err != nil { + return nil, err + } + obj.EncodeTo(out) + + // if offset < len(ber) { + // return nil, fmt.Errorf("ber2der: Content longer than expected. 
Got %d, expected %d", offset, len(ber)) + //} + + return out.Bytes(), nil +} + +// encodes lengths that are longer than 127 into string of bytes +func marshalLongLength(out *bytes.Buffer, i int) (err error) { + n := lengthLength(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +// computes the byte length of an encoded length value +func lengthLength(i int) (numBytes int) { + numBytes = 1 + for i > 255 { + numBytes++ + i >>= 8 + } + return +} + +// encodes the length in DER format +// If the length fits in 7 bits, the value is encoded directly. +// +// Otherwise, the number of bytes to encode the length is first determined. +// This number is likely to be 4 or less for a 32bit length. This number is +// added to 0x80. The length is encoded in big endian encoding follow after +// +// Examples: +// length | byte 1 | bytes n +// 0 | 0x00 | - +// 120 | 0x78 | - +// 200 | 0x81 | 0xC8 +// 500 | 0x82 | 0x01 0xF4 +// +func encodeLength(out *bytes.Buffer, length int) (err error) { + if length >= 128 { + l := lengthLength(length) + err = out.WriteByte(0x80 | byte(l)) + if err != nil { + return + } + err = marshalLongLength(out, length) + if err != nil { + return + } + } else { + err = out.WriteByte(byte(length)) + if err != nil { + return + } + } + return +} + +func readObject(ber []byte, offset int) (asn1Object, int, error) { + berLen := len(ber) + if offset >= berLen { + return nil, 0, errors.New("ber2der: offset is after end of ber data") + } + tagStart := offset + b := ber[offset] + offset++ + if offset >= berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + tag := b & 0x1F // last 5 bits + if tag == 0x1F { + tag = 0 + for ber[offset] >= 0x80 { + tag = tag*128 + ber[offset] - 0x80 + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + } + // jvehent 20170227: this doesn't appear to be used anywhere... 
+ //tag = tag*128 + ber[offset] - 0x80 + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + } + tagEnd := offset + + kind := b & 0x20 + if kind == 0 { + debugprint("--> Primitive\n") + } else { + debugprint("--> Constructed\n") + } + // read length + var length int + l := ber[offset] + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + indefinite := false + if l > 0x80 { + numberOfBytes := (int)(l & 0x7F) + if numberOfBytes > 4 { // int is only guaranteed to be 32bit + return nil, 0, errors.New("ber2der: BER tag length too long") + } + if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F { + return nil, 0, errors.New("ber2der: BER tag length is negative") + } + if (int)(ber[offset]) == 0x0 { + return nil, 0, errors.New("ber2der: BER tag length has leading zero") + } + debugprint("--> (compute length) indicator byte: %x\n", l) + debugprint("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes]) + for i := 0; i < numberOfBytes; i++ { + length = length*256 + (int)(ber[offset]) + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + } + } else if l == 0x80 { + indefinite = true + } else { + length = (int)(l) + } + if length < 0 { + return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length") + } + //fmt.Printf("--> length : %d\n", length) + contentEnd := offset + length + if contentEnd > len(ber) { + return nil, 0, errors.New("ber2der: BER tag length is more than available data") + } + debugprint("--> content start : %d\n", offset) + debugprint("--> content end : %d\n", contentEnd) + debugprint("--> content : % X\n", ber[offset:contentEnd]) + var obj asn1Object + if indefinite && kind == 0 { + return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding") + } + if kind == 0 { + obj = asn1Primitive{ + tagBytes: ber[tagStart:tagEnd], + length: length, + content: ber[offset:contentEnd], + } + } else { + var subObjects []asn1Object + for (offset < contentEnd) || indefinite { + var subObj asn1Object + var err error + subObj, offset, err = readObject(ber, offset) + if err != nil { + return nil, 0, err + } + subObjects = append(subObjects, subObj) + + if indefinite { + terminated, err := isIndefiniteTermination(ber, offset) + if err != nil { + return nil, 0, err + } + + if terminated { + break + } + } + } + obj = asn1Structured{ + tagBytes: ber[tagStart:tagEnd], + content: subObjects, + } + } + + // Apply indefinite form length with 0x0000 terminator. 
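+	// (The two zero terminator bytes are not counted in length, so the offset
+	// returned to the caller must be advanced past them explicitly.)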
+ if indefinite { + contentEnd = offset + 2 + } + + return obj, contentEnd, nil +} + +func isIndefiniteTermination(ber []byte, offset int) (bool, error) { + if len(ber) - offset < 2 { + return false, errors.New("ber2der: Invalid BER format") + } + + return bytes.Index(ber[offset:], []byte{0x0, 0x0}) == 0, nil +} + +func debugprint(format string, a ...interface{}) { + //fmt.Printf(format, a) +} diff --git a/vendor/github.com/digitorus/pkcs7/decrypt.go b/vendor/github.com/digitorus/pkcs7/decrypt.go new file mode 100644 index 000000000..0d088d628 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/decrypt.go @@ -0,0 +1,177 @@ +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/asn1" + "errors" + "fmt" +) + +// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed +var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported") + +// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data +var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type") + +// Decrypt decrypts encrypted content info for recipient cert and private key +func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte, error) { + data, ok := p7.raw.(envelopedData) + if !ok { + return nil, ErrNotEncryptedContent + } + recipient := selectRecipientForCertificate(data.RecipientInfos, cert) + if recipient.EncryptedKey == nil { + return nil, errors.New("pkcs7: no enveloped recipient for provided certificate") + } + switch pkey := pkey.(type) { + case *rsa.PrivateKey: + var contentKey []byte + contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, pkey, recipient.EncryptedKey) + if err != nil { + return nil, err + } + return data.EncryptedContentInfo.decrypt(contentKey) + } + return nil, ErrUnsupportedAlgorithm +} + +// DecryptUsingPSK decrypts encrypted data using caller provided +// pre-shared secret +func (p7 *PKCS7) DecryptUsingPSK(key []byte) ([]byte, error) { + data, ok := p7.raw.(encryptedData) + if !ok { + return nil, ErrNotEncryptedContent + } + return data.EncryptedContentInfo.decrypt(key) +} + +func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) { + alg := eci.ContentEncryptionAlgorithm.Algorithm + if !alg.Equal(OIDEncryptionAlgorithmDESCBC) && + !alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC) && + !alg.Equal(OIDEncryptionAlgorithmAES256CBC) && + !alg.Equal(OIDEncryptionAlgorithmAES128CBC) && + !alg.Equal(OIDEncryptionAlgorithmAES128GCM) && + !alg.Equal(OIDEncryptionAlgorithmAES256GCM) { + fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg) + return nil, ErrUnsupportedAlgorithm + } + + // EncryptedContent can either be constructed of multple OCTET STRINGs + // or _be_ a tagged OCTET STRING + var cyphertext []byte + if eci.EncryptedContent.IsCompound { + // Complex case to concat all of the children OCTET STRINGs + var buf bytes.Buffer + cypherbytes := eci.EncryptedContent.Bytes + for { + var part []byte + cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part) + buf.Write(part) + if cypherbytes == nil { + break + } + } + cyphertext = buf.Bytes() + } else { + // Simple case, the bytes _are_ the cyphertext + cyphertext = eci.EncryptedContent.Bytes + } + + var block cipher.Block + var err error + + switch { + case alg.Equal(OIDEncryptionAlgorithmDESCBC): + block, err = des.NewCipher(key) + case 
alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC): + block, err = des.NewTripleDESCipher(key) + case alg.Equal(OIDEncryptionAlgorithmAES256CBC), alg.Equal(OIDEncryptionAlgorithmAES256GCM): + fallthrough + case alg.Equal(OIDEncryptionAlgorithmAES128GCM), alg.Equal(OIDEncryptionAlgorithmAES128CBC): + block, err = aes.NewCipher(key) + } + + if err != nil { + return nil, err + } + + if alg.Equal(OIDEncryptionAlgorithmAES128GCM) || alg.Equal(OIDEncryptionAlgorithmAES256GCM) { + params := aesGCMParameters{} + paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes + + _, err := asn1.Unmarshal(paramBytes, ¶ms) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + if len(params.Nonce) != gcm.NonceSize() { + return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect") + } + if params.ICVLen != gcm.Overhead() { + return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect") + } + + plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil) + if err != nil { + return nil, err + } + + return plaintext, nil + } + + iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes + if len(iv) != block.BlockSize() { + return nil, errors.New("pkcs7: encryption algorithm parameters are malformed") + } + mode := cipher.NewCBCDecrypter(block, iv) + plaintext := make([]byte, len(cyphertext)) + mode.CryptBlocks(plaintext, cyphertext) + if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil { + return nil, err + } + return plaintext, nil +} + +func unpad(data []byte, blocklen int) ([]byte, error) { + if blocklen < 1 { + return nil, fmt.Errorf("invalid blocklen %d", blocklen) + } + if len(data)%blocklen != 0 || len(data) == 0 { + return nil, fmt.Errorf("invalid data len %d", len(data)) + } + + // the last byte is the length of padding + padlen := int(data[len(data)-1]) + + // check padding integrity, all bytes should be the same + pad := data[len(data)-padlen:] + for _, padbyte := range pad { + if padbyte != byte(padlen) { + return nil, errors.New("invalid padding") + } + } + + return data[:len(data)-padlen], nil +} + +func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo { + for _, recp := range recipients { + if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) { + return recp + } + } + return recipientInfo{} +} diff --git a/vendor/github.com/digitorus/pkcs7/encrypt.go b/vendor/github.com/digitorus/pkcs7/encrypt.go new file mode 100644 index 000000000..6b2655708 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/encrypt.go @@ -0,0 +1,399 @@ +package pkcs7 + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +type envelopedData struct { + Version int + RecipientInfos []recipientInfo `asn1:"set"` + EncryptedContentInfo encryptedContentInfo +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type recipientInfo struct { + Version int + IssuerAndSerialNumber issuerAndSerial + KeyEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedKey []byte +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent asn1.RawValue `asn1:"tag:0,optional"` +} + +const ( + // EncryptionAlgorithmDESCBC is the DES CBC encryption algorithm + EncryptionAlgorithmDESCBC = iota + + // EncryptionAlgorithmAES128CBC 
is the AES 128 bits with CBC encryption algorithm + // Avoid this algorithm unless required for interoperability; use AES GCM instead. + EncryptionAlgorithmAES128CBC + + // EncryptionAlgorithmAES256CBC is the AES 256 bits with CBC encryption algorithm + // Avoid this algorithm unless required for interoperability; use AES GCM instead. + EncryptionAlgorithmAES256CBC + + // EncryptionAlgorithmAES128GCM is the AES 128 bits with GCM encryption algorithm + EncryptionAlgorithmAES128GCM + + // EncryptionAlgorithmAES256GCM is the AES 256 bits with GCM encryption algorithm + EncryptionAlgorithmAES256GCM +) + +// ContentEncryptionAlgorithm determines the algorithm used to encrypt the +// plaintext message. Change the value of this variable to change which +// algorithm is used in the Encrypt() function. +var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC + +// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt +// content with an unsupported algorithm. +var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC, AES-CBC, and AES-GCM supported") + +// ErrPSKNotProvided is returned when attempting to encrypt +// using a PSK without actually providing the PSK. +var ErrPSKNotProvided = errors.New("pkcs7: cannot encrypt content: PSK not provided") + +const nonceSize = 12 + +type aesGCMParameters struct { + Nonce []byte `asn1:"tag:4"` + ICVLen int +} + +func encryptAESGCM(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + var keyLen int + var algID asn1.ObjectIdentifier + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmAES128GCM: + keyLen = 16 + algID = OIDEncryptionAlgorithmAES128GCM + case EncryptionAlgorithmAES256GCM: + keyLen = 32 + algID = OIDEncryptionAlgorithmAES256GCM + default: + return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESGCM: %d", ContentEncryptionAlgorithm) + } + if key == nil { + // Create AES key + key = make([]byte, keyLen) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create nonce + nonce := make([]byte, nonceSize) + + _, err := rand.Read(nonce) + if err != nil { + return nil, nil, err + } + + // Encrypt content + block, err := aes.NewCipher(key) + if err != nil { + return nil, nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, nil, err + } + + ciphertext := gcm.Seal(nil, nonce, content, nil) + + // Prepare ASN.1 Encrypted Content Info + paramSeq := aesGCMParameters{ + Nonce: nonce, + ICVLen: gcm.Overhead(), + } + + paramBytes, err := asn1.Marshal(paramSeq) + if err != nil { + return nil, nil, err + } + + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: algID, + Parameters: asn1.RawValue{ + Tag: asn1.TagSequence, + Bytes: paramBytes, + }, + }, + EncryptedContent: marshalEncryptedContent(ciphertext), + } + + return key, &eci, nil +} + +func encryptDESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + if key == nil { + // Create DES key + key = make([]byte, 8) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create CBC IV + iv := make([]byte, des.BlockSize) + _, err := rand.Read(iv) + if err != nil { + return nil, nil, err + } + + // Encrypt padded content + block, err := des.NewCipher(key) + if err != nil { + return nil, nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + plaintext, err := pad(content, mode.BlockSize()) + if err != nil { + return nil, 
nil, err + } + cyphertext := make([]byte, len(plaintext)) + mode.CryptBlocks(cyphertext, plaintext) + + // Prepare ASN.1 Encrypted Content Info + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: OIDEncryptionAlgorithmDESCBC, + Parameters: asn1.RawValue{Tag: 4, Bytes: iv}, + }, + EncryptedContent: marshalEncryptedContent(cyphertext), + } + + return key, &eci, nil +} + +func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + var keyLen int + var algID asn1.ObjectIdentifier + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmAES128CBC: + keyLen = 16 + algID = OIDEncryptionAlgorithmAES128CBC + case EncryptionAlgorithmAES256CBC: + keyLen = 32 + algID = OIDEncryptionAlgorithmAES256CBC + default: + return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESCBC: %d", ContentEncryptionAlgorithm) + } + + if key == nil { + // Create AES key + key = make([]byte, keyLen) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create CBC IV + iv := make([]byte, aes.BlockSize) + _, err := rand.Read(iv) + if err != nil { + return nil, nil, err + } + + // Encrypt padded content + block, err := aes.NewCipher(key) + if err != nil { + return nil, nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + plaintext, err := pad(content, mode.BlockSize()) + if err != nil { + return nil, nil, err + } + cyphertext := make([]byte, len(plaintext)) + mode.CryptBlocks(cyphertext, plaintext) + + // Prepare ASN.1 Encrypted Content Info + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: algID, + Parameters: asn1.RawValue{Tag: 4, Bytes: iv}, + }, + EncryptedContent: marshalEncryptedContent(cyphertext), + } + + return key, &eci, nil +} + +// Encrypt creates and returns an envelope data PKCS7 structure with encrypted +// recipient keys for each recipient public key. +// +// The algorithm used to perform encryption is determined by the current value +// of the global ContentEncryptionAlgorithm package variable. By default, the +// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the +// value before calling Encrypt(). 
For example: +// +// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM +// +// TODO(fullsailor): Add support for encrypting content with other algorithms +func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { + var eci *encryptedContentInfo + var key []byte + var err error + + // Apply chosen symmetric encryption method + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmDESCBC: + key, eci, err = encryptDESCBC(content, nil) + case EncryptionAlgorithmAES128CBC: + fallthrough + case EncryptionAlgorithmAES256CBC: + key, eci, err = encryptAESCBC(content, nil) + case EncryptionAlgorithmAES128GCM: + fallthrough + case EncryptionAlgorithmAES256GCM: + key, eci, err = encryptAESGCM(content, nil) + + default: + return nil, ErrUnsupportedEncryptionAlgorithm + } + + if err != nil { + return nil, err + } + + // Prepare each recipient's encrypted cipher key + recipientInfos := make([]recipientInfo, len(recipients)) + for i, recipient := range recipients { + encrypted, err := encryptKey(key, recipient) + if err != nil { + return nil, err + } + ias, err := cert2issuerAndSerial(recipient) + if err != nil { + return nil, err + } + info := recipientInfo{ + Version: 0, + IssuerAndSerialNumber: ias, + KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: OIDEncryptionAlgorithmRSA, + }, + EncryptedKey: encrypted, + } + recipientInfos[i] = info + } + + // Prepare envelope content + envelope := envelopedData{ + EncryptedContentInfo: *eci, + Version: 0, + RecipientInfos: recipientInfos, + } + innerContent, err := asn1.Marshal(envelope) + if err != nil { + return nil, err + } + + // Prepare outer payload structure + wrapper := contentInfo{ + ContentType: OIDEnvelopedData, + Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent}, + } + + return asn1.Marshal(wrapper) +} + +// EncryptUsingPSK creates and returns an encrypted data PKCS7 structure, +// encrypted using caller provided pre-shared secret. 
+func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) { + var eci *encryptedContentInfo + var err error + + if key == nil { + return nil, ErrPSKNotProvided + } + + // Apply chosen symmetric encryption method + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmDESCBC: + _, eci, err = encryptDESCBC(content, key) + + case EncryptionAlgorithmAES128GCM: + fallthrough + case EncryptionAlgorithmAES256GCM: + _, eci, err = encryptAESGCM(content, key) + + default: + return nil, ErrUnsupportedEncryptionAlgorithm + } + + if err != nil { + return nil, err + } + + // Prepare encrypted-data content + ed := encryptedData{ + Version: 0, + EncryptedContentInfo: *eci, + } + innerContent, err := asn1.Marshal(ed) + if err != nil { + return nil, err + } + + // Prepare outer payload structure + wrapper := contentInfo{ + ContentType: OIDEncryptedData, + Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent}, + } + + return asn1.Marshal(wrapper) +} + +func marshalEncryptedContent(content []byte) asn1.RawValue { + asn1Content, _ := asn1.Marshal(content) + return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true} +} + +func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) { + // use a comma-ok assertion so non-RSA recipient keys fail cleanly instead of panicking + if pub, ok := recipient.PublicKey.(*rsa.PublicKey); ok { + return rsa.EncryptPKCS1v15(rand.Reader, pub, key) + } + return nil, ErrUnsupportedAlgorithm +} + +func pad(data []byte, blocklen int) ([]byte, error) { + if blocklen < 1 { + return nil, fmt.Errorf("invalid blocklen %d", blocklen) + } + padlen := blocklen - (len(data) % blocklen) + if padlen == 0 { + padlen = blocklen + } + pad := bytes.Repeat([]byte{byte(padlen)}, padlen) + return append(data, pad...), nil +} diff --git a/vendor/github.com/digitorus/pkcs7/pkcs7.go b/vendor/github.com/digitorus/pkcs7/pkcs7.go new file mode 100644 index 000000000..0b74dff9b --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/pkcs7.go @@ -0,0 +1,302 @@ +// Package pkcs7 implements parsing and generation of some PKCS#7 structures. +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "sort" + + _ "crypto/sha1" // for crypto.SHA1 +) + +// PKCS7 represents a PKCS7 structure +type PKCS7 struct { + Content []byte + Certificates []*x509.Certificate + CRLs []pkix.CertificateList + Signers []signerInfo + raw interface{} +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"explicit,optional,tag:0"` +} + +// ErrUnsupportedContentType is returned when a PKCS7 content is not supported. 
+// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2), +// Enveloped Data (1.2.840.113549.1.7.3), and Encrypted Data (1.2.840.113549.1.7.6) are supported. +var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type") + +type unsignedData []byte + +var ( + // Signed Data OIDs + OIDData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1} + OIDSignedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2} + OIDEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3} + OIDEncryptedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6} + OIDAttributeContentType = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3} + OIDAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4} + OIDAttributeSigningTime = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5} + + // Digest Algorithms + OIDDigestAlgorithmSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26} + OIDDigestAlgorithmSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + OIDDigestAlgorithmSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + OIDDigestAlgorithmSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + + OIDDigestAlgorithmDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + OIDDigestAlgorithmDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + + OIDDigestAlgorithmECDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + OIDDigestAlgorithmECDSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + OIDDigestAlgorithmECDSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + OIDDigestAlgorithmECDSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + + // Signature Algorithms + OIDEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + OIDEncryptionAlgorithmRSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + + OIDEncryptionAlgorithmECDSAP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + OIDEncryptionAlgorithmECDSAP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + OIDEncryptionAlgorithmECDSAP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} + + OIDEncryptionAlgorithmEDDSA25519 = asn1.ObjectIdentifier{1, 3, 101, 112} + + // Encryption Algorithms + OIDEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} + OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} + OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} + OIDEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} + OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} + OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} +) + +func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) { + switch { + case oid.Equal(OIDDigestAlgorithmSHA1), oid.Equal(OIDDigestAlgorithmECDSASHA1), + oid.Equal(OIDDigestAlgorithmDSA), oid.Equal(OIDDigestAlgorithmDSASHA1), + oid.Equal(OIDEncryptionAlgorithmRSA): + return crypto.SHA1, nil + case oid.Equal(OIDDigestAlgorithmSHA256), oid.Equal(OIDDigestAlgorithmECDSASHA256): + return crypto.SHA256, nil + case oid.Equal(OIDDigestAlgorithmSHA384), oid.Equal(OIDDigestAlgorithmECDSASHA384): + return crypto.SHA384, nil + case oid.Equal(OIDDigestAlgorithmSHA512), 
oid.Equal(OIDDigestAlgorithmECDSASHA512): + return crypto.SHA512, nil + } + return crypto.Hash(0), ErrUnsupportedAlgorithm +} + +// getDigestOIDForSignatureAlgorithm takes an x509.SignatureAlgorithm +// and returns the corresponding OID digest algorithm +func getDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1.ObjectIdentifier, error) { + switch digestAlg { + case x509.SHA1WithRSA, x509.ECDSAWithSHA1: + return OIDDigestAlgorithmSHA1, nil + case x509.SHA256WithRSA, x509.ECDSAWithSHA256: + return OIDDigestAlgorithmSHA256, nil + case x509.SHA384WithRSA, x509.ECDSAWithSHA384: + return OIDDigestAlgorithmSHA384, nil + case x509.SHA512WithRSA, x509.ECDSAWithSHA512, x509.PureEd25519: + return OIDDigestAlgorithmSHA512, nil + } + return nil, fmt.Errorf("pkcs7: cannot convert hash to oid, unknown hash algorithm") +} + +// getOIDForEncryptionAlgorithm takes a private key or signer and +// the OID of a digest algorithm to return the appropriate signerInfo.DigestEncryptionAlgorithm +func getOIDForEncryptionAlgorithm(keyOrSigner interface{}, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) { + _, ok := keyOrSigner.(*dsa.PrivateKey) + if ok { + return OIDDigestAlgorithmDSA, nil + } + + signer, ok := keyOrSigner.(crypto.Signer) + if !ok { + return nil, errors.New("pkcs7: key does not implement crypto.Signer") + } + switch signer.Public().(type) { + case *rsa.PublicKey: + switch { + default: + return OIDEncryptionAlgorithmRSA, nil + case OIDDigestAlg.Equal(OIDEncryptionAlgorithmRSA): + return OIDEncryptionAlgorithmRSA, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): + return OIDEncryptionAlgorithmRSASHA1, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256): + return OIDEncryptionAlgorithmRSASHA256, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384): + return OIDEncryptionAlgorithmRSASHA384, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): + return OIDEncryptionAlgorithmRSASHA512, nil + } + case *ecdsa.PublicKey: + switch { + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): + return OIDDigestAlgorithmECDSASHA1, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256): + return OIDDigestAlgorithmECDSASHA256, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384): + return OIDDigestAlgorithmECDSASHA384, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): + return OIDDigestAlgorithmECDSASHA512, nil + } + case ed25519.PublicKey: + return OIDEncryptionAlgorithmEDDSA25519, nil + } + return nil, fmt.Errorf("pkcs7: cannot convert encryption algorithm to oid, unknown key type %T", signer.Public()) +} + +// Parse decodes a DER encoded PKCS7 package +func Parse(data []byte) (p7 *PKCS7, err error) { + if len(data) == 0 { + return nil, errors.New("pkcs7: input data is empty") + } + var info contentInfo + der, err := ber2der(data) + if err != nil { + return nil, err + } + rest, err := asn1.Unmarshal(der, &info) + if len(rest) > 0 { + err = asn1.SyntaxError{Msg: "trailing data"} + return + } + if err != nil { + return + } + + // fmt.Printf("--> Content Type: %s", info.ContentType) + switch { + case info.ContentType.Equal(OIDSignedData): + return parseSignedData(info.Content.Bytes) + case info.ContentType.Equal(OIDEnvelopedData): + return parseEnvelopedData(info.Content.Bytes) + case info.ContentType.Equal(OIDEncryptedData): + return parseEncryptedData(info.Content.Bytes) + } + return nil, ErrUnsupportedContentType +} + +func parseEnvelopedData(data []byte) (*PKCS7, error) { + var ed envelopedData + if _, err := asn1.Unmarshal(data, &ed); err != nil { + 
return nil, err + } + return &PKCS7{ + raw: ed, + }, nil +} + +func parseEncryptedData(data []byte) (*PKCS7, error) { + var ed encryptedData + if _, err := asn1.Unmarshal(data, &ed); err != nil { + return nil, err + } + return &PKCS7{ + raw: ed, + }, nil +} + +func (raw rawCertificates) Parse() ([]*x509.Certificate, error) { + if len(raw.Raw) == 0 { + return nil, nil + } + + var val asn1.RawValue + if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil { + return nil, err + } + + return x509.ParseCertificates(val.Bytes) +} + +func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool { + return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Equal(cert.RawIssuer, ias.IssuerName.FullBytes) +} + +// Attribute represents a key-value pair attribute. Value must be marshalable by +// `encoding/asn1` +type Attribute struct { + Type asn1.ObjectIdentifier + Value interface{} +} + +type attributes struct { + types []asn1.ObjectIdentifier + values []interface{} +} + +// Add adds the attribute, maintaining insertion order +func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) { + attrs.types = append(attrs.types, attrType) + attrs.values = append(attrs.values, value) +} + +type sortableAttribute struct { + SortKey []byte + Attribute attribute +} + +type attributeSet []sortableAttribute + +func (sa attributeSet) Len() int { + return len(sa) +} + +func (sa attributeSet) Less(i, j int) bool { + return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0 +} + +func (sa attributeSet) Swap(i, j int) { + sa[i], sa[j] = sa[j], sa[i] +} + +func (sa attributeSet) Attributes() []attribute { + attrs := make([]attribute, len(sa)) + for i, attr := range sa { + attrs[i] = attr.Attribute + } + return attrs +} + +func (attrs *attributes) ForMarshalling() ([]attribute, error) { + sortables := make(attributeSet, len(attrs.types)) + for i := range sortables { + attrType := attrs.types[i] + attrValue := attrs.values[i] + asn1Value, err := asn1.Marshal(attrValue) + if err != nil { + return nil, err + } + attr := attribute{ + Type: attrType, + Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag + } + encoded, err := asn1.Marshal(attr) + if err != nil { + return nil, err + } + sortables[i] = sortableAttribute{ + SortKey: encoded, + Attribute: attr, + } + } + sort.Sort(sortables) + return sortables.Attributes(), nil +} diff --git a/vendor/github.com/digitorus/pkcs7/sign.go b/vendor/github.com/digitorus/pkcs7/sign.go new file mode 100644 index 000000000..6cfd2ab9c --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/sign.go @@ -0,0 +1,456 @@ +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ed25519" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "math/big" + "time" +) + +// SignedData is an opaque data structure for creating signed data payloads +type SignedData struct { + sd signedData + certs []*x509.Certificate + data, messageDigest []byte + digestOid asn1.ObjectIdentifier + encryptionOid asn1.ObjectIdentifier +} + +// NewSignedData takes data and initializes a PKCS7 SignedData struct that is +// ready to be signed via AddSigner. The digest algorithm is set to SHA1 by default +// and can be changed by calling SetDigestAlgorithm. 
+func NewSignedData(data []byte) (*SignedData, error) { + content, err := asn1.Marshal(data) + if err != nil { + return nil, err + } + ci := contentInfo{ + ContentType: OIDData, + Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true}, + } + sd := signedData{ + ContentInfo: ci, + Version: 1, + } + return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA1}, nil +} + +// SignerInfoConfig are optional values to include when adding a signer +type SignerInfoConfig struct { + ExtraSignedAttributes []Attribute + ExtraUnsignedAttributes []Attribute + SkipCertificates bool +} + +type signedData struct { + Version int `asn1:"default:1"` + DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"` + ContentInfo contentInfo + Certificates rawCertificates `asn1:"optional,tag:0"` + CRLs []pkix.CertificateList `asn1:"optional,tag:1"` + SignerInfos []signerInfo `asn1:"set"` +} + +type signerInfo struct { + Version int `asn1:"default:1"` + IssuerAndSerialNumber issuerAndSerial + DigestAlgorithm pkix.AlgorithmIdentifier + AuthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:0"` + DigestEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedDigest []byte + UnauthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:1"` +} + +type attribute struct { + Type asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +func marshalAttributes(attrs []attribute) ([]byte, error) { + encodedAttributes, err := asn1.Marshal(struct { + A []attribute `asn1:"set"` + }{A: attrs}) + if err != nil { + return nil, err + } + + // Remove the leading sequence octets + var raw asn1.RawValue + asn1.Unmarshal(encodedAttributes, &raw) + return raw.Bytes, nil +} + +type rawCertificates struct { + Raw asn1.RawContent +} + +type issuerAndSerial struct { + IssuerName asn1.RawValue + SerialNumber *big.Int +} + +// SetDigestAlgorithm sets the digest algorithm to be used in the signing process. +// +// This should be called before adding signers +func (sd *SignedData) SetDigestAlgorithm(d asn1.ObjectIdentifier) { + sd.digestOid = d +} + +// SetEncryptionAlgorithm sets the encryption algorithm to be used in the signing process. +// +// This should be called before adding signers +func (sd *SignedData) SetEncryptionAlgorithm(d asn1.ObjectIdentifier) { + sd.encryptionOid = d +} + +// AddSigner is a wrapper around AddSignerChain() that adds a signer without any parent. The signer can +// either be a crypto.Signer or crypto.PrivateKey. +func (sd *SignedData) AddSigner(ee *x509.Certificate, keyOrSigner interface{}, config SignerInfoConfig) error { + var parents []*x509.Certificate + return sd.AddSignerChain(ee, keyOrSigner, parents, config) +} + +// AddSignerChain signs attributes about the content and adds certificates +// and signer infos to the Signed Data. The certificate and private key +// of the end-entity signer are used to issue the signature, and any +// parents of that end-entity that need to be added to the list of +// certificates can be specified in the parents slice. +// +// The signature algorithm used to hash the data is that of the end-entity +// certificate. The signer can be either a crypto.Signer or crypto.PrivateKey. 
+func (sd *SignedData) AddSignerChain(ee *x509.Certificate, keyOrSigner interface{}, parents []*x509.Certificate, config SignerInfoConfig) error { + // Following RFC 2315, 9.2 SignerInfo type, the distinguished name of + // the issuer of the end-entity signer is stored in the issuerAndSerialNumber + // section of the SignedData.SignerInfo, alongside the serial number of + // the end-entity. + var ias issuerAndSerial + ias.SerialNumber = ee.SerialNumber + if len(parents) == 0 { + // no parent, the issuer is the end-entity cert itself + ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer} + } else { + err := verifyPartialChain(ee, parents) + if err != nil { + return err + } + // the first parent is the issuer + ias.IssuerName = asn1.RawValue{FullBytes: parents[0].RawSubject} + } + sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, + pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, + ) + hash, err := getHashForOID(sd.digestOid) + if err != nil { + return err + } + h := hash.New() + h.Write(sd.data) + sd.messageDigest = h.Sum(nil) + encryptionOid, err := getOIDForEncryptionAlgorithm(keyOrSigner, sd.digestOid) + if err != nil { + return err + } + attrs := &attributes{} + attrs.Add(OIDAttributeContentType, sd.sd.ContentInfo.ContentType) + attrs.Add(OIDAttributeMessageDigest, sd.messageDigest) + attrs.Add(OIDAttributeSigningTime, time.Now().UTC()) + for _, attr := range config.ExtraSignedAttributes { + attrs.Add(attr.Type, attr.Value) + } + finalAttrs, err := attrs.ForMarshalling() + if err != nil { + return err + } + unsignedAttrs := &attributes{} + for _, attr := range config.ExtraUnsignedAttributes { + unsignedAttrs.Add(attr.Type, attr.Value) + } + finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling() + if err != nil { + return err + } + // create signature of signed attributes + signature, err := signAttributes(finalAttrs, keyOrSigner, hash) + if err != nil { + return err + } + signerInfo := signerInfo{ + AuthenticatedAttributes: finalAttrs, + UnauthenticatedAttributes: finalUnsignedAttrs, + DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, + DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: encryptionOid}, + IssuerAndSerialNumber: ias, + EncryptedDigest: signature, + Version: 1, + } + if !config.SkipCertificates { + sd.certs = append(sd.certs, ee) + if len(parents) > 0 { + sd.certs = append(sd.certs, parents...) + } + } + sd.sd.SignerInfos = append(sd.sd.SignerInfos, signerInfo) + return nil +} + +// SignWithoutAttr issues a signature on the content of the pkcs7 SignedData. +// Unlike AddSigner/AddSignerChain, it calculates the digest on the data alone +// and does not include any signed attributes like timestamp and so on. +// +// This function is needed to sign old Android APKs, something you probably +// shouldn't do unless you're maintaining backward compatibility for old +// applications. The signer can be either a crypto.Signer or crypto.PrivateKey. 
+func (sd *SignedData) SignWithoutAttr(ee *x509.Certificate, keyOrSigner interface{}, config SignerInfoConfig) error { + var signature []byte + sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}) + hash, err := getHashForOID(sd.digestOid) + if err != nil { + return err + } + h := hash.New() + h.Write(sd.data) + sd.messageDigest = h.Sum(nil) + + switch pkey := keyOrSigner.(type) { + case *dsa.PrivateKey: + // dsa doesn't implement crypto.Signer so we make a special case + // https://github.com/golang/go/issues/27889 + r, s, err := dsa.Sign(rand.Reader, pkey, sd.messageDigest) + if err != nil { + return err + } + signature, err = asn1.Marshal(dsaSignature{r, s}) + if err != nil { + return err + } + default: + signer, ok := keyOrSigner.(crypto.Signer) + if !ok { + return errors.New("pkcs7: private key does not implement crypto.Signer") + } + + // special case for Ed25519, which hashes as part of the signing algorithm + _, ok = signer.Public().(ed25519.PublicKey) + if ok { + signature, err = signer.Sign(rand.Reader, sd.data, crypto.Hash(0)) + if err != nil { + return err + } + } else { + signature, err = signer.Sign(rand.Reader, sd.messageDigest, hash) + if err != nil { + return err + } + } + } + + var ias issuerAndSerial + ias.SerialNumber = ee.SerialNumber + // no parent, the issuer is the end-entity cert itself + ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer} + if sd.encryptionOid == nil { + // if the encryption algorithm wasn't set by SetEncryptionAlgorithm, + // infer it from the digest algorithm + sd.encryptionOid, err = getOIDForEncryptionAlgorithm(keyOrSigner, sd.digestOid) + } + if err != nil { + return err + } + signerInfo := signerInfo{ + DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, + DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.encryptionOid}, + IssuerAndSerialNumber: ias, + EncryptedDigest: signature, + Version: 1, + } + // add the end-entity certificate to the payload + sd.certs = append(sd.certs, ee) + sd.sd.SignerInfos = append(sd.sd.SignerInfos, signerInfo) + return nil +} + +func (si *signerInfo) SetUnauthenticatedAttributes(extraUnsignedAttrs []Attribute) error { + unsignedAttrs := &attributes{} + for _, attr := range extraUnsignedAttrs { + unsignedAttrs.Add(attr.Type, attr.Value) + } + finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling() + if err != nil { + return err + } + + si.UnauthenticatedAttributes = finalUnsignedAttrs + + return nil +} + +// AddCertificate adds the certificate to the payload. Useful for parent certificates +func (sd *SignedData) AddCertificate(cert *x509.Certificate) { + sd.certs = append(sd.certs, cert) +} + +// SetContentType sets the content type of the SignedData. For example to specify the +// content type of a time-stamp token according to RFC 3161 section 2.4.2. +func (sd *SignedData) SetContentType(contentType asn1.ObjectIdentifier) { + sd.sd.ContentInfo.ContentType = contentType +} + +// Detach removes content from the signed data struct to make it a detached signature. 
+// This must be called right before Finish() +func (sd *SignedData) Detach() { + sd.sd.ContentInfo = contentInfo{ContentType: OIDData} +} + +// GetSignedData returns the private Signed Data +func (sd *SignedData) GetSignedData() *signedData { + return &sd.sd +} + +// Finish marshals the content and its signers +func (sd *SignedData) Finish() ([]byte, error) { + sd.sd.Certificates = marshalCertificates(sd.certs) + inner, err := asn1.Marshal(sd.sd) + if err != nil { + return nil, err + } + outer := contentInfo{ + ContentType: OIDSignedData, + Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true}, + } + return asn1.Marshal(outer) +} + +// RemoveAuthenticatedAttributes removes authenticated attributes from signedData +// similar to OpenSSL's PKCS7_NOATTR or -noattr flags +func (sd *SignedData) RemoveAuthenticatedAttributes() { + for i := range sd.sd.SignerInfos { + sd.sd.SignerInfos[i].AuthenticatedAttributes = nil + } +} + +// RemoveUnauthenticatedAttributes removes unauthenticated attributes from signedData +func (sd *SignedData) RemoveUnauthenticatedAttributes() { + for i := range sd.sd.SignerInfos { + sd.sd.SignerInfos[i].UnauthenticatedAttributes = nil + } +} + +// verifyPartialChain checks that a given cert is issued by the first parent in the list, +// then continues down the path. It doesn't require the last parent to be a root CA, +// or to be trusted in any truststore. It simply verifies that the chain provided, albeit +// partial, makes sense. +func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) error { + if len(parents) == 0 { + return fmt.Errorf("pkcs7: zero parents provided to verify the signature of certificate %q", cert.Subject.CommonName) + } + err := cert.CheckSignatureFrom(parents[0]) + if err != nil { + return fmt.Errorf("pkcs7: certificate signature from parent is invalid: %v", err) + } + if len(parents) == 1 { + // there is no more parent to check, return + return nil + } + return verifyPartialChain(parents[0], parents[1:]) +} + +func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) { + var ias issuerAndSerial + // The issuer RDNSequence has to match exactly the sequence in the certificate + // We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence + ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer} + ias.SerialNumber = cert.SerialNumber + + return ias, nil +} + +// signs the DER encoded form of the attributes with the private key +func signAttributes(attrs []attribute, keyOrSigner interface{}, digestAlg crypto.Hash) ([]byte, error) { + attrBytes, err := marshalAttributes(attrs) + if err != nil { + return nil, err + } + h := digestAlg.New() + h.Write(attrBytes) + hash := h.Sum(nil) + + // dsa doesn't implement crypto.Signer so we make a special case + // https://github.com/golang/go/issues/27889 + switch pkey := keyOrSigner.(type) { + case *dsa.PrivateKey: + r, s, err := dsa.Sign(rand.Reader, pkey, hash) + if err != nil { + return nil, err + } + return asn1.Marshal(dsaSignature{r, s}) + } + + signer, ok := keyOrSigner.(crypto.Signer) + if !ok { + return nil, errors.New("pkcs7: private key does not implement crypto.Signer") + } + + // special case for Ed25519, which hashes as part of the signing algorithm + _, ok = signer.Public().(ed25519.PublicKey) + if ok { + return signer.Sign(rand.Reader, attrBytes, crypto.Hash(0)) + } + + return signer.Sign(rand.Reader, hash, digestAlg) +} + +type dsaSignature struct { + R, S *big.Int +} + +// concatenates and wraps the certificates in the 
RawValue structure +func marshalCertificates(certs []*x509.Certificate) rawCertificates { + var buf bytes.Buffer + for _, cert := range certs { + buf.Write(cert.Raw) + } + rawCerts, _ := marshalCertificateBytes(buf.Bytes()) + return rawCerts +} + +// Even though the tag & length are stripped out during marshalling of the +// RawContent, we have to encode them into the RawContent. If they are missing, +// `asn1.Marshal()` will strip out the certificate wrapper instead. +func marshalCertificateBytes(certs []byte) (rawCertificates, error) { + var val = asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true} + b, err := asn1.Marshal(val) + if err != nil { + return rawCertificates{}, err + } + return rawCertificates{Raw: b}, nil +} + +// DegenerateCertificate creates a signed data structure containing only the +// provided certificate or certificate chain. +func DegenerateCertificate(cert []byte) ([]byte, error) { + rawCert, err := marshalCertificateBytes(cert) + if err != nil { + return nil, err + } + emptyContent := contentInfo{ContentType: OIDData} + sd := signedData{ + Version: 1, + ContentInfo: emptyContent, + Certificates: rawCert, + CRLs: []pkix.CertificateList{}, + } + content, err := asn1.Marshal(sd) + if err != nil { + return nil, err + } + signedContent := contentInfo{ + ContentType: OIDSignedData, + Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true}, + } + return asn1.Marshal(signedContent) +} diff --git a/vendor/github.com/digitorus/pkcs7/verify.go b/vendor/github.com/digitorus/pkcs7/verify.go new file mode 100644 index 000000000..d0e4f0429 --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/verify.go @@ -0,0 +1,370 @@ +package pkcs7 + +import ( + "crypto/subtle" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "time" +) + +// Verify is a wrapper around VerifyWithChain() that passes a nil +// trust store, effectively disabling certificate verification when validating +// a signature. +func (p7 *PKCS7) Verify() (err error) { + return p7.VerifyWithChain(nil) +} + +// VerifyWithChain checks the signatures of a PKCS7 object. +// +// If truststore is not nil, it also verifies the chain of trust of +// the end-entity signer cert to one of the roots in the +// truststore. When the PKCS7 object includes the signing-time +// authenticated attribute, the chain is verified at that time, and at +// the current UTC time otherwise. +func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) { + intermediates := x509.NewCertPool() + for _, cert := range p7.Certificates { + intermediates.AddCert(cert) + } + + opts := x509.VerifyOptions{ + Roots: truststore, + Intermediates: intermediates, + } + + return p7.VerifyWithOpts(opts) +} + +// VerifyWithChainAtTime checks the signatures of a PKCS7 object. +// +// If truststore is not nil, it also verifies the chain of trust of +// the end-entity signer cert to a root in the truststore at +// currentTime. It does not use the signing time authenticated +// attribute. +func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime time.Time) (err error) { + intermediates := x509.NewCertPool() + for _, cert := range p7.Certificates { + intermediates.AddCert(cert) + } + + opts := x509.VerifyOptions{ + Roots: truststore, + Intermediates: intermediates, + CurrentTime: currentTime, + } + + return p7.VerifyWithOpts(opts) +} + +// VerifyWithOpts checks the signatures of a PKCS7 object. +// +// It accepts x509.VerifyOptions as a parameter. 
+// This struct contains a root certificate pool, an intermediate certificate pool, +// an optional list of EKUs, and an optional time at which certificates should be +// checked for validity. +// +// If opts.Roots is not nil it verifies the chain of trust of +// the end-entity signer cert to one of the roots in the +// truststore. When the PKCS7 object includes the signing-time +// authenticated attribute, the chain is verified at that time, and at +// the current UTC time otherwise. +func (p7 *PKCS7) VerifyWithOpts(opts x509.VerifyOptions) (err error) { + // if KeyUsages isn't set, default to ExtKeyUsageAny + if opts.KeyUsages == nil { + opts.KeyUsages = []x509.ExtKeyUsage{x509.ExtKeyUsageAny} + } + + if len(p7.Signers) == 0 { + return errors.New("pkcs7: Message has no signers") + } + + // if opts.CurrentTime is not set, call verifySignature, + // which will verify the leaf certificate with the current time + if opts.CurrentTime.IsZero() { + for _, signer := range p7.Signers { + if err := verifySignature(p7, signer, opts); err != nil { + return err + } + } + return nil + } + // if opts.CurrentTime is set, call verifySignatureAtTime, + // which will verify the leaf certificate with opts.CurrentTime + for _, signer := range p7.Signers { + if err := verifySignatureAtTime(p7, signer, opts); err != nil { + return err + } + } + return nil +} + +func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, opts x509.VerifyOptions) (err error) { + signedData := p7.Content + ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) + if ee == nil { + return errors.New("pkcs7: No certificate for signer") + } + if len(signer.AuthenticatedAttributes) > 0 { + // TODO(fullsailor): First check the content type match + var ( + digest []byte + signingTime time.Time + ) + err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest) + if err != nil { + return err + } + hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm) + if err != nil { + return err + } + h := hash.New() + h.Write(p7.Content) + computed := h.Sum(nil) + if subtle.ConstantTimeCompare(digest, computed) != 1 { + return &MessageDigestMismatchError{ + ExpectedDigest: digest, + ActualDigest: computed, + } + } + signedData, err = marshalAttributes(signer.AuthenticatedAttributes) + if err != nil { + return err + } + err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime) + if err == nil { + // signing time found, performing validity check + if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { + return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q", + signingTime.Format(time.RFC3339), + ee.NotBefore.Format(time.RFC3339), + ee.NotAfter.Format(time.RFC3339)) + } + } + } + if opts.Roots != nil { + _, err = ee.Verify(opts) + if err != nil { + return fmt.Errorf("pkcs7: failed to verify certificate chain: %v", err) + } + } + sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm) + if err != nil { + return err + } + return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest) +} + +func verifySignature(p7 *PKCS7, signer signerInfo, opts x509.VerifyOptions) (err error) { + signedData := p7.Content + ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) + if ee == nil { + return errors.New("pkcs7: No certificate for signer") + } + signingTime := time.Now().UTC() + if len(signer.AuthenticatedAttributes) > 0 { + // TODO(fullsailor): First 
check the content type match + var digest []byte + err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest) + if err != nil { + return err + } + hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm) + if err != nil { + return err + } + h := hash.New() + h.Write(p7.Content) + computed := h.Sum(nil) + if subtle.ConstantTimeCompare(digest, computed) != 1 { + return &MessageDigestMismatchError{ + ExpectedDigest: digest, + ActualDigest: computed, + } + } + signedData, err = marshalAttributes(signer.AuthenticatedAttributes) + if err != nil { + return err + } + err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime) + if err == nil { + // signing time found, performing validity check + if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { + return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q", + signingTime.Format(time.RFC3339), + ee.NotBefore.Format(time.RFC3339), + ee.NotAfter.Format(time.RFC3339)) + } + } + } + if opts.Roots != nil { + opts.CurrentTime = signingTime + _, err = ee.Verify(opts) + if err != nil { + return fmt.Errorf("pkcs7: failed to verify certificate chain: %v", err) + } + } + sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm) + if err != nil { + return err + } + return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest) +} + +// GetOnlySigner returns an x509.Certificate for the first signer of the signed +// data payload. If there is not exactly one signer, nil is returned +func (p7 *PKCS7) GetOnlySigner() *x509.Certificate { + if len(p7.Signers) != 1 { + return nil + } + signer := p7.Signers[0] + return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) +} + +// UnmarshalSignedAttribute decodes a single attribute from the signer info +func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error { + sd, ok := p7.raw.(signedData) + if !ok { + return errors.New("pkcs7: payload is not signedData content") + } + if len(sd.SignerInfos) < 1 { + return errors.New("pkcs7: payload has no signers") + } + attributes := sd.SignerInfos[0].AuthenticatedAttributes + return unmarshalAttribute(attributes, attributeType, out) +} + +func parseSignedData(data []byte) (*PKCS7, error) { + var sd signedData + if _, err := asn1.Unmarshal(data, &sd); err != nil { + return nil, err + } + certs, err := sd.Certificates.Parse() + if err != nil { + return nil, err + } + // fmt.Printf("--> Signed Data Version %d\n", sd.Version) + + var compound asn1.RawValue + var content unsignedData + + // The Content.Bytes may be empty on PKI responses. 
+ if len(sd.ContentInfo.Content.Bytes) > 0 { + if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil { + return nil, err + } + } + // Compound octet string + if compound.IsCompound { + if compound.Tag == 4 { + if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil { + return nil, err + } + } else { + content = compound.Bytes + } + } else { + // assuming this is tag 04 + content = compound.Bytes + } + return &PKCS7{ + Content: content, + Certificates: certs, + CRLs: sd.CRLs, + Signers: sd.SignerInfos, + raw: sd}, nil +} + +// MessageDigestMismatchError is returned when the signer data digest does not +// match the computed digest for the contained content +type MessageDigestMismatchError struct { + ExpectedDigest []byte + ActualDigest []byte +} + +func (err *MessageDigestMismatchError) Error() string { + return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest) +} + +func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x509.SignatureAlgorithm, error) { + switch { + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA1): + return x509.ECDSAWithSHA1, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA256): + return x509.ECDSAWithSHA256, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA384): + return x509.ECDSAWithSHA384, nil + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA512): + return x509.ECDSAWithSHA512, nil + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSA), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.SHA1WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.SHA256WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): + return x509.SHA384WithRSA, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): + return x509.SHA512WithRSA, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSA), + digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSASHA1): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.DSAWithSHA1, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.DSAWithSHA256, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP256), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP384), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP521): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.ECDSAWithSHA1, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.ECDSAWithSHA256, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): + return x509.ECDSAWithSHA384, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): + return x509.ECDSAWithSHA512, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption 
algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmEDDSA25519): + return x509.PureEd25519, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported algorithm %q", + digestEncryption.Algorithm.String()) + } +} + +func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate { + for _, cert := range certs { + if isCertMatchForIssuerAndSerial(cert, ias) { + return cert + } + } + return nil +} + +func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error { + for _, attr := range attrs { + if attr.Type.Equal(attributeType) { + _, err := asn1.Unmarshal(attr.Value.Bytes, out) + return err + } + } + return errors.New("pkcs7: attribute type not in attributes") +} diff --git a/vendor/github.com/digitorus/pkcs7/verify_test_dsa.go b/vendor/github.com/digitorus/pkcs7/verify_test_dsa.go new file mode 100644 index 000000000..1eb05bc3e --- /dev/null +++ b/vendor/github.com/digitorus/pkcs7/verify_test_dsa.go @@ -0,0 +1,182 @@ +// +build go1.11 go1.12 go1.13 go1.14 go1.15 + +package pkcs7 + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "os/exec" + "testing" +) + +func TestVerifyEC2(t *testing.T) { + fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture) + p7, err := Parse(fixture.Input) + if err != nil { + t.Errorf("Parse encountered unexpected error: %v", err) + } + p7.Certificates = []*x509.Certificate{fixture.Certificate} + if err := p7.Verify(); err != nil { + t.Errorf("Verify failed with error: %v", err) + } +} + +var EC2IdentityDocumentFixture = ` +-----BEGIN PKCS7----- +MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA +JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh +eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1 +cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh +bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs +bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg +OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK +ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj +aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy +YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA +AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n +dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi +IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B +CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG +CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w +LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA +AAAAAA== +-----END PKCS7----- +-----BEGIN CERTIFICATE----- +MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z +ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e +ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 +VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P +hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j +k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U +hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF 
+lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf +MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW +MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw +vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw +7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K +-----END CERTIFICATE-----` + +func TestDSASignWithOpenSSLAndVerify(t *testing.T) { + content := []byte(` +A ship in port is safe, +but that's not what ships are built for. +-- Grace Hopper`) + // write the content to a temp file + tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpContentFile.Name(), content, 0755) + + // write the signer cert to a temp file + tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0755) + + // write the signer key to a temp file + tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0755) + + tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature") + if err != nil { + t.Fatal(err) + } + // call openssl to sign the content + opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1", + "-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(), + "-signer", tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(), + "-certfile", tmpSignerCertFile.Name(), "-outform", "PEM") + out, err := opensslCMD.CombinedOutput() + if err != nil { + t.Fatalf("openssl command failed with %s: %s", err, out) + } + + // verify the signed content + pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name()) + if err != nil { + t.Fatal(err) + } + fmt.Printf("%s\n", pemSignature) + derBlock, _ := pem.Decode(pemSignature) + if derBlock == nil { + t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name()) + } + p7, err := Parse(derBlock.Bytes) + if err != nil { + t.Fatalf("Parse encountered unexpected error: %v", err) + } + if err := p7.Verify(); err != nil { + t.Fatalf("Verify failed with error: %v", err) + } + os.Remove(tmpSignerCertFile.Name()) // clean up + os.Remove(tmpSignerKeyFile.Name()) // clean up + os.Remove(tmpContentFile.Name()) // clean up +} + +var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY----- +MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS +PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl +pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith +1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L +vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3 +zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo +g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE= +-----END PRIVATE KEY-----`) + +var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE----- +MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV +bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD +VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du +MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r +bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE +ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC +AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD 
+Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE +exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii +Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4 +V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI +puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl +nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp +rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt +1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT +ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G +CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688 +qzy/7yePTlhlpj+ahMM= +-----END CERTIFICATE-----`) + +type DSATestFixture struct { + Input []byte + Certificate *x509.Certificate +} + +func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture { + var result DSATestFixture + var derBlock *pem.Block + var pemBlock = []byte(testPEMBlock) + for { + derBlock, pemBlock = pem.Decode(pemBlock) + if derBlock == nil { + break + } + switch derBlock.Type { + case "PKCS7": + result.Input = derBlock.Bytes + case "CERTIFICATE": + result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes) + } + } + + return result +} diff --git a/vendor/github.com/digitorus/timestamp/LICENSE b/vendor/github.com/digitorus/timestamp/LICENSE new file mode 100644 index 000000000..dac8634ce --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2017, Digitorus B.V. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/digitorus/timestamp/README.md b/vendor/github.com/digitorus/timestamp/README.md new file mode 100644 index 000000000..d475e03f9 --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/README.md @@ -0,0 +1,13 @@ +# RFC3161 Time-Stamp Protocol (TSP) package for Go + +[![Build & Test](https://github.com/digitorus/timestamp/workflows/Build%20&%20Test/badge.svg)](https://github.com/digitorus/timestamp/actions?query=workflow%3Abuild-and-test) +[![golangci-lint](https://github.com/digitorus/timestamp/workflows/golangci-lint/badge.svg)](https://github.com/digitorus/timestamp/actions?query=workflow%3Agolangci-lint) +[![CodeQL](https://github.com/digitorus/timestamp/workflows/CodeQL/badge.svg)](https://github.com/digitorus/timestamp/actions?query=workflow%3Acodeql) +[![Go Report Card](https://goreportcard.com/badge/github.com/digitorus/timestamp)](https://goreportcard.com/report/github.com/digitorus/timestamp) +[![Coverage Status](https://codecov.io/gh/digitorus/timestamp/branch/master/graph/badge.svg)](https://codecov.io/gh/digitorus/timestamp) +[![Go Reference](https://pkg.go.dev/badge/github.com/digitorus/timestamp.svg)](https://pkg.go.dev/github.com/digitorus/timestamp) + +Time-Stamp Protocol (TSP) package for Go + +#### General +The package timestamp implements the Time-Stamp Protocol (TSP) as specified in RFC3161 (Internet X.509 Public Key Infrastructure Time-Stamp Protocol (TSP)). diff --git a/vendor/github.com/digitorus/timestamp/borrowed.go b/vendor/github.com/digitorus/timestamp/borrowed.go new file mode 100644 index 000000000..b379b7167 --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/borrowed.go @@ -0,0 +1,56 @@ +package timestamp + +import ( + "crypto" + "encoding/asn1" +) + +// TODO(vanbroup): taken from "golang.org/x/crypto/ocsp" +// use directly from crypto/x509 when exported as suggested below. + +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ + crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), + crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), + crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), + crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), +} + +// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. +func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { + for hash, oid := range hashOIDs { + if oid.Equal(target) { + return hash + } + } + return crypto.Hash(0) +} + +func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { + for hash, oid := range hashOIDs { + if hash == target { + return oid + } + } + return nil +} + +// TODO(vanbroup): taken from golang.org/x/crypto/x509 +// asn1BitLength returns the bit-length of bitString by considering the +// most-significant bit in a byte to be the "first" bit. This convention +// matches ASN.1, but differs from almost everything else. 
+func asn1BitLength(bitString []byte) int { + bitLen := len(bitString) * 8 + + for i := range bitString { + b := bitString[len(bitString)-i-1] + + for bit := uint(0); bit < 8; bit++ { + if (b>>bit)&1 == 1 { + return bitLen + } + bitLen-- + } + } + + return 0 +} diff --git a/vendor/github.com/digitorus/timestamp/rfc3161_struct.go b/vendor/github.com/digitorus/timestamp/rfc3161_struct.go new file mode 100644 index 000000000..04060dd02 --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/rfc3161_struct.go @@ -0,0 +1,75 @@ +package timestamp + +import ( + "crypto/x509/pkix" + "encoding/asn1" + "math/big" + "time" +) + +// http://www.ietf.org/rfc/rfc3161.txt +// 2.4.1. Request Format +type request struct { + Version int + MessageImprint messageImprint + ReqPolicy asn1.ObjectIdentifier `asn1:"optional"` + Nonce *big.Int `asn1:"optional"` + CertReq bool `asn1:"optional,default:false"` + Extensions []pkix.Extension `asn1:"tag:0,optional"` +} + +type messageImprint struct { + HashAlgorithm pkix.AlgorithmIdentifier + HashedMessage []byte +} + +// 2.4.2. Response Format +type response struct { + Status pkiStatusInfo + TimeStampToken asn1.RawValue `asn1:"optional"` +} + +type pkiStatusInfo struct { + Status Status + StatusString []string `asn1:"optional,utf8"` + FailInfo asn1.BitString `asn1:"optional"` +} + +func (s pkiStatusInfo) FailureInfo() FailureInfo { + fi := []FailureInfo{BadAlgorithm, BadRequest, BadDataFormat, TimeNotAvailable, + UnacceptedPolicy, UnacceptedExtension, AddInfoNotAvailable, SystemFailure} + + for _, f := range fi { + if s.FailInfo.At(int(f)) != 0 { + return f + } + } + + return UnkownFailureInfo +} + +// eContent within SignedData is TSTInfo +type tstInfo struct { + Version int + Policy asn1.ObjectIdentifier + MessageImprint messageImprint + SerialNumber *big.Int + Time time.Time `asn1:"generalized"` + Accuracy accuracy `asn1:"optional"` + Ordering bool `asn1:"optional,default:false"` + Nonce *big.Int `asn1:"optional"` + TSA asn1.RawValue `asn1:"tag:0,optional"` + Extensions []pkix.Extension `asn1:"tag:1,optional"` +} + +// accuracy within TSTInfo +type accuracy struct { + Seconds int64 `asn1:"optional"` + Milliseconds int64 `asn1:"tag:0,optional"` + Microseconds int64 `asn1:"tag:1,optional"` +} + +type qcStatement struct { + StatementID asn1.ObjectIdentifier + StatementInfo asn1.RawValue `asn1:"optional"` +} diff --git a/vendor/github.com/digitorus/timestamp/signing_cert_v2_struct.go b/vendor/github.com/digitorus/timestamp/signing_cert_v2_struct.go new file mode 100644 index 000000000..0507c56e5 --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/signing_cert_v2_struct.go @@ -0,0 +1,26 @@ +package timestamp + +import ( + "crypto/x509/pkix" + "encoding/asn1" + "math/big" +) + +type issuerAndSerial struct { + IssuerName generalNames + SerialNumber *big.Int +} + +type generalNames struct { + Name asn1.RawValue `asn1:"optional,tag:4"` +} + +type essCertIDv2 struct { + HashAlgorithm pkix.AlgorithmIdentifier `asn1:"optional"` // default sha256 + CertHash []byte + IssuerSerial issuerAndSerial `asn1:"optional"` +} + +type signingCertificateV2 struct { + Certs []essCertIDv2 +} diff --git a/vendor/github.com/digitorus/timestamp/timestamp.go b/vendor/github.com/digitorus/timestamp/timestamp.go new file mode 100644 index 000000000..8069cad8f --- /dev/null +++ b/vendor/github.com/digitorus/timestamp/timestamp.go @@ -0,0 +1,641 @@ +// Package timestamp implements the Time-Stamp Protocol (TSP) as specified in +// RFC3161 (Internet X.509 Public Key Infrastructure Time-Stamp 
Protocol (TSP)). +package timestamp + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "fmt" + "io" + "math/big" + "strconv" + "strings" + "time" + + "github.com/digitorus/pkcs7" +) + +// FailureInfo contains the failure details of a Time-Stamp request. See +// https://tools.ietf.org/html/rfc3161#section-2.4.2 +type FailureInfo int + +const ( + // UnkownFailureInfo means that no known failure info was provided + UnkownFailureInfo FailureInfo = -1 + // BadAlgorithm defines an unrecognized or unsupported Algorithm Identifier + BadAlgorithm FailureInfo = 0 + // BadRequest indicates that the transaction is not permitted or supported + BadRequest FailureInfo = 2 + // BadDataFormat means that the data submitted has the wrong format + BadDataFormat FailureInfo = 5 + // TimeNotAvailable indicates that the TSA's time source is not available + TimeNotAvailable FailureInfo = 14 + // UnacceptedPolicy indicates that the requested TSA policy is not supported + // by the TSA + UnacceptedPolicy FailureInfo = 15 + // UnacceptedExtension indicates that the requested extension is not supported + // by the TSA + UnacceptedExtension FailureInfo = 16 + // AddInfoNotAvailable means that the information requested could not be + // understood or is not available + AddInfoNotAvailable FailureInfo = 17 + // SystemFailure indicates that the request cannot be handled due to system + // failure + SystemFailure FailureInfo = 25 +) + +func (f FailureInfo) String() string { + switch f { + case BadAlgorithm: + return "unrecognized or unsupported Algorithm Identifier" + case BadRequest: + return "transaction not permitted or supported" + case BadDataFormat: + return "the data submitted has the wrong format" + case TimeNotAvailable: + return "the TSA's time source is not available" + case UnacceptedPolicy: + return "the requested TSA policy is not supported by the TSA" + case UnacceptedExtension: + return "the requested extension is not supported by the TSA" + case AddInfoNotAvailable: + return "the additional information requested could not be understood or is not available" + case SystemFailure: + return "the request cannot be handled due to system failure" + default: + return "unknown failure" + } +} + +// Status contains the status of a Time-Stamp request. See +// https://tools.ietf.org/html/rfc3161#section-2.4.2 +type Status int + +const ( + // Granted PKIStatus contains the value zero; a TimeStampToken, as requested, + // is present. + Granted Status = 0 + // GrantedWithMods PKIStatus contains the value one; a TimeStampToken, with + // modifications, is present. + GrantedWithMods Status = 1 + // Rejection PKIStatus + Rejection Status = 2 + // Waiting PKIStatus + Waiting Status = 3 + // RevocationWarning PKIStatus + RevocationWarning Status = 4 + // RevocationNotification PKIStatus + RevocationNotification Status = 5 +) + +func (s Status) String() string { + switch s { + case Granted: + return "the request is granted" + case GrantedWithMods: + return "the request is granted with modifications" + case Rejection: + return "the request is rejected" + case Waiting: + return "the request is waiting" + case RevocationWarning: + return "revocation is imminent" + case RevocationNotification: + return "revocation has occurred" + default: + return "unknown status: " + strconv.Itoa(int(s)) + } +} + +// ParseError results from an invalid Time-Stamp request or response. +type ParseError string + +func (p ParseError) Error() string { + return string(p) +} + +// Request represents a Time-Stamp request.
See +// https://tools.ietf.org/html/rfc3161#section-2.4.1 +type Request struct { + HashAlgorithm crypto.Hash + HashedMessage []byte + + // Certificates indicates if the TSA needs to return the signing certificate + // and optionally any other certificates of the chain as part of the response. + Certificates bool + + // The TSAPolicyOID field, if provided, indicates the TSA policy under + // which the TimeStampToken SHOULD be provided + TSAPolicyOID asn1.ObjectIdentifier + + // The nonce, if provided, allows the client to verify the timeliness of + // the response. + Nonce *big.Int + + // Extensions contains raw X.509 extensions from the Extensions field of the + // Time-Stamp request. When parsing requests, this can be used to extract + // non-critical extensions that are not parsed by this package. When + // marshaling Time-Stamp requests, the Extensions field is ignored, see + // ExtraExtensions. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any marshaled + // Time-Stamp request. Values override any + // extensions that would otherwise be produced based on the other fields. The + // ExtraExtensions field is not populated when parsing Time-Stamp requests, + // see Extensions. + ExtraExtensions []pkix.Extension +} + +// ParseRequest parses a timestamp request in DER form. +func ParseRequest(bytes []byte) (*Request, error) { + var err error + var rest []byte + var req request + + if rest, err = asn1.Unmarshal(bytes, &req); err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in Time-Stamp request") + } + + if len(req.MessageImprint.HashedMessage) == 0 { + return nil, ParseError("Time-Stamp request contains no hashed message") + } + + hashFunc := getHashAlgorithmFromOID(req.MessageImprint.HashAlgorithm.Algorithm) + if hashFunc == crypto.Hash(0) { + return nil, ParseError("Time-Stamp request uses unknown hash function") + } + + return &Request{ + HashAlgorithm: hashFunc, + HashedMessage: req.MessageImprint.HashedMessage, + Certificates: req.CertReq, + Nonce: req.Nonce, + TSAPolicyOID: req.ReqPolicy, + Extensions: req.Extensions, + }, nil +} + +// Marshal marshals the Time-Stamp request to ASN.1 DER encoded form. +func (req *Request) Marshal() ([]byte, error) { + request := request{ + Version: 1, + MessageImprint: messageImprint{ + HashAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: getOIDFromHashAlgorithm(req.HashAlgorithm), + Parameters: asn1.RawValue{ + Tag: 5, /* ASN.1 NULL */ + }, + }, + HashedMessage: req.HashedMessage, + }, + CertReq: req.Certificates, + Extensions: req.ExtraExtensions, + } + + if req.TSAPolicyOID != nil { + request.ReqPolicy = req.TSAPolicyOID + } + if req.Nonce != nil { + request.Nonce = req.Nonce + } + reqBytes, err := asn1.Marshal(request) + if err != nil { + return nil, err + } + return reqBytes, nil +} + +// Timestamp represents a Time-Stamp. See: +// https://tools.ietf.org/html/rfc3161#section-2.4.1 +type Timestamp struct { + // Timestamp token part of raw ASN.1 DER content. + RawToken []byte + + HashAlgorithm crypto.Hash + HashedMessage []byte + + Time time.Time + Accuracy time.Duration + SerialNumber *big.Int + Policy asn1.ObjectIdentifier + Ordering bool + Nonce *big.Int + Qualified bool + + Certificates []*x509.Certificate + + // If set to true, includes TSA certificate in timestamp response + AddTSACertificate bool + + // Extensions contains raw X.509 extensions from the Extensions field of the + // Time-Stamp.
When parsing time-stamps, this can be used to extract + // non-critical extensions that are not parsed by this package. When + // marshaling time-stamps, the Extensions field is ignored, see + // ExtraExtensions. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any marshaled + // Time-Stamp response. Values override any extensions that would otherwise + // be produced based on the other fields. The ExtraExtensions field is not + // populated when parsing Time-Stamp responses, see Extensions. + ExtraExtensions []pkix.Extension +} + +// ParseResponse parses a Time-Stamp response in DER form containing a +// TimeStampToken. +// +// Invalid signatures or parse failures will result in a ParseError. Error +// responses will be returned as an error. +func ParseResponse(bytes []byte) (*Timestamp, error) { + var err error + var rest []byte + var resp response + + if rest, err = asn1.Unmarshal(bytes, &resp); err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in Time-Stamp response") + } + + if resp.Status.Status > 0 { + var fis string + fi := resp.Status.FailureInfo() + if fi != UnkownFailureInfo { + fis = fi.String() + } + return nil, fmt.Errorf("%s: %s (%v)", + resp.Status.Status.String(), + strings.Join(resp.Status.StatusString, ","), + fis) + } + + if len(resp.TimeStampToken.Bytes) == 0 { + return nil, ParseError("no pkcs7 data in Time-Stamp response") + } + + return Parse(resp.TimeStampToken.FullBytes) +} + +// Parse parses a Time-Stamp in DER form. If the time-stamp contains a +// certificate then the signature over the response is checked. +// +// Invalid signatures or parse failures will result in a ParseError. Error +// responses will be returned as an error. +func Parse(bytes []byte) (*Timestamp, error) { + var addTSACertificate bool + p7, err := pkcs7.Parse(bytes) + if err != nil { + return nil, err + } + + if len(p7.Certificates) > 0 { + if err = p7.Verify(); err != nil { + return nil, err + } + addTSACertificate = true + } else { + addTSACertificate = false + } + + var inf tstInfo + if _, err = asn1.Unmarshal(p7.Content, &inf); err != nil { + return nil, err + } + + if len(inf.MessageImprint.HashedMessage) == 0 { + return nil, ParseError("Time-Stamp response contains no hashed message") + } + + ret := &Timestamp{ + RawToken: bytes, + HashedMessage: inf.MessageImprint.HashedMessage, + Time: inf.Time, + Accuracy: time.Duration((time.Second * time.Duration(inf.Accuracy.Seconds)) + + (time.Millisecond * time.Duration(inf.Accuracy.Milliseconds)) + + (time.Microsecond * time.Duration(inf.Accuracy.Microseconds))), + SerialNumber: inf.SerialNumber, + Policy: inf.Policy, + Ordering: inf.Ordering, + Nonce: inf.Nonce, + Certificates: p7.Certificates, + AddTSACertificate: addTSACertificate, + Extensions: inf.Extensions, + } + + ret.HashAlgorithm = getHashAlgorithmFromOID(inf.MessageImprint.HashAlgorithm.Algorithm) + if ret.HashAlgorithm == crypto.Hash(0) { + return nil, ParseError("Time-Stamp response uses unknown hash function") + } + + if oidInExtensions(asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 3}, inf.Extensions) { + ret.Qualified = true + } + return ret, nil +} + +// RequestOptions contains options for constructing timestamp requests. +type RequestOptions struct { + // Hash contains the hash function that should be used when + // constructing the timestamp request. If zero, SHA-256 will be used.
+ Hash crypto.Hash + + // Certificates sets Request.Certificates + Certificates bool + + // The TSAPolicyOID field, if provided, indicates the TSA policy under + // which the TimeStampToken SHOULD be provided + TSAPolicyOID asn1.ObjectIdentifier + + // The nonce, if provided, allows the client to verify the timeliness of + // the response. + Nonce *big.Int +} + +func (opts *RequestOptions) hash() crypto.Hash { + if opts == nil || opts.Hash == 0 { + return crypto.SHA256 + } + return opts.Hash +} + +// CreateRequest returns a DER-encoded timestamp request for the data read from r. If +// opts is nil then sensible defaults are used. +func CreateRequest(r io.Reader, opts *RequestOptions) ([]byte, error) { + hashFunc := opts.hash() + + if !hashFunc.Available() { + return nil, x509.ErrUnsupportedAlgorithm + } + h := opts.hash().New() + + b := make([]byte, h.Size()) + for { + n, err := r.Read(b) + if n > 0 { + if _, werr := h.Write(b[:n]); werr != nil { + return nil, fmt.Errorf("failed to create hash") + } + } + if err == io.EOF { + break + } + // Propagate non-EOF read errors instead of silently looping on them. + if err != nil { + return nil, err + } + } + + req := &Request{ + HashAlgorithm: opts.hash(), + HashedMessage: h.Sum(nil), + } + if opts != nil { + req.Certificates = opts.Certificates + } + if opts != nil && opts.TSAPolicyOID != nil { + req.TSAPolicyOID = opts.TSAPolicyOID + } + if opts != nil && opts.Nonce != nil { + req.Nonce = opts.Nonce + } + return req.Marshal() +} + +// CreateResponse returns a DER-encoded timestamp response with the specified contents. +// The fields in the response are populated as follows: +// +// The responder cert is used to populate the responder's name field, and the +// certificate itself is provided alongside the timestamp response signature. +func (t *Timestamp) CreateResponse(signingCert *x509.Certificate, priv crypto.Signer) ([]byte, error) { + messageImprint := getMessageImprint(t.HashAlgorithm, t.HashedMessage) + + tsaSerialNumber, err := generateTSASerialNumber() + if err != nil { + return nil, err + } + tstInfo, err := t.populateTSTInfo(messageImprint, t.Policy, tsaSerialNumber, signingCert) + if err != nil { + return nil, err + } + signature, err := t.generateSignedData(tstInfo, priv, signingCert) + if err != nil { + return nil, err + } + timestampRes := response{ + Status: pkiStatusInfo{ + Status: Granted, + }, + TimeStampToken: asn1.RawValue{FullBytes: signature}, + } + tspResponseBytes, err := asn1.Marshal(timestampRes) + if err != nil { + return nil, err + } + return tspResponseBytes, nil +} + +// CreateErrorResponse is used to create a response with a status other than granted or granted with modifications +func CreateErrorResponse(pkiStatus Status, pkiFailureInfo FailureInfo) ([]byte, error) { + var bs asn1.BitString + setFlag(&bs, int(pkiFailureInfo)) + + timestampRes := response{ + Status: pkiStatusInfo{ + Status: pkiStatus, + FailInfo: bs, + }, + } + tspResponseBytes, err := asn1.Marshal(timestampRes) + if err != nil { + return nil, err + } + return tspResponseBytes, nil +} + +func setFlag(bs *asn1.BitString, i int) { + for l := len(bs.Bytes); l < 4; l++ { + (*bs).Bytes = append((*bs).Bytes, byte(0)) + (*bs).BitLength = len((*bs).Bytes) * 8 + } + b := i / 8 + p := uint(7 - (i - 8*b)) + (*bs).Bytes[b] = (*bs).Bytes[b] | (1 << p) + bs.BitLength = asn1BitLength(bs.Bytes) + bs.Bytes = bs.Bytes[0 : (bs.BitLength/8)+1] +} + +func getMessageImprint(hashAlgorithm crypto.Hash, hashedMessage []byte) messageImprint { + messageImprint := messageImprint{ + HashAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: getOIDFromHashAlgorithm(hashAlgorithm), + Parameters: asn1.NullRawValue, + }, +
HashedMessage: hashedMessage, + } + return messageImprint +} + +func generateTSASerialNumber() (*big.Int, error) { + randomBytes := make([]byte, 20) + _, err := rand.Read(randomBytes) + if err != nil { + return nil, err + } + serialNumber := big.NewInt(0) + serialNumber = serialNumber.SetBytes(randomBytes) + return serialNumber, nil +} + +func (t *Timestamp) populateTSTInfo(messageImprint messageImprint, policyOID asn1.ObjectIdentifier, tsaSerialNumber *big.Int, tsaCert *x509.Certificate) ([]byte, error) { + dirGeneralName, err := asn1.Marshal(asn1.RawValue{Tag: 4, Class: 2, IsCompound: true, Bytes: tsaCert.RawSubject}) + if err != nil { + return nil, err + } + tstInfo := tstInfo{ + Version: 1, + Policy: policyOID, + MessageImprint: messageImprint, + SerialNumber: tsaSerialNumber, + Time: t.Time, + TSA: asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: dirGeneralName}, + Ordering: t.Ordering, + } + if t.Nonce != nil { + tstInfo.Nonce = t.Nonce + } + if t.Accuracy != 0 { + if t.Accuracy < time.Microsecond { + // Round up to 1 microsecond if accuracy is lower than 1 microsecond but greater than 0 nanoseconds + tstInfo.Accuracy.Microseconds = 1 + } else { + seconds := t.Accuracy.Truncate(time.Second) + tstInfo.Accuracy.Seconds = int64(seconds.Seconds()) + ms := (t.Accuracy - seconds).Truncate(time.Millisecond) + if ms != 0 { + tstInfo.Accuracy.Milliseconds = int64(ms.Milliseconds()) + } + microSeconds := (t.Accuracy - seconds - ms).Truncate(time.Microsecond) + if microSeconds != 0 { + tstInfo.Accuracy.Microseconds = int64(microSeconds.Microseconds()) + } + } + } + if len(t.ExtraExtensions) != 0 { + tstInfo.Extensions = t.ExtraExtensions + } + if t.Qualified && !oidInExtensions(asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 3}, t.ExtraExtensions) { + qcStatements := []qcStatement{{ + StatementID: asn1.ObjectIdentifier{0, 4, 0, 19422, 1, 1}, + }} + asn1QcStats, err := asn1.Marshal(qcStatements) + if err != nil { + return nil, err + } + tstInfo.Extensions = append(tstInfo.Extensions, pkix.Extension{ + Id: []int{1, 3, 6, 1, 5, 5, 7, 1, 3}, + Value: asn1QcStats, + Critical: false, + }) + } + tstInfoBytes, err := asn1.Marshal(tstInfo) + if err != nil { + return nil, err + } + return tstInfoBytes, nil +} + +func (t *Timestamp) populateSigningCertificateV2Ext(certificate *x509.Certificate) ([]byte, error) { + if !t.HashAlgorithm.Available() { + return nil, x509.ErrUnsupportedAlgorithm + } + if t.HashAlgorithm.HashFunc() == crypto.SHA1 { + return nil, fmt.Errorf("for SHA1 use ESSCertID instead of ESSCertIDv2") + } + + h := t.HashAlgorithm.HashFunc().New() + _, err := h.Write(certificate.Raw) + if err != nil { + return nil, fmt.Errorf("failed to create hash") + } + + var hashAlg pkix.AlgorithmIdentifier + + // HashAlgorithm defaults to SHA256 + if t.HashAlgorithm.HashFunc() != crypto.SHA256 { + hashAlg = pkix.AlgorithmIdentifier{ + Algorithm: hashOIDs[t.HashAlgorithm.HashFunc()], + Parameters: asn1.NullRawValue, + } + } + + signingCertificateV2 := signingCertificateV2{ + Certs: []essCertIDv2{{ + HashAlgorithm: hashAlg, + CertHash: h.Sum(nil), + IssuerSerial: issuerAndSerial{ + IssuerName: generalNames{ + Name: asn1.RawValue{Tag: 4, Class: 2, IsCompound: true, Bytes: certificate.RawIssuer}, + }, + SerialNumber: certificate.SerialNumber, + }, + }}, + } + signingCertV2Bytes, err := asn1.Marshal(signingCertificateV2) + if err != nil { + return nil, err + } + return signingCertV2Bytes, nil +} + +func (t *Timestamp) generateSignedData(tstInfo []byte, signer crypto.Signer, certificate *x509.Certificate)
([]byte, error) { + signedData, err := pkcs7.NewSignedData(tstInfo) + if err != nil { + return nil, err + } + signedData.SetDigestAlgorithm(pkcs7.OIDDigestAlgorithmSHA256) + signedData.SetContentType(asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 16, 1, 4}) + + signingCertV2Bytes, err := t.populateSigningCertificateV2Ext(certificate) + if err != nil { + return nil, err + } + + signerInfoConfig := pkcs7.SignerInfoConfig{ + ExtraSignedAttributes: []pkcs7.Attribute{ + { + Type: asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 16, 2, 47}, + Value: asn1.RawValue{FullBytes: signingCertV2Bytes}, + }, + }, + } + if !t.AddTSACertificate { + signerInfoConfig.SkipCertificates = true + } + + err = signedData.AddSigner(certificate, signer, signerInfoConfig) + if err != nil { + return nil, err + } + + signature, err := signedData.Finish() + if err != nil { + return nil, err + } + return signature, nil +} + +// copied from crypto/x509 package +// oidInExtensions reports whether an extension with the given oid exists in +// extensions. +func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool { + for _, e := range extensions { + if e.Id.Equal(oid) { + return true + } + } + return false +} diff --git a/vendor/github.com/dimchansky/utfbom/.gitignore b/vendor/github.com/dimchansky/utfbom/.gitignore new file mode 100644 index 000000000..d7ec5cebb --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/.gitignore @@ -0,0 +1,37 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +*.o +*.a + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.prof + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# Gogland +.idea/ \ No newline at end of file diff --git a/vendor/github.com/dimchansky/utfbom/.travis.yml b/vendor/github.com/dimchansky/utfbom/.travis.yml new file mode 100644 index 000000000..19312ee35 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/.travis.yml @@ -0,0 +1,29 @@ +language: go +sudo: false + +go: + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - 1.15.x + +cache: + directories: + - $HOME/.cache/go-build + - $HOME/gopath/pkg/mod + +env: + global: + - GO111MODULE=on + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + - go get golang.org/x/tools/cmd/goimports + - go get golang.org/x/lint/golint +script: + - gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. The following files have problems:\n $unformatted" && false) + - golint ./... # This won't break the build, just show warnings + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/dimchansky/utfbom/LICENSE b/vendor/github.com/dimchansky/utfbom/LICENSE new file mode 100644 index 000000000..6279cb87f --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2018-2020, Dmitrij Koniajev (dimchansky@gmail.com) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
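Before the next vendored package: the digitorus/timestamp package above exposes CreateRequest and ParseResponse, which together cover the client side of RFC 3161. A minimal sketch of a round-trip against a TSA, assuming a hypothetical endpoint (tsa.example.com is a placeholder) and with error handling reduced to log.Fatal:

```go
package main

import (
	"bytes"
	"crypto"
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"

	"github.com/digitorus/timestamp"
)

func main() {
	// Hash the payload and build a DER-encoded RFC 3161 request.
	der, err := timestamp.CreateRequest(strings.NewReader("payload"), &timestamp.RequestOptions{
		Hash:         crypto.SHA256,
		Certificates: true, // ask the TSA to return its signing certificate
	})
	if err != nil {
		log.Fatal(err)
	}

	// RFC 3161 section 3.4 defines this content type for HTTP transport;
	// the URL below is a placeholder, not a real TSA endpoint.
	resp, err := http.Post("https://tsa.example.com", "application/timestamp-query", bytes.NewReader(der))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	// ParseResponse checks the token signature when certificates are present.
	ts, err := timestamp.ParseResponse(body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("timestamped at:", ts.Time)
}
```

Requesting the certificate up front lets the parse step verify the token signature without a separate trust-store lookup.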
diff --git a/vendor/github.com/dimchansky/utfbom/README.md b/vendor/github.com/dimchansky/utfbom/README.md new file mode 100644 index 000000000..8ece28008 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/README.md @@ -0,0 +1,66 @@ +# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master) + +The package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and its removal as necessary. It can also return the encoding detected by the BOM. + +## Installation + + go get -u github.com/dimchansky/utfbom + +## Example + +```go +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" +) + +func main() { + trySkip([]byte("\xEF\xBB\xBFhello")) + trySkip([]byte("hello")) +} + +func trySkip(byteData []byte) { + fmt.Println("Input:", byteData) + + // just skip BOM + output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData))) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM skipping", output) + + // skip BOM and detect encoding + sr, enc := utfbom.Skip(bytes.NewReader(byteData)) + fmt.Printf("Detected encoding: %s\n", enc) + output, err = ioutil.ReadAll(sr) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM detection and skipping", output) + fmt.Println() +} +``` + +Output: + +``` +$ go run main.go +Input: [239 187 191 104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: UTF8 +ReadAll with BOM detection and skipping [104 101 108 108 111] + +Input: [104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: Unknown +ReadAll with BOM detection and skipping [104 101 108 108 111] +``` + + diff --git a/vendor/github.com/dimchansky/utfbom/utfbom.go b/vendor/github.com/dimchansky/utfbom/utfbom.go new file mode 100644 index 000000000..77a303e56 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/utfbom.go @@ -0,0 +1,192 @@ +// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and its removal as necessary. +// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader +// interface but provides automatic BOM checking and removal as necessary. +package utfbom + +import ( + "errors" + "io" +) + +// Encoding is a type for the detected UTF encoding. +type Encoding int + +// Constants to identify detected UTF encodings. +const ( + // Unknown encoding, returned when no BOM was detected + Unknown Encoding = iota + + // UTF8, BOM bytes: EF BB BF + UTF8 + + // UTF-16, big-endian, BOM bytes: FE FF + UTF16BigEndian + + // UTF-16, little-endian, BOM bytes: FF FE + UTF16LittleEndian + + // UTF-32, big-endian, BOM bytes: 00 00 FE FF + UTF32BigEndian + + // UTF-32, little-endian, BOM bytes: FF FE 00 00 + UTF32LittleEndian +) + +// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface.
+func (e Encoding) String() string { + switch e { + case UTF8: + return "UTF8" + case UTF16BigEndian: + return "UTF16BigEndian" + case UTF16LittleEndian: + return "UTF16LittleEndian" + case UTF32BigEndian: + return "UTF32BigEndian" + case UTF32LittleEndian: + return "UTF32LittleEndian" + default: + return "Unknown" + } +} + +const maxConsecutiveEmptyReads = 100 + +// Skip creates a Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +// It also returns the encoding detected by the BOM. +// If the detected encoding is not needed, you can call the SkipOnly function. +func Skip(rd io.Reader) (*Reader, Encoding) { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok { + return b, Unknown + } + + enc, left, err := detectUtf(rd) + return &Reader{ + rd: rd, + buf: left, + err: err, + }, enc +} + +// SkipOnly creates a Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +func SkipOnly(rd io.Reader) *Reader { + r, _ := Skip(rd) + return r +} + +// Reader implements automatic BOM (Unicode Byte Order Mark) checking and +// removing as necessary for an io.Reader object. +type Reader struct { + rd io.Reader // reader provided by the client + buf []byte // buffered data + err error // last error +} + +// Read is an implementation of the io.Reader interface. +// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. +func (r *Reader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + if r.buf == nil { + if r.err != nil { + return 0, r.readErr() + } + + return r.rd.Read(p) + } + + // copy as much as we can + n = copy(p, r.buf) + r.buf = nilIfEmpty(r.buf[n:]) + return n, nil +} + +func (r *Reader) readErr() error { + err := r.err + r.err = nil + return err +} + +var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") + +func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { + buf, err = readBOM(rd) + + if len(buf) >= 4 { + if isUTF32BigEndianBOM4(buf) { + return UTF32BigEndian, nilIfEmpty(buf[4:]), err + } + if isUTF32LittleEndianBOM4(buf) { + return UTF32LittleEndian, nilIfEmpty(buf[4:]), err + } + } + + if len(buf) > 2 && isUTF8BOM3(buf) { + return UTF8, nilIfEmpty(buf[3:]), err + } + + if (err != nil && err != io.EOF) || (len(buf) < 2) { + return Unknown, nilIfEmpty(buf), err + } + + if isUTF16BigEndianBOM2(buf) { + return UTF16BigEndian, nilIfEmpty(buf[2:]), err + } + if isUTF16LittleEndianBOM2(buf) { + return UTF16LittleEndian, nilIfEmpty(buf[2:]), err + } + + return Unknown, nilIfEmpty(buf), err +} + +func readBOM(rd io.Reader) (buf []byte, err error) { + const maxBOMSize = 4 + var bom [maxBOMSize]byte // used to read BOM + + // read as many bytes as possible + for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { + if n, err = rd.Read(bom[len(buf):]); n < 0 { + panic(errNegativeRead) + } + if n > 0 { + nEmpty = 0 + } else { + nEmpty++ + if nEmpty >= maxConsecutiveEmptyReads { + err = io.ErrNoProgress + } + } + } + return +} + +func isUTF32BigEndianBOM4(buf []byte) bool { + return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF +} + +func isUTF32LittleEndianBOM4(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 +} + +func isUTF8BOM3(buf []byte) bool { + return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF +} + +func isUTF16BigEndianBOM2(buf []byte) bool { + return buf[0] == 0xFE && buf[1] == 0xFF +} + 
+func isUTF16LittleEndianBOM2(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE +} + +func nilIfEmpty(buf []byte) (res []byte) { + if len(buf) > 0 { + res = buf + } + return +} diff --git a/vendor/github.com/docker/cli/cli/command/cli.go b/vendor/github.com/docker/cli/cli/command/cli.go new file mode 100644 index 000000000..2a61d5e48 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/cli.go @@ -0,0 +1,544 @@ +package command + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + dcontext "github.com/docker/cli/cli/context" + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/store" + "github.com/docker/cli/cli/debug" + cliflags "github.com/docker/cli/cli/flags" + manifeststore "github.com/docker/cli/cli/manifest/store" + registryclient "github.com/docker/cli/cli/registry/client" + "github.com/docker/cli/cli/streams" + "github.com/docker/cli/cli/trust" + "github.com/docker/cli/cli/version" + dopts "github.com/docker/cli/opts" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "github.com/spf13/cobra" + notaryclient "github.com/theupdateframework/notary/client" +) + +const defaultInitTimeout = 2 * time.Second + +// Streams is an interface which exposes the standard input and output streams +type Streams interface { + In() *streams.In + Out() *streams.Out + Err() io.Writer +} + +// Cli represents the docker command line client. +type Cli interface { + Client() client.APIClient + Out() *streams.Out + Err() io.Writer + In() *streams.In + SetIn(in *streams.In) + Apply(ops ...DockerCliOption) error + ConfigFile() *configfile.ConfigFile + ServerInfo() ServerInfo + NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) + DefaultVersion() string + CurrentVersion() string + ManifestStore() manifeststore.Store + RegistryClient(bool) registryclient.RegistryClient + ContentTrustEnabled() bool + BuildKitEnabled() (bool, error) + ContextStore() store.Store + CurrentContext() string + DockerEndpoint() docker.Endpoint +} + +// DockerCli is an instance of the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + configFile *configfile.ConfigFile + options *cliflags.ClientOptions + in *streams.In + out *streams.Out + err io.Writer + client client.APIClient + serverInfo ServerInfo + contentTrust bool + contextStore store.Store + currentContext string + init sync.Once + initErr error + dockerEndpoint docker.Endpoint + contextStoreConfig store.Config + initTimeout time.Duration +} + +// DefaultVersion returns api.DefaultVersion. +func (cli *DockerCli) DefaultVersion() string { + return api.DefaultVersion +} + +// CurrentVersion returns the API version currently negotiated, or the default +// version otherwise.
+func (cli *DockerCli) CurrentVersion() string { + _ = cli.initialize() + if cli.client == nil { + return api.DefaultVersion + } + return cli.client.ClientVersion() +} + +// Client returns the APIClient +func (cli *DockerCli) Client() client.APIClient { + if err := cli.initialize(); err != nil { + _, _ = fmt.Fprintf(cli.Err(), "Failed to initialize: %s\n", err) + os.Exit(1) + } + return cli.client +} + +// Out returns the writer used for stdout +func (cli *DockerCli) Out() *streams.Out { + return cli.out +} + +// Err returns the writer used for stderr +func (cli *DockerCli) Err() io.Writer { + return cli.err +} + +// SetIn sets the reader used for stdin +func (cli *DockerCli) SetIn(in *streams.In) { + cli.in = in +} + +// In returns the reader used for stdin +func (cli *DockerCli) In() *streams.In { + return cli.in +} + +// ShowHelp shows the command help. +func ShowHelp(err io.Writer) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + cmd.SetOut(err) + cmd.HelpFunc()(cmd, args) + return nil + } +} + +// ConfigFile returns the ConfigFile +func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { + // TODO(thaJeztah): when would this happen? Is this only in tests (where cli.Initialize() is not called first?) + if cli.configFile == nil { + cli.configFile = config.LoadDefaultConfigFile(cli.err) + } + return cli.configFile +} + +// ServerInfo returns the server version details for the host this client is +// connected to +func (cli *DockerCli) ServerInfo() ServerInfo { + _ = cli.initialize() + return cli.serverInfo +} + +// ContentTrustEnabled returns whether content trust has been enabled by an +// environment variable. +func (cli *DockerCli) ContentTrustEnabled() bool { + return cli.contentTrust +} + +// BuildKitEnabled reports whether BuildKit is enabled. +func (cli *DockerCli) BuildKitEnabled() (bool, error) { + // use DOCKER_BUILDKIT env var value if set and not empty + if v := os.Getenv("DOCKER_BUILDKIT"); v != "" { + enabled, err := strconv.ParseBool(v) + if err != nil { + return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value") + } + return enabled, nil + } + // if a builder alias is defined, we are using BuildKit + aliasMap := cli.ConfigFile().Aliases + if _, ok := aliasMap["builder"]; ok { + return true, nil + } + // otherwise, assume BuildKit is enabled, unless the server reports a Windows (WCOW) daemon + return cli.ServerInfo().OSType != "windows", nil +} + +// ManifestStore returns a store for local manifests +func (cli *DockerCli) ManifestStore() manifeststore.Store { + // TODO: support override default location from config file + return manifeststore.NewStore(filepath.Join(config.Dir(), "manifests")) +} + +// RegistryClient returns a client for communicating with a Docker distribution +// registry +func (cli *DockerCli) RegistryClient(allowInsecure bool) registryclient.RegistryClient { + resolver := func(ctx context.Context, index *registry.IndexInfo) types.AuthConfig { + return ResolveAuthConfig(ctx, cli, index) + } + return registryclient.NewRegistryClient(resolver, UserAgent(), allowInsecure) +} + +// InitializeOpt is the type of the functional options passed to DockerCli.Initialize +type InitializeOpt func(dockerCli *DockerCli) error + +// WithInitializeClient is passed to DockerCli.Initialize by callers who wish to set a particular API Client for use by the CLI.
+func WithInitializeClient(makeClient func(dockerCli *DockerCli) (client.APIClient, error)) InitializeOpt { + return func(dockerCli *DockerCli) error { + var err error + dockerCli.client, err = makeClient(dockerCli) + return err + } +} + +// Initialize the dockerCli runs initialization that must happen after command +// line flags are parsed. +func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...InitializeOpt) error { + for _, o := range ops { + if err := o(cli); err != nil { + return err + } + } + cliflags.SetLogLevel(opts.LogLevel) + + if opts.ConfigDir != "" { + config.SetDir(opts.ConfigDir) + } + + if opts.Debug { + debug.Enable() + } + if opts.Context != "" && len(opts.Hosts) > 0 { + return errors.New("conflicting options: either specify --host or --context, not both") + } + + cli.options = opts + cli.configFile = config.LoadDefaultConfigFile(cli.err) + cli.currentContext = resolveContextName(cli.options, cli.configFile) + cli.contextStore = &ContextStoreWithDefault{ + Store: store.New(config.ContextStoreDir(), cli.contextStoreConfig), + Resolver: func() (*DefaultContext, error) { + return ResolveDefaultContext(cli.options, cli.contextStoreConfig) + }, + } + return nil +} + +// NewAPIClientFromFlags creates a new APIClient from command line flags +func NewAPIClientFromFlags(opts *cliflags.ClientOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { + if opts.Context != "" && len(opts.Hosts) > 0 { + return nil, errors.New("conflicting options: either specify --host or --context, not both") + } + + storeConfig := DefaultContextStoreConfig() + contextStore := &ContextStoreWithDefault{ + Store: store.New(config.ContextStoreDir(), storeConfig), + Resolver: func() (*DefaultContext, error) { + return ResolveDefaultContext(opts, storeConfig) + }, + } + endpoint, err := resolveDockerEndpoint(contextStore, resolveContextName(opts, configFile)) + if err != nil { + return nil, errors.Wrap(err, "unable to resolve docker endpoint") + } + return newAPIClientFromEndpoint(endpoint, configFile) +} + +func newAPIClientFromEndpoint(ep docker.Endpoint, configFile *configfile.ConfigFile) (client.APIClient, error) { + clientOpts, err := ep.ClientOpts() + if err != nil { + return nil, err + } + customHeaders := make(map[string]string, len(configFile.HTTPHeaders)) + for k, v := range configFile.HTTPHeaders { + customHeaders[k] = v + } + customHeaders["User-Agent"] = UserAgent() + clientOpts = append(clientOpts, client.WithHTTPHeaders(customHeaders)) + return client.NewClientWithOpts(clientOpts...) 
+} + +func resolveDockerEndpoint(s store.Reader, contextName string) (docker.Endpoint, error) { + if s == nil { + return docker.Endpoint{}, fmt.Errorf("no context store initialized") + } + ctxMeta, err := s.GetMetadata(contextName) + if err != nil { + return docker.Endpoint{}, err + } + epMeta, err := docker.EndpointFromContext(ctxMeta) + if err != nil { + return docker.Endpoint{}, err + } + return docker.WithTLSData(s, contextName, epMeta) +} + +// Resolve the Docker endpoint for the default context (based on config, env vars and CLI flags) +func resolveDefaultDockerEndpoint(opts *cliflags.ClientOptions) (docker.Endpoint, error) { + host, err := getServerHost(opts.Hosts, opts.TLSOptions) + if err != nil { + return docker.Endpoint{}, err + } + + var ( + skipTLSVerify bool + tlsData *dcontext.TLSData + ) + + if opts.TLSOptions != nil { + skipTLSVerify = opts.TLSOptions.InsecureSkipVerify + tlsData, err = dcontext.TLSDataFromFiles(opts.TLSOptions.CAFile, opts.TLSOptions.CertFile, opts.TLSOptions.KeyFile) + if err != nil { + return docker.Endpoint{}, err + } + } + + return docker.Endpoint{ + EndpointMeta: docker.EndpointMeta{ + Host: host, + SkipTLSVerify: skipTLSVerify, + }, + TLSData: tlsData, + }, nil +} + +func (cli *DockerCli) getInitTimeout() time.Duration { + if cli.initTimeout != 0 { + return cli.initTimeout + } + return defaultInitTimeout +} + +func (cli *DockerCli) initializeFromClient() { + ctx := context.Background() + if !strings.HasPrefix(cli.dockerEndpoint.Host, "ssh://") { + // @FIXME context.WithTimeout doesn't work with connhelper / ssh connections + // time="2020-04-10T10:16:26Z" level=warning msg="commandConn.CloseWrite: commandconn: failed to wait: signal: killed" + var cancel func() + ctx, cancel = context.WithTimeout(ctx, cli.getInitTimeout()) + defer cancel() + } + + ping, err := cli.client.Ping(ctx) + if err != nil { + // Default to true if we fail to connect to daemon + cli.serverInfo = ServerInfo{HasExperimental: true} + + if ping.APIVersion != "" { + cli.client.NegotiateAPIVersionPing(ping) + } + return + } + + cli.serverInfo = ServerInfo{ + HasExperimental: ping.Experimental, + OSType: ping.OSType, + BuildkitVersion: ping.BuilderVersion, + SwarmStatus: ping.SwarmStatus, + } + cli.client.NegotiateAPIVersionPing(ping) +} + +// NotaryClient provides a Notary Repository to interact with signed metadata for an image +func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) { + return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...) +} + +// ContextStore returns the ContextStore +func (cli *DockerCli) ContextStore() store.Store { + return cli.contextStore +} + +// CurrentContext returns the current context name, based on flags, +// environment variables and the cli configuration file, in the following +// order of preference: +// +// 1. The "--context" command-line option. +// 2. The "DOCKER_CONTEXT" environment variable. +// 3. The current context as configured through the in "currentContext" +// field in the CLI configuration file ("~/.docker/config.json"). +// 4. If no context is configured, use the "default" context. +// +// # Fallbacks for backward-compatibility +// +// To preserve backward-compatibility with the "pre-contexts" behavior, +// the "default" context is used if: +// +// - The "--host" option is set +// - The "DOCKER_HOST" ([DefaultContextName]) environment variable is set +// to a non-empty value. 
+// +// In these cases, the default context is used, which uses the host as +// specified in "DOCKER_HOST", and TLS config from flags/env vars. +// +// Setting both the "--context" and "--host" flags is ambiguous and results +// in an error when the cli is started. +// +// CurrentContext does not validate if the given context exists or if it's +// valid; errors may occur when trying to use it. +func (cli *DockerCli) CurrentContext() string { + return cli.currentContext +} + +// CurrentContext returns the current context name, based on flags, +// environment variables and the cli configuration file. It does not +// validate if the given context exists or if it's valid; errors may +// occur when trying to use it. +// +// Refer to [DockerCli.CurrentContext] above for further details. +func resolveContextName(opts *cliflags.ClientOptions, config *configfile.ConfigFile) string { + if opts != nil && opts.Context != "" { + return opts.Context + } + if opts != nil && len(opts.Hosts) > 0 { + return DefaultContextName + } + if os.Getenv(client.EnvOverrideHost) != "" { + return DefaultContextName + } + if ctxName := os.Getenv("DOCKER_CONTEXT"); ctxName != "" { + return ctxName + } + if config != nil && config.CurrentContext != "" { + // We don't validate if this context exists: errors may occur when trying to use it. + return config.CurrentContext + } + return DefaultContextName +} + +// DockerEndpoint returns the current docker endpoint +func (cli *DockerCli) DockerEndpoint() docker.Endpoint { + if err := cli.initialize(); err != nil { + // Note that we're not terminating here, as this function may be used + // in cases where we're able to continue. + _, _ = fmt.Fprintf(cli.Err(), "%v\n", cli.initErr) + } + return cli.dockerEndpoint +} + +func (cli *DockerCli) getDockerEndPoint() (ep docker.Endpoint, err error) { + cn := cli.CurrentContext() + if cn == DefaultContextName { + return resolveDefaultDockerEndpoint(cli.options) + } + return resolveDockerEndpoint(cli.contextStore, cn) +} + +func (cli *DockerCli) initialize() error { + cli.init.Do(func() { + cli.dockerEndpoint, cli.initErr = cli.getDockerEndPoint() + if cli.initErr != nil { + cli.initErr = errors.Wrap(cli.initErr, "unable to resolve docker endpoint") + return + } + if cli.client == nil { + if cli.client, cli.initErr = newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile); cli.initErr != nil { + return + } + } + cli.initializeFromClient() + }) + return cli.initErr +} + +// Apply all the operation on the cli +func (cli *DockerCli) Apply(ops ...DockerCliOption) error { + for _, op := range ops { + if err := op(cli); err != nil { + return err + } + } + return nil +} + +// ServerInfo stores details about the supported features and platform of the +// server +type ServerInfo struct { + HasExperimental bool + OSType string + BuildkitVersion types.BuilderVersion + + // SwarmStatus provides information about the current swarm status of the + // engine, obtained from the "Swarm" header in the API response. + // + // It can be a nil struct if the API version does not provide this header + // in the ping response, or if an error occurred, in which case the client + // should use other ways to get the current swarm status, such as the /swarm + // endpoint. + SwarmStatus *swarm.Status +} + +// NewDockerCli returns a DockerCli instance with all operators applied on it. +// It applies by default the standard streams, and the content trust from +// environment. 
+func NewDockerCli(ops ...DockerCliOption) (*DockerCli, error) { + defaultOps := []DockerCliOption{ + WithContentTrustFromEnv(), + WithDefaultContextStoreConfig(), + WithStandardStreams(), + } + ops = append(defaultOps, ops...) + + cli := &DockerCli{} + if err := cli.Apply(ops...); err != nil { + return nil, err + } + return cli, nil +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) { + var host string + switch len(hosts) { + case 0: + host = os.Getenv(client.EnvOverrideHost) + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + return dopts.ParseHost(tlsOptions != nil, host) +} + +// UserAgent returns the user agent string used for making API requests +func UserAgent() string { + return "Docker-Client/" + version.Version + " (" + runtime.GOOS + ")" +} + +var defaultStoreEndpoints = []store.NamedTypeGetter{ + store.EndpointTypeGetter(docker.DockerEndpoint, func() interface{} { return &docker.EndpointMeta{} }), +} + +// RegisterDefaultStoreEndpoints registers a new named endpoint +// metadata type with the default context store config, so that +// endpoint will be supported by stores using the config returned by +// DefaultContextStoreConfig. +func RegisterDefaultStoreEndpoints(ep ...store.NamedTypeGetter) { + defaultStoreEndpoints = append(defaultStoreEndpoints, ep...) +} + +// DefaultContextStoreConfig returns a new store.Config with the default set of endpoints configured. +func DefaultContextStoreConfig() store.Config { + return store.NewConfig( + func() interface{} { return &DockerContext{} }, + defaultStoreEndpoints..., + ) +} diff --git a/vendor/github.com/docker/cli/cli/command/cli_options.go b/vendor/github.com/docker/cli/cli/command/cli_options.go new file mode 100644 index 000000000..d714947cb --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/cli_options.go @@ -0,0 +1,97 @@ +package command + +import ( + "io" + "os" + "strconv" + + "github.com/docker/cli/cli/streams" + "github.com/docker/docker/client" + "github.com/moby/term" +) + +// DockerCliOption applies a modification on a DockerCli. +type DockerCliOption func(cli *DockerCli) error + +// WithStandardStreams sets a cli in, out and err streams with the standard streams. +func WithStandardStreams() DockerCliOption { + return func(cli *DockerCli) error { + // Set terminal emulation based on platform as required. + stdin, stdout, stderr := term.StdStreams() + cli.in = streams.NewIn(stdin) + cli.out = streams.NewOut(stdout) + cli.err = stderr + return nil + } +} + +// WithCombinedStreams uses the same stream for the output and error streams. +func WithCombinedStreams(combined io.Writer) DockerCliOption { + return func(cli *DockerCli) error { + cli.out = streams.NewOut(combined) + cli.err = combined + return nil + } +} + +// WithInputStream sets a cli input stream. +func WithInputStream(in io.ReadCloser) DockerCliOption { + return func(cli *DockerCli) error { + cli.in = streams.NewIn(in) + return nil + } +} + +// WithOutputStream sets a cli output stream. +func WithOutputStream(out io.Writer) DockerCliOption { + return func(cli *DockerCli) error { + cli.out = streams.NewOut(out) + return nil + } +} + +// WithErrorStream sets a cli error stream. +func WithErrorStream(err io.Writer) DockerCliOption { + return func(cli *DockerCli) error { + cli.err = err + return nil + } +} + +// WithContentTrustFromEnv enables content trust on a cli from environment variable DOCKER_CONTENT_TRUST value. 
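+// The value is parsed with strconv.ParseBool, and, per the function body
+// below, any value that fails to parse is treated as true, so only values
+// such as "0" or "false" disable content trust. An editorial aside, not
+// upstream documentation:
+//
+//	strconv.ParseBool("1")      // true, nil  → trust enabled
+//	strconv.ParseBool("false")  // false, nil → trust disabled
+//	strconv.ParseBool("banana") // false, err → treated as enabled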
+func WithContentTrustFromEnv() DockerCliOption { + return func(cli *DockerCli) error { + cli.contentTrust = false + if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { + if t, err := strconv.ParseBool(e); t || err != nil { + // treat any other value as true + cli.contentTrust = true + } + } + return nil + } +} + +// WithContentTrust enables content trust on a cli. +func WithContentTrust(enabled bool) DockerCliOption { + return func(cli *DockerCli) error { + cli.contentTrust = enabled + return nil + } +} + +// WithDefaultContextStoreConfig configures the cli to use the default context store configuration. +func WithDefaultContextStoreConfig() DockerCliOption { + return func(cli *DockerCli) error { + cli.contextStoreConfig = DefaultContextStoreConfig() + return nil + } +} + +// WithAPIClient configures the cli to use the given API client. +func WithAPIClient(c client.APIClient) DockerCliOption { + return func(cli *DockerCli) error { + cli.client = c + return nil + } +} diff --git a/vendor/github.com/docker/cli/cli/command/context.go b/vendor/github.com/docker/cli/cli/command/context.go new file mode 100644 index 000000000..29a2be860 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/context.go @@ -0,0 +1,65 @@ +package command + +import ( + "encoding/json" + "errors" + + "github.com/docker/cli/cli/context/store" +) + +// DockerContext is a typed representation of what we put in Context metadata +type DockerContext struct { + Description string + AdditionalFields map[string]interface{} +} + +// MarshalJSON implements custom JSON marshalling +func (dc DockerContext) MarshalJSON() ([]byte, error) { + s := map[string]interface{}{} + if dc.Description != "" { + s["Description"] = dc.Description + } + if dc.AdditionalFields != nil { + for k, v := range dc.AdditionalFields { + s[k] = v + } + } + return json.Marshal(s) +} + +// UnmarshalJSON implements custom JSON marshalling +func (dc *DockerContext) UnmarshalJSON(payload []byte) error { + var data map[string]interface{} + if err := json.Unmarshal(payload, &data); err != nil { + return err + } + for k, v := range data { + switch k { + case "Description": + dc.Description = v.(string) + default: + if dc.AdditionalFields == nil { + dc.AdditionalFields = make(map[string]interface{}) + } + dc.AdditionalFields[k] = v + } + } + return nil +} + +// GetDockerContext extracts metadata from stored context metadata +func GetDockerContext(storeMetadata store.Metadata) (DockerContext, error) { + if storeMetadata.Metadata == nil { + // can happen if we save endpoints before assigning a context metadata + // it is totally valid, and we should return a default initialized value + return DockerContext{}, nil + } + res, ok := storeMetadata.Metadata.(DockerContext) + if !ok { + return DockerContext{}, errors.New("context metadata is not a valid DockerContext") + } + if storeMetadata.Name == DefaultContextName { + res.Description = "Current DOCKER_HOST based configuration" + } + return res, nil +} diff --git a/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go b/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go new file mode 100644 index 000000000..95f89fc6f --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/defaultcontextstore.go @@ -0,0 +1,191 @@ +package command + +import ( + "github.com/docker/cli/cli/context/docker" + "github.com/docker/cli/cli/context/store" + cliflags "github.com/docker/cli/cli/flags" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +const ( + // DefaultContextName is the name 
reserved for the default context (config & env based) + DefaultContextName = "default" +) + +// DefaultContext contains the default context data for all endpoints +type DefaultContext struct { + Meta store.Metadata + TLS store.ContextTLSData +} + +// DefaultContextResolver is a function which resolves the default context base on the configuration and the env variables +type DefaultContextResolver func() (*DefaultContext, error) + +// ContextStoreWithDefault implements the store.Store interface with a support for the default context +type ContextStoreWithDefault struct { + store.Store + Resolver DefaultContextResolver +} + +// EndpointDefaultResolver is implemented by any EndpointMeta object +// which wants to be able to populate the store with whatever their default is. +type EndpointDefaultResolver interface { + // ResolveDefault returns values suitable for storing in store.Metadata.Endpoints + // and store.ContextTLSData.Endpoints. + // + // An error is only returned for something fatal, not simply + // the lack of a default (e.g. because the config file which + // would contain it is missing). If there is no default then + // returns nil, nil, nil. + ResolveDefault() (interface{}, *store.EndpointTLSData, error) +} + +// ResolveDefaultContext creates a Metadata for the current CLI invocation parameters +func ResolveDefaultContext(opts *cliflags.ClientOptions, config store.Config) (*DefaultContext, error) { + contextTLSData := store.ContextTLSData{ + Endpoints: make(map[string]store.EndpointTLSData), + } + contextMetadata := store.Metadata{ + Endpoints: make(map[string]interface{}), + Metadata: DockerContext{ + Description: "", + }, + Name: DefaultContextName, + } + + dockerEP, err := resolveDefaultDockerEndpoint(opts) + if err != nil { + return nil, err + } + contextMetadata.Endpoints[docker.DockerEndpoint] = dockerEP.EndpointMeta + if dockerEP.TLSData != nil { + contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerEP.TLSData.ToStoreTLSData() + } + + if err := config.ForeachEndpointType(func(n string, get store.TypeGetter) error { + if n == docker.DockerEndpoint { // handled above + return nil + } + ep := get() + if i, ok := ep.(EndpointDefaultResolver); ok { + meta, tls, err := i.ResolveDefault() + if err != nil { + return err + } + if meta == nil { + return nil + } + contextMetadata.Endpoints[n] = meta + if tls != nil { + contextTLSData.Endpoints[n] = *tls + } + } + // Nothing to be done + return nil + }); err != nil { + return nil, err + } + + return &DefaultContext{Meta: contextMetadata, TLS: contextTLSData}, nil +} + +// List implements store.Store's List +func (s *ContextStoreWithDefault) List() ([]store.Metadata, error) { + contextList, err := s.Store.List() + if err != nil { + return nil, err + } + defaultContext, err := s.Resolver() + if err != nil { + return nil, err + } + return append(contextList, defaultContext.Meta), nil +} + +// CreateOrUpdate is not allowed for the default context and fails +func (s *ContextStoreWithDefault) CreateOrUpdate(meta store.Metadata) error { + if meta.Name == DefaultContextName { + return errdefs.InvalidParameter(errors.New("default context cannot be created nor updated")) + } + return s.Store.CreateOrUpdate(meta) +} + +// Remove is not allowed for the default context and fails +func (s *ContextStoreWithDefault) Remove(name string) error { + if name == DefaultContextName { + return errdefs.InvalidParameter(errors.New("default context cannot be removed")) + } + return s.Store.Remove(name) +} + +// GetMetadata implements store.Store's 
GetMetadata +func (s *ContextStoreWithDefault) GetMetadata(name string) (store.Metadata, error) { + if name == DefaultContextName { + defaultContext, err := s.Resolver() + if err != nil { + return store.Metadata{}, err + } + return defaultContext.Meta, nil + } + return s.Store.GetMetadata(name) +} + +// ResetTLSMaterial is not implemented for default context and fails +func (s *ContextStoreWithDefault) ResetTLSMaterial(name string, data *store.ContextTLSData) error { + if name == DefaultContextName { + return errdefs.InvalidParameter(errors.New("default context cannot be edited")) + } + return s.Store.ResetTLSMaterial(name, data) +} + +// ResetEndpointTLSMaterial is not implemented for default context and fails +func (s *ContextStoreWithDefault) ResetEndpointTLSMaterial(contextName string, endpointName string, data *store.EndpointTLSData) error { + if contextName == DefaultContextName { + return errdefs.InvalidParameter(errors.New("default context cannot be edited")) + } + return s.Store.ResetEndpointTLSMaterial(contextName, endpointName, data) +} + +// ListTLSFiles implements store.Store's ListTLSFiles +func (s *ContextStoreWithDefault) ListTLSFiles(name string) (map[string]store.EndpointFiles, error) { + if name == DefaultContextName { + defaultContext, err := s.Resolver() + if err != nil { + return nil, err + } + tlsfiles := make(map[string]store.EndpointFiles) + for epName, epTLSData := range defaultContext.TLS.Endpoints { + var files store.EndpointFiles + for filename := range epTLSData.Files { + files = append(files, filename) + } + tlsfiles[epName] = files + } + return tlsfiles, nil + } + return s.Store.ListTLSFiles(name) +} + +// GetTLSData implements store.Store's GetTLSData +func (s *ContextStoreWithDefault) GetTLSData(contextName, endpointName, fileName string) ([]byte, error) { + if contextName == DefaultContextName { + defaultContext, err := s.Resolver() + if err != nil { + return nil, err + } + if defaultContext.TLS.Endpoints[endpointName].Files[fileName] == nil { + return nil, errdefs.NotFound(errors.Errorf("TLS data for %s/%s/%s does not exist", DefaultContextName, endpointName, fileName)) + } + return defaultContext.TLS.Endpoints[endpointName].Files[fileName], nil + } + return s.Store.GetTLSData(contextName, endpointName, fileName) +} + +// GetStorageInfo implements store.Store's GetStorageInfo +func (s *ContextStoreWithDefault) GetStorageInfo(contextName string) store.StorageInfo { + if contextName == DefaultContextName { + return store.StorageInfo{MetadataPath: "", TLSPath: ""} + } + return s.Store.GetStorageInfo(contextName) +} diff --git a/vendor/github.com/docker/cli/cli/command/events_utils.go b/vendor/github.com/docker/cli/cli/command/events_utils.go new file mode 100644 index 000000000..16d76892a --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/events_utils.go @@ -0,0 +1,47 @@ +package command + +import ( + "sync" + + eventtypes "github.com/docker/docker/api/types/events" + "github.com/sirupsen/logrus" +) + +// EventHandler is abstract interface for user to customize +// own handle functions of each type of events +type EventHandler interface { + Handle(action string, h func(eventtypes.Message)) + Watch(c <-chan eventtypes.Message) +} + +// InitEventHandler initializes and returns an EventHandler +func InitEventHandler() EventHandler { + return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} +} + +type eventHandler struct { + handlers map[string]func(eventtypes.Message) + mu sync.Mutex +} + +func (w *eventHandler) Handle(action 
string, h func(eventtypes.Message)) { + w.mu.Lock() + w.handlers[action] = h + w.mu.Unlock() +} + +// Watch ranges over the passed in event chan and processes the events based on the +// handlers created for a given action. +// To stop watching, close the event chan. +func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/vendor/github.com/docker/cli/cli/command/registry.go b/vendor/github.com/docker/cli/cli/command/registry.go new file mode 100644 index 000000000..90cc08c53 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/registry.go @@ -0,0 +1,198 @@ +package command + +import ( + "bufio" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + + configtypes "github.com/docker/cli/cli/config/types" + "github.com/docker/cli/cli/streams" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/registry" + "github.com/moby/term" + "github.com/pkg/errors" +) + +// ElectAuthServer returns the default registry to use. +// +// Deprecated: use [registry.IndexServer] instead. +func ElectAuthServer(_ context.Context, _ Cli) string { + return registry.IndexServer +} + +// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info +// for the given command. +func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + isDefaultRegistry := indexServer == registry.IndexServer + authConfig, err := GetDefaultAuthConfig(cli, true, indexServer, isDefaultRegistry) + if err != nil { + fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err) + } + err = ConfigureAuth(cli, "", "", &authConfig, isDefaultRegistry) + if err != nil { + return "", err + } + return EncodeAuthToBase64(authConfig) + } +} + +// ResolveAuthConfig returns auth-config for the given registry from the +// credential-store. It returns an empty AuthConfig if no credentials were +// found. +// +// It is similar to [registry.ResolveAuthConfig], but uses the credentials- +// store, instead of looking up credentials from a map. 
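+//
+// An editorial aside, not part of the upstream documentation: for an image
+// hosted on Docker Hub (e.g. "alpine"), the index is the official one, so
+// credentials are looked up under registry.IndexServer
+// ("https://index.docker.io/v1/"); for an image such as
+// "myregistry.example.com:5000/app", they are looked up under the registry
+// host name. "myregistry.example.com" is a placeholder, not a real registry.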
+func ResolveAuthConfig(_ context.Context, cli Cli, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = registry.IndexServer + } + + a, _ := cli.ConfigFile().GetAuthConfig(configKey) + return types.AuthConfig(a) +} + +// GetDefaultAuthConfig gets the default auth config given a serverAddress +// If credentials for given serverAddress exists in the credential store, the configuration will be populated with values in it +func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { + if !isDefaultRegistry { + serverAddress = registry.ConvertToHostname(serverAddress) + } + authconfig := configtypes.AuthConfig{} + var err error + if checkCredStore { + authconfig, err = cli.ConfigFile().GetAuthConfig(serverAddress) + if err != nil { + return types.AuthConfig{ + ServerAddress: serverAddress, + }, err + } + } + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + res := types.AuthConfig(authconfig) + return res, nil +} + +// ConfigureAuth handles prompting of user's username and password if needed +func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *types.AuthConfig, isDefaultRegistry bool) error { + // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.SetIn(streams.NewIn(os.Stdin)) + } + + // Some links documenting this: + // - https://code.google.com/archive/p/mintty/issues/56 + // - https://github.com/docker/docker/issues/15272 + // - https://mintty.github.io/ (compatibility) + // Linux will hit this if you attempt `cat | docker login`, and Windows + // will hit this if you attempt docker login from mintty where stdin + // is a pipe, not a character based console. + if flPassword == "" && !cli.In().IsTerminal() { + return errors.Errorf("Error: Cannot perform an interactive login from a non TTY device") + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is a default registry (docker hub), then display the following message. + fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. 
If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + promptWithDefault(cli.Out(), "Username", authconfig.Username) + flUser = readInput(cli.In(), cli.Out()) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + if flUser == "" { + return errors.Errorf("Error: Non-null Username Required") + } + if flPassword == "" { + oldState, err := term.SaveState(cli.In().FD()) + if err != nil { + return err + } + fmt.Fprintf(cli.Out(), "Password: ") + term.DisableEcho(cli.In().FD(), oldState) + + flPassword = readInput(cli.In(), cli.Out()) + fmt.Fprint(cli.Out(), "\n") + + term.RestoreTerminal(cli.In().FD(), oldState) + if flPassword == "" { + return errors.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + + return nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +func promptWithDefault(out io.Writer, prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(out, "%s: ", prompt) + } else { + fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) + } +} + +// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image +func RetrieveAuthTokenFromImage(ctx context.Context, cli Cli, image string) (string, error) { + // Retrieve encoded auth token from the image reference + authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) + if err != nil { + return "", err + } + encodedAuth, err := EncodeAuthToBase64(authConfig) + if err != nil { + return "", err + } + return encodedAuth, nil +} + +// resolveAuthConfigFromImage retrieves that AuthConfig using the image string +func resolveAuthConfigFromImage(ctx context.Context, cli Cli, image string) (types.AuthConfig, error) { + registryRef, err := reference.ParseNormalizedNamed(image) + if err != nil { + return types.AuthConfig{}, err + } + repoInfo, err := registry.ParseRepositoryInfo(registryRef) + if err != nil { + return types.AuthConfig{}, err + } + return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil +} diff --git a/vendor/github.com/docker/cli/cli/command/streams.go b/vendor/github.com/docker/cli/cli/command/streams.go new file mode 100644 index 000000000..43dc6cd00 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/command/streams.go @@ -0,0 +1,32 @@ +package command + +import ( + "io" + + "github.com/docker/cli/cli/streams" +) + +// InStream is an input stream used by the DockerCli to read user input +// +// Deprecated: Use [streams.In] instead. +type InStream = streams.In + +// OutStream is an output stream used by the DockerCli to write normal program +// output. +// +// Deprecated: Use [streams.Out] instead. +type OutStream = streams.Out + +// NewInStream returns a new [streams.In] from an [io.ReadCloser]. +// +// Deprecated: Use [streams.NewIn] instead. +func NewInStream(in io.ReadCloser) *streams.In { + return streams.NewIn(in) +} + +// NewOutStream returns a new [streams.Out] from an [io.Writer]. +// +// Deprecated: Use [streams.NewOut] instead. 
+func NewOutStream(out io.Writer) *streams.Out {
+	return streams.NewOut(out)
+}
diff --git a/vendor/github.com/docker/cli/cli/command/trust.go b/vendor/github.com/docker/cli/cli/command/trust.go
new file mode 100644
index 000000000..65f240858
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/trust.go
@@ -0,0 +1,15 @@
+package command
+
+import (
+	"github.com/spf13/pflag"
+)
+
+// AddTrustVerificationFlags adds content trust flags to the provided flagset
+func AddTrustVerificationFlags(fs *pflag.FlagSet, v *bool, trusted bool) {
+	fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image verification")
+}
+
+// AddTrustSigningFlags adds "signing" flags to the provided flagset
+func AddTrustSigningFlags(fs *pflag.FlagSet, v *bool, trusted bool) {
+	fs.BoolVar(v, "disable-content-trust", !trusted, "Skip image signing")
+}
diff --git a/vendor/github.com/docker/cli/cli/command/utils.go b/vendor/github.com/docker/cli/cli/command/utils.go
new file mode 100644
index 000000000..753f428aa
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/command/utils.go
@@ -0,0 +1,197 @@
+package command
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/docker/cli/cli/streams"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/moby/sys/sequential"
+	"github.com/pkg/errors"
+	"github.com/spf13/pflag"
+)
+
+// CopyToFile writes the content of the reader to the specified file
+func CopyToFile(outfile string, r io.Reader) error {
+	// We use sequential file access here to avoid depleting the standby list
+	// on Windows. On Linux, this is a call directly to os.CreateTemp
+	tmpFile, err := sequential.CreateTemp(filepath.Dir(outfile), ".docker_temp_")
+	if err != nil {
+		return err
+	}
+
+	tmpPath := tmpFile.Name()
+
+	_, err = io.Copy(tmpFile, r)
+	tmpFile.Close()
+
+	if err != nil {
+		os.Remove(tmpPath)
+		return err
+	}
+
+	if err = os.Rename(tmpPath, outfile); err != nil {
+		os.Remove(tmpPath)
+		return err
+	}
+
+	return nil
+}
+
+// capitalizeFirst capitalizes the first character of string
+func capitalizeFirst(s string) string {
+	switch l := len(s); l {
+	case 0:
+		return s
+	case 1:
+		return strings.ToLower(s)
+	default:
+		return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:])
+	}
+}
+
+// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter.
+func PrettyPrint(i interface{}) string {
+	switch t := i.(type) {
+	case nil:
+		return "None"
+	case string:
+		return capitalizeFirst(t)
+	default:
+		return capitalizeFirst(fmt.Sprintf("%s", t))
+	}
+}
+
+// PromptForConfirmation requests and checks confirmation from the user.
+// This will display the provided message followed by ' [y/N] '. If the user
+// inputs 'y' or 'Y' it returns true, otherwise false. If no message is
+// provided, "Are you sure you want to proceed? [y/N] " will be used instead.
+func PromptForConfirmation(ins io.Reader, outs io.Writer, message string) bool {
+	if message == "" {
+		message = "Are you sure you want to proceed?"
+	}
+	message += " [y/N] "
+
+	_, _ = fmt.Fprint(outs, message)
+
+	// On Windows, force the use of the regular OS stdin stream.
+	if runtime.GOOS == "windows" {
+		ins = streams.NewIn(os.Stdin)
+	}
+
+	reader := bufio.NewReader(ins)
+	answer, _, _ := reader.ReadLine()
+	return strings.ToLower(string(answer)) == "y"
+}
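+
+// An editorial aside, not part of the upstream source: a typical call site
+// wires the prompt to the CLI streams, e.g.
+//
+//	if !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), "") {
+//		return nil // user declined; only "y" or "Y" proceeds
+//	}
+//
+// where dockerCli is a placeholder for a command.Cli implementation.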
+
+// PruneFilters returns consolidated prune filters obtained from config.json and cli
+func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args {
+	if dockerCli.ConfigFile() == nil {
+		return pruneFilters
+	}
+	for _, f := range dockerCli.ConfigFile().PruneFilters {
+		k, v, ok := strings.Cut(f, "=")
+		if !ok {
+			continue
+		}
+		if k == "label" {
+			// The CLI label filter supersedes config.json.
+			// If the CLI label filter conflicts with config.json,
+			// skip adding the label! filter from config.json.
+			if pruneFilters.Contains("label!") && pruneFilters.ExactMatch("label!", v) {
+				continue
+			}
+		} else if k == "label!" {
+			// The CLI label! filter supersedes config.json.
+			// If the CLI label! filter conflicts with config.json,
+			// skip adding the label filter from config.json.
+			if pruneFilters.Contains("label") && pruneFilters.ExactMatch("label", v) {
+				continue
+			}
+		}
+		pruneFilters.Add(k, v)
+	}
+
+	return pruneFilters
+}
+
+// AddPlatformFlag adds `platform` to a set of flags for API version 1.32 and later.
+func AddPlatformFlag(flags *pflag.FlagSet, target *string) {
+	flags.StringVar(target, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable")
+	flags.SetAnnotation("platform", "version", []string{"1.32"})
+}
+
+// ValidateOutputPath validates the output paths of the `export` and `save` commands.
+func ValidateOutputPath(path string) error {
+	dir := filepath.Dir(filepath.Clean(path))
+	if dir != "" && dir != "." {
+		if _, err := os.Stat(dir); os.IsNotExist(err) {
+			return errors.Errorf("invalid output path: directory %q does not exist", dir)
+		}
+	}
+	// check whether `path` points to a regular file
+	// (if the path exists and doesn't point to a directory)
+	if fileInfo, err := os.Stat(path); !os.IsNotExist(err) {
+		if err != nil {
+			return err
+		}
+
+		if fileInfo.Mode().IsDir() || fileInfo.Mode().IsRegular() {
+			return nil
+		}
+
+		if err := ValidateOutputPathFileMode(fileInfo.Mode()); err != nil {
+			return errors.Wrapf(err, fmt.Sprintf("invalid output path: %q must be a directory or a regular file", path))
+		}
+	}
+	return nil
+}
+
+// ValidateOutputPathFileMode validates the output paths of the `cp` command and serves as a
+// helper to `ValidateOutputPath`
+func ValidateOutputPathFileMode(fileMode os.FileMode) error {
+	switch {
+	case fileMode&os.ModeDevice != 0:
+		return errors.New("got a device")
+	case fileMode&os.ModeIrregular != 0:
+		return errors.New("got an irregular file")
+	}
+	return nil
+}
+
+func stringSliceIndex(s, subs []string) int {
+	j := 0
+	if len(subs) > 0 {
+		for i, x := range s {
+			if j < len(subs) && subs[j] == x {
+				j++
+			} else {
+				j = 0
+			}
+			if len(subs) == j {
+				return i + 1 - j
+			}
+		}
+	}
+	return -1
+}
+
+// StringSliceReplaceAt replaces the sub-slice old, with the sub-slice new, in the string
+// slice s, returning a new slice and a boolean indicating if the replacement happened.
+// requireIndex is the index at which old needs to be found (or -1 to disregard that).
+func StringSliceReplaceAt(s, old, new []string, requireIndex int) ([]string, bool) {
+	idx := stringSliceIndex(s, old)
+	if (requireIndex != -1 && requireIndex != idx) || idx == -1 {
+		return s, false
+	}
+	out := append([]string{}, s[:idx]...)
+	out = append(out, new...)
+	out = append(out, s[idx+len(old):]...)
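+	// An editorial aside, not part of the upstream source: for example,
+	// StringSliceReplaceAt([]string{"a", "b", "c"}, []string{"b"}, []string{"x", "y"}, -1)
+	// returns ([]string{"a", "x", "y", "c"}, true).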
+ return out, true +} diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go new file mode 100644 index 000000000..a0b035c92 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go @@ -0,0 +1,287 @@ +// Package commandconn provides a net.Conn implementation that can be used for +// proxying (or emulating) stream via a custom command. +// +// For example, to provide an http.Client that can connect to a Docker daemon +// running in a Docker container ("DIND"): +// +// httpClient := &http.Client{ +// Transport: &http.Transport{ +// DialContext: func(ctx context.Context, _network, _addr string) (net.Conn, error) { +// return commandconn.New(ctx, "docker", "exec", "-it", containerID, "docker", "system", "dial-stdio") +// }, +// }, +// } +package commandconn + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "os" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" +) + +// New returns net.Conn +func New(ctx context.Context, cmd string, args ...string) (net.Conn, error) { + var ( + c commandConn + err error + ) + c.cmd = exec.Command(cmd, args...) + // we assume that args never contains sensitive information + logrus.Debugf("commandconn: starting %s with %v", cmd, args) + c.cmd.Env = os.Environ() + c.cmd.SysProcAttr = &syscall.SysProcAttr{} + setPdeathsig(c.cmd) + createSession(c.cmd) + c.stdin, err = c.cmd.StdinPipe() + if err != nil { + return nil, err + } + c.stdout, err = c.cmd.StdoutPipe() + if err != nil { + return nil, err + } + c.cmd.Stderr = &stderrWriter{ + stderrMu: &c.stderrMu, + stderr: &c.stderr, + debugPrefix: fmt.Sprintf("commandconn (%s):", cmd), + } + c.localAddr = dummyAddr{network: "dummy", s: "dummy-0"} + c.remoteAddr = dummyAddr{network: "dummy", s: "dummy-1"} + return &c, c.cmd.Start() +} + +// commandConn implements net.Conn +type commandConn struct { + cmd *exec.Cmd + cmdExited bool + cmdWaitErr error + cmdMutex sync.Mutex + stdin io.WriteCloser + stdout io.ReadCloser + stderrMu sync.Mutex + stderr bytes.Buffer + stdioClosedMu sync.Mutex // for stdinClosed and stdoutClosed + stdinClosed bool + stdoutClosed bool + localAddr net.Addr + remoteAddr net.Addr +} + +// killIfStdioClosed kills the cmd if both stdin and stdout are closed. +func (c *commandConn) killIfStdioClosed() error { + c.stdioClosedMu.Lock() + stdioClosed := c.stdoutClosed && c.stdinClosed + c.stdioClosedMu.Unlock() + if !stdioClosed { + return nil + } + return c.kill() +} + +// killAndWait tries sending SIGTERM to the process before sending SIGKILL. +func killAndWait(cmd *exec.Cmd) error { + var werr error + if runtime.GOOS != "windows" { + werrCh := make(chan error) + go func() { werrCh <- cmd.Wait() }() + cmd.Process.Signal(syscall.SIGTERM) + select { + case werr = <-werrCh: + case <-time.After(3 * time.Second): + cmd.Process.Kill() + werr = <-werrCh + } + } else { + cmd.Process.Kill() + werr = cmd.Wait() + } + return werr +} + +// kill returns nil if the command terminated, regardless to the exit status. 
+func (c *commandConn) kill() error { + var werr error + c.cmdMutex.Lock() + if c.cmdExited { + werr = c.cmdWaitErr + } else { + werr = killAndWait(c.cmd) + c.cmdWaitErr = werr + c.cmdExited = true + } + c.cmdMutex.Unlock() + if werr == nil { + return nil + } + wExitErr, ok := werr.(*exec.ExitError) + if ok { + if wExitErr.ProcessState.Exited() { + return nil + } + } + return errors.Wrapf(werr, "commandconn: failed to wait") +} + +func (c *commandConn) onEOF(eof error) error { + // when we got EOF, the command is going to be terminated + var werr error + c.cmdMutex.Lock() + if c.cmdExited { + werr = c.cmdWaitErr + } else { + werrCh := make(chan error) + go func() { werrCh <- c.cmd.Wait() }() + select { + case werr = <-werrCh: + c.cmdWaitErr = werr + c.cmdExited = true + case <-time.After(10 * time.Second): + c.cmdMutex.Unlock() + c.stderrMu.Lock() + stderr := c.stderr.String() + c.stderrMu.Unlock() + return errors.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, eof, stderr) + } + } + c.cmdMutex.Unlock() + if werr == nil { + return eof + } + c.stderrMu.Lock() + stderr := c.stderr.String() + c.stderrMu.Unlock() + return errors.Errorf("command %v has exited with %v, please make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%s", c.cmd.Args, werr, stderr) +} + +func ignorableCloseError(err error) bool { + errS := err.Error() + ss := []string{ + os.ErrClosed.Error(), + } + for _, s := range ss { + if strings.Contains(errS, s) { + return true + } + } + return false +} + +func (c *commandConn) CloseRead() error { + // NOTE: maybe already closed here + if err := c.stdout.Close(); err != nil && !ignorableCloseError(err) { + logrus.Warnf("commandConn.CloseRead: %v", err) + } + c.stdioClosedMu.Lock() + c.stdoutClosed = true + c.stdioClosedMu.Unlock() + if err := c.killIfStdioClosed(); err != nil { + logrus.Warnf("commandConn.CloseRead: %v", err) + } + return nil +} + +func (c *commandConn) Read(p []byte) (int, error) { + n, err := c.stdout.Read(p) + if err == io.EOF { + err = c.onEOF(err) + } + return n, err +} + +func (c *commandConn) CloseWrite() error { + // NOTE: maybe already closed here + if err := c.stdin.Close(); err != nil && !ignorableCloseError(err) { + logrus.Warnf("commandConn.CloseWrite: %v", err) + } + c.stdioClosedMu.Lock() + c.stdinClosed = true + c.stdioClosedMu.Unlock() + if err := c.killIfStdioClosed(); err != nil { + logrus.Warnf("commandConn.CloseWrite: %v", err) + } + return nil +} + +func (c *commandConn) Write(p []byte) (int, error) { + n, err := c.stdin.Write(p) + if err == io.EOF { + err = c.onEOF(err) + } + return n, err +} + +func (c *commandConn) Close() error { + var err error + if err = c.CloseRead(); err != nil { + logrus.Warnf("commandConn.Close: CloseRead: %v", err) + } + if err = c.CloseWrite(); err != nil { + logrus.Warnf("commandConn.Close: CloseWrite: %v", err) + } + return err +} + +func (c *commandConn) LocalAddr() net.Addr { + return c.localAddr +} + +func (c *commandConn) RemoteAddr() net.Addr { + return c.remoteAddr +} + +func (c *commandConn) SetDeadline(t time.Time) error { + logrus.Debugf("unimplemented call: SetDeadline(%v)", t) + return nil +} + +func (c *commandConn) SetReadDeadline(t time.Time) error { + logrus.Debugf("unimplemented call: SetReadDeadline(%v)", t) + return nil +} + +func (c *commandConn) SetWriteDeadline(t time.Time) error { + logrus.Debugf("unimplemented call: SetWriteDeadline(%v)", t) + return nil +} + +type dummyAddr struct { + network string + s string +} + +func (d 
dummyAddr) Network() string {
+	return d.network
+}
+
+func (d dummyAddr) String() string {
+	return d.s
+}
+
+type stderrWriter struct {
+	stderrMu    *sync.Mutex
+	stderr      *bytes.Buffer
+	debugPrefix string
+}
+
+func (w *stderrWriter) Write(p []byte) (int, error) {
+	logrus.Debugf("%s%s", w.debugPrefix, string(p))
+	w.stderrMu.Lock()
+	if w.stderr.Len() > 4096 {
+		w.stderr.Reset()
+	}
+	n, err := w.stderr.Write(p)
+	w.stderrMu.Unlock()
+	return n, err
+}
diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_linux.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_linux.go
new file mode 100644
index 000000000..1cdd788cc
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_linux.go
@@ -0,0 +1,10 @@
+package commandconn
+
+import (
+	"os/exec"
+	"syscall"
+)
+
+func setPdeathsig(cmd *exec.Cmd) {
+	cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
+}
diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go
new file mode 100644
index 000000000..2adcf0816
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go
@@ -0,0 +1,11 @@
+//go:build !linux
+// +build !linux
+
+package commandconn
+
+import (
+	"os/exec"
+)
+
+func setPdeathsig(cmd *exec.Cmd) {
+}
diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go
new file mode 100644
index 000000000..57bdecec0
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go
@@ -0,0 +1,14 @@
+//go:build !windows
+// +build !windows
+
+package commandconn
+
+import (
+	"os/exec"
+)
+
+func createSession(cmd *exec.Cmd) {
+	// for supporting ssh connection helper with ProxyCommand
+	// https://github.com/docker/cli/issues/1707
+	cmd.SysProcAttr.Setsid = true
+}
diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_windows.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_windows.go
new file mode 100644
index 000000000..b92607450
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_windows.go
@@ -0,0 +1,8 @@
+package commandconn
+
+import (
+	"os/exec"
+)
+
+func createSession(cmd *exec.Cmd) {
+}
diff --git a/vendor/github.com/docker/cli/cli/connhelper/connhelper.go b/vendor/github.com/docker/cli/cli/connhelper/connhelper.go
new file mode 100644
index 000000000..9ac9d6744
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/connhelper/connhelper.go
@@ -0,0 +1,68 @@
+// Package connhelper provides helpers for connecting to a remote daemon host with custom logic.
+package connhelper
+
+import (
+	"context"
+	"net"
+	"net/url"
+
+	"github.com/docker/cli/cli/connhelper/commandconn"
+	"github.com/docker/cli/cli/connhelper/ssh"
+	"github.com/pkg/errors"
+)
+
+// ConnectionHelper allows connecting to a remote host with a custom stream provider binary.
+type ConnectionHelper struct {
+	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+	Host   string // dummy URL used for HTTP requests. e.g. "http://docker"
+}
+
+// GetConnectionHelper returns a Docker-specific connection helper for the given URL.
+// GetConnectionHelper returns nil without error when no helper is registered for the scheme.
+//
+// An ssh://<user>@<host> URL requires Docker 18.09 or later on the remote host.
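+//
+// An editorial aside, not part of the upstream documentation: a typical
+// wiring, assuming a reachable SSH daemon URL ("me@example.com" is a
+// placeholder):
+//
+//	helper, err := connhelper.GetConnectionHelper("ssh://me@example.com")
+//	if err != nil {
+//		return err
+//	}
+//	apiClient, err := client.NewClientWithOpts(
+//		client.WithHTTPClient(&http.Client{
+//			Transport: &http.Transport{DialContext: helper.Dialer},
+//		}),
+//		client.WithHost(helper.Host),
+//		client.WithDialContext(helper.Dialer),
+//	)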
+func GetConnectionHelper(daemonURL string) (*ConnectionHelper, error) { + return getConnectionHelper(daemonURL, nil) +} + +// GetConnectionHelperWithSSHOpts returns Docker-specific connection helper for +// the given URL, and accepts additional options for ssh connections. It returns +// nil without error when no helper is registered for the scheme. +// +// Requires Docker 18.09 or later on the remote host. +func GetConnectionHelperWithSSHOpts(daemonURL string, sshFlags []string) (*ConnectionHelper, error) { + return getConnectionHelper(daemonURL, sshFlags) +} + +func getConnectionHelper(daemonURL string, sshFlags []string) (*ConnectionHelper, error) { + u, err := url.Parse(daemonURL) + if err != nil { + return nil, err + } + switch scheme := u.Scheme; scheme { + case "ssh": + sp, err := ssh.ParseURL(daemonURL) + if err != nil { + return nil, errors.Wrap(err, "ssh host connection is not valid") + } + return &ConnectionHelper{ + Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) { + return commandconn.New(ctx, "ssh", append(sshFlags, sp.Args("docker", "system", "dial-stdio")...)...) + }, + Host: "http://docker.example.com", + }, nil + } + // Future version may support plugins via ~/.docker/config.json. e.g. "dind" + // See docker/cli#889 for the previous discussion. + return nil, err +} + +// GetCommandConnectionHelper returns Docker-specific connection helper constructed from an arbitrary command. +func GetCommandConnectionHelper(cmd string, flags ...string) (*ConnectionHelper, error) { + return &ConnectionHelper{ + Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) { + return commandconn.New(ctx, cmd, flags...) + }, + Host: "http://docker.example.com", + }, nil +} diff --git a/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go b/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go new file mode 100644 index 000000000..bde01ae7f --- /dev/null +++ b/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go @@ -0,0 +1,64 @@ +// Package ssh provides the connection helper for ssh:// URL. +package ssh + +import ( + "net/url" + + "github.com/pkg/errors" +) + +// ParseURL parses URL +func ParseURL(daemonURL string) (*Spec, error) { + u, err := url.Parse(daemonURL) + if err != nil { + return nil, err + } + if u.Scheme != "ssh" { + return nil, errors.Errorf("expected scheme ssh, got %q", u.Scheme) + } + + var sp Spec + + if u.User != nil { + sp.User = u.User.Username() + if _, ok := u.User.Password(); ok { + return nil, errors.New("plain-text password is not supported") + } + } + sp.Host = u.Hostname() + if sp.Host == "" { + return nil, errors.Errorf("no host specified") + } + sp.Port = u.Port() + if u.Path != "" { + return nil, errors.Errorf("extra path after the host: %q", u.Path) + } + if u.RawQuery != "" { + return nil, errors.Errorf("extra query after the host: %q", u.RawQuery) + } + if u.Fragment != "" { + return nil, errors.Errorf("extra fragment after the host: %q", u.Fragment) + } + return &sp, err +} + +// Spec of SSH URL +type Spec struct { + User string + Host string + Port string +} + +// Args returns args except "ssh" itself combined with optional additional command args +func (sp *Spec) Args(add ...string) []string { + var args []string + if sp.User != "" { + args = append(args, "-l", sp.User) + } + if sp.Port != "" { + args = append(args, "-p", sp.Port) + } + args = append(args, "--", sp.Host) + args = append(args, add...) 
+	return args
+}
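+
+// An editorial aside, not part of the upstream source: for the URL
+// "ssh://me@example.com:2222" (a placeholder), ParseURL yields
+// Spec{User: "me", Host: "example.com", Port: "2222"}, and
+//
+//	sp.Args("docker", "system", "dial-stdio")
+//
+// returns []string{"-l", "me", "-p", "2222", "--", "example.com", "docker", "system", "dial-stdio"}.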
diff --git a/vendor/github.com/docker/cli/cli/context/docker/constants.go b/vendor/github.com/docker/cli/cli/context/docker/constants.go
new file mode 100644
index 000000000..1db5556d5
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/context/docker/constants.go
@@ -0,0 +1,6 @@
+package docker
+
+const (
+	// DockerEndpoint is the name of the docker endpoint in a stored context
+	DockerEndpoint = "docker"
+)
diff --git a/vendor/github.com/docker/cli/cli/context/docker/load.go b/vendor/github.com/docker/cli/cli/context/docker/load.go
new file mode 100644
index 000000000..09ec40505
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/context/docker/load.go
@@ -0,0 +1,160 @@
+package docker
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/docker/cli/cli/connhelper"
+	"github.com/docker/cli/cli/context"
+	"github.com/docker/cli/cli/context/store"
+	"github.com/docker/docker/client"
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/pkg/errors"
+)
+
+// EndpointMeta is a typed wrapper around a context-store generic endpoint describing
+// a Docker Engine endpoint, without its tls config
+type EndpointMeta = context.EndpointMetaBase
+
+// Endpoint is a typed wrapper around a context-store generic endpoint describing
+// a Docker Engine endpoint, with its tls data
+type Endpoint struct {
+	EndpointMeta
+	TLSData *context.TLSData
+
+	// Deprecated: Use of encrypted TLS private keys has been deprecated, and
+	// will be removed in a future release. Golang has deprecated support for
+	// legacy PEM encryption (as specified in RFC 1423), as it is insecure by
+	// design (see https://go-review.googlesource.com/c/go/+/264159).
+	TLSPassword string
+}
+
+// WithTLSData loads TLS materials for the endpoint
+func WithTLSData(s store.Reader, contextName string, m EndpointMeta) (Endpoint, error) {
+	tlsData, err := context.LoadTLSData(s, contextName, DockerEndpoint)
+	if err != nil {
+		return Endpoint{}, err
+	}
+	return Endpoint{
+		EndpointMeta: m,
+		TLSData:      tlsData,
+	}, nil
+}
+
+// tlsConfig extracts a context docker endpoint TLS config
+func (c *Endpoint) tlsConfig() (*tls.Config, error) {
+	if c.TLSData == nil && !c.SkipTLSVerify {
+		// there is no specific tls config
+		return nil, nil
+	}
+	var tlsOpts []func(*tls.Config)
+	if c.TLSData != nil && c.TLSData.CA != nil {
+		certPool := x509.NewCertPool()
+		if !certPool.AppendCertsFromPEM(c.TLSData.CA) {
+			return nil, errors.New("failed to retrieve context tls info: ca.pem seems invalid")
+		}
+		tlsOpts = append(tlsOpts, func(cfg *tls.Config) {
+			cfg.RootCAs = certPool
+		})
+	}
+	if c.TLSData != nil && c.TLSData.Key != nil && c.TLSData.Cert != nil {
+		keyBytes := c.TLSData.Key
+		pemBlock, _ := pem.Decode(keyBytes)
+		if pemBlock == nil {
+			return nil, errors.New("no valid private key found")
+		}
+		if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // SA1019: x509.IsEncryptedPEMBlock is deprecated, and insecure by design
+			return nil, errors.New("private key is encrypted - support for encrypted private keys has been removed, see https://docs.docker.com/go/deprecated/")
+		}
+
+		x509cert, err := tls.X509KeyPair(c.TLSData.Cert, keyBytes)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to retrieve context tls info")
+		}
+		tlsOpts = append(tlsOpts, func(cfg *tls.Config) {
+			cfg.Certificates = []tls.Certificate{x509cert}
+		})
+	}
+	if c.SkipTLSVerify {
+		tlsOpts = append(tlsOpts, func(cfg *tls.Config) {
+			cfg.InsecureSkipVerify = true
+		})
+	}
+	return tlsconfig.ClientDefault(tlsOpts...), nil
+}
+
+// ClientOpts returns a slice of Client options to configure an API client with this endpoint
+func (c *Endpoint) ClientOpts() ([]client.Opt, error) {
+	var result []client.Opt
+	if c.Host != "" {
+		helper, err := connhelper.GetConnectionHelper(c.Host)
+		if err != nil {
+			return nil, err
+		}
+		if helper == nil {
+			tlsConfig, err := c.tlsConfig()
+			if err != nil {
+				return nil, err
+			}
+			result = append(result,
+				withHTTPClient(tlsConfig),
+				client.WithHost(c.Host),
+			)
+
+		} else {
+			httpClient := &http.Client{
+				// No tls
+				// No proxy
+				Transport: &http.Transport{
+					DialContext: helper.Dialer,
+				},
+			}
+			result = append(result,
+				client.WithHTTPClient(httpClient),
+				client.WithHost(helper.Host),
+				client.WithDialContext(helper.Dialer),
+			)
+		}
+	}
+
+	result = append(result, client.WithVersionFromEnv(), client.WithAPIVersionNegotiation())
+	return result, nil
+}
+
+func withHTTPClient(tlsConfig *tls.Config) func(*client.Client) error {
+	return func(c *client.Client) error {
+		if tlsConfig == nil {
+			// Use the default HTTPClient
+			return nil
+		}
+
+		httpClient := &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: tlsConfig,
+				DialContext: (&net.Dialer{
+					KeepAlive: 30 * time.Second,
+					Timeout:   30 * time.Second,
+				}).DialContext,
+			},
+			CheckRedirect: client.CheckRedirect,
+		}
+		return client.WithHTTPClient(httpClient)(c)
+	}
+}
+
+// EndpointFromContext parses a context docker endpoint metadata into a typed EndpointMeta structure
+func EndpointFromContext(metadata store.Metadata) (EndpointMeta, error) {
+	ep, ok := metadata.Endpoints[DockerEndpoint]
+	if !ok {
+		return EndpointMeta{}, errors.New("cannot find docker endpoint in context")
+	}
+	typed, ok := ep.(EndpointMeta)
+	if !ok {
+		return EndpointMeta{}, errors.Errorf("endpoint %q is not of type EndpointMeta", DockerEndpoint)
+	}
+	return typed, nil
+}
diff --git a/vendor/github.com/docker/cli/cli/context/endpoint.go b/vendor/github.com/docker/cli/cli/context/endpoint.go
new file mode 100644
index 000000000..f2735246e
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/context/endpoint.go
@@ -0,0 +1,7 @@
+package context
+
+// EndpointMetaBase contains fields we expect to be common for most context endpoints
+type EndpointMetaBase struct {
+	Host          string `json:",omitempty"`
+	SkipTLSVerify bool
+}
diff --git a/vendor/github.com/docker/cli/cli/context/store/doc.go b/vendor/github.com/docker/cli/cli/context/store/doc.go
new file mode 100644
index 000000000..705982ae4
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/context/store/doc.go
@@ -0,0 +1,32 @@
+// Package store provides a generic way to store credentials to connect to
+// virtually any kind of remote system.
+// The term `context` comes from the similar feature in Kubernetes kubectl
+// config files.
+//
+// Conceptually, a context is a set of metadata and TLS data, that can be used
+// to connect to various endpoints of a remote system. TLS data and metadata
+// are stored separately, so that in the future, we will be able to store
+// sensitive information in a more secure way, depending on the os we are running
+// on (e.g.: on Windows we could use the user Certificate Store, on macOS the
+// user Keychain...).
+//
+// Current implementation is purely file based with the following structure:
+//
+//	${CONTEXT_ROOT}
+//	  meta/
+//	    <context id>/meta.json: contains context metadata (key/value pairs) as
+//	                            well as a list of endpoints (themselves containing
+//	                            key/value pair metadata).
+//	  tls/
+//	    <context id>/endpoint1/: directory containing TLS data for the endpoint1
+//	                             in the corresponding context.
+//
+// The context store itself has absolutely no knowledge about what a docker
+// endpoint should contain in terms of metadata or TLS config. Client code is
+// responsible for generating and parsing endpoint metadata and TLS files. The
+// multi-endpoints approach of this package allows combining many different
+// endpoints in the same "context".
+//
+// Context IDs are actually SHA256 hashes of the context name, and are there
+// only to avoid dealing with special characters in context names.
+package store
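+
+// An editorial aside, not part of the upstream source: the ID for a context
+// named "prod" (a placeholder) is the hex-encoded SHA256 of the string
+// "prod", so its metadata lives at ${CONTEXT_ROOT}/meta/<sha256 of
+// "prod">/meta.json. A minimal sketch with the go-digest module:
+//
+//	id := digest.FromString("prod").Encoded() // 64 hex characters
+//
+// which appears to mirror how contextdirOf, used by the metadata store
+// below, derives directory names.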
diff --git a/vendor/github.com/docker/cli/cli/context/store/io_utils.go b/vendor/github.com/docker/cli/cli/context/store/io_utils.go
new file mode 100644
index 000000000..6f854c8e5
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/context/store/io_utils.go
@@ -0,0 +1,29 @@
+package store
+
+import (
+	"errors"
+	"io"
+)
+
+// LimitedReader is a fork of io.LimitedReader to override Read.
+type LimitedReader struct {
+	R io.Reader
+	N int64 // max bytes remaining
+}
+
+// Read is a fork of io.LimitedReader.Read that returns an error when the limit is exceeded.
+func (l *LimitedReader) Read(p []byte) (n int, err error) {
+	if l.N < 0 {
+		return 0, errors.New("read exceeds the defined limit")
+	}
+	if l.N == 0 {
+		return 0, io.EOF
+	}
+	// have to cap at N + 1, otherwise we won't hit the limit err
+	if int64(len(p)) > l.N+1 {
+		p = p[0 : l.N+1]
+	}
+	n, err = l.R.Read(p)
+	l.N -= int64(n)
+	return n, err
+}
diff --git a/vendor/github.com/docker/cli/cli/context/store/metadatastore.go b/vendor/github.com/docker/cli/cli/context/store/metadatastore.go
new file mode 100644
index 000000000..62c3f82a6
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/context/store/metadatastore.go
@@ -0,0 +1,164 @@
+package store
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+
+	"github.com/docker/docker/errdefs"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/fvbommel/sortorder"
+	"github.com/pkg/errors"
+)
+
+const (
+	metadataDir = "meta"
+	metaFile    = "meta.json"
+)
+
+type metadataStore struct {
+	root   string
+	config Config
+}
+
+func (s *metadataStore) contextDir(id contextdir) string {
+	return filepath.Join(s.root, string(id))
+}
+
+func (s *metadataStore) createOrUpdate(meta Metadata) error {
+	contextDir := s.contextDir(contextdirOf(meta.Name))
+	if err := os.MkdirAll(contextDir, 0o755); err != nil {
+		return err
+	}
+	bytes, err := json.Marshal(&meta)
+	if err != nil {
+		return err
+	}
+	return ioutils.AtomicWriteFile(filepath.Join(contextDir, metaFile), bytes, 0o644)
+}
+
+func parseTypedOrMap(payload []byte, getter TypeGetter) (interface{}, error) {
+	if len(payload) == 0 || string(payload) == "null" {
+		return nil, nil
+	}
+	if getter == nil {
+		var res map[string]interface{}
+		if err := json.Unmarshal(payload, &res); err != nil {
+			return nil, err
+		}
+		return res, nil
+	}
+	typed := getter()
+	if err := json.Unmarshal(payload, typed); err != nil {
+		return nil, err
+	}
+	return reflect.ValueOf(typed).Elem().Interface(), nil
+}
+
+func (s *metadataStore) get(name string) (Metadata, error) {
+	m, err := s.getByID(contextdirOf(name))
+	if err != nil {
+		return m, errors.Wrapf(err, "context %q", name)
+	}
+	return m, nil
+}
+
+func (s *metadataStore) getByID(id contextdir) (Metadata, error) {
+	fileName := filepath.Join(s.contextDir(id), metaFile)
+	bytes, err := os.ReadFile(fileName)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			return
Metadata{}, errdefs.NotFound(errors.Wrap(err, "context not found")) + } + return Metadata{}, err + } + var untyped untypedContextMetadata + r := Metadata{ + Endpoints: make(map[string]interface{}), + } + if err := json.Unmarshal(bytes, &untyped); err != nil { + return Metadata{}, fmt.Errorf("parsing %s: %v", fileName, err) + } + r.Name = untyped.Name + if r.Metadata, err = parseTypedOrMap(untyped.Metadata, s.config.contextType); err != nil { + return Metadata{}, fmt.Errorf("parsing %s: %v", fileName, err) + } + for k, v := range untyped.Endpoints { + if r.Endpoints[k], err = parseTypedOrMap(v, s.config.endpointTypes[k]); err != nil { + return Metadata{}, fmt.Errorf("parsing %s: %v", fileName, err) + } + } + return r, err +} + +func (s *metadataStore) remove(name string) error { + if err := os.RemoveAll(s.contextDir(contextdirOf(name))); err != nil { + return errors.Wrapf(err, "failed to remove metadata") + } + return nil +} + +func (s *metadataStore) list() ([]Metadata, error) { + ctxDirs, err := listRecursivelyMetadataDirs(s.root) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + return nil, err + } + var res []Metadata + for _, dir := range ctxDirs { + c, err := s.getByID(contextdir(dir)) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + continue + } + return nil, errors.Wrap(err, "failed to read metadata") + } + res = append(res, c) + } + sort.Slice(res, func(i, j int) bool { + return sortorder.NaturalLess(res[i].Name, res[j].Name) + }) + return res, nil +} + +func isContextDir(path string) bool { + s, err := os.Stat(filepath.Join(path, metaFile)) + if err != nil { + return false + } + return !s.IsDir() +} + +func listRecursivelyMetadataDirs(root string) ([]string, error) { + fis, err := os.ReadDir(root) + if err != nil { + return nil, err + } + var result []string + for _, fi := range fis { + if fi.IsDir() { + if isContextDir(filepath.Join(root, fi.Name())) { + result = append(result, fi.Name()) + } + subs, err := listRecursivelyMetadataDirs(filepath.Join(root, fi.Name())) + if err != nil { + return nil, err + } + for _, s := range subs { + result = append(result, filepath.Join(fi.Name(), s)) + } + } + } + return result, nil +} + +type untypedContextMetadata struct { + Metadata json.RawMessage `json:"metadata,omitempty"` + Endpoints map[string]json.RawMessage `json:"endpoints,omitempty"` + Name string `json:"name,omitempty"` +} diff --git a/vendor/github.com/docker/cli/cli/context/store/store.go b/vendor/github.com/docker/cli/cli/context/store/store.go new file mode 100644 index 000000000..037464bb1 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/context/store/store.go @@ -0,0 +1,515 @@ +package store + +import ( + "archive/tar" + "archive/zip" + "bufio" + "bytes" + _ "crypto/sha256" // ensure ids can be computed + "encoding/json" + "io" + "net/http" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/errdefs" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +const restrictedNamePattern = "^[a-zA-Z0-9][a-zA-Z0-9_.+-]+$" + +var restrictedNameRegEx = regexp.MustCompile(restrictedNamePattern) + +// Store provides a context store for easily remembering endpoints configuration +type Store interface { + Reader + Lister + Writer + StorageInfoProvider +} + +// Reader provides read-only (without list) access to context data +type Reader interface { + GetMetadata(name string) (Metadata, error) + ListTLSFiles(name string) (map[string]EndpointFiles, error) + GetTLSData(contextName, endpointName, fileName 
string) ([]byte, error)
+}
+
+// Lister provides listing of contexts
+type Lister interface {
+	List() ([]Metadata, error)
+}
+
+// ReaderLister combines Reader and Lister interfaces
+type ReaderLister interface {
+	Reader
+	Lister
+}
+
+// StorageInfoProvider provides more information about storage details of contexts
+type StorageInfoProvider interface {
+	GetStorageInfo(contextName string) StorageInfo
+}
+
+// Writer provides write access to context data
+type Writer interface {
+	CreateOrUpdate(meta Metadata) error
+	Remove(name string) error
+	ResetTLSMaterial(name string, data *ContextTLSData) error
+	ResetEndpointTLSMaterial(contextName string, endpointName string, data *EndpointTLSData) error
+}
+
+// ReaderWriter combines Reader and Writer interfaces
+type ReaderWriter interface {
+	Reader
+	Writer
+}
+
+// Metadata contains metadata about a context and its endpoints
+type Metadata struct {
+	Name      string                 `json:",omitempty"`
+	Metadata  interface{}            `json:",omitempty"`
+	Endpoints map[string]interface{} `json:",omitempty"`
+}
+
+// StorageInfo contains data about where a given context is stored
+type StorageInfo struct {
+	MetadataPath string
+	TLSPath      string
+}
+
+// EndpointTLSData represents tls data for a given endpoint
+type EndpointTLSData struct {
+	Files map[string][]byte
+}
+
+// ContextTLSData represents tls data for a whole context
+type ContextTLSData struct {
+	Endpoints map[string]EndpointTLSData
+}
+
+// New creates a store from a given directory.
+// If the directory does not exist or is empty, it is initialized.
+func New(dir string, cfg Config) *ContextStore {
+	metaRoot := filepath.Join(dir, metadataDir)
+	tlsRoot := filepath.Join(dir, tlsDir)
+
+	return &ContextStore{
+		meta: &metadataStore{
+			root:   metaRoot,
+			config: cfg,
+		},
+		tls: &tlsStore{
+			root: tlsRoot,
+		},
+	}
+}
+
+// ContextStore implements Store.
+type ContextStore struct {
+	meta *metadataStore
+	tls  *tlsStore
+}
+
+// List returns all contexts.
+func (s *ContextStore) List() ([]Metadata, error) {
+	return s.meta.list()
+}
+
+// Names returns the Metadata names for a Lister
+func Names(s Lister) ([]string, error) {
+	list, err := s.List()
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for _, item := range list {
+		names = append(names, item.Name)
+	}
+	return names, nil
+}
+
+// CreateOrUpdate creates or updates metadata for the context.
+func (s *ContextStore) CreateOrUpdate(meta Metadata) error {
+	return s.meta.createOrUpdate(meta)
+}
+
+// Remove deletes the context with the given name, if found.
+func (s *ContextStore) Remove(name string) error {
+	if err := s.meta.remove(name); err != nil {
+		return errors.Wrapf(err, "failed to remove context %s", name)
+	}
+	if err := s.tls.remove(name); err != nil {
+		return errors.Wrapf(err, "failed to remove context %s", name)
+	}
+	return nil
+}
+
+// GetMetadata returns the metadata for the context with the given name.
+// It returns an errdefs.ErrNotFound if the context was not found.
+func (s *ContextStore) GetMetadata(name string) (Metadata, error) {
+	return s.meta.get(name)
+}
+
+// ResetTLSMaterial removes TLS data for all endpoints in the context and replaces
+// it with the new data.
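A minimal usage sketch for the store API above, assuming a hypothetical typed metadata struct DockerContext and an illustrative store directory (neither is part of the vendored package):

package main

import (
	"fmt"

	"github.com/docker/cli/cli/context/store"
)

// DockerContext is a hypothetical typed metadata payload for this sketch.
type DockerContext struct {
	Description string
}

func main() {
	// Register the concrete type so metadata unmarshals into DockerContext
	// rather than into a generic map[string]interface{}.
	cfg := store.NewConfig(func() interface{} { return new(DockerContext) })
	s := store.New("/tmp/docker-contexts", cfg)

	if err := s.CreateOrUpdate(store.Metadata{
		Name:     "demo",
		Metadata: DockerContext{Description: "sketch context"},
	}); err != nil {
		panic(err)
	}

	meta, err := s.GetMetadata("demo")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %+v\n", meta.Name, meta.Metadata)
}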
+func (s *ContextStore) ResetTLSMaterial(name string, data *ContextTLSData) error { + if err := s.tls.remove(name); err != nil { + return err + } + if data == nil { + return nil + } + for ep, files := range data.Endpoints { + for fileName, data := range files.Files { + if err := s.tls.createOrUpdate(name, ep, fileName, data); err != nil { + return err + } + } + } + return nil +} + +// ResetEndpointTLSMaterial removes TLS data for the given context and endpoint, +// and replaces it with the new data. +func (s *ContextStore) ResetEndpointTLSMaterial(contextName string, endpointName string, data *EndpointTLSData) error { + if err := s.tls.removeEndpoint(contextName, endpointName); err != nil { + return err + } + if data == nil { + return nil + } + for fileName, data := range data.Files { + if err := s.tls.createOrUpdate(contextName, endpointName, fileName, data); err != nil { + return err + } + } + return nil +} + +// ListTLSFiles returns the list of TLS files present for each endpoint in the +// context. +func (s *ContextStore) ListTLSFiles(name string) (map[string]EndpointFiles, error) { + return s.tls.listContextData(name) +} + +// GetTLSData reads, and returns the content of the given fileName for an endpoint. +// It returns an errdefs.ErrNotFound if the file was not found. +func (s *ContextStore) GetTLSData(contextName, endpointName, fileName string) ([]byte, error) { + return s.tls.getData(contextName, endpointName, fileName) +} + +// GetStorageInfo returns the paths where the Metadata and TLS data are stored +// for the context. +func (s *ContextStore) GetStorageInfo(contextName string) StorageInfo { + return StorageInfo{ + MetadataPath: s.meta.contextDir(contextdirOf(contextName)), + TLSPath: s.tls.contextDir(contextName), + } +} + +// ValidateContextName checks a context name is valid. 
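Export and Import, defined further down in this file, are designed to compose: one produces the opaque tar stream the other consumes. As a sketch, cloning a context under a new name is just piping one into the other (copyContext is an illustrative helper, not part of the package):

package example

import "github.com/docker/cli/cli/context/store"

// copyContext clones a context (metadata plus TLS files) by feeding the tar
// stream produced by Export straight back into Import. ContextStore satisfies
// both the Reader and Writer interfaces these helpers expect.
func copyContext(s *store.ContextStore, from, to string) error {
	r := store.Export(from, s) // opaque stream: meta.json plus tls/<endpoint>/<file> entries
	defer r.Close()
	return store.Import(to, s, r)
}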
+func ValidateContextName(name string) error { + if name == "" { + return errors.New("context name cannot be empty") + } + if name == "default" { + return errors.New(`"default" is a reserved context name`) + } + if !restrictedNameRegEx.MatchString(name) { + return errors.Errorf("context name %q is invalid, names are validated against regexp %q", name, restrictedNamePattern) + } + return nil +} + +// Export exports an existing namespace into an opaque data stream +// This stream is actually a tarball containing context metadata and TLS materials, but it does +// not map 1:1 the layout of the context store (don't try to restore it manually without calling store.Import) +func Export(name string, s Reader) io.ReadCloser { + reader, writer := io.Pipe() + go func() { + tw := tar.NewWriter(writer) + defer tw.Close() + defer writer.Close() + meta, err := s.GetMetadata(name) + if err != nil { + writer.CloseWithError(err) + return + } + metaBytes, err := json.Marshal(&meta) + if err != nil { + writer.CloseWithError(err) + return + } + if err = tw.WriteHeader(&tar.Header{ + Name: metaFile, + Mode: 0o644, + Size: int64(len(metaBytes)), + }); err != nil { + writer.CloseWithError(err) + return + } + if _, err = tw.Write(metaBytes); err != nil { + writer.CloseWithError(err) + return + } + tlsFiles, err := s.ListTLSFiles(name) + if err != nil { + writer.CloseWithError(err) + return + } + if err = tw.WriteHeader(&tar.Header{ + Name: "tls", + Mode: 0o700, + Size: 0, + Typeflag: tar.TypeDir, + }); err != nil { + writer.CloseWithError(err) + return + } + for endpointName, endpointFiles := range tlsFiles { + if err = tw.WriteHeader(&tar.Header{ + Name: path.Join("tls", endpointName), + Mode: 0o700, + Size: 0, + Typeflag: tar.TypeDir, + }); err != nil { + writer.CloseWithError(err) + return + } + for _, fileName := range endpointFiles { + data, err := s.GetTLSData(name, endpointName, fileName) + if err != nil { + writer.CloseWithError(err) + return + } + if err = tw.WriteHeader(&tar.Header{ + Name: path.Join("tls", endpointName, fileName), + Mode: 0o600, + Size: int64(len(data)), + }); err != nil { + writer.CloseWithError(err) + return + } + if _, err = tw.Write(data); err != nil { + writer.CloseWithError(err) + return + } + } + } + }() + return reader +} + +const ( + maxAllowedFileSizeToImport int64 = 10 << 20 + zipType string = "application/zip" +) + +func getImportContentType(r *bufio.Reader) (string, error) { + head, err := r.Peek(512) + if err != nil && err != io.EOF { + return "", err + } + + return http.DetectContentType(head), nil +} + +// Import imports an exported context into a store +func Import(name string, s Writer, reader io.Reader) error { + // Buffered reader will not advance the buffer, needed to determine content type + r := bufio.NewReader(reader) + + importContentType, err := getImportContentType(r) + if err != nil { + return err + } + switch importContentType { + case zipType: + return importZip(name, s, r) + default: + // Assume it's a TAR (TAR does not have a "magic number") + return importTar(name, s, r) + } +} + +func isValidFilePath(p string) error { + if p != metaFile && !strings.HasPrefix(p, "tls/") { + return errors.New("unexpected context file") + } + if path.Clean(p) != p { + return errors.New("unexpected path format") + } + if strings.Contains(p, `\`) { + return errors.New(`unexpected '\' in path`) + } + return nil +} + +func importTar(name string, s Writer, reader io.Reader) error { + tr := tar.NewReader(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport}) + tlsData := 
ContextTLSData{ + Endpoints: map[string]EndpointTLSData{}, + } + var importedMetaFile bool + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if hdr.Typeflag != tar.TypeReg { + // skip this entry, only taking files into account + continue + } + if err := isValidFilePath(hdr.Name); err != nil { + return errors.Wrap(err, hdr.Name) + } + if hdr.Name == metaFile { + data, err := io.ReadAll(tr) + if err != nil { + return err + } + meta, err := parseMetadata(data, name) + if err != nil { + return err + } + if err := s.CreateOrUpdate(meta); err != nil { + return err + } + importedMetaFile = true + } else if strings.HasPrefix(hdr.Name, "tls/") { + data, err := io.ReadAll(tr) + if err != nil { + return err + } + if err := importEndpointTLS(&tlsData, hdr.Name, data); err != nil { + return err + } + } + } + if !importedMetaFile { + return errdefs.InvalidParameter(errors.New("invalid context: no metadata found")) + } + return s.ResetTLSMaterial(name, &tlsData) +} + +func importZip(name string, s Writer, reader io.Reader) error { + body, err := io.ReadAll(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport}) + if err != nil { + return err + } + zr, err := zip.NewReader(bytes.NewReader(body), int64(len(body))) + if err != nil { + return err + } + tlsData := ContextTLSData{ + Endpoints: map[string]EndpointTLSData{}, + } + + var importedMetaFile bool + for _, zf := range zr.File { + fi := zf.FileInfo() + if !fi.Mode().IsRegular() { + // skip this entry, only taking regular files into account + continue + } + if err := isValidFilePath(zf.Name); err != nil { + return errors.Wrap(err, zf.Name) + } + if zf.Name == metaFile { + f, err := zf.Open() + if err != nil { + return err + } + + data, err := io.ReadAll(&LimitedReader{R: f, N: maxAllowedFileSizeToImport}) + defer f.Close() + if err != nil { + return err + } + meta, err := parseMetadata(data, name) + if err != nil { + return err + } + if err := s.CreateOrUpdate(meta); err != nil { + return err + } + importedMetaFile = true + } else if strings.HasPrefix(zf.Name, "tls/") { + f, err := zf.Open() + if err != nil { + return err + } + data, err := io.ReadAll(f) + defer f.Close() + if err != nil { + return err + } + err = importEndpointTLS(&tlsData, zf.Name, data) + if err != nil { + return err + } + } + } + if !importedMetaFile { + return errdefs.InvalidParameter(errors.New("invalid context: no metadata found")) + } + return s.ResetTLSMaterial(name, &tlsData) +} + +func parseMetadata(data []byte, name string) (Metadata, error) { + var meta Metadata + if err := json.Unmarshal(data, &meta); err != nil { + return meta, err + } + if err := ValidateContextName(name); err != nil { + return Metadata{}, err + } + meta.Name = name + return meta, nil +} + +func importEndpointTLS(tlsData *ContextTLSData, path string, data []byte) error { + parts := strings.SplitN(strings.TrimPrefix(path, "tls/"), "/", 2) + if len(parts) != 2 { + // TLS endpoints require archived file directory with 2 layers + // i.e. tls/{endpointName}/{fileName} + return errors.New("archive format is invalid") + } + + epName := parts[0] + fileName := parts[1] + if _, ok := tlsData.Endpoints[epName]; !ok { + tlsData.Endpoints[epName] = EndpointTLSData{ + Files: map[string][]byte{}, + } + } + tlsData.Endpoints[epName].Files[fileName] = data + return nil +} + +// IsErrContextDoesNotExist checks if the given error is a "context does not exist" condition. 
+//
+// Deprecated: use github.com/docker/docker/errdefs.IsNotFound()
+func IsErrContextDoesNotExist(err error) bool {
+	return errdefs.IsNotFound(err)
+}
+
+// IsErrTLSDataDoesNotExist checks if the given error is a "TLS data does not exist" condition
+//
+// Deprecated: use github.com/docker/docker/errdefs.IsNotFound()
+func IsErrTLSDataDoesNotExist(err error) bool {
+	return errdefs.IsNotFound(err)
+}
+
+type contextdir string
+
+func contextdirOf(name string) contextdir {
+	return contextdir(digest.FromString(name).Encoded())
+}
diff --git a/vendor/github.com/docker/cli/cli/context/store/storeconfig.go b/vendor/github.com/docker/cli/cli/context/store/storeconfig.go
new file mode 100644
index 000000000..9c93ecbab
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/context/store/storeconfig.go
@@ -0,0 +1,53 @@
+package store
+
+// TypeGetter is a func used to determine the concrete type of a context or
+// endpoint metadata by returning a pointer to an instance of the object,
+// e.g.: for a context of type DockerContext, the corresponding TypeGetter should return new(DockerContext)
+type TypeGetter func() interface{}
+
+// NamedTypeGetter is a TypeGetter associated with a name
+type NamedTypeGetter struct {
+	name       string
+	typeGetter TypeGetter
+}
+
+// EndpointTypeGetter returns a NamedTypeGetter with the specified name and getter
+func EndpointTypeGetter(name string, getter TypeGetter) NamedTypeGetter {
+	return NamedTypeGetter{
+		name:       name,
+		typeGetter: getter,
+	}
+}
+
+// Config is used to configure the metadata marshaler of the ContextStore
+type Config struct {
+	contextType   TypeGetter
+	endpointTypes map[string]TypeGetter
+}
+
+// SetEndpoint sets the typing information for an endpoint
+func (c Config) SetEndpoint(name string, getter TypeGetter) {
+	c.endpointTypes[name] = getter
+}
+
+// ForeachEndpointType calls cb on every endpoint type registered with the Config
+func (c Config) ForeachEndpointType(cb func(string, TypeGetter) error) error {
+	for n, ep := range c.endpointTypes {
+		if err := cb(n, ep); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// NewConfig creates a config object
+func NewConfig(contextType TypeGetter, endpoints ...NamedTypeGetter) Config {
+	res := Config{
+		contextType:   contextType,
+		endpointTypes: make(map[string]TypeGetter),
+	}
+	for _, e := range endpoints {
+		res.endpointTypes[e.name] = e.typeGetter
+	}
+	return res
+}
diff --git a/vendor/github.com/docker/cli/cli/context/store/tlsstore.go b/vendor/github.com/docker/cli/cli/context/store/tlsstore.go
new file mode 100644
index 000000000..ffbbde7c0
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/context/store/tlsstore.go
@@ -0,0 +1,96 @@
+package store
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/errdefs"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/pkg/errors"
+)
+
+const tlsDir = "tls"
+
+type tlsStore struct {
+	root string
+}
+
+func (s *tlsStore) contextDir(name string) string {
+	return filepath.Join(s.root, string(contextdirOf(name)))
+}
+
+func (s *tlsStore) endpointDir(name, endpointName string) string {
+	return filepath.Join(s.contextDir(name), endpointName)
+}
+
+func (s *tlsStore) createOrUpdate(name, endpointName, filename string, data []byte) error {
+	parentOfRoot := filepath.Dir(s.root)
+	if err := os.MkdirAll(parentOfRoot, 0o755); err != nil {
+		return err
+	}
+	endpointDir := s.endpointDir(name, endpointName)
+	if err := os.MkdirAll(endpointDir, 0o700); err != nil {
+		return err
+	}
+	return
ioutils.AtomicWriteFile(filepath.Join(endpointDir, filename), data, 0o600) +} + +func (s *tlsStore) getData(name, endpointName, filename string) ([]byte, error) { + data, err := os.ReadFile(filepath.Join(s.endpointDir(name, endpointName), filename)) + if err != nil { + if os.IsNotExist(err) { + return nil, errdefs.NotFound(errors.Errorf("TLS data for %s/%s/%s does not exist", name, endpointName, filename)) + } + return nil, errors.Wrapf(err, "failed to read TLS data for endpoint %s", endpointName) + } + return data, nil +} + +// remove deletes all TLS data for the given context. +func (s *tlsStore) remove(name string) error { + if err := os.RemoveAll(s.contextDir(name)); err != nil { + return errors.Wrapf(err, "failed to remove TLS data") + } + return nil +} + +func (s *tlsStore) removeEndpoint(name, endpointName string) error { + if err := os.RemoveAll(s.endpointDir(name, endpointName)); err != nil { + return errors.Wrapf(err, "failed to remove TLS data for endpoint %s", endpointName) + } + return nil +} + +func (s *tlsStore) listContextData(name string) (map[string]EndpointFiles, error) { + contextDir := s.contextDir(name) + epFSs, err := os.ReadDir(contextDir) + if err != nil { + if os.IsNotExist(err) { + return map[string]EndpointFiles{}, nil + } + return nil, errors.Wrapf(err, "failed to list TLS files for context %s", name) + } + r := make(map[string]EndpointFiles) + for _, epFS := range epFSs { + if epFS.IsDir() { + fss, err := os.ReadDir(filepath.Join(contextDir, epFS.Name())) + if os.IsNotExist(err) { + continue + } + if err != nil { + return nil, errors.Wrapf(err, "failed to list TLS files for endpoint %s", epFS.Name()) + } + var files EndpointFiles + for _, fs := range fss { + if !fs.IsDir() { + files = append(files, fs.Name()) + } + } + r[epFS.Name()] = files + } + } + return r, nil +} + +// EndpointFiles is a slice of strings representing file names +type EndpointFiles []string diff --git a/vendor/github.com/docker/cli/cli/context/tlsdata.go b/vendor/github.com/docker/cli/cli/context/tlsdata.go new file mode 100644 index 000000000..c758612a1 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/context/tlsdata.go @@ -0,0 +1,98 @@ +package context + +import ( + "os" + + "github.com/docker/cli/cli/context/store" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + caKey = "ca.pem" + certKey = "cert.pem" + keyKey = "key.pem" +) + +// TLSData holds ca/cert/key raw data +type TLSData struct { + CA []byte + Key []byte + Cert []byte +} + +// ToStoreTLSData converts TLSData to the store representation +func (data *TLSData) ToStoreTLSData() *store.EndpointTLSData { + if data == nil { + return nil + } + result := store.EndpointTLSData{ + Files: make(map[string][]byte), + } + if data.CA != nil { + result.Files[caKey] = data.CA + } + if data.Cert != nil { + result.Files[certKey] = data.Cert + } + if data.Key != nil { + result.Files[keyKey] = data.Key + } + return &result +} + +// LoadTLSData loads TLS data from the store +func LoadTLSData(s store.Reader, contextName, endpointName string) (*TLSData, error) { + tlsFiles, err := s.ListTLSFiles(contextName) + if err != nil { + return nil, errors.Wrapf(err, "failed to retrieve TLS files for context %q", contextName) + } + if epTLSFiles, ok := tlsFiles[endpointName]; ok { + var tlsData TLSData + for _, f := range epTLSFiles { + data, err := s.GetTLSData(contextName, endpointName, f) + if err != nil { + return nil, errors.Wrapf(err, "failed to retrieve TLS data (%s) for context %q", f, contextName) + } + switch f { + case 
caKey:
+				tlsData.CA = data
+			case certKey:
+				tlsData.Cert = data
+			case keyKey:
+				tlsData.Key = data
+			default:
+				logrus.Warnf("unknown file in context %s TLS bundle: %s", contextName, f)
+			}
+		}
+		return &tlsData, nil
+	}
+	return nil, nil
+}
+
+// TLSDataFromFiles reads files into a TLSData struct (or returns nil if all paths are empty)
+func TLSDataFromFiles(caPath, certPath, keyPath string) (*TLSData, error) {
+	var (
+		ca, cert, key []byte
+		err           error
+	)
+	if caPath != "" {
+		if ca, err = os.ReadFile(caPath); err != nil {
+			return nil, err
+		}
+	}
+	if certPath != "" {
+		if cert, err = os.ReadFile(certPath); err != nil {
+			return nil, err
+		}
+	}
+	if keyPath != "" {
+		if key, err = os.ReadFile(keyPath); err != nil {
+			return nil, err
+		}
+	}
+	if ca == nil && cert == nil && key == nil {
+		return nil, nil
+	}
+	return &TLSData{CA: ca, Cert: cert, Key: key}, nil
+}
diff --git a/vendor/github.com/docker/cli/cli/debug/debug.go b/vendor/github.com/docker/cli/cli/debug/debug.go
new file mode 100644
index 000000000..b00ea63ad
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/debug/debug.go
@@ -0,0 +1,26 @@
+package debug

+import (
+	"os"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Enable sets the DEBUG env var to "1"
+// and makes the logger log at debug level.
+func Enable() {
+	os.Setenv("DEBUG", "1")
+	logrus.SetLevel(logrus.DebugLevel)
+}
+
+// Disable clears the DEBUG env var
+// and makes the logger log at info level.
+func Disable() {
+	os.Setenv("DEBUG", "")
+	logrus.SetLevel(logrus.InfoLevel)
+}
+
+// IsEnabled checks whether the debug flag is set or not.
+func IsEnabled() bool {
+	return os.Getenv("DEBUG") != ""
+}
diff --git a/vendor/github.com/docker/cli/cli/flags/options.go b/vendor/github.com/docker/cli/cli/flags/options.go
new file mode 100644
index 000000000..cea0faf77
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/flags/options.go
@@ -0,0 +1,135 @@
+package flags
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/cli/cli/config"
+	"github.com/docker/cli/opts"
+	"github.com/docker/docker/client"
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/pflag"
+)
+
+const (
+	// DefaultCaFile is the default filename for the CA pem file
+	DefaultCaFile = "ca.pem"
+	// DefaultKeyFile is the default filename for the key pem file
+	DefaultKeyFile = "key.pem"
+	// DefaultCertFile is the default filename for the cert pem file
+	DefaultCertFile = "cert.pem"
+	// FlagTLSVerify is the flag name for the TLS verification option
+	FlagTLSVerify = "tlsverify"
+	// FormatHelp describes the --format flag behavior for list commands
+	FormatHelp = `Format output using a custom template:
+'table': Print output in table format with column headers (default)
+'table TEMPLATE': Print output in table format using the given Go template
+'json': Print in JSON format
+'TEMPLATE': Print output using the given Go template.
+Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates`
+	// InspectFormatHelp describes the --format flag behavior for inspect commands
+	InspectFormatHelp = `Format output using a custom template:
+'json': Print in JSON format
+'TEMPLATE': Print output using the given Go template.
+Refer to https://docs.docker.com/go/formatting/ for more information about formatting output with templates` +) + +var ( + dockerCertPath = os.Getenv(client.EnvOverrideCertPath) + dockerTLSVerify = os.Getenv(client.EnvTLSVerify) != "" + // TODO(thaJeztah) the 'DOCKER_TLS' environment variable is not documented, and does not have a const. + dockerTLS = os.Getenv("DOCKER_TLS") != "" +) + +// ClientOptions are the options used to configure the client cli. +type ClientOptions struct { + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options + Context string + ConfigDir string +} + +// NewClientOptions returns a new ClientOptions. +func NewClientOptions() *ClientOptions { + return &ClientOptions{} +} + +// InstallFlags adds flags for the common options on the FlagSet +func (o *ClientOptions) InstallFlags(flags *pflag.FlagSet) { + if dockerCertPath == "" { + dockerCertPath = config.Dir() + } + + flags.BoolVarP(&o.Debug, "debug", "D", false, "Enable debug mode") + flags.StringVarP(&o.LogLevel, "log-level", "l", "info", `Set the logging level ("debug", "info", "warn", "error", "fatal")`) + flags.BoolVar(&o.TLS, "tls", dockerTLS, "Use TLS; implied by --tlsverify") + flags.BoolVar(&o.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") + + o.TLSOptions = &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, DefaultCaFile), + CertFile: filepath.Join(dockerCertPath, DefaultCertFile), + KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), + } + tlsOptions := o.TLSOptions + flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") + flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") + flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") + + // opts.ValidateHost is not used here, so as to allow connection helpers + hostOpt := opts.NewNamedListOptsRef("hosts", &o.Hosts, nil) + flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") + flags.StringVarP(&o.Context, "context", "c", "", + `Name of the context to use to connect to the daemon (overrides `+client.EnvOverrideHost+` env var and default context set with "docker context use")`) +} + +// SetDefaultOptions sets default values for options after flag parsing is +// complete +func (o *ClientOptions) SetDefaultOptions(flags *pflag.FlagSet) { + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on TLS + // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need + // to check that here as well + if flags.Changed(FlagTLSVerify) || o.TLSVerify { + o.TLS = true + } + + if !o.TLS { + o.TLSOptions = nil + } else { + tlsOptions := o.TLSOptions + tlsOptions.InsecureSkipVerify = !o.TLSVerify + + // Reset CertFile and KeyFile to empty string if the user did not specify + // the respective flags and the respective default files were not found. 
+ if !flags.Changed("tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !flags.Changed("tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} + +// SetLogLevel sets the logrus logging level +func SetLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/vendor/github.com/docker/cli/cli/flags/options_deprecated.go b/vendor/github.com/docker/cli/cli/flags/options_deprecated.go new file mode 100644 index 000000000..97fb7e4e6 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/flags/options_deprecated.go @@ -0,0 +1,11 @@ +package flags + +// CommonOptions are options common to both the client and the daemon. +// +// Deprecated: use [ClientOptions]. +type CommonOptions = ClientOptions + +// NewCommonOptions returns a new CommonOptions +// +// Deprecated: use [NewClientOptions]. +var NewCommonOptions = NewClientOptions diff --git a/vendor/github.com/docker/cli/cli/manifest/store/store.go b/vendor/github.com/docker/cli/cli/manifest/store/store.go new file mode 100644 index 000000000..39e576f6b --- /dev/null +++ b/vendor/github.com/docker/cli/cli/manifest/store/store.go @@ -0,0 +1,179 @@ +package store + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli/manifest/types" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Store manages local storage of image distribution manifests +type Store interface { + Remove(listRef reference.Reference) error + Get(listRef reference.Reference, manifest reference.Reference) (types.ImageManifest, error) + GetList(listRef reference.Reference) ([]types.ImageManifest, error) + Save(listRef reference.Reference, manifest reference.Reference, image types.ImageManifest) error +} + +// fsStore manages manifest files stored on the local filesystem +type fsStore struct { + root string +} + +// NewStore returns a new store for a local file path +func NewStore(root string) Store { + return &fsStore{root: root} +} + +// Remove a manifest list from local storage +func (s *fsStore) Remove(listRef reference.Reference) error { + path := filepath.Join(s.root, makeFilesafeName(listRef.String())) + return os.RemoveAll(path) +} + +// Get returns the local manifest +func (s *fsStore) Get(listRef reference.Reference, manifest reference.Reference) (types.ImageManifest, error) { + filename := manifestToFilename(s.root, listRef.String(), manifest.String()) + return s.getFromFilename(manifest, filename) +} + +func (s *fsStore) getFromFilename(ref reference.Reference, filename string) (types.ImageManifest, error) { + bytes, err := os.ReadFile(filename) + switch { + case os.IsNotExist(err): + return types.ImageManifest{}, newNotFoundError(ref.String()) + case err != nil: + return types.ImageManifest{}, err + } + var manifestInfo struct { + types.ImageManifest + + // Deprecated Fields, replaced by Descriptor + Digest digest.Digest + Platform *manifestlist.PlatformSpec + } + + if err := json.Unmarshal(bytes, &manifestInfo); err != nil { + return types.ImageManifest{}, err + } + + // Compatibility 
with image manifests created before + // descriptor, newer versions omit Digest and Platform + if manifestInfo.Digest != "" { + mediaType, raw, err := manifestInfo.Payload() + if err != nil { + return types.ImageManifest{}, err + } + if dgst := digest.FromBytes(raw); dgst != manifestInfo.Digest { + return types.ImageManifest{}, errors.Errorf("invalid manifest file %v: image manifest digest mismatch (%v != %v)", filename, manifestInfo.Digest, dgst) + } + manifestInfo.ImageManifest.Descriptor = ocispec.Descriptor{ + Digest: manifestInfo.Digest, + Size: int64(len(raw)), + MediaType: mediaType, + Platform: types.OCIPlatform(manifestInfo.Platform), + } + } + + return manifestInfo.ImageManifest, nil +} + +// GetList returns all the local manifests for a transaction +func (s *fsStore) GetList(listRef reference.Reference) ([]types.ImageManifest, error) { + filenames, err := s.listManifests(listRef.String()) + switch { + case err != nil: + return nil, err + case filenames == nil: + return nil, newNotFoundError(listRef.String()) + } + + manifests := []types.ImageManifest{} + for _, filename := range filenames { + filename = filepath.Join(s.root, makeFilesafeName(listRef.String()), filename) + manifest, err := s.getFromFilename(listRef, filename) + if err != nil { + return nil, err + } + manifests = append(manifests, manifest) + } + return manifests, nil +} + +// listManifests stored in a transaction +func (s *fsStore) listManifests(transaction string) ([]string, error) { + transactionDir := filepath.Join(s.root, makeFilesafeName(transaction)) + fileInfos, err := os.ReadDir(transactionDir) + switch { + case os.IsNotExist(err): + return nil, nil + case err != nil: + return nil, err + } + + filenames := make([]string, 0, len(fileInfos)) + for _, info := range fileInfos { + filenames = append(filenames, info.Name()) + } + return filenames, nil +} + +// Save a manifest as part of a local manifest list +func (s *fsStore) Save(listRef reference.Reference, manifest reference.Reference, image types.ImageManifest) error { + if err := s.createManifestListDirectory(listRef.String()); err != nil { + return err + } + filename := manifestToFilename(s.root, listRef.String(), manifest.String()) + bytes, err := json.Marshal(image) + if err != nil { + return err + } + return os.WriteFile(filename, bytes, 0o644) +} + +func (s *fsStore) createManifestListDirectory(transaction string) error { + path := filepath.Join(s.root, makeFilesafeName(transaction)) + return os.MkdirAll(path, 0o755) +} + +func manifestToFilename(root, manifestList, manifest string) string { + return filepath.Join(root, makeFilesafeName(manifestList), makeFilesafeName(manifest)) +} + +func makeFilesafeName(ref string) string { + fileName := strings.Replace(ref, ":", "-", -1) + return strings.Replace(fileName, "/", "_", -1) +} + +type notFoundError struct { + object string +} + +func newNotFoundError(ref string) *notFoundError { + return ¬FoundError{object: ref} +} + +func (n *notFoundError) Error() string { + return fmt.Sprintf("No such manifest: %s", n.object) +} + +// NotFound interface +func (n *notFoundError) NotFound() {} + +// IsNotFound returns true if the error is a not found error +func IsNotFound(err error) bool { + _, ok := err.(notFound) + return ok +} + +type notFound interface { + NotFound() +} diff --git a/vendor/github.com/docker/cli/cli/manifest/types/types.go b/vendor/github.com/docker/cli/cli/manifest/types/types.go new file mode 100644 index 000000000..ca2a3e786 --- /dev/null +++ 
b/vendor/github.com/docker/cli/cli/manifest/types/types.go @@ -0,0 +1,150 @@ +package types + +import ( + "encoding/json" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/ocischema" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// ImageManifest contains info to output for a manifest object. +type ImageManifest struct { + Ref *SerializableNamed + Descriptor ocispec.Descriptor + Raw []byte `json:",omitempty"` + + // SchemaV2Manifest is used for inspection + SchemaV2Manifest *schema2.DeserializedManifest `json:",omitempty"` + // OCIManifest is used for inspection + OCIManifest *ocischema.DeserializedManifest `json:",omitempty"` +} + +// OCIPlatform creates an OCI platform from a manifest list platform spec +func OCIPlatform(ps *manifestlist.PlatformSpec) *ocispec.Platform { + if ps == nil { + return nil + } + return &ocispec.Platform{ + Architecture: ps.Architecture, + OS: ps.OS, + OSVersion: ps.OSVersion, + OSFeatures: ps.OSFeatures, + Variant: ps.Variant, + } +} + +// PlatformSpecFromOCI creates a platform spec from OCI platform +func PlatformSpecFromOCI(p *ocispec.Platform) *manifestlist.PlatformSpec { + if p == nil { + return nil + } + return &manifestlist.PlatformSpec{ + Architecture: p.Architecture, + OS: p.OS, + OSVersion: p.OSVersion, + OSFeatures: p.OSFeatures, + Variant: p.Variant, + } +} + +// Blobs returns the digests for all the blobs referenced by this manifest +func (i ImageManifest) Blobs() []digest.Digest { + digests := []digest.Digest{} + switch { + case i.SchemaV2Manifest != nil: + for _, descriptor := range i.SchemaV2Manifest.References() { + digests = append(digests, descriptor.Digest) + } + case i.OCIManifest != nil: + for _, descriptor := range i.OCIManifest.References() { + digests = append(digests, descriptor.Digest) + } + } + return digests +} + +// Payload returns the media type and bytes for the manifest +func (i ImageManifest) Payload() (string, []byte, error) { + // TODO: If available, read content from a content store by digest + switch { + case i.SchemaV2Manifest != nil: + return i.SchemaV2Manifest.Payload() + case i.OCIManifest != nil: + return i.OCIManifest.Payload() + default: + return "", nil, errors.Errorf("%s has no payload", i.Ref) + } +} + +// References implements the distribution.Manifest interface. It delegates to +// the underlying manifest. +func (i ImageManifest) References() []distribution.Descriptor { + switch { + case i.SchemaV2Manifest != nil: + return i.SchemaV2Manifest.References() + case i.OCIManifest != nil: + return i.OCIManifest.References() + default: + return nil + } +} + +// NewImageManifest returns a new ImageManifest object. The values for Platform +// are initialized from those in the image +func NewImageManifest(ref reference.Named, desc ocispec.Descriptor, manifest *schema2.DeserializedManifest) ImageManifest { + raw, err := manifest.MarshalJSON() + if err != nil { + raw = nil + } + + return ImageManifest{ + Ref: &SerializableNamed{Named: ref}, + Descriptor: desc, + Raw: raw, + SchemaV2Manifest: manifest, + } +} + +// NewOCIImageManifest returns a new ImageManifest object. 
The values for +// Platform are initialized from those in the image +func NewOCIImageManifest(ref reference.Named, desc ocispec.Descriptor, manifest *ocischema.DeserializedManifest) ImageManifest { + raw, err := manifest.MarshalJSON() + if err != nil { + raw = nil + } + + return ImageManifest{ + Ref: &SerializableNamed{Named: ref}, + Descriptor: desc, + Raw: raw, + OCIManifest: manifest, + } +} + +// SerializableNamed is a reference.Named that can be serialized and deserialized +// from JSON +type SerializableNamed struct { + reference.Named +} + +// UnmarshalJSON loads the Named reference from JSON bytes +func (s *SerializableNamed) UnmarshalJSON(b []byte) error { + var raw string + if err := json.Unmarshal(b, &raw); err != nil { + return errors.Wrapf(err, "invalid named reference bytes: %s", b) + } + var err error + s.Named, err = reference.ParseNamed(raw) + return err +} + +// MarshalJSON returns the JSON bytes representation +func (s *SerializableNamed) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} diff --git a/vendor/github.com/docker/cli/cli/registry/client/client.go b/vendor/github.com/docker/cli/cli/registry/client/client.go new file mode 100644 index 000000000..ba76a34fc --- /dev/null +++ b/vendor/github.com/docker/cli/cli/registry/client/client.go @@ -0,0 +1,194 @@ +package client + +import ( + "context" + "fmt" + "net/http" + "strings" + + manifesttypes "github.com/docker/cli/cli/manifest/types" + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + distributionclient "github.com/docker/distribution/registry/client" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// RegistryClient is a client used to communicate with a Docker distribution +// registry +type RegistryClient interface { + GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) + GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) + MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error + PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error) +} + +// NewRegistryClient returns a new RegistryClient with a resolver +func NewRegistryClient(resolver AuthConfigResolver, userAgent string, insecure bool) RegistryClient { + return &client{ + authConfigResolver: resolver, + insecureRegistry: insecure, + userAgent: userAgent, + } +} + +// AuthConfigResolver returns Auth Configuration for an index +type AuthConfigResolver func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig + +// PutManifestOptions is the data sent to push a manifest +type PutManifestOptions struct { + MediaType string + Payload []byte +} + +type client struct { + authConfigResolver AuthConfigResolver + insecureRegistry bool + userAgent string +} + +// ErrBlobCreated returned when a blob mount request was created +type ErrBlobCreated struct { + From reference.Named + Target reference.Named +} + +func (err ErrBlobCreated) Error() string { + return fmt.Sprintf("blob mounted from: %v to: %v", + err.From, err.Target) +} + +// ErrHTTPProto returned if attempting to use TLS with a non-TLS registry +type ErrHTTPProto struct { + OrigErr string +} + +func (err ErrHTTPProto) Error() string { + return err.OrigErr +} + +var _ RegistryClient = &client{} + +// MountBlob into the 
registry, so it can be referenced by a manifest +func (c *client) MountBlob(ctx context.Context, sourceRef reference.Canonical, targetRef reference.Named) error { + repoEndpoint, err := newDefaultRepositoryEndpoint(targetRef, c.insecureRegistry) + if err != nil { + return err + } + repo, err := c.getRepositoryForReference(ctx, targetRef, repoEndpoint) + if err != nil { + return err + } + lu, err := repo.Blobs(ctx).Create(ctx, distributionclient.WithMountFrom(sourceRef)) + switch err.(type) { + case distribution.ErrBlobMounted: + logrus.Debugf("mount of blob %s succeeded", sourceRef) + return nil + case nil: + default: + return errors.Wrapf(err, "failed to mount blob %s to %s", sourceRef, targetRef) + } + lu.Cancel(ctx) + logrus.Debugf("mount of blob %s created", sourceRef) + return ErrBlobCreated{From: sourceRef, Target: targetRef} +} + +// PutManifest sends the manifest to a registry and returns the new digest +func (c *client) PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error) { + repoEndpoint, err := newDefaultRepositoryEndpoint(ref, c.insecureRegistry) + if err != nil { + return digest.Digest(""), err + } + + repo, err := c.getRepositoryForReference(ctx, ref, repoEndpoint) + if err != nil { + return digest.Digest(""), err + } + + manifestService, err := repo.Manifests(ctx) + if err != nil { + return digest.Digest(""), err + } + + _, opts, err := getManifestOptionsFromReference(ref) + if err != nil { + return digest.Digest(""), err + } + + dgst, err := manifestService.Put(ctx, manifest, opts...) + return dgst, errors.Wrapf(err, "failed to put manifest %s", ref) +} + +func (c *client) getRepositoryForReference(ctx context.Context, ref reference.Named, repoEndpoint repositoryEndpoint) (distribution.Repository, error) { + repoName, err := reference.WithName(repoEndpoint.Name()) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse repo name from %s", ref) + } + httpTransport, err := c.getHTTPTransportForRepoEndpoint(ctx, repoEndpoint) + if err != nil { + if !strings.Contains(err.Error(), "server gave HTTP response to HTTPS client") { + return nil, err + } + if !repoEndpoint.endpoint.TLSConfig.InsecureSkipVerify { + return nil, ErrHTTPProto{OrigErr: err.Error()} + } + // --insecure was set; fall back to plain HTTP + if url := repoEndpoint.endpoint.URL; url != nil && url.Scheme == "https" { + url.Scheme = "http" + httpTransport, err = c.getHTTPTransportForRepoEndpoint(ctx, repoEndpoint) + if err != nil { + return nil, err + } + } + } + return distributionclient.NewRepository(repoName, repoEndpoint.BaseURL(), httpTransport) +} + +func (c *client) getHTTPTransportForRepoEndpoint(ctx context.Context, repoEndpoint repositoryEndpoint) (http.RoundTripper, error) { + httpTransport, err := getHTTPTransport( + c.authConfigResolver(ctx, repoEndpoint.info.Index), + repoEndpoint.endpoint, + repoEndpoint.Name(), + c.userAgent) + return httpTransport, errors.Wrap(err, "failed to configure transport") +} + +// GetManifest returns an ImageManifest for the reference +func (c *client) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) { + var result manifesttypes.ImageManifest + fetch := func(ctx context.Context, repo distribution.Repository, ref reference.Named) (bool, error) { + var err error + result, err = fetchManifest(ctx, repo, ref) + return result.Ref != nil, err + } + + err := c.iterateEndpoints(ctx, ref, fetch) + return result, err +} + +// GetManifestList returns a list of ImageManifest 
for the reference +func (c *client) GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) { + result := []manifesttypes.ImageManifest{} + fetch := func(ctx context.Context, repo distribution.Repository, ref reference.Named) (bool, error) { + var err error + result, err = fetchList(ctx, repo, ref) + return len(result) > 0, err + } + + err := c.iterateEndpoints(ctx, ref, fetch) + return result, err +} + +func getManifestOptionsFromReference(ref reference.Named) (digest.Digest, []distribution.ManifestServiceOption, error) { + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag := tagged.Tag() + return "", []distribution.ManifestServiceOption{distribution.WithTag(tag)}, nil + } + if digested, isDigested := ref.(reference.Canonical); isDigested { + return digested.Digest(), []distribution.ManifestServiceOption{}, nil + } + return "", nil, errors.Errorf("%s no tag or digest", ref) +} diff --git a/vendor/github.com/docker/cli/cli/registry/client/endpoint.go b/vendor/github.com/docker/cli/cli/registry/client/endpoint.go new file mode 100644 index 000000000..f69c5c0dc --- /dev/null +++ b/vendor/github.com/docker/cli/cli/registry/client/endpoint.go @@ -0,0 +1,130 @@ +package client + +import ( + "fmt" + "net" + "net/http" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + authtypes "github.com/docker/docker/api/types" + "github.com/docker/docker/registry" + "github.com/pkg/errors" +) + +type repositoryEndpoint struct { + info *registry.RepositoryInfo + endpoint registry.APIEndpoint +} + +// Name returns the repository name +func (r repositoryEndpoint) Name() string { + repoName := r.info.Name.Name() + // If endpoint does not support CanonicalName, use the RemoteName instead + if r.endpoint.TrimHostname { + repoName = reference.Path(r.info.Name) + } + return repoName +} + +// BaseURL returns the endpoint url +func (r repositoryEndpoint) BaseURL() string { + return r.endpoint.URL.String() +} + +func newDefaultRepositoryEndpoint(ref reference.Named, insecure bool) (repositoryEndpoint, error) { + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return repositoryEndpoint{}, err + } + endpoint, err := getDefaultEndpointFromRepoInfo(repoInfo) + if err != nil { + return repositoryEndpoint{}, err + } + if insecure { + endpoint.TLSConfig.InsecureSkipVerify = true + } + return repositoryEndpoint{info: repoInfo, endpoint: endpoint}, nil +} + +func getDefaultEndpointFromRepoInfo(repoInfo *registry.RepositoryInfo) (registry.APIEndpoint, error) { + var err error + + options := registry.ServiceOptions{} + registryService, err := registry.NewService(options) + if err != nil { + return registry.APIEndpoint{}, err + } + endpoints, err := registryService.LookupPushEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return registry.APIEndpoint{}, err + } + // Default to the highest priority endpoint to return + endpoint := endpoints[0] + if !repoInfo.Index.Secure { + for _, ep := range endpoints { + if ep.URL.Scheme == "http" { + endpoint = ep + } + } + } + return endpoint, nil +} + +// getHTTPTransport builds a transport for use in communicating with a registry +func getHTTPTransport(authConfig authtypes.AuthConfig, endpoint registry.APIEndpoint, repoName string, userAgent string) (http.RoundTripper, error) { + // get the http transport, this will be used in a client to upload manifest + base := 
&http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + DisableKeepAlives: true, + } + + modifiers := registry.Headers(userAgent, http.Header{}) + authTransport := transport.NewTransport(base, modifiers...) + challengeManager, err := registry.PingV2Registry(endpoint.URL, authTransport) + if err != nil { + return nil, errors.Wrap(err, "error pinging v2 registry") + } + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + creds := registry.NewStaticCredentialStore(&authConfig) + tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, "push", "pull") + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + return transport.NewTransport(base, modifiers...), nil +} + +// RepoNameForReference returns the repository name from a reference +func RepoNameForReference(ref reference.Named) (string, error) { + // insecure is fine since this only returns the name + repo, err := newDefaultRepositoryEndpoint(ref, false) + if err != nil { + return "", err + } + return repo.Name(), nil +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} diff --git a/vendor/github.com/docker/cli/cli/registry/client/fetcher.go b/vendor/github.com/docker/cli/cli/registry/client/fetcher.go new file mode 100644 index 000000000..acae274a4 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/registry/client/fetcher.go @@ -0,0 +1,327 @@ +package client + +import ( + "context" + "encoding/json" + + "github.com/docker/cli/cli/manifest/types" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/ocischema" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + v2 "github.com/docker/distribution/registry/api/v2" + distclient "github.com/docker/distribution/registry/client" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// fetchManifest pulls a manifest from a registry and returns it. An error +// is returned if no manifest is found matching namedRef. 
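Before the fetch internals, a sketch of how the exported RegistryClient above might be driven end to end; the anonymous-auth resolver and user-agent string are assumptions for illustration, not part of the vendored code:

package example

import (
	"context"

	manifesttypes "github.com/docker/cli/cli/manifest/types"
	registryclient "github.com/docker/cli/cli/registry/client"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	registrytypes "github.com/docker/docker/api/types/registry"
)

// getManifest resolves an image name and fetches its manifest anonymously.
func getManifest(ctx context.Context, image string) (manifesttypes.ImageManifest, error) {
	ref, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return manifesttypes.ImageManifest{}, err
	}
	// Resolver returning empty credentials, i.e. an unauthenticated pull.
	anon := func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
		return types.AuthConfig{}
	}
	rc := registryclient.NewRegistryClient(anon, "example/0.1", false)
	return rc.GetManifest(ctx, ref)
}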
+func fetchManifest(ctx context.Context, repo distribution.Repository, ref reference.Named) (types.ImageManifest, error) { + manifest, err := getManifest(ctx, repo, ref) + if err != nil { + return types.ImageManifest{}, err + } + + switch v := manifest.(type) { + // Removed Schema 1 support + case *schema2.DeserializedManifest: + imageManifest, err := pullManifestSchemaV2(ctx, ref, repo, *v) + if err != nil { + return types.ImageManifest{}, err + } + return imageManifest, nil + case *ocischema.DeserializedManifest: + imageManifest, err := pullManifestOCISchema(ctx, ref, repo, *v) + if err != nil { + return types.ImageManifest{}, err + } + return imageManifest, nil + case *manifestlist.DeserializedManifestList: + return types.ImageManifest{}, errors.Errorf("%s is a manifest list", ref) + } + return types.ImageManifest{}, errors.Errorf("%s is not a manifest", ref) +} + +func fetchList(ctx context.Context, repo distribution.Repository, ref reference.Named) ([]types.ImageManifest, error) { + manifest, err := getManifest(ctx, repo, ref) + if err != nil { + return nil, err + } + + switch v := manifest.(type) { + case *manifestlist.DeserializedManifestList: + imageManifests, err := pullManifestList(ctx, ref, repo, *v) + if err != nil { + return nil, err + } + return imageManifests, nil + default: + return nil, errors.Errorf("unsupported manifest format: %v", v) + } +} + +func getManifest(ctx context.Context, repo distribution.Repository, ref reference.Named) (distribution.Manifest, error) { + manSvc, err := repo.Manifests(ctx) + if err != nil { + return nil, err + } + + dgst, opts, err := getManifestOptionsFromReference(ref) + if err != nil { + return nil, errors.Errorf("image manifest for %q does not exist", ref) + } + return manSvc.Get(ctx, dgst, opts...) 
+} + +func pullManifestSchemaV2(ctx context.Context, ref reference.Named, repo distribution.Repository, mfst schema2.DeserializedManifest) (types.ImageManifest, error) { + manifestDesc, err := validateManifestDigest(ref, mfst) + if err != nil { + return types.ImageManifest{}, err + } + configJSON, err := pullManifestSchemaV2ImageConfig(ctx, mfst.Target().Digest, repo) + if err != nil { + return types.ImageManifest{}, err + } + + if manifestDesc.Platform == nil { + manifestDesc.Platform = &ocispec.Platform{} + } + + // Fill in os and architecture fields from config JSON + if err := json.Unmarshal(configJSON, manifestDesc.Platform); err != nil { + return types.ImageManifest{}, err + } + + return types.NewImageManifest(ref, manifestDesc, &mfst), nil +} + +func pullManifestOCISchema(ctx context.Context, ref reference.Named, repo distribution.Repository, mfst ocischema.DeserializedManifest) (types.ImageManifest, error) { + manifestDesc, err := validateManifestDigest(ref, mfst) + if err != nil { + return types.ImageManifest{}, err + } + configJSON, err := pullManifestSchemaV2ImageConfig(ctx, mfst.Target().Digest, repo) + if err != nil { + return types.ImageManifest{}, err + } + + if manifestDesc.Platform == nil { + manifestDesc.Platform = &ocispec.Platform{} + } + + // Fill in os and architecture fields from config JSON + if err := json.Unmarshal(configJSON, manifestDesc.Platform); err != nil { + return types.ImageManifest{}, err + } + + return types.NewOCIImageManifest(ref, manifestDesc, &mfst), nil +} + +func pullManifestSchemaV2ImageConfig(ctx context.Context, dgst digest.Digest, repo distribution.Repository) ([]byte, error) { + blobs := repo.Blobs(ctx) + configJSON, err := blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + verifier := dgst.Verifier() + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + return nil, errors.Errorf("image config verification failed for digest %s", dgst) + } + return configJSON, nil +} + +// validateManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. +func validateManifestDigest(ref reference.Named, mfst distribution.Manifest) (ocispec.Descriptor, error) { + mediaType, canonical, err := mfst.Payload() + if err != nil { + return ocispec.Descriptor{}, err + } + desc := ocispec.Descriptor{ + Digest: digest.FromBytes(canonical), + Size: int64(len(canonical)), + MediaType: mediaType, + } + + // If pull by digest, then verify the manifest digest. + if digested, isDigested := ref.(reference.Canonical); isDigested { + if digested.Digest() != desc.Digest { + err := errors.Errorf("manifest verification failed for digest %s", digested.Digest()) + return ocispec.Descriptor{}, err + } + } + + return desc, nil +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specific manifests. 
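The config-blob and manifest digest checks in this file share one pattern; isolated as a sketch, using only the go-digest API already imported here:

package example

import "github.com/opencontainers/go-digest"

// verified reports whether payload matches the expected digest, using the
// same Verifier pattern as pullManifestSchemaV2ImageConfig above.
func verified(payload []byte, want digest.Digest) bool {
	v := want.Verifier()
	if _, err := v.Write(payload); err != nil {
		return false
	}
	return v.Verified()
}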
+func pullManifestList(ctx context.Context, ref reference.Named, repo distribution.Repository, mfstList manifestlist.DeserializedManifestList) ([]types.ImageManifest, error) { + infos := []types.ImageManifest{} + + if _, err := validateManifestDigest(ref, mfstList); err != nil { + return nil, err + } + + for _, manifestDescriptor := range mfstList.Manifests { + manSvc, err := repo.Manifests(ctx) + if err != nil { + return nil, err + } + manifest, err := manSvc.Get(ctx, manifestDescriptor.Digest) + if err != nil { + return nil, err + } + + manifestRef, err := reference.WithDigest(ref, manifestDescriptor.Digest) + if err != nil { + return nil, err + } + + var imageManifest types.ImageManifest + switch v := manifest.(type) { + case *schema2.DeserializedManifest: + imageManifest, err = pullManifestSchemaV2(ctx, manifestRef, repo, *v) + case *ocischema.DeserializedManifest: + imageManifest, err = pullManifestOCISchema(ctx, manifestRef, repo, *v) + default: + err = errors.Errorf("unsupported manifest type: %T", manifest) + } + if err != nil { + return nil, err + } + + // Replace platform from config + imageManifest.Descriptor.Platform = types.OCIPlatform(&manifestDescriptor.Platform) + + infos = append(infos, imageManifest) + } + return infos, nil +} + +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case errcode.Error: + e := err.(errcode.Error) + switch e.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false + case *distclient.UnexpectedHTTPResponseError: + return true + } + return false +} + +func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named, each func(context.Context, distribution.Repository, reference.Named) (bool, error)) error { + endpoints, err := allEndpoints(namedRef, c.insecureRegistry) + if err != nil { + return err + } + + repoInfo, err := registry.ParseRepositoryInfo(namedRef) + if err != nil { + return err + } + + confirmedTLSRegistries := make(map[string]bool) + for _, endpoint := range endpoints { + if endpoint.Version == registry.APIVersion1 { + logrus.Debugf("skipping v1 endpoint %s", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + if c.insecureRegistry { + endpoint.TLSConfig.InsecureSkipVerify = true + } + repoEndpoint := repositoryEndpoint{endpoint: endpoint, info: repoInfo} + repo, err := c.getRepositoryForReference(ctx, namedRef, repoEndpoint) + if err != nil { + logrus.Debugf("error %s with repo endpoint %+v", err, repoEndpoint) + if _, ok := err.(ErrHTTPProto); ok { + continue + } + return err + } + + if endpoint.URL.Scheme == "http" && !c.insecureRegistry { + logrus.Debugf("skipping non-tls registry endpoint: %s", endpoint.URL) + continue + } + done, err := each(ctx, repo, namedRef) + if err != nil { + if continueOnError(err) { + if endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = true + } + logrus.Debugf("continuing on error (%T) %s", err, err) + continue + } + logrus.Debugf("not continuing on error (%T) %s", err, err) + return err + } + if done { + return nil + } + } + return newNotFoundError(namedRef.String()) +} + +// allEndpoints returns a list of endpoints ordered by priority (v2, https, v1). 
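As a sketch, the endpoint lookup that iterateEndpoints relies on can be reproduced for a single hostname with the same registry-service calls used below (pullEndpoints is an illustrative helper, not part of the package):

package example

import "github.com/docker/docker/registry"

// pullEndpoints looks up the candidate pull endpoints for one hostname; the
// insecure flag mimics how allEndpoints handles --insecure.
func pullEndpoints(hostname string, insecure bool) ([]registry.APIEndpoint, error) {
	var opts registry.ServiceOptions
	if insecure {
		opts.InsecureRegistries = []string{hostname}
	}
	svc, err := registry.NewService(opts)
	if err != nil {
		return nil, err
	}
	return svc.LookupPullEndpoints(hostname)
}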
+func allEndpoints(namedRef reference.Named, insecure bool) ([]registry.APIEndpoint, error) {
+	repoInfo, err := registry.ParseRepositoryInfo(namedRef)
+	if err != nil {
+		return nil, err
+	}
+
+	var serviceOpts registry.ServiceOptions
+	if insecure {
+		logrus.Debugf("allowing insecure registry for: %s", reference.Domain(namedRef))
+		serviceOpts.InsecureRegistries = []string{reference.Domain(namedRef)}
+	}
+	registryService, err := registry.NewService(serviceOpts)
+	if err != nil {
+		return []registry.APIEndpoint{}, err
+	}
+	endpoints, err := registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name))
+	logrus.Debugf("endpoints for %s: %v", namedRef, endpoints)
+	return endpoints, err
+}
+
+func newNotFoundError(ref string) *notFoundError {
+	return &notFoundError{err: errors.New("no such manifest: " + ref)}
+}
+
+type notFoundError struct {
+	err error
+}
+
+func (n *notFoundError) Error() string {
+	return n.err.Error()
+}
+
+// NotFound satisfies interface github.com/docker/docker/errdefs.ErrNotFound
+func (n *notFoundError) NotFound() {}
diff --git a/vendor/github.com/docker/cli/cli/streams/in.go b/vendor/github.com/docker/cli/cli/streams/in.go
new file mode 100644
index 000000000..44b0de38b
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/streams/in.go
@@ -0,0 +1,56 @@
+package streams
+
+import (
+	"errors"
+	"io"
+	"os"
+	"runtime"
+
+	"github.com/moby/term"
+)
+
+// In is an input stream used by the DockerCli to read user input
+type In struct {
+	commonStream
+	in io.ReadCloser
+}
+
+func (i *In) Read(p []byte) (int, error) {
+	return i.in.Read(p)
+}
+
+// Close implements the Closer interface
+func (i *In) Close() error {
+	return i.in.Close()
+}
+
+// SetRawTerminal sets raw mode on the input terminal
+func (i *In) SetRawTerminal() (err error) {
+	if os.Getenv("NORAW") != "" || !i.commonStream.isTerminal {
+		return nil
+	}
+	i.commonStream.state, err = term.SetRawTerminal(i.commonStream.fd)
+	return err
+}
+
+// CheckTty checks if we are trying to attach to a container tty
+// from a non-tty client input stream, and if so, returns an error.
+func (i *In) CheckTty(attachStdin, ttyMode bool) error {
+	// In order to attach to a container tty, input stream for the client must
+	// be a tty itself: redirecting or piping the client standard input is
+	// incompatible with `docker run -t`, `docker exec -t` or `docker attach`.
+	if ttyMode && attachStdin && !i.isTerminal {
+		eText := "the input device is not a TTY"
+		if runtime.GOOS == "windows" {
+			return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'")
+		}
+		return errors.New(eText)
+	}
+	return nil
+}
+
+// NewIn returns a new In object from a ReadCloser
+func NewIn(in io.ReadCloser) *In {
+	fd, isTerminal := term.GetFdInfo(in)
+	return &In{commonStream: commonStream{fd: fd, isTerminal: isTerminal}, in: in}
+}
diff --git a/vendor/github.com/docker/cli/cli/streams/out.go b/vendor/github.com/docker/cli/cli/streams/out.go
new file mode 100644
index 000000000..95e21464a
--- /dev/null
+++ b/vendor/github.com/docker/cli/cli/streams/out.go
@@ -0,0 +1,50 @@
+package streams
+
+import (
+	"io"
+	"os"
+
+	"github.com/moby/term"
+	"github.com/sirupsen/logrus"
+)
+
+// Out is an output stream used by the DockerCli to write normal program
+// output.
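+//
+// A minimal usage sketch (illustrative only, not part of the upstream file):
+//
+//	out := NewOut(os.Stdout)
+//	if out.IsTerminal() {
+//		height, width := out.GetTtySize()
+//		_, _ = height, width
+//	}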
+type Out struct { + commonStream + out io.Writer +} + +func (o *Out) Write(p []byte) (int, error) { + return o.out.Write(p) +} + +// SetRawTerminal sets raw mode on the input terminal +func (o *Out) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !o.commonStream.isTerminal { + return nil + } + o.commonStream.state, err = term.SetRawTerminalOutput(o.commonStream.fd) + return err +} + +// GetTtySize returns the height and width in characters of the tty +func (o *Out) GetTtySize() (uint, uint) { + if !o.isTerminal { + return 0, 0 + } + ws, err := term.GetWinsize(o.fd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return uint(ws.Height), uint(ws.Width) +} + +// NewOut returns a new Out object from a Writer +func NewOut(out io.Writer) *Out { + fd, isTerminal := term.GetFdInfo(out) + return &Out{commonStream: commonStream{fd: fd, isTerminal: isTerminal}, out: out} +} diff --git a/vendor/github.com/docker/cli/cli/streams/stream.go b/vendor/github.com/docker/cli/cli/streams/stream.go new file mode 100644 index 000000000..21f0e452e --- /dev/null +++ b/vendor/github.com/docker/cli/cli/streams/stream.go @@ -0,0 +1,34 @@ +package streams + +import ( + "github.com/moby/term" +) + +// commonStream is an input stream used by the DockerCli to read user input +type commonStream struct { + fd uintptr + isTerminal bool + state *term.State +} + +// FD returns the file descriptor number for this stream +func (s *commonStream) FD() uintptr { + return s.fd +} + +// IsTerminal returns true if this stream is connected to a terminal +func (s *commonStream) IsTerminal() bool { + return s.isTerminal +} + +// RestoreTerminal restores normal mode to the terminal +func (s *commonStream) RestoreTerminal() { + if s.state != nil { + term.RestoreTerminal(s.fd, s.state) + } +} + +// SetIsTerminal sets the boolean used for isTerminal +func (s *commonStream) SetIsTerminal(isTerminal bool) { + s.isTerminal = isTerminal +} diff --git a/vendor/github.com/docker/cli/cli/trust/trust.go b/vendor/github.com/docker/cli/cli/trust/trust.go new file mode 100644 index 000000000..457f799fd --- /dev/null +++ b/vendor/github.com/docker/cli/cli/trust/trust.go @@ -0,0 +1,386 @@ +package trust + +import ( + "context" + "encoding/json" + "io" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "time" + + "github.com/docker/cli/cli/config" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/tlsconfig" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/theupdateframework/notary" + "github.com/theupdateframework/notary/client" + "github.com/theupdateframework/notary/passphrase" + "github.com/theupdateframework/notary/storage" + "github.com/theupdateframework/notary/trustmanager" + "github.com/theupdateframework/notary/trustpinning" + "github.com/theupdateframework/notary/tuf/data" + "github.com/theupdateframework/notary/tuf/signed" +) + +var ( + // ReleasesRole is the role named "releases" + ReleasesRole = data.RoleName(path.Join(data.CanonicalTargetsRole.String(), "releases")) + // ActionsPullOnly defines the actions for read-only interactions with a 
Notary Repository + ActionsPullOnly = []string{"pull"} + // ActionsPushAndPull defines the actions for read-write interactions with a Notary Repository + ActionsPushAndPull = []string{"pull", "push"} + // NotaryServer is the endpoint serving the Notary trust server + NotaryServer = "https://notary.docker.io" +) + +// GetTrustDirectory returns the base trust directory name +func GetTrustDirectory() string { + return filepath.Join(config.Dir(), "trust") +} + +// certificateDirectory returns the directory containing +// TLS certificates for the given server. An error is +// returned if there was an error parsing the server string. +func certificateDirectory(server string) (string, error) { + u, err := url.Parse(server) + if err != nil { + return "", err + } + + return filepath.Join(config.Dir(), "tls", u.Host), nil +} + +// Server returns the base URL for the trust server. +func Server(index *registrytypes.IndexInfo) (string, error) { + if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { + urlObj, err := url.Parse(s) + if err != nil || urlObj.Scheme != "https" { + return "", errors.Errorf("valid https URL required for trust server, got %s", s) + } + + return s, nil + } + if index.Official { + return NotaryServer, nil + } + return "https://" + index.Name, nil +} + +type simpleCredentialStore struct { + auth types.AuthConfig +} + +func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { + return scs.auth.Username, scs.auth.Password +} + +func (scs simpleCredentialStore) RefreshToken(u *url.URL, service string) string { + return scs.auth.IdentityToken +} + +func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +// GetNotaryRepository returns a NotaryRepository which stores all the +// information needed to operate on a notary repository. +// It creates an HTTP transport providing authentication support. +func GetNotaryRepository(in io.Reader, out io.Writer, userAgent string, repoInfo *registry.RepositoryInfo, authConfig *types.AuthConfig, actions ...string) (client.Repository, error) { + server, err := Server(repoInfo.Index) + if err != nil { + return nil, err + } + + cfg := tlsconfig.ClientDefault() + cfg.InsecureSkipVerify = !repoInfo.Index.Secure + + // Get certificate base directory + certDir, err := certificateDirectory(server) + if err != nil { + return nil, err + } + logrus.Debugf("reading certificate directory: %s", certDir) + + if err := registry.ReadCertsDirectory(cfg, certDir); err != nil { + return nil, err + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + DisableKeepAlives: true, + } + + // Skip configuration headers since request is not going to Docker daemon + modifiers := registry.Headers(userAgent, http.Header{}) + authTransport := transport.NewTransport(base, modifiers...) 
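+	// Note: authTransport only backs the ping request below; the transport
+	// used for the actual notary calls is rebuilt afterwards with the
+	// challenge-derived auth handlers attached.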
+ pingClient := &http.Client{ + Transport: authTransport, + Timeout: 5 * time.Second, + } + endpointStr := server + "/v2/" + req, err := http.NewRequest(http.MethodGet, endpointStr, nil) + if err != nil { + return nil, err + } + + challengeManager := challenge.NewSimpleManager() + + resp, err := pingClient.Do(req) + if err != nil { + // Ignore error on ping to operate in offline mode + logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) + } else { + defer resp.Body.Close() + + // Add response to the challenge manager to parse out + // authentication header and register authentication method + if err := challengeManager.AddResponse(resp); err != nil { + return nil, err + } + } + + scope := auth.RepositoryScope{ + Repository: repoInfo.Name.Name(), + Actions: actions, + Class: repoInfo.Class, + } + creds := simpleCredentialStore{auth: *authConfig} + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + tr := transport.NewTransport(base, modifiers...) + + return client.NewFileCachedRepository( + GetTrustDirectory(), + data.GUN(repoInfo.Name.Name()), + server, + tr, + GetPassphraseRetriever(in, out), + trustpinning.TrustPinConfig{}) +} + +// GetPassphraseRetriever returns a passphrase retriever that utilizes Content Trust env vars +func GetPassphraseRetriever(in io.Reader, out io.Writer) notary.PassRetriever { + aliasMap := map[string]string{ + "root": "root", + "snapshot": "repository", + "targets": "repository", + "default": "repository", + } + baseRetriever := passphrase.PromptRetrieverWithInOut(in, out, aliasMap) + env := map[string]string{ + "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), + "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + } + + return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { + if v := env[alias]; v != "" { + return v, numAttempts > 1, nil + } + // For non-root roles, we can also try the "default" alias if it is specified + if v := env["default"]; v != "" && alias != data.CanonicalRootRole.String() { + return v, numAttempts > 1, nil + } + return baseRetriever(keyName, alias, createNew, numAttempts) + } +} + +// NotaryError formats an error message received from the notary service +func NotaryError(repoName string, err error) error { + switch err.(type) { + case *json.SyntaxError: + logrus.Debugf("Notary syntax error: %s", err) + return errors.Errorf("Error: no trust data available for remote repository %s. 
Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName)
+	case signed.ErrExpired:
+		return errors.Errorf("Error: remote repository %s out-of-date: %v", repoName, err)
+	case trustmanager.ErrKeyNotFound:
+		return errors.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err)
+	case storage.NetworkError:
+		return errors.Errorf("Error: error contacting notary server: %v", err)
+	case storage.ErrMetaNotFound:
+		return errors.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err)
+	case trustpinning.ErrRootRotationFail, trustpinning.ErrValidationFail, signed.ErrInvalidKeyType:
+		return errors.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err)
+	case signed.ErrNoKeys:
+		return errors.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err)
+	case signed.ErrLowVersion:
+		return errors.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err)
+	case signed.ErrRoleThreshold:
+		return errors.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err)
+	case client.ErrRepositoryNotExist:
+		return errors.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err)
+	case signed.ErrInsufficientSignatures:
+		return errors.Errorf("Error: could not produce valid signature for %s. If Yubikey was used, was touch input provided?: %v", repoName, err)
+	}
+
+	return err
+}
+
+// GetSignableRoles returns a list of roles for which we have valid signing
+// keys, given a notary repository and a target
+func GetSignableRoles(repo client.Repository, target *client.Target) ([]data.RoleName, error) {
+	var signableRoles []data.RoleName
+
+	// translate the full key names, which includes the GUN, into just the key IDs
+	allCanonicalKeyIDs := make(map[string]struct{})
+	for fullKeyID := range repo.GetCryptoService().ListAllKeys() {
+		allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{}
+	}
+
+	allDelegationRoles, err := repo.GetDelegationRoles()
+	if err != nil {
+		return signableRoles, err
+	}
+
+	// if there are no delegation roles, then just try to sign it into the targets role
+	if len(allDelegationRoles) == 0 {
+		signableRoles = append(signableRoles, data.CanonicalTargetsRole)
+		return signableRoles, nil
+	}
+
+	// there are delegation roles, find every delegation role we have a key for, and
+	// attempt to sign into all those roles.
+	for _, delegationRole := range allDelegationRoles {
+		// We do not support signing any delegation role that isn't a direct child of the targets role.
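+		// (for example, targets/releases is a direct child; targets/a/b is not)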
+ // Also don't bother checking the keys if we can't add the target + // to this role due to path restrictions + if path.Dir(delegationRole.Name.String()) != data.CanonicalTargetsRole.String() || !delegationRole.CheckPaths(target.Name) { + continue + } + + for _, canonicalKeyID := range delegationRole.KeyIDs { + if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok { + signableRoles = append(signableRoles, delegationRole.Name) + break + } + } + } + + if len(signableRoles) == 0 { + return signableRoles, errors.Errorf("no valid signing keys for delegation roles") + } + + return signableRoles, nil +} + +// ImageRefAndAuth contains all reference information and the auth config for an image request +type ImageRefAndAuth struct { + original string + authConfig *types.AuthConfig + reference reference.Named + repoInfo *registry.RepositoryInfo + tag string + digest digest.Digest +} + +// GetImageReferencesAndAuth retrieves the necessary reference and auth information for an image name +// as an ImageRefAndAuth struct +func GetImageReferencesAndAuth(ctx context.Context, rs registry.Service, + authResolver func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig, + imgName string, +) (ImageRefAndAuth, error) { + ref, err := reference.ParseNormalizedNamed(imgName) + if err != nil { + return ImageRefAndAuth{}, err + } + + // Resolve the Repository name from fqn to RepositoryInfo + var repoInfo *registry.RepositoryInfo + if rs != nil { + repoInfo, err = rs.ResolveRepository(ref) + } else { + repoInfo, err = registry.ParseRepositoryInfo(ref) + } + + if err != nil { + return ImageRefAndAuth{}, err + } + + authConfig := authResolver(ctx, repoInfo.Index) + return ImageRefAndAuth{ + original: imgName, + authConfig: &authConfig, + reference: ref, + repoInfo: repoInfo, + tag: getTag(ref), + digest: getDigest(ref), + }, nil +} + +func getTag(ref reference.Named) string { + switch x := ref.(type) { + case reference.Canonical, reference.Digested: + return "" + case reference.NamedTagged: + return x.Tag() + default: + return "" + } +} + +func getDigest(ref reference.Named) digest.Digest { + switch x := ref.(type) { + case reference.Canonical: + return x.Digest() + case reference.Digested: + return x.Digest() + default: + return digest.Digest("") + } +} + +// AuthConfig returns the auth information (username, etc) for a given ImageRefAndAuth +func (imgRefAuth *ImageRefAndAuth) AuthConfig() *types.AuthConfig { + return imgRefAuth.authConfig +} + +// Reference returns the Image reference for a given ImageRefAndAuth +func (imgRefAuth *ImageRefAndAuth) Reference() reference.Named { + return imgRefAuth.reference +} + +// RepoInfo returns the repository information for a given ImageRefAndAuth +func (imgRefAuth *ImageRefAndAuth) RepoInfo() *registry.RepositoryInfo { + return imgRefAuth.repoInfo +} + +// Tag returns the Image tag for a given ImageRefAndAuth +func (imgRefAuth *ImageRefAndAuth) Tag() string { + return imgRefAuth.tag +} + +// Digest returns the Image digest for a given ImageRefAndAuth +func (imgRefAuth *ImageRefAndAuth) Digest() digest.Digest { + return imgRefAuth.digest +} + +// Name returns the image name used to initialize the ImageRefAndAuth +func (imgRefAuth *ImageRefAndAuth) Name() string { + return imgRefAuth.original +} diff --git a/vendor/github.com/docker/cli/cli/version/version.go b/vendor/github.com/docker/cli/cli/version/version.go new file mode 100644 index 000000000..a263b9a73 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/version/version.go @@ -0,0 +1,10 @@ +package 
version
+
+// Default build-time variables.
+// These values are overridden via ldflags
+var (
+	PlatformName = ""
+	Version      = "unknown-version"
+	GitCommit    = "unknown-commit"
+	BuildTime    = "unknown-buildtime"
+)
diff --git a/vendor/github.com/docker/cli/opts/capabilities.go b/vendor/github.com/docker/cli/opts/capabilities.go
new file mode 100644
index 000000000..8b5787037
--- /dev/null
+++ b/vendor/github.com/docker/cli/opts/capabilities.go
@@ -0,0 +1,89 @@
+package opts
+
+import (
+	"sort"
+	"strings"
+)
+
+const (
+	// AllCapabilities is a special value to add or drop all capabilities
+	AllCapabilities = "ALL"
+
+	// ResetCapabilities is a special value to reset capabilities when updating.
+	// This value should only be used when updating, not used on "create".
+	ResetCapabilities = "RESET"
+)
+
+// NormalizeCapability normalizes a capability by upper-casing, trimming white space
+// and adding a CAP_ prefix (if not yet present). This function also accepts the
+// "ALL" magic-value, as used by CapAdd/CapDrop.
+//
+// This function only handles rudimentary formatting; no validation is performed,
+// as the list of available capabilities can be updated over time, thus should be
+// handled by the daemon.
+func NormalizeCapability(cap string) string {
+	cap = strings.ToUpper(strings.TrimSpace(cap))
+	if cap == AllCapabilities || cap == ResetCapabilities {
+		return cap
+	}
+	if !strings.HasPrefix(cap, "CAP_") {
+		cap = "CAP_" + cap
+	}
+	return cap
+}
+
+// CapabilitiesMap normalizes the given capabilities and converts them to a map.
+func CapabilitiesMap(caps []string) map[string]bool {
+	normalized := make(map[string]bool)
+	for _, c := range caps {
+		normalized[NormalizeCapability(c)] = true
+	}
+	return normalized
+}
+
+// EffectiveCapAddCapDrop normalizes and sorts capabilities to "add" and "drop",
+// and returns the effective capabilities to include in both.
+//
+// "CapAdd" takes precedence over "CapDrop", so capabilities included in both
+// lists are removed from the list of capabilities to drop. The special "ALL"
+// capability is also taken into account.
+//
+// Note that the special "RESET" value is only used when updating an existing
+// service, and will be ignored.
+//
+// Duplicates are removed, and the resulting lists are sorted.
+func EffectiveCapAddCapDrop(add, drop []string) (capAdd, capDrop []string) {
+	var (
+		addCaps  = CapabilitiesMap(add)
+		dropCaps = CapabilitiesMap(drop)
+	)
+
+	if addCaps[AllCapabilities] {
+		// Special case: "ALL capabilities" trumps any other capability added.
+		addCaps = map[string]bool{AllCapabilities: true}
+	}
+	if dropCaps[AllCapabilities] {
+		// Special case: "ALL capabilities" trumps any other capability dropped.
+		dropCaps = map[string]bool{AllCapabilities: true}
+	}
+	for c := range dropCaps {
+		if addCaps[c] {
+			// Adding a capability takes precedence, so skip dropping
+			continue
+		}
+		if c != ResetCapabilities {
+			capDrop = append(capDrop, c)
+		}
+	}
+
+	for c := range addCaps {
+		if c != ResetCapabilities {
+			capAdd = append(capAdd, c)
+		}
+	}
+
+	sort.Strings(capAdd)
+	sort.Strings(capDrop)
+
+	return capAdd, capDrop
+}
diff --git a/vendor/github.com/docker/cli/opts/config.go b/vendor/github.com/docker/cli/opts/config.go
new file mode 100644
index 000000000..3be0fa93d
--- /dev/null
+++ b/vendor/github.com/docker/cli/opts/config.go
@@ -0,0 +1,99 @@
+package opts
+
+import (
+	"encoding/csv"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	swarmtypes "github.com/docker/docker/api/types/swarm"
+)
+
+// ConfigOpt is a Value type for parsing configs
+type ConfigOpt struct {
+	values []*swarmtypes.ConfigReference
+}
+
+// Set a new config value
+func (o *ConfigOpt) Set(value string) error {
+	csvReader := csv.NewReader(strings.NewReader(value))
+	fields, err := csvReader.Read()
+	if err != nil {
+		return err
+	}
+
+	options := &swarmtypes.ConfigReference{
+		File: &swarmtypes.ConfigReferenceFileTarget{
+			UID:  "0",
+			GID:  "0",
+			Mode: 0o444,
+		},
+	}
+
+	// support a simple syntax of --config foo
+	if len(fields) == 1 && !strings.Contains(fields[0], "=") {
+		options.File.Name = fields[0]
+		options.ConfigName = fields[0]
+		o.values = append(o.values, options)
+		return nil
+	}
+
+	for _, field := range fields {
+		key, val, ok := strings.Cut(field, "=")
+		if !ok || key == "" {
+			return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
+		}
+
+		// TODO(thaJeztah): these options should not be case-insensitive.
+		switch strings.ToLower(key) {
+		case "source", "src":
+			options.ConfigName = val
+		case "target":
+			options.File.Name = val
+		case "uid":
+			options.File.UID = val
+		case "gid":
+			options.File.GID = val
+		case "mode":
+			m, err := strconv.ParseUint(val, 0, 32)
+			if err != nil {
+				return fmt.Errorf("invalid mode specified: %v", err)
+			}
+
+			options.File.Mode = os.FileMode(m)
+		default:
+			return fmt.Errorf("invalid field in config request: %s", key)
+		}
+	}
+
+	if options.ConfigName == "" {
+		return fmt.Errorf("source is required")
+	}
+	if options.File.Name == "" {
+		options.File.Name = options.ConfigName
+	}
+
+	o.values = append(o.values, options)
+	return nil
+}
+
+// Type returns the type of this option
+func (o *ConfigOpt) Type() string {
+	return "config"
+}
+
+// String returns a string repr of this option
+func (o *ConfigOpt) String() string {
+	configs := []string{}
+	for _, config := range o.values {
+		repr := fmt.Sprintf("%s -> %s", config.ConfigName, config.File.Name)
+		configs = append(configs, repr)
+	}
+	return strings.Join(configs, ", ")
+}
+
+// Value returns the config requests
+func (o *ConfigOpt) Value() []*swarmtypes.ConfigReference {
+	return o.values
+}
diff --git a/vendor/github.com/docker/cli/opts/duration.go b/vendor/github.com/docker/cli/opts/duration.go
new file mode 100644
index 000000000..5dc6eeaa7
--- /dev/null
+++ b/vendor/github.com/docker/cli/opts/duration.go
@@ -0,0 +1,64 @@
+package opts
+
+import (
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// PositiveDurationOpt is an option type for time.Duration that uses a pointer.
+// It behaves similarly to DurationOpt but only allows positive duration values.
+type PositiveDurationOpt struct {
+	DurationOpt
+}
+
+// Set a new value on the option.
Setting a negative duration value will cause +// an error to be returned. +func (d *PositiveDurationOpt) Set(s string) error { + err := d.DurationOpt.Set(s) + if err != nil { + return err + } + if *d.DurationOpt.value < 0 { + return errors.Errorf("duration cannot be negative") + } + return nil +} + +// DurationOpt is an option type for time.Duration that uses a pointer. This +// allows us to get nil values outside, instead of defaulting to 0 +type DurationOpt struct { + value *time.Duration +} + +// NewDurationOpt creates a DurationOpt with the specified duration +func NewDurationOpt(value *time.Duration) *DurationOpt { + return &DurationOpt{ + value: value, + } +} + +// Set a new value on the option +func (d *DurationOpt) Set(s string) error { + v, err := time.ParseDuration(s) + d.value = &v + return err +} + +// Type returns the type of this option, which will be displayed in `--help` output +func (d *DurationOpt) Type() string { + return "duration" +} + +// String returns a string repr of this option +func (d *DurationOpt) String() string { + if d.value != nil { + return d.value.String() + } + return "" +} + +// Value returns the time.Duration +func (d *DurationOpt) Value() *time.Duration { + return d.value +} diff --git a/vendor/github.com/docker/cli/opts/env.go b/vendor/github.com/docker/cli/opts/env.go new file mode 100644 index 000000000..214d6f440 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/env.go @@ -0,0 +1,31 @@ +package opts + +import ( + "os" + "strings" + + "github.com/pkg/errors" +) + +// ValidateEnv validates an environment variable and returns it. +// If no value is specified, it obtains its value from the current environment +// +// As on ParseEnvFile and related to #16585, environment variable names +// are not validated, and it's up to the application inside the container +// to validate them or not. +// +// The only validation here is to check if name is empty, per #25099 +func ValidateEnv(val string) (string, error) { + k, _, hasValue := strings.Cut(val, "=") + if k == "" { + return "", errors.New("invalid environment variable: " + val) + } + if hasValue { + // val contains a "=" (but value may be an empty string) + return val, nil + } + if envVal, ok := os.LookupEnv(k); ok { + return k + "=" + envVal, nil + } + return val, nil +} diff --git a/vendor/github.com/docker/cli/opts/envfile.go b/vendor/github.com/docker/cli/opts/envfile.go new file mode 100644 index 000000000..26aa3c3a9 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/envfile.go @@ -0,0 +1,22 @@ +package opts + +import ( + "os" +) + +// ParseEnvFile reads a file with environment variables enumerated by lines +// +// “Environment variable names used by the utilities in the Shell and +// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase +// letters, digits, and the '_' (underscore) from the characters defined in +// Portable Character Set and do not begin with a digit. *But*, other +// characters may be permitted by an implementation; applications shall +// tolerate the presence of such names.” +// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html +// +// As of #16585, it's up to application inside docker to validate or not +// environment variables, that's why we just strip leading whitespace and +// nothing more. 
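+//
+// For example (an editor's sketch), a file containing:
+//
+//	# a comment line
+//	FOO=bar
+//	EMPTY=
+//	PASSTHRU
+//
+// yields "FOO=bar", "EMPTY=" and, if PASSTHRU is set in the current
+// environment, "PASSTHRU=<its value>" (unset pass-through names are dropped).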
+func ParseEnvFile(filename string) ([]string, error) { + return parseKeyValueFile(filename, os.LookupEnv) +} diff --git a/vendor/github.com/docker/cli/opts/file.go b/vendor/github.com/docker/cli/opts/file.go new file mode 100644 index 000000000..72b90e117 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/file.go @@ -0,0 +1,76 @@ +package opts + +import ( + "bufio" + "bytes" + "fmt" + "os" + "strings" + "unicode" + "unicode/utf8" +) + +const whiteSpaces = " \t" + +// ErrBadKey typed error for bad environment variable +type ErrBadKey struct { + msg string +} + +func (e ErrBadKey) Error() string { + return fmt.Sprintf("poorly formatted environment: %s", e.msg) +} + +func parseKeyValueFile(filename string, emptyFn func(string) (string, bool)) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + defer fh.Close() + + lines := []string{} + scanner := bufio.NewScanner(fh) + currentLine := 0 + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + if !utf8.Valid(scannedBytes) { + return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes) + } + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + // trim the line from all leading whitespace first + line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) + currentLine++ + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + variable, value, hasValue := strings.Cut(line, "=") + + // trim the front of a variable, but nothing else + variable = strings.TrimLeft(variable, whiteSpaces) + if strings.ContainsAny(variable, whiteSpaces) { + return []string{}, ErrBadKey{fmt.Sprintf("variable '%s' contains whitespaces", variable)} + } + if len(variable) == 0 { + return []string{}, ErrBadKey{fmt.Sprintf("no variable name on line '%s'", line)} + } + + if hasValue { + // pass the value through, no trimming + lines = append(lines, variable+"="+value) + } else { + var present bool + if emptyFn != nil { + value, present = emptyFn(line) + } + if present { + // if only a pass-through variable is given, clean it up. 
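+					// e.g. when called via ParseEnvFile, a lone "HOME" line
+					// becomes "HOME=<value from os.LookupEnv>" if HOME is set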
+ lines = append(lines, strings.TrimSpace(variable)+"="+value) + } + } + } + } + return lines, scanner.Err() +} diff --git a/vendor/github.com/docker/cli/opts/gpus.go b/vendor/github.com/docker/cli/opts/gpus.go new file mode 100644 index 000000000..93bf93978 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/gpus.go @@ -0,0 +1,111 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "strconv" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/pkg/errors" +) + +// GpuOpts is a Value type for parsing mounts +type GpuOpts struct { + values []container.DeviceRequest +} + +func parseCount(s string) (int, error) { + if s == "all" { + return -1, nil + } + i, err := strconv.Atoi(s) + return i, errors.Wrap(err, "count must be an integer") +} + +// Set a new mount value +// +//nolint:gocyclo +func (o *GpuOpts) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + req := container.DeviceRequest{} + + seen := map[string]struct{}{} + // Set writable as the default + for _, field := range fields { + key, val, withValue := strings.Cut(field, "=") + if _, ok := seen[key]; ok { + return fmt.Errorf("gpu request key '%s' can be specified only once", key) + } + seen[key] = struct{}{} + + if !withValue { + seen["count"] = struct{}{} + req.Count, err = parseCount(key) + if err != nil { + return err + } + continue + } + + switch key { + case "driver": + req.Driver = val + case "count": + req.Count, err = parseCount(val) + if err != nil { + return err + } + case "device": + req.DeviceIDs = strings.Split(val, ",") + case "capabilities": + req.Capabilities = [][]string{append(strings.Split(val, ","), "gpu")} + case "options": + r := csv.NewReader(strings.NewReader(val)) + optFields, err := r.Read() + if err != nil { + return errors.Wrap(err, "failed to read gpu options") + } + req.Options = ConvertKVStringsToMap(optFields) + default: + return fmt.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + if _, ok := seen["count"]; !ok && req.DeviceIDs == nil { + req.Count = 1 + } + if req.Options == nil { + req.Options = make(map[string]string) + } + if req.Capabilities == nil { + req.Capabilities = [][]string{{"gpu"}} + } + + o.values = append(o.values, req) + return nil +} + +// Type returns the type of this option +func (o *GpuOpts) Type() string { + return "gpu-request" +} + +// String returns a string repr of this option +func (o *GpuOpts) String() string { + gpus := []string{} + for _, gpu := range o.values { + gpus = append(gpus, fmt.Sprintf("%v", gpu)) + } + return strings.Join(gpus, ", ") +} + +// Value returns the mounts +func (o *GpuOpts) Value() []container.DeviceRequest { + return o.values +} diff --git a/vendor/github.com/docker/cli/opts/hosts.go b/vendor/github.com/docker/cli/opts/hosts.go new file mode 100644 index 000000000..7cdd1218f --- /dev/null +++ b/vendor/github.com/docker/cli/opts/hosts.go @@ -0,0 +1,181 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "strconv" + "strings" +) + +const ( + // defaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. 
dockerd -H tcp://
+	// These are the IANA registered port numbers for use with Docker
+	// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
+	defaultHTTPPort = "2375" // Default HTTP Port
+	// defaultTLSHTTPPort Default HTTP Port used when TLS enabled
+	defaultTLSHTTPPort = "2376" // Default TLS encrypted HTTP Port
+	// defaultUnixSocket Path for the unix socket.
+	// Docker daemon by default always listens on the default unix socket
+	defaultUnixSocket = "/var/run/docker.sock"
+	// defaultTCPHost constant defines the default host string used by docker on Windows
+	defaultTCPHost = "tcp://" + defaultHTTPHost + ":" + defaultHTTPPort
+	// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
+	defaultTLSHost = "tcp://" + defaultHTTPHost + ":" + defaultTLSHTTPPort
+	// DefaultNamedPipe defines the default named pipe used by docker on Windows
+	defaultNamedPipe = `//./pipe/docker_engine`
+	// hostGatewayName defines a special string which users can append to --add-host
+	// to add an extra entry in /etc/hosts that maps host.docker.internal to the host IP
+	// TODO Consider moving the hostGatewayName constant defined in docker at
+	// github.com/docker/docker/daemon/network/constants.go outside of the "daemon"
+	// package, so that the CLI can consume it.
+	hostGatewayName = "host-gateway"
+)
+
+// ValidateHost validates that the specified string is a valid host and returns it.
+//
+// TODO(thaJeztah): ValidateHost appears to be unused; deprecate it.
+func ValidateHost(val string) (string, error) {
+	host := strings.TrimSpace(val)
+	// The empty string means default and is not handled by parseDockerDaemonHost
+	if host != "" {
+		_, err := parseDockerDaemonHost(host)
+		if err != nil {
+			return val, err
+		}
+	}
+	// Note: unlike most flag validators, we don't return the mutated value here
+	// we need to know what the user entered later (using ParseHost) to adjust for TLS
+	return val, nil
+}
+
+// ParseHost and set defaults for a Daemon host string
+func ParseHost(defaultToTLS bool, val string) (string, error) {
+	host := strings.TrimSpace(val)
+	if host == "" {
+		if defaultToTLS {
+			host = defaultTLSHost
+		} else {
+			host = defaultHost
+		}
+	} else {
+		var err error
+		host, err = parseDockerDaemonHost(host)
+		if err != nil {
+			return val, err
+		}
+	}
+	return host, nil
+}
+
+// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
+// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDockerDaemonHost(addr string) (string, error) {
+	proto, host, hasProto := strings.Cut(addr, "://")
+	if !hasProto && proto != "" {
+		host = proto
+		proto = "tcp"
+	}
+
+	switch proto {
+	case "tcp":
+		return ParseTCPAddr(host, defaultTCPHost)
+	case "unix":
+		return parseSimpleProtoAddr(proto, host, defaultUnixSocket)
+	case "npipe":
+		return parseSimpleProtoAddr(proto, host, defaultNamedPipe)
+	case "fd":
+		return addr, nil
+	case "ssh":
+		return addr, nil
+	default:
+		return "", fmt.Errorf("invalid bind address format: %s", addr)
+	}
+}
+
+// parseSimpleProtoAddr parses and validates that the specified address is a valid
+// socket address for simple protocols like unix and npipe. It returns a formatted
+// socket address, either using the address parsed from addr, or the contents of
+// defaultAddr if addr is a blank string.
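+//
+// For example (an editor's sketch):
+//
+//	parseSimpleProtoAddr("unix", "unix:///run/foo.sock", defaultUnixSocket)
+//	// -> "unix:///run/foo.sock"
+//	parseSimpleProtoAddr("unix", "", defaultUnixSocket)
+//	// -> "unix:///var/run/docker.sock"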
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, proto+"://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("invalid proto, expected %s: %s", proto, addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("%s://%s", proto, addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. +// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. + if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // try port addition once + host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) + } + if err != nil { + return "", fmt.Errorf("invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). +// +// TODO(thaJeztah): remove client-side validation, and delegate to the API server. +func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + k, v, ok := strings.Cut(val, ":") + if !ok || k == "" { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + // Skip IPaddr validation for "host-gateway" string + if v != hostGatewayName { + if _, err := ValidateIPAddress(v); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", v) + } + } + return val, nil +} diff --git a/vendor/github.com/docker/cli/opts/hosts_unix.go b/vendor/github.com/docker/cli/opts/hosts_unix.go new file mode 100644 index 000000000..206834d4d --- /dev/null +++ b/vendor/github.com/docker/cli/opts/hosts_unix.go @@ -0,0 +1,10 @@ +//go:build !windows +// +build !windows + +package opts + +// defaultHost constant defines the default host string used by docker on other hosts than Windows +const defaultHost = "unix://" + defaultUnixSocket + +// defaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
dockerd -H tcp://:8080 +const defaultHTTPHost = "localhost" diff --git a/vendor/github.com/docker/cli/opts/hosts_windows.go b/vendor/github.com/docker/cli/opts/hosts_windows.go new file mode 100644 index 000000000..1e42a2d77 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/hosts_windows.go @@ -0,0 +1,59 @@ +package opts + +// defaultHost constant defines the default host string used by docker on Windows +const defaultHost = "npipe://" + defaultNamedPipe + +// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. +// @jhowardmsft, @swernli. +// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP5. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. +// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." +// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... +// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. 
We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// defaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 +const defaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/cli/opts/ip.go b/vendor/github.com/docker/cli/opts/ip.go new file mode 100644 index 000000000..fb03b5011 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/ip.go @@ -0,0 +1,47 @@ +package opts + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { + *net.IP +} + +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +// Set sets an IPv4 or IPv6 address from a given string. If the given +// string is not parseable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. +func (o *IPOpt) String() string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} + +// Type returns the type of the option +func (o *IPOpt) Type() string { + return "ip" +} diff --git a/vendor/github.com/docker/cli/opts/mount.go b/vendor/github.com/docker/cli/opts/mount.go new file mode 100644 index 000000000..2b531127e --- /dev/null +++ b/vendor/github.com/docker/cli/opts/mount.go @@ -0,0 +1,186 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/go-units" +) + +// MountOpt is a Value type for parsing mounts +type MountOpt struct { + values []mounttypes.Mount +} + +// Set a new mount value +// +//nolint:gocyclo +func (m *MountOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + mount := mounttypes.Mount{} + + volumeOptions := func() *mounttypes.VolumeOptions { + if mount.VolumeOptions == nil { + mount.VolumeOptions = &mounttypes.VolumeOptions{ + Labels: make(map[string]string), + } + } + if mount.VolumeOptions.DriverConfig == nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{} + } + return mount.VolumeOptions + } + + bindOptions := func() *mounttypes.BindOptions { + if mount.BindOptions == nil { + mount.BindOptions = new(mounttypes.BindOptions) + } + return mount.BindOptions + } + + tmpfsOptions := func() *mounttypes.TmpfsOptions { + if mount.TmpfsOptions == nil { + mount.TmpfsOptions = new(mounttypes.TmpfsOptions) + } + return mount.TmpfsOptions + } + + setValueOnMap := func(target map[string]string, value string) { + k, v, _ := strings.Cut(value, "=") + if k != "" { + target[k] = v + } + } + + mount.Type = mounttypes.TypeVolume // default to volume mounts + // Set writable as the default + for _, field := range fields { + key, val, ok := strings.Cut(field, "=") + + // TODO(thaJeztah): these options should not be case-insensitive. 
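+		// (so "TYPE=bind" is currently treated the same as "type=bind")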
+ key = strings.ToLower(key) + + if !ok { + switch key { + case "readonly", "ro": + mount.ReadOnly = true + continue + case "volume-nocopy": + volumeOptions().NoCopy = true + continue + case "bind-nonrecursive": + bindOptions().NonRecursive = true + continue + default: + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + } + + switch key { + case "type": + mount.Type = mounttypes.Type(strings.ToLower(val)) + case "source", "src": + mount.Source = val + if strings.HasPrefix(val, "."+string(filepath.Separator)) || val == "." { + if abs, err := filepath.Abs(val); err == nil { + mount.Source = abs + } + } + case "target", "dst", "destination": + mount.Target = val + case "readonly", "ro": + mount.ReadOnly, err = strconv.ParseBool(val) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, val) + } + case "consistency": + mount.Consistency = mounttypes.Consistency(strings.ToLower(val)) + case "bind-propagation": + bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(val)) + case "bind-nonrecursive": + bindOptions().NonRecursive, err = strconv.ParseBool(val) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, val) + } + case "volume-nocopy": + volumeOptions().NoCopy, err = strconv.ParseBool(val) + if err != nil { + return fmt.Errorf("invalid value for volume-nocopy: %s", val) + } + case "volume-label": + setValueOnMap(volumeOptions().Labels, val) + case "volume-driver": + volumeOptions().DriverConfig.Name = val + case "volume-opt": + if volumeOptions().DriverConfig.Options == nil { + volumeOptions().DriverConfig.Options = make(map[string]string) + } + setValueOnMap(volumeOptions().DriverConfig.Options, val) + case "tmpfs-size": + sizeBytes, err := units.RAMInBytes(val) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, val) + } + tmpfsOptions().SizeBytes = sizeBytes + case "tmpfs-mode": + ui64, err := strconv.ParseUint(val, 8, 32) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, val) + } + tmpfsOptions().Mode = os.FileMode(ui64) + default: + return fmt.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + if mount.Type == "" { + return fmt.Errorf("type is required") + } + + if mount.Target == "" { + return fmt.Errorf("target is required") + } + + if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { + return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type) + } + if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind { + return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type) + } + if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs { + return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type) + } + + m.values = append(m.values, mount) + return nil +} + +// Type returns the type of this option +func (m *MountOpt) Type() string { + return "mount" +} + +// String returns a string repr of this option +func (m *MountOpt) String() string { + mounts := []string{} + for _, mount := range m.values { + repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) + mounts = append(mounts, repr) + } + return strings.Join(mounts, ", ") +} + +// Value returns the mounts +func (m *MountOpt) Value() []mounttypes.Mount { + return m.values +} diff --git a/vendor/github.com/docker/cli/opts/network.go b/vendor/github.com/docker/cli/opts/network.go new file mode 100644 index 000000000..12c3977b1 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/network.go @@ 
-0,0 +1,127 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "regexp" + "strings" +) + +const ( + networkOptName = "name" + networkOptAlias = "alias" + networkOptIPv4Address = "ip" + networkOptIPv6Address = "ip6" + driverOpt = "driver-opt" +) + +// NetworkAttachmentOpts represents the network options for endpoint creation +type NetworkAttachmentOpts struct { + Target string + Aliases []string + DriverOpts map[string]string + Links []string // TODO add support for links in the csv notation of `--network` + IPv4Address string + IPv6Address string + LinkLocalIPs []string // TODO add support for LinkLocalIPs in the csv notation of `--network` ? +} + +// NetworkOpt represents a network config in swarm mode. +type NetworkOpt struct { + options []NetworkAttachmentOpts +} + +// Set networkopts value +func (n *NetworkOpt) Set(value string) error { + longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) + if err != nil { + return err + } + + var netOpt NetworkAttachmentOpts + if longSyntax { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + netOpt.Aliases = []string{} + for _, field := range fields { + // TODO(thaJeztah): these options should not be case-insensitive. + key, val, ok := strings.Cut(strings.ToLower(field), "=") + if !ok || key == "" { + return fmt.Errorf("invalid field %s", field) + } + + key = strings.TrimSpace(key) + val = strings.TrimSpace(val) + + switch key { + case networkOptName: + netOpt.Target = val + case networkOptAlias: + netOpt.Aliases = append(netOpt.Aliases, val) + case networkOptIPv4Address: + netOpt.IPv4Address = val + case networkOptIPv6Address: + netOpt.IPv6Address = val + case driverOpt: + key, val, err = parseDriverOpt(val) + if err != nil { + return err + } + if netOpt.DriverOpts == nil { + netOpt.DriverOpts = make(map[string]string) + } + netOpt.DriverOpts[key] = val + default: + return fmt.Errorf("invalid field key %s", key) + } + } + if len(netOpt.Target) == 0 { + return fmt.Errorf("network name/id is not specified") + } + } else { + netOpt.Target = value + } + n.options = append(n.options, netOpt) + return nil +} + +// Type returns the type of this option +func (n *NetworkOpt) Type() string { + return "network" +} + +// Value returns the networkopts +func (n *NetworkOpt) Value() []NetworkAttachmentOpts { + return n.options +} + +// String returns the network opts as a string +func (n *NetworkOpt) String() string { + return "" +} + +// NetworkMode return the network mode for the network option +func (n *NetworkOpt) NetworkMode() string { + networkIDOrName := "default" + netOptVal := n.Value() + if len(netOptVal) > 0 { + networkIDOrName = netOptVal[0].Target + } + return networkIDOrName +} + +func parseDriverOpt(driverOpt string) (string, string, error) { + // TODO(thaJeztah): these options should not be case-insensitive. + // TODO(thaJeztah): should value be converted to lowercase as well, or only the key? 
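+	// (as written, the whole option string is lower-cased, so both key and value are affected)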
+ key, value, ok := strings.Cut(strings.ToLower(driverOpt), "=") + if !ok || key == "" { + return "", "", fmt.Errorf("invalid key value pair format in driver options") + } + key = strings.TrimSpace(key) + value = strings.TrimSpace(value) + return key, value, nil +} diff --git a/vendor/github.com/docker/cli/opts/opts.go b/vendor/github.com/docker/cli/opts/opts.go new file mode 100644 index 000000000..4e7790f0f --- /dev/null +++ b/vendor/github.com/docker/cli/opts/opts.go @@ -0,0 +1,513 @@ +package opts + +import ( + "fmt" + "math/big" + "net" + "path" + "regexp" + "strings" + + "github.com/docker/docker/api/types/filters" + units "github.com/docker/go-units" + "github.com/pkg/errors" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ListOpts holds a list of values and a validation function. +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +// NewListOpts creates a new ListOpts with the specified validator. +func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *NewListOptsRef(&values, validator) +} + +// NewListOptsRef creates a new ListOpts with the specified values and validator. +func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + if len(*opts.values) == 0 { + return "" + } + return fmt.Sprintf("%v", *opts.values) +} + +// Set validates if needed the input value and adds it to the +// internal slice. +func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + *opts.values = append(*opts.values, value) + return nil +} + +// Delete removes the specified element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + *opts.values = append((*opts.values)[:i], (*opts.values)[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values of slice. +func (opts *ListOpts) GetAll() []string { + return *opts.values +} + +// GetAllOrEmpty returns the values of the slice +// or an empty slice when there are no values. +func (opts *ListOpts) GetAllOrEmpty() []string { + v := *opts.values + if v == nil { + return make([]string, 0) + } + return v +} + +// Get checks the existence of the specified key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the amount of element in the slice. +func (opts *ListOpts) Len() int { + return len(*opts.values) +} + +// Type returns a string name for this Option type +func (opts *ListOpts) Type() string { + return "list" +} + +// WithValidator returns the ListOpts with validator set. +func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { + opts.validator = validator + return opts +} + +// NamedOption is an interface that list and map options +// with names implement. 
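+// In this file it is implemented by NamedListOpts and NamedMapOpts.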
+type NamedOption interface { + Name() string +} + +// NamedListOpts is a ListOpts with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedListOpts struct { + name string + ListOpts +} + +var _ NamedOption = &NamedListOpts{} + +// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. +func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { + return &NamedListOpts{ + name: name, + ListOpts: *NewListOptsRef(values, validator), + } +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *NamedListOpts) Name() string { + return o.name +} + +// MapOpts holds a map of values and a validation function. +type MapOpts struct { + values map[string]string + validator ValidatorFctType +} + +// Set validates if needed the input value and add it to the +// internal map, by splitting on '='. +func (opts *MapOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + k, v, _ := strings.Cut(value, "=") + opts.values[k] = v + return nil +} + +// GetAll returns the values of MapOpts as a map. +func (opts *MapOpts) GetAll() map[string]string { + return opts.values +} + +func (opts *MapOpts) String() string { + return fmt.Sprintf("%v", opts.values) +} + +// Type returns a string name for this Option type +func (opts *MapOpts) Type() string { + return "map" +} + +// NewMapOpts creates a new MapOpts with the specified map of values and a validator. +func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { + if values == nil { + values = make(map[string]string) + } + return &MapOpts{ + values: values, + validator: validator, + } +} + +// NamedMapOpts is a MapOpts struct with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedMapOpts struct { + name string + MapOpts +} + +var _ NamedOption = &NamedMapOpts{} + +// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. +func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { + return &NamedMapOpts{ + name: name, + MapOpts: *NewMapOpts(values, validator), + } +} + +// Name returns the name of the NamedMapOpts in the configuration. +func (o *NamedMapOpts) Name() string { + return o.name +} + +// ValidatorFctType defines a validator function that returns a validated string and/or an error. +type ValidatorFctType func(val string) (string, error) + +// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error +type ValidatorFctListType func(val string) ([]string, error) + +// ValidateIPAddress validates an Ip address. +func ValidateIPAddress(val string) (string, error) { + ip := net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// ValidateMACAddress validates a MAC address. +func ValidateMACAddress(val string) (string, error) { + _, err := net.ParseMAC(strings.TrimSpace(val)) + if err != nil { + return "", err + } + return val, nil +} + +// ValidateDNSSearch validates domain for resolvconf search configuration. +// A zero length domain is represented by a dot (.). +func ValidateDNSSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." 
{ + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// ValidateLabel validates that the specified string is a valid label, and returns it. +// +// Labels are in the form of key=value; key must be a non-empty string, and not +// contain whitespaces. A value is optional (defaults to an empty string if omitted). +// +// Leading whitespace is removed during validation but values are kept as-is +// otherwise, so any string value is accepted for both, which includes whitespace +// (for values) and quotes (surrounding, or embedded in key or value). +// +// TODO discuss if quotes (and other special characters) should be valid or invalid for keys +// TODO discuss if leading/trailing whitespace in keys should be preserved (and valid) +func ValidateLabel(value string) (string, error) { + key, _, _ := strings.Cut(value, "=") + key = strings.TrimLeft(key, whiteSpaces) + if key == "" { + return "", fmt.Errorf("invalid label '%s': empty name", value) + } + if strings.ContainsAny(key, whiteSpaces) { + return "", fmt.Errorf("label '%s' contains whitespaces", key) + } + return value, nil +} + +// ValidateSysctl validates a sysctl and returns it. +func ValidateSysctl(val string) (string, error) { + validSysctlMap := map[string]bool{ + "kernel.msgmax": true, + "kernel.msgmnb": true, + "kernel.msgmni": true, + "kernel.sem": true, + "kernel.shmall": true, + "kernel.shmmax": true, + "kernel.shmmni": true, + "kernel.shm_rmid_forced": true, + } + validSysctlPrefixes := []string{ + "net.", + "fs.mqueue.", + } + k, _, ok := strings.Cut(val, "=") + if !ok || k == "" { + return "", fmt.Errorf("sysctl '%s' is not allowed", val) + } + if validSysctlMap[k] { + return val, nil + } + for _, vp := range validSysctlPrefixes { + if strings.HasPrefix(k, vp) { + return val, nil + } + } + return "", fmt.Errorf("sysctl '%s' is not allowed", val) +} + +// FilterOpt is a flag type for validating filters +type FilterOpt struct { + filter filters.Args +} + +// NewFilterOpt returns a new FilterOpt +func NewFilterOpt() FilterOpt { + return FilterOpt{filter: filters.NewArgs()} +} + +func (o *FilterOpt) String() string { + repr, err := filters.ToJSON(o.filter) + if err != nil { + return "invalid filters" + } + return repr +} + +// Set sets the value of the opt by parsing the command line value +func (o *FilterOpt) Set(value string) error { + if value == "" { + return nil + } + if !strings.Contains(value, "=") { + return errors.New("bad format of filter (expected name=value)") + } + name, val, _ := strings.Cut(value, "=") + + // TODO(thaJeztah): these options should not be case-insensitive. + name = strings.ToLower(strings.TrimSpace(name)) + val = strings.TrimSpace(val) + o.filter.Add(name, val) + return nil +} + +// Type returns the option type +func (o *FilterOpt) Type() string { + return "filter" +} + +// Value returns the value of this option +func (o *FilterOpt) Value() filters.Args { + return o.filter +} + +// NanoCPUs is a type for fixed point fractional number. 
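+//
+// Illustrative note (not part of the vendored source): Set("1.5") stores
+// 1500000000 (1.5 CPUs expressed in units of 1e-9 CPUs) and String renders
+// it back as "1.500"; ParseCPUs below defines the exact parsing rules.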
+type NanoCPUs int64 + +// String returns the string format of the number +func (c *NanoCPUs) String() string { + if *c == 0 { + return "" + } + return big.NewRat(c.Value(), 1e9).FloatString(3) +} + +// Set sets the value of the NanoCPU by passing a string +func (c *NanoCPUs) Set(value string) error { + cpus, err := ParseCPUs(value) + *c = NanoCPUs(cpus) + return err +} + +// Type returns the type +func (c *NanoCPUs) Type() string { + return "decimal" +} + +// Value returns the value in int64 +func (c *NanoCPUs) Value() int64 { + return int64(*c) +} + +// ParseCPUs takes a string ratio and returns an integer value of nano cpus +func ParseCPUs(value string) (int64, error) { + cpu, ok := new(big.Rat).SetString(value) + if !ok { + return 0, fmt.Errorf("failed to parse %v as a rational number", value) + } + nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) + if !nano.IsInt() { + return 0, fmt.Errorf("value is too precise") + } + return nano.Num().Int64(), nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + // We expect two parts, but restrict to three to allow detecting invalid formats. + arr := strings.SplitN(val, ":", 3) + + // TODO(thaJeztah): clean up this logic!! + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + // TODO(thaJeztah): this should trim the "/" prefix as well?? + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get a HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + // TODO(thaJeztah): clean up this logic!! + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} + +// ValidateLink validates that the specified string has a valid link format (containerName:alias). +func ValidateLink(val string) (string, error) { + _, _, err := ParseLink(val) + return val, err +} + +// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) +type MemBytes int64 + +// String returns the string format of the human readable memory bytes +func (m *MemBytes) String() string { + // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not. + // We return "0" in case value is 0 here so that the default value is hidden. + // (Sometimes "default 0 B" is actually misleading) + if m.Value() != 0 { + return units.BytesSize(float64(m.Value())) + } + return "0" +} + +// Set sets the value of the MemBytes by passing a string +func (m *MemBytes) Set(value string) error { + val, err := units.RAMInBytes(value) + *m = MemBytes(val) + return err +} + +// Type returns the type +func (m *MemBytes) Type() string { + return "bytes" +} + +// Value returns the value in int64 +func (m *MemBytes) Value() int64 { + return int64(*m) +} + +// UnmarshalJSON is the customized unmarshaler for MemBytes +func (m *MemBytes) UnmarshalJSON(s []byte) error { + if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' { + return fmt.Errorf("invalid size: %q", s) + } + val, err := units.RAMInBytes(string(s[1 : len(s)-1])) + *m = MemBytes(val) + return err +} + +// MemSwapBytes is a type for human readable memory bytes (like 128M, 2g, etc). +// It differs from MemBytes in that -1 is valid and the default. 
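+// For example, Set("-1") keeps swap unlimited, while Set("512m") parses to
+// 536870912 bytes via units.RAMInBytes (illustrative note, not part of the
+// vendored source).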
+type MemSwapBytes int64 + +// Set sets the value of the MemSwapBytes by passing a string +func (m *MemSwapBytes) Set(value string) error { + if value == "-1" { + *m = MemSwapBytes(-1) + return nil + } + val, err := units.RAMInBytes(value) + *m = MemSwapBytes(val) + return err +} + +// Type returns the type +func (m *MemSwapBytes) Type() string { + return "bytes" +} + +// Value returns the value in int64 +func (m *MemSwapBytes) Value() int64 { + return int64(*m) +} + +func (m *MemSwapBytes) String() string { + b := MemBytes(*m) + return b.String() +} + +// UnmarshalJSON is the customized unmarshaler for MemSwapBytes +func (m *MemSwapBytes) UnmarshalJSON(s []byte) error { + b := MemBytes(*m) + return b.UnmarshalJSON(s) +} diff --git a/vendor/github.com/docker/cli/opts/parse.go b/vendor/github.com/docker/cli/opts/parse.go new file mode 100644 index 000000000..017577e4b --- /dev/null +++ b/vendor/github.com/docker/cli/opts/parse.go @@ -0,0 +1,91 @@ +package opts + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys +// present in the file with additional pairs specified in the override parameter +func ReadKVStrings(files []string, override []string) ([]string, error) { + return readKVStrings(files, override, nil) +} + +// ReadKVEnvStrings reads a file of line terminated key=value pairs, and overrides any keys +// present in the file with additional pairs specified in the override parameter. +// If a key has no value, it will get the value from the environment. +func ReadKVEnvStrings(files []string, override []string) ([]string, error) { + return readKVStrings(files, override, os.LookupEnv) +} + +func readKVStrings(files []string, override []string, emptyFn func(string) (string, bool)) ([]string, error) { + var variables []string + for _, ef := range files { + parsedVars, err := parseKeyValueFile(ef, emptyFn) + if err != nil { + return nil, err + } + variables = append(variables, parsedVars...) + } + // parse the '-e' and '--env' after, to allow override + variables = append(variables, override...) + + return variables, nil +} + +// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} +func ConvertKVStringsToMap(values []string) map[string]string { + result := make(map[string]string, len(values)) + for _, value := range values { + k, v, _ := strings.Cut(value, "=") + result[k] = v + } + + return result +} + +// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"} +// but set unset keys to nil - meaning the ones with no "=" in them. 
+// We use this in cases where we need to distinguish between +// +// FOO= and FOO +// +// where the latter case just means FOO was mentioned but not given a value +func ConvertKVStringsToMapWithNil(values []string) map[string]*string { + result := make(map[string]*string, len(values)) + for _, value := range values { + k, v, ok := strings.Cut(value, "=") + if !ok { + result[k] = nil + } else { + result[k] = &v + } + } + + return result +} + +// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect +func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { + p := container.RestartPolicy{} + + if policy == "" { + return p, nil + } + + k, v, _ := strings.Cut(policy, ":") + if v != "" { + count, err := strconv.Atoi(v) + if err != nil { + return p, fmt.Errorf("invalid restart policy format: maximum retry count must be an integer") + } + p.MaximumRetryCount = count + } + + p.Name = k + return p, nil +} diff --git a/vendor/github.com/docker/cli/opts/port.go b/vendor/github.com/docker/cli/opts/port.go new file mode 100644 index 000000000..fe41cdd28 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/port.go @@ -0,0 +1,171 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "net" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" + "github.com/sirupsen/logrus" +) + +const ( + portOptTargetPort = "target" + portOptPublishedPort = "published" + portOptProtocol = "protocol" + portOptMode = "mode" +) + +// PortOpt represents a port config in swarm mode. +type PortOpt struct { + ports []swarm.PortConfig +} + +// Set a new port value +// +//nolint:gocyclo +func (p *PortOpt) Set(value string) error { + longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) + if err != nil { + return err + } + if longSyntax { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + pConfig := swarm.PortConfig{} + for _, field := range fields { + // TODO(thaJeztah): these options should not be case-insensitive. 
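+ //
+ // Illustrative note (not part of the vendored source): for
+ // --publish published=8080,target=80,protocol=tcp,mode=ingress
+ // the csv reader above yields the fields "published=8080", "target=80",
+ // "protocol=tcp" and "mode=ingress", each cut at its first '=' below.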
+ key, val, ok := strings.Cut(strings.ToLower(field), "=") + if !ok || key == "" { + return fmt.Errorf("invalid field %s", field) + } + switch key { + case portOptProtocol: + if val != string(swarm.PortConfigProtocolTCP) && val != string(swarm.PortConfigProtocolUDP) && val != string(swarm.PortConfigProtocolSCTP) { + return fmt.Errorf("invalid protocol value %s", val) + } + + pConfig.Protocol = swarm.PortConfigProtocol(val) + case portOptMode: + if val != string(swarm.PortConfigPublishModeIngress) && val != string(swarm.PortConfigPublishModeHost) { + return fmt.Errorf("invalid publish mode value %s", val) + } + + pConfig.PublishMode = swarm.PortConfigPublishMode(val) + case portOptTargetPort: + tPort, err := strconv.ParseUint(val, 10, 16) + if err != nil { + return err + } + + pConfig.TargetPort = uint32(tPort) + case portOptPublishedPort: + pPort, err := strconv.ParseUint(val, 10, 16) + if err != nil { + return err + } + + pConfig.PublishedPort = uint32(pPort) + default: + return fmt.Errorf("invalid field key %s", key) + } + } + + if pConfig.TargetPort == 0 { + return fmt.Errorf("missing mandatory field %q", portOptTargetPort) + } + + if pConfig.PublishMode == "" { + pConfig.PublishMode = swarm.PortConfigPublishModeIngress + } + + if pConfig.Protocol == "" { + pConfig.Protocol = swarm.PortConfigProtocolTCP + } + + p.ports = append(p.ports, pConfig) + } else { + // short syntax + portConfigs := []swarm.PortConfig{} + ports, portBindingMap, err := nat.ParsePortSpecs([]string{value}) + if err != nil { + return err + } + for _, portBindings := range portBindingMap { + for _, portBinding := range portBindings { + if portBinding.HostIP != "" { + return fmt.Errorf("hostip is not supported") + } + } + } + + for port := range ports { + portConfig, err := ConvertPortToPortConfig(port, portBindingMap) + if err != nil { + return err + } + portConfigs = append(portConfigs, portConfig...) + } + p.ports = append(p.ports, portConfigs...) + } + return nil +} + +// Type returns the type of this option +func (p *PortOpt) Type() string { + return "port" +} + +// String returns a string repr of this option +func (p *PortOpt) String() string { + ports := []string{} + for _, port := range p.ports { + repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) + ports = append(ports, repr) + } + return strings.Join(ports, ", ") +} + +// Value returns the ports +func (p *PortOpt) Value() []swarm.PortConfig { + return p.ports +} + +// ConvertPortToPortConfig converts ports to the swarm type +func ConvertPortToPortConfig( + port nat.Port, + portBindings map[nat.Port][]nat.PortBinding, +) ([]swarm.PortConfig, error) { + ports := []swarm.PortConfig{} + + for _, binding := range portBindings[port] { + if p := net.ParseIP(binding.HostIP); p != nil && !p.IsUnspecified() { + logrus.Warnf("ignoring IP-address (%s:%s) service will listen on '0.0.0.0'", net.JoinHostPort(binding.HostIP, binding.HostPort), port) + } + + startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort) + + if err != nil && binding.HostPort != "" { + return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port()) + } + + for i := startHostPort; i <= endHostPort; i++ { + ports = append(ports, swarm.PortConfig{ + // TODO Name: ? 
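+ // Illustrative note (not part of the vendored source): the
+ // short syntax "8080:80/tcp" produces TargetPort 80 and
+ // PublishedPort 8080 here, published in the default
+ // "ingress" mode.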
+ Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), + TargetPort: uint32(port.Int()), + PublishedPort: uint32(i), + PublishMode: swarm.PortConfigPublishModeIngress, + }) + } + } + return ports, nil +} diff --git a/vendor/github.com/docker/cli/opts/quotedstring.go b/vendor/github.com/docker/cli/opts/quotedstring.go new file mode 100644 index 000000000..741f450b1 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/quotedstring.go @@ -0,0 +1,40 @@ +package opts + +// QuotedString is a string that may have extra quotes around the value. The +// quotes are stripped from the value. +type QuotedString struct { + value *string +} + +// Set sets a new value +func (s *QuotedString) Set(val string) error { + *s.value = trimQuotes(val) + return nil +} + +// Type returns the type of the value +func (s *QuotedString) Type() string { + return "string" +} + +func (s *QuotedString) String() string { + return *s.value +} + +func trimQuotes(value string) string { + if len(value) < 2 { + return value + } + lastIndex := len(value) - 1 + for _, char := range []byte{'\'', '"'} { + if value[0] == char && value[lastIndex] == char { + return value[1:lastIndex] + } + } + return value +} + +// NewQuotedString returns a new quoted string option +func NewQuotedString(value *string) *QuotedString { + return &QuotedString{value: value} +} diff --git a/vendor/github.com/docker/cli/opts/secret.go b/vendor/github.com/docker/cli/opts/secret.go new file mode 100644 index 000000000..750dbe4f3 --- /dev/null +++ b/vendor/github.com/docker/cli/opts/secret.go @@ -0,0 +1,98 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "os" + "strconv" + "strings" + + swarmtypes "github.com/docker/docker/api/types/swarm" +) + +// SecretOpt is a Value type for parsing secrets +type SecretOpt struct { + values []*swarmtypes.SecretReference +} + +// Set a new secret value +func (o *SecretOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + options := &swarmtypes.SecretReference{ + File: &swarmtypes.SecretReferenceFileTarget{ + UID: "0", + GID: "0", + Mode: 0o444, + }, + } + + // support a simple syntax of --secret foo + if len(fields) == 1 && !strings.Contains(fields[0], "=") { + options.File.Name = fields[0] + options.SecretName = fields[0] + o.values = append(o.values, options) + return nil + } + + for _, field := range fields { + key, val, ok := strings.Cut(field, "=") + if !ok || key == "" { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + // TODO(thaJeztah): these options should not be case-insensitive. 
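+ //
+ // Illustrative note (not part of the vendored source): for
+ // --secret source=app-key,target=/run/secrets/app-key,mode=0400
+ // this loop sees ("source","app-key"), ("target","/run/secrets/app-key")
+ // and ("mode","0400"); ParseUint below uses base 0, so the leading zero
+ // marks the mode as octal.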
+ switch strings.ToLower(key) {
+ case "source", "src":
+ options.SecretName = val
+ case "target":
+ options.File.Name = val
+ case "uid":
+ options.File.UID = val
+ case "gid":
+ options.File.GID = val
+ case "mode":
+ m, err := strconv.ParseUint(val, 0, 32)
+ if err != nil {
+ return fmt.Errorf("invalid mode specified: %v", err)
+ }
+
+ options.File.Mode = os.FileMode(m)
+ default:
+ return fmt.Errorf("invalid field in secret request: %s", key)
+ }
+ }
+
+ if options.SecretName == "" {
+ return fmt.Errorf("source is required")
+ }
+ if options.File.Name == "" {
+ options.File.Name = options.SecretName
+ }
+
+ o.values = append(o.values, options)
+ return nil
+}
+
+// Type returns the type of this option
+func (o *SecretOpt) Type() string {
+ return "secret"
+}
+
+// String returns a string repr of this option
+func (o *SecretOpt) String() string {
+ secrets := []string{}
+ for _, secret := range o.values {
+ repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name)
+ secrets = append(secrets, repr)
+ }
+ return strings.Join(secrets, ", ")
+}
+
+// Value returns the secret requests
+func (o *SecretOpt) Value() []*swarmtypes.SecretReference {
+ return o.values
+}
diff --git a/vendor/github.com/docker/cli/opts/throttledevice.go b/vendor/github.com/docker/cli/opts/throttledevice.go
new file mode 100644
index 000000000..789acf60f
--- /dev/null
+++ b/vendor/github.com/docker/cli/opts/throttledevice.go
@@ -0,0 +1,104 @@
+package opts
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/api/types/blkiodev"
+ "github.com/docker/go-units"
+)
+
+// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error.
+type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error)
+
+// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format.
+func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
+ k, v, ok := strings.Cut(val, ":")
+ if !ok || k == "" {
+ return nil, fmt.Errorf("bad format: %s", val)
+ }
+ // TODO(thaJeztah): should we really validate this on the client?
+ if !strings.HasPrefix(k, "/dev/") {
+ return nil, fmt.Errorf("bad format for device path: %s", val)
+ }
+ rate, err := units.RAMInBytes(v)
+ if err != nil {
+ return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
+ }
+ if rate < 0 {
+ return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
+ }
+
+ return &blkiodev.ThrottleDevice{
+ Path: k,
+ Rate: uint64(rate),
+ }, nil
+}
+
+// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format.
+func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
+ k, v, ok := strings.Cut(val, ":")
+ if !ok || k == "" {
+ return nil, fmt.Errorf("bad format: %s", val)
+ }
+ // TODO(thaJeztah): should we really validate this on the client?
+ if !strings.HasPrefix(k, "/dev/") {
+ return nil, fmt.Errorf("bad format for device path: %s", val)
+ }
+ rate, err := strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
+ }
+
+ return &blkiodev.ThrottleDevice{Path: k, Rate: rate}, nil
+}
+
+// ThrottledeviceOpt defines a map of ThrottleDevices
+type ThrottledeviceOpt struct {
+ values []*blkiodev.ThrottleDevice
+ validator ValidatorThrottleFctType
+}
+
+// NewThrottledeviceOpt creates a new ThrottledeviceOpt
+func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt {
+ values := []*blkiodev.ThrottleDevice{}
+ return ThrottledeviceOpt{
+ values: values,
+ validator: validator,
+ }
+}
+
+// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt
+func (opt *ThrottledeviceOpt) Set(val string) error {
+ var value *blkiodev.ThrottleDevice
+ if opt.validator != nil {
+ v, err := opt.validator(val)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ opt.values = append(opt.values, value)
+ return nil
+}
+
+// String returns ThrottledeviceOpt values as a string.
+func (opt *ThrottledeviceOpt) String() string {
+ var out []string
+ for _, v := range opt.values {
+ out = append(out, v.String())
+ }
+
+ return fmt.Sprintf("%v", out)
+}
+
+// GetList returns a slice of pointers to ThrottleDevices.
+func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice {
+ return append([]*blkiodev.ThrottleDevice{}, opt.values...)
+}
+
+// Type returns the option type
+func (opt *ThrottledeviceOpt) Type() string {
+ return "list"
+}
diff --git a/vendor/github.com/docker/cli/opts/ulimit.go b/vendor/github.com/docker/cli/opts/ulimit.go
new file mode 100644
index 000000000..4667cc254
--- /dev/null
+++ b/vendor/github.com/docker/cli/opts/ulimit.go
@@ -0,0 +1,60 @@
+package opts
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/docker/go-units"
+)
+
+// UlimitOpt defines a map of Ulimits
+type UlimitOpt struct {
+ values *map[string]*units.Ulimit
+}
+
+// NewUlimitOpt creates a new UlimitOpt. Ulimits are not validated.
+func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt {
+ if ref == nil {
+ ref = &map[string]*units.Ulimit{}
+ }
+ return &UlimitOpt{ref}
+}
+
+// Set validates a Ulimit and sets its name as a key in UlimitOpt
+func (o *UlimitOpt) Set(val string) error {
+ l, err := units.ParseUlimit(val)
+ if err != nil {
+ return err
+ }
+
+ (*o.values)[l.Name] = l
+
+ return nil
+}
+
+// String returns Ulimit values as a string. Values are sorted by name.
+func (o *UlimitOpt) String() string {
+ var out []string
+ for _, v := range *o.values {
+ out = append(out, v.String())
+ }
+ sort.Strings(out)
+ return fmt.Sprintf("%v", out)
+}
+
+// GetList returns a slice of pointers to Ulimits. Values are sorted by name.
+func (o *UlimitOpt) GetList() []*units.Ulimit {
+ var ulimits []*units.Ulimit
+ for _, v := range *o.values {
+ ulimits = append(ulimits, v)
+ }
+ sort.SliceStable(ulimits, func(i, j int) bool {
+ return ulimits[i].Name < ulimits[j].Name
+ })
+ return ulimits
+}
+
+// Type returns the option type
+func (o *UlimitOpt) Type() string {
+ return "ulimit"
+}
diff --git a/vendor/github.com/docker/cli/opts/weightdevice.go b/vendor/github.com/docker/cli/opts/weightdevice.go
new file mode 100644
index 000000000..3077e3da7
--- /dev/null
+++ b/vendor/github.com/docker/cli/opts/weightdevice.go
@@ -0,0 +1,84 @@
+package opts
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/api/types/blkiodev"
+)
+
+// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error.
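+// For example, ValidateWeightDevice below accepts "/dev/sda:200" and returns
+// Path "/dev/sda" with Weight 200; non-zero weights outside the 10-1000 range
+// are rejected (illustrative note, not part of the vendored source).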
+type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) + +// ValidateWeightDevice validates that the specified string has a valid device-weight format. +func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { + k, v, ok := strings.Cut(val, ":") + if !ok || k == "" { + return nil, fmt.Errorf("bad format: %s", val) + } + // TODO(thaJeztah): should we really validate this on the client? + if !strings.HasPrefix(k, "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + weight, err := strconv.ParseUint(v, 10, 16) + if err != nil { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + if weight > 0 && (weight < 10 || weight > 1000) { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + + return &blkiodev.WeightDevice{ + Path: k, + Weight: uint16(weight), + }, nil +} + +// WeightdeviceOpt defines a map of WeightDevices +type WeightdeviceOpt struct { + values []*blkiodev.WeightDevice + validator ValidatorWeightFctType +} + +// NewWeightdeviceOpt creates a new WeightdeviceOpt +func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { + return WeightdeviceOpt{ + values: []*blkiodev.WeightDevice{}, + validator: validator, + } +} + +// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt +func (opt *WeightdeviceOpt) Set(val string) error { + var value *blkiodev.WeightDevice + if opt.validator != nil { + v, err := opt.validator(val) + if err != nil { + return err + } + value = v + } + opt.values = append(opt.values, value) + return nil +} + +// String returns WeightdeviceOpt values as a string. +func (opt *WeightdeviceOpt) String() string { + var out []string + for _, v := range opt.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to WeightDevices. +func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { + return opt.values +} + +// Type returns the option type +func (opt *WeightdeviceOpt) Type() string { + return "list" +} diff --git a/vendor/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go new file mode 100644 index 000000000..88367b0a0 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/doc.go @@ -0,0 +1 @@ +package manifest diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go new file mode 100644 index 000000000..bea2341c7 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -0,0 +1,239 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +const ( + // MediaTypeManifestList specifies the mediaType for manifest lists. + MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" +) + +// SchemaVersion provides a pre-initialized version structure for this +// packages version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +// OCISchemaVersion provides a pre-initialized version structure for this +// packages OCIschema version of the manifest. 
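+// (Its MediaType is v1.MediaTypeImageIndex,
+// "application/vnd.oci.image.index.v1+json".)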
+var OCISchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: v1.MediaTypeImageIndex, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + if m.MediaType != MediaTypeManifestList { + err = fmt.Errorf("mediaType in manifest list should be '%s' not '%s'", + MediaTypeManifestList, m.MediaType) + + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + + imageIndexFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + if err := validateIndex(b); err != nil { + return nil, distribution.Descriptor{}, err + } + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex { + err = fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'", + v1.MediaTypeImageIndex, m.MediaType) + + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageIndex}, err + } + err = distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, imageIndexFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. +type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + distribution.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returns the distribution descriptors for the referenced image +// manifests. 
+func (m ManifestList) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors, and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. +func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + var mediaType string + if len(descriptors) > 0 && descriptors[0].Descriptor.MediaType == v1.MediaTypeImageManifest { + mediaType = v1.MediaTypeImageIndex + } else { + mediaType = MediaTypeManifestList + } + + return FromDescriptorsWithMediaType(descriptors, mediaType) +} + +// FromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly +func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: mediaType, + }, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier. 
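+// For example, feeding the returned bytes to digest.FromBytes, as the
+// registration functions in this package do, yields the list's
+// content-addressable digest (illustrative note, not part of the vendored
+// source).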
+func (m DeserializedManifestList) Payload() (string, []byte, error) { + var mediaType string + if m.MediaType == "" { + mediaType = v1.MediaTypeImageIndex + } else { + mediaType = m.MediaType + } + + return mediaType, m.canonical, nil +} + +// unknownDocument represents a manifest, manifest list, or index that has not +// yet been validated +type unknownDocument struct { + Config interface{} `json:"config,omitempty"` + Layers interface{} `json:"layers,omitempty"` +} + +// validateIndex returns an error if the byte slice is invalid JSON or if it +// contains fields that belong to a manifest +func validateIndex(b []byte) error { + var doc unknownDocument + if err := json.Unmarshal(b, &doc); err != nil { + return err + } + if doc.Config != nil || doc.Layers != nil { + return errors.New("index: expected index but found manifest") + } + return nil +} diff --git a/vendor/github.com/docker/distribution/manifest/ocischema/builder.go b/vendor/github.com/docker/distribution/manifest/ocischema/builder.go new file mode 100644 index 000000000..b89bf5b71 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/ocischema/builder.go @@ -0,0 +1,107 @@ +package ocischema + +import ( + "context" + "errors" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Builder is a type for constructing manifests. +type Builder struct { + // bs is a BlobService used to publish the configuration blob. + bs distribution.BlobService + + // configJSON references + configJSON []byte + + // layers is a list of layer descriptors that gets built by successive + // calls to AppendReference. + layers []distribution.Descriptor + + // Annotations contains arbitrary metadata relating to the targeted content. + annotations map[string]string + + // For testing purposes + mediaType string +} + +// NewManifestBuilder is used to build new manifests for the current schema +// version. It takes a BlobService so it can publish the configuration blob +// as part of the Build process, and annotations. +func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) distribution.ManifestBuilder { + mb := &Builder{ + bs: bs, + configJSON: make([]byte, len(configJSON)), + annotations: annotations, + mediaType: v1.MediaTypeImageManifest, + } + copy(mb.configJSON, configJSON) + + return mb +} + +// SetMediaType assigns the passed mediatype or error if the mediatype is not a +// valid media type for oci image manifests currently: "" or "application/vnd.oci.image.manifest.v1+json" +func (mb *Builder) SetMediaType(mediaType string) error { + if mediaType != "" && mediaType != v1.MediaTypeImageManifest { + return errors.New("invalid media type for OCI image manifest") + } + + mb.mediaType = mediaType + return nil +} + +// Build produces a final manifest from the given references. +func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: mb.mediaType, + }, + Layers: make([]distribution.Descriptor, len(mb.layers)), + Annotations: mb.annotations, + } + copy(m.Layers, mb.layers) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. 
+ m.Config.MediaType = v1.MediaTypeImageConfig + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, v1.MediaTypeImageConfig, mb.configJSON) + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = v1.MediaTypeImageConfig + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. +func (mb *Builder) AppendReference(d distribution.Describable) error { + mb.layers = append(mb.layers, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. +func (mb *Builder) References() []distribution.Descriptor { + return mb.layers +} diff --git a/vendor/github.com/docker/distribution/manifest/ocischema/manifest.go b/vendor/github.com/docker/distribution/manifest/ocischema/manifest.go new file mode 100644 index 000000000..d51f8debb --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/ocischema/manifest.go @@ -0,0 +1,146 @@ +package ocischema + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, // historical value here.. does not pertain to OCI or docker version + MediaType: v1.MediaTypeImageManifest, + } +) + +func init() { + ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + if err := validateManifest(b); err != nil { + return nil, distribution.Descriptor{}, err + } + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageManifest}, err + } + err := distribution.RegisterManifestSchema(v1.MediaTypeImageManifest, ocischemaFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a ocischema manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config distribution.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []distribution.Descriptor `json:"layers"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} + +// References returns the descriptors of this manifests references. +func (m Manifest) References() []distribution.Descriptor { + references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) + references = append(references, m.Config) + references = append(references, m.Layers...) + return references +} + +// Target returns the target of this manifest. +func (m Manifest) Target() distribution.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface. +type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. 
+ canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. +func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. +func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b)) + // store manifest in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(m.canonical, &manifest); err != nil { + return err + } + + if manifest.MediaType != "" && manifest.MediaType != v1.MediaTypeImageManifest { + return fmt.Errorf("if present, mediaType in manifest should be '%s' not '%s'", + v1.MediaTypeImageManifest, manifest.MediaType) + } + + m.Manifest = manifest + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifest") +} + +// Payload returns the raw content of the manifest. The contents can be used to +// calculate the content identifier. +func (m DeserializedManifest) Payload() (string, []byte, error) { + return v1.MediaTypeImageManifest, m.canonical, nil +} + +// unknownDocument represents a manifest, manifest list, or index that has not +// yet been validated +type unknownDocument struct { + Manifests interface{} `json:"manifests,omitempty"` +} + +// validateManifest returns an error if the byte slice is invalid JSON or if it +// contains fields that belong to a index +func validateManifest(b []byte) error { + var doc unknownDocument + if err := json.Unmarshal(b, &doc); err != nil { + return err + } + if doc.Manifests != nil { + return errors.New("ocimanifest: expected manifest but found index") + } + return nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go new file mode 100644 index 000000000..3facaae62 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder.go @@ -0,0 +1,85 @@ +package schema2 + +import ( + "context" + + "github.com/docker/distribution" + "github.com/opencontainers/go-digest" +) + +// builder is a type for constructing manifests. +type builder struct { + // bs is a BlobService used to publish the configuration blob. + bs distribution.BlobService + + // configMediaType is media type used to describe configuration + configMediaType string + + // configJSON references + configJSON []byte + + // dependencies is a list of descriptors that gets built by successive + // calls to AppendReference. In case of image configuration these are layers. + dependencies []distribution.Descriptor +} + +// NewManifestBuilder is used to build new manifests for the current schema +// version. It takes a BlobService so it can publish the configuration blob +// as part of the Build process. 
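+//
+// A minimal usage sketch (illustrative, not part of the vendored source;
+// bs, ctx, cfgJSON and layerDesc are assumed to exist):
+//
+//	b := NewManifestBuilder(bs, MediaTypeImageConfig, cfgJSON)
+//	_ = b.AppendReference(layerDesc) // anything implementing distribution.Describable
+//	m, err := b.Build(ctx)           // stats or uploads the config blob, then builds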
+func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder { + mb := &builder{ + bs: bs, + configMediaType: configMediaType, + configJSON: make([]byte, len(configJSON)), + } + copy(mb.configJSON, configJSON) + + return mb +} + +// Build produces a final manifest from the given references. +func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: SchemaVersion, + Layers: make([]distribution.Descriptor, len(mb.dependencies)), + } + copy(m.Layers, mb.dependencies) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = mb.configMediaType + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON) + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = mb.configMediaType + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. +func (mb *builder) AppendReference(d distribution.Describable) error { + mb.dependencies = append(mb.dependencies, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. +func (mb *builder) References() []distribution.Descriptor { + return mb.dependencies +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go new file mode 100644 index 000000000..41f480292 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -0,0 +1,144 @@ +package schema2 + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + + // MediaTypeImageConfig specifies the mediaType for the image configuration. + MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json" + + // MediaTypePluginConfig specifies the mediaType for plugin configuration. + MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json" + + // MediaTypeLayer is the mediaType used for layers referenced by the + // manifest. + MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" + + // MediaTypeForeignLayer is the mediaType used for layers that must be + // downloaded from foreign URLs. + MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + + // MediaTypeUncompressedLayer is the mediaType used for layers which + // are not compressed. + MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. 
+ SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifest, + } +) + +func init() { + schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a schema2 manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config distribution.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []distribution.Descriptor `json:"layers"` +} + +// References returns the descriptors of this manifests references. +func (m Manifest) References() []distribution.Descriptor { + references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) + references = append(references, m.Config) + references = append(references, m.Layers...) + return references +} + +// Target returns the target of this manifest. +func (m Manifest) Target() distribution.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface. +type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. +func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. +func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b)) + // store manifest in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(m.canonical, &manifest); err != nil { + return err + } + + if manifest.MediaType != MediaTypeManifest { + return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", + MediaTypeManifest, manifest.MediaType) + + } + + m.Manifest = manifest + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifest") +} + +// Payload returns the raw content of the manifest. The contents can be used to +// calculate the content identifier. 
+func (m DeserializedManifest) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go new file mode 100644 index 000000000..caa6b14e8 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/versioned.go @@ -0,0 +1,12 @@ +package manifest + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` +} diff --git a/vendor/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go new file mode 100644 index 000000000..d433ccaf5 --- /dev/null +++ b/vendor/github.com/docker/distribution/uuid/uuid.go @@ -0,0 +1,126 @@ +// Package uuid provides simple UUID generation. Only version 4 style UUIDs +// can be generated. +// +// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. +package uuid + +import ( + "crypto/rand" + "fmt" + "io" + "os" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x-%04x-%04x-%04x-%012x" +) + +var ( + // ErrUUIDInvalid indicates a parsed string is not a valid uuid. + ErrUUIDInvalid = fmt.Errorf("invalid uuid") + + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. + Loggerf = func(format string, args ...interface{}) {} +) + +// UUID represents a UUID value. UUIDs can be compared and set to other values +// and accessed by byte. +type UUID [Size]byte + +// Generate creates a new, version 4 uuid. +func Generate() (u UUID) { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version byte + u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} + + return u +} + +// Parse attempts to extract a uuid from the string or returns an error. +func Parse(s string) (u UUID, err error) { + if len(s) != 36 { + return UUID{}, ErrUUIDInvalid + } + + // create stack addresses for each section of the uuid. 
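+ //
+ // Illustrative note (not part of the vendored source): a canonical
+ // string such as "550e8400-e29b-41d4-a716-446655440000" is scanned via
+ // the format constant into five hex groups of 4, 2, 2, 2 and 6 bytes,
+ // which the copies below place into the 16-byte array.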
+ p := make([][]byte, 5) + + if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { + return u, err + } + + copy(u[0:4], p[0]) + copy(u[4:6], p[1]) + copy(u[6:8], p[2]) + copy(u[8:10], p[3]) + copy(u[10:16], p[4]) + + return +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md new file mode 100644 index 000000000..f136c3433 --- /dev/null +++ b/vendor/github.com/docker/docker/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. +- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. +- `daemon/` The daemon, which serves the API. + +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. Automatically generate documentation. +2. Automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. + +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/). + +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. + +## Viewing the API documentation + +When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
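For orientation, the `schema2` types vendored above are typically used together: build a `Manifest`, wrap it with `FromStruct`, and ship the canonical bytes returned by `Payload`. A minimal sketch, assuming the packages resolve at their canonical import paths; the empty config blob and its digest are placeholders:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/opencontainers/go-digest"
)

func main() {
	cfg := []byte("{}") // placeholder for an image configuration blob

	m := schema2.Manifest{
		Versioned: schema2.SchemaVersion, // schemaVersion 2 + manifest media type
		Config: distribution.Descriptor{
			MediaType: schema2.MediaTypeImageConfig,
			Digest:    digest.FromBytes(cfg),
			Size:      int64(len(cfg)),
		},
	}

	// FromStruct records the canonical JSON that Payload later returns verbatim.
	dm, err := schema2.FromStruct(m)
	if err != nil {
		panic(err)
	}
	mediaType, payload, _ := dm.Payload()
	fmt.Println(mediaType, len(payload))

	// Round trip: UnmarshalJSON keeps the exact bytes as the canonical
	// representation and rejects payloads with the wrong mediaType.
	var back schema2.DeserializedManifest
	if err := back.UnmarshalJSON(payload); err != nil {
		panic(err)
	}
	fmt.Println(len(back.References())) // config descriptor + layer descriptors
}
```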
+ +Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. + +The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 000000000..bee9b875a --- /dev/null +++ b/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,11 @@ +package api // import "github.com/docker/docker/api" + +// Common constants for daemon and client. +const ( + // DefaultVersion of Current REST API + DefaultVersion = "1.42" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier = "scratch" +) diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go new file mode 100644 index 000000000..19fc63d65 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -0,0 +1,7 @@ +//go:build !windows +// +build !windows + +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go new file mode 100644 index 000000000..590ba5479 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_windows.go @@ -0,0 +1,8 @@ +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml new file mode 100644 index 000000000..f07a02737 --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml new file mode 100644 index 000000000..afe7a8c37 --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -0,0 +1,12152 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. 
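To see how the version constants vendored above are meant to be read, here is a small illustrative sketch, assuming the vendored `github.com/docker/docker/api` package; real clients negotiate the version against the daemon rather than hard-coding it:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api"
)

func main() {
	// The daemon accepts API versions between MinVersion (platform-specific)
	// and DefaultVersion; per swagger.yaml below, requests outside that
	// window are answered with HTTP 400.
	fmt.Printf("supported API versions: v%s through v%s\n", api.MinVersion, api.DefaultVersion)

	// Clients pin a version by prefixing the request path, mirroring the
	// basePath in swagger.yaml, e.g. GET /v1.42/info.
	fmt.Println(fmt.Sprintf("/v%s/info", api.DefaultVersion))
}
```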
+ +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.42" +info: + title: "Docker Engine API" + version: "1.42" + x-logo: + url: "https://docs.docker.com/assets/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, an HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.42) is used. + For example, calling `/info` is the same as calling `/v1.42/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means the server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as an + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers.
+ - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. 
+ + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. + type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. 
+ - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." 
+ type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + KernelMemoryTCP: + description: | + Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. + + This field is omitted when empty. + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." 
+ type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. 
Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. 
+ + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:<name|id>`. Any + other value is taken as a custom network's name to which this + container should connect. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `<container name>[:<ro|rw>]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as a `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'.
+ items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options.
+ For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. + For example: + + ``` + {"net.ipv4.ip_forward": "1"} + ``` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + + When used as `ContainerConfig` field in an image, `ContainerConfig` is an + optional field containing the configuration of the container that was last + committed when creating the image. + + Previous versions of Docker builder used this field to store build cache, + and it is not in active use anymore. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<protocol>": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than having an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings.
+ type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: "MAC address of the container." + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because containers/create currently + # does not support attaching to multiple networks, so the example request + # would be confusing if it showed that multiple networks can be contained + # in the EndpointsConfig. 
+ # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: Name of the network's bridge (for example, `docker0`). + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: IPv6 unicast address using the link-local prefix. + type: "string" + example: "fe80::42:acff:fe11:1" + LinkLocalIPv6PrefixLen: + description: Prefix length of the IPv6 unicast address. + type: "integer" + example: "64" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey identifies the sandbox + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + # TODO is SecondaryIPAddresses actually used? + SecondaryIPAddresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO is SecondaryIPv6Addresses actually used? + SecondaryIPv6Addresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `<port>/<protocol>`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + GraphDriverData: + description: | + Information about the storage driver used to store the container's and + image's filesystem. + type: "object" + required: [Name, Data] + properties: + Name: + description: "Name of the storage driver." + type: "string" + x-nullable: false + example: "overlay2" + Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific and depends on the storage-driver + in use; it should be used for informational purposes only. + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } + + ImageInspect: + description: | + Information about an image in the local image cache. + type: "object" + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image.
+ + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Parent: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + Comment: + description: | + Optional message that was set when committing or importing the image. + type: "string" + x-nullable: false + example: "" + Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + x-nullable: false + example: "2022-02-04T21:20:12.497794809Z" + Container: + description: | + The ID of the container that was used to create the image. + + Depending on how the image was created, this field may be empty. + type: "string" + x-nullable: false + example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" + ContainerConfig: + $ref: "#/definitions/ContainerConfig" + DockerVersion: + description: | + The version of Docker that was used to build the image. + + Depending on how the image was created, this field may be empty. + type: "string" + x-nullable: false + example: "20.10.7" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ContainerConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + In versions of Docker before v1.10, this field was calculated from + the image itself and all of its parent images. 
Docker v1.10 and up + store images self-contained, and no longer use a parent-chain, making + this field an equivalent of the Size field. + + This field is kept for backward compatibility, but may be removed in + a future version of the API. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. 
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ example:
+ - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb"
+ - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ Created:
+ description: |
+ Date and time at which the image was created as a Unix timestamp
+ (number of seconds since EPOCH).
+ type: "integer"
+ x-nullable: false
+ example: 1644009612
+ Size:
+ description: |
+ Total size of the image including all layers it is composed of.
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ example: 172064416
+ SharedSize:
+ description: |
+ Total size of image layers that are shared between this image and other
+ images.
+
+ This size is not calculated by default. `-1` indicates that the value
+ has not been set / calculated.
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ example: 1239828
+ VirtualSize:
+ description: |
+ Total size of the image including all layers it is composed of.
+
+ In versions of Docker before v1.10, this field was calculated from
+ the image itself and all of its parent images. Docker v1.10 and up
+ store images self-contained, and no longer use a parent-chain, making
+ this field an equivalent of the Size field.
+
+ This field is kept for backward compatibility, but may be removed in
+ a future version of the API.
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ example: 172064416
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Containers:
+ description: |
+ Number of containers using this image. Includes both stopped and running
+ containers.
+
+ This size is not calculated by default, and depends on which API endpoint
+ is used. `-1` indicates that the value has not been set / calculated.
+ x-nullable: false
+ type: "integer"
+ example: 2
+
+ AuthConfig:
+ type: "object"
+ properties:
+ username:
+ type: "string"
+ password:
+ type: "string"
+ email:
+ type: "string"
+ serveraddress:
+ type: "string"
+ example:
+ username: "hannibal"
+ password: "xxxx"
+ serveraddress: "https://index.docker.io/v1/"
+
+ ProcessConfig:
+ type: "object"
+ properties:
+ privileged:
+ type: "boolean"
+ user:
+ type: "string"
+ tty:
+ type: "boolean"
+ entrypoint:
+ type: "string"
+ arguments:
+ type: "array"
+ items:
+ type: "string"
+
+ Volume:
+ type: "object"
+ required: [Name, Driver, Mountpoint, Labels, Scope, Options]
+ properties:
+ Name:
+ type: "string"
+ description: "Name of the volume."
+ x-nullable: false
+ example: "tardis"
+ Driver:
+ type: "string"
+ description: "Name of the volume driver used by the volume."
+ x-nullable: false
+ example: "custom"
+ Mountpoint:
+ type: "string"
+ description: "Mount path of the volume on the host."
+ x-nullable: false
+ example: "/var/lib/docker/volumes/tardis"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ description: "Date/Time the volume was created."
+ example: "2016-06-07T20:31:11.853781916Z"
+ Status:
+ type: "object"
+ description: |
+ Low-level details about the volume, provided by the volume driver.
+ Details are returned as a map with key/value pairs:
+ `{"key":"value","key2":"value2"}`.
+
+ The `Status` field is optional, and is omitted if the volume driver
+ does not support this feature.
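The split between `Image.Created` (an RFC 3339 string) and `ImageSummary.Created` (a Unix timestamp) is easy to trip over in clients. A minimal Go sketch of the conversion, using an illustrative struct rather than the canonical Docker types:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// imageSummary mirrors just the fields of the ImageSummary definition that
// this sketch needs; it is illustrative, not the canonical Docker type.
type imageSummary struct {
	ID       string   `json:"Id"`
	RepoTags []string `json:"RepoTags"`
	Created  int64    `json:"Created"` // seconds since the Unix epoch
}

func main() {
	raw := []byte(`{
		"Id": "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710",
		"RepoTags": ["example:1.0", "example:latest"],
		"Created": 1644009612
	}`)

	var img imageSummary
	if err := json.Unmarshal(raw, &img); err != nil {
		panic(err)
	}
	// Unlike Image.Created (an RFC 3339 string), ImageSummary.Created is a
	// Unix timestamp and needs an explicit conversion for display.
	fmt.Println(img.ID)
	fmt.Println(img.RepoTags[0], "created at",
		time.Unix(img.Created, 0).UTC().Format(time.RFC3339))
}
```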
+ additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. 
+ items:
+ type: "string"
+ example: []
+
+ Network:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Id:
+ type: "string"
+ Created:
+ type: "string"
+ format: "dateTime"
+ Scope:
+ type: "string"
+ Driver:
+ type: "string"
+ EnableIPv6:
+ type: "boolean"
+ IPAM:
+ $ref: "#/definitions/IPAM"
+ Internal:
+ type: "boolean"
+ Attachable:
+ type: "boolean"
+ Ingress:
+ type: "boolean"
+ Containers:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/NetworkContainer"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Labels:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ Name: "net01"
+ Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
+ Created: "2016-10-19T04:33:30.360899459Z"
+ Scope: "local"
+ Driver: "bridge"
+ EnableIPv6: false
+ IPAM:
+ Driver: "default"
+ Config:
+ - Subnet: "172.19.0.0/16"
+ Gateway: "172.19.0.1"
+ Options:
+ foo: "bar"
+ Internal: false
+ Attachable: false
+ Ingress: false
+ Containers:
+ 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
+ Name: "test"
+ EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+ MacAddress: "02:42:ac:13:00:02"
+ IPv4Address: "172.19.0.2/16"
+ IPv6Address: ""
+ Options:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ IPAM:
+ type: "object"
+ properties:
+ Driver:
+ description: "Name of the IPAM driver to use."
+ type: "string"
+ default: "default"
+ Config:
+ description: |
+ List of IPAM configuration options, specified as a map:
+
+ ```
+ {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}
+ ```
+ type: "array"
+ items:
+ $ref: "#/definitions/IPAMConfig"
+ Options:
+ description: "Driver-specific options, specified as a map."
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ IPAMConfig:
+ type: "object"
+ properties:
+ Subnet:
+ type: "string"
+ IPRange:
+ type: "string"
+ Gateway:
+ type: "string"
+ AuxiliaryAddresses:
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ NetworkContainer:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ EndpointID:
+ type: "string"
+ MacAddress:
+ type: "string"
+ IPv4Address:
+ type: "string"
+ IPv6Address:
+ type: "string"
+
+ BuildInfo:
+ type: "object"
+ properties:
+ id:
+ type: "string"
+ stream:
+ type: "string"
+ error:
+ type: "string"
+ errorDetail:
+ $ref: "#/definitions/ErrorDetail"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+ aux:
+ $ref: "#/definitions/ImageID"
+
+ BuildCache:
+ type: "object"
+ description: |
+ BuildCache contains information about a build cache record.
+ properties:
+ ID:
+ type: "string"
+ description: |
+ Unique ID of the build cache record.
+ example: "ndlpt0hhvkqcdfkputsk4cq9c"
+ Parent:
+ description: |
+ ID of the parent build cache record.
+
+ > **Deprecated**: This field is deprecated, and omitted if empty.
+ type: "string"
+ x-nullable: true
+ example: ""
+ Parents:
+ description: |
+ List of parent build cache record IDs.
+ type: "array"
+ items:
+ type: "string"
+ x-nullable: true
+ example: ["hw53o5aio51xtltp5xjp8v7fx"]
+ Type:
+ type: "string"
+ description: |
+ Cache record type.
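An IPAM pool like the `Network` example above is a list of maps with `Subnet`/`IPRange`/`Gateway` keys. A small sketch that marshals one pool to JSON; the struct names are illustrative, not the canonical Docker types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ipamConfig and ipam mirror the IPAM / IPAMConfig definitions above.
type ipamConfig struct {
	Subnet  string `json:"Subnet,omitempty"`
	IPRange string `json:"IPRange,omitempty"`
	Gateway string `json:"Gateway,omitempty"`
}

type ipam struct {
	Driver  string            `json:"Driver"`
	Config  []ipamConfig      `json:"Config"`
	Options map[string]string `json:"Options,omitempty"`
}

func main() {
	// One IPAM pool matching the Network example above: a /16 subnet with an
	// explicit gateway, handled by the default IPAM driver.
	spec := ipam{
		Driver: "default",
		Config: []ipamConfig{{Subnet: "172.19.0.0/16", Gateway: "172.19.0.1"}},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```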
+ example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. 
+ type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + MacAddress: + description: | + MAC address for the endpoint on this network. + type: "string" + example: "02:42:ac:11:00:04" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. 
+ type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." 
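The read-modify-write cycle implied by `ObjectVersion` looks like this in practice: fetch the object, mutate the spec, send the update together with the base version, and retry from the top on a version conflict. A minimal Go sketch; `getNode`, `updateNode`, and the in-memory store are hypothetical stand-ins for real API calls:

```go
package main

import (
	"errors"
	"fmt"
)

// objectVersion mirrors the ObjectVersion definition above.
type objectVersion struct {
	Index uint64 `json:"Index"`
}

type nodeSpec struct {
	Availability string `json:"Availability"`
}

// errVersionConflict stands in for the error the API returns when the
// submitted base version is stale.
var errVersionConflict = errors.New("update out of sequence")

var stored = struct {
	version objectVersion
	spec    nodeSpec
}{objectVersion{373531}, nodeSpec{"active"}}

func getNode() (objectVersion, nodeSpec) { return stored.version, stored.spec }

func updateNode(base objectVersion, spec nodeSpec) error {
	if base.Index != stored.version.Index {
		return errVersionConflict // someone else updated the object first
	}
	stored.version.Index++
	stored.spec = spec
	return nil
}

func main() {
	// Optimistic-concurrency loop: re-read the object and retry whenever the
	// base version we sent no longer matches the server's current version.
	for {
		version, spec := getNode()
		spec.Availability = "drain"
		if err := updateNode(version, spec); errors.Is(err, errVersionConflict) {
			continue // stale version: fetch again and retry
		} else if err != nil {
			panic(err)
		}
		break
	}
	fmt.Println("node drained at version", stored.version.Index)
}
```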
+ type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "logentries" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. 
+ type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. 
+
+ A tick currently defaults to one second, so these translate
+ directly to seconds currently, but this is NOT guaranteed.
+ type: "integer"
+ example: 3
+ HeartbeatTick:
+ description: |
+ The number of ticks between heartbeats. Every HeartbeatTick ticks,
+ the leader will send a heartbeat to the followers.
+
+ A tick currently defaults to one second, so these translate
+ directly to seconds currently, but this is NOT guaranteed.
+ type: "integer"
+ example: 1
+ Dispatcher:
+ description: "Dispatcher configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ HeartbeatPeriod:
+ description: |
+ The delay for an agent to send a heartbeat to the dispatcher.
+ type: "integer"
+ format: "int64"
+ example: 5000000000
+ CAConfig:
+ description: "CA configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ NodeCertExpiry:
+ description: "The duration node certificates are issued for."
+ type: "integer"
+ format: "int64"
+ example: 7776000000000000
+ ExternalCAs:
+ description: |
+ Configuration for forwarding signing requests to an external
+ certificate authority.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Protocol:
+ description: |
+ Protocol for communication with the external CA (currently
+ only `cfssl` is supported).
+ type: "string"
+ enum:
+ - "cfssl"
+ default: "cfssl"
+ URL:
+ description: |
+ URL where certificate signing requests should be sent.
+ type: "string"
+ Options:
+ description: |
+ An object with key/value pairs that are interpreted as
+ protocol-specific options for the external CA driver.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ CACert:
+ description: |
+ The root CA certificate (in PEM format) this external CA uses
+ to issue TLS certificates (assumed to be to the current swarm
+ root CA certificate if not provided).
+ type: "string"
+ SigningCACert:
+ description: |
+ The desired signing CA certificate for all swarm node TLS leaf
+ certificates, in PEM format.
+ type: "string"
+ SigningCAKey:
+ description: |
+ The desired signing CA key for all swarm node TLS leaf certificates,
+ in PEM format.
+ type: "string"
+ ForceRotate:
+ description: |
+ An integer whose purpose is to force swarm to generate a new
+ signing CA certificate and key, if none have been specified in
+ `SigningCACert` and `SigningCAKey`.
+ format: "uint64"
+ type: "integer"
+ EncryptionConfig:
+ description: "Parameters related to encryption-at-rest."
+ type: "object"
+ properties:
+ AutoLockManagers:
+ description: |
+ If set, generate a key and use it to lock data stored on the
+ managers.
+ type: "boolean"
+ example: false
+ TaskDefaults:
+ description: "Defaults for creating tasks in this cluster."
+ type: "object"
+ properties:
+ LogDriver:
+ description: |
+ The log driver to use for tasks created in the orchestrator if
+ unspecified by a service.
+
+ Updating this value only affects new tasks. Existing tasks continue
+ to use their previously configured log driver until recreated.
+ type: "object"
+ properties:
+ Name:
+ description: |
+ The log driver to use as a default for new tasks.
+ type: "string"
+ example: "json-file"
+ Options:
+ description: |
+ Driver-specific options for the selected log driver, specified
+ as key/value pairs.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ "max-file": "10"
+ "max-size": "100m"
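The `ElectionTick`/`HeartbeatTick` relationship above is a hard constraint, so a client may want to validate it before submitting a swarm spec. A small sketch under that assumption; the struct and `validate` helper are illustrative, not part of any Docker client library:

```go
package main

import "fmt"

// raftConfig mirrors the two tick fields from SwarmSpec.Raft above.
type raftConfig struct {
	ElectionTick  int
	HeartbeatTick int
}

// validate enforces the documented constraint that ElectionTick must be
// greater than HeartbeatTick.
func (r raftConfig) validate() error {
	if r.HeartbeatTick < 1 {
		return fmt.Errorf("HeartbeatTick must be at least 1, got %d", r.HeartbeatTick)
	}
	if r.ElectionTick <= r.HeartbeatTick {
		return fmt.Errorf("ElectionTick (%d) must be greater than HeartbeatTick (%d)",
			r.ElectionTick, r.HeartbeatTick)
	}
	return nil
}

func main() {
	// Matches the documented examples: 3 election ticks, 1 heartbeat tick.
	// With the current (not guaranteed) one-second tick, that is roughly a
	// 3s election timeout and a 1s heartbeat interval.
	if err := (raftConfig{ElectionTick: 3, HeartbeatTick: 1}).validate(); err != nil {
		panic(err)
	}
	fmt.Println("raft tick configuration is consistent")
}
```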
+
+ # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
+ # without `JoinTokens`.
+ ClusterInfo:
+ description: |
+ ClusterInfo represents information about the swarm as is returned by the
+ "/info" endpoint. Join-tokens are not included.
+ x-nullable: true
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the swarm."
+ type: "string"
+ example: "abajmipo7b4xz5ip2nrla6b11"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ description: |
+ Date and time at which the swarm was initialised in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
+ UpdatedAt:
+ description: |
+ Date and time at which the swarm was last updated in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2017-08-09T07:09:37.632105588Z"
+ Spec:
+ $ref: "#/definitions/SwarmSpec"
+ TLSInfo:
+ $ref: "#/definitions/TLSInfo"
+ RootRotationInProgress:
+ description: |
+ Whether there is currently a root CA rotation in progress for the swarm.
+ type: "boolean"
+ example: false
+ DataPathPort:
+ description: |
+ DataPathPort specifies the data path port number for data traffic.
+ Acceptable port range is 1024 to 49151.
+ If no port is set or is set to 0, the default port (4789) is used.
+ type: "integer"
+ format: "uint32"
+ default: 4789
+ example: 4789
+ DefaultAddrPool:
+ description: |
+ Default Address Pool specifies default subnet pools for global scope
+ networks.
+ type: "array"
+ items:
+ type: "string"
+ format: "CIDR"
+ example: ["10.10.0.0/16", "20.20.0.0/16"]
+ SubnetSize:
+ description: |
+ SubnetSize specifies the subnet size of the networks created from the
+ default subnet pool.
+ type: "integer"
+ format: "uint32"
+ maximum: 29
+ default: 24
+ example: 24
+
+ JoinTokens:
+ description: |
+ JoinTokens contains the tokens workers and managers need to join the swarm.
+ type: "object"
+ properties:
+ Worker:
+ description: |
+ The token workers can use to join the swarm.
+ type: "string"
+ example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"
+ Manager:
+ description: |
+ The token managers can use to join the swarm.
+ type: "string"
+ example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+
+ Swarm:
+ type: "object"
+ allOf:
+ - $ref: "#/definitions/ClusterInfo"
+ - type: "object"
+ properties:
+ JoinTokens:
+ $ref: "#/definitions/JoinTokens"
+
+ TaskSpec:
+ description: "User modifiable task configuration."
+ type: "object"
+ properties:
+ PluginSpec:
+ type: "object"
+ description: |
+ Plugin spec for the service. *(Experimental release only.)*
+
+
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. The + secret in the reference will be identified by its ID. + type: "string" + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +
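Entries in `ContainerSpec.Hosts` above follow the hosts(5) layout, `IP_address canonical_hostname [aliases...]`, rather than the `host:ip` form used elsewhere in Docker. A tiny Go helper, purely illustrative, that builds such entries:

```go
package main

import (
	"fmt"
	"strings"
)

// hostsEntry builds one element for TaskSpec.ContainerSpec.Hosts in the
// hosts(5) layout documented above: IP_address canonical_hostname [aliases...].
func hostsEntry(ip, canonical string, aliases ...string) string {
	parts := append([]string{ip, canonical}, aliases...)
	return strings.Join(parts, " ")
}

func main() {
	hosts := []string{
		hostsEntry("10.0.0.5", "db.internal.example.com", "db"),
		hostsEntry("10.0.0.9", "cache.internal.example.com"),
	}
	for _, h := range hosts {
		fmt.Println(h) // e.g. "10.0.0.5 db.internal.example.com db"
	}
}
```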
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task + +
+
+ > **Note**: `Configs.File` and `Configs.Runtime` are mutually
+ > exclusive
+ type: "object"
+ ConfigID:
+ description: |
+ ConfigID represents the ID of the specific config that we're
+ referencing.
+ type: "string"
+ ConfigName:
+ description: |
+ ConfigName is the name of the config that this references,
+ but this is just provided for lookup/display purposes. The
+ config in the reference will be identified by its ID.
+ type: "string"
+ Isolation:
+ type: "string"
+ description: |
+ Isolation technology of the containers running the service.
+ (Windows only)
+ enum:
+ - "default"
+ - "process"
+ - "hyperv"
+ Init:
+ description: |
+ Run an init inside the container that forwards signals and reaps
+ processes. This field is omitted if empty, and the default (as
+ configured on the daemon) is used.
+ type: "boolean"
+ x-nullable: true
+ Sysctls:
+ description: |
+ Set kernel namespaced parameters (sysctls) in the container.
+ The Sysctls option on services accepts the same sysctls as
+ are supported on containers. Note that while the same sysctls are
+ supported, no guarantees or checks are made about their
+ suitability for a clustered environment, and it's up to the user
+ to determine whether a given sysctl will work properly in a
+ Service.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ # This option is not used by Windows containers
+ CapabilityAdd:
+ type: "array"
+ description: |
+ A list of kernel capabilities to add to the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ - "CAP_SYS_ADMIN"
+ - "CAP_SYS_CHROOT"
+ - "CAP_SYSLOG"
+ CapabilityDrop:
+ type: "array"
+ description: |
+ A list of kernel capabilities to drop from the default set
+ for the container.
+ items:
+ type: "string"
+ example:
+ - "CAP_NET_RAW"
+ Ulimits:
+ description: |
+ A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Name:
+ description: "Name of ulimit"
+ type: "string"
+ Soft:
+ description: "Soft limit"
+ type: "integer"
+ Hard:
+ description: "Hard limit"
+ type: "integer"
+ NetworkAttachmentSpec:
+ description: |
+ Read-only spec type for non-swarm containers attached to swarm overlay
+ networks.
+
+
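The `Ulimits` example above is plain JSON once serialized. A short sketch with an illustrative struct mirroring the documented item shape:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ulimit mirrors the Ulimits item shape documented above; illustrative only.
type ulimit struct {
	Name string `json:"Name"`
	Soft int64  `json:"Soft"`
	Hard int64  `json:"Hard"`
}

func main() {
	// The documented example: raise the open-file limit, with a hard ceiling
	// above the soft limit.
	limits := []ulimit{{Name: "nofile", Soft: 1024, Hard: 2048}}
	out, _ := json.Marshal(limits)
	fmt.Println(string(out)) // [{"Name":"nofile","Soft":1024,"Hard":2048}]
}
```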
+
+ > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
+ > mutually exclusive. PluginSpec is only used when the Runtime field
+ > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
+ > field is set to `attachment`.
+ type: "object"
+ properties:
+ ContainerID:
+ description: "ID of the container represented by this task"
+ type: "string"
+ Resources:
+ description: |
+ Resource requirements which apply to each individual container created
+ as part of the service.
+ type: "object"
+ properties:
+ Limits:
+ description: "Define resources limits."
+ $ref: "#/definitions/Limit"
+ Reservations:
+ description: "Define resources reservation."
+ $ref: "#/definitions/ResourceObject"
+ RestartPolicy:
+ description: |
+ Specification for the restart policy which applies to containers
+ created as part of this service.
+ type: "object"
+ properties:
+ Condition:
+ description: "Condition for restart."
+ type: "string"
+ enum:
+ - "none"
+ - "on-failure"
+ - "any"
+ Delay:
+ description: "Delay between restart attempts."
+ type: "integer"
+ format: "int64"
+ MaxAttempts:
+ description: |
+ Maximum attempts to restart a given container before giving up
+ (default value is 0, which is ignored).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Window:
+ description: |
+ Window is the time window used to evaluate the restart policy
+ (default value is 0, which is unbounded).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Placement:
+ type: "object"
+ properties:
+ Constraints:
+ description: |
+ An array of constraint expressions to limit the set of nodes where
+ a task can be scheduled. Constraint expressions can either use a
+ _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
+ nodes that satisfy every expression (AND match). Constraints can
+ match node or Docker Engine labels as follows:
+
+ node attribute | matches | example
+ ---------------------|--------------------------------|-----------------------------------------------
+ `node.id` | Node ID | `node.id==2ivku8v2gvtg4`
+ `node.hostname` | Node hostname | `node.hostname!=node-2`
+ `node.role` | Node role (`manager`/`worker`) | `node.role==manager`
+ `node.platform.os` | Node operating system | `node.platform.os==windows`
+ `node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
+ `node.labels` | User-defined node labels | `node.labels.security==high`
+ `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`
+
+ `engine.labels` apply to Docker Engine labels like operating system,
+ drivers, etc. Swarm administrators add `node.labels` for operational
+ purposes by using the [`node update endpoint`](#operation/NodeUpdate).
+
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "node.hostname!=node3.corp.example.com"
+ - "node.role!=manager"
+ - "node.labels.type==production"
+ - "node.platform.os==linux"
+ - "node.platform.arch==x86_64"
+ Preferences:
+ description: |
+ Preferences provide a way to make the scheduler aware of factors
+ such as topology. They are provided in order from highest to
+ lowest precedence.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Spread:
+ type: "object"
+ properties:
+ SpreadDescriptor:
+ description: |
+ label descriptor, such as `engine.labels.az`.
+ type: "string"
+ example:
+ - Spread:
+ SpreadDescriptor: "node.labels.datacenter"
+ - Spread:
+ SpreadDescriptor: "node.labels.rack"
+ MaxReplicas:
+ description: |
+ Maximum number of replicas per node (default value is 0, which
+ is unlimited).
+ type: "integer"
+ format: "int64"
+ default: 0
+ Platforms:
+ description: |
+ Platforms stores all the platforms that the service's image can
+ run on. This field is used in the platform filter for scheduling.
+ If empty, then the platform filter is off, meaning there are no
+ scheduling restrictions.
+ type: "array"
+ items:
+ $ref: "#/definitions/Platform"
+ ForceUpdate:
+ description: |
+ A counter that triggers an update even if no relevant parameters have
+ been changed.
+ type: "integer"
+ Runtime:
+ description: |
+ Runtime is the type of runtime specified for the task executor.
+ type: "string"
+ Networks:
+ description: "Specifies which networks the service should attach to."
+ type: "array"
+ items:
+ $ref: "#/definitions/NetworkAttachmentConfig"
+ LogDriver:
+ description: |
+ Specifies the log driver to use for tasks created from this spec. If
+ not present, the default one for the swarm will be used, finally
+ falling back to the engine default if not specified.
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ TaskState:
+ type: "string"
+ enum:
+ - "new"
+ - "allocated"
+ - "pending"
+ - "assigned"
+ - "accepted"
+ - "preparing"
+ - "ready"
+ - "starting"
+ - "running"
+ - "complete"
+ - "shutdown"
+ - "failed"
+ - "rejected"
+ - "remove"
+ - "orphaned"
+
+ Task:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the task."
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Name:
+ description: "Name of the task."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Spec:
+ $ref: "#/definitions/TaskSpec"
+ ServiceID:
+ description: "The ID of the service this task is part of."
+ type: "string"
+ Slot:
+ type: "integer"
+ NodeID:
+ description: "The ID of the node that this task is on."
+ type: "string"
+ AssignedGenericResources:
+ $ref: "#/definitions/GenericResources"
+ Status:
+ type: "object"
+ properties:
+ Timestamp:
+ type: "string"
+ format: "dateTime"
+ State:
+ $ref: "#/definitions/TaskState"
+ Message:
+ type: "string"
+ Err:
+ type: "string"
+ ContainerStatus:
+ type: "object"
+ properties:
+ ContainerID:
+ type: "string"
+ PID:
+ type: "integer"
+ ExitCode:
+ type: "integer"
+ DesiredState:
+ $ref: "#/definitions/TaskState"
+ JobIteration:
+ description: |
+ If the Service this Task belongs to is a job-mode service, contains
+ the JobIteration of the Service this Task was created for. Absent if
+ the Task was created for a Replicated or Global Service.
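Constraint expressions are ANDed `==`/`!=` matches over node attributes, per the table above. A deliberately minimal Go evaluator over a flat attribute map; this is a sketch of the matching rule only, not the swarm scheduler's actual implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// matches reports whether a node, described by a flat attribute map such as
// {"node.role": "manager", "node.labels.type": "production"}, satisfies every
// constraint expression (AND semantics, as documented above). The parser is a
// deliberately minimal sketch: it only understands "key==value" and
// "key!=value".
func matches(attrs map[string]string, constraints []string) bool {
	for _, c := range constraints {
		switch {
		case strings.Contains(c, "!="):
			kv := strings.SplitN(c, "!=", 2)
			if attrs[strings.TrimSpace(kv[0])] == strings.TrimSpace(kv[1]) {
				return false
			}
		case strings.Contains(c, "=="):
			kv := strings.SplitN(c, "==", 2)
			if attrs[strings.TrimSpace(kv[0])] != strings.TrimSpace(kv[1]) {
				return false
			}
		default:
			return false // unknown expression: reject rather than guess
		}
	}
	return true
}

func main() {
	node := map[string]string{
		"node.role":        "worker",
		"node.platform.os": "linux",
		"node.labels.type": "production",
		"node.hostname":    "node1.corp.example.com",
	}
	constraints := []string{
		"node.role!=manager",
		"node.platform.os==linux",
		"node.labels.type==production",
	}
	fmt.Println("schedulable:", matches(node, constraints)) // schedulable: true
}
```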
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. 
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: |
+ The fraction of tasks that may fail during an update before the
+ failure action is invoked, specified as a floating point number
+ between 0 and 1.
+ type: "number"
+ default: 0
+ Order:
+ description: |
+ The order of operations when rolling out an updated task. Either
+ the old task is shut down before the new task is started, or the
+ new task is started before the old task is shut down.
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ RollbackConfig:
+ description: "Specification for the rollback strategy of the service."
+ type: "object"
+ properties:
+ Parallelism:
+ description: |
+ Maximum number of tasks to be rolled back in one iteration (0 means
+ unlimited parallelism).
+ type: "integer"
+ format: "int64"
+ Delay:
+ description: |
+ Amount of time between rollback iterations, in nanoseconds.
+ type: "integer"
+ format: "int64"
+ FailureAction:
+ description: |
+ Action to take if a rolled back task fails to run, or stops
+ running during the rollback.
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ Monitor:
+ description: |
+ Amount of time to monitor each rolled back task for failures, in
+ nanoseconds.
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: |
+ The fraction of tasks that may fail during a rollback before the
+ failure action is invoked, specified as a floating point number
+ between 0 and 1.
+ type: "number"
+ default: 0
+ Order:
+ description: |
+ The order of operations when rolling back a task. Either the old
+ task is shut down before the new task is started, or the new task
+ is started before the old task is shut down.
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ Networks:
+ description: "Specifies which networks the service should attach to."
+ type: "array"
+ items:
+ $ref: "#/definitions/NetworkAttachmentConfig"
+
+ EndpointSpec:
+ $ref: "#/definitions/EndpointSpec"
+
+ EndpointPortConfig:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Protocol:
+ type: "string"
+ enum:
+ - "tcp"
+ - "udp"
+ - "sctp"
+ TargetPort:
+ description: "The port inside the container."
+ type: "integer"
+ PublishedPort:
+ description: "The port on the swarm hosts."
+ type: "integer"
+ PublishMode:
+ description: |
+ The mode in which port is published.
+
+
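`UpdateConfig.Delay` and `Monitor` above are raw nanosecond counts, which map one-to-one onto Go's `time.Duration`. A sketch with an illustrative struct showing how the documented example values serialize:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// updateConfig mirrors the fields of UpdateConfig above. Delay and Monitor
// are plain int64 nanosecond counts on the wire, which is exactly the
// underlying representation of Go's time.Duration.
type updateConfig struct {
	Parallelism     int64         `json:"Parallelism"`
	Delay           time.Duration `json:"Delay"`
	Monitor         time.Duration `json:"Monitor"`
	FailureAction   string        `json:"FailureAction"`
	MaxFailureRatio float64       `json:"MaxFailureRatio"`
	Order           string        `json:"Order"`
}

func main() {
	cfg := updateConfig{
		Parallelism:     1,
		Delay:           time.Second,      // serialized as 1000000000
		Monitor:         15 * time.Second, // serialized as 15000000000
		FailureAction:   "pause",
		MaxFailureRatio: 0.15,
		Order:           "stop-first",
	}
	out, _ := json.Marshal(cfg)
	fmt.Println(string(out))
}
```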
+
+ - "ingress" makes the target port accessible on every node,
+ regardless of whether there is a task for the service running on
+ that node or not.
+ - "host" bypasses the routing mesh and publishes the port directly on
+ the swarm node where that service is running.
+
+ type: "string"
+ enum:
+ - "ingress"
+ - "host"
+ default: "ingress"
+ example: "ingress"
+
+ EndpointSpec:
+ description: "Properties that can be configured to access and load balance a service."
+ type: "object"
+ properties:
+ Mode:
+ description: |
+ The mode of resolution to use for internal load balancing between tasks.
+ type: "string"
+ enum:
+ - "vip"
+ - "dnsrr"
+ default: "vip"
+ Ports:
+ description: |
+ List of exposed ports that this service is accessible on from the
+ outside. Ports can only be provided if `vip` resolution mode is used.
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+
+ Service:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Spec:
+ $ref: "#/definitions/ServiceSpec"
+ Endpoint:
+ type: "object"
+ properties:
+ Spec:
+ $ref: "#/definitions/EndpointSpec"
+ Ports:
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+ VirtualIPs:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ NetworkID:
+ type: "string"
+ Addr:
+ type: "string"
+ UpdateStatus:
+ description: "The status of a service update."
+ type: "object"
+ properties:
+ State:
+ type: "string"
+ enum:
+ - "updating"
+ - "paused"
+ - "completed"
+ StartedAt:
+ type: "string"
+ format: "dateTime"
+ CompletedAt:
+ type: "string"
+ format: "dateTime"
+ Message:
+ type: "string"
+ ServiceStatus:
+ description: |
+ The status of the service's tasks. Provided only when requested as
+ part of a ServiceList operation.
+ type: "object"
+ properties:
+ RunningTasks:
+ description: |
+ The number of tasks for the service currently in the Running state.
+ type: "integer"
+ format: "uint64"
+ example: 7
+ DesiredTasks:
+ description: |
+ The number of tasks for the service desired to be running.
+ For replicated services, this is the replica count from the
+ service spec. For global services, this is computed by taking
+ count of all tasks for the service with a Desired State other
+ than Shutdown.
+ type: "integer"
+ format: "uint64"
+ example: 10
+ CompletedTasks:
+ description: |
+ The number of tasks for a job that are in the Completed state.
+ This field must be cross-referenced with the service type, as the
+ value of 0 may mean the service is not in a job mode, or it may
+ mean the job-mode service has no tasks yet Completed.
+ type: "integer"
+ format: "uint64"
+ JobStatus:
+ description: |
+ The status of the service when it is in one of ReplicatedJob or
+ GlobalJob modes. Absent on Replicated and Global mode services. The
+ JobIteration is an ObjectVersion, but unlike the Service's version,
+ does not need to be sent with an update request.
+ type: "object"
+ properties:
+ JobIteration:
+ description: |
+ JobIteration is a value increased each time a Job is executed,
+ successfully or otherwise. "Executed", in this case, means the
+ job as a whole has been started, not that an individual Task has
+ been launched. A job is "Executed" when its ServiceSpec is
+ updated. JobIteration can be used to disambiguate Tasks belonging
+ to different executions of a job.
Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. + type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerSummary: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. 
`Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + data to store as secret. + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + config data. + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". 
+ type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether this container has been killed because it ran out of memory. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "19.03.12" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. 
+ + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "19.03.12" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.40" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.12" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version of Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.13.14" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "4.19.76-linuxkit" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +
+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: "Indicates if `bridge-nf-call-iptables` is available on the host." + type: "boolean" + example: true + BridgeNfIp6tables: + description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." + type: "boolean" + example: true + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. 
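Since `SystemInfo` is the payload served by the daemon's `GET /info` endpoint (defined elsewhere in this spec), a minimal Go sketch of fetching and decoding a few of the fields above; the unix-socket path is an assumption (remote daemons use TCP instead), and the struct is a hand-written subset, with unknown fields simply ignored by the decoder:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

// systemInfo is a hand-written subset of the SystemInfo definition above.
type systemInfo struct {
	Driver        string `json:"Driver"`
	CgroupDriver  string `json:"CgroupDriver"`
	CgroupVersion string `json:"CgroupVersion"`
	Containers    int    `json:"Containers"`
	Images        int    `json:"Images"`
}

func main() {
	// Dial the daemon over its default unix socket (assumed path).
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	resp, err := client.Get("http://localhost/info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var info systemInfo
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		panic(err)
	}
	fmt.Printf("driver=%s cgroup=%s/v%s containers=%d images=%d\n",
		info.Driver, info.CgroupDriver, info.CgroupVersion,
		info.Containers, info.Images)
}
```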
+ type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information obtained from `uname`. On Windows this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "4.9.38-moby" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Alpine Linux v3.5" + OSVersion: + description: | + Version of the host's operating system + +
+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "16.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +
+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + + > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) + > returns the Swarm version instead of the daemon version, for example + > `swarm/1.2.8`. + type: "string" + example: "17.06.0-ce" + ClusterStore: + description: | + URL of the distributed storage backend. + + + The storage backend is used for multihost networking (to store + network and endpoint information) and by the node discovery mechanism. + +
+ + > **Deprecated**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "consul://consul.corp.example.com:8600/some/path" + ClusterAdvertise: + description: | + The network endpoint that the Engine advertises for the purpose of + node discovery. ClusterAdvertise is a `host:port` combination on which + the daemon is reachable by other hosts. + +
+ + > **Deprecated**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "node5.corp.example.com:8000" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and is automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shut down + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + InitBinary: + description: | + Name and, optionally, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. + type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or as a dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools.
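To make the Base/Size arithmetic in that example concrete, a small Go sketch using only the standard library's `net/netip` (the schema for these fields continues below):

```go
package main

import (
	"fmt"
	"net/netip"
)

// Illustrates the DefaultAddressPools semantics described above:
// a Base of 10.10.0.0/16 with Size 24 yields 2^(24-16) = 256 /24 pools.
func main() {
	base := netip.MustParsePrefix("10.10.0.0/16")
	size := 24
	count := 1 << (size - base.Bits())
	fmt.Printf("%s with Size %d -> %d pools\n", base, size, count)

	// Print the first few sub-pools by stepping the third octet.
	addr := base.Addr().As4()
	for i := 0; i < 3; i++ {
		addr[2] = byte(i)
		fmt.Println(netip.PrefixFrom(netip.AddrFrom4(addr), size)) // 10.10.<i>.0/24
	}
}
```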
+ type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + - "WARNING: bridge-nf-call-iptables is disabled" + - "WARNING: bridge-nf-call-ip6tables is disabled" + + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +
+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and volume-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + AllowNondistributableArtifactsCIDRs: + description: | + List of IP ranges to which nondistributable artifacts can be pushed, + using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632). + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior, and enables the daemon to + push nondistributable artifacts to all registries whose resolved IP + address is within the subnet described by the CIDR syntax. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. + + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + AllowNondistributableArtifactsHostnames: + description: | + List of registry hostnames to which nondistributable artifacts can be + pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior for the specified + registries. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts.
+ type: "array" + items: + type: "string" + example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration override this behavior, insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs. + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. 
+ type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + Expected: + description: | + Commit ID of external tool expected by dockerd as set at build time. + type: "string" + example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of ID's and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. 
+ type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.docker.distribution.manifest.v2+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 3987495 + # TODO Not yet including these fields for now, as they are nil / omitted in our response. + # urls: + # description: | + # List of URLs from which this object MAY be downloaded. + # type: "array" + # items: + # type: "string" + # format: "uri" + # annotations: + # description: | + # Arbitrary metadata relating to the targeted content. + # type: "object" + # additionalProperties: + # type: "string" + # platform: + # $ref: "#/definitions/OCIPlatform" + + OCIPlatform: + type: "object" + x-go-name: Platform + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. 
+ type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. 
+ enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. 
+ items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum. + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in-depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated. + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of the container as the fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers.
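As a rough client-side sketch of that encoding (the list of supported filter keys continues just below), using only the Go standard library:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

func main() {
	// The filters parameter is a JSON-encoded map[string][]string,
	// as described above.
	filters := map[string][]string{
		"status": {"paused"},
		"label":  {"com.example.vendor=Acme"},
	}
	raw, err := json.Marshal(filters)
	if err != nil {
		panic(err)
	}

	q := url.Values{}
	q.Set("all", "true")
	q.Set("filters", string(raw))

	// Request path for the ContainerList operation; the transport
	// (unix socket or TCP) is omitted here.
	fmt.Println("/containers/json?" + q.Encode())
}
```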
+ + Available filters: + + - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) + - `before`=(`<container id>` or `<container name>`) + - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `exited=<int>` containers with exit code of `<int>` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=<ID>` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=<name>` a container's name + - `network`=(`<network id>` or `<network name>`) + - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) + - `since`=(`<container id>` or `<container name>`) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`<volume name>` or `<mount point destination>`) + type: "string" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: +
NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - 
HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerInspectResponse" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + $ref: "#/definitions/ContainerState" + Image: + description: "The container's image ID" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + Platform: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + description: "IDs of exec instances that are running in the container." + type: "array" + items: + type: "string" + x-nullable: true + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: | + The size of files that have been created or changed by this + container. + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." 
+ type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Healthcheck: + Test: ["CMD-SHELL", "exit 0"] + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + ExecIDs: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs: ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + IpcMode: "" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath:
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + Health: + Status: "healthy" + FailingStreak: 0 + Log: + - Start: "2019-12-22T10:59:05.6385933Z" + End: "2019-12-22T10:59:05.8078452Z" + ExitCode: 0 + Output: "" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerTopResponse" + description: "OK response to ContainerTop operation" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: | + Each process running in the container, where each is process + is an array of values corresponding to the titles. + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. 
+ schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + type: "object" + x-go-name: "ContainerChangeResponseItem" + title: "ContainerChangeResponseItem" + description: "change item in response to ContainerChanges operation" + required: [Path, Kind] + properties: + Path: + description: "Path to file that has changed" + type: "string" + x-nullable: false + Kind: + description: "Kind of change" + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. + + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: +
percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-<value>` where `<value>` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`).
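The CPU-percentage formulas in the `/containers/{id}/stats` description above translate almost mechanically into code. A minimal Go sketch, not part of the vendored spec itself; the struct mirrors only the JSON keys the formula touches, and the sample numbers in `main` are illustrative:

```go
package main

import "fmt"

// Subset of the /containers/{id}/stats response, limited to the
// fields the CPU-percentage formula needs.
type cpuUsage struct {
	TotalUsage  uint64   `json:"total_usage"`
	PercpuUsage []uint64 `json:"percpu_usage"`
}

type cpuStats struct {
	CPUUsage       cpuUsage `json:"cpu_usage"`
	SystemCPUUsage uint64   `json:"system_cpu_usage"`
	OnlineCPUs     uint64   `json:"online_cpus"`
}

// cpuPercent applies: CPU % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0
func cpuPercent(cur, pre cpuStats) float64 {
	cpuDelta := float64(cur.CPUUsage.TotalUsage) - float64(pre.CPUUsage.TotalUsage)
	systemDelta := float64(cur.SystemCPUUsage) - float64(pre.SystemCPUUsage)
	if systemDelta <= 0 || cpuDelta < 0 {
		return 0
	}
	// Fall back to len(percpu_usage) when online_cpus is unset, as the
	// description above recommends for compatibility with older daemons.
	nCPUs := float64(cur.OnlineCPUs)
	if nCPUs == 0 {
		nCPUs = float64(len(cur.CPUUsage.PercpuUsage))
	}
	return cpuDelta / systemDelta * nCPUs * 100.0
}

func main() {
	// Illustrative readings one interval apart (precpu_stats, then cpu_stats).
	pre := cpuStats{CPUUsage: cpuUsage{TotalUsage: 100_093_996}, SystemCPUUsage: 9_492_140_000_000, OnlineCPUs: 4}
	cur := cpuStats{CPUUsage: cpuUsage{TotalUsage: 100_215_355}, SystemCPUUsage: 9_492_540_000_000, OnlineCPUs: 4}
	fmt.Printf("CPU %.2f%%\n", cpuPercent(cur, pre))
}
```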
+ type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. + operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." 
+ schema: + type: "object" + title: "ContainerUpdateResponse" + description: "OK response to ContainerUpdate operation" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. 
You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. + + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connection is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header indicates which stream the frame belongs to (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. + + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream is not multiplexed. The data exchanged over the hijacked + connection is simply the raw data from the process PTY and client's + `stdin`.
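The five-step loop described above is small enough to show in full. A minimal Go demultiplexer for the multiplexed stream format, not part of the vendored spec itself; the hand-built frames in `main` stand in for a real hijacked connection or logs response:

```go
package main

import (
	"encoding/binary"
	"errors"
	"io"
	"os"
	"strings"
)

// demux reads frames of the multiplexed stream format described above:
// an 8-byte header {STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4},
// followed by SIZE payload bytes, where SIZE is a big-endian uint32.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	var header [8]byte
	for {
		if _, err := io.ReadFull(r, header[:]); err != nil {
			if errors.Is(err, io.EOF) {
				return nil // clean end of stream
			}
			return err
		}
		size := binary.BigEndian.Uint32(header[4:8])
		var dst io.Writer
		switch header[0] {
		case 0, 1: // stdin (written on stdout) and stdout
			dst = stdout
		case 2: // stderr
			dst = stderr
		default:
			return errors.New("unknown stream type")
		}
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// Two hand-built frames: "hello" on stdout, "oops" on stderr.
	frames := "\x01\x00\x00\x00\x00\x00\x00\x05hello" +
		"\x02\x00\x00\x00\x00\x00\x00\x04oops"
	_ = demux(strings.NewReader(frames), os.Stdout, os.Stderr)
}
```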
+ + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a single + character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then return the exit code."
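Since `ContainerWait` is a plain blocking POST, calling it needs no special client support. A hedged Go sketch, not part of the spec itself: the unix-socket path and the container name `my-container` are illustrative, and `waitResponse` mirrors only the `StatusCode` field of `ContainerWaitResponse`:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
)

// waitResponse mirrors the part of ContainerWaitResponse this sketch
// needs; StatusCode is the container's exit code.
type waitResponse struct {
	StatusCode int64 `json:"StatusCode"`
}

func main() {
	// HTTP client that tunnels requests through the daemon's unix socket
	// (the path is an assumption; adjust for your host).
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// POST blocks until the container reaches the requested condition.
	resp, err := client.Post(
		"http://localhost/containers/my-container/wait?condition=not-running",
		"application/json", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var wr waitResponse
	if err := json.NewDecoder(resp.Body).Decode(&wr); err != nil {
		log.Fatal(err)
	}
	fmt.Println("exit code:", wr.StatusCode)
}
```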
+ operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exited." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64-encoded JSON object with some filesystem header information + about the path. + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64-encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id."
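Because the archive endpoints speak plain `application/x-tar`, Go's standard `archive/tar` reader can consume the response directly. A hedged sketch under the same assumptions as the previous one (illustrative socket path, container name, and in-container path):

```go
package main

import (
	"archive/tar"
	"context"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/url"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// GET /containers/{id}/archive?path=... returns an application/x-tar body.
	path := url.QueryEscape("/etc/hostname") // resource inside the container (illustrative)
	resp, err := client.Get("http://localhost/containers/my-container/archive?path=" + path)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Walk the tar stream and print each entry's name and size.
	tr := tar.NewReader(resp.Body)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}
```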
+ operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + The `path` parameter is asserted to be a directory. If it exists as a file, a 400 error + will be returned with the message "not a directory". + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into." + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the destination file or + directory + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels.
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: + + - `before`=(`[:]`, `` or ``) + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `reference`=(`[:]`) + - `since`=(`[:]`, `` or ``) + type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." 
+ type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. 
+ Any other value is taken as a custom network's name or ID to which this + container should connect. + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/x-tar" + default: "application/x-tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') + - `id=<id>` + - `parent=<id>` + - `type=<string>` + - `description=<string>` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image.
The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. + + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" + - name: "platform" + in: "query" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exist in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/ImageInspect" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image."
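The `X-Registry-Auth` header described above is simply base64url-encoded JSON. A hedged Go sketch of a pull with credentials, not part of the spec itself; the registry, image name, credentials, and socket path are all placeholders:

```go
package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"io"
	"log"
	"net"
	"net/http"
	"os"
)

func main() {
	// Per the X-Registry-Auth description above, the header carries a
	// base64url-encoded JSON auth configuration (credentials are placeholders).
	auth, _ := json.Marshal(map[string]string{
		"username":      "janedoe",
		"password":      "hunter2",
		"serveraddress": "docker.example.com",
	})
	header := base64.URLEncoding.EncodeToString(auth)

	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// POST /images/create?fromImage=...&tag=... triggers a pull.
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost/images/create?fromImage=docker.example.com/myimage&tag=latest", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Registry-Auth", header)

	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // progress messages stream as JSON lines
}
```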
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
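Pushing to a private registry is therefore a two-step dance: tag the image with a registry-qualified name, then push that name. A hedged Go sketch, not part of the spec itself; all names and the socket path are illustrative, and `e30=` is simply base64 of `{}`, an empty auth object:

```go
package main

import (
	"context"
	"io"
	"log"
	"net"
	"net/http"
	"os"
)

// do issues a request through the daemon's unix socket and streams the body.
func do(client *http.Client, method, url string, hdr http.Header) {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range hdr {
		req.Header[k] = v
	}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body)
}

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// 1. Give the local image a tag that references the registry.
	do(client, http.MethodPost,
		"http://localhost/images/myimage/tag?repo=registry.example.com/myimage&tag=latest", nil)

	// 2. Push the registry-qualified tag; X-Registry-Auth is required by
	//    this endpoint ("e30=" encodes the empty auth object).
	do(client, http.MethodPost,
		"http://localhost/images/registry.example.com%2Fmyimage/push?tag=latest",
		http.Header{"X-Registry-Auth": []string{"e30="}})
}
```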
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=<number>` Matches images that have at least 'number' stars.
+ type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. 
+ default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." 
+ type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=` config name or ID + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `node=` node ID + - `plugin`= plugin name or ID + - `scope`= local or swarm + - `secret=` secret name or ID + - `service=` service name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: 
"#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." 
+ type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-` where `` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." 
+              ConsoleSize:
+                type: "array"
+                description: "Initial console size, as an `[height, width]` array."
+                x-nullable: true
+                minItems: 2
+                maxItems: 2
+                items:
+                  type: "integer"
+                  minimum: 0
+            example:
+              Detach: false
+              Tty: true
+              ConsoleSize: [80, 64]
+        - name: "id"
+          in: "path"
+          description: "Exec instance ID"
+          required: true
+          type: "string"
+      tags: ["Exec"]
+  /exec/{id}/resize:
+    post:
+      summary: "Resize an exec instance"
+      description: |
+        Resize the TTY session used by an exec instance. This endpoint only works
+        if `tty` was specified as part of creating and starting the exec instance.
+      operationId: "ExecResize"
+      responses:
+        200:
+          description: "No error"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "No such exec instance"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "Exec instance ID"
+          required: true
+          type: "string"
+        - name: "h"
+          in: "query"
+          description: "Height of the TTY session in characters"
+          type: "integer"
+        - name: "w"
+          in: "query"
+          description: "Width of the TTY session in characters"
+          type: "integer"
+      tags: ["Exec"]
+  /exec/{id}/json:
+    get:
+      summary: "Inspect an exec instance"
+      description: "Return low-level information about an exec instance."
+      operationId: "ExecInspect"
+      produces:
+        - "application/json"
+      responses:
+        200:
+          description: "No error"
+          schema:
+            type: "object"
+            title: "ExecInspectResponse"
+            properties:
+              CanRemove:
+                type: "boolean"
+              DetachKeys:
+                type: "string"
+              ID:
+                type: "string"
+              Running:
+                type: "boolean"
+              ExitCode:
+                type: "integer"
+              ProcessConfig:
+                $ref: "#/definitions/ProcessConfig"
+              OpenStdin:
+                type: "boolean"
+              OpenStderr:
+                type: "boolean"
+              OpenStdout:
+                type: "boolean"
+              ContainerID:
+                type: "string"
+              Pid:
+                type: "integer"
+                description: "The system process ID for the exec process."
+          examples:
+            application/json:
+              CanRemove: false
+              ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126"
+              DetachKeys: ""
+              ExitCode: 2
+              ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b"
+              OpenStderr: true
+              OpenStdin: true
+              OpenStdout: true
+              ProcessConfig:
+                arguments:
+                  - "-c"
+                  - "exit 2"
+                entrypoint: "sh"
+                privileged: false
+                tty: true
+                user: "1000"
+              Running: false
+              Pid: 42000
+        404:
+          description: "No such exec instance"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "Exec instance ID"
+          required: true
+          type: "string"
+      tags: ["Exec"]
+
+  /volumes:
+    get:
+      summary: "List volumes"
+      operationId: "VolumeList"
+      produces: ["application/json"]
+      responses:
+        200:
+          description: "Summary volume data that matches the query"
+          schema:
+            $ref: "#/definitions/VolumeListResponse"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "filters"
+          in: "query"
+          description: |
+            JSON encoded value of the filters (a `map[string][]string`) to
+            process on the volumes list. Available filters:
+
+            - `dangling=<boolean>` When set to `true` (or `1`), returns all
+               volumes that are not in use by a container. When set to `false`
+               (or `0`), only volumes that are in use by one or more
+               containers are returned.
+            - `driver=<volume-driver-name>` Matches volumes based on their driver.
+            - `label=<key>` or `label=<key>:<value>` Matches volumes based on
+               the presence of a `label` alone or a `label` and a value.
+            - `name=<volume-name>` Matches all or part of a volume name.
+          type: "string"
+          format: "json"
+      tags: ["Volume"]
+
+  /volumes/create:
+    post:
+      summary: "Create a volume"
+      operationId: "VolumeCreate"
+      consumes: ["application/json"]
+      produces: ["application/json"]
+      responses:
+        201:
+          description: "The volume was created successfully"
+          schema:
+            $ref: "#/definitions/Volume"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "volumeConfig"
+          in: "body"
+          required: true
+          description: "Volume configuration"
+          schema:
+            $ref: "#/definitions/VolumeCreateOptions"
+      tags: ["Volume"]
+
+  /volumes/{name}:
+    get:
+      summary: "Inspect a volume"
+      operationId: "VolumeInspect"
+      produces: ["application/json"]
+      responses:
+        200:
+          description: "No error"
+          schema:
+            $ref: "#/definitions/Volume"
+        404:
+          description: "No such volume"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "name"
+          in: "path"
+          required: true
+          description: "Volume name or ID"
+          type: "string"
+      tags: ["Volume"]
+
+    put:
+      summary: "Update a volume. Valid only for Swarm cluster volumes"
+      operationId: "VolumeUpdate"
+      consumes: ["application/json"]
+      produces: ["application/json"]
+      responses:
+        200:
+          description: "no error"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such volume"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "name"
+          in: "path"
+          description: "The name or ID of the volume"
+          type: "string"
+          required: true
+        - name: "body"
+          in: "body"
+          schema:
+            # though the schema is an object that contains only a
+            # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object
+            # means that if, later on, we support things like changing the
+            # labels, we can do so without duplicating that information to the
+            # ClusterVolumeSpec.
+            type: "object"
+            description: "Volume configuration"
+            properties:
+              Spec:
+                $ref: "#/definitions/ClusterVolumeSpec"
+                description: |
+                  The spec of the volume to update. Currently, only Availability may
+                  change. All other fields must remain unchanged.
+        - name: "version"
+          in: "query"
+          description: |
+            The version number of the volume being updated. This is required to
+            avoid conflicting writes. Found in the volume's `ClusterVolume`
+            field.
+          type: "integer"
+          format: "int64"
+          required: true
+      tags: ["Volume"]
+
+    delete:
+      summary: "Remove a volume"
+      description: "Instruct the driver to remove the volume."
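+ # Not part of the spec: the volume lifecycle against the endpoints above,
+ # using the Go SDK's volume package (option-struct names differ in pre-v23
+ # SDKs, so treat the types as assumptions). "my-volume" is the example name
+ # used elsewhere in this spec.
+ #
+ #   vol, _ := cli.VolumeCreate(ctx, volume.CreateOptions{ // POST /volumes/create
+ #       Name:   "my-volume",
+ #       Driver: "local",
+ #       Labels: map[string]string{"com.example.usage": "test"},
+ #   })
+ #   got, _ := cli.VolumeInspect(ctx, vol.Name) // GET /volumes/{name}
+ #   _ = cli.VolumeRemove(ctx, got.Name, false) // DELETE /volumes/{name}, force=false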
+ operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network. For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. 
+ operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + title: "NetworkCreateResponse" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: | + Check for networks with duplicate names. Since Network is + primarily keyed based on a random ID and not on the name, and + network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed + way to check for duplicates. CheckDuplicate is there to provide + a best effort checking of any networks which has the same name + but it is not guaranteed to catch all name collisions. + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." 
+ type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 
`10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
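+ # Not part of the spec: plugin pull + enable via the Go SDK. PluginInstall
+ # streams progress and drives the privileges exchange; AcceptAllPermissions
+ # stands in for prompting the user. Plugin names are illustrative.
+ #
+ #   rc, err := cli.PluginInstall(ctx, "sshfs:latest", types.PluginInstallOptions{
+ #       RemoteRef:            "vieux/sshfs:latest",
+ #       AcceptAllPermissions: true,
+ #   })
+ #   if err == nil { io.Copy(io.Discard, rc); rc.Close() }
+ #   _ = cli.PluginEnable(ctx, "sshfs:latest", types.PluginEnableOptions{Timeout: 30}) // POST /plugins/{name}/enable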
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. 
+ required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+
+            Available filters:
+            - `id=<node id>`
+            - `label=<engine label>`
+            - `membership=(accepted|pending)`
+            - `name=<node name>`
+            - `node.label=<node label>`
+            - `role=(manager|worker)`
+          type: "string"
+      tags: ["Node"]
+  /nodes/{id}:
+    get:
+      summary: "Inspect a node"
+      operationId: "NodeInspect"
+      responses:
+        200:
+          description: "no error"
+          schema:
+            $ref: "#/definitions/Node"
+        404:
+          description: "no such node"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "The ID or name of the node"
+          type: "string"
+          required: true
+      tags: ["Node"]
+    delete:
+      summary: "Delete a node"
+      operationId: "NodeDelete"
+      responses:
+        200:
+          description: "no error"
+        404:
+          description: "no such node"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "The ID or name of the node"
+          type: "string"
+          required: true
+        - name: "force"
+          in: "query"
+          description: "Force remove a node from the swarm"
+          default: false
+          type: "boolean"
+      tags: ["Node"]
+  /nodes/{id}/update:
+    post:
+      summary: "Update a node"
+      operationId: "NodeUpdate"
+      responses:
+        200:
+          description: "no error"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such node"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "The ID of the node"
+          type: "string"
+          required: true
+        - name: "body"
+          in: "body"
+          schema:
+            $ref: "#/definitions/NodeSpec"
+        - name: "version"
+          in: "query"
+          description: |
+            The version number of the node object being updated. This is required
+            to avoid conflicting writes.
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + title: "ServiceCreateResponse" + properties: + ID: + description: "The ID of the created service." + type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values.
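Editor's note: the inspect-then-update dance this endpoint implies (carry the spec over unchanged, bump only Labels, pass the current version) looks like this with the vendored client. The secret ID is the placeholder from the example above; everything else follows the endpoint contract.

```go
package main

import (
	"context"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// Placeholder ID taken from the inspect example above.
	secret, _, err := cli.SecretInspectWithRaw(ctx, "ktnbjxoalbkvbvedmg1urrz8h")
	if err != nil {
		panic(err)
	}

	// Only labels may change; the rest of the spec is reused as-is, and the
	// current version guards against conflicting writes.
	if secret.Spec.Labels == nil {
		secret.Spec.Labels = map[string]string{}
	}
	secret.Spec.Labels["env"] = "dev"

	if err := cli.SecretUpdate(ctx, secret.ID, secret.Version, secret.Spec); err != nil {
		panic(err)
	}
}
```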
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + 
$ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + $ref: "#/definitions/DistributionInspect" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to + call back to the client for advanced capabilities. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows + the client to expose gPRC services on that connection. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response follow with + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go new file mode 100644 index 000000000..9fe07e26f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -0,0 +1,47 @@ +package events // import "github.com/docker/docker/api/types/events" + +// Type is used for event-types. +type Type = string + +// List of known event types. +const ( + BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. + ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. + ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. + DaemonEventType Type = "daemon" // DaemonEventType is the event type that daemon generate. 
+ ImageEventType Type = "image" // ImageEventType is the event type that images generate. + NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate. + NodeEventType Type = "node" // NodeEventType is the event type that nodes generate. + PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate. + SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate. + ServiceEventType Type = "service" // ServiceEventType is the event type that services generate. + VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set of attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. + // With data only in container events. + Status string `json:"status,omitempty"` // Deprecated: use Action instead. + ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. + From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. + + Type Type + Action string + Actor Actor + // Engine events are local scope. Cluster events are swarm scope. + Scope string `json:"scope,omitempty"` + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go new file mode 100644 index 000000000..e302bb0ae --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -0,0 +1,36 @@ +package image // import "github.com/docker/docker/api/types/image" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. 
+// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// HistoryResponseItem individual image layer information in response to ImageHistory operation +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go new file mode 100644 index 000000000..2a74b7a59 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -0,0 +1,131 @@ +package time // import "github.com/docker/docker/api/types/time" + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. 
+func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation + parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then it's an RFC3339 like timestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339 like timestamp but the parser failed with an error + } + if _, _, err := parseTimestamp(value); err != nil { + return "", fmt.Errorf("failed to parse value as time or duration: %q", value) + } + return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the +// format ("%d.%09d", time.Unix(), int64(time.Nanosecond())). +// If the incoming nanosecond portion is longer or shorter than 9 digits it is +// converted to nanoseconds. The expectation is that the seconds and +// nanoseconds will be used to create a time variable.
For example: + // + // seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0) + // if err == nil since := time.Unix(seconds, nanoseconds) + // + // returns seconds as def (the given default) if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (int64, int64, error) { + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md new file mode 100644 index 000000000..992f18117 --- /dev/null +++ b/vendor/github.com/docker/docker/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. + +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 000000000..b76bf366b --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// BuildCancel requests the daemon to cancel the ongoing build request.
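Editor's note on the timestamp helpers above: `GetTimestamp` accepts a Go duration, an RFC3339-style value, or a raw Unix timestamp, and normalizes all of them for the daemon's `since`/`until` parameters. A minimal sketch (the input values are illustrative only):

```go
package main

import (
	"fmt"
	"time"

	timetypes "github.com/docker/docker/api/types/time"
)

func main() {
	now := time.Now()

	// A duration is interpreted relative to the reference time,
	// i.e. "ten minutes before now".
	since, err := timetypes.GetTimestamp("10m", now)
	if err != nil {
		panic(err)
	}
	fmt.Println(since) // Unix seconds

	// An RFC3339 value is parsed and returned as "sec.nsec".
	until, err := timetypes.GetTimestamp("2016-06-07T21:07:31Z", now)
	if err != nil {
		panic(err)
	}
	fmt.Println(until)
}
```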
+func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go new file mode 100644 index 000000000..397d67cdc --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -0,0 +1,45 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) + filters, err := filters.ToJSON(opts.Filters) + if err != nil { + return nil, errors.Wrap(err, "prune could not marshal filters option") + } + query.Set("filters", filters) + + serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + + if err != nil { + return nil, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return &report, nil +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go new file mode 100644 index 000000000..921024fe4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go new file mode 100644 index 000000000..54f55fa76 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go new file mode 100644 index 000000000..39cfb959f --- /dev/null +++ 
b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointList returns the checkpoints of the given container in the docker host +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return checkpoints, err + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + return checkpoints, err +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go new file mode 100644 index 000000000..09ea4851f --- /dev/null +++ b/vendor/github.com/docker/docker/client/client.go @@ -0,0 +1,321 @@ +/* +Package client is a Go client for the Docker Engine API. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/api/ + +# Usage + +You use the library by constructing a client object using [NewClientWithOpts] +and calling methods on it. The client can be configured from environment +variables by passing the [FromEnv] option, or configured manually by passing any +of the other available [Opts]. + +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } +*/ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net" + "net/http" + "net/url" + "path" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. 
API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. + negotiateVersion bool + + // negotiated indicates that API version negotiation took place + negotiated bool +} + +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return ErrRedirect, otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) +// in the client. The Docker client (and by extension docker API client) can be +// made to send a request like POST /containers//start where what would normally +// be in the name section of the URL is empty. This triggers an HTTP 301 from +// the daemon. +// +// In go 1.8 this 301 will be converted to a GET request, and ends up getting +// a 404 from the daemon. This behavior change manifests in the client in that +// before, the 301 was not followed and the client did not generate an error, +// but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + +// NewClientWithOpts initializes a new API client with a default HTTPClient, and +// default API host and version. It also initializes the custom HTTP headers to +// add to each request. +// +// It takes an optional list of Opt functional arguments, which are applied in +// the order they're provided, which allows modifying the defaults when creating +// the client. For example, the following initializes a client that configures +// itself with values from environment variables (client.FromEnv), and has +// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()). +// +// cli, err := client.NewClientWithOpts( +// client.FromEnv, +// client.WithAPIVersionNegotiation(), +// ) +func NewClientWithOpts(ops ...Opt) (*Client, error) { + client, err := defaultHTTPClient(DefaultDockerHost) + if err != nil { + return nil, err + } + c := &Client{ + host: DefaultDockerHost, + version: api.DefaultVersion, + client: client, + proto: defaultProto, + addr: defaultAddr, + } + + for _, op := range ops { + if err := op(c); err != nil { + return nil, err + } + } + + if c.scheme == "" { + c.scheme = "http" + + tlsConfig := resolveTLSConfig(c.client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + c.scheme = "https" + } + } + + return c, nil +} + +func defaultHTTPClient(host string) (*http.Client, error) { + hostURL, err := ParseHostURL(host) + if err != nil { + return nil, err + } + transport := &http.Transport{} + _ = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, + }, nil +} + +// Close the transport used by the client +func (cli *Client) Close() error { + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + return nil +} + +// getAPIPath returns the versioned request path to call the api. 
+// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { + var apiPath string + if cli.negotiateVersion && !cli.negotiated { + cli.NegotiateAPIVersion(ctx) + } + if cli.version != "" { + v := strings.TrimPrefix(cli.version, "v") + apiPath = path.Join(cli.basePath, "/v"+v, p) + } else { + apiPath = path.Join(cli.basePath, p) + } + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() +} + +// ClientVersion returns the API version used by this client. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// NegotiateAPIVersion queries the API and updates the version to match the API +// version. NegotiateAPIVersion downgrades the client's API version to match the +// APIVersion if the ping version is lower than the default version. If the API +// version reported by the server is higher than the maximum version supported +// by the client, it uses the client's maximum version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, or if the +// client did not get a successful ping response, it assumes it is connected with +// an old daemon that does not support API version negotiation, in which case it +// downgrades to the latest version of the API before version negotiation was +// added (1.24). +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { + if !cli.manualOverride { + ping, _ := cli.Ping(ctx) + cli.negotiateAPIVersionPing(ping) + } +} + +// NegotiateAPIVersionPing downgrades the client's API version to match the +// APIVersion in the ping response. If the API version in pingResponse is higher +// than the maximum version supported by the client, it uses the client's maximum +// version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, we assume +// we are connected with an old daemon without API version negotiation support, +// and downgrade to the latest version of the API before version negotiation was +// added (1.24). +func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { + if !cli.manualOverride { + cli.negotiateAPIVersionPing(pingResponse) + } +} + +// negotiateAPIVersionPing queries the API and updates the version to match the +// API version from the ping response. +func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { + // default to the latest version before versioning headers existed + if pingResponse.APIVersion == "" { + pingResponse.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(pingResponse.APIVersion, cli.version) { + cli.version = pingResponse.APIVersion + } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. 
+ if cli.negotiateVersion { + cli.negotiated = true + } +} + +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + c := *cli.client + return &c +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { + protoAddrParts := strings.SplitN(host, "://", 2) + if len(protoAddrParts) == 1 { + return nil, errors.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + proto, addr := protoAddrParts[0], protoAddrParts[1] + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return nil, err + } + addr = parsed.Host + basePath = parsed.Path + } + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil +} + +// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, +// that can be used for proxying the daemon connection. +// +// Used by `docker dial-stdio` (docker/cli#889). +func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if transport, ok := cli.client.Transport.(*http.Transport); ok { + if transport.DialContext != nil && transport.TLSClientConfig == nil { + return transport.DialContext(ctx, cli.proto, cli.addr) + } + } + return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + } +} diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go new file mode 100644 index 000000000..9e366ce20 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_deprecated.go @@ -0,0 +1,27 @@ +package client + +import "net/http" + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. +// +// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion], +// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API +// version negotiation by passing the [WithAPIVersionNegotiation] option instead +// of WithVersion. +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) +} + +// NewEnvClient initializes a new API client based on environment variables. +// See FromEnv for a list of supported environment variables. +// +// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option.
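Editor's note: the replacement pattern these deprecation notices point to, combining `FromEnv` with `WithAPIVersionNegotiation`, looks like this in practice. A minimal sketch, assuming a reachable daemon configured via the usual environment variables:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Honors DOCKER_HOST and friends, and negotiates the API version
	// lazily on the first request.
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("client:", cli.ClientVersion(), "server:", ping.APIVersion)
}
```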
+func NewEnvClient() (*Client, error) { + return NewClientWithOpts(FromEnv) +} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go new file mode 100644 index 000000000..f0783f708 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -0,0 +1,11 @@ +//go:build linux || freebsd || openbsd || netbsd || darwin || solaris || illumos || dragonfly +// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly + +package client // import "github.com/docker/docker/client" + +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. +const DefaultDockerHost = "unix:///var/run/docker.sock" + +const defaultProto = "unix" +const defaultAddr = "/var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go new file mode 100644 index 000000000..5abe60457 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -0,0 +1,8 @@ +package client // import "github.com/docker/docker/client" + +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. +const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +const defaultProto = "npipe" +const defaultAddr = "//./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go new file mode 100644 index 000000000..f6b1881fc --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigCreate creates a new config. 
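Editor's note: `ConfigCreate` below takes a `swarm.ConfigSpec` and returns the new object's ID. A minimal sketch; the config name and payload are illustrative, and `Data` is raw bytes (the JSON marshaling handles the base64 transport encoding):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	spec := swarm.ConfigSpec{
		Annotations: swarm.Annotations{Name: "server.conf"},
		Data:        []byte("listen 8080;\n"), // raw payload, not pre-encoded
	}

	resp, err := cli.ConfigCreate(context.Background(), spec)
	if err != nil {
		panic(err)
	}
	fmt.Println("created config:", resp.ID)
}
```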
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go new file mode 100644 index 000000000..9be7882c3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if id == "" { + return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} + } + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return swarm.Config{}, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go new file mode 100644 index 000000000..565acc6e2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigList returns the list of configs. +func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + return configs, err +} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go new file mode 100644 index 000000000..24b94e9c1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ConfigRemove removes a config. 
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.30", "config remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go new file mode 100644 index 000000000..1ac298543 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigUpdate attempts to update a config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + if err := cli.NewVersionError("1.30", "config update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", version.String()) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go new file mode 100644 index 000000000..ba92117d3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -0,0 +1,59 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerAttach attaches a connection to a container in the server. +// It returns a types.HijackedResponse with the hijacked connection +// and a reader to get output. It's up to the caller to close +// the hijacked connection by calling types.HijackedResponse.Close. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream.
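Editor's note: the doc comment above points at `stdcopy.StdCopy` for splitting the 8-byte-framed stream. A minimal sketch of that combination; the container name is a placeholder for a container created without a TTY:

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	resp, err := cli.ContainerAttach(context.Background(), "my-container", types.ContainerAttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Close()

	// StdCopy reads the framed stream and routes each payload to the
	// matching writer, returning once the stream ends.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
		panic(err)
	}
}
```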
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{ + "Content-Type": {"text/plain"}, + } + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go new file mode 100644 index 000000000..cd7f76346 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -0,0 +1,55 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "errors" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ContainerCommit applies changes to a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + ref, err := reference.ParseNormalizedNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + ref = reference.TagNameOnly(ref) + + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = reference.FamiliarName(ref) + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if !options.Pause { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go new file mode 100644 index 000000000..883be7fa3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -0,0 +1,93 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" +) + +// ContainerStatPath returns stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
+ + urlStr := "/containers/" + containerID + "/archive" + response, err := cli.head(ctx, urlStr, query, nil) + defer ensureReaderClosed(response) + if err != nil { + return types.ContainerPathStat{}, err + } + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + containerID + "/archive" + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + defer ensureReaderClosed(response) + if err != nil { + return err + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := "/containers/" + containerID + "/archive" + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+ stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go new file mode 100644 index 000000000..f82420b67 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -0,0 +1,83 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "path" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based on the given configuration. +// It can be associated with a name, but it's not mandatory. +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) { + var response container.CreateResponse + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { + return response, err + } + + if hostConfig != nil { + if versions.LessThan(cli.ClientVersion(), "1.25") { + // When using API 1.24 and under, the client is responsible for removing the container + hostConfig.AutoRemove = false + } + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") { + // KernelMemory was added in API 1.40, and deprecated in API 1.42 + hostConfig.KernelMemory = 0 + } + if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") { + // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize + hostConfig.ConsoleSize = [2]uint{0, 0} + } + } + + query := url.Values{} + if p := formatPlatform(platform); p != "" { + query.Set("platform", p) + } + + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} + +// formatPlatform returns a formatted string representing platform (e.g. linux/arm/v7). +// +// Similar to containerd's platforms.Format(), but does allow components to be +// omitted (e.g. 
pass "architecture" only, without "os": +// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263 +func formatPlatform(platform *specs.Platform) string { + if platform == nil { + return "" + } + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go new file mode 100644 index 000000000..29dac8491 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" +) + +// ContainerDiff shows differences in a container filesystem since it was started. +func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { + var changes []container.ContainerChangeResponseItem + + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return changes, err + } + + err = json.NewDecoder(serverResp.body).Decode(&changes) + return changes, err +} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go new file mode 100644 index 000000000..6a2cb006f --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -0,0 +1,66 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" +) + +// ContainerExecCreate creates a new exec configuration to run an exec process. +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + var response types.IDResponse + + if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + return response, err + } + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } + + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} + +// ContainerExecStart starts an exec process already created in the docker host. +func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) + ensureReaderClosed(resp) + return err +} + +// ContainerExecAttach attaches a connection to an exec process in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. 
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } + headers := map[string][]string{ + "Content-Type": {"application/json"}, + } + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) +} + +// ContainerExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + var response types.ContainerExecInspect + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go new file mode 100644 index 000000000..d0c0a5cba --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go new file mode 100644 index 000000000..d48f0d3a6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -0,0 +1,53 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + if containerID == "" { + return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. 
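As a usage aside for the export call defined above: the returned stream is a tar of the container filesystem, and the caller owns it. A minimal sketch (the output file name and container name are placeholders):

package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// ContainerExport returns a tar stream of the container's filesystem.
	rc, err := cli.ContainerExport(ctx, "my-container")
	if err != nil {
		panic(err)
	}
	defer rc.Close() // the caller owns the stream

	out, err := os.Create("rootfs.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, rc); err != nil {
		panic(err)
	}
}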
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + if containerID == "" { + return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} + } + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go new file mode 100644 index 000000000..7c9529f1e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + if signal != "" { + query.Set("signal", signal) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go new file mode 100644 index 000000000..bd491b3db --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -0,0 +1,57 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainerList returns the list of containers in the docker host. 
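A usage sketch for the list call that follows, demonstrating the filters plumbing visible in its implementation; the status filter value and FromEnv construction are illustrative assumptions:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Only running containers; the client serializes this into the "filters"
	// query parameter, as the implementation below shows.
	f := filters.NewArgs()
	f.Add("status", "running")

	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{Filters: f})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image, c.Status)
	}
}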
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + return containers, err +} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go new file mode 100644 index 000000000..9bdf2b0fa --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -0,0 +1,80 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. 
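The TTY caveat above matters for consumers: a TTY container yields a raw stream, a non-TTY container the multiplexed one. A sketch for the TTY case (the container name, and the assumption that it was started with a TTY, are placeholders; the non-TTY case would go through stdcopy.StdCopy as described):

package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Tail the last 100 lines and keep following new output.
	rc, err := cli.ContainerLogs(ctx, "my-container", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Tail:       "100",
		Follow:     true,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// This container is assumed to run with a TTY, so the stream is raw and
	// can be copied directly to the terminal.
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		panic(err)
	}
}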
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "until"`) + } + query.Set("until", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go new file mode 100644 index 000000000..5e7271a37 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerPause pauses the main process of a given container without terminating it. +func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go new file mode 100644 index 000000000..04383deaa --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go new file mode 100644 index 000000000..c21de609b --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -0,0 +1,27 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerRemove kills and removes a container from the docker host. 
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go new file mode 100644 index 000000000..240fdf552 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go new file mode 100644 index 000000000..a9d4c0c79 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. +func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go new file mode 100644 index 000000000..1e0ad9998 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerRestart stops and starts a container again. +// It makes the daemon wait for the container to be up again for +// a specific amount of time, given the timeout. 
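Both the restart call below and the stop call later in this file share container.StopOptions. A usage sketch (values are placeholders; note that the Signal field is only sent on API 1.42 and newer, as both implementations show):

package main

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	timeout := 10 // seconds to wait before the daemon kills the container
	err = cli.ContainerRestart(ctx, "my-container", container.StopOptions{
		Signal:  "SIGTERM", // only honored when the negotiated API is >= 1.42
		Timeout: &timeout,
	})
	if err != nil {
		panic(err)
	}
}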
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { + query := url.Values{} + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go new file mode 100644 index 000000000..c2e0b15dc --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container. +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go new file mode 100644 index 000000000..0a6488dde --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -0,0 +1,42 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. +func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} + +// ContainerStatsOneShot gets a single stat entry from a container. 
+// It differs from `ContainerStats` in that the API should not wait to prime the stats +func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + query.Set("one-shot", "1") + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go new file mode 100644 index 000000000..2a43ce227 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -0,0 +1,30 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerStop stops a container. In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. +func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { + query := url.Values{} + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go new file mode 100644 index 000000000..a5b78999b --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// ContainerTop shows process information from within a container. 
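The one-shot variant above returns a single JSON document on the stats endpoint. A hedged decode sketch; decoding into types.StatsJSON is an assumption based on common usage of this API, not something this file prescribes, and the container name is a placeholder:

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	stats, err := cli.ContainerStatsOneShot(ctx, "my-container")
	if err != nil {
		panic(err)
	}
	defer stats.Body.Close()

	// The body holds exactly one stats sample when one-shot is requested.
	var v types.StatsJSON
	if err := json.NewDecoder(stats.Body).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%s: mem usage %d bytes (os: %s)\n", v.Name, v.MemoryStats.Usage, stats.OSType)
}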
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go new file mode 100644 index 000000000..1d8f87316 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go new file mode 100644 index 000000000..bf68a5300 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/container" +) + +// ContainerUpdate updates the resources of a container. +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go new file mode 100644 index 000000000..2375eb1e8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -0,0 +1,104 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/url" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". +// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. 
This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.WaitResponse) + errC := make(chan error, 1) + + query := url.Values{} + if condition != "" { + query.Set("condition", string(condition)) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC + } + + go func() { + defer ensureReaderClosed(resp) + + body := resp.body + responseText := bytes.NewBuffer(nil) + stream := io.TeeReader(body, responseText) + + var res container.WaitResponse + if err := json.NewDecoder(stream).Decode(&res); err != nil { + // NOTE(nicks): The /wait API does not work well with HTTP proxies. + // At any time, the proxy could cut off the response stream. + // + // But because the HTTP status has already been written, the proxy's + // only option is to write a plaintext error message. + // + // If there's a JSON parsing error, read the real error message + // off the body and send it to the client. + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. +func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) { + resultC := make(chan container.WaitResponse) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.WaitResponse + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go new file mode 100644 index 000000000..ba0d92e9e --- /dev/null +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) { + var query url.Values + if len(options.Types) > 0 { + query = url.Values{} + for _, t := range options.Types { + query.Add("type", string(t)) + } + } + + serverResp, err := cli.get(ctx, "/system/df", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.DiskUsage{}, err + } + + var du types.DiskUsage + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) + } + return du, nil +} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go new file mode 100644 index 
000000000..7f36c99a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/distribution_inspect.go
@@ -0,0 +1,38 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+
+	registrytypes "github.com/docker/docker/api/types/registry"
+)
+
+// DistributionInspect returns the image digest with the full manifest.
+func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) {
+	// Contact the registry to retrieve digest and platform information
+	var distributionInspect registrytypes.DistributionInspect
+	if image == "" {
+		return distributionInspect, objectNotFoundError{object: "distribution", id: image}
+	}
+
+	if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil {
+		return distributionInspect, err
+	}
+	var headers map[string][]string
+
+	if encodedRegistryAuth != "" {
+		headers = map[string][]string{
+			"X-Registry-Auth": {encodedRegistryAuth},
+		}
+	}
+
+	resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return distributionInspect, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&distributionInspect)
+	return distributionInspect, err
+}
diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go
new file mode 100644
index 000000000..61dd45c1d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/envvars.go
@@ -0,0 +1,90 @@
+package client // import "github.com/docker/docker/client"
+
+const (
+	// EnvOverrideHost is the name of the environment variable that can be used
+	// to override the default host to connect to (DefaultDockerHost).
+	//
+	// This env-var is read by FromEnv and WithHostFromEnv and when set to a
+	// non-empty value, takes precedence over the default host (which is platform
+	// specific), or any host already set.
+	EnvOverrideHost = "DOCKER_HOST"
+
+	// EnvOverrideAPIVersion is the name of the environment variable that can
+	// be used to override the API version to use. Value should be
+	// formatted as MAJOR.MINOR, for example, "1.19".
+	//
+	// This env-var is read by FromEnv and WithVersionFromEnv and when set to a
+	// non-empty value, takes precedence over API version negotiation.
+	//
+	// This environment variable should be used for debugging purposes only, as
+	// it can set the client to use an incompatible (or invalid) API version.
+	EnvOverrideAPIVersion = "DOCKER_API_VERSION"
+
+	// EnvOverrideCertPath is the name of the environment variable that can be
+	// used to specify the directory from which to load the TLS certificates
+	// (ca.pem, cert.pem, key.pem). These certificates are used to configure
+	// the Client for a TCP connection protected by TLS client authentication.
+	//
+	// TLS certificate verification is enabled by default if the Client is configured
+	// to use a TLS connection. Refer to EnvTLSVerify below to learn how to
+	// disable verification for testing purposes.
+	//
+	// WARNING: Access to the remote API is equivalent to root access to the
+	// host where the daemon runs. Do not expose the API without protection,
+	// and only if needed. Make sure you are familiar with the "daemon attack
+	// surface" (https://docs.docker.com/go/attack-surface/).
+	//
+	// For local access to the API, it is recommended to connect with the daemon
+	// using the default local socket connection (on Linux), or the named pipe
+	// (on Windows).
+	//
+	// If you need to access the API of a remote daemon, consider using an SSH
+	// (ssh://) connection, which is easier to set up, and requires no additional
+	// configuration if the host is accessible using ssh.
+	//
+	// If you cannot use the alternatives above, and you must expose the API over
+	// a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/
+	// to learn how to configure the daemon and client to use a TCP connection
+	// with TLS client authentication. Make sure you know the differences between
+	// a regular TLS connection and a TLS connection protected by TLS client
+	// authentication, and verify that the API cannot be accessed by other clients.
+	EnvOverrideCertPath = "DOCKER_CERT_PATH"
+
+	// EnvTLSVerify is the name of the environment variable that can be used to
+	// enable or disable TLS certificate verification. When set to a non-empty
+	// value, TLS certificate verification is enabled, and the client is configured
+	// to use a TLS connection, using certificates from the default directories
+	// (within `~/.docker`); refer to EnvOverrideCertPath above for additional
+	// details.
+	//
+	// WARNING: Access to the remote API is equivalent to root access to the
+	// host where the daemon runs. Do not expose the API without protection,
+	// and only if needed. Make sure you are familiar with the "daemon attack
+	// surface" (https://docs.docker.com/go/attack-surface/).
+	//
+	// Before setting up your client and daemon to use a TCP connection with TLS
+	// client authentication, consider using one of the alternatives mentioned
+	// in EnvOverrideCertPath above.
+	//
+	// Disabling TLS certificate verification (for testing purposes)
+	//
+	// TLS certificate verification is enabled by default if the Client is configured
+	// to use a TLS connection, and it is highly recommended to keep verification
+	// enabled to prevent machine-in-the-middle attacks. Refer to the documentation
+	// at https://docs.docker.com/engine/security/protect-access/ and pages linked
+	// from that page to learn how to configure the daemon and client to use a
+	// TCP connection with TLS client authentication enabled.
+	//
+	// Set the "DOCKER_TLS_VERIFY" environment variable to an empty string ("")
+	// to disable TLS certificate verification. Disabling verification is insecure,
+	// so it should only be done for testing purposes. From the Go documentation
+	// (https://pkg.go.dev/crypto/tls#Config):
+	//
+	// InsecureSkipVerify controls whether a client verifies the server's
+	// certificate chain and host name. If InsecureSkipVerify is true, crypto/tls
+	// accepts any certificate presented by the server and any host name in that
+	// certificate. In this mode, TLS is susceptible to machine-in-the-middle
+	// attacks unless custom verification is used. This should be used only for
+	// testing or in combination with VerifyConnection or VerifyPeerCertificate.
+	EnvTLSVerify = "DOCKER_TLS_VERIFY"
+)
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
new file mode 100644
index 000000000..e5a8a865f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/errors.go
@@ -0,0 +1,93 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/errdefs"
+	"github.com/pkg/errors"
+)
+
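The variables documented in envvars.go above are read by the FromEnv client option. A small sketch of the usual wiring; the Ping call is just an arbitrary smoke test, chosen here for illustration:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// FromEnv honors DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH and
	// DOCKER_TLS_VERIFY; with DOCKER_HOST unset it falls back to the
	// platform-default socket.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("daemon API version:", ping.APIVersion)
}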
+// errConnectionFailed implements an error returned when a connection fails.
+type errConnectionFailed struct {
+	host string
+}
+
+// Error returns a string representation of an errConnectionFailed
+func (err errConnectionFailed) Error() string {
+	if err.host == "" {
+		return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?"
+	}
+	return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host)
+}
+
+// IsErrConnectionFailed returns true if the error is caused by a failed connection.
+func IsErrConnectionFailed(err error) bool {
+	return errors.As(err, &errConnectionFailed{})
+}
+
+// ErrorConnectionFailed returns an error with the host in the error message when the connection to the docker daemon failed.
+func ErrorConnectionFailed(host string) error {
+	return errConnectionFailed{host: host}
+}
+
+// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility
+type notFound interface {
+	error
+	NotFound() bool
+}
+
+// IsErrNotFound returns true if the error is a NotFound error, which is returned
+// by the API when some object is not found.
+func IsErrNotFound(err error) bool {
+	if errdefs.IsNotFound(err) {
+		return true
+	}
+	var e notFound
+	return errors.As(err, &e)
+}
+
+type objectNotFoundError struct {
+	object string
+	id     string
+}
+
+func (e objectNotFoundError) NotFound() {}
+
+func (e objectNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
+}
+
+// IsErrUnauthorized returns true if the error is caused
+// when a remote registry authentication fails
+//
+// Deprecated: use errdefs.IsUnauthorized
+func IsErrUnauthorized(err error) bool {
+	return errdefs.IsUnauthorized(err)
+}
+
+type pluginPermissionDenied struct {
+	name string
+}
+
+func (e pluginPermissionDenied) Error() string {
+	return "Permission denied while installing plugin " + e.name
+}
+
+// IsErrNotImplemented returns true if the error is a NotImplemented error.
+// This is returned by the API when a requested feature has not been
+// implemented.
+//
+// Deprecated: use errdefs.IsNotImplemented
+func IsErrNotImplemented(err error) bool {
+	return errdefs.IsNotImplemented(err)
+}
+
+// NewVersionError returns an error if the APIVersion required
+// is less than the current supported version
+func (cli *Client) NewVersionError(APIrequired, feature string) error {
+	if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
+		return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go
new file mode 100644
index 000000000..a9c48a928
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/events.go
@@ -0,0 +1,101 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/events"
+	"github.com/docker/docker/api/types/filters"
+	timetypes "github.com/docker/docker/api/types/time"
+)
+
+// Events returns a stream of events in the daemon. It's up to the caller to close the stream
+// by cancelling the context. Once the stream has been completely read, an io.EOF error will
+// be sent over the error channel. If an error is sent, all processing will be stopped. It's up
+// to the caller to reopen the stream in the event of an error by reinvoking this method.
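A hedged consumer sketch for the channel pair documented above; the filter values and the timeout are placeholders:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	f := filters.NewArgs()
	f.Add("type", "container")

	msgs, errs := cli.Events(ctx, types.EventsOptions{Filters: f})
	for {
		select {
		case m := <-msgs:
			fmt.Println(m.Type, m.Action, m.Actor.ID)
		case err := <-errs:
			// io.EOF marks the end of the stream; context cancellation and
			// timeout errors arrive on this channel as well.
			fmt.Println("events stream ended:", err)
			return
		}
	}
}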
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go new file mode 100644 index 000000000..6bdacab10 --- /dev/null +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -0,0 +1,153 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bufio" + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + apiPath := cli.getAPIPath(ctx, path, query) + req, err := http.NewRequest(http.MethodPost, apiPath, bodyEncoded) + if err != nil { + return types.HijackedResponse{}, err + } + req = cli.addHeaders(req, headers) + + conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") + if err != nil { + return types.HijackedResponse{}, err + } + + return types.NewHijackedResponse(conn, mediaType), err +} + +// DialHijack returns a hijacked connection with negotiated protocol proto. +func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest(http.MethodPost, url, nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + conn, _, err := cli.setupHijackConn(ctx, req, proto) + return conn, err +} + +// fallbackDial is used when WithDialer() was not called. +// See cli.Dialer(). 
+func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + return tls.Dial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} + +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", proto) + + dialer := cli.Dialer() + conn, err := dialer(ctx) + if err != nil { + return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + resp, err := clientconn.Do(req) + + //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons + if err != httputil.ErrPersistEOF { + if err != nil { + return nil, "", err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + resp.Body.Close() + return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + } + } + + c, br := clientconn.Hijack() + if br.Buffered() > 0 { + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite if the underlying connection + // implements it. + if _, ok := c.(types.CloseWriter); ok { + c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + } else { + c = &hijackedConn{c, br} + } + } else { + br.Reset(nil) + } + + var mediaType string + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { + // Prior to 1.42, Content-Type is always set to raw-stream and not relevant + mediaType = resp.Header.Get("Content-Type") + } + + return c, mediaType, nil +} + +// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case +// that a) there was already buffered data in the http layer when Hijack() was +// called, and b) the underlying net.Conn does *not* implement CloseWrite(). +// hijackedConn does not implement CloseWrite() either. +type hijackedConn struct { + net.Conn + r *bufio.Reader +} + +func (c *hijackedConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +// hijackedConnCloseWriter is a hijackedConn which additionally implements +// CloseWrite(). It is returned by setupHijackConn in the case that a) there +// was already buffered data in the http layer when Hijack() was called, and b) +// the underlying net.Conn *does* implement CloseWrite(). 
+type hijackedConnCloseWriter struct { + *hijackedConn +} + +var _ types.CloseWriter = &hijackedConnCloseWriter{} + +func (c *hijackedConnCloseWriter) CloseWrite() error { + conn := c.Conn.(types.CloseWriter) + return conn.CloseWrite() +} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go new file mode 100644 index 000000000..d16e1d8ea --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -0,0 +1,146 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ImageBuild sends a request to the daemon to build images. +// The Body in the response implements an io.ReadCloser and it's up to the caller to +// close it. +func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + + headers.Set("Content-Type", "application/x-tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + "extrahosts": options.ExtraHosts, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + query.Set("target", options.Target) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return 
query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + if options.SessionID != "" { + query.Set("session", options.SessionID) + } + if options.Platform != "" { + if err := cli.NewVersionError("1.32", "platform"); err != nil { + return query, err + } + query.Set("platform", strings.ToLower(options.Platform)) + } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + query.Set("version", string(options.Version)) + + if options.Outputs != nil { + outputsJSON, err := json.Marshal(options.Outputs) + if err != nil { + return query, err + } + query.Set("outputs", string(outputsJSON)) + } + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go new file mode 100644 index 000000000..b1c022777 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageCreate creates a new image based on the parent options. +// It returns the JSON content in the response body. +func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("tag", getAPITagFromNamedRef(ref)) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go new file mode 100644 index 000000000..b5bea10d8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/image" +) + +// ImageHistory returns the changes in an image in history format. 
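The build endpoint above expects the build context as a tar stream sent with Content-Type application/x-tar. A hedged sketch of driving it; using pkg/archive to tar a local directory, the directory path, and the tag are illustrative assumptions:

package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/archive"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Tar up the build directory; ImageBuild streams it to the daemon.
	buildCtx, err := archive.TarWithOptions("./app", &archive.TarOptions{})
	if err != nil {
		panic(err)
	}
	defer buildCtx.Close()

	resp, err := cli.ImageBuild(ctx, buildCtx, types.ImageBuildOptions{
		Tags:       []string{"example/app:latest"},
		Dockerfile: "Dockerfile",
		Remove:     true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The body is a stream of JSON progress messages; raw copy for brevity.
	_, _ = io.Copy(os.Stdout, resp.Body)
}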
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { + var history []image.HistoryResponseItem + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + return history, err +} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go new file mode 100644 index 000000000..c5de42cb7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -0,0 +1,40 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based on the source options. +// It returns the JSON content in the response body. +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + // Check if the given image name can be resolved + if _, err := reference.ParseNormalizedNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go new file mode 100644 index 000000000..1de10e5a0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + if imageID == "" { + return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} + } + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ImageInspect{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go new file mode 100644 index 000000000..950d51333 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -0,0 +1,49 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" +) + +// ImageList returns a list of images in the docker host. 
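// Editorial sketch, not part of the vendored file: listing images that match a
// reference filter, reusing cli and ctx from the sketch above; "alpine" is a
// placeholder pattern.
//
//	images, err := cli.ImageList(ctx, types.ImageListOptions{
//		Filters: filters.NewArgs(filters.Arg("reference", "alpine")),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, img := range images {
//		fmt.Println(img.ID, img.RepoTags)
//	}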
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("shared-size", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + return images, err +} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go new file mode 100644 index 000000000..91016e493 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go new file mode 100644 index 000000000..56af6d7f9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go new file mode 
100644 index 000000000..a23975591 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -0,0 +1,64 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. +func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go new file mode 100644 index 000000000..845580d4a --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -0,0 +1,54 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "errors" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
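// Editorial sketch, not part of the vendored file: pushing a tag with a
// base64url-encoded AuthConfig in RegistryAuth, mirroring the encoding this
// package uses for registry headers elsewhere; the registry host and
// credentials are placeholders.
//
//	authJSON, err := json.Marshal(types.AuthConfig{Username: "user", Password: "secret"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	rc, err := cli.ImagePush(ctx, "registry.example.com/team/app:v1", types.ImagePushOptions{
//		RegistryAuth: base64.URLEncoding.EncodeToString(authJSON),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer rc.Close()
//	io.Copy(os.Stdout, rc) // the body streams JSON progress messages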
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + name := reference.FamiliarName(ref) + query := url.Values{} + if !options.All { + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + } + + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go new file mode 100644 index 000000000..6a9fb3f41 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + var dels []types.ImageDeleteResponseItem + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return dels, err + } + + err = json.NewDecoder(resp.body).Decode(&dels) + return dels, err +} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go new file mode 100644 index 000000000..d1314e4b2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. 
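// Editorial sketch, not part of the vendored file: saving an image to a local
// tar archive; the tag and file name are placeholders.
//
//	rc, err := cli.ImageSave(ctx, []string{"alpine:latest"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer rc.Close()
//	f, err := os.Create("alpine.tar")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	if _, err := io.Copy(f, rc); err != nil {
//		log.Fatal(err)
//	}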
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go new file mode 100644 index 000000000..e69fa3722 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -0,0 +1,53 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" +) + +// ImageSearch makes the docker host search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + defer ensureReaderClosed(resp) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go new file mode 100644 index 000000000..5652bfc25 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/pkg/errors" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git 
a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go new file mode 100644 index 000000000..c856704e2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/info.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" +) + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return info, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go new file mode 100644 index 000000000..e9c1ed722 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface.go @@ -0,0 +1,201 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
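// Editorial note, not part of the vendored file: callers can depend on one of the
// narrow per-domain interfaces below rather than on *Client, which keeps their own
// code easy to mock in tests. A hypothetical sketch:
//
//	// pullAll needs only image operations, so it asks for ImageAPIClient.
//	func pullAll(ctx context.Context, c client.ImageAPIClient, refs []string) error {
//		for _, ref := range refs {
//			rc, err := c.ImagePull(ctx, ref, types.ImagePullOptions{})
//			if err != nil {
//				return err
//			}
//			io.Copy(io.Discard, rc) // drain the progress stream so the pull completes
//			rc.Close()
//		}
//		return nil
//	}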
+type CommonAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + HTTPClient() *http.Client + ServerVersion(ctx context.Context) (types.Version, error) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) + Dialer() func(context.Context) (net.Conn, error) + Close() error +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) + ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, options container.StopOptions) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, options container.StopOptions) error + ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, 
updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, network, container string, force bool) error + NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, network string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, 
[]byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes 
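// Editorial sketch, not part of the vendored file: creating and then listing
// volumes, reusing cli and ctx from the earlier sketches; the volume name is a
// placeholder.
//
//	vol, err := cli.VolumeCreate(ctx, volume.CreateOptions{Name: "cache", Driver: "local"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("created", vol.Name, "at", vol.Mountpoint)
//	list, err := cli.VolumeList(ctx, filters.NewArgs())
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, v := range list.Volumes {
//		fmt.Println(v.Name, v.Driver)
//	}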
+type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) + VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go new file mode 100644 index 000000000..402ffb512 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go new file mode 100644 index 000000000..5502cd742 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. 
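// Editorial note, not part of the vendored file: the declaration below is the
// standard Go compile-time assertion that a type satisfies an interface. The same
// idiom works for user-defined wrappers, e.g. a hypothetical test double:
//
//	type instrumentedClient struct{ *client.Client }
//	var _ client.APIClient = &instrumentedClient{}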
+var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go new file mode 100644 index 000000000..f05852063 --- /dev/null +++ b/vendor/github.com/docker/docker/client/login.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) + + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go new file mode 100644 index 000000000..571894613 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go new file mode 100644 index 000000000..278d9383a --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go new file mode 100644 index 000000000..dd1567665 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. 
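// Editorial sketch, not part of the vendored file: creating a network and
// attaching/detaching a container; "app-net" and containerID are placeholders.
//
//	created, err := cli.NetworkCreate(ctx, "app-net", types.NetworkCreate{Driver: "bridge"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := cli.NetworkConnect(ctx, created.ID, containerID, nil); err != nil {
//		log.Fatal(err)
//	}
//	if err := cli.NetworkDisconnect(ctx, created.ID, containerID, false); err != nil {
//		log.Fatal(err)
//	}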
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go new file mode 100644 index 000000000..0f90e2bb9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -0,0 +1,49 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { + if networkID == "" { + return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} + } + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return networkResource, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go new file mode 100644 index 000000000..ed2acb557 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworkList returns the list of networks configured in the docker host. 
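// Editorial sketch, not part of the vendored file: listing only networks that use
// a given driver; "bridge" is a placeholder value.
//
//	nets, err := cli.NetworkList(ctx, types.NetworkListOptions{
//		Filters: filters.NewArgs(filters.Arg("driver", "bridge")),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, n := range nets {
//		fmt.Println(n.ID, n.Name)
//	}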
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + return networkResources, err +} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go new file mode 100644 index 000000000..cebb18821 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go new file mode 100644 index 000000000..9d6c6cef0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go new file mode 100644 index 000000000..95ab9b1be --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeInspectWithRaw returns the node information. 
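// Editorial note, not part of the vendored file: the *WithRaw methods return both
// the decoded struct and the raw JSON body, so callers can re-decode fields the
// typed struct does not expose. A hypothetical sketch, with nodeID a placeholder:
//
//	node, raw, err := cli.NodeInspectWithRaw(ctx, nodeID)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(node.Description.Hostname)
//	var extra map[string]any
//	_ = json.Unmarshal(raw, &extra) // inspect fields beyond swarm.Node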
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + if nodeID == "" { + return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} + } + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Node{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go new file mode 100644 index 000000000..c212906bc --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + return nodes, err +} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go new file mode 100644 index 000000000..e44436deb --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NodeRemove removes a Node. +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go new file mode 100644 index 000000000..0d0fc3b78 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeUpdate updates a Node. 
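// Editorial sketch, not part of the vendored file: the usual read-modify-write
// cycle for node updates, passing the node's current version so the daemon can
// reject stale writes; nodeID is a placeholder.
//
//	node, _, err := cli.NodeInspectWithRaw(ctx, nodeID)
//	if err != nil {
//		log.Fatal(err)
//	}
//	spec := node.Spec
//	spec.Availability = swarm.NodeAvailabilityDrain
//	if err := cli.NodeUpdate(ctx, nodeID, node.Version, spec); err != nil {
//		log.Fatal(err)
//	}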
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", version.String()) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go new file mode 100644 index 000000000..099ad4184 --- /dev/null +++ b/vendor/github.com/docker/docker/client/options.go @@ -0,0 +1,210 @@ +package client + +import ( + "context" + "net" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" +) + +// Opt is a configuration option to initialize a client +type Opt func(*Client) error + +// FromEnv configures the client with values from environment variables. +// +// FromEnv uses the following environment variables: +// +// DOCKER_HOST (EnvOverrideHost) to set the URL to the docker server. +// +// DOCKER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to +// use, leave empty for latest. +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). +func FromEnv(c *Client) error { + ops := []Opt{ + WithTLSClientConfigFromEnv(), + WithHostFromEnv(), + WithVersionFromEnv(), + } + for _, op := range ops { + if err := op(c); err != nil { + return err + } + } + return nil +} + +// WithDialContext applies the dialer to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { + return func(c *Client) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialContext + return nil + } + return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) + } +} + +// WithHost overrides the client host with the specified one. +func WithHost(host string) Opt { + return func(c *Client) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// WithHostFromEnv overrides the client host with the host specified in the +// DOCKER_HOST (EnvOverrideHost) environment variable. If DOCKER_HOST is not set, +// or set to an empty value, the host is not modified. 
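// Editorial sketch, not part of the vendored file: composing several Opt values
// when building a client; the host, version, and header values are placeholders,
// and client.NewClientWithOpts is assumed to be this package's constructor.
//
//	cli, err := client.NewClientWithOpts(
//		client.WithHost("tcp://10.0.0.5:2375"),
//		client.WithVersion("1.41"),
//		client.WithHTTPHeaders(map[string]string{"User-Agent": "my-tool/0.1"}),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}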
+func WithHostFromEnv() Opt { + return func(c *Client) error { + if host := os.Getenv(EnvOverrideHost); host != "" { + return WithHost(host)(c) + } + return nil + } +} + +// WithHTTPClient overrides the client http client with the specified one +func WithHTTPClient(client *http.Client) Opt { + return func(c *Client) error { + if client != nil { + c.client = client + } + return nil + } +} + +// WithTimeout configures the time limit for requests made by the HTTP client +func WithTimeout(timeout time.Duration) Opt { + return func(c *Client) error { + c.client.Timeout = timeout + return nil + } +} + +// WithHTTPHeaders overrides the client default http headers +func WithHTTPHeaders(headers map[string]string) Opt { + return func(c *Client) error { + c.customHTTPHeaders = headers + return nil + } +} + +// WithScheme overrides the client scheme with the specified one +func WithScheme(scheme string) Opt { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// WithTLSClientConfig applies a tls config to the client transport. +func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { + return func(c *Client) error { + opts := tlsconfig.Options{ + CAFile: cacertPath, + CertFile: certPath, + KeyFile: keyPath, + ExclusiveRootPools: true, + } + config, err := tlsconfig.Client(opts) + if err != nil { + return errors.Wrap(err, "failed to create tls config") + } + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.TLSClientConfig = config + return nil + } + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } +} + +// WithTLSClientConfigFromEnv configures the client's TLS settings with the +// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables. +// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified. +// +// WithTLSClientConfigFromEnv uses the following environment variables: +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). +func WithTLSClientConfigFromEnv() Opt { + return func(c *Client) error { + dockerCertPath := os.Getenv(EnvOverrideCertPath) + if dockerCertPath == "" { + return nil + } + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return err + } + + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, + } + return nil + } +} + +// WithVersion overrides the client version with the specified one. If an empty +// version is specified, the value will be ignored to allow version negotiation. +func WithVersion(version string) Opt { + return func(c *Client) error { + if version != "" { + c.version = version + c.manualOverride = true + } + return nil + } +} + +// WithVersionFromEnv overrides the client version with the version specified in +// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set, +// the version is not modified. 
+func WithVersionFromEnv() Opt { + return func(c *Client) error { + return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) + } +} + +// WithAPIVersionNegotiation enables automatic API version negotiation for the client. +// With this option enabled, the client automatically negotiates the API version +// to use when making requests. API version negotiation is performed on the first +// request; subsequent requests will not re-negotiate. +func WithAPIVersionNegotiation() Opt { + return func(c *Client) error { + c.negotiateVersion = true + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go new file mode 100644 index 000000000..27e8695cb --- /dev/null +++ b/vendor/github.com/docker/docker/client/ping.go @@ -0,0 +1,75 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/http" + "path" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/errdefs" +) + +// Ping pings the server and returns the value of the "Docker-Experimental", +// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use +// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported +// by the daemon. +func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + + // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() + // because ping requests are used during API version negotiation, so we want + // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping + req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err == nil { + defer ensureReaderClosed(serverResp) + switch serverResp.statusCode { + case http.StatusOK, http.StatusInternalServerError: + // Server handled the request, so parse the response + return parsePingResponse(cli, serverResp) + } + } else if IsErrConnectionFailed(err) { + return ping, err + } + + req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err = cli.doRequest(ctx, req) + defer ensureReaderClosed(serverResp) + if err != nil { + return ping, err + } + return parsePingResponse(cli, serverResp) +} + +func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { + var ping types.Ping + if resp.header == nil { + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) + } + ping.APIVersion = resp.header.Get("API-Version") + ping.OSType = resp.header.Get("OSType") + if resp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + if bv := resp.header.Get("Builder-Version"); bv != "" { + ping.BuilderVersion = types.BuilderVersion(bv) + } + if si := resp.header.Get("Swarm"); si != "" { + parts := strings.SplitN(si, "/", 2) + ping.SwarmStatus = &swarm.Status{ + NodeState: swarm.LocalNodeState(parts[0]), + ControlAvailable: len(parts) == 2 && parts[1] == "manager", + } + } + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) +} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go new file mode 100644 index 000000000..b95dbaf68 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -0,0 +1,23 @@ +package client 
// import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go new file mode 100644 index 000000000..01f6574f9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go new file mode 100644 index 000000000..736da48bd --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go new file mode 100644 index 000000000..f09e46066 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + if name == "" { + return nil, nil, objectNotFoundError{object: "plugin", id: name} + } + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go new file mode 100644 index 000000000..012afe61c --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -0,0 +1,113 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "io" + "net/url" + + 
"github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go new file mode 100644 index 000000000..2091a054d --- /dev/null +++ 
b/vendor/github.com/docker/docker/client/plugin_list.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + query := url.Values{} + + if filter.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + return plugins, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go new file mode 100644 index 000000000..d20bfe844 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go new file mode 100644 index 000000000..4cd66958c --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go new file mode 100644 index 000000000..dcf5752ca --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -0,0 +1,12 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go new file mode 100644 index 000000000..115cea945 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -0,0 +1,39 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) 
PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) +} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go new file mode 100644 index 000000000..c799095c1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/request.go @@ -0,0 +1,277 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int + reqURL *url.URL +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) +} + +// post sends an http request to the docker API using the method POST with a specific Go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. 
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == http.MethodPost || method == http.MethodPut) + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. (See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + + resp, err := cli.doRequest(ctx, req) + switch { + case errors.Is(err, context.Canceled): + return serverResponse{}, errdefs.Cancelled(err) + case errors.Is(err, context.DeadlineExceeded): + return serverResponse{}, errdefs.Deadline(err) + case err == nil: + err = cli.checkResponseErr(resp) + } + return resp, errdefs.FromStatusCode(err, resp.statusCode) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} + + req = req.WithContext(ctx) + resp, err := cli.client.Do(req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings") + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. 
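+		// Returning the bare context errors keeps both errors.Is checks and
+		// direct equality comparisons in caller code working.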
+		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+			return serverResp, err
+		}
+
+		if nErr, ok := err.(*url.Error); ok {
+			if nErr, ok := nErr.Err.(*net.OpError); ok {
+				if os.IsPermission(nErr.Err) {
+					return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)
+				}
+			}
+		}
+
+		if err, ok := err.(net.Error); ok {
+			if err.Timeout() {
+				return serverResp, ErrorConnectionFailed(cli.host)
+			}
+			if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
+				return serverResp, ErrorConnectionFailed(cli.host)
+			}
+		}
+
+		// Although there's not a strongly typed error for this in go-winio,
+		// lots of people are using the default configuration for the docker
+		// daemon on Windows where the daemon is listening on a named pipe
+		// `//./pipe/docker_engine`, and the client must be running elevated.
+		// Give users a clue rather than the not-overly useful message
+		// such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
+		// open //./pipe/docker_engine: The system cannot find the file specified.`.
+		// Note we can't string compare "The system cannot find the file specified" as
+		// this is localised - for example in French the error would be
+		// `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
+		if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
+			// Checks if the client is running with elevated privileges: opening
+			// \\.\PHYSICALDRIVE0 only succeeds for elevated processes, so a
+			// failed open means the client lacks elevation, while a successful
+			// open suggests the daemon itself is not running.
+			if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr != nil {
+				err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect")
+			} else {
+				f.Close()
+				err = errors.Wrap(err, "this error may indicate that the docker daemon is not running")
+			}
+		}
+
+		return serverResp, errors.Wrap(err, "error during connect")
+	}
+
+	if resp != nil {
+		serverResp.statusCode = resp.StatusCode
+		serverResp.body = resp.Body
+		serverResp.header = resp.Header
+	}
+	return serverResp, nil
+}
+
+func (cli *Client) checkResponseErr(serverResp serverResponse) error {
+	if serverResp.statusCode >= 200 && serverResp.statusCode < 400 {
+		return nil
+	}
+
+	var body []byte
+	var err error
+	if serverResp.body != nil {
+		bodyMax := 1 * 1024 * 1024 // 1 MiB
+		bodyR := &io.LimitedReader{
+			R: serverResp.body,
+			N: int64(bodyMax),
+		}
+		body, err = io.ReadAll(bodyR)
+		if err != nil {
+			return err
+		}
+		if bodyR.N == 0 {
+			return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL)
+		}
+	}
+	if len(body) == 0 {
+		return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
+	}
+
+	var ct string
+	if serverResp.header != nil {
+		ct = serverResp.header.Get("Content-Type")
+	}
+
+	var errorMessage string
+	if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" {
+		var errorResponse types.ErrorResponse
+		if err := json.Unmarshal(body, &errorResponse); err != nil {
+			return errors.Wrap(err, "Error reading JSON")
+		}
+		errorMessage = strings.TrimSpace(errorResponse.Message)
+	} else {
+		errorMessage = strings.TrimSpace(string(body))
+	}
+
+	return errors.Wrap(errors.New(errorMessage), "Error response from daemon")
+}
+
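+// Because sendRequest funnels every failure through errdefs.FromStatusCode,
+// callers can classify errors by type instead of matching message strings.
+// An illustrative sketch ("no-such-volume" is a placeholder):
+//
+//	_, err := cli.VolumeInspect(ctx, "no-such-volume")
+//	if errdefs.IsNotFound(err) {
+//		// the daemon answered 404 Not Found
+//	}
+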
+func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request {
+	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers,
+	// so that the user cannot override our headers.
+	for k, v := range cli.customHTTPHeaders {
+		if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" {
+			continue
+		}
+		req.Header.Set(k, v)
+	}
+
+	for k, v := range headers {
+		req.Header[http.CanonicalHeaderKey(k)] = v
+	}
+	return req
+}
+
+func encodeData(data interface{}) (*bytes.Buffer, error) {
+	params := bytes.NewBuffer(nil)
+	if data != nil {
+		if err := json.NewEncoder(params).Encode(data); err != nil {
+			return nil, err
+		}
+	}
+	return params, nil
+}
+
+func ensureReaderClosed(response serverResponse) {
+	if response.body != nil {
+		// Drain up to 512 bytes and close the body to let the Transport reuse the connection
+		io.CopyN(io.Discard, response.body, 512)
+		response.body.Close()
+	}
+}
diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go
new file mode 100644
index 000000000..c65d38a19
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_create.go
@@ -0,0 +1,25 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// SecretCreate creates a new secret.
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
+	var response types.SecretCreateResponse
+	if err := cli.NewVersionError("1.25", "secret create"); err != nil {
+		return response, err
+	}
+	resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go
new file mode 100644
index 000000000..5906874b1
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_inspect.go
@@ -0,0 +1,36 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// SecretInspectWithRaw returns the secret information with raw data
+func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) {
+	if err := cli.NewVersionError("1.25", "secret inspect"); err != nil {
+		return swarm.Secret{}, nil, err
+	}
+	if id == "" {
+		return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id}
+	}
+	resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return swarm.Secret{}, nil, err
+	}
+
+	body, err := io.ReadAll(resp.body)
+	if err != nil {
+		return swarm.Secret{}, nil, err
+	}
+
+	var secret swarm.Secret
+	rdr := bytes.NewReader(body)
+	err = json.NewDecoder(rdr).Decode(&secret)
+
+	return secret, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go
new file mode 100644
index 000000000..a0289c9f4
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_list.go
@@ -0,0 +1,38 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	
"github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// SecretList returns the list of secrets. +func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + return secrets, err +} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go new file mode 100644 index 000000000..f47f68b6e --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// SecretRemove removes a secret. +func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go new file mode 100644 index 000000000..2e939e8ce --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretUpdate attempts to update a secret. +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", version.String()) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go new file mode 100644 index 000000000..23024d0f8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -0,0 +1,178 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// ServiceCreate creates a new service. 
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var response types.ServiceCreateResponse + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { + service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(service); err != nil { + return response, err + } + + // ensure that the image is tagged + var resolveWarning string + switch { + case service.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + case service.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + } + + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + if resolveWarning != "" { + response.Warnings = append(response.Warnings, resolveWarning) + } + + return response, err +} + +func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + var warning string + if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil { + warning = digestWarning(taskSpec.ContainerSpec.Image) + } else { + taskSpec.ContainerSpec.Image = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + } + return warning +} + +func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + var warning string + if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil { + warning = digestWarning(taskSpec.PluginSpec.Remote) + } else { + taskSpec.PluginSpec.Remote = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + } + return warning +} + +func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range 
distributionInspect.Platforms {
+			// clear architecture field for arm. This is a temporary patch to address
+			// https://github.com/docker/swarmkit/issues/2294. The issue is that while
+			// image manifests report "arm" as the architecture, the node reports
+			// something like "armv7l" (includes the variant), which causes arm images
+			// to stop working with swarm mode. This patch removes the architecture
+			// constraint for arm images to ensure tasks get scheduled.
+			arch := p.Architecture
+			if strings.ToLower(arch) == "arm" {
+				arch = ""
+			}
+			platforms = append(platforms, swarm.Platform{
+				Architecture: arch,
+				OS:           p.OS,
+			})
+		}
+	}
+	return imageWithDigest, platforms, err
+}
+
+// imageWithDigestString takes an image string and a digest, and updates
+// the image string if it didn't originally contain a digest. It returns
+// the image unmodified in all other situations.
+func imageWithDigestString(image string, dgst digest.Digest) string {
+	namedRef, err := reference.ParseNormalizedNamed(image)
+	if err == nil {
+		if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
+			// ensure that image gets a default tag if none is provided
+			img, err := reference.WithDigest(namedRef, dgst)
+			if err == nil {
+				return reference.FamiliarString(img)
+			}
+		}
+	}
+	return image
+}
+
+// imageWithTagString takes an image string and returns a tagged image
+// string, adding a 'latest' tag if one was not provided. It returns an
+// empty string if the image cannot be parsed as a named reference.
+func imageWithTagString(image string) string {
+	namedRef, err := reference.ParseNormalizedNamed(image)
+	if err == nil {
+		return reference.FamiliarString(reference.TagNameOnly(namedRef))
+	}
+	return ""
+}
+
+// digestWarning constructs a formatted warning string using the
+// image name that could not be pinned by digest. The formatting
+// is hardcoded, but could be made smarter in the future.
+func digestWarning(image string) string {
+	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
+}
+
+func validateServiceSpec(s swarm.ServiceSpec) error {
+	if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
+		return errors.New("must not specify both a container spec and a plugin spec in the task template")
+	}
+	if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
+		return errors.New("mismatched runtime with plugin spec")
+	}
+	if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
+		return errors.New("mismatched runtime with container spec")
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
new file mode 100644
index 000000000..cee020c98
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_inspect.go
@@ -0,0 +1,37 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// ServiceInspectWithRaw returns the service information and the raw data.
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if serviceID == "" { + return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} + } + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Service{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go new file mode 100644 index 000000000..f97ec75a5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -0,0 +1,39 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceList returns the list of services. +func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + if options.Status { + query.Set("status", "true") + } + + resp, err := cli.get(ctx, "/services", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + return services, err +} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go new file mode 100644 index 000000000..906fd4059 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -0,0 +1,52 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. 
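+//
+// An illustrative read (a sketch; "serviceID" is a placeholder). For services
+// created without a TTY, the returned stream is multiplexed in the stdcopy
+// format and can be demultiplexed with github.com/docker/docker/pkg/stdcopy:
+//
+//	rc, err := cli.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{
+//		ShowStdout: true,
+//		ShowStderr: true,
+//		Tail:       "100",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, rc)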
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go new file mode 100644 index 000000000..2c46326eb --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go new file mode 100644 index 000000000..8014b8625 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -0,0 +1,74 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. +// It should be the value as set *before* the update. You can find this value in the Meta field +// of swarm.Service, which can be found using ServiceInspectWithRaw. 
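+//
+// An illustrative read-modify-write cycle (a sketch; "serviceID" is a
+// placeholder):
+//
+//	svc, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	svc.Spec.TaskTemplate.ForceUpdate++
+//	_, err = cli.ServiceUpdate(ctx, serviceID, svc.Version, svc.Spec, types.ServiceUpdateOptions{})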
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + query = url.Values{} + response = types.ServiceUpdateResponse{} + ) + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", version.String()) + + if err := validateServiceSpec(service); err != nil { + return response, err + } + + // ensure that the image is tagged + var resolveWarning string + switch { + case service.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + case service.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + } + + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + if resolveWarning != "" { + response.Warnings = append(response.Warnings, resolveWarning) + } + + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go new file mode 100644 index 000000000..19f59dd58 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go new file mode 100644 index 000000000..da3c1637e --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInit initializes the swarm. 
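+//
+// The returned string is the node ID of the new manager. An illustrative
+// call (a sketch; the address is a placeholder):
+//
+//	nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{
+//		ListenAddr: "0.0.0.0:2377",
+//	})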
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go new file mode 100644 index 000000000..b52b67a88 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInspect inspects the swarm. +func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go new file mode 100644 index 000000000..a1cf0455d --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmJoin joins the swarm. +func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go new file mode 100644 index 000000000..90ca84b36 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// SwarmLeave leaves the swarm. +func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go new file mode 100644 index 000000000..d2412f7d4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUnlock unlocks locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go new file mode 100644 index 000000000..9fde7d75e --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUpdate updates the swarm. 
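+//
+// An illustrative rotation of the worker join token (a sketch), reusing the
+// version reported by SwarmInspect to avoid conflicting writes:
+//
+//	sw, err := cli.SwarmInspect(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	err = cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{RotateWorkerToken: true})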
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", version.String()) + query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken)) + query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go new file mode 100644 index 000000000..dde1f6c59 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// TaskInspectWithRaw returns the task information and its raw representation. +func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + if taskID == "" { + return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} + } + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Task{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go new file mode 100644 index 000000000..4869b4449 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -0,0 +1,35 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// TaskList returns the list of tasks. +func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + return tasks, err +} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go new file mode 100644 index 000000000..6222fab57 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -0,0 +1,51 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// TaskLogs returns the logs generated by a task in an io.ReadCloser. +// It's up to the caller to close the stream. 
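+//
+// The options mirror ServiceLogs; e.g. a follow-mode tail of the last ten
+// minutes (a sketch; "taskID" is a placeholder):
+//
+//	rc, err := cli.TaskLogs(ctx, taskID, types.ContainerLogsOptions{
+//		ShowStdout: true,
+//		Follow:     true,
+//		Since:      "10m",
+//	})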
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go new file mode 100644 index 000000000..554134436 --- /dev/null +++ b/vendor/github.com/docker/docker/client/transport.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "crypto/tls" + "net/http" +) + +// resolveTLSConfig attempts to resolve the TLS configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go new file mode 100644 index 000000000..7f3ff44eb --- /dev/null +++ b/vendor/github.com/docker/docker/client/utils.go @@ -0,0 +1,34 @@ +package client // import "github.com/docker/docker/client" + +import ( + "net/url" + "regexp" + + "github.com/docker/docker/api/types/filters" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. +func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToJSON(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go new file mode 100644 index 000000000..8f17ff4e8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/version.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// ServerVersion returns information of the docker client and server host. 
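+//
+// An illustrative call (a sketch):
+//
+//	v, err := cli.ServerVersion(ctx)
+//	if err == nil {
+//		fmt.Printf("engine %s (API %s, %s)\n", v.Version, v.APIVersion, v.Os)
+//	}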
+func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + return server, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go new file mode 100644 index 000000000..b3b182437 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/volume" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { + var vol volume.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + defer ensureReaderClosed(resp) + if err != nil { + return vol, err + } + err = json.NewDecoder(resp.body).Decode(&vol) + return vol, err +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go new file mode 100644 index 000000000..b3ba4e604 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/volume" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) { + vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return vol, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { + if volumeID == "" { + return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + } + + var vol volume.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return vol, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return vol, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&vol) + return vol, body, err +} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go new file mode 100644 index 000000000..d8204f8db --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/volume" +) + +// VolumeList returns the volumes configured in the docker host. 
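+//
+// The filter argument may be empty; an illustrative dangling-volume query
+// (a sketch):
+//
+//	dangling, err := cli.VolumeList(ctx, filters.NewArgs(filters.Arg("dangling", "true")))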
+func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) { + var volumes volume.ListResponse + query := url.Values{} + + if filter.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + return volumes, err +} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go new file mode 100644 index 000000000..6e324708f --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go new file mode 100644 index 000000000..1f2643836 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/versions" +) + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go new file mode 100644 index 000000000..33bd31e53 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_update.go @@ -0,0 +1,24 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" +) + +// VolumeUpdate updates a volume. This only works for Cluster Volumes, and +// only some fields can be updated. 
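+//
+// An illustrative update (a sketch; "volID" is a placeholder): inspect the
+// volume first to obtain its cluster version and current spec, then submit
+// the modified spec:
+//
+//	vol, err := cli.VolumeInspect(ctx, volID)
+//	if err != nil || vol.ClusterVolume == nil {
+//		return err
+//	}
+//	spec := vol.ClusterVolume.Spec
+//	spec.Availability = volume.AvailabilityDrain
+//	err = cli.VolumeUpdate(ctx, volID, vol.ClusterVolume.Version, volume.UpdateOptions{Spec: &spec})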
+func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { + if err := cli.NewVersionError("1.42", "volume update"); err != nil { + return err + } + + query := url.Values{} + query.Set("version", version.String()) + + resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go new file mode 100644 index 000000000..99846ffdd --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go @@ -0,0 +1,81 @@ +package sockets + +import ( + "errors" + "net" + "sync" +) + +var errClosed = errors.New("use of closed network connection") + +// InmemSocket implements net.Listener using in-memory only connections. +type InmemSocket struct { + chConn chan net.Conn + chClose chan struct{} + addr string + mu sync.Mutex +} + +// dummyAddr is used to satisfy net.Addr for the in-mem socket +// it is just stored as a string and returns the string for all calls +type dummyAddr string + +// NewInmemSocket creates an in-memory only net.Listener +// The addr argument can be any string, but is used to satisfy the `Addr()` part +// of the net.Listener interface +func NewInmemSocket(addr string, bufSize int) *InmemSocket { + return &InmemSocket{ + chConn: make(chan net.Conn, bufSize), + chClose: make(chan struct{}), + addr: addr, + } +} + +// Addr returns the socket's addr string to satisfy net.Listener +func (s *InmemSocket) Addr() net.Addr { + return dummyAddr(s.addr) +} + +// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. +func (s *InmemSocket) Accept() (net.Conn, error) { + select { + case conn := <-s.chConn: + return conn, nil + case <-s.chClose: + return nil, errClosed + } +} + +// Close closes the listener. It will be unavailable for use once closed. +func (s *InmemSocket) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-s.chClose: + default: + close(s.chClose) + } + return nil +} + +// Dial is used to establish a connection with the in-mem server +func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { + srvConn, clientConn := net.Pipe() + select { + case s.chConn <- srvConn: + case <-s.chClose: + return nil, errClosed + } + + return clientConn, nil +} + +// Network returns the addr string, satisfies net.Addr +func (a dummyAddr) Network() string { + return string(a) +} + +// String returns the string form +func (a dummyAddr) String() string { + return string(a) +} diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go new file mode 100644 index 000000000..98e9a1dc6 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -0,0 +1,51 @@ +package sockets + +import ( + "net" + "net/url" + "os" + "strings" + + "golang.org/x/net/proxy" +) + +// GetProxyEnv allows access to the uppercase and the lowercase forms of +// proxy-related variables. See the Go specification for details on these +// variables. 
https://golang.org/pkg/net/http/ +func GetProxyEnv(key string) string { + proxyValue := os.Getenv(strings.ToUpper(key)) + if proxyValue == "" { + return os.Getenv(strings.ToLower(key)) + } + return proxyValue +} + +// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a +// proxy.Dialer which will route the connections through the proxy using the +// given dialer. +func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { + allProxy := GetProxyEnv("all_proxy") + if len(allProxy) == 0 { + return direct, nil + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return direct, err + } + + proxyFromURL, err := proxy.FromURL(proxyURL, direct) + if err != nil { + return direct, err + } + + noProxy := GetProxyEnv("no_proxy") + if len(noProxy) == 0 { + return proxyFromURL, nil + } + + perHost := proxy.NewPerHost(proxyFromURL, direct) + perHost.AddFromString(noProxy) + + return perHost, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go new file mode 100644 index 000000000..a1d7beb4d --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -0,0 +1,38 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "errors" + "net" + "net/http" + "time" +) + +// Why 32? See https://github.com/docker/docker/pull/8035. +const defaultTimeout = 32 * time.Second + +// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. +var ErrProtocolNotAvailable = errors.New("protocol not available") + +// ConfigureTransport configures the specified Transport according to the +// specified proto and addr. +// If the proto is unix (using a unix socket to communicate) or npipe the +// compression is disabled. +func ConfigureTransport(tr *http.Transport, proto, addr string) error { + switch proto { + case "unix": + return configureUnixTransport(tr, proto, addr) + case "npipe": + return configureNpipeTransport(tr, proto, addr) + default: + tr.Proxy = http.ProxyFromEnvironment + dialer, err := DialerFromEnvironment(&net.Dialer{ + Timeout: defaultTimeout, + }) + if err != nil { + return err + } + tr.Dial = dialer.Dial + } + return nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go new file mode 100644 index 000000000..386cf0dbb --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package sockets + +import ( + "fmt" + "net" + "net/http" + "syscall" + "time" +) + +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("Unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + return nil +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +// DialPipe connects to a Windows named pipe. +// This is not supported on other OSes. 
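+//
+// Since the stub below always returns syscall.EAFNOSUPPORT, a portable
+// caller can use that error to fall back to a unix socket (a sketch, not
+// part of the upstream file; the pipe and socket paths are only examples):
+//
+//	conn, err := sockets.DialPipe(`\\.\pipe\docker_engine`, 32*time.Second)
+//	if err == syscall.EAFNOSUPPORT {
+//		conn, err = net.DialTimeout("unix", "/var/run/docker.sock", 32*time.Second)
+//	}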
+func DialPipe(_ string, _ time.Duration) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go new file mode 100644 index 000000000..5c21644e1 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -0,0 +1,27 @@ +package sockets + +import ( + "net" + "net/http" + "time" + + "github.com/Microsoft/go-winio" +) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + return nil +} + +// DialPipe connects to a Windows named pipe. +func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(addr, &timeout) +} diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go new file mode 100644 index 000000000..53cbb6c79 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -0,0 +1,22 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "crypto/tls" + "net" +) + +// NewTCPSocket creates a TCP socket listener with the specified address and +// the specified tls configuration. If TLSConfig is set, will encapsulate the +// TCP listener inside a TLS one. +func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + if tlsConfig != nil { + tlsConfig.NextProtos = []string{"http/1.1"} + l = tls.NewListener(l, tlsConfig) + } + return l, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go new file mode 100644 index 000000000..a8b5dbb6f --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -0,0 +1,32 @@ +// +build !windows + +package sockets + +import ( + "net" + "os" + "syscall" +) + +// NewUnixSocket creates a unix socket with the specified path and group. +func NewUnixSocket(path string, gid int) (net.Listener, error) { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return nil, err + } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + if err := os.Chown(path, 0, gid); err != nil { + l.Close() + return nil, err + } + if err := os.Chmod(path, 0660); err != nil { + l.Close() + return nil, err + } + return l, nil +} diff --git a/vendor/github.com/docker/go/LICENSE b/vendor/github.com/docker/go/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/docker/go/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/go/canonical/json/decode.go b/vendor/github.com/docker/go/canonical/json/decode.go new file mode 100644 index 000000000..72b981c53 --- /dev/null +++ b/vendor/github.com/docker/go/canonical/json/decode.go @@ -0,0 +1,1168 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. 
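+// For instance (an illustrative note, not upstream text), decoding
+// `[1,2,3]` into a [2]int stores [1,2], and decoding `[1]` into a
+// [2]int leaves [1,0], per the two rules that follow: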
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a string-keyed map, Unmarshal first
+// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
+// Otherwise Unmarshal reuses the existing map, keeping existing entries.
+// Unmarshal then stores key-value pairs from the JSON object into the map.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+	// Check for well-formedness.
+	// Avoids filling out half a data structure
+	// before discovering a JSON syntax error.
+	var d decodeState
+	err := checkValid(data, &d.scan)
+	if err != nil {
+		return err
+	}
+
+	d.init(data)
+	return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+	UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+	Value  string       // description of JSON value - "bool", "array", "number -5"
+	Type   reflect.Type // type of Go value it could not be assigned to
+	Offset int64        // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+	Key   string
+	Type  reflect.Type
+	Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
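+//
+// Both of the following calls return an *InvalidUnmarshalError (an
+// illustrative note, not upstream text):
+//
+//	data := []byte(`{"A":1}`)
+//	var s struct{ A int }
+//	_ = Unmarshal(data, s)           // non-pointer value
+//	_ = Unmarshal(data, (*int)(nil)) // nil pointer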
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + useNumber bool + canonical bool +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. 
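+// It is used to hand a complete sub-document to a type's UnmarshalJSON
+// method without decoding the value here first.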
+func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
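+		// A non-pointer stored in an interface is only an unaddressable
+		// copy, so descending into it could never make the write visible
+		// to the caller.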
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. 
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquoteBytes(item) + if !ok { + d.error(errPhase) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type(), false) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. 
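+//
+// For instance (an illustrative note, not upstream text), with
+// d.useNumber set the literal 1e400 is preserved as Number("1e400"),
+// while the default float64 path overflows in strconv.ParseFloat and is
+// reported as an UnmarshalTypeError.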
+func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == 
numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. 
+ op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
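+		// Invalid bytes decode as utf8.RuneError with size 1, so they
+		// are re-encoded as U+FFFD, matching the Unmarshal documentation
+		// above.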
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/docker/go/canonical/json/encode.go b/vendor/github.com/docker/go/canonical/json/encode.go new file mode 100644 index 000000000..f3491b160 --- /dev/null +++ b/vendor/github.com/docker/go/canonical/json/encode.go @@ -0,0 +1,1250 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. 
It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + return marshal(v, false) +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalCanonical is like Marshal but encodes into Canonical JSON. 
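+// Canonical JSON writes object members in sorted key order and omits
+// insignificant whitespace, so equal values always encode to the same
+// bytes; that stability is what makes the output suitable for hashing
+// and signing, as in Notary, which uses this fork of encoding/json.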
+// Read more at: http://wiki.laptop.org/go/Canonical_JSON
+func MarshalCanonical(v interface{}) ([]byte, error) {
+	return marshal(v, true)
+}
+
+func marshal(v interface{}, canonical bool) ([]byte, error) {
+	e := &encodeState{canonical: canonical}
+	err := e.marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	return e.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go
new file mode 100644
index 000000000..019c85429
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/redoc.go
@@ -0,0 +1,103 @@
+package middleware
+
+import (
+	"bytes"
+	"fmt"
+	"html/template"
+	"net/http"
+	"path"
+)
+
+// RedocOpts configures the Redoc middlewares
+type RedocOpts struct {
+	// BasePath for the UI path, defaults to: /
+	BasePath string
+	// Path combines with BasePath for the full UI path, defaults to: docs
+	Path string
+	// SpecURL the url to find the spec for
+	SpecURL string
+	// RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js
+	RedocURL string
+	// Title for the documentation site, default to: API documentation
+	Title string
+}
+
+// EnsureDefaults in case some options are missing
+func (r *RedocOpts) EnsureDefaults() {
+	if r.BasePath == "" {
+		r.BasePath = "/"
+	}
+	if r.Path == "" {
+		r.Path = "docs"
+	}
+	if r.SpecURL == "" {
+		r.SpecURL = "/swagger.json"
+	}
+	if r.RedocURL == "" {
+		r.RedocURL = redocLatest
+	}
+	if r.Title == "" {
+		r.Title = "API documentation"
+	}
+}
+
+// Redoc creates a middleware to serve a documentation site for a swagger spec.
+// This allows for altering the spec before starting the http listener.
+//
+func Redoc(opts RedocOpts, next http.Handler) http.Handler {
+	opts.EnsureDefaults()
+
+	pth := path.Join(opts.BasePath, opts.Path)
+	tmpl := template.Must(template.New("redoc").Parse(redocTemplate))
+
+	buf := bytes.NewBuffer(nil)
+	_ = tmpl.Execute(buf, opts)
+	b := buf.Bytes()
+
+	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == pth {
+			rw.Header().Set("Content-Type", "text/html; charset=utf-8")
+			rw.WriteHeader(http.StatusOK)
+
+			_, _ = rw.Write(b)
+			return
+		}
+
+		if next == nil {
+			rw.Header().Set("Content-Type", "text/plain")
+			rw.WriteHeader(http.StatusNotFound)
+			_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
+			return
+		}
+		next.ServeHTTP(rw, r)
+	})
+}
+
+const (
+	redocLatest   = "https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js"
+	redocTemplate = `<!DOCTYPE html>
+<html>
+  <head>
+    <title>{{ .Title }}</title>
+    <!-- needed for adaptive design -->
+    <meta charset="utf-8"/>
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+
+    <!--
+    ReDoc doesn't change outer page styles
+    -->
+    <style>
+      body {
+        margin: 0;
+        padding: 0;
+      }
+    </style>
+  </head>
+  <body>
+    <redoc spec-url='{{ .SpecURL }}'></redoc>
+    <script src="{{ .RedocURL }}"> </script>
+  </body>
+</html>
+`
+)
diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go
new file mode 100644
index 000000000..760c37861
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/request.go
@@ -0,0 +1,104 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "net/http" + "reflect" + + "github.com/go-openapi/errors" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +// UntypedRequestBinder binds and validates the data from a http request +type UntypedRequestBinder struct { + Spec *spec.Swagger + Parameters map[string]spec.Parameter + Formats strfmt.Registry + paramBinders map[string]*untypedParamBinder +} + +// NewUntypedRequestBinder creates a new binder for reading a request. +func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *UntypedRequestBinder { + binders := make(map[string]*untypedParamBinder) + for fieldName, param := range parameters { + binders[fieldName] = newUntypedParamBinder(param, spec, formats) + } + return &UntypedRequestBinder{ + Parameters: parameters, + paramBinders: binders, + Spec: spec, + Formats: formats, + } +} + +// Bind perform the databinding and validation +func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data interface{}) error { + val := reflect.Indirect(reflect.ValueOf(data)) + isMap := val.Kind() == reflect.Map + var result []error + debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath()) + for fieldName, param := range o.Parameters { + binder := o.paramBinders[fieldName] + debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath()) + var target reflect.Value + if !isMap { + binder.Name = fieldName + target = val.FieldByName(fieldName) + } + + if isMap { + tpe := binder.Type() + if tpe == nil { + if param.Schema.Type.Contains("array") { + tpe = reflect.TypeOf([]interface{}{}) + } else { + tpe = reflect.TypeOf(map[string]interface{}{}) + } + } + target = reflect.Indirect(reflect.New(tpe)) + } + + if !target.IsValid() { + result = append(result, errors.New(500, "parameter name %q is an unknown field", binder.Name)) + continue + } + + if err := binder.Bind(request, routeParams, consumer, target); err != nil { + result = append(result, err) + continue + } + + if binder.validator != nil { + rr := binder.validator.Validate(target.Interface()) + if rr != nil && rr.HasErrors() { + result = append(result, rr.AsError()) + } + } + + if isMap { + val.SetMapIndex(reflect.ValueOf(param.Name), target) + } + } + + if len(result) > 0 { + return errors.CompositeValidationError(result...) + } + + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go new file mode 100644 index 000000000..5052031c8 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/router.go @@ -0,0 +1,488 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "fmt" + "net/http" + fpath "path" + "regexp" + "strings" + + "github.com/go-openapi/runtime/security" + "github.com/go-openapi/swag" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware/denco" +) + +// RouteParam is a object to capture route params in a framework agnostic way. +// implementations of the muxer should use these route params to communicate with the +// swagger framework +type RouteParam struct { + Name string + Value string +} + +// RouteParams the collection of route params +type RouteParams []RouteParam + +// Get gets the value for the route param for the specified key +func (r RouteParams) Get(name string) string { + vv, _, _ := r.GetOK(name) + if len(vv) > 0 { + return vv[len(vv)-1] + } + return "" +} + +// GetOK gets the value but also returns booleans to indicate if a key or value +// is present. This aids in validation and satisfies an interface in use there +// +// The returned values are: data, has key, has value +func (r RouteParams) GetOK(name string) ([]string, bool, bool) { + for _, p := range r { + if p.Name == name { + return []string{p.Value}, true, p.Value != "" + } + } + return nil, false, false +} + +// NewRouter creates a new context aware router middleware +func NewRouter(ctx *Context, next http.Handler) http.Handler { + if ctx.router == nil { + ctx.router = DefaultRouter(ctx.spec, ctx.api) + } + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if _, rCtx, ok := ctx.RouteInfo(r); ok { + next.ServeHTTP(rw, rCtx) + return + } + + // Not found, check if it exists in the other methods first + if others := ctx.AllowedMethods(r); len(others) > 0 { + ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) + return + } + + ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath())) + }) +} + +// RoutableAPI represents an interface for things that can serve +// as a provider of implementations for the swagger router +type RoutableAPI interface { + HandlerFor(string, string) (http.Handler, bool) + ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error) + ConsumersFor([]string) map[string]runtime.Consumer + ProducersFor([]string) map[string]runtime.Producer + AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator + Authorizer() runtime.Authorizer + Formats() strfmt.Registry + DefaultProduces() string + DefaultConsumes() string +} + +// Router represents a swagger aware router +type Router interface { + Lookup(method, path string) (*MatchedRoute, bool) + OtherMethods(method, path string) []string +} + +type defaultRouteBuilder struct { + spec *loads.Document + analyzer *analysis.Spec + api RoutableAPI + records map[string][]denco.Record +} + +type defaultRouter struct { + spec *loads.Document + routers map[string]*denco.Router +} + +func 
newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder { + return &defaultRouteBuilder{ + spec: spec, + analyzer: analysis.New(spec.Spec()), + api: api, + records: make(map[string][]denco.Record), + } +} + +// DefaultRouter creates a default implemenation of the router +func DefaultRouter(spec *loads.Document, api RoutableAPI) Router { + builder := newDefaultRouteBuilder(spec, api) + if spec != nil { + for method, paths := range builder.analyzer.Operations() { + for path, operation := range paths { + fp := fpath.Join(spec.BasePath(), path) + debugLog("adding route %s %s %q", method, fp, operation.ID) + builder.AddRoute(method, fp, operation) + } + } + } + return builder.Build() +} + +// RouteAuthenticator is an authenticator that can compose several authenticators together. +// It also knows when it contains an authenticator that allows for anonymous pass through. +// Contains a group of 1 or more authenticators that have a logical AND relationship +type RouteAuthenticator struct { + Authenticator map[string]runtime.Authenticator + Schemes []string + Scopes map[string][]string + allScopes []string + commonScopes []string + allowAnonymous bool +} + +func (ra *RouteAuthenticator) AllowsAnonymous() bool { + return ra.allowAnonymous +} + +// AllScopes returns a list of unique scopes that is the combination +// of all the scopes in the requirements +func (ra *RouteAuthenticator) AllScopes() []string { + return ra.allScopes +} + +// CommonScopes returns a list of unique scopes that are common in all the +// scopes in the requirements +func (ra *RouteAuthenticator) CommonScopes() []string { + return ra.commonScopes +} + +// Authenticate Authenticator interface implementation +func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { + if ra.allowAnonymous { + route.Authenticator = ra + return true, nil, nil + } + // iterate in proper order + var lastResult interface{} + for _, scheme := range ra.Schemes { + if authenticator, ok := ra.Authenticator[scheme]; ok { + applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{ + Request: req, + RequiredScopes: ra.Scopes[scheme], + }) + if !applies { + return false, nil, nil + } + if err != nil { + route.Authenticator = ra + return true, nil, err + } + lastResult = princ + } + } + route.Authenticator = ra + return true, lastResult, nil +} + +func stringSliceUnion(slices ...[]string) []string { + unique := make(map[string]struct{}) + var result []string + for _, slice := range slices { + for _, entry := range slice { + if _, ok := unique[entry]; ok { + continue + } + unique[entry] = struct{}{} + result = append(result, entry) + } + } + return result +} + +func stringSliceIntersection(slices ...[]string) []string { + unique := make(map[string]int) + var intersection []string + + total := len(slices) + var emptyCnt int + for _, slice := range slices { + if len(slice) == 0 { + emptyCnt++ + continue + } + + for _, entry := range slice { + unique[entry]++ + if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices + intersection = append(intersection, entry) + } + } + } + + return intersection +} + +// RouteAuthenticators represents a group of authenticators that represent a logical OR +type RouteAuthenticators []RouteAuthenticator + +// AllowsAnonymous returns true when there is an authenticator that means optional auth +func (ras RouteAuthenticators) AllowsAnonymous() bool { + for _, ra := range ras { + if ra.AllowsAnonymous() 
{ + return true + } + } + return false +} + +// Authenticate implements the Authenticator interface so this collection can be used as an authenticator +func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { + var lastError error + var allowsAnon bool + var anonAuth RouteAuthenticator + + for _, ra := range ras { + if ra.AllowsAnonymous() { + anonAuth = ra + allowsAnon = true + continue + } + applies, usr, err := ra.Authenticate(req, route) + if !applies || err != nil || usr == nil { + if err != nil { + lastError = err + } + continue + } + return applies, usr, nil + } + + if allowsAnon && lastError == nil { + route.Authenticator = &anonAuth + return true, nil, lastError + } + return lastError != nil, nil, lastError +} + +type routeEntry struct { + PathPattern string + BasePath string + Operation *spec.Operation + Consumes []string + Consumers map[string]runtime.Consumer + Produces []string + Producers map[string]runtime.Producer + Parameters map[string]spec.Parameter + Handler http.Handler + Formats strfmt.Registry + Binder *UntypedRequestBinder + Authenticators RouteAuthenticators + Authorizer runtime.Authorizer +} + +// MatchedRoute represents the route that was matched in this request +type MatchedRoute struct { + routeEntry + Params RouteParams + Consumer runtime.Consumer + Producer runtime.Producer + Authenticator *RouteAuthenticator +} + +// HasAuth returns true when the route has a security requirement defined +func (m *MatchedRoute) HasAuth() bool { + return len(m.Authenticators) > 0 +} + +// NeedsAuth returns true when the request still +// needs to perform authentication +func (m *MatchedRoute) NeedsAuth() bool { + return m.HasAuth() && m.Authenticator == nil +} + +func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { + mth := strings.ToUpper(method) + debugLog("looking up route for %s %s", method, path) + if Debug { + if len(d.routers) == 0 { + debugLog("there are no known routers") + } + for meth := range d.routers { + debugLog("got a router for %s", meth) + } + } + if router, ok := d.routers[mth]; ok { + if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil { + if entry, ok := m.(*routeEntry); ok { + debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) + var params RouteParams + for _, p := range rp { + v, err := pathUnescape(p.Value) + if err != nil { + debugLog("failed to unescape %q: %v", p.Value, err) + v = p.Value + } + // a workaround to handle fragment/composing parameters until they are supported in denco router + // check if this parameter is a fragment within a path segment + if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + 2; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' { + // extract fragment parameters + ep := strings.Split(entry.PathPattern[xpos:], "/")[0] + pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil) + for i, pname := range pnames { + params = append(params, RouteParam{Name: pname, Value: pvalues[i]}) + } + } else { + // use the parameter directly + params = append(params, RouteParam{Name: p.Name, Value: v}) + } + } + return &MatchedRoute{routeEntry: *entry, Params: params}, true + } + } else { + debugLog("couldn't find a route by path for %s %s", method, path) + } + } else { + debugLog("couldn't find a route by method for %s %s", method, path) + } + return nil, false +} + +func (d *defaultRouter) OtherMethods(method, path string) []string { + mn := strings.ToUpper(method) + var
methods []string + for k, v := range d.routers { + if k != mn { + if _, _, ok := v.Lookup(fpath.Clean(path)); ok { + methods = append(methods, k) + continue + } + } + } + return methods +} + +// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco +var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) + +func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) { + pleft := strings.Index(pattern, "{") + names = append(names, name) + if pleft < 0 { + if strings.HasSuffix(value, pattern) { + values = append(values, value[:len(value)-len(pattern)]) + } else { + values = append(values, "") + } + } else { + toskip := pattern[:pleft] + pright := strings.Index(pattern, "}") + vright := strings.Index(value, toskip) + if vright >= 0 { + values = append(values, value[:vright]) + } else { + values = append(values, "") + value = "" + } + return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values) + } + return names, values +} + +func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) { + mn := strings.ToUpper(method) + + bp := fpath.Clean(d.spec.BasePath()) + if len(bp) > 0 && bp[len(bp)-1] == '/' { + bp = bp[:len(bp)-1] + } + + debugLog("operation: %#v", *operation) + if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok { + consumes := d.analyzer.ConsumesFor(operation) + produces := d.analyzer.ProducesFor(operation) + parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp)) + + // add API defaults if not part of the spec + if defConsumes := d.api.DefaultConsumes(); defConsumes != "" && !swag.ContainsStringsCI(consumes, defConsumes) { + consumes = append(consumes, defConsumes) + } + + if defProduces := d.api.DefaultProduces(); defProduces != "" && !swag.ContainsStringsCI(produces, defProduces) { + produces = append(produces, defProduces) + } + + record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{ + BasePath: bp, + PathPattern: path, + Operation: operation, + Handler: handler, + Consumes: consumes, + Produces: produces, + Consumers: d.api.ConsumersFor(normalizeOffers(consumes)), + Producers: d.api.ProducersFor(normalizeOffers(produces)), + Parameters: parameters, + Formats: d.api.Formats(), + Binder: NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()), + Authenticators: d.buildAuthenticators(operation), + Authorizer: d.api.Authorizer(), + }) + d.records[mn] = append(d.records[mn], record) + } +} + +func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators { + requirements := d.analyzer.SecurityRequirementsFor(operation) + var auths []RouteAuthenticator + for _, reqs := range requirements { + var schemes []string + scopes := make(map[string][]string, len(reqs)) + var scopeSlices [][]string + for _, req := range reqs { + schemes = append(schemes, req.Name) + scopes[req.Name] = req.Scopes + scopeSlices = append(scopeSlices, req.Scopes) + } + + definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs) + authenticators := d.api.AuthenticatorsFor(definitions) + auths = append(auths, RouteAuthenticator{ + Authenticator: authenticators, + Schemes: schemes, + Scopes: scopes, + allScopes: stringSliceUnion(scopeSlices...), + commonScopes: stringSliceIntersection(scopeSlices...), + allowAnonymous: len(reqs) == 1 && reqs[0].Name == "", + }) + } + return 
auths +} + +func (d *defaultRouteBuilder) Build() *defaultRouter { + routers := make(map[string]*denco.Router) + for method, records := range d.records { + router := denco.New() + _ = router.Build(records) + routers[method] = router + } + return &defaultRouter{ + spec: d.spec, + routers: routers, + } +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/security.go b/vendor/github.com/go-openapi/runtime/middleware/security.go new file mode 100644 index 000000000..2b061caef --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/security.go @@ -0,0 +1,39 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import "net/http" + +func newSecureAPI(ctx *Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := ctx.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + if route != nil && !route.NeedsAuth() { + next.ServeHTTP(rw, r) + return + } + + _, rCtx, err := ctx.Authorize(r, route) + if err != nil { + ctx.Respond(rw, r, route.Produces, route, err) + return + } + r = rCtx + + next.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go new file mode 100644 index 000000000..f02914298 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/spec.go @@ -0,0 +1,48 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "net/http" + "path" +) + +// Spec creates a middleware to serve a swagger spec. +// This allows for altering the spec before starting the http listener. 
+// This can be useful if you want to serve the swagger spec from a path other than /swagger.json +// +func Spec(basePath string, b []byte, next http.Handler) http.Handler { + if basePath == "" { + basePath = "/" + } + pth := path.Join(basePath, "swagger.json") + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if r.URL.Path == pth { + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + //#nosec + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusNotFound) + return + } + next.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go new file mode 100644 index 000000000..b4dea29e4 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go @@ -0,0 +1,168 @@ +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// SwaggerUIOpts configures the SwaggerUI middlewares +type SwaggerUIOpts struct { + // BasePath for the UI path, defaults to: / + BasePath string + // Path combines with BasePath for the full UI path, defaults to: docs + Path string + // SpecURL is the url to find the spec for + SpecURL string + // OAuthCallbackURL is the url called after OAuth2 login + OAuthCallbackURL string + + // The three components needed to embed swagger-ui + SwaggerURL string + SwaggerPresetURL string + SwaggerStylesURL string + + Favicon32 string + Favicon16 string + + // Title for the documentation site, defaults to: API documentation + Title string +} + +// EnsureDefaults fills in defaults in case some options are missing +func (r *SwaggerUIOpts) EnsureDefaults() { + if r.BasePath == "" { + r.BasePath = "/" + } + if r.Path == "" { + r.Path = "docs" + } + if r.SpecURL == "" { + r.SpecURL = "/swagger.json" + } + if r.OAuthCallbackURL == "" { + r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback") + } + if r.SwaggerURL == "" { + r.SwaggerURL = swaggerLatest + } + if r.SwaggerPresetURL == "" { + r.SwaggerPresetURL = swaggerPresetLatest + } + if r.SwaggerStylesURL == "" { + r.SwaggerStylesURL = swaggerStylesLatest + } + if r.Favicon16 == "" { + r.Favicon16 = swaggerFavicon16Latest + } + if r.Favicon32 == "" { + r.Favicon32 = swaggerFavicon32Latest + } + if r.Title == "" { + r.Title = "API documentation" + } +} + +// SwaggerUI creates a middleware to serve a documentation site for a swagger spec. +// This allows for altering the spec before starting the http listener.
+func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := path.Join(opts.BasePath, opts.Path) + tmpl := template.Must(template.New("swaggerui").Parse(swaggeruiTemplate)) + + buf := bytes.NewBuffer(nil) + _ = tmpl.Execute(buf, &opts) + b := buf.Bytes() + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if path.Join(r.URL.Path) == pth { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.WriteHeader(http.StatusOK) + + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "text/plain") + rw.WriteHeader(http.StatusNotFound) + _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) + return + } + next.ServeHTTP(rw, r) + }) +} + +const ( + swaggerLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js" + swaggerPresetLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-standalone-preset.js" + swaggerStylesLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui.css" + swaggerFavicon32Latest = "https://unpkg.com/swagger-ui-dist/favicon-32x32.png" + swaggerFavicon16Latest = "https://unpkg.com/swagger-ui-dist/favicon-16x16.png" + swaggeruiTemplate = ` + + + + + {{ .Title }} + + + + + + + + +
+ + + + + + +` +) diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go new file mode 100644 index 000000000..576f6003f --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go @@ -0,0 +1,122 @@ +package middleware + +import ( + "bytes" + "fmt" + "net/http" + "path" + "text/template" +) + +func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler { + opts.EnsureDefaults() + + pth := opts.OAuthCallbackURL + tmpl := template.Must(template.New("swaggeroauth").Parse(swaggerOAuthTemplate)) + + buf := bytes.NewBuffer(nil) + _ = tmpl.Execute(buf, &opts) + b := buf.Bytes() + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if path.Join(r.URL.Path) == pth { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.WriteHeader(http.StatusOK) + + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "text/plain") + rw.WriteHeader(http.StatusNotFound) + _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) + return + } + next.ServeHTTP(rw, r) + }) +} + +const ( + swaggerOAuthTemplate = ` + + + + {{ .Title }} + + + + + +` +) diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go new file mode 100644 index 000000000..39a85f7d9 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go @@ -0,0 +1,286 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
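The three middlewares above compose into a single handler chain: Spec serves the raw swagger.json document, SwaggerUI serves the documentation page, and SwaggerUIOAuth2Callback completes the OAuth2 login flow for the UI. A minimal wiring sketch (not part of the vendored sources; the inline spec, port, and innermost handler are placeholders):

package main

import (
	"log"
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	// Placeholder spec document; a real service would serve its generated swagger.json.
	specDoc := []byte(`{"swagger":"2.0","info":{"title":"demo","version":"1.0"},"paths":{}}`)

	// Innermost handler: whatever actually serves the API routes.
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.NotFound(w, r)
	})

	opts := middleware.SwaggerUIOpts{SpecURL: "/swagger.json"}
	// Chain: /swagger.json -> /docs UI -> /docs/oauth2-callback -> API.
	handler := middleware.Spec("", specDoc,
		middleware.SwaggerUI(opts, middleware.SwaggerUIOAuth2Callback(opts, api)))

	log.Fatal(http.ListenAndServe(":8080", handler))
}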
+ +package untyped + +import ( + "fmt" + "net/http" + "sort" + "strings" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" + + "github.com/go-openapi/runtime" +) + +// NewAPI creates the default untyped API +func NewAPI(spec *loads.Document) *API { + var an *analysis.Spec + if spec != nil && spec.Spec() != nil { + an = analysis.New(spec.Spec()) + } + api := &API{ + spec: spec, + analyzer: an, + consumers: make(map[string]runtime.Consumer, 10), + producers: make(map[string]runtime.Producer, 10), + authenticators: make(map[string]runtime.Authenticator), + operations: make(map[string]map[string]runtime.OperationHandler), + ServeError: errors.ServeError, + Models: make(map[string]func() interface{}), + formats: strfmt.NewFormats(), + } + return api.WithJSONDefaults() +} + +// API represents an untyped mux for a swagger spec +type API struct { + spec *loads.Document + analyzer *analysis.Spec + DefaultProduces string + DefaultConsumes string + consumers map[string]runtime.Consumer + producers map[string]runtime.Producer + authenticators map[string]runtime.Authenticator + authorizer runtime.Authorizer + operations map[string]map[string]runtime.OperationHandler + ServeError func(http.ResponseWriter, *http.Request, error) + Models map[string]func() interface{} + formats strfmt.Registry +} + +// WithJSONDefaults loads the json defaults for this api +func (d *API) WithJSONDefaults() *API { + d.DefaultConsumes = runtime.JSONMime + d.DefaultProduces = runtime.JSONMime + d.consumers[runtime.JSONMime] = runtime.JSONConsumer() + d.producers[runtime.JSONMime] = runtime.JSONProducer() + return d +} + +// WithoutJSONDefaults clears the json defaults for this api +func (d *API) WithoutJSONDefaults() *API { + d.DefaultConsumes = "" + d.DefaultProduces = "" + delete(d.consumers, runtime.JSONMime) + delete(d.producers, runtime.JSONMime) + return d +} + +// Formats returns the registered string formats +func (d *API) Formats() strfmt.Registry { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + return d.formats +} + +// RegisterFormat registers a custom format validator +func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + d.formats.Add(name, format, validator) +} + +// RegisterAuth registers an auth handler in this api +func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { + if d.authenticators == nil { + d.authenticators = make(map[string]runtime.Authenticator) + } + d.authenticators[scheme] = handler +} + +// RegisterAuthorizer registers an authorizer handler in this api +func (d *API) RegisterAuthorizer(handler runtime.Authorizer) { + d.authorizer = handler +} + +// RegisterConsumer registers a consumer for a media type. 
+func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) { + if d.consumers == nil { + d.consumers = make(map[string]runtime.Consumer, 10) + } + d.consumers[strings.ToLower(mediaType)] = handler +} + +// RegisterProducer registers a producer for a media type +func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { + if d.producers == nil { + d.producers = make(map[string]runtime.Producer, 10) + } + d.producers[strings.ToLower(mediaType)] = handler +} + +// RegisterOperation registers an operation handler for a method and path +func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) { + if d.operations == nil { + d.operations = make(map[string]map[string]runtime.OperationHandler, 30) + } + um := strings.ToUpper(method) + if b, ok := d.operations[um]; !ok || b == nil { + d.operations[um] = make(map[string]runtime.OperationHandler) + } + d.operations[um][path] = handler +} + +// OperationHandlerFor returns the operation handler for the specified method and path if it can be found +func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) { + if d.operations == nil { + return nil, false + } + if pi, ok := d.operations[strings.ToUpper(method)]; ok { + h, ok := pi[path] + return h, ok + } + return nil, false +} + +// ConsumersFor gets the consumers for the specified media types +func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { + result := make(map[string]runtime.Consumer) + for _, mt := range mediaTypes { + if consumer, ok := d.consumers[mt]; ok { + result[mt] = consumer + } + } + return result +} + +// ProducersFor gets the producers for the specified media types +func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + result := make(map[string]runtime.Producer) + for _, mt := range mediaTypes { + if producer, ok := d.producers[mt]; ok { + result[mt] = producer + } + } + return result +} + +// AuthenticatorsFor gets the authenticators for the specified security schemes +func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + result := make(map[string]runtime.Authenticator) + for k := range schemes { + if a, ok := d.authenticators[k]; ok { + result[k] = a + } + } + return result +} + +// Authorizer returns the registered authorizer +func (d *API) Authorizer() runtime.Authorizer { + return d.authorizer +} + +// Validate validates this API for any missing items +func (d *API) Validate() error { + return d.validate() +} + +// validate validates the registrations in this API against the provided spec analyzer +func (d *API) validate() error { + var consumes []string + for k := range d.consumers { + consumes = append(consumes, k) + } + + var produces []string + for k := range d.producers { + produces = append(produces, k) + } + + var authenticators []string + for k := range d.authenticators { + authenticators = append(authenticators, k) + } + + var operations []string + for m, v := range d.operations { + for p := range v { + operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p)) + } + } + + var definedAuths []string + for k := range d.spec.Spec().SecurityDefinitions { + definedAuths = append(definedAuths, k) + } + + if err := d.verify("consumes", consumes, d.analyzer.RequiredConsumes()); err != nil { + return err + } + if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil { + return err + } + if err := d.verify("operation", operations,
d.analyzer.OperationMethodPaths()); err != nil { + return err + } + + requiredAuths := d.analyzer.RequiredSecuritySchemes() + if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil { + return err + } + if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil { + return err + } + return nil +} + +func (d *API) verify(name string, registrations []string, expectations []string) error { + sort.Strings(registrations) + sort.Strings(expectations) + + expected := map[string]struct{}{} + seen := map[string]struct{}{} + + for _, v := range expectations { + expected[v] = struct{}{} + } + + var unspecified []string + for _, v := range registrations { + seen[v] = struct{}{} + if _, ok := expected[v]; !ok { + unspecified = append(unspecified, v) + } + } + + for k := range seen { + delete(expected, k) + } + + var unregistered []string + for k := range expected { + unregistered = append(unregistered, k) + } + sort.Strings(unspecified) + sort.Strings(unregistered) + + if len(unregistered) > 0 || len(unspecified) > 0 { + return &errors.APIVerificationFailed{ + Section: name, + MissingSpecification: unspecified, + MissingRegistration: unregistered, + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go new file mode 100644 index 000000000..1f0135b57 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/validation.go @@ -0,0 +1,126 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
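To see what the validate/verify pair above reports in practice, here is a minimal sketch (not part of the vendored sources) that validates an untyped API against a spec declaring an XML consumer that is never registered. Assuming the analyzer counts spec-level consumes among RequiredConsumes, Validate returns an *errors.APIVerificationFailed that lists both sides of the mismatch:

package main

import (
	"encoding/json"
	stderrors "errors"
	"fmt"
	"log"

	openapierrors "github.com/go-openapi/errors"
	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime/middleware/untyped"
)

func main() {
	// The spec declares an XML consumer that is never registered with the API.
	raw := json.RawMessage(`{"swagger":"2.0","info":{"title":"demo","version":"1.0"},"consumes":["application/xml"],"paths":{}}`)
	doc, err := loads.Analyzed(raw, "")
	if err != nil {
		log.Fatal(err)
	}

	api := untyped.NewAPI(doc) // registers the JSON consumer/producer by default

	var verr *openapierrors.APIVerificationFailed
	if stderrors.As(api.Validate(), &verr) {
		// MissingRegistration: required by the spec but never registered.
		// MissingSpecification: registered but not mentioned in the spec.
		fmt.Println(verr.Section, verr.MissingRegistration, verr.MissingSpecification)
	}
}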
+ +package middleware + +import ( + "mime" + "net/http" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/swag" + + "github.com/go-openapi/runtime" +) + +type validation struct { + context *Context + result []error + request *http.Request + route *MatchedRoute + bound map[string]interface{} +} + +// validateContentType validates the content type of a request +func validateContentType(allowed []string, actual string) error { + debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", ")) + if len(allowed) == 0 { + return nil + } + mt, _, err := mime.ParseMediaType(actual) + if err != nil { + return errors.InvalidContentType(actual, allowed) + } + if swag.ContainsStringsCI(allowed, mt) { + return nil + } + if swag.ContainsStringsCI(allowed, "*/*") { + return nil + } + parts := strings.Split(actual, "/") + if len(parts) == 2 && swag.ContainsStringsCI(allowed, parts[0]+"/*") { + return nil + } + return errors.InvalidContentType(actual, allowed) +} + +func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation { + debugLog("validating request %s %s", request.Method, request.URL.EscapedPath()) + validate := &validation{ + context: ctx, + request: request, + route: route, + bound: make(map[string]interface{}), + } + + validate.contentType() + if len(validate.result) == 0 { + validate.responseFormat() + } + if len(validate.result) == 0 { + validate.parameters() + } + + return validate +} + +func (v *validation) parameters() { + debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath()) + if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil { + if result.Error() == "validation failure list" { + for _, e := range result.(*errors.Validation).Value.([]interface{}) { + v.result = append(v.result, e.(error)) + } + return + } + v.result = append(v.result, result) + } +} + +func (v *validation) contentType() { + if len(v.result) == 0 && runtime.HasBody(v.request) { + debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath()) + ct, _, req, err := v.context.ContentType(v.request) + if err != nil { + v.result = append(v.result, err) + } else { + v.request = req + } + + if len(v.result) == 0 { + if err := validateContentType(v.route.Consumes, ct); err != nil { + v.result = append(v.result, err) + } + } + if ct != "" && v.route.Consumer == nil { + cons, ok := v.route.Consumers[ct] + if !ok { + v.result = append(v.result, errors.New(500, "no consumer registered for %s", ct)) + } else { + v.route.Consumer = cons + } + } + } +} + +func (v *validation) responseFormat() { + // if the route provides values for Produces and no format could be identified, then return an error. + // if the route does not specify values for Produces then treat the request as valid since the API designer + // chose not to specify the format for responses.
+ if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" && len(v.route.Produces) > 0 { + v.request = rCtx + v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces)) + } +} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go new file mode 100644 index 000000000..078fda173 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -0,0 +1,139 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bufio" + "io" + "net/http" + "strings" + + "github.com/go-openapi/swag" +) + +// CanHaveBody returns true if this method can have a body +func CanHaveBody(method string) bool { + mn := strings.ToUpper(method) + return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" +} + +// IsSafe returns true if this is a request with a safe method +func IsSafe(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn == "GET" || mn == "HEAD" +} + +// AllowsBody returns true if the request allows for a body +func AllowsBody(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn != "HEAD" +} + +// HasBody returns true if this request has a body +func HasBody(r *http.Request) bool { + // happy case: we have a content length set + if r.ContentLength > 0 { + return true + } + + if r.Header.Get("content-length") != "" { + // in this case, no Transfer-Encoding should be present + // we have a header set but it was explicitly set to 0, so we assume no body + return false + } + + rdr := newPeekingReader(r.Body) + r.Body = rdr + return rdr.HasContent() +} + +func newPeekingReader(r io.ReadCloser) *peekingReader { + if r == nil { + return nil + } + return &peekingReader{ + underlying: bufio.NewReader(r), + orig: r, + } +} + +type peekingReader struct { + underlying interface { + Buffered() int + Peek(int) ([]byte, error) + Read([]byte) (int, error) + } + orig io.ReadCloser +} + +func (p *peekingReader) HasContent() bool { + if p == nil { + return false + } + if p.underlying.Buffered() > 0 { + return true + } + b, err := p.underlying.Peek(1) + if err != nil { + return false + } + return len(b) > 0 +} + +func (p *peekingReader) Read(d []byte) (int, error) { + if p == nil { + return 0, io.EOF + } + return p.underlying.Read(d) +} + +func (p *peekingReader) Close() error { + p.underlying = nil + if p.orig != nil { + return p.orig.Close() + } + return nil +} + +// JSONRequest creates a new http request with json headers set +func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + req.Header.Add(HeaderContentType, JSONMime) + req.Header.Add(HeaderAccept, JSONMime) + return req, nil +} + +// Gettable for things with a method GetOK(string) (data []string, hasKey bool, hasValue bool) +type Gettable interface { + GetOK(string)
([]string, bool, bool) +} + +// ReadSingleValue reads a single value from the source +func ReadSingleValue(values Gettable, name string) string { + vv, _, hv := values.GetOK(name) + if hv { + return vv[len(vv)-1] + } + return "" +} + +// ReadCollectionValue reads a collection value from a string data source +func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { + v := ReadSingleValue(values, name) + return swag.SplitByFormat(v, collectionFormat) +} diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go new file mode 100644 index 000000000..c3ffdac7e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/security/authenticator.go @@ -0,0 +1,276 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package security + +import ( + "context" + "net/http" + "strings" + + "github.com/go-openapi/errors" + + "github.com/go-openapi/runtime" +) + +const ( + query = "query" + header = "header" +) + +// HttpAuthenticator is a function that authenticates a HTTP request +func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { + return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { + if request, ok := params.(*http.Request); ok { + return handler(request) + } + if scoped, ok := params.(*ScopedAuthRequest); ok { + return handler(scoped.Request) + } + return false, nil, nil + }) +} + +// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes +func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, interface{}, error)) runtime.Authenticator { + return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { + if request, ok := params.(*ScopedAuthRequest); ok { + return handler(request) + } + return false, nil, nil + }) +} + +// UserPassAuthentication authentication function +type UserPassAuthentication func(string, string) (interface{}, error) + +// UserPassAuthenticationCtx authentication function with context.Context +type UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, interface{}, error) + +// TokenAuthentication authentication function +type TokenAuthentication func(string) (interface{}, error) + +// TokenAuthenticationCtx authentication function with context.Context +type TokenAuthenticationCtx func(context.Context, string) (context.Context, interface{}, error) + +// ScopedTokenAuthentication authentication function +type ScopedTokenAuthentication func(string, []string) (interface{}, error) + +// ScopedTokenAuthenticationCtx authentication function with context.Context +type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, interface{}, error) + +var DefaultRealmName = "API" + +type secCtxKey uint8 + +const ( + failedBasicAuth secCtxKey = iota + oauth2SchemeName +) + +func FailedBasicAuth(r *http.Request) string { 
+ return FailedBasicAuthCtx(r.Context()) +} + +func FailedBasicAuthCtx(ctx context.Context) string { + v, ok := ctx.Value(failedBasicAuth).(string) + if !ok { + return "" + } + return v +} + +func OAuth2SchemeName(r *http.Request) string { + return OAuth2SchemeNameCtx(r.Context()) +} + +func OAuth2SchemeNameCtx(ctx context.Context) string { + v, ok := ctx.Value(oauth2SchemeName).(string) + if !ok { + return "" + } + return v +} + +// BasicAuth creates a basic auth authenticator with the provided authentication function +func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator { + return BasicAuthRealm(DefaultRealmName, authenticate) +} + +// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name +func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + if usr, pass, ok := r.BasicAuth(); ok { + p, err := authenticate(usr, pass) + if err != nil { + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + } + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context +func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator { + return BasicAuthRealmCtx(DefaultRealmName, authenticate) +} + +// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context +func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + if usr, pass, ok := r.BasicAuth(); ok { + ctx, p, err := authenticate(r.Context(), usr, pass) + if err != nil { + ctx = context.WithValue(ctx, failedBasicAuth, realm) + } + *r = *r.WithContext(ctx) + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// APIKeyAuth creates an authenticator that uses a token for authorization. +// This token can be obtained from either a header or a query string +func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + p, err := authenticate(token) + return true, p, err + }) +} + +// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context. 
+// This token can be obtained from either a header or a query string +func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + ctx, p, err := authenticate(r.Context(), token) + *r = *r.WithContext(ctx) + return true, p, err + }) +} + +// ScopedAuthRequest contains both a http request and the required scopes for a particular operation +type ScopedAuthRequest struct { + Request *http.Request + RequiredScopes []string +} + +// BearerAuth for use with oauth2 flows +func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { + var token string + hdr := r.Request.Header.Get(runtime.HeaderAuthorization) + if strings.HasPrefix(hdr, prefix) { + token = strings.TrimPrefix(hdr, prefix) + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get("access_token") + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue("access_token") + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + *r.Request = *r.Request.WithContext(rctx) + p, err := authenticate(token, r.RequiredScopes) + return true, p, err + }) +} + +// BearerAuthCtx for use with oauth2 flows with support for context.Context. +func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { + var token string + hdr := r.Request.Header.Get(runtime.HeaderAuthorization) + if strings.HasPrefix(hdr, prefix) { + token = strings.TrimPrefix(hdr, prefix) + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get("access_token") + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue("access_token") + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + ctx, p, err := authenticate(rctx, token, r.RequiredScopes) + *r.Request = *r.Request.WithContext(ctx) + return true, p, err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/security/authorizer.go b/vendor/github.com/go-openapi/runtime/security/authorizer.go new file mode 100644 index 000000000..00c1a4d6a --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/security/authorizer.go @@ -0,0 +1,27 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
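A usage sketch for the authenticators above (not part of the vendored sources; the credentials and header name are placeholders): BasicAuth wraps a user/password check, APIKeyAuth pulls a token from a header or query parameter, and both are driven through the runtime.Authenticator interface:

package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/errors"
	"github.com/go-openapi/runtime/security"
)

func main() {
	basic := security.BasicAuth(func(user, pass string) (interface{}, error) {
		if user == "admin" && pass == "secret" {
			return user, nil
		}
		return nil, errors.Unauthenticated("basic")
	})

	req, _ := http.NewRequest(http.MethodGet, "http://example.com/thing", nil)
	req.SetBasicAuth("admin", "secret")

	applies, principal, err := basic.Authenticate(req)
	fmt.Println(applies, principal, err) // true admin <nil>

	apiKey := security.APIKeyAuth("X-API-Key", "header", func(token string) (interface{}, error) {
		return "principal-for-" + token, nil
	})
	req2, _ := http.NewRequest(http.MethodGet, "http://example.com/thing", nil)
	req2.Header.Set("X-API-Key", "abc123")
	fmt.Println(apiKey.Authenticate(req2)) // true principal-for-abc123 <nil>
}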
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package security + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +// Authorized provides a default implementation of the Authorizer interface where all +// requests are authorized (successful) +func Authorized() runtime.Authorizer { + return runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil }) +} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go new file mode 100644 index 000000000..3b011a0bf --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/statuses.go @@ -0,0 +1,90 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +// Statuses lists the most common HTTP status codes to default message +// taken from https://httpstatuses.com/ +var Statuses = map[int]string{ + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 103: "Checkpoint", + 122: "URI too long", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Request Processed", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Request Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental Controls", + 451: "Wrong Exchange Server", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 509: "Bandwidth Limit Exceeded", + 510: "Not 
Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go new file mode 100644 index 000000000..f33320b7d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/text.go @@ -0,0 +1,116 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +// TextConsumer creates a new text consumer +func TextConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("TextConsumer requires a reader") // early exit + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + // If the buffer is empty, no need to unmarshal it, which causes a panic. + if len(b) == 0 { + return nil + } + + if tu, ok := data.(encoding.TextUnmarshaler); ok { + err := tu.UnmarshalText(b) + if err != nil { + return fmt.Errorf("text consumer: %v", err) + } + + return nil + } + + t := reflect.TypeOf(data) + if data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t.Elem().Kind() == reflect.String { + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", + data, data, "can be resolved by supporting TextUnmarshaler interface") + }) +} + +// TextProducer creates a new text producer +func TextProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("TextProducer requires a writer") // early exit + } + + if data == nil { + return errors.New("no data given to produce text from") + } + + if tm, ok := data.(encoding.TextMarshaler); ok { + txt, err := tm.MarshalText() + if err != nil { + return fmt.Errorf("text producer: %v", err) + } + _, err = writer.Write(txt) + return err + } + + if str, ok := data.(error); ok { + _, err := writer.Write([]byte(str.Error())) + return err + } + + if str, ok := data.(fmt.Stringer); ok { + _, err := writer.Write([]byte(str.String())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + if v.Kind() != reflect.String { + return fmt.Errorf("%T is not a supported type by the TextProducer", data) + } + + _, err := writer.Write([]byte(v.String())) + return err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go new file mode 100644 index 000000000..11f5732af --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/values.go @@ -0,0 +1,19 @@ +package runtime + +// Values typically 
represent parameters on a http request. +type Values map[string][]string + +// GetOK returns the values collection for the given key. +// When the key is present in the map it will return true for hasKey. +// When the value is not empty it will return true for hasValue. +func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { + value, hasKey = v[key] + if !hasKey { + return + } + if len(value) == 0 { + return + } + hasValue = true + return +} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go new file mode 100644 index 000000000..821c7393d --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/xml.go @@ -0,0 +1,36 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/xml" + "io" +) + +// XMLConsumer creates a new XML consumer +func XMLConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := xml.NewDecoder(reader) + return dec.Decode(data) + }) +} + +// XMLProducer creates a new XML producer +func XMLProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := xml.NewEncoder(writer) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go new file mode 100644 index 000000000..b30d37712 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go @@ -0,0 +1,40 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
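A small sketch (not part of the vendored sources) of how these value helpers behave: GetOK distinguishes a missing key from an empty value, ReadSingleValue returns the last value recorded for a key, and ReadCollectionValue splits that value according to the collection format (here "csv"):

package main

import (
	"fmt"

	"github.com/go-openapi/runtime"
)

func main() {
	values := runtime.Values{
		"tags": {"a,b,c"},
		"name": {"first", "last-one-wins"},
	}

	fmt.Println(runtime.ReadSingleValue(values, "name"))            // last-one-wins
	fmt.Println(runtime.ReadCollectionValue(values, "tags", "csv")) // [a b c]

	v, hasKey, hasValue := values.GetOK("missing")
	fmt.Println(v, hasKey, hasValue) // [] false false
}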
+ +package yamlpc + +import ( + "io" + + "github.com/go-openapi/runtime" + + "gopkg.in/yaml.v2" +) + +// YAMLConsumer creates a consumer for yaml data +func YAMLConsumer() runtime.Consumer { + return runtime.ConsumerFunc(func(r io.Reader, v interface{}) error { + dec := yaml.NewDecoder(r) + return dec.Decode(v) + }) +} + +// YAMLProducer creates a producer for yaml data +func YAMLProducer() runtime.Producer { + return runtime.ProducerFunc(func(w io.Writer, v interface{}) error { + enc := yaml.NewEncoder(w) + defer enc.Close() + return enc.Encode(v) + }) +} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 000000000..3152da69a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 000000000..dd91ed6a0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 000000000..835d55e74 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,42 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
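For orientation before the vendored go-openapi/spec sources that follow: a minimal usage sketch of the expander API this diff vendors in. The sketch is illustrative and not part of the diff; the package main wrapper and the sample document are hypothetical, and it relies only on spec.Swagger, spec.ExpandSpec, and spec.ExpandOptions as they appear in expander.go further below.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A tiny Swagger 2.0 document with one local $ref (hypothetical sample).
	raw := []byte(`{
		"swagger": "2.0",
		"info": {"title": "demo", "version": "1.0.0"},
		"paths": {},
		"definitions": {
			"name": {"type": "string"},
			"pet": {"type": "object", "properties": {"name": {"$ref": "#/definitions/name"}}}
		}
	}`)

	var doc spec.Swagger
	if err := json.Unmarshal(raw, &doc); err != nil {
		panic(err)
	}

	// Expand all $refs in place. With an empty RelativeBase, fragment-only
	// refs like "#/definitions/name" resolve against doc itself.
	if err := spec.ExpandSpec(&doc, &spec.ExpandOptions{}); err != nil {
		panic(err)
	}

	expanded, _ := json.MarshalIndent(doc.Definitions["pet"], "", "  ")
	fmt.Println(string(expanded)) // the $ref is now an inline {"type": "string"} schema
}

External (file or URL) references would instead go through ExpandOptions.RelativeBase, or through the loads/validate/analysis packages mentioned in the README below.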
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 000000000..18782c6da --- /dev/null +++ b/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,34 @@ +# OAI object model + +[![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) + +[![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents. + +### FAQ + +* What does this do? + +> 1. This package knows how to marshal and unmarshal Swagger API specifications into a golang object model +> 2. It knows how to resolve $refs and expand them to make a single root document + +* How does it play with the rest of the go-openapi packages? + +> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger) +> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations +> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it +> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents + +* Does this library support OpenAPI 3? + +> No. +> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). +> There is no plan to make it evolve toward supporting OpenAPI 3.x. +> This [discussion thread](https://github.com/go-openapi/spec/issues/21) tells the full story. +> +> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/vendor/github.com/go-openapi/spec/appveyor.yml new file mode 100644 index 000000000..090359391 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/appveyor.yml @@ -0,0 +1,32 @@ +version: "0.1.{build}" + +clone_folder: C:\go-openapi\spec +shallow_clone: true # for startup speed +pull_requests: + do_not_increment_build_number: true + +#skip_tags: true +#skip_branch_with_pr: true + +# appveyor.yml +build: off + +environment: + GOPATH: c:\gopath + +stack: go 1.15 + +test_script: + - go test -v -timeout 20m ./...
+ +deploy: off + +notifications: + - provider: Slack + incoming_webhook: https://hooks.slack.com/services/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 + auth_token: + secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= + channel: bots + on_build_success: false + on_build_failure: true + on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go new file mode 100644 index 000000000..afc83850c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -0,0 +1,297 @@ +// Code generated by go-bindata. DO NOT EDIT. +// sources: +// schemas/jsonschema-draft-04.json (4.357kB) +// schemas/v2/schema.json (40.248kB) + +package spec + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %v", name, err) + } + if clErr != nil { + // report the Close error itself; err is nil at this point + return nil, clErr + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _jsonschemaDraft04Json = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") + +func jsonschemaDraft04JsonBytes() ([]byte, error) { + return bindataRead( + _jsonschemaDraft04Json, + "jsonschema-draft-04.json", + ) +} + +func jsonschemaDraft04Json() (*asset, error) { + bytes, err := jsonschemaDraft04JsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0640), modTime: time.Unix(1568963823, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 
0xf1, 0x1b, 0x82, 0xe2}} + return a, nil +} + +var _v2SchemaJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b
\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\xe3\x08\xb5\x8b\x99\xbd\x82\xbc\x9e\xc2\xe8\x53\x46\x83\x3f\x33\x54\x2b\x5b\xad\x92\x79\xd9\x8f\x5d\x93\x98\xf2\xe6\xc6\x1c\xe6\x9a\x9e\xfc\x43\x82\x31\x66\x8e\x53\x77\xfe\x90\xe7\xf3\xf6\xe9\x62\x23\x3f\x10\x93\x18\xae\x72\x1a\x9d\xf9\x48\xcb\xcc\x5a\x65\xc7\x4a\x04\xf0\xf3\xd5\xd5\x05\x8a\x41\x08\xbc\x86\x86\x43\x51\x6c\xe0\x46\x57\xf6\x44\x40\x0d\xfb\xff\xa2\xc3\x7c\x3d\x39\x84\xdc\x09\x22\x64\x4f\x12\xd9\xba\xaa\xf6\xe3\xbd\x56\xdd\x91\x25\x6a\x14\x9c\x89\x34\x8e\x31\xdf\xee\x15\x7e\x2f\x39\x81\x15\x2a\x28\x95\x66\x51\xf5\xfd\x83\xc5\xfe\x15\x07\xcf\xf7\x08\xee\x1d\x8e\xb6\xc5\x52\xcc\x8c\x5a\x93\x66\xc5\xd8\x79\x38\x46\xd6\xa7\x88\x37\xc9\x2e\xe3\xd2\xa5\x7b\x4b\x3a\xdc\xa1\xdc\x9e\x29\xf1\x8c\x8a\x99\x16\x47\x8d\xd4\x78\x8b\xf6\x1c\xe9\x71\x54\x1b\x69\xa8\x4a\x93\x37\xe5\xb2\x2c\x4f\x0c\x92\xab\xa0\x73\x32\x72\x59\xd3\xf0\x2d\x8d\xed\xca\x37\x16\x19\x9e\xdb\x1c\xab\x17\x49\xc3\x0f\x37\xdc\x88\xb1\xb4\xd4\x42\xcb\x58\x5e\x6a\x52\x0b\x15\x10\x0a\xb0\x04\xe7\xf8\x58\x32\x16\x01\xa6\xcd\x01\xb2\xc2\x69\x24\x35\x38\x6f\x30\x6a\xae\x1b\xb4\x71\xaa\xad\x1d\xa0\xd6\x20\x2d\x8b\x3c\xc6\x82\x62\x27\x34\x6d\x15\x84\x7b\x43\xb1\x35\x78\xa6\x24\x77\x28\xc1\x6e\xfc\xe9\x48\x74\xf4\x15\xe3\xe1\x84\x42\x88\x40\x7a\x26\x49\x3b\x48\xb1\xa4\x19\x8e\x0c\xa7\xb5\x01\x6c\x0c\x97\x61\x8a\xc2\x32\xd8\x8c\x44\x69\x24\xbf\x65\x1d\x74\xd6\xe5\x44\xef\xec\x48\x5e\xb7\x8a\xa3\x29\x8e\x41\x64\xce\x1f\x88\xdc\x00\x47\x4b\x40\x98\x6e\xd1\x0d\x8e\x48\x98\x63\x5c\x21\xb1\x4c\x05\x0a\x58\x98\xc5\x6d\x4f\x0a\x77\x53\x4f\x8b\xc4\x44\x1f\xb2\xdf\x8d\x3b\xea\x9f\xfe\xf6\xf2\xc5\xff\x5d\x7f\xfe\x9f\xfb\x67\x8f\xff\xf3\xe9\x69\xd1\xfe\xb3\xc7\xfd\x3c\xf8\x3f\x71\x94\x82\x23\xd1\x72\x00\xb7\x42\x99\x6c\xc0\x60\x7b\x0f\x79\xea\xa8\x53\x4b\x56\x31\xfa\x0b\x52\x9f\x96\xdb\xcd\x2f\xd7\x67\xcd\x04\x19\x85\xfe\xdb\x02\x9a\x59\x03\xad\x63\x3c\xea\xff\x2e\x18\xfd\x00\xd9\xe2\x56\x60\x59\x93\xb9\xb6\xb2\x3e\x3c\x2c\xab\x0f\xa7\xb2\x89\x43\xc7\xf6\xd5\xce\x2e\xad\xa6\xa9\xed\xa6\xc6\x5a\xb4\xa6\x67\xdf\x8c\x26\x7b\x50\x5a\x91\x08\x2e\x6d\xd4\x3a\xc1\x9d\xf2\xdb\xde\x1e\xb2\x2c\x6c\xa5\x64\xc9\x16\xb4\x90\xaa\x4a\xb7\x0c\xde\x13\xc3\x2a\x9a\x11\x9b\x7a\x1b\x3
d\x95\x97\x37\x31\x6b\x69\x7e\x34\xc0\x67\x1f\x66\x19\x49\xef\xf1\x25\xf5\xac\x0e\xea\x0a\x28\x8d\x4d\x7e\xd9\x57\x4b\x49\xe5\xc6\xb3\x25\xfd\xe6\x57\x42\x25\xac\xcd\xcf\x36\x74\x8e\xca\x24\x47\xe7\x80\xa8\x92\x72\xbd\x3d\x84\x2d\x65\xe2\x82\x1a\x9c\xc4\x44\x92\x1b\x10\x79\x8a\xc4\x4a\x2f\x60\x51\x04\x81\xaa\xf0\xa3\x95\x27\xd7\x12\x7b\xa3\x96\x03\x45\x96\xc1\x8a\x07\xc9\xb2\xb0\x95\x52\x8c\xef\x48\x9c\xc6\x7e\x94\xca\xc2\x0e\x07\x12\x44\xa9\x20\x37\xf0\xae\x0f\x49\xa3\x96\x9d\x4b\x42\x7b\x70\x59\x14\xee\xe0\xb2\x0f\x49\xa3\x96\x4b\x97\xbf\x00\x5d\x4b\x4f\xfc\xbb\x2b\xee\x92\xb9\x17\xb5\xaa\xb8\x0b\x97\x17\x9b\x43\xfd\xd6\xc2\xb2\xc2\x2e\x29\xcf\xfd\x87\x4a\x55\xda\x25\x63\x1f\x5a\x65\x69\x2b\x2d\x3d\x67\xe9\x41\xae\x5e\xc1\x6e\x2b\xd4\xdb\x3e\xa8\xd3\x26\xd2\x48\x92\x24\xca\x61\x86\x8f\x8c\xbb\xf2\x8e\x91\xdf\x1f\x06\x19\x33\xf3\x03\x4d\xba\xcd\xe2\x2d\xfb\x69\xe9\x16\x15\x13\xd5\x56\x85\x4e\x3c\x5b\x8a\xbf\x25\x72\x83\xee\x5e\x20\x22\xf2\xc8\xaa\x7b\xdb\x8e\xe4\x29\x58\xca\x38\xb7\x3f\x2e\x59\xb8\xbd\xa8\x16\x16\xf7\xdb\x79\x51\x9f\x5a\xb4\x8d\x87\x3a\x6e\xbc\x3e\xc5\xb4\xcd\x58\xf9\xf5\x3c\xb9\x6f\x49\xaf\x57\xc1\xfa\x1c\x5d\x6d\x88\x8a\x8b\xd3\x28\xcc\xb7\xef\x10\x8a\x4a\x74\xa9\x4a\xa7\x62\xbf\x0d\x76\x23\x6f\x59\xd9\x31\xee\x40\x11\xfb\x28\xec\x8d\x22\x1c\x13\x5a\x64\x94\x23\x16\x60\xbb\xd2\x7c\xa0\x98\xb2\xe5\x6e\xbc\x54\x33\xe0\x3e\xb9\x52\x17\xdb\xb7\x1b\xc8\x12\x20\x8c\x23\xca\x64\x7e\x78\xa3\x62\x5b\x75\x56\xd9\x9e\x2a\x91\x27\xb0\x70\x34\x1f\x90\x89\xb5\x86\x73\x7e\x71\xda\x1e\xfb\x3a\x72\xdc\x5e\x79\x88\xcb\x74\x79\xd9\x64\xe4\xd4\xc2\x9e\xce\xb1\xfe\x85\x5a\xc0\xe9\x0c\x34\x3d\xd0\x43\xce\xa1\x36\x39\xd5\xa1\x4e\xf5\xf8\xb1\xa9\x23\x08\x75\x84\xac\x53\x6c\x3a\xc5\xa6\x53\x6c\x3a\xc5\xa6\x7f\xc5\xd8\xf4\x51\xfd\xff\x25\x4e\xfa\x33\x05\xbe\x9d\x60\xd2\x04\x93\x6a\x5f\x33\x9b\x98\x50\xd2\xe1\x50\x52\xc6\xcc\xdb\x38\x91\xdb\xe6\xaa\xa2\x8f\xa1\x6a\xa6\xd4\xc6\x56\xd6\x8c\x40\x02\x68\x48\xe8\x1a\xe1\x9a\xd9\x2e\xb7\x05\xc3\x34\xda\x2a\xbb\xcd\x12\x36\x98\x22\x50\x4c\xa1\x1b\xc5\xd5\x84\xf0\xbe\x24\x84\xf7\x2f\x22\x37\xef\x94\xd7\x9f\xa0\xde\x04\xf5\x26\xa8\x37\x41\x3d\x64\x40\x3d\xe5\xf2\xde\x60\x89\x27\xb4\x37\xa1\xbd\xda\xd7\xd2\x2c\x26\xc0\x37\x01\x3e\x1b\xef\x5f\x06\xe0\x6b\x7c\x5c\x91\x08\x26\x10\x38\x81\xc0\x09\x04\x76\x4a\x3d\x81\xc0\xbf\x12\x08\x4c\xb0\xdc\x7c\x99\x00\xd0\x75\x70\xb4\xf8\x5a\x7c\xea\xde\x3e\x39\x08\x30\x5a\x27\x35\xed\xb4\x65\xad\x69\x74\x10\x88\x79\xe2\x30\x52\x19\xd6\x04\x21\xa7\x95\xd5\x0e\x03\xf8\xda\x20\xd7\x84\xb4\x26\xa4\x35\x21\xad\x09\x69\x21\x03\x69\x51\x46\xff\xff\x18\x9b\x54\xed\x87\x47\x06\x9d\x4e\x73\x6e\x9a\xb3\xa9\xce\x83\x5e\x4b\xc6\x71\x20\x45\xd7\x72\xf5\x40\x72\x0e\x34\x6c\xf4\x6c\xf3\xba\x5e\x4b\x97\x0e\x52\xb8\xbe\x8b\x79\xa0\x10\x86\xa1\x75\xb0\x6f\xec\xc8\xf4\x3d\x4d\x7b\x86\xc2\x02\x31\x12\x51\xbf\x07\x94\xad\x10\xd6\x2e\x79\xcf\xe9\x1c\xf5\x1e\x31\x23\x5c\x18\xfb\x9c\xfb\x70\xe0\x62\xbd\xf7\xb5\x94\xcf\xf3\xf6\xfa\xc5\x4e\x9c\x85\x76\x1d\xae\x37\xbc\xde\xa3\x41\xcb\x29\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x67\x0f\x68\xb1\xeb\x38\x47\x07\x10\x1b\xd2\xe2\x18\x68\x6d\x40\xbb\xa3\x40\xba\x21\xf2\x8e\x81\xfb\xf6\x92\x77\x2f\x70\xe8\xdb\xb2\x36\xbf\x30\x91\xc5\x21\xe7\x45\xcc\x34\x0c\x48\x8e\xd0\xf2\x9b\x7c\x3c\xbd\x1c\x04\x3e\x07\xe8\x7c\x2f\x84\x7a\x48\x4d\x1f\xba\xe1\x76\x45\x7b\x60\xe0\x01\xca\xee\x04\xca\x31\xbe\x73\x5f\xa3\x70\x0c\xad\x1f\xa5\xf5\x76\xd5\xbb\xd2\x7e\xfb\x30\x90\xcf\xfa\x67\x7a\xe6\xc3\x37\x42\x19\xe2\xc9\x9c\x61\x4c\xe7\xd1\x77\x55\x86\x6e\x8f\x7b\x85\x42\x33\xa3\xaa\x57\xae\xfd\xd5\xcc\x9c\x56\x68\xe2\xde\x0e\x
a8\x2c\xa9\xb0\x7d\xf0\x54\x2d\x80\xf2\x48\x39\x3d\x98\x1a\x6d\x0b\x9d\xba\x53\xfb\xce\xf8\xd1\x7e\xbb\x60\x4f\x06\xf5\xce\xda\xab\xeb\xca\xcb\xd5\xac\x20\xda\x72\x3b\xa2\x4b\x38\xd7\xb5\x89\xbe\x42\xd9\xb9\x73\xc4\x0c\x6d\xb7\xd9\xf8\x8d\xbd\x3e\x9c\xf5\x53\x68\x48\x14\x36\x8f\x09\xc5\x92\xf1\x21\xd1\x09\x07\x1c\xbe\xa7\x91\xf3\x6a\xc8\xc1\x57\xb0\xdd\xc5\xc6\x1d\xad\x76\x1d\xa8\x82\x0e\x4c\x38\xfe\xa5\x8c\xc5\x0a\x40\x5d\xa1\xbb\x98\xd1\xfb\x74\x61\xed\x1a\x98\xaf\x3c\x8c\x1e\xe3\xc2\x92\x29\x74\x3e\x99\xd0\xf9\x41\x50\xd0\x38\x4b\x57\x7e\x5b\x7a\x0e\xe6\xce\x4e\xd7\x19\x35\x57\xbb\x3c\x3c\xd2\x5e\x4f\x4b\x4c\xf7\x0f\x4d\x2b\x91\x5d\x94\xa6\x95\xc8\x69\x25\x72\x5a\x89\x7c\xb8\x95\xc8\x07\x80\x8c\xda\x9c\x64\x7b\xb7\x71\xdf\x57\x12\x4b\x9a\x1f\x72\x0c\x13\x03\xad\x3c\xd5\x4e\xde\x8e\x57\x13\x6d\x34\x86\xcf\x97\xe6\xa4\x68\xc4\xb0\xf6\xc9\xc2\xeb\x8d\x0b\xd7\xcd\xfe\xba\xa6\xf5\x30\xeb\x30\x33\xbe\xc7\x56\x27\xab\x08\xd9\x6d\xbb\x09\xee\x7c\x2d\xcf\xee\x87\x38\xac\xc8\xdd\x90\x9a\x58\x4a\x4e\x96\xa9\x79\x79\xf3\xde\x20\xf0\x96\xe3\x24\x19\xeb\xba\xf2\x53\x19\xab\x12\xaf\x47\xb3\xa0\x3e\xef\x9b\x8d\x6d\x6d\x7b\xde\x3b\x3b\x1a\xc0\x3f\x95\x7e\xed\x78\xfb\x76\xb8\xaf\xb3\xdd\xc5\xeb\x95\xed\x5a\x62\x41\x82\xb3\x54\x6e\x80\x4a\x92\x6f\x36\xbd\x34\xae\xde\x6f\xa4\xc0\xbc\x08\xe3\x84\xfc\x1d\xb6\xe3\xd0\x62\x38\x95\x9b\x57\xe7\x71\x12\x91\x80\xc8\x31\x69\x5e\x60\x21\x6e\x19\x0f\xc7\xa4\x79\x96\x28\x3e\x47\x54\x65\x41\x36\x08\x40\x88\x1f\x58\x08\x56\xaa\xd5\xbf\xaf\xad\x96\xd7\xd6\xcf\x87\xf5\x34\x0f\x71\x93\x6e\x26\xed\x98\x5b\x9f\x4f\xcf\x95\x34\xc6\xd7\x11\xfa\xb0\x81\x22\x1a\xdb\xdf\x8e\xdc\xc3\xb9\xf8\xdd\x5d\x3c\x74\xe6\xea\xb7\x8b\xbf\xf5\x6e\xb3\x46\x2e\x64\xf4\xab\x3c\x4e\xcf\x36\x1d\xfe\xfa\xb8\x36\xba\x8a\xd8\xad\xf6\xc6\x41\x2a\x37\x8c\x17\x0f\xda\xfe\xda\xe7\x65\xbc\x71\x2c\x36\x57\x8a\x47\x12\x4c\xf1\xbd\x77\x6b\xa4\x50\x7e\x77\x7b\x22\x60\x89\xef\xcd\xf5\xb9\x0c\x97\x79\x0d\x2b\x35\x43\xcb\x3d\x24\xf1\x78\xfc\xf8\xcb\x1f\x15\x06\xe2\x78\xd8\x51\x21\xd9\x1f\xf0\xf5\x8f\x86\xa4\x50\xfa\xb1\x47\x43\xa5\xdd\x69\x14\xe8\xa3\xc0\x86\x91\xa7\x81\x50\xb4\x7c\xc0\x81\x80\x77\x7a\x9f\xc6\xc2\xa9\x8c\x05\x33\xb0\x3b\x31\xa4\xf4\xd7\x1b\x26\x55\x97\x7c\x65\xf8\x69\x1a\x84\x8e\x41\x78\xd9\xec\xc5\x11\x16\x1e\x74\x91\xf5\x56\xf5\x57\x49\x47\x5c\x92\xa9\x1e\x99\x36\xf4\xdb\xb1\x0e\xd3\x78\x02\xb0\x9b\x25\xcb\xe9\xe9\x1d\x0d\x44\x01\x42\x08\x91\x64\xd9\xdd\x37\x08\x17\xef\xf9\xe5\x0f\xbd\x46\x91\xf5\xf9\x89\x92\x37\xdd\x89\x59\x44\x1f\x9c\xee\x34\x1e\xbe\x47\x83\x32\x72\x8e\x37\xdf\xac\x69\x38\xef\x75\xb0\xda\xdb\xac\x83\x94\x2f\x39\xa6\x62\x05\x1c\x25\x9c\x49\x16\xb0\xa8\x3c\xc7\x7e\x76\x71\x3e\x6f\xb5\x24\xe7\xe8\xb7\xb9\xc7\x6c\x43\x92\xee\x21\xd4\x17\xa1\x7f\xba\x35\xfe\xae\x39\xbc\xde\xba\x69\xd9\x8e\xe1\x62\xde\x64\x7d\x16\x88\x1b\xed\x29\x11\xfd\x4f\xa9\xff\x99\x90\xc4\xf6\xf4\xf9\x6e\xe9\x28\x23\xd7\xca\xe5\xee\xee\x9f\x63\xb1\x5b\xfb\x10\xd7\x2f\x1d\xf2\xe3\xbf\xb9\xb5\x6f\xa4\x6d\x7d\x25\x79\xfb\x24\x31\xea\x56\xbe\x5d\x53\xcd\x2d\x36\xa3\x6d\xdf\xab\x1c\xb8\x6d\x6f\xc0\x98\xa7\xdd\xaa\x86\x8c\x1d\x39\xa3\x9d\x70\x2b\x9b\x68\xd9\xfd\x33\xfe\xa9\xb6\x4a\x2e\x63\x0f\xcf\x68\x27\xd9\x4c\xb9\x46\x6d\xcb\xbe\xa1\xa8\xd6\x5f\xc6\xd6\x9f\xf1\x4f\xf4\xd4\xb4\x78\xd0\xd6\xf4\x13\x3c\x3b\xac\xd0\xdc\x90\x34\xda\xc9\xb4\x9a\x1a\x8d\xbd\x93\x87\xd4\xe2\x21\x1b\xb3\x2b\xd1\xbe\xe7\x69\xd4\x53\x67\xd5\x40\xa0\xe3\x19\x3f\x6d\x1a\xbc\x0e\x86\x3c\x10\xb4\x3d\x2a\xcd\x78\x32\xe6\xab\xbd\x36\xc9\xf4\x3a\x58\xae\xc3\xf4\x47\xea\xbf\xfb\x47\xff\x0d\x00\x00\xff\xff\xd2\x32\x5a\x28\x38\x9d\x00\x00") + +func 
v2SchemaJsonBytes() ([]byte, error) { + return bindataRead( + _v2SchemaJson, + "v2/schema.json", + ) +} + +func v2SchemaJson() (*asset, error) { + bytes, err := v2SchemaJsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(0640), modTime: time.Unix(1568964748, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x88, 0x5e, 0xf, 0xbf, 0x17, 0x74, 0x0, 0xb2, 0x5a, 0x7f, 0xbc, 0x58, 0xcd, 0xc, 0x25, 0x73, 0xd5, 0x29, 0x1c, 0x7a, 0xd0, 0xce, 0x79, 0xd4, 0x89, 0x31, 0x27, 0x90, 0xf2, 0xff, 0xe6}} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't be read: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't be read: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't be read: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. 
+var _bindata = map[string]func() (*asset, error){ + "jsonschema-draft-04.json": jsonschemaDraft04Json, + + "v2/schema.json": v2SchemaJson, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "jsonschema-draft-04.json": {jsonschemaDraft04Json, map[string]*bintree{}}, + "v2": {nil, map[string]*bintree{ + "schema.json": {v2SchemaJson, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory. +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) +} diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 000000000..122993b44 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,98 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "sync" +) + +// ResolutionCache is a cache for resolving URLs +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.RWMutex + store map[string]interface{} +} + +func (s *simpleCache) ShallowClone() ResolutionCache { + store := make(map[string]interface{}, len(s.store)) + s.lock.RLock() + for k, v := range s.store { + store[k] = v + } + s.lock.RUnlock() + + return &simpleCache{ + store: store, + } +} + +// Get retrieves a cached URI +func (s *simpleCache) Get(uri string) (interface{}, bool) { + s.lock.RLock() + v, ok := s.store[uri] + + s.lock.RUnlock() + return v, ok +} + +// Set caches a URI +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +var ( + // resCache is a package level cache for $ref resolution and expansion. + // It is initialized lazily by methods that have the need for it: no + // memory is allocated unless some expander methods are called. + // + // It is initialized with JSON schema and swagger schema, + // which do not mutate during normal operations. + // + // All subsequent uses of this cache are produced from a shallow + // clone of this initial version. + resCache *simpleCache + onceCache sync.Once + + _ ResolutionCache = &simpleCache{} +) + +// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. +func initResolutionCache() { + resCache = defaultResolutionCache() +} + +func defaultResolutionCache() *simpleCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} + +func cacheOrDefault(cache ResolutionCache) ResolutionCache { + onceCache.Do(initResolutionCache) + + if cache != nil { + return cache + } + + // get a shallow clone of the base cache with swagger and json schema + return resCache.ShallowClone() +} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 000000000..2f7bb219b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,57 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// ContactInfo holds contact information for the exposed API. 
+// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + ContactInfoProps + VendorExtensible +} + +// ContactInfoProps holds the properties of a ContactInfo object +type ContactInfoProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} + +// UnmarshalJSON hydrates ContactInfo from json +func (c *ContactInfo) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { + return err + } + return json.Unmarshal(data, &c.VendorExtensible) +} + +// MarshalJSON produces ContactInfo as json +func (c ContactInfo) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(c.ContactInfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(c.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go new file mode 100644 index 000000000..fc889f6d0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/debug.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "log" + "os" + "path" + "runtime" +) + +// Debug is true when the SWAGGER_DEBUG env var is not empty. +// +// It enables more verbose logging of this package. +var Debug = os.Getenv("SWAGGER_DEBUG") != "" + +var ( + // specLogger is a debug logger for this package + specLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go new file mode 100644 index 000000000..6992c7ba7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/errors.go @@ -0,0 +1,19 @@ +package spec + +import "errors" + +// Error codes +var ( + // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type + ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") + + // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer + ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") + + // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. 
+ // At the moment, $refs are supported only inside: schemas, parameters, responses, path items + ErrDerefUnsupportedType = errors.New("deref: unsupported type") + + // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type + ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") +) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 000000000..d4ea889d4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,594 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" +) + +// ExpandOptions provides options for the spec expander. +// +// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. +// +// If left empty, the root document is assumed to be located in the current working directory: +// all relative $ref's will be resolved from there. +// +// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. +// +type ExpandOptions struct { + RelativeBase string // the path to the root document to expand. 
This is a file, not a directory + SkipSchemas bool // do not expand schemas, just paths, parameters and responses + ContinueOnError bool // continue expanding even after an error is found + PathLoader func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document + AbsoluteCircularRef bool // circular $refs remaining after expansion remain absolute URLs +} + +func optionsOrDefault(opts *ExpandOptions) *ExpandOptions { + if opts != nil { + clone := *opts // shallow clone to avoid internal changes to be propagated to the caller + if clone.RelativeBase != "" { + clone.RelativeBase = normalizeBase(clone.RelativeBase) + } + // if the relative base is empty, let the schema loader choose a pseudo root document + return &clone + } + return &ExpandOptions{} +} + +// ExpandSpec expands the references in a swagger spec +func ExpandSpec(spec *Swagger, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(spec, options, nil, nil) + + specBasePath := options.RelativeBase + + if !options.SkipSchemas { + for key, definition := range spec.Definitions { + parentRefs := make([]string, 0, 10) + parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key)) + + def, err := expandSchema(definition, parentRefs, resolver, specBasePath) + if resolver.shouldStopOnError(err) { + return err + } + if def != nil { + spec.Definitions[key] = *def + } + } + } + + for key := range spec.Parameters { + parameter := spec.Parameters[key] + if err := expandParameterOrResponse(&parameter, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Parameters[key] = parameter + } + + for key := range spec.Responses { + response := spec.Responses[key] + if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Responses[key] = response + } + + if spec.Paths != nil { + for key := range spec.Paths.Paths { + pth := spec.Paths.Paths[key] + if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Paths.Paths[key] = pth + } + } + + return nil +} + +const rootBase = ".root" + +// baseForRoot loads the root document into the cache and produces a fake ".root" base path entry +// for further $ref resolution +// +// Setting the cache is optional and this parameter may safely be left to nil. +func baseForRoot(root interface{}, cache ResolutionCache) string { + if root == nil { + return "" + } + + // cache the root document to resolve $ref's + normalizedBase := normalizeBase(rootBase) + cache.Set(normalizedBase, root) + + return normalizedBase +} + +// ExpandSchema expands the refs in the schema object with reference to the root object. +// +// go-openapi/validate uses this function. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandSchemaWithBasePath to resolve external references). +// +// Setting the cache is optional and this parameter may safely be left to nil. 
+func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + if root == nil { + root = schema + } + + opts := &ExpandOptions{ + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + } + + return ExpandSchemaWithBasePath(schema, cache, opts) +} + +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. +// +// Setting the cache is optional and this parameter may safely be left to nil. +func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { + if schema == nil { + return nil + } + + cache = cacheOrDefault(cache) + + opts = optionsOrDefault(opts) + + resolver := defaultSchemaLoader(nil, opts, cache, nil) + + parentRefs := make([]string, 0, 10) + s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) + if err != nil { + return err + } + if s != nil { + // guard for when continuing on error + *schema = *s + } + + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Items == nil { + return &target, nil + } + + // array + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + + // tuple + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Ref.String() == "" && target.Ref.IsRoot() { + newRef := normalizeRef(&target.Ref, basePath) + target.Ref = *newRef + return &target, nil + } + + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's + if target.ID != "" { + basePath, _ = resolver.setSchemaID(target, target.ID, basePath) + } + + if target.Ref.String() != "" { + return expandSchemaRef(target, parentRefs, resolver, basePath) + } + + for k := range target.Definitions { + tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if tt != nil { + target.Definitions[k] = *tt + } + } + + t, err := expandItems(target, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target = *t + } + + for i := range target.AllOf { + t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AllOf[i] = *t + } + } + + for i := range target.AnyOf { + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.AnyOf[i] = *t + } + } + + for i := range target.OneOf { + t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t + } + } + + if target.Not != nil { + t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + 
} + if t != nil { + *target.Not = *t + } + } + + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Properties[k] = *t + } + } + + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t + } + } + + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t + } + } + + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t + } + } + } + + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t + } + } + return &target, nil +} + +func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + // if a Ref is found, all sibling fields are skipped + // Ref also changes the resolution scope of children expandSchema + + // here the resolution scope is changed because a $ref was encountered + normalizedRef := normalizeRef(&target.Ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. We leave them as ref. 
+	// - denormalization means that a new local file ref is set relative to the original basePath
+		debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s",
+			basePath, normalizedBasePath, normalizedRef.String())
+		if !resolver.options.AbsoluteCircularRef {
+			target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID)
+		} else {
+			target.Ref = *normalizedRef
+		}
+		return &target, nil
+	}
+
+	var t *Schema
+	err := resolver.Resolve(&target.Ref, &t, basePath)
+	if resolver.shouldStopOnError(err) {
+		return nil, err
+	}
+
+	if t == nil {
+		// guard for when continuing on error
+		return &target, nil
+	}
+
+	parentRefs = append(parentRefs, normalizedRef.String())
+	transitiveResolver := resolver.transitiveResolver(basePath, target.Ref)
+
+	basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath)
+
+	return expandSchema(*t, parentRefs, transitiveResolver, basePath)
+}
+
+func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error {
+	if pathItem == nil {
+		return nil
+	}
+
+	parentRefs := make([]string, 0, 10)
+	if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) {
+		return err
+	}
+
+	if pathItem.Ref.String() != "" {
+		transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref)
+		basePath = transitiveResolver.updateBasePath(resolver, basePath)
+		resolver = transitiveResolver
+	}
+
+	pathItem.Ref = Ref{}
+	for i := range pathItem.Parameters {
+		if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+	}
+
+	ops := []*Operation{
+		pathItem.Get,
+		pathItem.Head,
+		pathItem.Options,
+		pathItem.Put,
+		pathItem.Post,
+		pathItem.Patch,
+		pathItem.Delete,
+	}
+	for _, op := range ops {
+		if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error {
+	if op == nil {
+		return nil
+	}
+
+	for i := range op.Parameters {
+		param := op.Parameters[i]
+		if err := expandParameterOrResponse(&param, resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		op.Parameters[i] = param
+	}
+
+	if op.Responses == nil {
+		return nil
+	}
+
+	responses := op.Responses
+	if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) {
+		return err
+	}
+
+	for code := range responses.StatusCodeResponses {
+		response := responses.StatusCodeResponses[code]
+		if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		responses.StatusCodeResponses[code] = response
+	}
+
+	return nil
+}
+
+// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document
+//
+// Notice that it is impossible to reference a json schema in a different document other than root
+// (use ExpandResponse to resolve external references).
+//
+// Setting the cache is optional and this parameter may safely be left to nil.
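+//
+// A minimal usage sketch (assuming "doc" is a *Swagger previously
+// unmarshalled from the root document and "resp" is one of its responses;
+// both names are illustrative):
+//
+//	// nil cache: a default in-memory cache is allocated
+//	if err := ExpandResponseWithRoot(&resp, &doc, nil); err != nil {
+//		return err
+//	}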
+func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandResponse expands a response based on a basepath +// +// All refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. +// +// Notice that it is impossible to reference a json schema in a different document other than root +// (use ExpandParameter to resolve external references). +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + cache = cacheOrDefault(cache) + + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + } + resolver := defaultSchemaLoader(root, opts, cache, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + opts := optionsOrDefault(&ExpandOptions{ + RelativeBase: basePath, + }) + resolver := defaultSchemaLoader(nil, opts, nil, nil) + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ( + ref *Ref + sch *Schema + ) + + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) + } + + return ref, sch, nil +} + +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, _, err := getRefAndSchema(input) + if err != nil { + return err + } + + if ref == nil { + return nil + } + + parentRefs := make([]string, 0, 10) + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + ref, sch, _ := getRefAndSchema(input) + if ref.String() != "" { + transitiveResolver := resolver.transitiveResolver(basePath, *ref) + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver + } + + if sch == nil { + // nothing to be expanded + if ref != nil { + *ref = Ref{} + } + return nil + } + + if sch.Ref.String() != "" { + rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) + if ern != nil { + return ern + } + + switch { + case resolver.isCircular(&rebasedRef, basePath, parentRefs...): + // this is a circular $ref: stop expansion + if !resolver.options.AbsoluteCircularRef { + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } else { + sch.Ref = rebasedRef + } + case !resolver.options.SkipSchemas: + // schema expanded to a $ref in another root + sch.Ref = 
rebasedRef + debugLog("rebased to: %s", sch.Ref.String()) + default: + // skip schema expansion but rebase $ref to schema + sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + } + } + + if ref != nil { + *ref = Ref{} + } + + // expand schema + if !resolver.options.SkipSchemas { + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + if s == nil { + // guard for when continuing on error + return nil + } + *sch = *s + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 000000000..88add91b2 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 000000000..9dfd17b18 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,203 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package spec
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+const (
+	jsonArray = "array"
+)
+
+// HeaderProps describes a response header
+type HeaderProps struct {
+	Description string `json:"description,omitempty"`
+}
+
+// Header describes a header for a response of the API
+//
+// For more information: http://goo.gl/8us55a#headerObject
+type Header struct {
+	CommonValidations
+	SimpleSchema
+	VendorExtensible
+	HeaderProps
+}
+
+// ResponseHeader creates a new header instance for use in a response
+func ResponseHeader() *Header {
+	return new(Header)
+}
+
+// WithDescription sets the description on this response, allows for chaining
+func (h *Header) WithDescription(description string) *Header {
+	h.Description = description
+	return h
+}
+
+// Typed a fluent builder method for the type of parameter
+func (h *Header) Typed(tpe, format string) *Header {
+	h.Type = tpe
+	h.Format = format
+	return h
+}
+
+// CollectionOf a fluent builder method for an array item
+func (h *Header) CollectionOf(items *Items, format string) *Header {
+	h.Type = jsonArray
+	h.Items = items
+	h.CollectionFormat = format
+	return h
+}
+
+// WithDefault sets the default value on this item
+func (h *Header) WithDefault(defaultValue interface{}) *Header {
+	h.Default = defaultValue
+	return h
+}
+
+// WithMaxLength sets a max length value
+func (h *Header) WithMaxLength(max int64) *Header {
+	h.MaxLength = &max
+	return h
+}
+
+// WithMinLength sets a min length value
+func (h *Header) WithMinLength(min int64) *Header {
+	h.MinLength = &min
+	return h
+}
+
+// WithPattern sets a pattern value
+func (h *Header) WithPattern(pattern string) *Header {
+	h.Pattern = pattern
+	return h
+}
+
+// WithMultipleOf sets a multiple of value
+func (h *Header) WithMultipleOf(number float64) *Header {
+	h.MultipleOf = &number
+	return h
+}
+
+// WithMaximum sets a maximum number value
+func (h *Header) WithMaximum(max float64, exclusive bool) *Header {
+	h.Maximum = &max
+	h.ExclusiveMaximum = exclusive
+	return h
+}
+
+// WithMinimum sets a minimum number value
+func (h *Header) WithMinimum(min float64, exclusive bool) *Header {
+	h.Minimum = &min
+	h.ExclusiveMinimum = exclusive
+	return h
+}
+
+// WithEnum sets the enum values (replace)
+func (h *Header) WithEnum(values ...interface{}) *Header {
+	h.Enum = append([]interface{}{}, values...)
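+	// note: appending onto a fresh empty slice copies the input, so later
+	// changes to the caller's values slice do not leak into this header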
+ return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// WithValidations is a fluent method to set header validations +func (h *Header) WithValidations(val CommonValidations) *Header { + h.SetValidations(SchemaValidations{CommonValidations: val}) + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +// JSONLookup look up a value by the json property name +func (h Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := h.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 000000000..582f0fd4c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,184 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package spec
+
+import (
+	"encoding/json"
+	"strconv"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// Extensions vendor specific extensions
+type Extensions map[string]interface{}
+
+// Add adds a value to these extensions
+func (e Extensions) Add(key string, value interface{}) {
+	realKey := strings.ToLower(key)
+	e[realKey] = value
+}
+
+// GetString gets a string value from the extensions
+func (e Extensions) GetString(key string) (string, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		str, ok := v.(string)
+		return str, ok
+	}
+	return "", false
+}
+
+// GetInt gets an int value from the extensions
+func (e Extensions) GetInt(key string) (int, bool) {
+	realKey := strings.ToLower(key)
+
+	if v, ok := e.GetString(realKey); ok {
+		if r, err := strconv.Atoi(v); err == nil {
+			return r, true
+		}
+	}
+
+	if v, ok := e[realKey]; ok {
+		if r, rOk := v.(float64); rOk {
+			return int(r), true
+		}
+	}
+	return -1, false
+}
+
+// GetBool gets a bool value from the extensions
+func (e Extensions) GetBool(key string) (bool, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		str, ok := v.(bool)
+		return str, ok
+	}
+	return false, false
+}
+
+// GetStringSlice gets a string slice value from the extensions
+func (e Extensions) GetStringSlice(key string) ([]string, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		arr, isSlice := v.([]interface{})
+		if !isSlice {
+			return nil, false
+		}
+		var strs []string
+		for _, iface := range arr {
+			str, isString := iface.(string)
+			if !isString {
+				return nil, false
+			}
+			strs = append(strs, str)
+		}
+		return strs, ok
+	}
+	return nil, false
+}
+
+// VendorExtensible composition block.
+type VendorExtensible struct {
+	Extensions Extensions
+}
+
+// AddExtension adds an extension to this extensible object
+func (v *VendorExtensible) AddExtension(key string, value interface{}) {
+	if value == nil {
+		return
+	}
+	if v.Extensions == nil {
+		v.Extensions = make(map[string]interface{})
+	}
+	v.Extensions.Add(key, value)
+}
+
+// MarshalJSON marshals the extensions to json
+func (v VendorExtensible) MarshalJSON() ([]byte, error) {
+	toser := make(map[string]interface{})
+	for k, v := range v.Extensions {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			toser[k] = v
+		}
+	}
+	return json.Marshal(toser)
+}
+
+// UnmarshalJSON for this extensible object
+func (v *VendorExtensible) UnmarshalJSON(data []byte) error {
+	var d map[string]interface{}
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+	for k, vv := range d {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			if v.Extensions == nil {
+				v.Extensions = map[string]interface{}{}
+			}
+			v.Extensions[k] = vv
+		}
+	}
+	return nil
+}
+
+// InfoProps the properties for an info definition
+type InfoProps struct {
+	Description    string       `json:"description,omitempty"`
+	Title          string       `json:"title,omitempty"`
+	TermsOfService string       `json:"termsOfService,omitempty"`
+	Contact        *ContactInfo `json:"contact,omitempty"`
+	License        *License     `json:"license,omitempty"`
+	Version        string       `json:"version,omitempty"`
+}
+
+// Info object provides metadata about the API.
+// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
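+//
+// A short sketch of attaching vendor extensions to an Info block (key and
+// value here are made up; only "x-"-prefixed keys survive MarshalJSON):
+//
+//	var info Info
+//	info.AddExtension("x-audience", "internal") // keys are lower-cased by Add
+//	audience, ok := info.Extensions.GetString("x-audience")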
+// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (interface{}, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 000000000..e2afb2133 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,234 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// TypeName return the type (or format) of a simple schema +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +// ItemsTypeName yields the type of items in a simple schema array +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". +// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// AsNullable flags this schema as nullable. 
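+// Like the other fluent methods on Items it returns the receiver, so calls
+// chain; an illustrative sketch (not from the upstream docs):
+//
+//	item := NewItems().Typed("string", "date").AsNullable()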
+func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = jsonArray + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue interface{}) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(max int64) *Items { + i.MaxLength = &max + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(min int64) *Items { + i.MinLength = &min + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(max float64, exclusive bool) *Items { + i.Maximum = &max + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(min float64, exclusive bool) *Items { + i.Minimum = &min + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets a the enum values (replace) +func (i *Items) WithEnum(values ...interface{}) *Items { + i.Enum = append([]interface{}{}, values...) + return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// WithValidations is a fluent method to set Items validations +func (i *Items) WithValidations(val CommonValidations) *Items { + i.SetValidations(SchemaValidations{CommonValidations: val}) + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b4, b3, b1, b2), nil +} + +// JSONLookup look up a value by the json property name +func (i Items) JSONLookup(token string) (interface{}, error) { + if 
token == jsonRef { + return &i.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 000000000..b42f80368 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,56 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// License information for the exposed API. +// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + LicenseProps + VendorExtensible +} + +// LicenseProps holds the properties of a License object +type LicenseProps struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} + +// UnmarshalJSON hydrates License from json +func (l *License) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &l.LicenseProps); err != nil { + return err + } + return json.Unmarshal(data, &l.VendorExtensible) +} + +// MarshalJSON produces License as json +func (l License) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(l.LicenseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(l.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 000000000..e8b600994 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "path" + "strings" +) + +const fileScheme = "file" + +// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. +// +// NOTE(windows): there is a tolerance over the strict URI format on windows. +// +// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like +// 'C:\Path\file.Yaml'. 
+//
+// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path:
+// 'file:///c:/path/file.yaml'
+//
+// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or
+// 'file:///c:\folder\File.json'.
+//
+// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair"
+// is attempted.
+//
+// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()).
+func normalizeURI(refPath, base string) string {
+	refURL, err := parseURL(refPath)
+	if err != nil {
+		specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err)
+		refURL, refPath = repairURI(refPath)
+	}
+
+	fixWindowsURI(refURL, refPath) // noop on non-windows OS
+
+	refURL.Path = path.Clean(refURL.Path)
+	if refURL.Path == "." {
+		refURL.Path = ""
+	}
+
+	r := MustCreateRef(refURL.String())
+	if r.IsCanonical() {
+		return refURL.String()
+	}
+
+	baseURL, _ := parseURL(base)
+	if path.IsAbs(refURL.Path) {
+		baseURL.Path = refURL.Path
+	} else if refURL.Path != "" {
+		baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path)
+	}
+	// copying fragment from ref to base
+	baseURL.Fragment = refURL.Fragment
+
+	return baseURL.String()
+}
+
+// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document.
+//
+// When calling this, we assume that:
+// * $ref is a canonical URI
+// * originalRelativeBase is a canonical URI
+//
+// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected.
+// In this case, expansion stops and normally renders the internal canonical $ref.
+//
+// This internal $ref is eventually rebased to the original RelativeBase used for the expansion.
+//
+// There is a special case for schemas that are anchored with an "id":
+// in that case, the rebasing is performed against the id only if this is an anchor for the initial root document.
+// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing.
+func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref {
+	debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id)
+
+	if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly {
+		// short circuit: $ref to current doc
+		return *ref
+	}
+
+	if id != "" {
+		idBaseURL, err := parseURL(id)
+		if err == nil { // if the schema id is not usable as a URI, ignore it
+			if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchanged (do not want $ref: "")
+				// $ref relative to the ID of the schema in the root document
+				return ref
+			}
+		}
+	}
+
+	originalRelativeBaseURL, _ := parseURL(originalRelativeBase)
+
+	r, _ := rebase(ref, originalRelativeBaseURL, false)
+
+	return r
+}
+
+func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) {
+	var newBase url.URL
+
+	u := ref.GetURL()
+
+	if u.Scheme != v.Scheme || u.Host != v.Host {
+		return *ref, false
+	}
+
+	docPath := v.Path
+	v.Path = path.Dir(v.Path)
+
+	if v.Path == "." {
+		v.Path = ""
+	} else if !strings.HasSuffix(v.Path, "/") {
+		v.Path += "/"
+	}
+
+	newBase.Fragment = u.Fragment
+
+	if strings.HasPrefix(u.Path, docPath) {
+		newBase.Path = strings.TrimPrefix(u.Path, docPath)
+	} else {
+		newBase.Path = strings.TrimPrefix(u.Path, v.Path)
+	}
+
+	if notEqual && newBase.Path == "" && newBase.Fragment == "" {
+		// do not want rebasing to end up in an empty $ref
+		return *ref, false
+	}
+
+	if path.IsAbs(newBase.Path) {
+		// whenever we end up with an absolute path, specify the scheme and host
+		newBase.Scheme = v.Scheme
+		newBase.Host = v.Host
+	}
+
+	return MustCreateRef(newBase.String()), true
+}
+
+// normalizeRef canonicalizes a Ref, using a canonical relativeBase as its absolute anchor
+func normalizeRef(ref *Ref, relativeBase string) *Ref {
+	r := MustCreateRef(normalizeURI(ref.String(), relativeBase))
+	return &r
+}
+
+// normalizeBase performs a normalization of the input base path.
+//
+// This always yields a canonical URI (absolute), usable for the document cache.
+//
+// It ensures that all further internal work on basePath may safely assume
+// a non-empty, cross-platform, canonical URI (i.e. absolute).
+//
+// This normalization tolerates windows paths (e.g. C:\x\y\File.dat) and transforms this
+// into a file:// URL with lower cased drive letter and path.
+//
+// See also: https://en.wikipedia.org/wiki/File_URI_scheme
+func normalizeBase(in string) string {
+	u, err := parseURL(in)
+	if err != nil {
+		specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err)
+		u, in = repairURI(in)
+	}
+
+	u.Fragment = "" // any fragment in the base is irrelevant
+
+	fixWindowsURI(u, in) // noop on non-windows OS
+
+	u.Path = path.Clean(u.Path)
+	if u.Path == "." { // empty after Clean()
+		u.Path = ""
+	}
+
+	if u.Scheme != "" {
+		if path.IsAbs(u.Path) || u.Scheme != fileScheme {
+			// this is absolute or explicitly not a local file: we're good
+			return u.String()
+		}
+	}
+
+	// no scheme or file scheme with relative path: assume file and make it absolute
+	// enforce scheme file://... with absolute path.
+	//
+	// If the input path is relative, we anchor the path to the current working directory.
+	// NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json
+
+	u.Scheme = fileScheme
+	u.Path = absPath(u.Path) // platform-dependent
+	u.RawQuery = ""          // any query component is irrelevant for a base
+	return u.String()
+}
diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
new file mode 100644
index 000000000..2df072315
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go
@@ -0,0 +1,44 @@
+//go:build !windows
+// +build !windows
+
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"net/url"
+	"path/filepath"
+)
+
+// absPath makes a file path absolute and compatible with a URI path component.
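+// On non-windows platforms this amounts to filepath.Abs: a relative input
+// is anchored to the process's current working directory.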
+// +// The parameter must be a path, not an URI. +func absPath(in string) string { + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + return anchored +} + +func repairURI(in string) (*url.URL, string) { + u, _ := parseURL("") + debugLog("repaired URI: original: %q, repaired: %q", in, "") + return u, "" +} + +func fixWindowsURI(u *url.URL, in string) { +} diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go new file mode 100644 index 000000000..a66c532db --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer_windows.go @@ -0,0 +1,154 @@ +// -build windows + +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "net/url" + "os" + "path" + "path/filepath" + "strings" +) + +// absPath makes a file path absolute and compatible with a URI path component +// +// The parameter must be a path, not an URI. +func absPath(in string) string { + // NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths. + // See https://github.com/golang/go/issues/24441 + if in == "" { + in = "." + } + + anchored, err := filepath.Abs(in) + if err != nil { + specLogger.Printf("warning: could not resolve current working directory: %v", err) + return in + } + + pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`) + if !strings.HasPrefix(pth, "/") { + pth = "/" + pth + } + + return path.Clean(pth) +} + +// repairURI tolerates invalid file URIs with common typos +// such as 'file://E:\folder\file', that break the regular URL parser. +// +// Adopting the same defaults as for unixes (e.g. return an empty path) would +// result into a counter-intuitive result for that case (e.g. E:\folder\file is +// eventually resolved as the current directory). The repair will detect the missing "/". +// +// Note that this only works for the file scheme. +func repairURI(in string) (*url.URL, string) { + const prefix = fileScheme + "://" + if !strings.HasPrefix(in, prefix) { + // giving up: resolve to empty path + u, _ := parseURL("") + + return u, "" + } + + // attempt the repair, stripping the scheme should be sufficient + u, _ := parseURL(strings.TrimPrefix(in, prefix)) + debugLog("repaired URI: original: %q, repaired: %q", in, u.String()) + + return u, u.String() +} + +// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml +// and makes it a canonical URI: file:///c:/base/file.yaml +// +// Catch 22 notes for Windows: +// +// * There may be a drive letter on windows (it is lower-cased) +// * There may be a share UNC, e.g. \\server\folder\data.xml +// * Paths are case insensitive +// * Paths may already contain slashes +// * Paths must be slashed +// +// NOTE: there is no escaping. "/" may be valid separators just like "\". 
+// We don't use ToSlash() (which escapes everything) because windows now also +// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. +func fixWindowsURI(u *url.URL, in string) { + drive := filepath.VolumeName(in) + + if len(drive) > 0 { + if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter + u.Scheme = fileScheme + u.Host = "" + u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) + } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume + // NOTE: the special host@port syntax for UNC is not supported (yet) + u.Scheme = fileScheme + + // this is a modified version of filepath.Dir() to apply on the VolumeName itself + i := len(drive) - 1 + for i >= 0 && !os.IsPathSeparator(drive[i]) { + i-- + } + host := drive[:i] // \\host\share => host + + u.Path = strings.TrimPrefix(u.Path, host) + u.Host = strings.TrimPrefix(host, `\\`) + } + + u.Opaque = "" + u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) + + // ensure we form an absolute path + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + + u.Path = path.Clean(u.Path) + + return + } + + if u.Scheme == fileScheme { + // Handle dodgy cases for file://{...} URIs on windows. + // A canonical URI should always be followed by an absolute path. + // + // Examples: + // * file:///folder/file => valid, unchanged + // * file:///c:\folder\file => slashed + // * file:///./folder/file => valid, cleaned to remove the dot + // * file:///.\folder\file => remapped to cwd + // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) + // * file:///.. => dodgy, remapped to / (consistent with the behavior on unix) + if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { + // ensure we form an absolute path + u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) + if !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + } + u.Path = strings.ToLower(u.Path) + } + + // NOTE: lower case normalization does not propagate to inner resources, + // generated when rebasing: when joining a relative URI with a file to an absolute base, + // only the base is currently lower-cased. + // + // For now, we assume this is good enough for most use cases + // and try not to generate too many differences + // between the output produced on different platforms. + u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 000000000..995ce6acb --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,397 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"sort"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+func init() {
+	gob.Register(map[string]interface{}{})
+	gob.Register([]interface{}{})
+}
+
+// OperationProps describes an operation
+//
+// NOTES:
+// - schemes, when present must be from [http, https, ws, wss]: see validate
+// - Security is handled as a special case: see MarshalJSON function
+type OperationProps struct {
+	Description  string                 `json:"description,omitempty"`
+	Consumes     []string               `json:"consumes,omitempty"`
+	Produces     []string               `json:"produces,omitempty"`
+	Schemes      []string               `json:"schemes,omitempty"`
+	Tags         []string               `json:"tags,omitempty"`
+	Summary      string                 `json:"summary,omitempty"`
+	ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+	ID           string                 `json:"operationId,omitempty"`
+	Deprecated   bool                   `json:"deprecated,omitempty"`
+	Security     []map[string][]string  `json:"security,omitempty"`
+	Parameters   []Parameter            `json:"parameters,omitempty"`
+	Responses    *Responses             `json:"responses,omitempty"`
+}
+
+// MarshalJSON takes care of serializing operation properties to JSON
+//
+// We use a custom marshaller here to handle a special case related to
+// the Security field. We need to preserve zero length slice
+// while omitting the field when the value is nil/unset.
+func (op OperationProps) MarshalJSON() ([]byte, error) {
+	type Alias OperationProps
+	if op.Security == nil {
+		return json.Marshal(&struct {
+			Security []map[string][]string `json:"security,omitempty"`
+			*Alias
+		}{
+			Security: op.Security,
+			Alias:    (*Alias)(&op),
+		})
+	}
+	return json.Marshal(&struct {
+		Security []map[string][]string `json:"security"`
+		*Alias
+	}{
+		Security: op.Security,
+		Alias:    (*Alias)(&op),
+	})
+}
+
+// Operation describes a single API operation on a path.
+//
+// For more information: http://goo.gl/8us55a#operationObject
+type Operation struct {
+	VendorExtensible
+	OperationProps
+}
+
+// SuccessResponse gets a success response model
+func (o *Operation) SuccessResponse() (*Response, int, bool) {
+	if o.Responses == nil {
+		return nil, 0, false
+	}
+
+	responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses))
+	for k := range o.Responses.StatusCodeResponses {
+		if k >= 200 && k < 300 {
+			responseCodes = append(responseCodes, k)
+		}
+	}
+	if len(responseCodes) > 0 {
+		sort.Ints(responseCodes)
+		v := o.Responses.StatusCodeResponses[responseCodes[0]]
+		return &v, responseCodes[0], true
+	}
+
+	return o.Responses.Default, 0, false
+}
+
+// JSONLookup look up a value by the json property name
+func (o Operation) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := o.Extensions[token]; ok {
+		return &ex, nil
+	}
+	r, _, err := jsonpointer.GetForToken(o.OperationProps, token)
+	return r, err
+}
+
+// UnmarshalJSON hydrates this operation instance with the data from JSON
+func (o *Operation) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &o.OperationProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &o.VendorExtensible)
+}
+
+// MarshalJSON converts this operation object to JSON
+func (o Operation) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(o.OperationProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(o.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b1, b2)
+	return concated, nil
+}
+
+// NewOperation creates a new operation instance.
+// It expects an ID as parameter but not passing an ID is also valid.
+func NewOperation(id string) *Operation {
+	op := new(Operation)
+	op.ID = id
+	return op
+}
+
+// WithID sets the ID property on this operation, allows for chaining.
+func (o *Operation) WithID(id string) *Operation {
+	o.ID = id
+	return o
+}
+
+// WithDescription sets the description on this operation, allows for chaining
+func (o *Operation) WithDescription(description string) *Operation {
+	o.Description = description
+	return o
+}
+
+// WithSummary sets the summary on this operation, allows for chaining
+func (o *Operation) WithSummary(summary string) *Operation {
+	o.Summary = summary
+	return o
+}
+
+// WithExternalDocs sets/removes the external docs for/from this operation.
+// When you pass empty strings as params the external documents will be removed.
+// When you pass a non-empty string as one value then those values will be used on the external docs object.
+// So when you pass a non-empty description, you should also pass the url and vice versa.
+func (o *Operation) WithExternalDocs(description, url string) *Operation {
+	if description == "" && url == "" {
+		o.ExternalDocs = nil
+		return o
+	}
+
+	if o.ExternalDocs == nil {
+		o.ExternalDocs = &ExternalDocumentation{}
+	}
+	o.ExternalDocs.Description = description
+	o.ExternalDocs.URL = url
+	return o
+}
+
+// Deprecate marks the operation as deprecated
+func (o *Operation) Deprecate() *Operation {
+	o.Deprecated = true
+	return o
+}
+
+// Undeprecate marks the operation as not deprecated
+func (o *Operation) Undeprecate() *Operation {
+	o.Deprecated = false
+	return o
+}
+
+// WithConsumes adds media types for incoming body values
+func (o *Operation) WithConsumes(mediaTypes ...string) *Operation {
+	o.Consumes = append(o.Consumes, mediaTypes...)
+	return o
+}
+
+// WithProduces adds media types for outgoing body values
+func (o *Operation) WithProduces(mediaTypes ...string) *Operation {
+	o.Produces = append(o.Produces, mediaTypes...)
+	return o
+}
+
+// WithTags adds tags for this operation
+func (o *Operation) WithTags(tags ...string) *Operation {
+	o.Tags = append(o.Tags, tags...)
+	return o
+}
+
+// AddParam adds a parameter to this operation, when a parameter for that location
+// and with that name already exists it will be replaced
+func (o *Operation) AddParam(param *Parameter) *Operation {
+	if param == nil {
+		return o
+	}
+
+	for i, p := range o.Parameters {
+		if p.Name == param.Name && p.In == param.In {
+			params := append(o.Parameters[:i], *param)
+			params = append(params, o.Parameters[i+1:]...)
+			o.Parameters = params
+			return o
+		}
+	}
+
+	o.Parameters = append(o.Parameters, *param)
+	return o
+}
+
+// RemoveParam removes a parameter from the operation
+func (o *Operation) RemoveParam(name, in string) *Operation {
+	for i, p := range o.Parameters {
+		if p.Name == name && p.In == in {
+			o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...)
+			return o
+		}
+	}
+	return o
+}
+
+// SecuredWith adds a security scope to this operation.
+func (o *Operation) SecuredWith(name string, scopes ...string) *Operation {
+	o.Security = append(o.Security, map[string][]string{name: scopes})
+	return o
+}
+
+// WithDefaultResponse adds a default response to the operation.
+// Passing a nil value will remove the response
+func (o *Operation) WithDefaultResponse(response *Response) *Operation {
+	return o.RespondsWith(0, response)
+}
+
+// RespondsWith adds a status code response to the operation.
+// When the code is 0 the value of the response will be used as default response value. +// When the value of the response is nil it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) 
+ } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 000000000..2b2b89b67 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,326 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter, this is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter, this is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} +} + +// FormDataParam creates a body parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a body parameter +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +} + +// ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types. +// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. 
This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// * Header - Custom headers that are expected as part of the request. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
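+//
+// A small construction sketch using the builders in this file (names and
+// values are illustrative):
+//
+//	limit := QueryParam("limit").Typed("integer", "int32")
+//	id := PathParam("itemId").Typed("string", "")
+//	upload := FileParam("archive") // a formData parameter of type "file"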
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = jsonArray + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false + return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(max int64) *Parameter { + p.MaxLength = &max + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(min int64) *Parameter { + p.MinLength = &min + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { + p.Maximum = &max + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter {
+	p.Minimum = &min
+	p.ExclusiveMinimum = exclusive
+	return p
+}
+
+// WithEnum sets the enum values (replace)
+func (p *Parameter) WithEnum(values ...interface{}) *Parameter {
+	p.Enum = append([]interface{}{}, values...)
+	return p
+}
+
+// WithMaxItems sets the max items
+func (p *Parameter) WithMaxItems(size int64) *Parameter {
+	p.MaxItems = &size
+	return p
+}
+
+// WithMinItems sets the min items
+func (p *Parameter) WithMinItems(size int64) *Parameter {
+	p.MinItems = &size
+	return p
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (p *Parameter) UniqueValues() *Parameter {
+	p.UniqueItems = true
+	return p
+}
+
+// AllowDuplicates this array can have duplicates
+func (p *Parameter) AllowDuplicates() *Parameter {
+	p.UniqueItems = false
+	return p
+}
+
+// WithValidations is a fluent method to set parameter validations
+func (p *Parameter) WithValidations(val CommonValidations) *Parameter {
+	p.SetValidations(SchemaValidations{CommonValidations: val})
+	return p
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (p *Parameter) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &p.CommonValidations); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.Refable); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.SimpleSchema); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &p.ParamProps)
+}
+
+// MarshalJSON converts this items object to JSON
+func (p Parameter) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(p.CommonValidations)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(p.SimpleSchema)
+	if err != nil {
+		return nil, err
+	}
+	b3, err := json.Marshal(p.Refable)
+	if err != nil {
+		return nil, err
+	}
+	b4, err := json.Marshal(p.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	b5, err := json.Marshal(p.ParamProps)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b3, b1, b2, b4, b5), nil
+}
diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go
new file mode 100644
index 000000000..68fc8e901
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/path_item.go
@@ -0,0 +1,87 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
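Before moving on to path_item.go, a brief sketch of the fluent builder chain on Parameter vendored above; the `page` parameter and its bounds are illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// WithDefault implies AsOptional, so Required ends up false
	// even though validations are attached.
	page := spec.QueryParam("page").
		Typed("integer", "int32").
		WithDefault(1).
		WithMinimum(1, false).
		WithMaximum(1000, false)

	fmt.Println(page.Required, *page.Minimum, *page.Maximum) // false 1 1000
}
```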
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// PathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. +// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.PathItemProps) +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 000000000..9dc82a290 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,97 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
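A minimal sketch of PathItem from path_item.go above, wiring one Operation per HTTP verb. This assumes `spec.NewOperation` from the package's operation.go, which is not part of this hunk:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	pi := spec.PathItem{
		PathItemProps: spec.PathItemProps{
			Get:  spec.NewOperation("listItems"),
			Post: spec.NewOperation("createItem"),
		},
	}
	b, _ := json.Marshal(pi)
	// roughly: {"get":{"operationId":"listItems"},"post":{"operationId":"createItem"}}
	fmt.Println(string(b))
}
```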
+// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (interface{}, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go new file mode 100644 index 000000000..91d2435f0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -0,0 +1,91 @@ +package spec + +import ( + "bytes" + "encoding/json" + "reflect" + "sort" +) + +// OrderSchemaItem holds a named schema (e.g. from a property of an object) +type OrderSchemaItem struct { + Name string + Schema +} + +// OrderSchemaItems is a sortable slice of named schemas. +// The ordering is defined by the x-order schema extension. +type OrderSchemaItems []OrderSchemaItem + +// MarshalJSON produces a json object with keys defined by the name schemas +// of the OrderSchemaItems slice, keeping the original order of the slice. 
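The custom (un)marshalling for Paths in paths.go above routes keys by prefix: entries starting with "/" become path items, "x-" entries become vendor extensions, and anything else is dropped. A minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	raw := []byte(`{"/items":{"get":{}},"x-internal":true}`)

	var p spec.Paths
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	// "/items" landed in Paths, "x-internal" in Extensions.
	fmt.Println(len(p.Paths), p.Extensions["x-internal"]) // 1 true
}
```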
+func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { + buf := bytes.NewBuffer(nil) + buf.WriteString("{") + for i := range items { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString("\"") + buf.WriteString(items[i].Name) + buf.WriteString("\":") + bs, err := json.Marshal(&items[i].Schema) + if err != nil { + return nil, err + } + buf.Write(bs) + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +func (items OrderSchemaItems) Len() int { return len(items) } +func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } +func (items OrderSchemaItems) Less(i, j int) (ret bool) { + ii, oki := items[i].Extensions.GetInt("x-order") + ij, okj := items[j].Extensions.GetInt("x-order") + if oki { + if okj { + defer func() { + if err := recover(); err != nil { + defer func() { + if err = recover(); err != nil { + ret = items[i].Name < items[j].Name + } + }() + ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() + } + }() + return ii < ij + } + return true + } else if okj { + return false + } + return items[i].Name < items[j].Name +} + +// SchemaProperties is a map representing the properties of a Schema object. +// It knows how to transform its keys into an ordered slice. +type SchemaProperties map[string]Schema + +// ToOrderedSchemaItems transforms the map of properties into a sortable slice +func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { + items := make(OrderSchemaItems, 0, len(properties)) + for k, v := range properties { + items = append(items, OrderSchemaItem{ + Name: k, + Schema: v, + }) + } + sort.Sort(items) + return items +} + +// MarshalJSON produces properties as json, keeping their order. +func (properties SchemaProperties) MarshalJSON() ([]byte, error) { + if properties == nil { + return []byte("null"), nil + } + return json.Marshal(properties.ToOrderedSchemaItems()) +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 000000000..b0ef9bd9c --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,193 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
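properties.go above serializes object properties deterministically: an x-order extension wins when both items carry one, otherwise `Less` falls back to comparing property names. A sketch of the name-fallback case:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	props := spec.SchemaProperties{
		"zebra": *spec.StringProperty(),
		"apple": *spec.Int32Property(),
	}
	// With no x-order extensions, keys are sorted by name,
	// so the output is stable across runs.
	b, _ := json.Marshal(props)
	fmt.Println(string(b)) // "apple" is serialized before "zebra"
}
```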
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"net/http"
+	"os"
+	"path/filepath"
+
+	"github.com/go-openapi/jsonreference"
+)
+
+// Refable is a struct for things that accept a $ref property
+type Refable struct {
+	Ref Ref
+}
+
+// MarshalJSON marshals the ref to json
+func (r Refable) MarshalJSON() ([]byte, error) {
+	return r.Ref.MarshalJSON()
+}
+
+// UnmarshalJSON unmarshals the ref from json
+func (r *Refable) UnmarshalJSON(d []byte) error {
+	return json.Unmarshal(d, &r.Ref)
+}
+
+// Ref represents a json reference that is potentially resolved
+type Ref struct {
+	jsonreference.Ref
+}
+
+// RemoteURI gets the remote uri part of the ref
+func (r *Ref) RemoteURI() string {
+	if r.String() == "" {
+		return ""
+	}
+
+	u := *r.GetURL()
+	u.Fragment = ""
+	return u.String()
+}
+
+// IsValidURI returns true when the url the ref points to can be found
+func (r *Ref) IsValidURI(basepaths ...string) bool {
+	if r.String() == "" {
+		return true
+	}
+
+	v := r.RemoteURI()
+	if v == "" {
+		return true
+	}
+
+	if r.HasFullURL {
+		//nolint:noctx,gosec
+		rr, err := http.Get(v)
+		if err != nil {
+			return false
+		}
+		defer rr.Body.Close()
+
+		return rr.StatusCode/100 == 2
+	}
+
+	if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
+		return false
+	}
+
+	// check for local file
+	pth := v
+	if r.HasURLPathOnly {
+		base := "."
+		if len(basepaths) > 0 {
+			base = filepath.Dir(filepath.Join(basepaths...))
+		}
+		p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth)))
+		if e != nil {
+			return false
+		}
+		pth = p
+	}
+
+	fi, err := os.Stat(filepath.ToSlash(pth))
+	if err != nil {
+		return false
+	}
+
+	return !fi.IsDir()
+}
+
+// Inherits creates a new reference from a parent and a child
+// If the child cannot inherit from the parent, an error is returned
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+	ref, err := r.Ref.Inherits(child.Ref)
+	if err != nil {
+		return nil, err
+	}
+	return &Ref{Ref: *ref}, nil
+}
+
+// NewRef creates a new instance of a ref object
+// returns an error when the reference uri is an invalid uri
+func NewRef(refURI string) (Ref, error) {
+	ref, err := jsonreference.New(refURI)
+	if err != nil {
+		return Ref{}, err
+	}
+	return Ref{Ref: ref}, nil
+}
+
+// MustCreateRef creates a ref object but panics when refURI is invalid.
+// Use the NewRef method for a version that returns an error.
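The Ref helpers above in use (the URL below is illustrative); `MustCreateRef` follows immediately below:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ref, err := spec.NewRef("https://example.com/schemas/pet.json#/definitions/Pet")
	if err != nil {
		panic(err)
	}
	// RemoteURI strips the fragment, keeping only the document location.
	fmt.Println(ref.RemoteURI()) // https://example.com/schemas/pet.json

	// MustCreateRef panics on a malformed URI; it suits static ref literals.
	local := spec.MustCreateRef("#/definitions/Pet")
	fmt.Println(local.String())
}
```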
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + +func (r *Ref) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go new file mode 100644 index 000000000..47d1ee13f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/resolver.go @@ -0,0 +1,127 @@ +package spec + +import ( + "fmt" + + "github.com/go-openapi/swag" +) + +func resolveAnyWithBase(root interface{}, ref *Ref, result interface{}, options *ExpandOptions) error { + options = optionsOrDefault(options) + resolver := defaultSchemaLoader(root, options, nil, nil) + + if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { + return err + } + + return nil +} + +// ResolveRefWithBase resolves a reference against a context root with preservation of base path +func ResolveRefWithBase(root interface{}, ref *Ref, options *ExpandOptions) (*Schema, error) { + result := new(Schema) + + if err := resolveAnyWithBase(root, ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveRef resolves a reference for a schema against a context root +// ref is guaranteed to be in root (no need to go to external files) +// +// ResolveRef is ONLY called from the code generation module +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + res, _, err := ref.GetPointer().Get(root) + if err != nil { + return nil, err + } + + switch sch := res.(type) { + case Schema: + return &sch, nil + case *Schema: + return sch, nil + case map[string]interface{}: + newSch := new(Schema) + if err = swag.DynamicJSONToStruct(sch, newSch); err != nil { + return nil, err + } + return newSch, nil + default: + return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference) + } +} + +// ResolveParameterWithBase resolves a parameter reference against a context root and base path +func ResolveParameterWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Parameter, error) { + result := new(Parameter) + + if err := resolveAnyWithBase(root, &ref, result, options); err != nil { + return nil, err + } + + return result, nil +} + +// ResolveParameter resolves a parameter reference against a context root +func ResolveParameter(root 
interface{}, ref Ref) (*Parameter, error) {
+	return ResolveParameterWithBase(root, ref, nil)
+}
+
+// ResolveResponseWithBase resolves a response reference against a context root and base path
+func ResolveResponseWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Response, error) {
+	result := new(Response)
+
+	err := resolveAnyWithBase(root, &ref, result, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveResponse resolves a response reference against a context root
+func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
+	return ResolveResponseWithBase(root, ref, nil)
+}
+
+// ResolvePathItemWithBase resolves a path item reference against a context root and base path
+func ResolvePathItemWithBase(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
+	result := new(PathItem)
+
+	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolvePathItem resolves a path item reference against a context root and base path
+//
+// Deprecated: use ResolvePathItemWithBase instead
+func ResolvePathItem(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) {
+	return ResolvePathItemWithBase(root, ref, options)
+}
+
+// ResolveItemsWithBase resolves a parameter items reference against a context root and base path.
+//
+// NOTE: strictly speaking, this construct is not supported by Swagger 2.0.
+// Similarly, $ref are forbidden in response headers.
+func ResolveItemsWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
+	result := new(Items)
+
+	if err := resolveAnyWithBase(root, &ref, result, options); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ResolveItems resolves a parameter items reference against a context root and base path.
+//
+// Deprecated: use ResolveItemsWithBase instead
+func ResolveItems(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) {
+	return ResolveItemsWithBase(root, ref, options)
+}
diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go
new file mode 100644
index 000000000..0340b60d8
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/response.go
@@ -0,0 +1,152 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// ResponseProps properties specific to a response
+type ResponseProps struct {
+	Description string                 `json:"description"`
+	Schema      *Schema                `json:"schema,omitempty"`
+	Headers     map[string]Header      `json:"headers,omitempty"`
+	Examples    map[string]interface{} `json:"examples,omitempty"`
+}
+
+// Response describes a single response from an API Operation.
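resolver.go above resolves $ref pointers against a context root. A minimal sketch with an in-memory root document ("Pet" and the structure of `root` are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	root := map[string]interface{}{
		"definitions": map[string]interface{}{
			"Pet": map[string]interface{}{"type": "object"},
		},
	}

	ref := spec.MustCreateRef("#/definitions/Pet")
	sch, err := spec.ResolveRef(root, &ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(sch.Type) // [object]
}
```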
+// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps + VendorExtensible +} + +// JSONLookup look up a value by the json property name +func (r Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &r.Ref, nil + } + ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) + return ptr, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + return json.Unmarshal(data, &r.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if r.Ref.String() == "" { + // when there is no $ref, empty description is rendered as an empty string + b1, err = json.Marshal(r.ResponseProps) + } else { + // when there is $ref inside the schema, description should be omitempty-ied + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` + }{ + Description: r.ResponseProps.Description, + Schema: r.ResponseProps.Schema, + Examples: r.ResponseProps.Examples, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} + +// WithDescription sets the description on this response, allows for chaining +func (r *Response) WithDescription(description string) *Response { + r.Description = description + return r +} + +// WithSchema sets the schema on this response, allows for chaining. 
+// Passing a nil argument removes the schema from this response
+func (r *Response) WithSchema(schema *Schema) *Response {
+	r.Schema = schema
+	return r
+}
+
+// AddHeader adds a header to this response
+func (r *Response) AddHeader(name string, header *Header) *Response {
+	if header == nil {
+		return r.RemoveHeader(name)
+	}
+	if r.Headers == nil {
+		r.Headers = make(map[string]Header)
+	}
+	r.Headers[name] = *header
+	return r
+}
+
+// RemoveHeader removes a header from this response
+func (r *Response) RemoveHeader(name string) *Response {
+	delete(r.Headers, name)
+	return r
+}
+
+// AddExample adds an example to this response
+func (r *Response) AddExample(mediaType string, example interface{}) *Response {
+	if r.Examples == nil {
+		r.Examples = make(map[string]interface{})
+	}
+	r.Examples[mediaType] = example
+	return r
+}
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
new file mode 100644
index 000000000..16c3076fe
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/responses.go
@@ -0,0 +1,140 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/go-openapi/swag"
+)
+
+// Responses is a container for the expected responses of an operation.
+// The container maps an HTTP response code to the expected response.
+// It is not expected from the documentation to necessarily cover all possible HTTP response codes,
+// since they may not be known in advance. However, it is expected from the documentation to cover
+// a successful operation response and any known errors.
+//
+// The `default` can be used as a default response object for all HTTP codes that are not covered
+// individually by the specification.
+//
+// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
+// for a successful operation call.
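response.go above is typically driven through its fluent builders. A sketch (the ref "#/definitions/Pet" and the example payload are illustrative; `RefSchema` comes from schema.go later in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	ok := spec.NewResponse().
		WithDescription("a single pet").
		WithSchema(spec.RefSchema("#/definitions/Pet"))
	ok.AddExample("application/json", map[string]interface{}{"name": "rex"})

	b, _ := json.Marshal(ok)
	fmt.Println(string(b))
}
```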
+// +// For more information: http://goo.gl/8us55a#responsesObject +type Responses struct { + VendorExtensible + ResponsesProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (r Responses) JSONLookup(token string) (interface{}, error) { + if token == "default" { + return r.Default, nil + } + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if i, err := strconv.Atoi(token); err == nil { + if scr, ok := r.StatusCodeResponses[i]; ok { + return scr, nil + } + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Responses) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { + r.ResponsesProps = ResponsesProps{} + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Responses) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// ResponsesProps describes all responses for an operation. +// It tells what is the default response and maps all responses with a +// HTTP status code. +type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +// MarshalJSON marshals responses as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + + if v, ok := res["default"]; ok { + var defaultRes Response + if err := json.Unmarshal(v, &defaultRes); err != nil { + return err + } + r.Default = &defaultRes + delete(res, "default") + } + for k, v := range res { + if !strings.HasPrefix(k, "x-") { + var statusCodeResp Response + if err := json.Unmarshal(v, &statusCodeResp); err != nil { + return err + } + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = statusCodeResp + } + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go new file mode 100644 index 000000000..4e9be8576 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -0,0 +1,645 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
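responses.go above splits a responses object into `Default` plus `StatusCodeResponses` keyed by integer status code. A minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	raw := []byte(`{"200":{"description":"ok"},"default":{"description":"unexpected error"}}`)

	var r spec.Responses
	if err := json.Unmarshal(raw, &r); err != nil {
		panic(err)
	}
	// Numeric keys land in StatusCodeResponses, "default" in Default.
	fmt.Println(r.StatusCodeResponses[200].Description) // ok
	fmt.Println(r.Default.Description)                  // unexpected error
}
```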
+ +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]interface{}{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON 
unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := parseURL(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties SchemaProperties `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties SchemaProperties `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
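The property constructors above compose into full object schemas; a minimal sketch (the field names are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	pet := new(spec.Schema).
		Typed("object", "").
		SetProperty("name", *spec.StringProperty()).
		SetProperty("age", *spec.Int32Property()).
		WithRequired("name")

	b, _ := json.Marshal(pet)
	fmt.Println(string(b))
}
```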
+// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + ExtraProps map[string]interface{} `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(max int64) *Schema { + s.MaxProperties = &max + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(min int64) *Schema { + s.MinProperties = &min + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = format + } + return s +} + +// AsNullable flags this schema as nullable. +func (s *Schema) AsNullable() *Schema { + s.Nullable = true + return s +} + +// CollectionOf a fluent builder method for an array parameter +func (s *Schema) CollectionOf(items Schema) *Schema { + s.Type = []string{jsonArray} + s.Items = &SchemaOrArray{Schema: &items} + return s +} + +// WithDefault sets the default value on this parameter +func (s *Schema) WithDefault(defaultValue interface{}) *Schema { + s.Default = defaultValue + return s +} + +// WithRequired flags this parameter as required +func (s *Schema) WithRequired(items ...string) *Schema { + s.Required = items + return s +} + +// AddRequired adds field names to the required properties array +func (s *Schema) AddRequired(items ...string) *Schema { + s.Required = append(s.Required, items...) 
+	return s
+}
+
+// WithMaxLength sets a max length value
+func (s *Schema) WithMaxLength(max int64) *Schema {
+	s.MaxLength = &max
+	return s
+}
+
+// WithMinLength sets a min length value
+func (s *Schema) WithMinLength(min int64) *Schema {
+	s.MinLength = &min
+	return s
+}
+
+// WithPattern sets a pattern value
+func (s *Schema) WithPattern(pattern string) *Schema {
+	s.Pattern = pattern
+	return s
+}
+
+// WithMultipleOf sets a multiple of value
+func (s *Schema) WithMultipleOf(number float64) *Schema {
+	s.MultipleOf = &number
+	return s
+}
+
+// WithMaximum sets a maximum number value
+func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
+	s.Maximum = &max
+	s.ExclusiveMaximum = exclusive
+	return s
+}
+
+// WithMinimum sets a minimum number value
+func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
+	s.Minimum = &min
+	s.ExclusiveMinimum = exclusive
+	return s
+}
+
+// WithEnum sets the enum values (replace)
+func (s *Schema) WithEnum(values ...interface{}) *Schema {
+	s.Enum = append([]interface{}{}, values...)
+	return s
+}
+
+// WithMaxItems sets the max items
+func (s *Schema) WithMaxItems(size int64) *Schema {
+	s.MaxItems = &size
+	return s
+}
+
+// WithMinItems sets the min items
+func (s *Schema) WithMinItems(size int64) *Schema {
+	s.MinItems = &size
+	return s
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (s *Schema) UniqueValues() *Schema {
+	s.UniqueItems = true
+	return s
+}
+
+// AllowDuplicates this array can have duplicates
+func (s *Schema) AllowDuplicates() *Schema {
+	s.UniqueItems = false
+	return s
+}
+
+// AddToAllOf adds a schema to the allOf property
+func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
+	s.AllOf = append(s.AllOf, schemas...)
+	return s
+}
+
+// WithDiscriminator sets the name of the discriminator field
+func (s *Schema) WithDiscriminator(discriminator string) *Schema {
+	s.Discriminator = discriminator
+	return s
+}
+
+// AsReadOnly flags this schema as readonly
+func (s *Schema) AsReadOnly() *Schema {
+	s.ReadOnly = true
+	return s
+}
+
+// AsWritable flags this schema as writeable (not read-only)
+func (s *Schema) AsWritable() *Schema {
+	s.ReadOnly = false
+	return s
+}
+
+// WithExample sets the example for this schema
+func (s *Schema) WithExample(example interface{}) *Schema {
+	s.Example = example
+	return s
+}
+
+// WithExternalDocs sets/removes the external docs for/from this schema.
+// When you pass empty strings as params, the external documents will be removed.
+// When you pass a non-empty string for either value, it is set on the external docs object.
+// So when you pass a non-empty description, you should also pass the url and vice versa.
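A sketch of the validation builders vendored above (the pattern and enum values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	tag := spec.StringProperty().
		WithMinLength(1).
		WithMaxLength(64).
		WithPattern(`^[a-z][a-z0-9-]*$`).
		WithEnum("alpha", "beta")

	fmt.Println(*tag.MinLength, *tag.MaxLength, tag.Enum) // 1 64 [alpha beta]
}
```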
+func (s *Schema) WithExternalDocs(description, url string) *Schema { + if description == "" && url == "" { + s.ExternalDocs = nil + return s + } + + if s.ExternalDocs == nil { + s.ExternalDocs = &ExternalDocumentation{} + } + s.ExternalDocs.Description = description + s.ExternalDocs.URL = url + return s +} + +// WithXMLName sets the xml name for the object +func (s *Schema) WithXMLName(name string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Name = name + return s +} + +// WithXMLNamespace sets the xml namespace for the object +func (s *Schema) WithXMLNamespace(namespace string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Namespace = namespace + return s +} + +// WithXMLPrefix sets the xml prefix for the object +func (s *Schema) WithXMLPrefix(prefix string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Prefix = prefix + return s +} + +// AsXMLAttribute flags this object as xml attribute +func (s *Schema) AsXMLAttribute() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = true + return s +} + +// AsXMLElement flags this object as an xml node +func (s *Schema) AsXMLElement() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = false + return s +} + +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +func (s *Schema) AsWrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = true + return s +} + +// AsUnwrappedXML flags this object as an xml node +func (s *Schema) AsUnwrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = false + return s +} + +// SetValidations defines all schema validations. +// +// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered. 
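SetValidations (just below) and Validations() form a round trip for copying a schema's validation set wholesale; note that the fields named in the comment above (Required, ReadOnly, allOf/anyOf/oneOf/not) are deliberately excluded. A sketch:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	src := spec.StringProperty().WithMinLength(3).WithMaxLength(10)

	// Validations returns a clone of src's validation set; SetValidations
	// applies it to dst, overwriting dst's existing validation fields.
	dst := spec.StringProperty()
	dst.SetValidations(src.Validations())

	fmt.Println(*dst.MinLength, *dst.MaxLength) // 3 10
}
```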
+func (s *Schema) SetValidations(val SchemaValidations) { + s.Maximum = val.Maximum + s.ExclusiveMaximum = val.ExclusiveMaximum + s.Minimum = val.Minimum + s.ExclusiveMinimum = val.ExclusiveMinimum + s.MaxLength = val.MaxLength + s.MinLength = val.MinLength + s.Pattern = val.Pattern + s.MaxItems = val.MaxItems + s.MinItems = val.MinItems + s.UniqueItems = val.UniqueItems + s.MultipleOf = val.MultipleOf + s.Enum = val.Enum + s.MinProperties = val.MinProperties + s.MaxProperties = val.MaxProperties + s.PatternProperties = val.PatternProperties +} + +// WithValidations is a fluent method to set schema validations +func (s *Schema) WithValidations(val SchemaValidations) *Schema { + s.SetValidations(val) + return s +} + +// Validations returns a clone of the validations for this schema +func (s Schema) Validations() SchemaValidations { + return SchemaValidations{ + CommonValidations: CommonValidations{ + Maximum: s.Maximum, + ExclusiveMaximum: s.ExclusiveMaximum, + Minimum: s.Minimum, + ExclusiveMinimum: s.ExclusiveMinimum, + MaxLength: s.MaxLength, + MinLength: s.MinLength, + Pattern: s.Pattern, + MaxItems: s.MaxItems, + MinItems: s.MinItems, + UniqueItems: s.UniqueItems, + MultipleOf: s.MultipleOf, + Enum: s.Enum, + }, + MinProperties: s.MinProperties, + MaxProperties: s.MaxProperties, + PatternProperties: s.PatternProperties, + } +} + +// MarshalJSON marshal this to JSON +func (s Schema) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SchemaProps) + if err != nil { + return nil, fmt.Errorf("schema props %v", err) + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, fmt.Errorf("vendor props %v", err) + } + b3, err := s.Ref.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("ref prop %v", err) + } + b4, err := s.Schema.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("schema prop %v", err) + } + b5, err := json.Marshal(s.SwaggerSchemaProps) + if err != nil { + return nil, fmt.Errorf("common validations %v", err) + } + var b6 []byte + if s.ExtraProps != nil { + jj, err := json.Marshal(s.ExtraProps) + if err != nil { + return nil, fmt.Errorf("extra props %v", err) + } + b6 = jj + } + return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *Schema) UnmarshalJSON(data []byte) error { + props := struct { + SchemaProps + SwaggerSchemaProps + }{} + if err := json.Unmarshal(data, &props); err != nil { + return err + } + + sch := Schema{ + SchemaProps: props.SchemaProps, + SwaggerSchemaProps: props.SwaggerSchemaProps, + } + + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + _ = sch.Ref.fromMap(d) + _ = sch.Schema.fromMap(d) + + delete(d, "$ref") + delete(d, "$schema") + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(d, pn) + } + + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if sch.Extensions == nil { + sch.Extensions = map[string]interface{}{} + } + sch.Extensions[k] = vv + continue + } + if sch.ExtraProps == nil { + sch.ExtraProps = map[string]interface{}{} + } + sch.ExtraProps[k] = vv + } + + *s = sch + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go new file mode 100644 index 000000000..b81175afd --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -0,0 +1,338 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader is a function to use when loading remote refs. +// +// This is a package level default. It may be overridden or bypassed by +// specifying the loader in ExpandOptions. +// +// NOTE: if you are using the go-openapi/loads package, it will override +// this value with its own default (a loader to retrieve YAML documents as +// well as JSON ones). +var PathLoader = func(pth string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(pth) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, to shortcircuit $ref resolution. + // + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. + circulars map[string]bool + basePath string + loadDoc func(string) (json.RawMessage, error) + rootID string +} + +func newResolverContext(options *ExpandOptions) *resolverContext { + expandOptions := optionsOrDefault(options) + + // path loader may be overridden by options + var loader func(string) (json.RawMessage, error) + if expandOptions.PathLoader == nil { + loader = PathLoader + } else { + loader = expandOptions.PathLoader + } + + return &resolverContext{ + circulars: make(map[string]bool), + basePath: expandOptions.RelativeBase, // keep the root base path in context + loadDoc: loader, + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader { + if ref.IsRoot() || ref.HasFragmentOnly { + return r + } + + baseRef := MustCreateRef(basePath) + currentRef := normalizeRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r + } + + // set a new root against which to resolve + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + + return defaultSchemaLoader(root, newOptions, r.cache, r.context) +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + if transitive.options != nil && transitive.options.RelativeBase != "" { + return normalizeBase(transitive.options.RelativeBase) + } + } + + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return ErrResolveRefNeedsAPointer + } + + if ref.GetURL() == nil 
{ + return nil + } + + var ( + res interface{} + data interface{} + err error + ) + + // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means + // it is pointing somewhere in the root. + root := r.root + if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { + if baseRef, erb := NewRef(basePath); erb == nil { + root, _, _, _ = r.load(baseRef.GetURL()) + } + } + + if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { + data = root + } else { + baseRef := normalizeRef(ref, basePath) + data, _, _, err = r.load(baseRef.GetURL()) + if err != nil { + return err + } + } + + res = data + if ref.String() != "" { + res, _, err = ref.GetPointer().Get(data) + if err != nil { + return err + } + } + return swag.DynamicJSONToStruct(res, target) +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) + toFetch := *refURL + toFetch.Fragment = "" + + var err error + pth := toFetch.String() + normalized := normalizeBase(pth) + debugLog("loading doc from: %s", normalized) + + unescaped, err := url.PathUnescape(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + u := url.URL{Path: unescaped} + + data, fromCache := r.cache.Get(u.RequestURI()) + if fromCache { + return data, toFetch, fromCache, nil + } + + b, err := r.context.loadDoc(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + var doc interface{} + if err := json.Unmarshal(b, &doc); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(normalized, doc) + + return doc, toFetch, fromCache, nil +} + +// isCircular detects cycles in sequences of $ref. +// +// It relies on a private context (which needs not be locked). +func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizeURI(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = swag.ContainsStrings(parentRefs, normalizedRef) // normalized windows url's are lower cased + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +// Resolve resolves a reference against basePath and stores the result in target. +// +// Resolve is not in charge of following references: it only resolves ref by following its URL. +// +// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them. +// +// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { + return r.resolveRef(ref, target, basePath) +} + +func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType) + } + + curRef := ref.String() + if curRef == "" { + return nil + } + + normalizedRef := normalizeRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) 
{ + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + if ref.String() == "" || ref.String() == curRef { + // done with rereferencing + return nil + } + + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func (r *schemaLoader) setSchemaID(target interface{}, id, basePath string) (string, string) { + debugLog("schema has ID: %s", id) + + // handling the case when id is a folder + // remember that basePath has to point to a file + var refPath string + if strings.HasSuffix(id, "/") { + // ensure this is detected as a file, not a folder + refPath = fmt.Sprintf("%s%s", id, "placeholder.json") + } else { + refPath = id + } + + // updates the current base path + // * important: ID can be a relative path + // * registers target to be fetchable from the new base proposed by this id + newBasePath := normalizeURI(refPath, basePath) + + // store found IDs for possible future reuse in $ref + r.cache.Set(newBasePath, target) + + // the root document has an ID: all $ref relative to that ID may + // be rebased relative to the root document + if basePath == r.context.basePath { + debugLog("root document is a schema with ID: %s (normalized as:%s)", id, newBasePath) + r.context.rootID = newBasePath + } + + return newBasePath, refPath +} + +func defaultSchemaLoader( + root interface{}, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) *schemaLoader { + + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + + cache = cacheOrDefault(cache) + + if expandOptions.RelativeBase == "" { + // if no relative base is provided, assume the root document + // contains all $ref, or at least, that the relative documents + // may be resolved from the current working directory. + expandOptions.RelativeBase = baseForRoot(root, cache) + } + debugLog("effective expander options: %#v", expandOptions) + + if context == nil { + context = newResolverContext(expandOptions) + } + + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + } +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 000000000..9d0bdae90 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,170 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
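The loader machinery above resolves every `$ref` through the package-level `PathLoader` unless a caller supplies one via `ExpandOptions.PathLoader`. A minimal sketch of plugging in a custom loader when expanding a spec; the `swagger.json` path and the logging are illustrative assumptions, not part of the vendored code:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/go-openapi/spec"
)

func main() {
	raw, err := os.ReadFile("swagger.json") // hypothetical root document
	if err != nil {
		panic(err)
	}

	var doc spec.Swagger
	if err := json.Unmarshal(raw, &doc); err != nil {
		panic(err)
	}

	// Supply a custom loader for remote $ref documents instead of
	// relying on the package-level PathLoader default.
	opts := &spec.ExpandOptions{
		RelativeBase: "swagger.json",
		PathLoader: func(pth string) (json.RawMessage, error) {
			fmt.Println("resolving $ref document:", pth)
			b, err := os.ReadFile(pth) // a real loader would handle HTTP URLs too
			if err != nil {
				return nil, err
			}
			return json.RawMessage(b), nil
		},
	}
	if err := spec.ExpandSpec(&doc, opts); err != nil {
		panic(err)
	}
}
```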
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
+// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + var ( + b1 []byte + err error + ) + + if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") { + // when oauth2 for implicit or accessCode flows, empty AuthorizationURL is added as empty string + b1, err = json.Marshal(s.SecuritySchemeProps) + } else { + // when not oauth2, empty AuthorizationURL should be omitted + b1, err = json.Marshal(struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 + }{ + Description: s.Description, + Type: s.Type, + Name: s.Name, + In: s.In, + Flow: s.Flow, + AuthorizationURL: s.AuthorizationURL, + TokenURL: s.TokenURL, + Scopes: s.Scopes, + }) + } + if err != nil { + return nil, err + } + + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + return json.Unmarshal(data, &s.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go new file mode 100644 index 000000000..7d38b6e62 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -0,0 +1,78 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" +) + +//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json +//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema +//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
+//go:generate perl -pi -e s,Json,JSON,g bindata.go
+
+const (
+	// SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
+	SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
+	// JSONSchemaURL the url for the json schema schema
+	JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
+)
+
+// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error
+func MustLoadJSONSchemaDraft04() *Schema {
+	d, e := JSONSchemaDraft04()
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// JSONSchemaDraft04 loads the json schema document for json schema draft 04
+func JSONSchemaDraft04() (*Schema, error) {
+	b, err := Asset("jsonschema-draft-04.json")
+	if err != nil {
+		return nil, err
+	}
+
+	schema := new(Schema)
+	if err := json.Unmarshal(b, schema); err != nil {
+		return nil, err
+	}
+	return schema, nil
+}
+
+// MustLoadSwagger20Schema panics when Swagger20Schema returns an error
+func MustLoadSwagger20Schema() *Schema {
+	d, e := Swagger20Schema()
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
+func Swagger20Schema() (*Schema, error) {
+
+	b, err := Asset("v2/schema.json")
+	if err != nil {
+		return nil, err
+	}
+
+	schema := new(Schema)
+	if err := json.Unmarshal(b, schema); err != nil {
+		return nil, err
+	}
+	return schema, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go
new file mode 100644
index 000000000..44722ffd5
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/swagger.go
@@ -0,0 +1,448 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// Swagger this is the root document object for the API specification.
+// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier)
+// together into one document.
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) 
+		return b.Bytes(), err
+	}
+
+	raw.Security = make([]map[string]struct {
+		List []string
+		Pad  bool
+	}, 0, len(o.Security))
+	for _, req := range o.Security {
+		v := make(map[string]struct {
+			List []string
+			Pad  bool
+		}, len(req))
+		for k, val := range req {
+			v[k] = struct {
+				List []string
+				Pad  bool
+			}{
+				List: val,
+			}
+		}
+		raw.Security = append(raw.Security, v)
+	}
+
+	err := gob.NewEncoder(&b).Encode(raw)
+	return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements
+func (o *SwaggerProps) GobDecode(b []byte) error {
+	var raw gobSwaggerPropsAlias
+
+	buf := bytes.NewBuffer(b)
+	err := gob.NewDecoder(buf).Decode(&raw)
+	if err != nil {
+		return err
+	}
+	if raw.Alias == nil {
+		return nil
+	}
+
+	switch {
+	case raw.SecurityIsEmpty:
+		// empty, but non-nil security requirement
+		raw.Alias.Security = []map[string][]string{}
+	case len(raw.Alias.Security) == 0:
+		// nil security requirement
+		raw.Alias.Security = nil
+	default:
+		raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security))
+		for _, req := range raw.Security {
+			v := make(map[string][]string, len(req))
+			for k, val := range req {
+				v[k] = make([]string, 0, len(val.List))
+				v[k] = append(v[k], val.List...)
+			}
+			raw.Alias.Security = append(raw.Alias.Security, v)
+		}
+	}
+
+	*o = *(*SwaggerProps)(raw.Alias)
+	return nil
+}
+
+// Dependencies represent a dependencies property
+type Dependencies map[string]SchemaOrStringArray
+
+// SchemaOrBool represents a schema or boolean value; it is biased towards true for the boolean property
+type SchemaOrBool struct {
+	Allows bool
+	Schema *Schema
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) {
+	if token == "allows" {
+		return s.Allows, nil
+	}
+	r, _, err := jsonpointer.GetForToken(s.Schema, token)
+	return r, err
+}
+
+var jsTrue = []byte("true")
+var jsFalse = []byte("false")
+
+// MarshalJSON convert this object to JSON
+func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
+	if s.Schema != nil {
+		return json.Marshal(s.Schema)
+	}
+
+	if s.Schema == nil && !s.Allows {
+		return jsFalse, nil
+	}
+	return jsTrue, nil
+}
+
+// UnmarshalJSON converts this bool or schema object from a JSON structure
+func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
+	var nw SchemaOrBool
+	if len(data) >= 4 {
+		if data[0] == '{' {
+			var sch Schema
+			if err := json.Unmarshal(data, &sch); err != nil {
+				return err
+			}
+			nw.Schema = &sch
+		}
+		// compare against the full token rather than indexing data[4],
+		// which could run past the end of a 4-byte input
+		nw.Allows = !bytes.Equal(data, jsFalse)
+	}
+	*s = nw
+	return nil
+}
+
+// SchemaOrStringArray represents a schema or a string array
+type SchemaOrStringArray struct {
+	Schema   *Schema
+	Property []string
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) {
+	r, _, err := jsonpointer.GetForToken(s.Schema, token)
+	return r, err
+}
+
+// MarshalJSON converts this schema object or array into JSON structure
+func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
+	if len(s.Property) > 0 {
+		return json.Marshal(s.Property)
+	}
+	if s.Schema != nil {
+		return json.Marshal(s.Schema)
+	}
+	return []byte("null"), nil
+}
+
+// UnmarshalJSON converts this schema object or array from a JSON structure
+func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error {
+	var first byte
+	if len(data) > 1 {
+		first = data[0]
+	
} + var nw SchemaOrStringArray + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 000000000..faa3d3de1 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,75 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// TagProps describe a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} +} + +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (t Tag) JSONLookup(token string) (interface{}, error) { + if ex, ok := t.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(t.TagProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/url_go18.go b/vendor/github.com/go-openapi/spec/url_go18.go new file mode 100644 index 000000000..60b785153 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/url_go18.go @@ -0,0 +1,8 @@ +//go:build !go1.19 +// +build !go1.19 + +package spec + +import "net/url" + +var parseURL = url.Parse diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go new file mode 100644 index 000000000..392e3e639 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/url_go19.go @@ -0,0 +1,14 @@ +//go:build go1.19 +// +build go1.19 + +package spec + +import "net/url" + +func parseURL(s string) (*url.URL, error) { + u, err := url.Parse(s) + if err == nil { + u.OmitHost = false + } + return u, err +} diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go new file mode 100644 index 000000000..6360a8ea7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/validations.go @@ -0,0 +1,215 @@ +package spec + +// CommonValidations describe common JSON-schema validations +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + +// SetValidations defines all validations for a simple schema. +// +// NOTE: the input is the larger set of validations available for schemas. +// For simple schemas, MinProperties and MaxProperties are ignored. 
+func (v *CommonValidations) SetValidations(val SchemaValidations) { + v.Maximum = val.Maximum + v.ExclusiveMaximum = val.ExclusiveMaximum + v.Minimum = val.Minimum + v.ExclusiveMinimum = val.ExclusiveMinimum + v.MaxLength = val.MaxLength + v.MinLength = val.MinLength + v.Pattern = val.Pattern + v.MaxItems = val.MaxItems + v.MinItems = val.MinItems + v.UniqueItems = val.UniqueItems + v.MultipleOf = val.MultipleOf + v.Enum = val.Enum +} + +type clearedValidation struct { + Validation string + Value interface{} +} + +type clearedValidations []clearedValidation + +func (c clearedValidations) apply(cbs []func(string, interface{})) { + for _, cb := range cbs { + for _, cleared := range c { + cb(cleared.Validation, cleared.Value) + } + } +} + +// ClearNumberValidations clears all number validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 5) + defer func() { + done.apply(cbs) + }() + + if v.Minimum != nil { + done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) + v.Minimum = nil + } + if v.Maximum != nil { + done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) + v.Maximum = nil + } + if v.ExclusiveMaximum { + done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) + v.ExclusiveMaximum = false + } + if v.ExclusiveMinimum { + done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) + v.ExclusiveMinimum = false + } + if v.MultipleOf != nil { + done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) + v.MultipleOf = nil + } +} + +// ClearStringValidations clears all string validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearStringValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.Pattern != "" { + done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) + v.Pattern = "" + } + if v.MinLength != nil { + done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) + v.MinLength = nil + } + if v.MaxLength != nil { + done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) + v.MaxLength = nil + } +} + +// ClearArrayValidations clears all array validations. +// +// Some callbacks may be set by the caller to capture changed values. +func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, interface{})) { + done := make(clearedValidations, 0, 3) + defer func() { + done.apply(cbs) + }() + + if v.MaxItems != nil { + done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) + v.MaxItems = nil + } + if v.MinItems != nil { + done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) + v.MinItems = nil + } + if v.UniqueItems { + done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) + v.UniqueItems = false + } +} + +// Validations returns a clone of the validations for a simple schema. +// +// NOTE: in the context of simple schema objects, MinProperties, MaxProperties +// and PatternProperties remain unset. 
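The `Clear*Validations` helpers above report every validation they reset through optional callbacks. A small usage sketch, assuming only the exported API shown in this file:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	max := 10.0
	v := spec.CommonValidations{
		Maximum: &max,
		Pattern: "^[a-z]+$",
	}

	// The callback receives the name of each cleared validation and its
	// previous value (a pointer for numeric and length constraints).
	report := func(name string, _ interface{}) {
		fmt.Println("cleared:", name)
	}

	v.ClearNumberValidations(report) // prints: cleared: maximum
	v.ClearStringValidations(report) // prints: cleared: pattern

	fmt.Println(v.HasNumberValidations(), v.HasStringValidations()) // false false
}
```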
+func (v CommonValidations) Validations() SchemaValidations {
+	return SchemaValidations{
+		CommonValidations: v,
+	}
+}
+
+// HasNumberValidations indicates if the validations are for numbers or integers
+func (v CommonValidations) HasNumberValidations() bool {
+	return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil
+}
+
+// HasStringValidations indicates if the validations are for strings
+func (v CommonValidations) HasStringValidations() bool {
+	return v.MaxLength != nil || v.MinLength != nil || v.Pattern != ""
+}
+
+// HasArrayValidations indicates if the validations are for arrays
+func (v CommonValidations) HasArrayValidations() bool {
+	return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems
+}
+
+// HasEnum indicates if the validation includes some enum constraint
+func (v CommonValidations) HasEnum() bool {
+	return len(v.Enum) > 0
+}
+
+// SchemaValidations describes the validation properties of a schema
+//
+// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change
+// in the exported members: all initializers using literals would fail.
+type SchemaValidations struct {
+	CommonValidations
+
+	PatternProperties SchemaProperties `json:"patternProperties,omitempty"`
+	MaxProperties     *int64           `json:"maxProperties,omitempty"`
+	MinProperties     *int64           `json:"minProperties,omitempty"`
+}
+
+// HasObjectValidations indicates if the validations are for objects
+func (v SchemaValidations) HasObjectValidations() bool {
+	return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil
+}
+
+// SetValidations for schema validations
+func (v *SchemaValidations) SetValidations(val SchemaValidations) {
+	v.CommonValidations.SetValidations(val)
+	v.PatternProperties = val.PatternProperties
+	v.MaxProperties = val.MaxProperties
+	v.MinProperties = val.MinProperties
+}
+
+// Validations for a schema
+func (v SchemaValidations) Validations() SchemaValidations {
+	val := v.CommonValidations.Validations()
+	val.PatternProperties = v.PatternProperties
+	val.MinProperties = v.MinProperties
+	val.MaxProperties = v.MaxProperties
+	return val
+}
+
+// ClearObjectValidations clears all object validations.
+//
+// Some callbacks may be set by the caller to capture changed values.
+func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, interface{})) {
+	done := make(clearedValidations, 0, 3)
+	defer func() {
+		done.apply(cbs)
+	}()
+
+	if v.MaxProperties != nil {
+		done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties})
+		v.MaxProperties = nil
+	}
+	if v.MinProperties != nil {
+		done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties})
+		v.MinProperties = nil
+	}
+	if v.PatternProperties != nil {
+		done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties})
+		v.PatternProperties = nil
+	}
+}
diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go
new file mode 100644
index 000000000..945a46703
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/xml_object.go
@@ -0,0 +1,68 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// XMLObject a metadata object that allows for more fine-tuned XML model definitions.
+//
+// For more information: http://goo.gl/8us55a#xmlObject
+type XMLObject struct {
+	Name      string `json:"name,omitempty"`
+	Namespace string `json:"namespace,omitempty"`
+	Prefix    string `json:"prefix,omitempty"`
+	Attribute bool   `json:"attribute,omitempty"`
+	Wrapped   bool   `json:"wrapped,omitempty"`
+}
+
+// WithName sets the xml name for the object
+func (x *XMLObject) WithName(name string) *XMLObject {
+	x.Name = name
+	return x
+}
+
+// WithNamespace sets the xml namespace for the object
+func (x *XMLObject) WithNamespace(namespace string) *XMLObject {
+	x.Namespace = namespace
+	return x
+}
+
+// WithPrefix sets the xml prefix for the object
+func (x *XMLObject) WithPrefix(prefix string) *XMLObject {
+	x.Prefix = prefix
+	return x
+}
+
+// AsAttribute flags this object as xml attribute
+func (x *XMLObject) AsAttribute() *XMLObject {
+	x.Attribute = true
+	return x
+}
+
+// AsElement flags this object as an xml node
+func (x *XMLObject) AsElement() *XMLObject {
+	x.Attribute = false
+	return x
+}
+
+// AsWrapped flags this object as wrapped; this is mostly useful for array types
+func (x *XMLObject) AsWrapped() *XMLObject {
+	x.Wrapped = true
+	return x
+}
+
+// AsUnwrapped flags this object as unwrapped
+func (x *XMLObject) AsUnwrapped() *XMLObject {
+	x.Wrapped = false
+	return x
+}
diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig
new file mode 100644
index 000000000..3152da69a
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/strfmt/.gitattributes b/vendor/github.com/go-openapi/strfmt/.gitattributes
new file mode 100644
index 000000000..d020be8ea
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.gitattributes
@@ -0,0 +1,2 @@
+*.go text eol=lf
+
diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore
new file mode 100644
index 000000000..dd91ed6a0
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.gitignore
@@ -0,0 +1,2 @@
+secrets.yml
+coverage.out
diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml
new file mode 100644
index 000000000..be4899cb1
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml
@@ -0,0 +1,59 @@
+linters-settings:
+  govet:
+    check-shadowing: true
+  golint:
+    min-confidence: 0
+  gocyclo:
+    
min-complexity: 31 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable: + - revive + - goimports + - gosec + - unparam + - unconvert + - predeclared + - prealloc + - misspell + + # disable: + # - maligned + # - lll + # - gochecknoinits + # - gochecknoglobals + # - godox + # - gocognit + # - whitespace + # - wsl + # - funlen + # - wrapcheck + # - testpackage + # - nlreturn + # - gofumpt + # - goerr113 + # - gci + # - gomnd + # - godot + # - exhaustivestruct + # - paralleltest + # - varnamelen + # - ireturn + # - exhaustruct + # #- thelper + +issues: + exclude-rules: + - path: bson.go + text: "should be .*ObjectID" + linters: + - golint + - stylecheck + diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. 
All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md
new file mode 100644
index 000000000..0cf89d776
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/README.md
@@ -0,0 +1,88 @@
+# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
+[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt)
+[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt)
+
+This package exposes a registry of data types to support string formats in the go-openapi toolkit.
+
+strfmt represents a well-known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those.
+
+## Supported data formats
+go-openapi/strfmt follows the swagger 2.0 specification with the following formats
+defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types).
+
+It also provides convenient extensions to go-openapi users.
+
+- [x] JSON-schema draft 4 formats
+  - date-time
+  - email
+  - hostname
+  - ipv4
+  - ipv6
+  - uri
+- [x] swagger 2.0 format extensions
+  - binary
+  - byte (e.g. base64 encoded string)
+  - date (e.g. "1970-01-01")
+  - password
+- [x] go-openapi custom format extensions
+  - bsonobjectid (BSON objectID)
+  - creditcard
+  - duration (e.g. "3 weeks", "1ms")
+  - hexcolor (e.g. "#FFFFFF")
+  - isbn, isbn10, isbn13
+  - mac (e.g. "01:02:03:04:05:06")
+  - rgbcolor (e.g. "rgb(100,100,100)")
+  - ssn
+  - uuid, uuid3, uuid4, uuid5
+  - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32")
+  - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec))
+
+> NOTE: as the name suggests, this package is intended to support string formatting only.
+> It does not provide validation for numerical values with the swagger format extensions for JSON types "number" or
+> "integer" (e.g. float, double, int32...).
+
+## Type conversion
+
+All types defined here are stringers and may be converted to strings with `.String()`.
+Note that most types defined by this package may be converted directly to string like `string(Email{})`.
+
+`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(DateTime{})`.
+Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})`.
+
+## Using pointers
+
+The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does
+with primitive types.
+
+## Format types
+Types defined in strfmt expose marshaling and validation capabilities.
+
+List of defined types:
+- Base64
+- CreditCard
+- Date
+- DateTime
+- Duration
+- Email
+- HexColor
+- Hostname
+- IPv4
+- IPv6
+- CIDR
+- ISBN
+- ISBN10
+- ISBN13
+- MAC
+- ObjectId
+- Password
+- RGBColor
+- SSN
+- URI
+- UUID
+- UUID3
+- UUID4
+- UUID5
+- [ULID](https://github.com/ulid/spec)
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go
new file mode 100644
index 000000000..a8a3604a2
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/bson.go
@@ -0,0 +1,165 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
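To make the README's "Type conversion" and "Format types" sections concrete, here is a short hedged sketch of validating and converting one of these formats; it assumes the default registry is seeded with the formats this package registers:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Validate raw strings against formats registered in the default registry.
	fmt.Println(strfmt.Default.Validates("date", "1970-01-01"))    // true
	fmt.Println(strfmt.Default.Validates("email", "not-an-email")) // false

	// Parse into the concrete type, then convert to time.Time as described above.
	var d strfmt.Date
	if err := d.UnmarshalText([]byte("2023-09-21")); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(d).Year()) // 2023
}
```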
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	bsonprim "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+func init() {
+	var id ObjectId
+	// register this format in the default registry
+	Default.Add("bsonobjectid", &id, IsBSONObjectID)
+}
+
+// IsBSONObjectID returns true when the string is a valid BSON.ObjectId
+func IsBSONObjectID(str string) bool {
+	_, err := bsonprim.ObjectIDFromHex(str)
+	return err == nil
+}
+
+// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
+//
+// swagger:strfmt bsonobjectid
+type ObjectId bsonprim.ObjectID //nolint:revive
+
+// NewObjectId creates an ObjectId from a hex string
+func NewObjectId(hex string) ObjectId { //nolint:revive
+	oid, err := bsonprim.ObjectIDFromHex(hex)
+	if err != nil {
+		panic(err)
+	}
+	return ObjectId(oid)
+}
+
+// MarshalText turns this instance into text
+func (id ObjectId) MarshalText() ([]byte, error) {
+	oid := bsonprim.ObjectID(id)
+	if oid == bsonprim.NilObjectID {
+		return nil, nil
+	}
+	return []byte(oid.Hex()), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on
+	if len(data) == 0 {
+		*id = ObjectId(bsonprim.NilObjectID)
+		return nil
+	}
+	oidstr := string(data)
+	oid, err := bsonprim.ObjectIDFromHex(oidstr)
+	if err != nil {
+		return err
+	}
+	*id = ObjectId(oid)
+	return nil
+}
+
+// Scan reads a value from a database driver
+func (id *ObjectId) Scan(raw interface{}) error {
+	var data []byte
+	switch v := raw.(type) {
+	case []byte:
+		data = v
+	case string:
+		data = []byte(v)
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.ObjectId from: %#v", v)
+	}
+
+	return id.UnmarshalText(data)
+}
+
+// Value converts a value to a database driver value
+func (id ObjectId) Value() (driver.Value, error) {
+	return driver.Value(bsonprim.ObjectID(id).Hex()), nil
+}
+
+func (id ObjectId) String() string {
+	return bsonprim.ObjectID(id).Hex()
+}
+
+// MarshalJSON returns the ObjectId as JSON
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return bsonprim.ObjectID(id).MarshalJSON()
+}
+
+// UnmarshalJSON sets the ObjectId from JSON
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	var obj bsonprim.ObjectID
+	if err := obj.UnmarshalJSON(data); err != nil {
+		return err
+	}
+	*id = ObjectId(obj)
+	return nil
+}
+
+// MarshalBSON renders the object id as a BSON document
+func (id ObjectId) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)})
+}
+
+// UnmarshalBSON reads the objectId from a BSON document
+func (id *ObjectId) UnmarshalBSON(data []byte) error {
+	var obj struct {
+		Data bsonprim.ObjectID
+	}
+	if err := bson.Unmarshal(data, &obj); err != nil {
+		return err
+	}
+	*id = ObjectId(obj.Data)
+	return nil
+}
+
+// MarshalBSONValue is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	oid := bsonprim.ObjectID(id)
+	return bsontype.ObjectID, oid[:], nil
+}
+
+// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error {
+	var oid bsonprim.ObjectID
+	copy(oid[:], data)
+	*id = ObjectId(oid)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (id *ObjectId) DeepCopyInto(out *ObjectId) {
+	*out = *id
+}
+
+// DeepCopy copies the receiver into a new ObjectId.
+func (id *ObjectId) DeepCopy() *ObjectId {
+	if id == nil {
+		return nil
+	}
+	out := new(ObjectId)
+	id.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go
new file mode 100644
index 000000000..3c93381c7
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/date.go
@@ -0,0 +1,187 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+func init() {
+	d := Date{}
+	// register this format in the default registry
+	Default.Add("date", &d, IsDate)
+}
+
+// IsDate returns true when the string is a valid date
+func IsDate(str string) bool {
+	_, err := time.Parse(RFC3339FullDate, str)
+	return err == nil
+}
+
+const (
+	// RFC3339FullDate represents a full-date as specified by RFC3339
+	// See: http://goo.gl/xXOvVd
+	RFC3339FullDate = "2006-01-02"
+)
+
+// Date represents a date from the API
+//
+// swagger:strfmt date
+type Date time.Time
+
+// String converts this date into a string
+func (d Date) String() string {
+	return time.Time(d).Format(RFC3339FullDate)
+}
+
+// UnmarshalText parses a text representation into a date type
+func (d *Date) UnmarshalText(text []byte) error {
+	if len(text) == 0 {
+		return nil
+	}
+	dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation)
+	if err != nil {
+		return err
+	}
+	*d = Date(dd)
+	return nil
+}
+
+// MarshalText serializes this date type to string
+func (d Date) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// Scan scans a Date value from a database driver type.
+func (d *Date) Scan(raw interface{}) error {
+	switch v := raw.(type) {
+	case []byte:
+		return d.UnmarshalText(v)
+	case string:
+		return d.UnmarshalText([]byte(v))
+	case time.Time:
+		*d = Date(v)
+		return nil
+	case nil:
+		*d = Date{}
+		return nil
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v)
+	}
+}
+
+// Value converts Date to a primitive value ready to be written to a database.
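+//
+// For example (illustrative):
+//
+//	var d Date
+//	_ = d.Scan("2023-01-02") // parsed with the RFC3339FullDate layout
+//	v, _ := d.Value()        // driver.Value("2023-01-02")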
+func (d Date) Value() (driver.Value, error) { + return driver.Value(d.String()), nil +} + +// MarshalJSON returns the Date as JSON +func (d Date) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(d).Format(RFC3339FullDate)) +} + +// UnmarshalJSON sets the Date from JSON +func (d *Date) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var strdate string + if err := json.Unmarshal(data, &strdate); err != nil { + return err + } + tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(tt) + return nil +} + +func (d Date) MarshalBSON() ([]byte, error) { + return bson.Marshal(bson.M{"data": d.String()}) +} + +func (d *Date) UnmarshalBSON(data []byte) error { + var m bson.M + if err := bson.Unmarshal(data, &m); err != nil { + return err + } + + if data, ok := m["data"].(string); ok { + rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation) + if err != nil { + return err + } + *d = Date(rd) + return nil + } + + return errors.New("couldn't unmarshal bson bytes value as Date") +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (d *Date) DeepCopyInto(out *Date) { + *out = *d +} + +// DeepCopy copies the receiver into a new Date. +func (d *Date) DeepCopy() *Date { + if d == nil { + return nil + } + out := new(Date) + d.DeepCopyInto(out) + return out +} + +// GobEncode implements the gob.GobEncoder interface. +func (d Date) GobEncode() ([]byte, error) { + return d.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface. +func (d *Date) GobDecode(data []byte) error { + return d.UnmarshalBinary(data) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d Date) MarshalBinary() ([]byte, error) { + return time.Time(d).MarshalBinary() +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Date) UnmarshalBinary(data []byte) error { + var original time.Time + + err := original.UnmarshalBinary(data) + if err != nil { + return err + } + + *d = Date(original) + + return nil +} + +// Equal checks if two Date instances are equal +func (d Date) Equal(d2 Date) bool { + return time.Time(d).Equal(time.Time(d2)) +} diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go new file mode 100644 index 000000000..a89a4de3f --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/default.go @@ -0,0 +1,2035 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
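+
+// Each format file in this package wires its type into the Default registry
+// from an init() function, passing a format name and a Validator predicate;
+// date.go above, for instance, does:
+//
+//	func init() {
+//		d := Date{}
+//		Default.Add("date", &d, IsDate)
+//	}
+//
+// Registered formats can then be checked by name, e.g. (illustrative)
+// Default.Validates("date", "2006-01-02") returning true.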
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/mail"
+	"regexp"
+	"strings"
+
+	"github.com/asaskevich/govalidator"
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+const (
+	// HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114
+	//  A string instance is valid against this attribute if it is a valid
+	//  representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
+	//  http://tools.ietf.org/html/rfc1034#section-3.5
+	//  <digit> ::= any one of the ten digits 0 through 9
+	//  var digit = /[0-9]/;
+	//  <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case
+	//  var letter = /[a-zA-Z]/;
+	//  <let-dig> ::= <letter> | <digit>
+	//  var letDig = /[0-9a-zA-Z]/;
+	//  <let-dig-hyp> ::= <let-dig> | "-"
+	//  var letDigHyp = /[-0-9a-zA-Z]/;
+	//  <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
+	//  var ldhStr = /[-0-9a-zA-Z]+/;
+	//